Mirror of https://github.com/lancedb/lancedb.git (synced 2025-12-23 13:29:57 +00:00)

Compare commits (52 commits): python-v0. ... rmeng/patc
| SHA1 |
|---|
| 24526bda4c |
| 055efdcdb6 |
| bc582bb702 |
| df9c41f342 |
| 0bd6ac945e |
| c9d5475333 |
| 3850d5fb35 |
| b37c58342e |
| a06e64f22d |
| e983198f0e |
| 76e7b4abf8 |
| 5f6eb4651e |
| 805c78bb20 |
| 4746281b21 |
| 7b3b6bdccd |
| 37e1124c0f |
| 93f037ee41 |
| e4fc06825a |
| fe89a373a2 |
| 3d3915edef |
| e2e8b6aee4 |
| 12dbca5248 |
| a6babfa651 |
| 75ede86fab |
| becd649130 |
| 9d2fb7d602 |
| fdb5d6fdf1 |
| 2f13fa225f |
| e933de003d |
| 05fd387425 |
| 82a1da554c |
| a7c0d80b9e |
| 71323a064a |
| df48454b70 |
| 6603414885 |
| c256f6c502 |
| cc03f90379 |
| 975da09b02 |
| c32e17b497 |
| 0528abdf97 |
| 1090c311e8 |
| e767cbb374 |
| 3d7c48feca |
| 08d62550bb |
| b272408b05 |
| 46ffa87cd4 |
| cd9fc37b95 |
| 431f94e564 |
| c1a7d65473 |
| 1e5ccb1614 |
| 2e7ab373dc |
| c7fbc4aaee |
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.4.17
+current_version = 0.4.20
 commit = True
 message = Bump version: {current_version} → {new_version}
 tag = True
.github/labeler.yml (vendored, new file, 33 lines)
@@ -0,0 +1,33 @@
+version: 1
+appendOnly: true
+# Labels are applied based on conventional commits standard
+# https://www.conventionalcommits.org/en/v1.0.0/
+# These labels are later used in release notes. See .github/release.yml
+labels:
+  # If the PR title has an ! before the : it will be considered a breaking change
+  # For example, `feat!: add new feature` will be considered a breaking change
+  - label: breaking-change
+    title: "^[^:]+!:.*"
+  - label: breaking-change
+    body: "BREAKING CHANGE"
+  - label: enhancement
+    title: "^feat(\\(.+\\))?!?:.*"
+  - label: bug
+    title: "^fix(\\(.+\\))?!?:.*"
+  - label: documentation
+    title: "^docs(\\(.+\\))?!?:.*"
+  - label: performance
+    title: "^perf(\\(.+\\))?!?:.*"
+  - label: ci
+    title: "^ci(\\(.+\\))?!?:.*"
+  - label: chore
+    title: "^(chore|test|build|style)(\\(.+\\))?!?:.*"
+  - label: Python
+    files:
+      - "^python\\/.*"
+  - label: Rust
+    files:
+      - "^rust\\/.*"
+  - label: typescript
+    files:
+      - "^node\\/.*"
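These title patterns are ordinary regular expressions, so their effect is easy to verify locally. A minimal sketch in Python (patterns copied from the file above with the YAML `\\` escaping removed; the sample title is hypothetical):

```python
import re

# Title patterns from .github/labeler.yml, unescaped for Python raw strings.
patterns = {
    "breaking-change": r"^[^:]+!:.*",
    "enhancement": r"^feat(\(.+\))?!?:.*",
    "bug": r"^fix(\(.+\))?!?:.*",
}

title = "feat(python)!: rework the embeddings registry"  # hypothetical PR title
print([label for label, pattern in patterns.items() if re.match(pattern, title)])
# -> ['breaking-change', 'enhancement']
```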
.github/release.yml (vendored, new file, 25 lines)
@@ -0,0 +1,25 @@
+# TODO: create separate templates for Python and other releases.
+changelog:
+  exclude:
+    labels:
+      - ci
+      - chore
+  categories:
+    - title: Breaking Changes 🛠
+      labels:
+        - breaking-change
+    - title: New Features 🎉
+      labels:
+        - enhancement
+    - title: Bug Fixes 🐛
+      labels:
+        - bug
+    - title: Documentation 📚
+      labels:
+        - documentation
+    - title: Performance Improvements 🚀
+      labels:
+        - performance
+    - title: Other Changes
+      labels:
+        - "*"
.github/workflows/dev.yml (vendored, new file, 81 lines)
@@ -0,0 +1,81 @@
+name: PR Checks
+
+on:
+  pull_request_target:
+    types: [opened, edited, synchronize, reopened]
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  labeler:
+    permissions:
+      pull-requests: write
+    name: Label PR
+    runs-on: ubuntu-latest
+    steps:
+      - uses: srvaroa/labeler@master
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+  commitlint:
+    permissions:
+      pull-requests: write
+    name: Verify PR title / description conforms to semantic-release
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-node@v3
+        with:
+          node-version: "18"
+      # These rules are disabled because Github will always ensure there
+      # is a blank line between the title and the body and Github will
+      # word wrap the description field to ensure a reasonable max line
+      # length.
+      - run: npm install @commitlint/config-conventional
+      - run: >
+          echo 'module.exports = {
+          "rules": {
+          "body-max-line-length": [0, "always", Infinity],
+          "footer-max-line-length": [0, "always", Infinity],
+          "body-leading-blank": [0, "always"]
+          }
+          }' > .commitlintrc.js
+      - run: npx commitlint --extends @commitlint/config-conventional --verbose <<< $COMMIT_MSG
+        env:
+          COMMIT_MSG: >
+            ${{ github.event.pull_request.title }}
+
+            ${{ github.event.pull_request.body }}
+      - if: failure()
+        uses: actions/github-script@v6
+        with:
+          script: |
+            const message = `**ACTION NEEDED**
+
+            Lance follows the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) for release automation.
+
+            The PR title and description are used as the merge commit message.\
+            Please update your PR title and description to match the specification.
+
+            For details on the error please inspect the "PR Title Check" action.
+            `
+            // Get list of current comments
+            const comments = await github.paginate(github.rest.issues.listComments, {
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              issue_number: context.issue.number
+            });
+            // Check if this job already commented
+            for (const comment of comments) {
+              if (comment.body === message) {
+                return // Already commented
+              }
+            }
+            // Post the comment about Conventional Commits
+            github.rest.issues.createComment({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              issue_number: context.issue.number,
+              body: message
+            })
+            core.setFailed(message)
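The commitlint job validates the concatenation of the PR title and body rather than an actual commit, since the two become the merge commit message. A sketch of the string it effectively checks (the title and body are hypothetical):

```python
# Hypothetical PR fields; the workflow reads them from the pull_request event.
title = "fix(node): return every embedding from embed_func"
body = "The helper previously returned only the first embedding."

# COMMIT_MSG as assembled by the workflow: title, blank line, body.
commit_msg = f"{title}\n\n{body}"
print(commit_msg)
```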
.github/workflows/nodejs.yml (vendored, 3 changed lines)
@@ -52,8 +52,7 @@ jobs:
           cargo fmt --all -- --check
           cargo clippy --all --all-features -- -D warnings
           npm ci
-          npm run lint
-          npm run chkformat
+          npm run lint-ci
   linux:
     name: Linux (NodeJS ${{ matrix.node-version }})
     timeout-minutes: 30
.gitignore (vendored, 2 changed lines)
@@ -6,7 +6,7 @@
 venv
 
 .vscode
-
+.zed
 rust/target
 rust/Cargo.lock
 
@@ -10,9 +10,12 @@ repos:
     rev: v0.2.2
     hooks:
      - id: ruff
-  - repo: https://github.com/pre-commit/mirrors-prettier
-    rev: v3.1.0
+  - repo: local
     hooks:
-     - id: prettier
+     - id: local-biome-check
+       name: biome check
+       entry: npx biome check
+       language: system
+       types: [text]
        files: "nodejs/.*"
        exclude: nodejs/lancedb/native.d.ts|nodejs/dist/.*
Cargo.toml (26 changed lines)
@@ -14,22 +14,22 @@ keywords = ["lancedb", "lance", "database", "vector", "search"]
 categories = ["database-implementations"]
 
 [workspace.dependencies]
-lance = { "version" = "=0.10.12", "features" = ["dynamodb"] }
-lance-index = { "version" = "=0.10.12" }
-lance-linalg = { "version" = "=0.10.12" }
-lance-testing = { "version" = "=0.10.12" }
+lance = { "version" = "=0.10.18", "features" = ["dynamodb"] }
+lance-index = { "version" = "=0.10.18" }
+lance-linalg = { "version" = "=0.10.18" }
+lance-testing = { "version" = "=0.10.18" }
 # Note that this one does not include pyarrow
-arrow = { version = "50.0", optional = false }
-arrow-array = "50.0"
-arrow-data = "50.0"
-arrow-ipc = "50.0"
-arrow-ord = "50.0"
-arrow-schema = "50.0"
-arrow-arith = "50.0"
-arrow-cast = "50.0"
+arrow = { version = "51.0", optional = false }
+arrow-array = "51.0"
+arrow-data = "51.0"
+arrow-ipc = "51.0"
+arrow-ord = "51.0"
+arrow-schema = "51.0"
+arrow-arith = "51.0"
+arrow-cast = "51.0"
 async-trait = "0"
 chrono = "0.4.35"
-half = { "version" = "=2.3.1", default-features = false, features = [
+half = { "version" = "=2.4.1", default-features = false, features = [
     "num-traits",
 ] }
 futures = "0"
@@ -20,7 +20,7 @@
 
 <hr />
 
-LanceDB is an open-source database for vector-search built with persistent storage, which greatly simplifies retrevial, filtering and management of embeddings.
+LanceDB is an open-source database for vector-search built with persistent storage, which greatly simplifies retrieval, filtering and management of embeddings.
 
 The key features of LanceDB include:
 
@@ -36,7 +36,7 @@ The key features of LanceDB include:
 
 * GPU support in building vector index(*).
 
-* Ecosystem integrations with [LangChain 🦜️🔗](https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lanecdb.html), [LlamaIndex 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way.
+* Ecosystem integrations with [LangChain 🦜️🔗](https://python.langchain.com/docs/integrations/vectorstores/lancedb/), [LlamaIndex 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way.
 
 LanceDB's core is written in Rust 🦀 and is built using <a href="https://github.com/lancedb/lance">Lance</a>, an open-source columnar format designed for performant ML workloads.
 
@@ -119,7 +119,7 @@ nav:
       - Polars: python/polars_arrow.md
       - DuckDB: python/duckdb.md
       - LangChain:
-          - LangChain 🔗: https://python.langchain.com/docs/integrations/vectorstores/lancedb/
+          - LangChain 🔗: integrations/langchain.md
           - LangChain JS/TS 🔗: https://js.langchain.com/docs/integrations/vectorstores/lancedb
       - LlamaIndex 🦙: https://docs.llamaindex.ai/en/stable/examples/vector_stores/LanceDBIndexDemo/
       - Pydantic: python/pydantic.md
@@ -159,7 +159,7 @@ Allows you to set parameters when registering a `sentence-transformers` object.
 from lancedb.embeddings import get_registry
 
 db = lancedb.connect("/tmp/db")
-model = get_registry.get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
+model = get_registry().get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
 
 class Words(LanceModel):
     text: str = model.SourceField()
@@ -206,6 +206,44 @@ print(actual.text)
 ```
 
 
+### Ollama embeddings
+Generate embeddings via the [ollama](https://github.com/ollama/ollama-python) python library. More details:
+
+- [Ollama docs on embeddings](https://github.com/ollama/ollama/blob/main/docs/api.md#generate-embeddings)
+- [Ollama blog on embeddings](https://ollama.com/blog/embedding-models)
+
+| Parameter | Type | Default Value | Description |
+|---|---|---|---|
+| `name` | `str` | `nomic-embed-text` | The name of the model. |
+| `host` | `str` | `http://localhost:11434` | The Ollama host to connect to. |
+| `options` | `ollama.Options` or `dict` | `None` | Additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`. |
+| `keep_alive` | `float` or `str` | `"5m"` | Controls how long the model will stay loaded into memory following the request. |
+| `ollama_client_kwargs` | `dict` | `{}` | kwargs that can be passed to the `ollama.Client`. |
+
+```python
+import lancedb
+from lancedb.pydantic import LanceModel, Vector
+from lancedb.embeddings import get_registry
+
+db = lancedb.connect("/tmp/db")
+func = get_registry().get("ollama").create(name="nomic-embed-text")
+
+class Words(LanceModel):
+    text: str = func.SourceField()
+    vector: Vector(func.ndims()) = func.VectorField()
+
+table = db.create_table("words", schema=Words, mode="overwrite")
+table.add([
+    {"text": "hello world"},
+    {"text": "goodbye world"}
+])
+
+query = "greetings"
+actual = table.search(query).limit(1).to_pydantic(Words)[0]
+print(actual.text)
+```
+
+
 ### OpenAI embeddings
 LanceDB registers the OpenAI embeddings function in the registry by default, as `openai`. Below are the parameters that you can customize when creating the instances:
 
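The parameter table above maps directly onto keyword arguments of `create()`. A hedged sketch of a more customized registration (the values are illustrative and assume a local Ollama server):

```python
from lancedb.embeddings import get_registry

# Illustrative values; each keyword mirrors a row of the parameter table.
func = get_registry().get("ollama").create(
    name="nomic-embed-text",
    host="http://localhost:11434",
    options={"temperature": 0.0},  # forwarded as ollama.Options
    keep_alive="5m",               # keep the model resident between requests
)
```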
@@ -46,7 +46,7 @@ For this purpose, LanceDB introduces an **embedding functions API**, that allow
 
 ```python
 class Pets(LanceModel):
-    vector: Vector(clip.ndims) = clip.VectorField()
+    vector: Vector(clip.ndims()) = clip.VectorField()
     image_uri: str = clip.SourceField()
 ```
 
@@ -149,7 +149,7 @@ You can also use the integration for adding utility operations in the schema. Fo
 
 ```python
 class Pets(LanceModel):
-    vector: Vector(clip.ndims) = clip.VectorField()
+    vector: Vector(clip.ndims()) = clip.VectorField()
     image_uri: str = clip.SourceField()
 
     @property
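Both hunks make the same correction: `ndims` is a method of the embedding function, so it must be called to obtain the integer dimension that `Vector(...)` expects. A short sketch of the distinction (assuming `clip` is a registered embedding function, as in the surrounding docs):

```python
# clip.ndims without parentheses is a bound method object, not a number;
# clip.ndims() returns the embedding width as an int.
dim = clip.ndims()  # e.g. 512 for a CLIP model (illustrative)
assert isinstance(dim, int)
```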
@@ -299,6 +299,14 @@ LanceDB can also connect to S3-compatible stores, such as MinIO. To do so, you m
 
 This can also be done with the ``AWS_ENDPOINT`` and ``AWS_DEFAULT_REGION`` environment variables.
 
+!!! tip "Local servers"
+
+    For local development, the server often has a `http` endpoint rather than a
+    secure `https` endpoint. In this case, you must also set the `ALLOW_HTTP`
+    environment variable to `true` to allow non-TLS connections, or pass the
+    storage option `allow_http` as `true`. If you do not do this, you will get
+    an error like `URL scheme is not allowed`.
+
 #### S3 Express
 
 LanceDB supports [S3 Express One Zone](https://aws.amazon.com/s3/storage-classes/express-one-zone/) endpoints, but requires additional configuration. Also, S3 Express endpoints only support connecting from an EC2 instance within the same region.
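A hedged sketch of the tip above, using the environment variables the docs name (the endpoint and bucket are placeholders for a local MinIO setup):

```python
import os
import lancedb

os.environ["AWS_ENDPOINT"] = "http://localhost:9000"  # placeholder MinIO endpoint
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
os.environ["ALLOW_HTTP"] = "true"  # opt in to non-TLS connections

db = lancedb.connect("s3://my-bucket/my-db")  # hypothetical bucket path
```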
@@ -13,7 +13,7 @@ Get started using these examples and quick links.
 | Integrations | |
 |---|---:|
 | <h3> LlamaIndex </h3>LlamaIndex is a simple, flexible data framework for connecting custom data sources to large language models. Llama index integrates with LanceDB as the serverless VectorDB. <h3>[Learn More](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html) </h3> |<img src="../assets/llama-index.jpg" alt="image" width="150" height="auto">|
-| <h3>Langchain</h3>Langchain allows building applications with LLMs through composability <h3>[Learn More](https://python.langchain.com/docs/integrations/vectorstores/lancedb) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
+| <h3>Langchain</h3>Langchain allows building applications with LLMs through composability <h3>[Learn More](https://lancedb.github.io/lancedb/integrations/langchain/) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
 | <h3>Langchain TS</h3> Javascript bindings for Langchain. It integrates with LanceDB's serverless vectordb allowing you to build powerful AI applications through composability using only serverless functions. <h3>[Learn More]( https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/lancedb) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
 | <h3>Voxel51</h3> It is an open source toolkit that enables you to build better computer vision workflows by improving the quality of your datasets and delivering insights about your models.<h3>[Learn More](./voxel51.md) | <img src="../assets/voxel.gif" alt="image" width="150" height="auto">|
 | <h3>PromptTools</h3> Offers a set of free, open-source tools for testing and experimenting with models, prompts, and configurations. The core idea is to enable developers to evaluate prompts using familiar interfaces like code and notebooks. You can use it to experiment with different configurations of LanceDB, and test how LanceDB integrates with the LLM of your choice.<h3>[Learn More](./prompttools.md) | <img src="../assets/prompttools.jpeg" alt="image" width="150" height="auto">|
docs/src/integrations/langchain.md (new file, 92 lines)
@@ -0,0 +1,92 @@
+# Langchain
+![Illustration](https://ik.imagekit.io/bavbs9mvw/lan,gcchain.jpg?updatedAt=1713545978927)
+
+## Quick Start
+You can load your document data using langchain's loaders; for this example we use `TextLoader` and `OpenAIEmbeddings` as the embedding model.
+```python
+import os
+from langchain.document_loaders import TextLoader
+from langchain.vectorstores import LanceDB
+from langchain_openai import OpenAIEmbeddings
+from langchain_text_splitters import CharacterTextSplitter
+
+os.environ["OPENAI_API_KEY"] = "sk-..."
+
+loader = TextLoader("../../modules/state_of_the_union.txt") # Replace with your data path
+documents = loader.load()
+
+documents = CharacterTextSplitter().split_documents(documents)
+embeddings = OpenAIEmbeddings()
+
+docsearch = LanceDB.from_documents(documents, embeddings)
+query = "What did the president say about Ketanji Brown Jackson"
+docs = docsearch.similarity_search(query)
+print(docs[0].page_content)
+```
+
+## Documentation
+In the above example the `LanceDB` vector store object is created with the `from_documents()` method, which is a `classmethod` that returns the initialized class object.
+You can also use the `LanceDB.from_texts(texts: List[str], embedding: Embeddings)` class method.
+
+The exhaustive list of parameters for the `LanceDB` vector store is:
+- `connection`: (Optional) `lancedb.db.LanceDBConnection` connection object to use. If not provided, a new connection will be created.
+- `embedding`: Langchain embedding model.
+- `vector_key`: (Optional) Column name to use for vectors in the table. Defaults to `'vector'`.
+- `id_key`: (Optional) Column name to use for ids in the table. Defaults to `'id'`.
+- `text_key`: (Optional) Column name to use for text in the table. Defaults to `'text'`.
+- `table_name`: (Optional) Name of your table in the database. Defaults to `'vectorstore'`.
+- `api_key`: (Optional) API key to use for LanceDB cloud database. Defaults to `None`.
+- `region`: (Optional) Region to use for LanceDB cloud database. Only for LanceDB Cloud, defaults to `None`.
+- `mode`: (Optional) Mode to use for adding data to the table. Defaults to `'overwrite'`.
+
+```python
+db_url = "db://lang_test" # url of db you created
+api_key = "xxxxx" # your API key
+region = "us-east-1-dev" # your selected region
+
+vector_store = LanceDB(
+    uri=db_url,
+    api_key=api_key, # (don't include for local API)
+    region=region, # (don't include for local API)
+    embedding=embeddings,
+    table_name='langchain_test' # Optional
+)
+```
+
+### Methods
+To add texts and store the respective embeddings automatically:
+##### add_texts()
+- `texts`: `Iterable` of strings to add to the vectorstore.
+- `metadatas`: Optional `list[dict()]` of metadatas associated with the texts.
+- `ids`: Optional `list` of ids to associate with the texts.
+
+```python
+vector_store.add_texts(texts=['test_123'], metadatas=[{'source': 'wiki'}])
+
+# Additionally, to explore the table you can load it into a df or save it in a csv file:
+tbl = vector_store.get_table()
+print("tbl:", tbl)
+pd_df = tbl.to_pandas()
+pd_df.to_csv("docsearch.csv", index=False)
+
+# you can also create a new vector store object using an older connection object:
+vector_store = LanceDB(connection=tbl, embedding=embeddings)
+```
+For index creation make sure your table has enough data in it. An ANN index is usually not needed for datasets of ~100K vectors. For large-scale (>1M) or higher-dimension vectors, it is beneficial to create an ANN index.
+##### create_index()
+- `col_name`: `Optional[str] = None`
+- `vector_col`: `Optional[str] = None`
+- `num_partitions`: `Optional[int] = 256`
+- `num_sub_vectors`: `Optional[int] = 96`
+- `index_cache_size`: `Optional[int] = None`
+
+```python
+# for creating vector index
+vector_store.create_index(vector_col='vector', metric='cosine')
+
+# for creating scalar index (for non-vector columns)
+vector_store.create_index(col_name='text')
+```
@@ -236,7 +236,10 @@
    "source": [
     "def embed_func(c):\n",
     "    rs = client.embeddings.create(input=c, model=\"text-embedding-ada-002\")\n",
-    "    return [rs.data[0].embedding]"
+    "    return [\n",
+    "        data.embedding\n",
+    "        for data in rs.data\n",
+    "    ]"
    ]
   },
   {
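The change fixes a genuine bug: when `c` holds several inputs, the old cell returned only the first embedding. The corrected cell, written out as plain Python (it assumes an initialized OpenAI `client`, as in the notebook):

```python
def embed_func(c):
    # One request may carry a batch of inputs; the response returns
    # one embedding per input, in order.
    rs = client.embeddings.create(input=c, model="text-embedding-ada-002")
    return [data.embedding for data in rs.data]
```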
@@ -8,6 +8,7 @@ excluded_globs = [
     "../src/embedding.md",
     "../src/examples/*.md",
     "../src/integrations/voxel51.md",
+    "../src/integrations/langchain.md",
     "../src/guides/tables.md",
     "../src/python/duckdb.md",
     "../src/embeddings/*.md",
node/package-lock.json (generated, 74 changed lines)
@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.4.17",
+  "version": "0.4.20",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.4.17",
+      "version": "0.4.20",
       "cpu": [
         "x64",
         "arm64"
@@ -52,11 +52,11 @@
       "uuid": "^9.0.0"
       },
       "optionalDependencies": {
-        "@lancedb/vectordb-darwin-arm64": "0.4.17",
-        "@lancedb/vectordb-darwin-x64": "0.4.17",
-        "@lancedb/vectordb-linux-arm64-gnu": "0.4.17",
-        "@lancedb/vectordb-linux-x64-gnu": "0.4.17",
-        "@lancedb/vectordb-win32-x64-msvc": "0.4.17"
+        "@lancedb/vectordb-darwin-arm64": "0.4.20",
+        "@lancedb/vectordb-darwin-x64": "0.4.20",
+        "@lancedb/vectordb-linux-arm64-gnu": "0.4.20",
+        "@lancedb/vectordb-linux-x64-gnu": "0.4.20",
+        "@lancedb/vectordb-win32-x64-msvc": "0.4.20"
       },
       "peerDependencies": {
         "@apache-arrow/ts": "^14.0.2",
@@ -333,6 +333,66 @@
         "@jridgewell/sourcemap-codec": "^1.4.10"
       }
     },
+    "node_modules/@lancedb/vectordb-darwin-arm64": {
+      "version": "0.4.20",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.4.20.tgz",
+      "integrity": "sha512-ffP2K4sA5mQTgePyARw1y8dPN996FmpvyAYoWO+TSItaXlhcXvc+KVa5udNMCZMDYeEnEv2Xpj6k4PwW3oBz+A==",
+      "cpu": [
+        "arm64"
+      ],
+      "optional": true,
+      "os": [
+        "darwin"
+      ]
+    },
+    "node_modules/@lancedb/vectordb-darwin-x64": {
+      "version": "0.4.20",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.4.20.tgz",
+      "integrity": "sha512-GSYsXE20RIehDu30FjREhJdEzhnwOTV7ZsrSXagStzLY1gr7pyd7sfqxmmUtdD09di7LnQoiM71AOpPTa01YwQ==",
+      "cpu": [
+        "x64"
+      ],
+      "optional": true,
+      "os": [
+        "darwin"
+      ]
+    },
+    "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
+      "version": "0.4.20",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.4.20.tgz",
+      "integrity": "sha512-FpNOjOsz3nJVm6EBGyNgbOW2aFhsWZ/igeY45Z8hbZaaK2YBwrg/DASoNlUzgv6IR8cUaGJ2irNVJfsKR2cG6g==",
+      "cpu": [
+        "arm64"
+      ],
+      "optional": true,
+      "os": [
+        "linux"
+      ]
+    },
+    "node_modules/@lancedb/vectordb-linux-x64-gnu": {
+      "version": "0.4.20",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.4.20.tgz",
+      "integrity": "sha512-pOqWjrRZQSrLTlQPkjidRii7NZDw8Xu9pN6ouVu2JAK8n81FXaPtFCyAI+Y3v9GpnYDN0rvD4eQ36aHAVPsa2g==",
+      "cpu": [
+        "x64"
+      ],
+      "optional": true,
+      "os": [
+        "linux"
+      ]
+    },
+    "node_modules/@lancedb/vectordb-win32-x64-msvc": {
+      "version": "0.4.20",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.4.20.tgz",
+      "integrity": "sha512-5J5SsYSJ7jRCmU/sgwVHdrGz43B/7R2T9OEoFTKyVAtqTZdu75rkytXyn9SyEayXVhlUOaw76N0ASm0hAoDS/A==",
+      "cpu": [
+        "x64"
+      ],
+      "optional": true,
+      "os": [
+        "win32"
+      ]
+    },
     "node_modules/@neon-rs/cli": {
       "version": "0.0.160",
       "resolved": "https://registry.npmjs.org/@neon-rs/cli/-/cli-0.0.160.tgz",
@@ -1,6 +1,6 @@
 {
   "name": "vectordb",
-  "version": "0.4.17",
+  "version": "0.4.20",
   "description": " Serverless, low-latency vector database for AI applications",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -88,10 +88,10 @@
     }
   },
   "optionalDependencies": {
-    "@lancedb/vectordb-darwin-arm64": "0.4.17",
-    "@lancedb/vectordb-darwin-x64": "0.4.17",
-    "@lancedb/vectordb-linux-arm64-gnu": "0.4.17",
-    "@lancedb/vectordb-linux-x64-gnu": "0.4.17",
-    "@lancedb/vectordb-win32-x64-msvc": "0.4.17"
+    "@lancedb/vectordb-darwin-arm64": "0.4.20",
+    "@lancedb/vectordb-darwin-x64": "0.4.20",
+    "@lancedb/vectordb-linux-arm64-gnu": "0.4.20",
+    "@lancedb/vectordb-linux-x64-gnu": "0.4.20",
+    "@lancedb/vectordb-win32-x64-msvc": "0.4.20"
   }
 }
|
|||||||
@@ -27,23 +27,23 @@ import {
|
|||||||
RecordBatch,
|
RecordBatch,
|
||||||
makeData,
|
makeData,
|
||||||
Struct,
|
Struct,
|
||||||
Float,
|
type Float,
|
||||||
DataType,
|
DataType,
|
||||||
Binary,
|
Binary,
|
||||||
Float32
|
Float32
|
||||||
} from 'apache-arrow'
|
} from "apache-arrow";
|
||||||
import { type EmbeddingFunction } from './index'
|
import { type EmbeddingFunction } from "./index";
|
||||||
import { sanitizeSchema } from './sanitize'
|
import { sanitizeSchema } from "./sanitize";
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Options to control how a column should be converted to a vector array
|
* Options to control how a column should be converted to a vector array
|
||||||
*/
|
*/
|
||||||
export class VectorColumnOptions {
|
export class VectorColumnOptions {
|
||||||
/** Vector column type. */
|
/** Vector column type. */
|
||||||
type: Float = new Float32()
|
type: Float = new Float32();
|
||||||
|
|
||||||
constructor(values?: Partial<VectorColumnOptions>) {
|
constructor(values?: Partial<VectorColumnOptions>) {
|
||||||
Object.assign(this, values)
|
Object.assign(this, values);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -60,7 +60,7 @@ export class MakeArrowTableOptions {
|
|||||||
* The schema must be specified if there are no records (e.g. to make
|
* The schema must be specified if there are no records (e.g. to make
|
||||||
* an empty table)
|
* an empty table)
|
||||||
*/
|
*/
|
||||||
schema?: Schema
|
schema?: Schema;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Mapping from vector column name to expected type
|
* Mapping from vector column name to expected type
|
||||||
@@ -80,7 +80,9 @@ export class MakeArrowTableOptions {
|
|||||||
*/
|
*/
|
||||||
vectorColumns: Record<string, VectorColumnOptions> = {
|
vectorColumns: Record<string, VectorColumnOptions> = {
|
||||||
vector: new VectorColumnOptions()
|
vector: new VectorColumnOptions()
|
||||||
}
|
};
|
||||||
|
|
||||||
|
embeddings?: EmbeddingFunction<any>;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If true then string columns will be encoded with dictionary encoding
|
* If true then string columns will be encoded with dictionary encoding
|
||||||
@@ -91,10 +93,10 @@ export class MakeArrowTableOptions {
|
|||||||
*
|
*
|
||||||
* If `schema` is provided then this property is ignored.
|
* If `schema` is provided then this property is ignored.
|
||||||
*/
|
*/
|
||||||
dictionaryEncodeStrings: boolean = false
|
dictionaryEncodeStrings: boolean = false;
|
||||||
|
|
||||||
constructor(values?: Partial<MakeArrowTableOptions>) {
|
constructor(values?: Partial<MakeArrowTableOptions>) {
|
||||||
Object.assign(this, values)
|
Object.assign(this, values);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -197,55 +199,64 @@ export function makeArrowTable (
|
|||||||
data: Array<Record<string, any>>,
|
data: Array<Record<string, any>>,
|
||||||
options?: Partial<MakeArrowTableOptions>
|
options?: Partial<MakeArrowTableOptions>
|
||||||
): ArrowTable {
|
): ArrowTable {
|
||||||
if (data.length === 0 && (options?.schema === undefined || options?.schema === null)) {
|
if (
|
||||||
throw new Error('At least one record or a schema needs to be provided')
|
data.length === 0 &&
|
||||||
|
(options?.schema === undefined || options?.schema === null)
|
||||||
|
) {
|
||||||
|
throw new Error("At least one record or a schema needs to be provided");
|
||||||
}
|
}
|
||||||
|
|
||||||
const opt = new MakeArrowTableOptions(options !== undefined ? options : {})
|
const opt = new MakeArrowTableOptions(options !== undefined ? options : {});
|
||||||
if (opt.schema !== undefined && opt.schema !== null) {
|
if (opt.schema !== undefined && opt.schema !== null) {
|
||||||
opt.schema = sanitizeSchema(opt.schema)
|
opt.schema = sanitizeSchema(opt.schema);
|
||||||
|
opt.schema = validateSchemaEmbeddings(opt.schema, data, opt.embeddings);
|
||||||
}
|
}
|
||||||
const columns: Record<string, Vector> = {}
|
|
||||||
|
const columns: Record<string, Vector> = {};
|
||||||
// TODO: sample dataset to find missing columns
|
// TODO: sample dataset to find missing columns
|
||||||
// Prefer the field ordering of the schema, if present
|
// Prefer the field ordering of the schema, if present
|
||||||
const columnNames = ((opt.schema) != null) ? (opt.schema.names as string[]) : Object.keys(data[0])
|
const columnNames =
|
||||||
|
opt.schema != null ? (opt.schema.names as string[]) : Object.keys(data[0]);
|
||||||
for (const colName of columnNames) {
|
for (const colName of columnNames) {
|
||||||
if (data.length !== 0 && !Object.prototype.hasOwnProperty.call(data[0], colName)) {
|
if (
|
||||||
|
data.length !== 0 &&
|
||||||
|
!Object.prototype.hasOwnProperty.call(data[0], colName)
|
||||||
|
) {
|
||||||
// The field is present in the schema, but not in the data, skip it
|
// The field is present in the schema, but not in the data, skip it
|
||||||
continue
|
continue;
|
||||||
}
|
}
|
||||||
// Extract a single column from the records (transpose from row-major to col-major)
|
// Extract a single column from the records (transpose from row-major to col-major)
|
||||||
let values = data.map((datum) => datum[colName])
|
let values = data.map((datum) => datum[colName]);
|
||||||
|
|
||||||
// By default (type === undefined) arrow will infer the type from the JS type
|
// By default (type === undefined) arrow will infer the type from the JS type
|
||||||
let type
|
let type;
|
||||||
if (opt.schema !== undefined) {
|
if (opt.schema !== undefined) {
|
||||||
// If there is a schema provided, then use that for the type instead
|
// If there is a schema provided, then use that for the type instead
|
||||||
type = opt.schema?.fields.filter((f) => f.name === colName)[0]?.type
|
type = opt.schema?.fields.filter((f) => f.name === colName)[0]?.type;
|
||||||
if (DataType.isInt(type) && type.bitWidth === 64) {
|
if (DataType.isInt(type) && type.bitWidth === 64) {
|
||||||
// wrap in BigInt to avoid bug: https://github.com/apache/arrow/issues/40051
|
// wrap in BigInt to avoid bug: https://github.com/apache/arrow/issues/40051
|
||||||
values = values.map((v) => {
|
values = values.map((v) => {
|
||||||
if (v === null) {
|
if (v === null) {
|
||||||
return v
|
return v;
|
||||||
}
|
}
|
||||||
return BigInt(v)
|
return BigInt(v);
|
||||||
})
|
});
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Otherwise, check to see if this column is one of the vector columns
|
// Otherwise, check to see if this column is one of the vector columns
|
||||||
// defined by opt.vectorColumns and, if so, use the fixed size list type
|
// defined by opt.vectorColumns and, if so, use the fixed size list type
|
||||||
const vectorColumnOptions = opt.vectorColumns[colName]
|
const vectorColumnOptions = opt.vectorColumns[colName];
|
||||||
if (vectorColumnOptions !== undefined) {
|
if (vectorColumnOptions !== undefined) {
|
||||||
type = newVectorType(values[0].length, vectorColumnOptions.type)
|
type = newVectorType(values[0].length, vectorColumnOptions.type);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// Convert an Array of JS values to an arrow vector
|
// Convert an Array of JS values to an arrow vector
|
||||||
columns[colName] = makeVector(values, type, opt.dictionaryEncodeStrings)
|
columns[colName] = makeVector(values, type, opt.dictionaryEncodeStrings);
|
||||||
} catch (error: unknown) {
|
} catch (error: unknown) {
|
||||||
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
|
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
|
||||||
throw Error(`Could not convert column "${colName}" to Arrow: ${error}`)
|
throw Error(`Could not convert column "${colName}" to Arrow: ${error}`);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -260,12 +271,14 @@ export function makeArrowTable (
|
|||||||
// To work around this we first create a table with the wrong schema and
|
// To work around this we first create a table with the wrong schema and
|
||||||
// then patch the schema of the batches so we can use
|
// then patch the schema of the batches so we can use
|
||||||
// `new ArrowTable(schema, batches)` which does not do any schema inference
|
// `new ArrowTable(schema, batches)` which does not do any schema inference
|
||||||
const firstTable = new ArrowTable(columns)
|
const firstTable = new ArrowTable(columns);
|
||||||
|
const batchesFixed = firstTable.batches.map(
|
||||||
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
|
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
|
||||||
const batchesFixed = firstTable.batches.map(batch => new RecordBatch(opt.schema!, batch.data))
|
(batch) => new RecordBatch(opt.schema!, batch.data)
|
||||||
return new ArrowTable(opt.schema, batchesFixed)
|
);
|
||||||
|
return new ArrowTable(opt.schema, batchesFixed);
|
||||||
} else {
|
} else {
|
||||||
return new ArrowTable(columns)
|
return new ArrowTable(columns);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -273,84 +286,101 @@ export function makeArrowTable (
|
|||||||
* Create an empty Arrow table with the provided schema
|
* Create an empty Arrow table with the provided schema
|
||||||
*/
|
*/
|
||||||
export function makeEmptyTable(schema: Schema): ArrowTable {
|
export function makeEmptyTable(schema: Schema): ArrowTable {
|
||||||
return makeArrowTable([], { schema })
|
return makeArrowTable([], { schema });
|
||||||
}
|
}
|
||||||
|
|
||||||
// Helper function to convert Array<Array<any>> to a variable sized list array
|
// Helper function to convert Array<Array<any>> to a variable sized list array
|
||||||
function makeListVector(lists: any[][]): Vector<any> {
|
function makeListVector(lists: any[][]): Vector<any> {
|
||||||
if (lists.length === 0 || lists[0].length === 0) {
|
if (lists.length === 0 || lists[0].length === 0) {
|
||||||
throw Error('Cannot infer list vector from empty array or empty list')
|
throw Error("Cannot infer list vector from empty array or empty list");
|
||||||
}
|
}
|
||||||
const sampleList = lists[0]
|
const sampleList = lists[0];
|
||||||
let inferredType
|
let inferredType;
|
||||||
try {
|
try {
|
||||||
const sampleVector = makeVector(sampleList)
|
const sampleVector = makeVector(sampleList);
|
||||||
inferredType = sampleVector.type
|
inferredType = sampleVector.type;
|
||||||
} catch (error: unknown) {
|
} catch (error: unknown) {
|
||||||
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
|
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
|
||||||
throw Error(`Cannot infer list vector. Cannot infer inner type: ${error}`)
|
throw Error(`Cannot infer list vector. Cannot infer inner type: ${error}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
const listBuilder = makeBuilder({
|
const listBuilder = makeBuilder({
|
||||||
type: new List(new Field('item', inferredType, true))
|
type: new List(new Field("item", inferredType, true))
|
||||||
})
|
});
|
||||||
for (const list of lists) {
|
for (const list of lists) {
|
||||||
listBuilder.append(list)
|
listBuilder.append(list);
|
||||||
}
|
}
|
||||||
return listBuilder.finish().toVector()
|
return listBuilder.finish().toVector();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Helper function to convert an Array of JS values to an Arrow Vector
|
// Helper function to convert an Array of JS values to an Arrow Vector
|
||||||
function makeVector (values: any[], type?: DataType, stringAsDictionary?: boolean): Vector<any> {
|
function makeVector(
|
||||||
|
values: any[],
|
||||||
|
type?: DataType,
|
||||||
|
stringAsDictionary?: boolean
|
||||||
|
): Vector<any> {
|
||||||
if (type !== undefined) {
|
if (type !== undefined) {
|
||||||
// No need for inference, let Arrow create it
|
// No need for inference, let Arrow create it
|
||||||
return vectorFromArray(values, type)
|
return vectorFromArray(values, type);
|
||||||
}
|
}
|
||||||
if (values.length === 0) {
|
if (values.length === 0) {
|
||||||
throw Error('makeVector requires at least one value or the type must be specfied')
|
throw Error(
|
||||||
|
"makeVector requires at least one value or the type must be specfied"
|
||||||
|
);
|
||||||
}
|
}
|
||||||
const sampleValue = values.find(val => val !== null && val !== undefined)
|
const sampleValue = values.find((val) => val !== null && val !== undefined);
|
||||||
if (sampleValue === undefined) {
|
if (sampleValue === undefined) {
|
||||||
throw Error('makeVector cannot infer the type if all values are null or undefined')
|
throw Error(
|
||||||
|
"makeVector cannot infer the type if all values are null or undefined"
|
||||||
|
);
|
||||||
}
|
}
|
||||||
if (Array.isArray(sampleValue)) {
|
if (Array.isArray(sampleValue)) {
|
||||||
// Default Arrow inference doesn't handle list types
|
// Default Arrow inference doesn't handle list types
|
||||||
return makeListVector(values)
|
return makeListVector(values);
|
||||||
} else if (Buffer.isBuffer(sampleValue)) {
|
} else if (Buffer.isBuffer(sampleValue)) {
|
||||||
// Default Arrow inference doesn't handle Buffer
|
// Default Arrow inference doesn't handle Buffer
|
||||||
return vectorFromArray(values, new Binary())
|
return vectorFromArray(values, new Binary());
|
||||||
} else if (!(stringAsDictionary ?? false) && (typeof sampleValue === 'string' || sampleValue instanceof String)) {
|
} else if (
|
||||||
|
!(stringAsDictionary ?? false) &&
|
||||||
|
(typeof sampleValue === "string" || sampleValue instanceof String)
|
||||||
|
) {
|
||||||
// If the type is string then don't use Arrow's default inference unless dictionaries are requested
|
// If the type is string then don't use Arrow's default inference unless dictionaries are requested
|
||||||
// because it will always use dictionary encoding for strings
|
// because it will always use dictionary encoding for strings
|
||||||
return vectorFromArray(values, new Utf8())
|
return vectorFromArray(values, new Utf8());
|
||||||
} else {
|
} else {
|
||||||
// Convert a JS array of values to an arrow vector
|
// Convert a JS array of values to an arrow vector
|
||||||
return vectorFromArray(values)
|
return vectorFromArray(values);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async function applyEmbeddings<T> (table: ArrowTable, embeddings?: EmbeddingFunction<T>, schema?: Schema): Promise<ArrowTable> {
|
async function applyEmbeddings<T>(
|
||||||
|
table: ArrowTable,
|
||||||
|
embeddings?: EmbeddingFunction<T>,
|
||||||
|
schema?: Schema
|
||||||
|
): Promise<ArrowTable> {
|
||||||
if (embeddings == null) {
|
if (embeddings == null) {
|
||||||
return table
|
return table;
|
||||||
}
|
}
|
||||||
if (schema !== undefined && schema !== null) {
|
if (schema !== undefined && schema !== null) {
|
||||||
schema = sanitizeSchema(schema)
|
schema = sanitizeSchema(schema);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Convert from ArrowTable to Record<String, Vector>
|
// Convert from ArrowTable to Record<String, Vector>
|
||||||
const colEntries = [...Array(table.numCols).keys()].map((_, idx) => {
|
const colEntries = [...Array(table.numCols).keys()].map((_, idx) => {
|
||||||
const name = table.schema.fields[idx].name
|
const name = table.schema.fields[idx].name;
|
||||||
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
|
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
|
||||||
const vec = table.getChildAt(idx)!
|
const vec = table.getChildAt(idx)!;
|
||||||
return [name, vec]
|
return [name, vec];
|
||||||
})
|
});
|
||||||
const newColumns = Object.fromEntries(colEntries)
|
const newColumns = Object.fromEntries(colEntries);
|
||||||
|
|
||||||
const sourceColumn = newColumns[embeddings.sourceColumn]
|
const sourceColumn = newColumns[embeddings.sourceColumn];
|
||||||
const destColumn = embeddings.destColumn ?? 'vector'
|
const destColumn = embeddings.destColumn ?? "vector";
|
||||||
const innerDestType = embeddings.embeddingDataType ?? new Float32()
|
const innerDestType = embeddings.embeddingDataType ?? new Float32();
|
||||||
if (sourceColumn === undefined) {
|
if (sourceColumn === undefined) {
|
||||||
throw new Error(`Cannot apply embedding function because the source column '${embeddings.sourceColumn}' was not present in the data`)
|
throw new Error(
|
||||||
|
`Cannot apply embedding function because the source column '${embeddings.sourceColumn}' was not present in the data`
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (table.numRows === 0) {
|
if (table.numRows === 0) {
|
||||||
@@ -358,45 +388,60 @@ async function applyEmbeddings<T> (table: ArrowTable, embeddings?: EmbeddingFunc
|
|||||||
// We have an empty table and it already has the embedding column so no work needs to be done
|
// We have an empty table and it already has the embedding column so no work needs to be done
|
||||||
// Note: we don't return an error like we did below because this is a common occurrence. For example,
|
// Note: we don't return an error like we did below because this is a common occurrence. For example,
|
||||||
// if we call convertToTable with 0 records and a schema that includes the embedding
|
// if we call convertToTable with 0 records and a schema that includes the embedding
|
||||||
return table
|
return table;
|
||||||
}
|
}
|
||||||
if (embeddings.embeddingDimension !== undefined) {
|
if (embeddings.embeddingDimension !== undefined) {
|
||||||
const destType = newVectorType(embeddings.embeddingDimension, innerDestType)
|
const destType = newVectorType(
|
||||||
newColumns[destColumn] = makeVector([], destType)
|
embeddings.embeddingDimension,
|
||||||
|
innerDestType
|
||||||
|
);
|
||||||
|
newColumns[destColumn] = makeVector([], destType);
|
||||||
} else if (schema != null) {
|
} else if (schema != null) {
|
||||||
const destField = schema.fields.find(f => f.name === destColumn)
|
const destField = schema.fields.find((f) => f.name === destColumn);
|
||||||
if (destField != null) {
|
if (destField != null) {
|
||||||
newColumns[destColumn] = makeVector([], destField.type)
|
newColumns[destColumn] = makeVector([], destField.type);
|
||||||
} else {
|
} else {
|
||||||
throw new Error(`Attempt to apply embeddings to an empty table failed because schema was missing embedding column '${destColumn}'`)
|
throw new Error(
|
||||||
|
`Attempt to apply embeddings to an empty table failed because schema was missing embedding column '${destColumn}'`
|
||||||
|
);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
throw new Error('Attempt to apply embeddings to an empty table when the embeddings function does not specify `embeddingDimension`')
|
throw new Error(
|
||||||
|
"Attempt to apply embeddings to an empty table when the embeddings function does not specify `embeddingDimension`"
|
||||||
|
);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if (Object.prototype.hasOwnProperty.call(newColumns, destColumn)) {
|
if (Object.prototype.hasOwnProperty.call(newColumns, destColumn)) {
|
||||||
throw new Error(`Attempt to apply embeddings to table failed because column ${destColumn} already existed`)
|
throw new Error(
|
||||||
|
`Attempt to apply embeddings to table failed because column ${destColumn} already existed`
|
||||||
|
);
|
||||||
}
|
}
|
||||||
if (table.batches.length > 1) {
|
if (table.batches.length > 1) {
|
||||||
throw new Error('Internal error: `makeArrowTable` unexpectedly created a table with more than one batch')
|
throw new Error(
|
||||||
|
"Internal error: `makeArrowTable` unexpectedly created a table with more than one batch"
|
||||||
|
);
|
||||||
}
|
}
|
||||||
const values = sourceColumn.toArray()
|
const values = sourceColumn.toArray();
|
||||||
const vectors = await embeddings.embed(values as T[])
|
const vectors = await embeddings.embed(values as T[]);
|
||||||
if (vectors.length !== values.length) {
|
if (vectors.length !== values.length) {
|
||||||
throw new Error('Embedding function did not return an embedding for each input element')
|
throw new Error(
|
||||||
|
"Embedding function did not return an embedding for each input element"
|
||||||
|
);
|
||||||
}
|
}
|
||||||
const destType = newVectorType(vectors[0].length, innerDestType)
|
const destType = newVectorType(vectors[0].length, innerDestType);
|
||||||
newColumns[destColumn] = makeVector(vectors, destType)
|
newColumns[destColumn] = makeVector(vectors, destType);
|
||||||
}
|
}
|
||||||
|
|
||||||
const newTable = new ArrowTable(newColumns)
|
const newTable = new ArrowTable(newColumns);
|
||||||
if (schema != null) {
|
if (schema != null) {
|
||||||
if (schema.fields.find(f => f.name === destColumn) === undefined) {
|
if (schema.fields.find((f) => f.name === destColumn) === undefined) {
|
||||||
throw new Error(`When using embedding functions and specifying a schema the schema should include the embedding column but the column ${destColumn} was missing`)
|
throw new Error(
|
||||||
|
`When using embedding functions and specifying a schema the schema should include the embedding column but the column ${destColumn} was missing`
|
||||||
|
);
|
||||||
}
|
}
|
||||||
return alignTable(newTable, schema)
|
return alignTable(newTable, schema);
|
||||||
}
|
}
|
||||||
return newTable
|
return newTable;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -422,16 +467,19 @@ export async function convertToTable<T> (
|
|||||||
embeddings?: EmbeddingFunction<T>,
|
embeddings?: EmbeddingFunction<T>,
|
||||||
makeTableOptions?: Partial<MakeArrowTableOptions>
|
makeTableOptions?: Partial<MakeArrowTableOptions>
|
||||||
): Promise<ArrowTable> {
|
): Promise<ArrowTable> {
|
||||||
const table = makeArrowTable(data, makeTableOptions)
|
const table = makeArrowTable(data, makeTableOptions);
|
||||||
return await applyEmbeddings(table, embeddings, makeTableOptions?.schema)
|
return await applyEmbeddings(table, embeddings, makeTableOptions?.schema);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Creates the Arrow Type for a Vector column with dimension `dim`
|
// Creates the Arrow Type for a Vector column with dimension `dim`
|
||||||
function newVectorType <T extends Float> (dim: number, innerType: T): FixedSizeList<T> {
|
function newVectorType<T extends Float>(
|
||||||
|
dim: number,
|
||||||
|
innerType: T
|
||||||
|
): FixedSizeList<T> {
|
||||||
// Somewhere we always default to have the elements nullable, so we need to set it to true
|
// Somewhere we always default to have the elements nullable, so we need to set it to true
|
||||||
// otherwise we often get schema mismatches because the stored data always has schema with nullable elements
|
// otherwise we often get schema mismatches because the stored data always has schema with nullable elements
|
||||||
const children = new Field<T>('item', innerType, true)
|
const children = new Field<T>("item", innerType, true);
|
||||||
return new FixedSizeList(dim, children)
|
return new FixedSizeList(dim, children);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
@@ -447,11 +495,11 @@ export async function fromRecordsToBuffer<T> (
  schema?: Schema
): Promise<Buffer> {
  if (schema !== undefined && schema !== null) {
    schema = sanitizeSchema(schema);
  }
  const table = await convertToTable(data, embeddings, { schema, embeddings });
  const writer = RecordBatchFileWriter.writeAll(table);
  return Buffer.from(await writer.toUint8Array());
}

/**
@@ -467,11 +515,11 @@ export async function fromRecordsToStreamBuffer<T> (
  schema?: Schema
): Promise<Buffer> {
  if (schema !== null && schema !== undefined) {
    schema = sanitizeSchema(schema);
  }
  const table = await convertToTable(data, embeddings, { schema });
  const writer = RecordBatchStreamWriter.writeAll(table);
  return Buffer.from(await writer.toUint8Array());
}

/**
@@ -488,11 +536,11 @@ export async function fromTableToBuffer<T> (
  schema?: Schema
): Promise<Buffer> {
  if (schema !== null && schema !== undefined) {
    schema = sanitizeSchema(schema);
  }
  const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema);
  const writer = RecordBatchFileWriter.writeAll(tableWithEmbeddings);
  return Buffer.from(await writer.toUint8Array());
}

/**
@@ -509,43 +557,81 @@ export async function fromTableToStreamBuffer<T> (
  schema?: Schema
): Promise<Buffer> {
  if (schema !== null && schema !== undefined) {
    schema = sanitizeSchema(schema);
  }
  const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema);
  const writer = RecordBatchStreamWriter.writeAll(tableWithEmbeddings);
  return Buffer.from(await writer.toUint8Array());
}
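These four helpers all follow the same shape: sanitize the schema, apply embeddings, then serialize with an Arrow IPC writer. A small round-trip sketch under those assumptions (no embeddings, schema inferred):

```ts
import { tableFromIPC } from "apache-arrow";
import { fromTableToBuffer, makeArrowTable } from "./arrow";

const table = makeArrowTable([{ id: 1 }, { id: 2 }]);
const buffer = await fromTableToBuffer(table);

// RecordBatchFileWriter output can be read straight back.
const roundTripped = tableFromIPC(buffer);
console.log(roundTripped.numRows); // 2
```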
function alignBatch(batch: RecordBatch, schema: Schema): RecordBatch {
  const alignedChildren = [];
  for (const field of schema.fields) {
    const indexInBatch = batch.schema.fields?.findIndex(
      (f) => f.name === field.name
    );
    if (indexInBatch < 0) {
      throw new Error(
        `The column ${field.name} was not found in the Arrow Table`
      );
    }
    alignedChildren.push(batch.data.children[indexInBatch]);
  }
  const newData = makeData({
    type: new Struct(schema.fields),
    length: batch.numRows,
    nullCount: batch.nullCount,
    children: alignedChildren
  });
  return new RecordBatch(schema, newData);
}

function alignTable(table: ArrowTable, schema: Schema): ArrowTable {
  const alignedBatches = table.batches.map((batch) =>
    alignBatch(batch, schema)
  );
  return new ArrowTable(schema, alignedBatches);
}

// Creates an empty Arrow Table
export function createEmptyTable(schema: Schema): ArrowTable {
  return new ArrowTable(sanitizeSchema(schema));
}

function validateSchemaEmbeddings(
  schema: Schema<any>,
  data: Array<Record<string, unknown>>,
  embeddings: EmbeddingFunction<any> | undefined
) {
  const fields = [];
  const missingEmbeddingFields = [];

  // First we check if the field is a `FixedSizeList`
  // Then we check if the data contains the field
  // if it does not, we add it to the list of missing embedding fields
  // Finally, we check if those missing embedding fields are `this._embeddings`
  // if they are not, we throw an error
  for (const field of schema.fields) {
    if (field.type instanceof FixedSizeList) {
      if (data.length !== 0 && data?.[0]?.[field.name] === undefined) {
        missingEmbeddingFields.push(field);
      } else {
        fields.push(field);
      }
    } else {
      fields.push(field);
    }
  }

  if (missingEmbeddingFields.length > 0 && embeddings === undefined) {
    console.log({ missingEmbeddingFields, embeddings });

    throw new Error(
      `Table has embeddings: "${missingEmbeddingFields
        .map((f) => f.name)
        .join(",")}", but no embedding function was provided`
    );
  }

  return new Schema(fields);
}
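To illustrate the rule `validateSchemaEmbeddings` enforces, here is a hypothetical direct call (the function is module-private, so this is illustrative only): a schema may declare a `FixedSizeList` vector column that the rows do not carry only if an embedding function is supplied to fill it.

```ts
import { Field, FixedSizeList, Float32, Schema, Utf8 } from "apache-arrow";

const schema = new Schema([
  new Field("text", new Utf8(), true),
  new Field(
    "vector",
    new FixedSizeList(2, new Field("item", new Float32(), true)),
    true
  ),
]);

// The rows carry no `vector` values and no embedding function is given, so
// this throws: Table has embeddings: "vector", but no embedding function
// was provided.
validateSchemaEmbeddings(schema, [{ text: "hi" }], undefined);
```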
@@ -12,19 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.

import { type Schema, Table as ArrowTable, tableFromIPC } from "apache-arrow";
import {
  createEmptyTable,
  fromRecordsToBuffer,
  fromTableToBuffer,
  makeArrowTable
} from "./arrow";
import type { EmbeddingFunction } from "./embedding/embedding_function";
import { RemoteConnection } from "./remote";
import { Query } from "./query";
import { isEmbeddingFunction } from "./embedding/embedding_function";
import { type Literal, toSQL } from "./util";
import { type HttpMiddleware } from "./middleware";

const {
  databaseNew,
@@ -48,14 +49,18 @@ const {
  tableAlterColumns,
  tableDropColumns
  // eslint-disable-next-line @typescript-eslint/no-var-requires
} = require("../native.js");

export { Query };
export type { EmbeddingFunction };
export { OpenAIEmbeddingFunction } from "./embedding/openai";
export {
  convertToTable,
  makeArrowTable,
  type MakeArrowTableOptions
} from "./arrow";

const defaultAwsRegion = "us-west-2";
export interface AwsCredentials {
  accessKeyId: string
@@ -129,18 +134,18 @@ export interface ConnectionOptions {
}

function getAwsArgs(opts: ConnectionOptions): any[] {
  const callArgs: any[] = [];
  const awsCredentials = opts.awsCredentials;
  if (awsCredentials !== undefined) {
    callArgs.push(awsCredentials.accessKeyId);
    callArgs.push(awsCredentials.secretKey);
    callArgs.push(awsCredentials.sessionToken);
  } else {
    callArgs.fill(undefined, 0, 3);
  }

  callArgs.push(opts.awsRegion);
  return callArgs;
}

export interface CreateTableOptions<T> {
@@ -163,7 +168,7 @@ export interface CreateTableOptions<T> {
/**
 * Connect to a LanceDB instance at the given URI.
 *
 * Accepted formats:
 *
 * - `/path/to/database` - local database
 * - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage
@@ -173,7 +178,7 @@ export interface CreateTableOptions<T> {
 *
 * @see {@link ConnectionOptions} for more details on the URI format.
 */
export async function connect(uri: string): Promise<Connection>;
/**
 * Connect to a LanceDB instance with connection options.
 *
@@ -181,48 +186,48 @@ export async function connect (uri: string): Promise<Connection>
 */
export async function connect(
  opts: Partial<ConnectionOptions>
): Promise<Connection>;
export async function connect(
  arg: string | Partial<ConnectionOptions>
): Promise<Connection> {
  let opts: ConnectionOptions;
  if (typeof arg === "string") {
    opts = { uri: arg };
  } else {
    const keys = Object.keys(arg);
    if (keys.length === 1 && keys[0] === "uri" && typeof arg.uri === "string") {
      opts = { uri: arg.uri };
    } else {
      opts = Object.assign(
        {
          uri: "",
          awsCredentials: undefined,
          awsRegion: defaultAwsRegion,
          apiKey: undefined,
          region: defaultAwsRegion
        },
        arg
      );
    }
  }

  if (opts.uri.startsWith("db://")) {
    // Remote connection
    return new RemoteConnection(opts);
  }

  const storageOptions = opts.storageOptions ?? {};
  if (opts.awsCredentials?.accessKeyId !== undefined) {
    storageOptions.aws_access_key_id = opts.awsCredentials.accessKeyId;
  }
  if (opts.awsCredentials?.secretKey !== undefined) {
    storageOptions.aws_secret_access_key = opts.awsCredentials.secretKey;
  }
  if (opts.awsCredentials?.sessionToken !== undefined) {
    storageOptions.aws_session_token = opts.awsCredentials.sessionToken;
  }
  if (opts.awsRegion !== undefined) {
    storageOptions.region = opts.awsRegion;
  }
  // It's a pain to pass a record to Rust, so we convert it to an array of key-value pairs
  const storageOptionsArr = Object.entries(storageOptions);
@@ -231,8 +236,8 @@ export async function connect (
    opts.uri,
    storageOptionsArr,
    opts.readConsistencyInterval
  );
  return new LocalConnection(db, opts);
}
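Typical calls into `connect`, covering both overloads shown above. The package import name and the S3 values are illustrative assumptions:

```ts
import * as lancedb from "vectordb"; // package name assumed

// Shorthand: a bare URI.
const db = await lancedb.connect("/tmp/my-lancedb");

// Full options: AWS credentials land in storageOptions as shown above.
const remote = await lancedb.connect({
  uri: "s3://my-bucket/my-db", // illustrative
  awsRegion: "us-east-1",      // illustrative
});
```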
/**
@@ -533,7 +538,11 @@ export interface Table<T = number[]> {
   * @param data the new data to insert
   * @param args parameters controlling how the operation should behave
   */
  mergeInsert: (
    on: string,
    data: Array<Record<string, unknown>> | ArrowTable,
    args: MergeInsertArgs
  ) => Promise<void>
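A sketch of the call this signature describes, using the `MergeInsertArgs` fields handled by the implementation further down:

```ts
// Upsert keyed on "id": update rows that match, insert the rest.
await table.mergeInsert(
  "id",
  [
    { id: 1, text: "updated row" },
    { id: 99, text: "brand new row" },
  ],
  {
    whenMatchedUpdateAll: true,
    whenNotMatchedInsertAll: true,
  }
);
```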
/**
   * List the indices on this table.
@@ -558,7 +567,9 @@ export interface Table<T = number[]> {
   * expressions will be evaluated for each row in the
   * table, and can reference existing columns in the table.
   */
  addColumns(
    newColumnTransforms: Array<{ name: string, valueSql: string }>
  ): Promise<void>
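For example (the column names are illustrative):

```ts
// Each new column is computed by a SQL expression over existing columns.
await table.addColumns([
  { name: "price_usd", valueSql: "price_cents / 100.0" },
]);
```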
/**
   * Alter the name or nullability of columns.
@@ -699,23 +710,23 @@ export interface IndexStats {
 * A connection to a LanceDB database.
 */
export class LocalConnection implements Connection {
  private readonly _options: () => ConnectionOptions;
  private readonly _db: any;

  constructor(db: any, options: ConnectionOptions) {
    this._options = () => options;
    this._db = db;
  }

  get uri(): string {
    return this._options().uri;
  }

  /**
   * Get the names of all tables in the database.
   */
  async tableNames(): Promise<string[]> {
    return databaseTableNames.call(this._db);
  }

  /**
@@ -723,7 +734,7 @@ export class LocalConnection implements Connection {
   *
   * @param name The name of the table.
   */
  async openTable(name: string): Promise<Table>;

  /**
   * Open a table in the database.
@@ -734,23 +745,20 @@ export class LocalConnection implements Connection {
  async openTable<T>(
    name: string,
    embeddings: EmbeddingFunction<T>
  ): Promise<Table<T>>;
  async openTable<T>(
    name: string,
    embeddings?: EmbeddingFunction<T>
  ): Promise<Table<T>>;
  async openTable<T>(
    name: string,
    embeddings?: EmbeddingFunction<T>
  ): Promise<Table<T>> {
    const tbl = await databaseOpenTable.call(this._db, name);
    if (embeddings !== undefined) {
      return new LocalTable(tbl, name, this._options(), embeddings);
    } else {
      return new LocalTable(tbl, name, this._options());
    }
  }
@@ -760,32 +768,32 @@ export class LocalConnection implements Connection {
    optsOrEmbedding?: WriteOptions | EmbeddingFunction<T>,
    opt?: WriteOptions
  ): Promise<Table<T>> {
    if (typeof name === "string") {
      let writeOptions: WriteOptions = new DefaultWriteOptions();
      if (opt !== undefined && isWriteOptions(opt)) {
        writeOptions = opt;
      } else if (
        optsOrEmbedding !== undefined &&
        isWriteOptions(optsOrEmbedding)
      ) {
        writeOptions = optsOrEmbedding;
      }

      let embeddings: undefined | EmbeddingFunction<T>;
      if (
        optsOrEmbedding !== undefined &&
        isEmbeddingFunction(optsOrEmbedding)
      ) {
        embeddings = optsOrEmbedding;
      }
      return await this.createTableImpl({
        name,
        data,
        embeddingFunction: embeddings,
        writeOptions
      });
    }
    return await this.createTableImpl(name);
  }
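A sketch of the two call shapes this dispatcher accepts, reusing the `db` connection and `WriteMode` from above:

```ts
// Records plus an inferred schema.
const table = await db.createTable("items", [
  { id: 1, vector: [0.1, 0.2] },
  { id: 2, vector: [0.3, 0.4] },
]);

// Same, but passing WriteOptions to overwrite an existing table.
await db.createTable(
  "items",
  [{ id: 3, vector: [0.5, 0.6] }],
  { writeMode: WriteMode.Overwrite }
);
```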
  private async createTableImpl<T>({
@@ -801,27 +809,27 @@ export class LocalConnection implements Connection {
    embeddingFunction?: EmbeddingFunction<T> | undefined
    writeOptions?: WriteOptions | undefined
  }): Promise<Table<T>> {
    let buffer: Buffer;

    function isEmpty(
      data: Array<Record<string, unknown>> | ArrowTable<any>
    ): boolean {
      if (data instanceof ArrowTable) {
        return data.data.length === 0;
      }
      return data.length === 0;
    }

    if (data === undefined || isEmpty(data)) {
      if (schema === undefined) {
        throw new Error("Either data or schema needs to defined");
      }
      buffer = await fromTableToBuffer(createEmptyTable(schema));
    } else if (data instanceof ArrowTable) {
      buffer = await fromTableToBuffer(data, embeddingFunction, schema);
    } else {
      // data is Array<Record<...>>
      buffer = await fromRecordsToBuffer(data, embeddingFunction, schema);
    }

    const tbl = await tableCreate.call(
@@ -830,11 +838,11 @@ export class LocalConnection implements Connection {
      buffer,
      writeOptions?.writeMode?.toString(),
      ...getAwsArgs(this._options())
    );
    if (embeddingFunction !== undefined) {
      return new LocalTable(tbl, name, this._options(), embeddingFunction);
    } else {
      return new LocalTable(tbl, name, this._options());
    }
  }
@@ -843,22 +851,22 @@ export class LocalConnection implements Connection {
   * @param name The name of the table to drop.
   */
  async dropTable(name: string): Promise<void> {
    await databaseDropTable.call(this._db, name);
  }

  withMiddleware(middleware: HttpMiddleware): Connection {
    return this;
  }
}

export class LocalTable<T = number[]> implements Table<T> {
  private _tbl: any;
  private readonly _name: string;
  private readonly _isElectron: boolean;
  private readonly _embeddings?: EmbeddingFunction<T>;
  private readonly _options: () => ConnectionOptions;

  constructor(tbl: any, name: string, options: ConnectionOptions);
  /**
   * @param tbl
   * @param name
@@ -870,22 +878,22 @@ export class LocalTable<T = number[]> implements Table<T> {
    name: string,
    options: ConnectionOptions,
    embeddings: EmbeddingFunction<T>
  );
  constructor(
    tbl: any,
    name: string,
    options: ConnectionOptions,
    embeddings?: EmbeddingFunction<T>
  ) {
    this._tbl = tbl;
    this._name = name;
    this._embeddings = embeddings;
    this._options = () => options;
    this._isElectron = this.checkElectron();
  }

  get name(): string {
    return this._name;
  }
  /**
@@ -893,7 +901,7 @@ export class LocalTable<T = number[]> implements Table<T> {
   * @param query The query search term
   */
  search(query: T): Query<T> {
    return new Query(query, this._tbl, this._embeddings);
  }

  /**
@@ -901,10 +909,10 @@ export class LocalTable<T = number[]> implements Table<T> {
   * @param value The filter criteria (like SQL where clause syntax)
   */
  filter(value: string): Query<T> {
    return new Query(undefined, this._tbl, this._embeddings).filter(value);
  }

  where = this.filter;
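`search` and `filter` both return a `Query` builder. A typical chain looks like this; the `limit` and `execute` calls are assumed from the `Query` class, which is defined elsewhere in the package:

```ts
// Nearest neighbours to the query vector, post-filtered with SQL syntax.
const hits = await table
  .search([0.1, 0.2])
  .where("id > 0")
  .limit(10)
  .execute();
```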
  /**
   * Insert records into this Table.
@@ -915,13 +923,16 @@ export class LocalTable<T = number[]> implements Table<T> {
  async add(
    data: Array<Record<string, unknown>> | ArrowTable
  ): Promise<number> {
    const schema = await this.schema;

    let tbl: ArrowTable;

    if (data instanceof ArrowTable) {
      tbl = data;
    } else {
      tbl = makeArrowTable(data, { schema, embeddings: this._embeddings });
    }

    return tableAdd
      .call(
        this._tbl,
@@ -930,8 +941,8 @@ export class LocalTable<T = number[]> implements Table<T> {
        ...getAwsArgs(this._options())
      )
      .then((newTable: any) => {
        this._tbl = newTable;
      });
  }

  /**
@@ -943,11 +954,11 @@ export class LocalTable<T = number[]> implements Table<T> {
  async overwrite(
    data: Array<Record<string, unknown>> | ArrowTable
  ): Promise<number> {
    let buffer: Buffer;
    if (data instanceof ArrowTable) {
      buffer = await fromTableToBuffer(data, this._embeddings);
    } else {
      buffer = await fromRecordsToBuffer(data, this._embeddings);
    }
    return tableAdd
      .call(
@@ -957,8 +968,8 @@ export class LocalTable<T = number[]> implements Table<T> {
        ...getAwsArgs(this._options())
      )
      .then((newTable: any) => {
        this._tbl = newTable;
      });
  }

  /**
@@ -970,22 +981,22 @@ export class LocalTable<T = number[]> implements Table<T> {
    return tableCreateVectorIndex
      .call(this._tbl, indexParams)
      .then((newTable: any) => {
        this._tbl = newTable;
      });
  }

  async createScalarIndex(column: string, replace?: boolean): Promise<void> {
    if (replace === undefined) {
      replace = true;
    }
    return tableCreateScalarIndex.call(this._tbl, column, replace);
  }

  /**
   * Returns the number of rows in this table.
   */
  async countRows(filter?: string): Promise<number> {
    return tableCountRows.call(this._tbl, filter);
  }
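For instance:

```ts
// Build (or rebuild, since replace defaults to true) a scalar index on "id",
// then count only the rows matching a filter.
await table.createScalarIndex("id");
const matching = await table.countRows("id > 10");
```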
  /**
@@ -995,8 +1006,8 @@ export class LocalTable<T = number[]> implements Table<T> {
   */
  async delete(filter: string): Promise<void> {
    return tableDelete.call(this._tbl, filter).then((newTable: any) => {
      this._tbl = newTable;
    });
  }

  /**
@@ -1007,54 +1018,64 @@ export class LocalTable<T = number[]> implements Table<T> {
   * @returns
   */
  async update(args: UpdateArgs | UpdateSqlArgs): Promise<void> {
    let filter: string | null;
    let updates: Record<string, string>;

    if ("valuesSql" in args) {
      filter = args.where ?? null;
      updates = args.valuesSql;
    } else {
      filter = args.where ?? null;
      updates = {};
      for (const [key, value] of Object.entries(args.values)) {
        updates[key] = toSQL(value);
      }
    }

    return tableUpdate
      .call(this._tbl, filter, updates)
      .then((newTable: any) => {
        this._tbl = newTable;
      });
  }
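The two argument shapes in practice:

```ts
// UpdateArgs: literal values are converted through toSQL().
await table.update({ where: "id = 1", values: { text: "hello" } });

// UpdateSqlArgs: raw SQL expressions, passed through untouched.
await table.update({ where: "id = 1", valuesSql: { text: "UPPER(text)" } });
```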
  async mergeInsert(
    on: string,
    data: Array<Record<string, unknown>> | ArrowTable,
    args: MergeInsertArgs
  ): Promise<void> {
    let whenMatchedUpdateAll = false;
    let whenMatchedUpdateAllFilt = null;
    if (
      args.whenMatchedUpdateAll !== undefined &&
      args.whenMatchedUpdateAll !== null
    ) {
      whenMatchedUpdateAll = true;
      if (args.whenMatchedUpdateAll !== true) {
        whenMatchedUpdateAllFilt = args.whenMatchedUpdateAll;
      }
    }
    const whenNotMatchedInsertAll = args.whenNotMatchedInsertAll ?? false;
    let whenNotMatchedBySourceDelete = false;
    let whenNotMatchedBySourceDeleteFilt = null;
    if (
      args.whenNotMatchedBySourceDelete !== undefined &&
      args.whenNotMatchedBySourceDelete !== null
    ) {
      whenNotMatchedBySourceDelete = true;
      if (args.whenNotMatchedBySourceDelete !== true) {
        whenNotMatchedBySourceDeleteFilt = args.whenNotMatchedBySourceDelete;
      }
    }

    const schema = await this.schema;
    let tbl: ArrowTable;
    if (data instanceof ArrowTable) {
      tbl = data;
    } else {
      tbl = makeArrowTable(data, { schema });
    }
    const buffer = await fromTableToBuffer(tbl, this._embeddings, schema);

    this._tbl = await tableMergeInsert.call(
      this._tbl,
@@ -1065,7 +1086,7 @@ export class LocalTable<T = number[]> implements Table<T> {
      whenNotMatchedBySourceDelete,
      whenNotMatchedBySourceDeleteFilt,
      buffer
    );
  }
  /**
@@ -1090,9 +1111,9 @@ export class LocalTable<T = number[]> implements Table<T> {
    return tableCleanupOldVersions
      .call(this._tbl, olderThan, deleteUnverified)
      .then((res: { newTable: any, metrics: CleanupStats }) => {
        this._tbl = res.newTable;
        return res.metrics;
      });
  }

  /**
@@ -1107,32 +1128,32 @@ export class LocalTable<T = number[]> implements Table<T> {
   * @returns Metrics about the compaction operation.
   */
  async compactFiles(options?: CompactionOptions): Promise<CompactionMetrics> {
    const optionsArg = options ?? {};
    return tableCompactFiles
      .call(this._tbl, optionsArg)
      .then((res: { newTable: any, metrics: CompactionMetrics }) => {
        this._tbl = res.newTable;
        return res.metrics;
      });
  }

  async listIndices(): Promise<VectorIndex[]> {
    return tableListIndices.call(this._tbl);
  }

  async indexStats(indexUuid: string): Promise<IndexStats> {
    return tableIndexStats.call(this._tbl, indexUuid);
  }

  get schema(): Promise<Schema> {
    // empty table
    return this.getSchema();
  }

  private async getSchema(): Promise<Schema> {
    const buffer = await tableSchema.call(this._tbl, this._isElectron);
    const table = tableFromIPC(buffer);
    return table.schema;
  }

  // See https://github.com/electron/electron/issues/2288
@@ -1140,28 +1161,30 @@ export class LocalTable<T = number[]> implements Table<T> {
    try {
      // eslint-disable-next-line no-prototype-builtins
      return (
        Object.prototype.hasOwnProperty.call(process?.versions, "electron") ||
        navigator?.userAgent?.toLowerCase()?.includes(" electron")
      );
    } catch (e) {
      return false;
    }
  }

  async addColumns(
    newColumnTransforms: Array<{ name: string, valueSql: string }>
  ): Promise<void> {
    return tableAddColumns.call(this._tbl, newColumnTransforms);
  }

  async alterColumns(columnAlterations: ColumnAlteration[]): Promise<void> {
    return tableAlterColumns.call(this._tbl, columnAlterations);
  }

  async dropColumns(columnNames: string[]): Promise<void> {
    return tableDropColumns.call(this._tbl, columnNames);
  }

  withMiddleware(middleware: HttpMiddleware): Table<T> {
    return this;
  }
}
@@ -1184,7 +1207,7 @@ export interface CompactionOptions {
   */
  targetRowsPerFragment?: number
  /**
   * The maximum number of rows per group. Defaults to 1024.
   */
  maxRowsPerGroup?: number
  /**
@@ -1284,21 +1307,21 @@ export interface IvfPQIndexConfig {
   */
  index_cache_size?: number

  type: "ivf_pq"
}

export type VectorIndexParams = IvfPQIndexConfig;

/**
 * Write mode for writing a table.
 */
export enum WriteMode {
  /** Create a new {@link Table}. */
  Create = "create",
  /** Overwrite the existing {@link Table} if present. */
  Overwrite = "overwrite",
  /** Append new data to the table. */
  Append = "append",
}
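A sketch of requesting an IVF_PQ index through `createIndex`. Only `type: "ivf_pq"` is confirmed by the hunk above; the other field names follow `IvfPQIndexConfig`, whose full definition is outside this diff, and the numeric values are illustrative:

```ts
await table.createIndex({
  type: "ivf_pq",
  column: "vector",     // assumed IvfPQIndexConfig field
  num_partitions: 256,  // illustrative
  num_sub_vectors: 16,  // illustrative
});
```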
/**
@@ -1310,14 +1333,14 @@ export interface WriteOptions {
}

export class DefaultWriteOptions implements WriteOptions {
  writeMode = WriteMode.Create;
}

export function isWriteOptions(value: any): value is WriteOptions {
  return (
    Object.keys(value).length === 1 &&
    (value.writeMode === undefined || typeof value.writeMode === "string")
  );
}
/**
@@ -1327,15 +1350,15 @@ export enum MetricType {
  /**
   * Euclidean distance
   */
  L2 = "l2",

  /**
   * Cosine distance
   */
  Cosine = "cosine",

  /**
   * Dot product
   */
  Dot = "dot",
}
@@ -51,7 +51,7 @@ describe('LanceDB Mirrored Store Integration test', function () {

  const dir = tmpdir()
  console.log(dir)
  const conn = await lancedb.connect({ uri: `s3://lancedb-integtest?mirroredStore=${dir}`, storageOptions: { allowHttp: 'true' } })
  const data = Array(200).fill({ vector: Array(128).fill(1.0), id: 0 })
  data.push(...Array(200).fill({ vector: Array(128).fill(1.0), id: 1 }))
  data.push(...Array(200).fill({ vector: Array(128).fill(1.0), id: 2 }))
@@ -140,6 +140,9 @@ export class RemoteConnection implements Connection {
      schema = nameOrOpts.schema
      embeddings = nameOrOpts.embeddingFunction
      tableName = nameOrOpts.name
      if (data === undefined) {
        data = nameOrOpts.data
      }
    }

    let buffer: Buffer
@@ -32,7 +32,7 @@ import {
  Bool,
  Date_,
  Decimal,
  type DataType,
  Dictionary,
  Binary,
  Float32,
@@ -74,12 +74,12 @@ import {
  DurationNanosecond,
  DurationMicrosecond,
  DurationMillisecond,
  DurationSecond
} from "apache-arrow";
import type { IntBitWidth, TimeBitWidth } from "apache-arrow/type";
function sanitizeMetadata(
  metadataLike?: unknown
): Map<string, string> | undefined {
  if (metadataLike === undefined || metadataLike === null) {
    return undefined;
  }
@@ -90,7 +90,7 @@ function sanitizeMetadata(
  for (const item of metadataLike) {
    if (!(typeof item[0] === "string" || !(typeof item[1] === "string"))) {
      throw Error(
        "Expected metadata, if present, to be a Map<string, string> but it had non-string keys or values"
      );
    }
  }
@@ -105,7 +105,7 @@ function sanitizeInt(typeLike: object) {
    typeof typeLike.isSigned !== "boolean"
  ) {
    throw Error(
      "Expected an Int Type to have a `bitWidth` and `isSigned` property"
    );
  }
  return new Int(typeLike.isSigned, typeLike.bitWidth as IntBitWidth);
@@ -128,7 +128,7 @@ function sanitizeDecimal(typeLike: object) {
    typeof typeLike.bitWidth !== "number"
  ) {
    throw Error(
      "Expected a Decimal Type to have `scale`, `precision`, and `bitWidth` properties"
    );
  }
  return new Decimal(typeLike.scale, typeLike.precision, typeLike.bitWidth);
@@ -149,7 +149,7 @@ function sanitizeTime(typeLike: object) {
    typeof typeLike.bitWidth !== "number"
  ) {
    throw Error(
      "Expected a Time type to have `unit` and `bitWidth` properties"
    );
  }
  return new Time(typeLike.unit, typeLike.bitWidth as TimeBitWidth);
@@ -172,7 +172,7 @@ function sanitizeTypedTimestamp(
    | typeof TimestampNanosecond
    | typeof TimestampMicrosecond
    | typeof TimestampMillisecond
    | typeof TimestampSecond
) {
  let timezone = null;
  if ("timezone" in typeLike && typeof typeLike.timezone === "string") {
@@ -191,7 +191,7 @@ function sanitizeInterval(typeLike: object) {
function sanitizeList(typeLike: object) {
  if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
    throw Error(
      "Expected a List type to have an array-like `children` property"
    );
  }
  if (typeLike.children.length !== 1) {
@@ -203,7 +203,7 @@ function sanitizeList(typeLike: object) {
function sanitizeStruct(typeLike: object) {
  if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
    throw Error(
      "Expected a Struct type to have an array-like `children` property"
    );
  }
  return new Struct(typeLike.children.map((child) => sanitizeField(child)));
@@ -216,47 +216,47 @@ function sanitizeUnion(typeLike: object) {
    typeof typeLike.mode !== "number"
  ) {
    throw Error(
      "Expected a Union type to have `typeIds` and `mode` properties"
    );
  }
  if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
    throw Error(
      "Expected a Union type to have an array-like `children` property"
    );
  }

  return new Union(
    typeLike.mode,
    typeLike.typeIds as any,
    typeLike.children.map((child) => sanitizeField(child))
  );
}

function sanitizeTypedUnion(
  typeLike: object,
  UnionType: typeof DenseUnion | typeof SparseUnion
) {
  if (!("typeIds" in typeLike)) {
    throw Error(
      "Expected a DenseUnion/SparseUnion type to have a `typeIds` property"
    );
  }
  if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
    throw Error(
      "Expected a DenseUnion/SparseUnion type to have an array-like `children` property"
    );
  }

  return new UnionType(
    typeLike.typeIds as any,
    typeLike.children.map((child) => sanitizeField(child))
  );
}
function sanitizeFixedSizeBinary(typeLike: object) {
  if (!("byteWidth" in typeLike) || typeof typeLike.byteWidth !== "number") {
    throw Error(
      "Expected a FixedSizeBinary type to have a `byteWidth` property"
    );
  }
  return new FixedSizeBinary(typeLike.byteWidth);
@@ -268,7 +268,7 @@ function sanitizeFixedSizeList(typeLike: object) {
  }
  if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
    throw Error(
      "Expected a FixedSizeList type to have an array-like `children` property"
    );
  }
  if (typeLike.children.length !== 1) {
@@ -276,14 +276,14 @@ function sanitizeFixedSizeList(typeLike: object) {
  }
  return new FixedSizeList(
    typeLike.listSize,
    sanitizeField(typeLike.children[0])
  );
}
function sanitizeMap(typeLike: object) {
  if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
    throw Error(
      "Expected a Map type to have an array-like `children` property"
    );
  }
  if (!("keysSorted" in typeLike) || typeof typeLike.keysSorted !== "boolean") {
@@ -291,7 +291,7 @@ function sanitizeMap(typeLike: object) {
  }
  return new Map_(
    typeLike.children.map((field) => sanitizeField(field)) as any,
    typeLike.keysSorted
  );
}

@@ -319,7 +319,7 @@ function sanitizeDictionary(typeLike: object) {
    sanitizeType(typeLike.dictionary),
    sanitizeType(typeLike.indices) as any,
    typeLike.id,
    typeLike.isOrdered
  );
}
@@ -454,7 +454,7 @@ function sanitizeField(fieldLike: unknown): Field {
    !("nullable" in fieldLike)
  ) {
    throw Error(
      "The field passed in is missing a `type`/`name`/`nullable` property"
    );
  }
  const type = sanitizeType(fieldLike.type);
@@ -489,7 +489,7 @@ export function sanitizeSchema(schemaLike: unknown): Schema {
  }
  if (!("fields" in schemaLike)) {
    throw Error(
      "The schema passed in does not appear to be a schema (no 'fields' property)"
    );
  }
  let metadata;
@@ -498,11 +498,11 @@ export function sanitizeSchema(schemaLike: unknown): Schema {
  }
  if (!Array.isArray(schemaLike.fields)) {
    throw Error(
      "The schema passed in had a 'fields' property but it was not an array"
    );
  }
  const sanitizedFields = schemaLike.fields.map((field) =>
    sanitizeField(field)
  );
  return new Schema(sanitizedFields, metadata);
}
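The point of `sanitizeSchema` is to accept a schema-shaped object, for example one built by a different apache-arrow version, and rebuild it with the local classes. A sketch under that assumption:

```ts
import { Int32 } from "apache-arrow";

// A plain object standing in for a Schema from another arrow build;
// each field carries the `name`/`type`/`nullable` trio checked above.
const schemaLike = {
  fields: [{ name: "id", type: new Int32(), nullable: true }],
  metadata: new Map([["origin", "example"]]),
};

// Returns a real Schema instance built from the local apache-arrow classes.
const schema = sanitizeSchema(schemaLike);
```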
File diff suppressed because it is too large.

@@ -1,3 +0,0 @@
-**/dist/**/*
-**/native.js
-**/native.d.ts

nodejs/.gitignore (new file)
@@ -0,0 +1 @@
+yarn.lock

@@ -1 +0,0 @@
-.eslintignore
@@ -43,29 +43,20 @@ npm run test
 
 ### Running lint / format
 
-LanceDb uses eslint for linting. VSCode does not need any plugins to use eslint. However, it
-may need some additional configuration. Make sure that eslint.experimental.useFlatConfig is
-set to true. Also, if your vscode root folder is the repo root then you will need to set
-the eslint.workingDirectories to ["nodejs"]. To manually lint your code you can run:
+LanceDb uses [biome](https://biomejs.dev/) for linting and formatting. If you are using VSCode you will need to install the official [Biome](https://marketplace.visualstudio.com/items?itemName=biomejs.biome) extension.
+To manually lint your code you can run:
 
 ```sh
 npm run lint
 ```
 
-LanceDb uses prettier for formatting. If you are using VSCode you will need to install the
-"Prettier - Code formatter" extension. You should then configure it to be the default formatter
-for typescript and you should enable format on save. To manually check your code's format you
-can run:
+to automatically fix all fixable issues:
 
 ```sh
-npm run chkformat
+npm run lint-fix
 ```
 
-If you need to manually format your code you can run:
-
-```sh
-npx prettier --write .
-```
+If you do not have your workspace root set to the `nodejs` directory, unfortunately the extension will not work. You can still run the linting and formatting commands manually.
 
 ### Generating docs
 
@@ -13,32 +13,26 @@
 // limitations under the License.
 
 import {
-  convertToTable,
-  fromTableToBuffer,
-  makeArrowTable,
-  makeEmptyTable,
-} from "../dist/arrow";
-import {
-  Field,
-  FixedSizeList,
-  Float16,
-  Float32,
-  Int32,
-  tableFromIPC,
-  Schema,
-  Float64,
-  type Table,
   Binary,
   Bool,
-  Utf8,
-  Struct,
-  List,
   DataType,
   Dictionary,
-  Int64,
+  Field,
+  FixedSizeList,
   Float,
-  Precision,
+  Float16,
+  Float32,
+  Float64,
+  Int32,
+  Int64,
+  List,
   MetadataVersion,
+  Precision,
+  Schema,
+  Struct,
+  type Table,
+  Utf8,
+  tableFromIPC,
 } from "apache-arrow";
 import {
   Dictionary as OldDictionary,
@@ -46,14 +40,20 @@ import {
   FixedSizeList as OldFixedSizeList,
   Float32 as OldFloat32,
   Int32 as OldInt32,
-  Struct as OldStruct,
   Schema as OldSchema,
+  Struct as OldStruct,
   TimestampNanosecond as OldTimestampNanosecond,
   Utf8 as OldUtf8,
 } from "apache-arrow-old";
-import { type EmbeddingFunction } from "../dist/embedding/embedding_function";
+import {
+  convertToTable,
+  fromTableToBuffer,
+  makeArrowTable,
+  makeEmptyTable,
+} from "../lancedb/arrow";
+import { type EmbeddingFunction } from "../lancedb/embedding/embedding_function";
 
-// eslint-disable-next-line @typescript-eslint/no-explicit-any
+// biome-ignore lint/suspicious/noExplicitAny: skip
 function sampleRecords(): Array<Record<string, any>> {
   return [
     {
@@ -438,7 +438,7 @@ describe("when using two versions of arrow", function () {
         new OldField("ts_no_tz", new OldTimestampNanosecond(null)),
       ]),
     ),
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    // biome-ignore lint/suspicious/noExplicitAny: skip
   ]) as any;
   schema.metadataVersion = MetadataVersion.V5;
   const table = makeArrowTable([], { schema });
@@ -14,11 +14,13 @@
 
 import * as tmp from "tmp";
 
-import { Connection, connect } from "../dist/index.js";
+import { Connection, connect } from "../lancedb";
 
 describe("when connecting", () => {
   let tmpDir: tmp.DirResult;
-  beforeEach(() => (tmpDir = tmp.dirSync({ unsafeCleanup: true })));
+  beforeEach(() => {
+    tmpDir = tmp.dirSync({ unsafeCleanup: true });
+  });
   afterEach(() => tmpDir.removeCallback());
 
   it("should connect", async () => {
@@ -14,7 +14,11 @@
 
 /* eslint-disable @typescript-eslint/naming-convention */
 
-import { connect } from "../dist";
+import {
+  CreateKeyCommand,
+  KMSClient,
+  ScheduleKeyDeletionCommand,
+} from "@aws-sdk/client-kms";
 import {
   CreateBucketCommand,
   DeleteBucketCommand,
@@ -23,11 +27,7 @@ import {
   ListObjectsV2Command,
   S3Client,
 } from "@aws-sdk/client-s3";
-import {
-  CreateKeyCommand,
-  ScheduleKeyDeletionCommand,
-  KMSClient,
-} from "@aws-sdk/client-kms";
+import { connect } from "../lancedb";
 
 // Skip these tests unless the S3_TEST environment variable is set
 const maybeDescribe = process.env.S3_TEST ? describe : describe.skip;
@@ -63,9 +63,10 @@ class S3Bucket {
     // Delete the bucket if it already exists
     try {
       await this.deleteBucket(client, name);
-    } catch (e) {
+    } catch {
       // It's fine if the bucket doesn't exist
     }
+    // biome-ignore lint/style/useNamingConvention: we dont control s3's api
     await client.send(new CreateBucketCommand({ Bucket: name }));
     return new S3Bucket(name);
   }
@@ -78,27 +79,32 @@ class S3Bucket {
   static async deleteBucket(client: S3Client, name: string) {
     // Must delete all objects before we can delete the bucket
     const objects = await client.send(
+      // biome-ignore lint/style/useNamingConvention: we dont control s3's api
       new ListObjectsV2Command({ Bucket: name }),
     );
     if (objects.Contents) {
       for (const object of objects.Contents) {
         await client.send(
+          // biome-ignore lint/style/useNamingConvention: we dont control s3's api
           new DeleteObjectCommand({ Bucket: name, Key: object.Key }),
         );
       }
     }
 
+    // biome-ignore lint/style/useNamingConvention: we dont control s3's api
     await client.send(new DeleteBucketCommand({ Bucket: name }));
   }
 
   public async assertAllEncrypted(path: string, keyId: string) {
     const client = S3Bucket.s3Client();
     const objects = await client.send(
+      // biome-ignore lint/style/useNamingConvention: we dont control s3's api
       new ListObjectsV2Command({ Bucket: this.name, Prefix: path }),
     );
     if (objects.Contents) {
       for (const object of objects.Contents) {
         const metadata = await client.send(
+          // biome-ignore lint/style/useNamingConvention: we dont control s3's api
          new HeadObjectCommand({ Bucket: this.name, Key: object.Key }),
         );
         expect(metadata.ServerSideEncryption).toBe("aws:kms");
@@ -137,6 +143,7 @@ class KmsKey {
 
   public async delete() {
     const client = KmsKey.kmsClient();
+    // biome-ignore lint/style/useNamingConvention: we dont control s3's api
     await client.send(new ScheduleKeyDeletionCommand({ KeyId: this.keyId }));
   }
 }
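The `biome-ignore lint/style/useNamingConvention` comments added above are needed because the new `biome.json` enables `useNamingConvention` as an error, while the AWS SDK requires PascalCase request fields such as `Bucket` and `Key`. A minimal sketch of the pattern, assuming `@aws-sdk/client-s3` and an illustrative bucket name:

```typescript
import { ListObjectsV2Command, S3Client } from "@aws-sdk/client-s3";

async function listBucket(): Promise<void> {
  const client = new S3Client({});
  const objects = await client.send(
    // biome-ignore lint/style/useNamingConvention: we dont control s3's api
    new ListObjectsV2Command({ Bucket: "my-bucket" }),
  );
  // Contents may be undefined when the bucket is empty
  console.log(objects.Contents?.length ?? 0);
}
```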
@@ -16,18 +16,18 @@ import * as fs from "fs";
 import * as path from "path";
 import * as tmp from "tmp";
 
-import { Table, connect } from "../dist";
 import {
-  Schema,
   Field,
-  Float32,
-  Int32,
   FixedSizeList,
-  Int64,
+  Float32,
   Float64,
+  Int32,
+  Int64,
+  Schema,
 } from "apache-arrow";
-import { makeArrowTable } from "../dist/arrow";
-import { Index } from "../dist/indices";
+import { Table, connect } from "../lancedb";
+import { makeArrowTable } from "../lancedb/arrow";
+import { Index } from "../lancedb/indices";
 
 describe("Given a table", () => {
   let tmpDir: tmp.DirResult;
nodejs/biome.json (new file)
@@ -0,0 +1,136 @@
+{
+  "$schema": "https://biomejs.dev/schemas/1.7.3/schema.json",
+  "organizeImports": {
+    "enabled": true
+  },
+  "files": {
+    "ignore": [
+      "**/dist/**/*",
+      "**/native.js",
+      "**/native.d.ts",
+      "**/npm/**/*",
+      "**/.vscode/**"
+    ]
+  },
+  "formatter": {
+    "indentStyle": "space"
+  },
+  "linter": {
+    "enabled": true,
+    "rules": {
+      "recommended": false,
+      "complexity": {
+        "noBannedTypes": "error",
+        "noExtraBooleanCast": "error",
+        "noMultipleSpacesInRegularExpressionLiterals": "error",
+        "noUselessCatch": "error",
+        "noUselessThisAlias": "error",
+        "noUselessTypeConstraint": "error",
+        "noWith": "error"
+      },
+      "correctness": {
+        "noConstAssign": "error",
+        "noConstantCondition": "error",
+        "noEmptyCharacterClassInRegex": "error",
+        "noEmptyPattern": "error",
+        "noGlobalObjectCalls": "error",
+        "noInnerDeclarations": "error",
+        "noInvalidConstructorSuper": "error",
+        "noNewSymbol": "error",
+        "noNonoctalDecimalEscape": "error",
+        "noPrecisionLoss": "error",
+        "noSelfAssign": "error",
+        "noSetterReturn": "error",
+        "noSwitchDeclarations": "error",
+        "noUndeclaredVariables": "error",
+        "noUnreachable": "error",
+        "noUnreachableSuper": "error",
+        "noUnsafeFinally": "error",
+        "noUnsafeOptionalChaining": "error",
+        "noUnusedLabels": "error",
+        "noUnusedVariables": "error",
+        "useIsNan": "error",
+        "useValidForDirection": "error",
+        "useYield": "error"
+      },
+      "style": {
+        "noNamespace": "error",
+        "useAsConstAssertion": "error",
+        "useBlockStatements": "off",
+        "useNamingConvention": {
+          "level": "error",
+          "options": {
+            "strictCase": false
+          }
+        }
+      },
+      "suspicious": {
+        "noAssignInExpressions": "error",
+        "noAsyncPromiseExecutor": "error",
+        "noCatchAssign": "error",
+        "noClassAssign": "error",
+        "noCompareNegZero": "error",
+        "noControlCharactersInRegex": "error",
+        "noDebugger": "error",
+        "noDuplicateCase": "error",
+        "noDuplicateClassMembers": "error",
+        "noDuplicateObjectKeys": "error",
+        "noDuplicateParameters": "error",
+        "noEmptyBlockStatements": "error",
+        "noExplicitAny": "error",
+        "noExtraNonNullAssertion": "error",
+        "noFallthroughSwitchClause": "error",
+        "noFunctionAssign": "error",
+        "noGlobalAssign": "error",
+        "noImportAssign": "error",
+        "noMisleadingCharacterClass": "error",
+        "noMisleadingInstantiator": "error",
+        "noPrototypeBuiltins": "error",
+        "noRedeclare": "error",
+        "noShadowRestrictedNames": "error",
+        "noUnsafeDeclarationMerging": "error",
+        "noUnsafeNegation": "error",
+        "useGetterReturn": "error",
+        "useValidTypeof": "error"
+      }
+    },
+    "ignore": ["**/dist/**/*", "**/native.js", "**/native.d.ts"]
+  },
+  "javascript": {
+    "globals": []
+  },
+  "overrides": [
+    {
+      "include": ["**/*.ts", "**/*.tsx", "**/*.mts", "**/*.cts"],
+      "linter": {
+        "rules": {
+          "correctness": {
+            "noConstAssign": "off",
+            "noGlobalObjectCalls": "off",
+            "noInvalidConstructorSuper": "off",
+            "noNewSymbol": "off",
+            "noSetterReturn": "off",
+            "noUndeclaredVariables": "off",
+            "noUnreachable": "off",
+            "noUnreachableSuper": "off"
+          },
+          "style": {
+            "noArguments": "error",
+            "noVar": "error",
+            "useConst": "error"
+          },
+          "suspicious": {
+            "noDuplicateClassMembers": "off",
+            "noDuplicateObjectKeys": "off",
+            "noDuplicateParameters": "off",
+            "noFunctionAssign": "off",
+            "noImportAssign": "off",
+            "noRedeclare": "off",
+            "noUnsafeNegation": "off",
+            "useGetterReturn": "off"
+          }
+        }
+      }
+    }
+  ]
+}
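For readers unfamiliar with biome: rule suppressions are single-line comments of the form `// biome-ignore <group>/<rule>: <reason>`, which is why the `eslint-disable-next-line` comments elsewhere in this change become `biome-ignore` comments. A tiny sketch against the `noExplicitAny` rule enabled above (the function is illustrative):

```typescript
// biome-ignore lint/suspicious/noExplicitAny: value is deliberately untyped
function logAnything(value: any): void {
  console.log(value);
}

logAnything({ hello: "world" });
```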
@@ -1,28 +0,0 @@
-/* eslint-disable @typescript-eslint/naming-convention */
-// @ts-check
-
-const eslint = require("@eslint/js");
-const tseslint = require("typescript-eslint");
-const eslintConfigPrettier = require("eslint-config-prettier");
-const jsdoc = require("eslint-plugin-jsdoc");
-
-module.exports = tseslint.config(
-  eslint.configs.recommended,
-  jsdoc.configs["flat/recommended"],
-  eslintConfigPrettier,
-  ...tseslint.configs.recommended,
-  {
-    rules: {
-      "@typescript-eslint/naming-convention": "error",
-      "jsdoc/require-returns": "off",
-      "jsdoc/require-param": "off",
-      "jsdoc/require-jsdoc": [
-        "error",
-        {
-          publicOnly: true,
-        },
-      ],
-    },
-    plugins: jsdoc,
-  },
-);
@@ -13,25 +13,25 @@
 // limitations under the License.
 
 import {
-  Field,
-  makeBuilder,
-  RecordBatchFileWriter,
-  Utf8,
-  type Vector,
-  FixedSizeList,
-  vectorFromArray,
-  type Schema,
   Table as ArrowTable,
-  RecordBatchStreamWriter,
+  Binary,
+  DataType,
+  Field,
+  FixedSizeList,
+  type Float,
+  Float32,
   List,
   RecordBatch,
-  makeData,
+  RecordBatchFileWriter,
+  RecordBatchStreamWriter,
+  Schema,
   Struct,
-  type Float,
-  DataType,
-  Binary,
-  Float32,
+  Utf8,
+  type Vector,
+  makeBuilder,
+  makeData,
   type makeTable,
+  vectorFromArray,
 } from "apache-arrow";
 import { type EmbeddingFunction } from "./embedding/embedding_function";
 import { sanitizeSchema } from "./sanitize";
@@ -85,6 +85,7 @@ export class MakeArrowTableOptions {
   vectorColumns: Record<string, VectorColumnOptions> = {
     vector: new VectorColumnOptions(),
   };
+  embeddings?: EmbeddingFunction<unknown>;
 
   /**
    * If true then string columns will be encoded with dictionary encoding
@@ -208,6 +209,7 @@ export function makeArrowTable(
   const opt = new MakeArrowTableOptions(options !== undefined ? options : {});
   if (opt.schema !== undefined && opt.schema !== null) {
     opt.schema = sanitizeSchema(opt.schema);
+    opt.schema = validateSchemaEmbeddings(opt.schema, data, opt.embeddings);
   }
   const columns: Record<string, Vector> = {};
   // TODO: sample dataset to find missing columns
@@ -287,8 +289,8 @@ export function makeArrowTable(
   // then patch the schema of the batches so we can use
   // `new ArrowTable(schema, batches)` which does not do any schema inference
   const firstTable = new ArrowTable(columns);
-  // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
   const batchesFixed = firstTable.batches.map(
+    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
     (batch) => new RecordBatch(opt.schema!, batch.data),
   );
   return new ArrowTable(opt.schema, batchesFixed);
@@ -313,7 +315,7 @@ function makeListVector(lists: unknown[][]): Vector<unknown> {
     throw Error("Cannot infer list vector from empty array or empty list");
   }
   const sampleList = lists[0];
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  // biome-ignore lint/suspicious/noExplicitAny: skip
   let inferredType: any;
   try {
     const sampleVector = makeVector(sampleList);
@@ -337,7 +339,7 @@ function makeVector(
   values: unknown[],
   type?: DataType,
   stringAsDictionary?: boolean,
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  // biome-ignore lint/suspicious/noExplicitAny: skip
 ): Vector<any> {
   if (type !== undefined) {
     // No need for inference, let Arrow create it
@@ -648,3 +650,41 @@ function alignTable(table: ArrowTable, schema: Schema): ArrowTable {
 export function createEmptyTable(schema: Schema): ArrowTable {
   return new ArrowTable(sanitizeSchema(schema));
 }
+
+function validateSchemaEmbeddings(
+  schema: Schema,
+  data: Array<Record<string, unknown>>,
+  embeddings: EmbeddingFunction<unknown> | undefined,
+) {
+  const fields = [];
+  const missingEmbeddingFields = [];
+
+  // First we check if the field is a `FixedSizeList`
+  // Then we check if the data contains the field
+  // if it does not, we add it to the list of missing embedding fields
+  // Finally, we check if those missing embedding fields are `this._embeddings`
+  // if they are not, we throw an error
+  for (const field of schema.fields) {
+    if (field.type instanceof FixedSizeList) {
+      if (data.length !== 0 && data?.[0]?.[field.name] === undefined) {
+        missingEmbeddingFields.push(field);
+      } else {
+        fields.push(field);
+      }
+    } else {
+      fields.push(field);
+    }
+  }
+
+  if (missingEmbeddingFields.length > 0 && embeddings === undefined) {
+    console.log({ missingEmbeddingFields, embeddings });
+
+    throw new Error(
+      `Table has embeddings: "${missingEmbeddingFields
+        .map((f) => f.name)
+        .join(",")}", but no embedding function was provided`,
+    );
+  }
+
+  return new Schema(fields);
+}
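The new `validateSchemaEmbeddings` helper changes how `makeArrowTable` treats vector columns: a `FixedSizeList` field that is absent from the supplied rows is only accepted when an embedding function is provided via the new `embeddings` option; otherwise the call throws. A minimal sketch of the failing path, assuming the helpers are imported the way the tests in this change import them (from `../lancedb/arrow`); the schema and field names are illustrative:

```typescript
import { Field, FixedSizeList, Float32, Schema } from "apache-arrow";
import { makeArrowTable } from "../lancedb/arrow";

const schema = new Schema([
  new Field("id", new Float32(), true),
  new Field(
    "vector",
    new FixedSizeList(2, new Field("item", new Float32(), true)),
    true,
  ),
]);

try {
  // The rows carry no "vector" column and no `embeddings` option is given,
  // so validateSchemaEmbeddings throws:
  //   Table has embeddings: "vector", but no embedding function was provided
  makeArrowTable([{ id: 1 }], { schema });
} catch (e) {
  console.error(e);
}
```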
@@ -12,15 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+import { Table as ArrowTable, Schema } from "apache-arrow";
 import { fromTableToBuffer, makeArrowTable, makeEmptyTable } from "./arrow";
 import { ConnectionOptions, Connection as LanceDbConnection } from "./native";
 import { Table } from "./table";
-import { Table as ArrowTable, Schema } from "apache-arrow";
 
 /**
  * Connect to a LanceDB instance at the given URI.
  *
- * Accpeted formats:
+ * Accepted formats:
  *
  * - `/path/to/database` - local database
 * - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage
@@ -77,6 +77,18 @@ export interface OpenTableOptions {
   * The available options are described at https://lancedb.github.io/lancedb/guides/storage/
   */
  storageOptions?: Record<string, string>;
+  /**
+   * Set the size of the index cache, specified as a number of entries
+   *
+   * The exact meaning of an "entry" will depend on the type of index:
+   * - IVF: there is one entry for each IVF partition
+   * - BTREE: there is one entry for the entire index
+   *
+   * This cache applies to the entire opened table, across all indices.
+   * Setting this value higher will increase performance on larger datasets
+   * at the expense of more RAM
+   */
+  indexCacheSize?: number;
 }
 
 export interface TableNamesOptions {
@@ -160,6 +172,7 @@ export class Connection {
     const innerTable = await this.inner.openTable(
       name,
       cleanseStorageOptions(options?.storageOptions),
+      options?.indexCacheSize,
     );
     return new Table(innerTable);
   }
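The new `indexCacheSize` option threads straight through `Connection.openTable` into the native `openTable` call. A minimal sketch of its use, assuming an async context; the database path, table name, and cache size are illustrative:

```typescript
import * as lancedb from "@lancedb/lancedb";

async function open(): Promise<void> {
  const db = await lancedb.connect("/tmp/sample-lancedb");
  // One cache entry per IVF partition (or one entry per BTREE index);
  // larger values trade RAM for query performance on bigger datasets.
  const table = await db.openTable("my_table", { indexCacheSize: 256 });
  console.log(await table.version());
}
```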
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-import { type EmbeddingFunction } from "./embedding_function";
 import type OpenAI from "openai";
+import { type EmbeddingFunction } from "./embedding_function";
 
 export class OpenAIEmbeddingFunction implements EmbeddingFunction<string> {
   private readonly _openai: OpenAI;
@@ -12,14 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-import { RecordBatch, tableFromIPC, Table as ArrowTable } from "apache-arrow";
+import { Table as ArrowTable, RecordBatch, tableFromIPC } from "apache-arrow";
+import { type IvfPqOptions } from "./indices";
 import {
   RecordBatchIterator as NativeBatchIterator,
   Query as NativeQuery,
   Table as NativeTable,
   VectorQuery as NativeVectorQuery,
 } from "./native";
-import { type IvfPqOptions } from "./indices";
 export class RecordBatchIterator implements AsyncIterator<RecordBatch> {
   private promisedInner?: Promise<NativeBatchIterator>;
   private inner?: NativeBatchIterator;
@@ -29,7 +29,7 @@ export class RecordBatchIterator implements AsyncIterator<RecordBatch> {
     this.promisedInner = promise;
   }
 
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  // biome-ignore lint/suspicious/noExplicitAny: skip
   async next(): Promise<IteratorResult<RecordBatch<any>>> {
     if (this.inner === undefined) {
       this.inner = await this.promisedInner;
@@ -56,7 +56,9 @@ export class QueryBase<
     QueryType,
   > implements AsyncIterable<RecordBatch>
 {
-  protected constructor(protected inner: NativeQueryType) {}
+  protected constructor(protected inner: NativeQueryType) {
+    // intentionally empty
+  }
 
   /**
    * A filter statement to be applied to this query.
@@ -150,7 +152,7 @@ export class QueryBase<
     return new RecordBatchIterator(this.nativeExecute());
   }
 
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  // biome-ignore lint/suspicious/noExplicitAny: skip
   [Symbol.asyncIterator](): AsyncIterator<RecordBatch<any>> {
     const promise = this.nativeExecute();
     return new RecordBatchIterator(promise);
@@ -368,7 +370,7 @@ export class Query extends QueryBase<NativeQuery, Query> {
   * a default `limit` of 10 will be used. @see {@link Query#limit}
   */
  nearestTo(vector: unknown): VectorQuery {
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    // biome-ignore lint/suspicious/noExplicitAny: skip
    const vectorQuery = this.inner.nearestTo(Float32Array.from(vector as any));
    return new VectorQuery(vectorQuery);
  }
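Since `QueryBase` implements `AsyncIterable<RecordBatch>`, results stream batch by batch through `for await`, matching the updated JSDoc examples elsewhere in this change. A minimal sketch, assuming `table` is an already-opened `Table` inside an async context:

```typescript
// Filtered scan: builder methods chain, and iteration executes the query.
for await (const batch of table
  .query()
  .where("id > 1")
  .select(["id"])
  .limit(20)) {
  console.log(batch.numRows);
}

// Vector search goes through nearestTo(), which returns a VectorQuery.
for await (const batch of table.query().nearestTo([1, 2, 3]).limit(10)) {
  console.log(batch.numRows);
}
```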
@@ -21,60 +21,60 @@
 // and so we must sanitize the input to ensure that it is compatible.
 
 import {
-  Field,
-  Utf8,
-  FixedSizeBinary,
-  FixedSizeList,
-  Schema,
-  List,
-  Struct,
-  Float,
+  Binary,
   Bool,
+  DataType,
+  DateDay,
+  DateMillisecond,
+  type DateUnit,
   Date_,
   Decimal,
-  DataType,
+  DenseUnion,
   Dictionary,
-  Binary,
-  Float32,
-  Interval,
-  Map_,
   Duration,
-  Union,
-  Time,
-  Timestamp,
-  Type,
-  Null,
+  DurationMicrosecond,
+  DurationMillisecond,
+  DurationNanosecond,
+  DurationSecond,
+  Field,
+  FixedSizeBinary,
+  FixedSizeList,
+  Float,
+  Float16,
+  Float32,
+  Float64,
   Int,
-  type Precision,
-  type DateUnit,
   Int8,
   Int16,
   Int32,
   Int64,
+  Interval,
+  IntervalDayTime,
+  IntervalYearMonth,
+  List,
+  Map_,
+  Null,
+  type Precision,
+  Schema,
+  SparseUnion,
+  Struct,
+  Time,
+  TimeMicrosecond,
+  TimeMillisecond,
+  TimeNanosecond,
+  TimeSecond,
+  Timestamp,
+  TimestampMicrosecond,
+  TimestampMillisecond,
+  TimestampNanosecond,
+  TimestampSecond,
+  Type,
   Uint8,
   Uint16,
   Uint32,
   Uint64,
-  Float16,
-  Float64,
-  DateDay,
-  DateMillisecond,
-  DenseUnion,
-  SparseUnion,
-  TimeNanosecond,
-  TimeMicrosecond,
-  TimeMillisecond,
-  TimeSecond,
-  TimestampNanosecond,
-  TimestampMicrosecond,
-  TimestampMillisecond,
-  TimestampSecond,
-  IntervalDayTime,
-  IntervalYearMonth,
-  DurationNanosecond,
-  DurationMicrosecond,
-  DurationMillisecond,
-  DurationSecond,
+  Union,
+  Utf8,
 } from "apache-arrow";
 import type { IntBitWidth, TKeys, TimeBitWidth } from "apache-arrow/type";
 
@@ -228,7 +228,7 @@ function sanitizeUnion(typeLike: object) {
 
   return new Union(
     typeLike.mode,
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    // biome-ignore lint/suspicious/noExplicitAny: skip
     typeLike.typeIds as any,
     typeLike.children.map((child) => sanitizeField(child)),
   );
@@ -294,7 +294,7 @@ function sanitizeMap(typeLike: object) {
   }
 
   return new Map_(
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    // biome-ignore lint/suspicious/noExplicitAny: skip
     typeLike.children.map((field) => sanitizeField(field)) as any,
     typeLike.keysSorted,
   );
@@ -328,7 +328,7 @@ function sanitizeDictionary(typeLike: object) {
   );
 }
 
-// eslint-disable-next-line @typescript-eslint/no-explicit-any
+// biome-ignore lint/suspicious/noExplicitAny: skip
 function sanitizeType(typeLike: unknown): DataType<any> {
   if (typeof typeLike !== "object" || typeLike === null) {
     throw Error("Expected a Type but object was null/undefined");
@@ -13,6 +13,8 @@
 // limitations under the License.
 
 import { Schema, tableFromIPC } from "apache-arrow";
+import { Data, fromDataToBuffer } from "./arrow";
+import { IndexOptions } from "./indices";
 import {
   AddColumnsSql,
   ColumnAlteration,
@@ -20,8 +22,6 @@ import {
   Table as _NativeTable,
 } from "./native";
 import { Query, VectorQuery } from "./query";
-import { IndexOptions } from "./indices";
-import { Data, fromDataToBuffer } from "./arrow";
 
 export { IndexConfig } from "./native";
 /**
@@ -169,21 +169,24 @@ export class Table {
   * // If the column has a vector (fixed size list) data type then
   * // an IvfPq vector index will be created.
   * const table = await conn.openTable("my_table");
-   * await table.createIndex(["vector"]);
+   * await table.createIndex("vector");
   * @example
   * // For advanced control over vector index creation you can specify
   * // the index type and options.
   * const table = await conn.openTable("my_table");
-   * await table.createIndex(["vector"], I)
-   *   .ivf_pq({ num_partitions: 128, num_sub_vectors: 16 })
-   *   .build();
+   * await table.createIndex("vector", {
+   *   config: lancedb.Index.ivfPq({
+   *     numPartitions: 128,
+   *     numSubVectors: 16,
+   *   }),
+   * });
   * @example
   * // Or create a Scalar index
-   * await table.createIndex("my_float_col").build();
+   * await table.createIndex("my_float_col");
   */
  async createIndex(column: string, options?: Partial<IndexOptions>) {
    // Bit of a hack to get around the fact that TS has no package-scope.
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    // biome-ignore lint/suspicious/noExplicitAny: skip
    const nativeIndex = (options?.config as any)?.inner;
    await this.inner.createIndex(nativeIndex, column, options?.replace);
  }
@@ -197,8 +200,7 @@ export class Table {
   * vector similarity, sorting, and more.
   *
   * Note: By default, all columns are returned. For best performance, you should
-   * only fetch the columns you need. See [`Query::select_with_projection`] for
-   * more details.
+   * only fetch the columns you need.
   *
   * When appropriate, various indices and statistics based pruning will be used to
   * accelerate the query.
@@ -207,8 +209,11 @@ export class Table {
   * //
   * // This query will return up to 1000 rows whose value in the `id` column
   * // is greater than 5. LanceDb supports a broad set of filtering functions.
-   * for await (const batch of table.query()
-   *   .filter("id > 1").select(["id"]).limit(20)) {
+   * for await (const batch of table
+   *   .query()
+   *   .where("id > 1")
+   *   .select(["id"])
+   *   .limit(20)) {
   *   console.log(batch);
   * }
   * @example
@@ -218,12 +223,13 @@ export class Table {
   * // closest to the query vector [1.0, 2.0, 3.0]. If an index has been created
   * // on the "vector" column then this will perform an ANN search.
   * //
-   * // The `refine_factor` and `nprobes` methods are used to control the recall /
+   * // The `refineFactor` and `nprobes` methods are used to control the recall /
   * // latency tradeoff of the search.
-   * for await (const batch of table.query()
-   *   .nearestTo([1, 2, 3])
-   *   .refineFactor(5).nprobe(10)
-   *   .limit(10)) {
+   * for await (const batch of table
+   *   .query()
+   *   .where("id > 1")
+   *   .select(["id"])
+   *   .limit(20)) {
   *   console.log(batch);
   * }
   * @example
@@ -286,43 +292,45 @@ export class Table {
    await this.inner.dropColumns(columnNames);
  }
 
-  /**
-   * Retrieve the version of the table
-   *
-   * LanceDb supports versioning. Every operation that modifies the table increases
-   * version. As long as a version hasn't been deleted you can `[Self::checkout]` that
-   * version to view the data at that point. In addition, you can `[Self::restore]` the
-   * version to replace the current table with a previous version.
-   */
+  /** Retrieve the version of the table */
  async version(): Promise<number> {
    return await this.inner.version();
  }
 
  /**
-   * Checks out a specific version of the Table
+   * Checks out a specific version of the table _This is an in-place operation._
   *
-   * Any read operation on the table will now access the data at the checked out version.
-   * As a consequence, calling this method will disable any read consistency interval
-   * that was previously set.
+   * This allows viewing previous versions of the table. If you wish to
+   * keep writing to the dataset starting from an old version, then use
+   * the `restore` function.
   *
-   * This is a read-only operation that turns the table into a sort of "view"
-   * or "detached head". Other table instances will not be affected. To make the change
-   * permanent you can use the `[Self::restore]` method.
+   * Calling this method will set the table into time-travel mode. If you
+   * wish to return to standard mode, call `checkoutLatest`.
+   * @param {number} version The version to checkout
+   * @example
+   * ```typescript
+   * import * as lancedb from "@lancedb/lancedb"
+   * const db = await lancedb.connect("./.lancedb");
+   * const table = await db.createTable("my_table", [
+   *   { vector: [1.1, 0.9], type: "vector" },
+   * ]);
   *
-   * Any operation that modifies the table will fail while the table is in a checked
-   * out state.
-   *
-   * To return the table to a normal state use `[Self::checkout_latest]`
+   * console.log(await table.version()); // 1
+   * console.log(table.display());
+   * await table.add([{ vector: [0.5, 0.2], type: "vector" }]);
+   * await table.checkout(1);
+   * console.log(await table.version()); // 2
+   * ```
   */
  async checkout(version: number): Promise<void> {
    await this.inner.checkout(version);
  }
 
  /**
-   * Ensures the table is pointing at the latest version
+   * Checkout the latest version of the table. _This is an in-place operation._
   *
-   * This can be used to manually update a table when the read_consistency_interval is None
-   * It can also be used to undo a `[Self::checkout]` operation
+   * The table will be set back into standard mode, and will track the latest
+   * version of the table.
   */
  async checkoutLatest(): Promise<void> {
    await this.inner.checkoutLatest();
@@ -344,9 +352,7 @@ export class Table {
    await this.inner.restore();
  }
 
-  /**
-   * List all indices that have been created with Self::create_index
-   */
+  /** List all indices that have been created with {@link Table.createIndex} */
  async listIndices(): Promise<IndexConfig[]> {
    return await this.inner.listIndices();
  }
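The rewritten JSDoc above describes the time-travel workflow; end to end it looks roughly like this, assuming an async context and a fresh database path (both illustrative):

```typescript
import * as lancedb from "@lancedb/lancedb";

async function timeTravel(): Promise<void> {
  const db = await lancedb.connect("./.lancedb");
  const table = await db.createTable("my_table", [
    { vector: [1.1, 0.9], type: "vector" },
  ]);
  console.log(await table.version()); // 1

  await table.add([{ vector: [0.5, 0.2], type: "vector" }]);
  await table.checkout(1); // time-travel mode: reads now see version 1
  await table.checkoutLatest(); // back to standard mode

  // Or make an old version the new head instead:
  // await table.checkout(1); await table.restore();
}
```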
@@ -1,16 +1,10 @@
 {
   "name": "@lancedb/lancedb-darwin-arm64",
-  "version": "0.4.17",
-  "os": [
-    "darwin"
-  ],
-  "cpu": [
-    "arm64"
-  ],
+  "version": "0.4.20",
+  "os": ["darwin"],
+  "cpu": ["arm64"],
   "main": "lancedb.darwin-arm64.node",
-  "files": [
-    "lancedb.darwin-arm64.node"
-  ],
+  "files": ["lancedb.darwin-arm64.node"],
   "license": "Apache 2.0",
   "engines": {
     "node": ">= 18"
@@ -1,16 +1,10 @@
 {
   "name": "@lancedb/lancedb-darwin-x64",
-  "version": "0.4.17",
-  "os": [
-    "darwin"
-  ],
-  "cpu": [
-    "x64"
-  ],
+  "version": "0.4.20",
+  "os": ["darwin"],
+  "cpu": ["x64"],
   "main": "lancedb.darwin-x64.node",
-  "files": [
-    "lancedb.darwin-x64.node"
-  ],
+  "files": ["lancedb.darwin-x64.node"],
   "license": "Apache 2.0",
   "engines": {
     "node": ">= 18"
@@ -1,21 +1,13 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-gnu",
-  "version": "0.4.17",
-  "os": [
-    "linux"
-  ],
-  "cpu": [
-    "arm64"
-  ],
+  "version": "0.4.20",
+  "os": ["linux"],
+  "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-gnu.node",
-  "files": [
-    "lancedb.linux-arm64-gnu.node"
-  ],
+  "files": ["lancedb.linux-arm64-gnu.node"],
   "license": "Apache 2.0",
   "engines": {
     "node": ">= 18"
   },
-  "libc": [
-    "glibc"
-  ]
+  "libc": ["glibc"]
 }
@@ -1,21 +1,13 @@
 {
   "name": "@lancedb/lancedb-linux-x64-gnu",
-  "version": "0.4.17",
-  "os": [
-    "linux"
-  ],
-  "cpu": [
-    "x64"
-  ],
+  "version": "0.4.20",
+  "os": ["linux"],
+  "cpu": ["x64"],
   "main": "lancedb.linux-x64-gnu.node",
-  "files": [
-    "lancedb.linux-x64-gnu.node"
-  ],
+  "files": ["lancedb.linux-x64-gnu.node"],
   "license": "Apache 2.0",
   "engines": {
     "node": ">= 18"
   },
-  "libc": [
-    "glibc"
-  ]
+  "libc": ["glibc"]
 }
@@ -1,16 +1,10 @@
 {
   "name": "@lancedb/lancedb-win32-x64-msvc",
   "version": "0.4.14",
-  "os": [
-    "win32"
-  ],
-  "cpu": [
-    "x64"
-  ],
+  "os": ["win32"],
+  "cpu": ["x64"],
   "main": "lancedb.win32-x64-msvc.node",
-  "files": [
-    "lancedb.win32-x64-msvc.node"
-  ],
+  "files": ["lancedb.win32-x64-msvc.node"],
   "license": "Apache 2.0",
   "engines": {
     "node": ">= 18"
nodejs/package-lock.json (generated)
@@ -1,12 +1,12 @@
|
|||||||
{
|
{
|
||||||
"name": "@lancedb/lancedb",
|
"name": "@lancedb/lancedb",
|
||||||
"version": "0.4.16",
|
"version": "0.4.20",
|
||||||
"lockfileVersion": 3,
|
"lockfileVersion": 3,
|
||||||
"requires": true,
|
"requires": true,
|
||||||
"packages": {
|
"packages": {
|
||||||
"": {
|
"": {
|
||||||
"name": "@lancedb/lancedb",
|
"name": "@lancedb/lancedb",
|
||||||
"version": "0.4.16",
|
"version": "0.4.20",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64",
|
"x64",
|
||||||
"arm64"
|
"arm64"
|
||||||
@@ -24,6 +24,8 @@
|
|||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@aws-sdk/client-kms": "^3.33.0",
|
"@aws-sdk/client-kms": "^3.33.0",
|
||||||
"@aws-sdk/client-s3": "^3.33.0",
|
"@aws-sdk/client-s3": "^3.33.0",
|
||||||
|
"@biomejs/biome": "^1.7.3",
|
||||||
|
"@jest/globals": "^29.7.0",
|
||||||
"@napi-rs/cli": "^2.18.0",
|
"@napi-rs/cli": "^2.18.0",
|
||||||
"@types/jest": "^29.1.2",
|
"@types/jest": "^29.1.2",
|
||||||
"@types/tmp": "^0.2.6",
|
"@types/tmp": "^0.2.6",
|
||||||
@@ -45,13 +47,6 @@
|
|||||||
},
|
},
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">= 18"
|
"node": ">= 18"
|
||||||
},
|
|
||||||
"optionalDependencies": {
|
|
||||||
"@lancedb/lancedb-darwin-arm64": "0.4.16",
|
|
||||||
"@lancedb/lancedb-darwin-x64": "0.4.16",
|
|
||||||
"@lancedb/lancedb-linux-arm64-gnu": "0.4.16",
|
|
||||||
"@lancedb/lancedb-linux-x64-gnu": "0.4.16",
|
|
||||||
"@lancedb/lancedb-win32-x64-msvc": "0.4.16"
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@75lb/deep-merge": {
|
"node_modules/@75lb/deep-merge": {
|
||||||
@@ -1660,6 +1655,161 @@
|
|||||||
"integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==",
|
"integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==",
|
||||||
"dev": true
|
"dev": true
|
||||||
},
|
},
|
||||||
|
"node_modules/@biomejs/biome": {
|
||||||
|
"version": "1.7.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@biomejs/biome/-/biome-1.7.3.tgz",
|
||||||
|
"integrity": "sha512-ogFQI+fpXftr+tiahA6bIXwZ7CSikygASdqMtH07J2cUzrpjyTMVc9Y97v23c7/tL1xCZhM+W9k4hYIBm7Q6cQ==",
|
||||||
|
"dev": true,
|
||||||
|
"hasInstallScript": true,
|
||||||
|
"bin": {
|
||||||
|
"biome": "bin/biome"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=14.21.3"
|
||||||
|
},
|
||||||
|
"funding": {
|
||||||
|
"type": "opencollective",
|
||||||
|
"url": "https://opencollective.com/biome"
|
||||||
|
},
|
||||||
|
"optionalDependencies": {
|
||||||
|
"@biomejs/cli-darwin-arm64": "1.7.3",
|
||||||
|
"@biomejs/cli-darwin-x64": "1.7.3",
|
||||||
|
"@biomejs/cli-linux-arm64": "1.7.3",
|
||||||
|
"@biomejs/cli-linux-arm64-musl": "1.7.3",
|
||||||
|
"@biomejs/cli-linux-x64": "1.7.3",
|
||||||
|
"@biomejs/cli-linux-x64-musl": "1.7.3",
|
||||||
|
"@biomejs/cli-win32-arm64": "1.7.3",
|
||||||
|
"@biomejs/cli-win32-x64": "1.7.3"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@biomejs/cli-darwin-arm64": {
|
||||||
|
"version": "1.7.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-arm64/-/cli-darwin-arm64-1.7.3.tgz",
|
||||||
|
"integrity": "sha512-eDvLQWmGRqrPIRY7AIrkPHkQ3visEItJKkPYSHCscSDdGvKzYjmBJwG1Gu8+QC5ed6R7eiU63LEC0APFBobmfQ==",
|
||||||
|
"cpu": [
|
||||||
|
"arm64"
|
||||||
|
],
|
||||||
|
"dev": true,
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"darwin"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">=14.21.3"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@biomejs/cli-darwin-x64": {
|
||||||
|
"version": "1.7.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-x64/-/cli-darwin-x64-1.7.3.tgz",
|
||||||
|
"integrity": "sha512-JXCaIseKRER7dIURsVlAJacnm8SG5I0RpxZ4ya3dudASYUc68WGl4+FEN03ABY3KMIq7hcK1tzsJiWlmXyosZg==",
|
||||||
|
"cpu": [
|
||||||
|
"x64"
|
||||||
|
],
|
||||||
|
"dev": true,
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"darwin"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">=14.21.3"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@biomejs/cli-linux-arm64": {
|
||||||
|
"version": "1.7.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64/-/cli-linux-arm64-1.7.3.tgz",
|
||||||
|
"integrity": "sha512-phNTBpo7joDFastnmZsFjYcDYobLTx4qR4oPvc9tJ486Bd1SfEVPHEvJdNJrMwUQK56T+TRClOQd/8X1nnjA9w==",
|
||||||
|
"cpu": [
|
||||||
|
"arm64"
|
||||||
|
],
|
||||||
|
"dev": true,
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"linux"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">=14.21.3"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@biomejs/cli-linux-arm64-musl": {
|
||||||
|
"version": "1.7.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64-musl/-/cli-linux-arm64-musl-1.7.3.tgz",
|
||||||
|
"integrity": "sha512-c8AlO45PNFZ1BYcwaKzdt46kYbuP6xPGuGQ6h4j3XiEDpyseRRUy/h+6gxj07XovmyxKnSX9GSZ6nVbZvcVUAw==",
|
||||||
|
"cpu": [
|
||||||
|
"arm64"
|
||||||
|
],
|
||||||
|
"dev": true,
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"linux"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">=14.21.3"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@biomejs/cli-linux-x64": {
|
||||||
|
"version": "1.7.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64/-/cli-linux-x64-1.7.3.tgz",
|
||||||
|
"integrity": "sha512-vnedYcd5p4keT3iD48oSKjOIRPYcjSNNbd8MO1bKo9ajg3GwQXZLAH+0Cvlr+eMsO67/HddWmscSQwTFrC/uPA==",
|
||||||
|
"cpu": [
|
||||||
|
"x64"
|
||||||
|
],
|
||||||
|
"dev": true,
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"linux"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">=14.21.3"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@biomejs/cli-linux-x64-musl": {
|
||||||
|
"version": "1.7.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64-musl/-/cli-linux-x64-musl-1.7.3.tgz",
|
||||||
|
"integrity": "sha512-UdEHKtYGWEX3eDmVWvQeT+z05T9/Sdt2+F/7zmMOFQ7boANeX8pcO6EkJPK3wxMudrApsNEKT26rzqK6sZRTRA==",
|
||||||
|
"cpu": [
|
||||||
|
"x64"
|
||||||
|
],
|
||||||
|
"dev": true,
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"linux"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">=14.21.3"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@biomejs/cli-win32-arm64": {
|
||||||
|
"version": "1.7.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@biomejs/cli-win32-arm64/-/cli-win32-arm64-1.7.3.tgz",
|
||||||
|
"integrity": "sha512-unNCDqUKjujYkkSxs7gFIfdasttbDC4+z0kYmcqzRk6yWVoQBL4dNLcCbdnJS+qvVDNdI9rHp2NwpQ0WAdla4Q==",
|
||||||
|
"cpu": [
|
||||||
|
"arm64"
|
||||||
|
],
|
||||||
|
"dev": true,
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"win32"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">=14.21.3"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@biomejs/cli-win32-x64": {
|
||||||
|
"version": "1.7.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/@biomejs/cli-win32-x64/-/cli-win32-x64-1.7.3.tgz",
|
||||||
|
"integrity": "sha512-ZmByhbrnmz/UUFYB622CECwhKIPjJLLPr5zr3edhu04LzbfcOrz16VYeNq5dpO1ADG70FORhAJkaIGdaVBG00w==",
|
||||||
|
"cpu": [
|
||||||
|
"x64"
|
||||||
|
],
|
||||||
|
"dev": true,
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"win32"
|
||||||
|
],
|
||||||
|
"engines": {
|
||||||
|
"node": ">=14.21.3"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/@es-joy/jsdoccomment": {
|
"node_modules/@es-joy/jsdoccomment": {
|
||||||
"version": "0.42.0",
|
"version": "0.42.0",
|
||||||
"resolved": "https://registry.npmjs.org/@es-joy/jsdoccomment/-/jsdoccomment-0.42.0.tgz",
|
"resolved": "https://registry.npmjs.org/@es-joy/jsdoccomment/-/jsdoccomment-0.42.0.tgz",
|
||||||
@@ -2221,81 +2371,6 @@
|
|||||||
"@jridgewell/sourcemap-codec": "^1.4.14"
|
"@jridgewell/sourcemap-codec": "^1.4.14"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@lancedb/lancedb-darwin-arm64": {
|
|
||||||
"version": "0.4.16",
|
|
||||||
"resolved": "https://registry.npmjs.org/@lancedb/lancedb-darwin-arm64/-/lancedb-darwin-arm64-0.4.16.tgz",
|
|
||||||
"integrity": "sha512-CV65ouIDQbBSNtdHbQSr2fqXflOuqud1cfweUS+EiK7eEOEYl7nO2oiFYO49Jy76MEwZxiP99hW825aCqIQJqg==",
|
|
||||||
"cpu": [
|
|
||||||
"arm64"
|
|
||||||
],
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"darwin"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 18"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@lancedb/lancedb-darwin-x64": {
|
|
||||||
"version": "0.4.16",
|
|
||||||
"resolved": "https://registry.npmjs.org/@lancedb/lancedb-darwin-x64/-/lancedb-darwin-x64-0.4.16.tgz",
|
|
||||||
"integrity": "sha512-1CwIYCNdbFmV7fvqM+qUxbYgwxx0slcCV48PC/I19Ejitgtzw/NJiWDCvONhaLqG85lWNZm1xYceRpVv7b8seQ==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"darwin"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 18"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@lancedb/lancedb-linux-arm64-gnu": {
|
|
||||||
"version": "0.4.16",
|
|
||||||
"resolved": "https://registry.npmjs.org/@lancedb/lancedb-linux-arm64-gnu/-/lancedb-linux-arm64-gnu-0.4.16.tgz",
|
|
||||||
"integrity": "sha512-CzLEbzoHKS6jV0k52YnvsiVNx0VzLp1Vz/zmbHI6HmB/XbS67qDO93Jk71MDmXq3JDw0FKFCw9ghkg+6YWq7ZA==",
|
|
||||||
"cpu": [
|
|
||||||
"arm64"
|
|
||||||
],
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 18"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@lancedb/lancedb-linux-x64-gnu": {
|
|
||||||
"version": "0.4.16",
|
|
||||||
"resolved": "https://registry.npmjs.org/@lancedb/lancedb-linux-x64-gnu/-/lancedb-linux-x64-gnu-0.4.16.tgz",
|
|
||||||
"integrity": "sha512-nKChybybi8uA0AFRHBFm7Fz3VXcRm8riv5Gs7xQsrsCtYxxf4DT/0BfUvQ0xKbwNJa+fawHRxi9BOQewdj49fg==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"linux"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 18"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@lancedb/lancedb-win32-x64-msvc": {
|
|
||||||
"version": "0.4.16",
|
|
||||||
"resolved": "https://registry.npmjs.org/@lancedb/lancedb-win32-x64-msvc/-/lancedb-win32-x64-msvc-0.4.16.tgz",
|
|
||||||
"integrity": "sha512-KMeBPMpv2g+ZMVsHVibed7BydrBlxje1qS0bZTDrLw9BtZOk6XH2lh1mCDnCJI6sbAscUKNA6fDCdquhQPHL7w==",
|
|
||||||
"cpu": [
|
|
||||||
"x64"
|
|
||||||
],
|
|
||||||
"optional": true,
|
|
||||||
"os": [
|
|
||||||
"win32"
|
|
||||||
],
|
|
||||||
"engines": {
|
|
||||||
"node": ">= 18"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@napi-rs/cli": {
|
"node_modules/@napi-rs/cli": {
|
||||||
"version": "2.18.0",
|
"version": "2.18.0",
|
||||||
"resolved": "https://registry.npmjs.org/@napi-rs/cli/-/cli-2.18.0.tgz",
|
"resolved": "https://registry.npmjs.org/@napi-rs/cli/-/cli-2.18.0.tgz",
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb",
-  "version": "0.4.17",
+  "version": "0.4.20",
   "main": "./dist/index.js",
   "types": "./dist/index.d.ts",
   "napi": {
@@ -18,19 +18,16 @@
   },
   "license": "Apache 2.0",
   "devDependencies": {
-    "@aws-sdk/client-s3": "^3.33.0",
     "@aws-sdk/client-kms": "^3.33.0",
+    "@aws-sdk/client-s3": "^3.33.0",
+    "@biomejs/biome": "^1.7.3",
+    "@jest/globals": "^29.7.0",
     "@napi-rs/cli": "^2.18.0",
     "@types/jest": "^29.1.2",
     "@types/tmp": "^0.2.6",
-    "@typescript-eslint/eslint-plugin": "^6.19.0",
-    "@typescript-eslint/parser": "^6.19.0",
     "apache-arrow-old": "npm:apache-arrow@13.0.0",
     "eslint": "^8.57.0",
-    "eslint-config-prettier": "^9.1.0",
-    "eslint-plugin-jsdoc": "^48.2.1",
     "jest": "^29.7.0",
-    "prettier": "^3.1.0",
     "shx": "^0.3.4",
     "tmp": "^0.2.3",
     "ts-jest": "^29.1.2",
@@ -45,39 +42,26 @@
   "engines": {
     "node": ">= 18"
   },
-  "cpu": [
-    "x64",
-    "arm64"
-  ],
-  "os": [
-    "darwin",
-    "linux",
-    "win32"
-  ],
+  "cpu": ["x64", "arm64"],
+  "os": ["darwin", "linux", "win32"],
   "scripts": {
     "artifacts": "napi artifacts",
-    "build:debug": "napi build --platform --dts ../lancedb/native.d.ts --js ../lancedb/native.js dist/",
+    "build:debug": "napi build --platform --dts ../lancedb/native.d.ts --js ../lancedb/native.js lancedb",
     "build:release": "napi build --platform --release --dts ../lancedb/native.d.ts --js ../lancedb/native.js dist/",
-    "build": "npm run build:debug && tsc -b && shx cp lancedb/native.d.ts dist/native.d.ts",
+    "build": "npm run build:debug && tsc -b && shx cp lancedb/native.d.ts dist/native.d.ts && shx cp lancedb/*.node dist/",
     "build-release": "npm run build:release && tsc -b && shx cp lancedb/native.d.ts dist/native.d.ts",
-    "chkformat": "prettier . --check",
+    "lint-ci": "biome ci .",
     "docs": "typedoc --plugin typedoc-plugin-markdown --out ../docs/src/js lancedb/index.ts",
-    "lint": "eslint lancedb && eslint __test__",
+    "lint": "biome check . && biome format .",
+    "lint-fix": "biome check --apply-unsafe . && biome format --write .",
     "prepublishOnly": "napi prepublish -t npm",
-    "test": "npm run build && jest --verbose",
+    "test": "jest --verbose",
     "integration": "S3_TEST=1 npm run test",
     "universal": "napi universal",
     "version": "napi version"
   },
-  "optionalDependencies": {
-    "@lancedb/lancedb-darwin-arm64": "0.4.17",
-    "@lancedb/lancedb-darwin-x64": "0.4.17",
-    "@lancedb/lancedb-linux-arm64-gnu": "0.4.17",
-    "@lancedb/lancedb-linux-x64-gnu": "0.4.17",
-    "@lancedb/lancedb-win32-x64-msvc": "0.4.17"
-  },
   "dependencies": {
-    "openai": "^4.29.2",
-    "apache-arrow": "^15.0.0"
+    "apache-arrow": "^15.0.0",
+    "openai": "^4.29.2"
   }
 }
@@ -176,6 +176,7 @@ impl Connection {
         &self,
         name: String,
         storage_options: Option<HashMap<String, String>>,
+        index_cache_size: Option<u32>,
     ) -> napi::Result<Table> {
         let mut builder = self.get_inner()?.open_table(&name);
         if let Some(storage_options) = storage_options {
@@ -183,6 +184,9 @@ impl Connection {
                 builder = builder.storage_option(key, value);
             }
         }
+        if let Some(index_cache_size) = index_cache_size {
+            builder = builder.index_cache_size(index_cache_size);
+        }
         let tbl = builder
             .execute()
             .await
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.6.9
+current_version = 0.6.13
 commit = True
 message = [python] Bump version: {current_version} → {new_version}
 tag = True
@@ -14,7 +14,7 @@ name = "_lancedb"
 crate-type = ["cdylib"]

 [dependencies]
-arrow = { version = "50.0.0", features = ["pyarrow"] }
+arrow = { version = "51.0.0", features = ["pyarrow"] }
 lancedb = { path = "../rust/lancedb" }
 env_logger = "0.10"
 pyo3 = { version = "0.20", features = ["extension-module", "abi3-py38"] }
@@ -1,6 +1,6 @@
 [project]
 name = "lancedb"
-version = "0.6.9"
+version = "0.6.13"
 dependencies = [
     "deprecation",
     "pylance==0.10.12",
@@ -10,7 +10,7 @@ dependencies = [
     "tqdm>=4.27.0",
     "pydantic>=1.10",
     "attrs>=21.3.0",
-    "semver>=3.0",
+    "semver",
     "cachetools",
     "overrides>=0.7",
 ]
@@ -80,6 +80,7 @@ embeddings = [
     "boto3>=1.28.57",
     "awscli>=1.29.57",
     "botocore>=1.31.57",
+    "ollama",
 ]
 azure = ["adlfs>=2024.2.0"]
@@ -107,6 +107,9 @@ def connect(
             request_thread_pool=request_thread_pool,
             **kwargs,
         )

+    if kwargs:
+        raise ValueError(f"Unknown keyword arguments: {kwargs}")
     return LanceDBConnection(uri, read_consistency_interval=read_consistency_interval)
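With this check in place, a misspelled keyword argument to `connect` now fails loudly instead of being silently discarded. A minimal sketch of the new behavior (the `typo_kwarg` name is hypothetical):

```python
import lancedb

db = lancedb.connect("/tmp/lancedb")  # valid call, unaffected

# Previously this kwarg was silently ignored; it now raises
# ValueError: Unknown keyword arguments: {'typo_kwarg': True}
lancedb.connect("/tmp/lancedb", typo_kwarg=True)
```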
@@ -224,13 +224,23 @@ class DBConnection(EnforceOverrides):
     def __getitem__(self, name: str) -> LanceTable:
         return self.open_table(name)

-    def open_table(self, name: str) -> Table:
+    def open_table(self, name: str, *, index_cache_size: Optional[int] = None) -> Table:
         """Open a Lance Table in the database.

         Parameters
         ----------
         name: str
             The name of the table.
+        index_cache_size: int, default 256
+            Set the size of the index cache, specified as a number of entries
+
+            The exact meaning of an "entry" will depend on the type of index:
+            * IVF - there is one entry for each IVF partition
+            * BTREE - there is one entry for the entire index
+
+            This cache applies to the entire opened table, across all indices.
+            Setting this value higher will increase performance on larger datasets
+            at the expense of more RAM

         Returns
         -------
@@ -248,6 +258,18 @@ class DBConnection(EnforceOverrides):
         """
         raise NotImplementedError

+    def rename_table(self, cur_name: str, new_name: str):
+        """Rename a table in the database.
+
+        Parameters
+        ----------
+        cur_name: str
+            The current name of the table.
+        new_name: str
+            The new name of the table.
+        """
+        raise NotImplementedError
+
     def drop_database(self):
         """
         Drop database
@@ -407,7 +429,9 @@ class LanceDBConnection(DBConnection):
         return tbl

     @override
-    def open_table(self, name: str) -> LanceTable:
+    def open_table(
+        self, name: str, *, index_cache_size: Optional[int] = None
+    ) -> LanceTable:
         """Open a table in the database.

         Parameters
@@ -419,7 +443,7 @@ class LanceDBConnection(DBConnection):
         -------
         A LanceTable object representing the table.
         """
-        return LanceTable.open(self, name)
+        return LanceTable.open(self, name, index_cache_size=index_cache_size)

     @override
     def drop_table(self, name: str, ignore_missing: bool = False):
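The new keyword-only argument flows from the connection straight through to the underlying Lance dataset. A short sketch of caller usage; the table name and cache size are illustrative, not recommendations:

```python
import lancedb

db = lancedb.connect("/tmp/lancedb")
db.create_table("vectors", data=[{"id": 0}], exist_ok=True)

# Larger values cache more index entries (e.g. IVF partitions)
# at the cost of RAM; smaller values shrink the cache.
tbl = db.open_table("vectors", index_cache_size=512)
print(tbl.count_rows())
```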
@@ -751,7 +775,10 @@ class AsyncConnection(object):
         return AsyncTable(new_table)

     async def open_table(
-        self, name: str, storage_options: Optional[Dict[str, str]] = None
+        self,
+        name: str,
+        storage_options: Optional[Dict[str, str]] = None,
+        index_cache_size: Optional[int] = None,
     ) -> Table:
         """Open a Lance Table in the database.

@@ -764,12 +791,22 @@ class AsyncConnection(object):
             connection will be inherited by the table, but can be overridden here.
             See available options at
             https://lancedb.github.io/lancedb/guides/storage/
+        index_cache_size: int, default 256
+            Set the size of the index cache, specified as a number of entries
+
+            The exact meaning of an "entry" will depend on the type of index:
+            * IVF - there is one entry for each IVF partition
+            * BTREE - there is one entry for the entire index
+
+            This cache applies to the entire opened table, across all indices.
+            Setting this value higher will increase performance on larger datasets
+            at the expense of more RAM

         Returns
         -------
         A LanceTable object representing the table.
         """
-        table = await self._inner.open_table(name, storage_options)
+        table = await self._inner.open_table(name, storage_options, index_cache_size)
         return AsyncTable(table)

     async def drop_table(self, name: str):
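The async connection gains the same knob. A hedged sketch mirroring the synchronous example above, assuming the table already exists:

```python
import asyncio
import lancedb

async def main():
    db = await lancedb.connect_async("/tmp/lancedb")
    # Passing the parameter is safe even when its effect is hard to
    # observe directly (see the test added further below).
    tbl = await db.open_table("vectors", index_cache_size=512)
    print(await tbl.count_rows())

asyncio.run(main())
```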
@@ -16,6 +16,7 @@ from .bedrock import BedRockText
 from .cohere import CohereEmbeddingFunction
 from .gemini_text import GeminiText
 from .instructor import InstructorEmbeddingFunction
+from .ollama import OllamaEmbeddings
 from .open_clip import OpenClipEmbeddings
 from .openai import OpenAIEmbeddings
 from .registry import EmbeddingFunctionRegistry, get_registry
python/python/lancedb/embeddings/ollama.py (new file, 69 lines)
@@ -0,0 +1,69 @@
+# Copyright (c) 2023. LanceDB Developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from functools import cached_property
+from typing import TYPE_CHECKING, List, Optional, Union
+
+from ..util import attempt_import_or_raise
+from .base import TextEmbeddingFunction
+from .registry import register
+
+if TYPE_CHECKING:
+    import numpy as np
+
+
+@register("ollama")
+class OllamaEmbeddings(TextEmbeddingFunction):
+    """
+    An embedding function that uses Ollama
+
+    https://github.com/ollama/ollama/blob/main/docs/api.md#generate-embeddings
+    https://ollama.com/blog/embedding-models
+    """
+
+    name: str = "nomic-embed-text"
+    host: str = "http://localhost:11434"
+    options: Optional[dict] = None  # type = ollama.Options
+    keep_alive: Optional[Union[float, str]] = None
+    ollama_client_kwargs: Optional[dict] = {}
+
+    def ndims(self):
+        return len(self.generate_embeddings(["foo"])[0])
+
+    def _compute_embedding(self, text):
+        return self._ollama_client.embeddings(
+            model=self.name,
+            prompt=text,
+            options=self.options,
+            keep_alive=self.keep_alive,
+        )["embedding"]
+
+    def generate_embeddings(
+        self, texts: Union[List[str], "np.ndarray"]
+    ) -> List["np.array"]:
+        """
+        Get the embeddings for the given texts
+
+        Parameters
+        ----------
+        texts: list[str] or np.ndarray (of str)
+            The texts to embed
+        """
+        # TODO retry, rate limit, token limit
+        embeddings = [self._compute_embedding(text) for text in texts]
+        return embeddings
+
+    @cached_property
+    def _ollama_client(self):
+        ollama = attempt_import_or_raise("ollama")
+        # ToDo explore ollama.AsyncClient
+        return ollama.Client(host=self.host, **self.ollama_client_kwargs)
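Because the class is registered under the `"ollama"` alias, it composes with the existing registry and schema machinery. A sketch of end-to-end usage, assuming an Ollama server is reachable at the default host with the `nomic-embed-text` model pulled:

```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector

func = get_registry().get("ollama").create(name="nomic-embed-text")

class Words(LanceModel):
    text: str = func.SourceField()  # embedded automatically on add()
    vector: Vector(func.ndims()) = func.VectorField()

db = lancedb.connect("/tmp/lancedb")
tbl = db.create_table("words", schema=Words, mode="overwrite")
tbl.add([{"text": "hello world"}, {"text": "goodbye world"}])

hit = tbl.search("greeting").limit(1).to_pydantic(Words)[0]
print(hit.text)
```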
@@ -255,7 +255,13 @@ def retry_with_exponential_backoff(
                 )

                 delay *= exponential_base * (1 + jitter * random.random())
-                logging.info("Retrying in %s seconds...", delay)
+                logging.warning(
+                    "Error occurred: %s \n Retrying in %s seconds (retry %s of %s) \n",
+                    e,
+                    delay,
+                    num_retries,
+                    max_retries,
+                )
                 time.sleep(delay)

         return wrapper
@@ -37,7 +37,7 @@ import pyarrow as pa
 import pydantic
 import semver

-PYDANTIC_VERSION = semver.Version.parse(pydantic.__version__)
+PYDANTIC_VERSION = semver.parse_version_info(pydantic.__version__)
 try:
     from pydantic_core import CoreSchema, core_schema
 except ImportError:
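The switch pairs with the relaxed `semver` pin in `pyproject.toml` above: `semver.Version.parse` exists only in the 3.x line, while `parse_version_info` is available in both 2.x and 3.x. A quick illustration of the older-but-portable spelling:

```python
import semver

# Works on semver 2.x and 3.x; Version.parse would require semver>=3.0.
info = semver.parse_version_info("2.7.1")
print(info.major, info.minor, info.patch)  # -> 2 7 1
```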
@@ -30,6 +30,7 @@ from typing import (
 import deprecation
 import numpy as np
 import pyarrow as pa
+import pyarrow.fs as pa_fs
 import pydantic

 from . import __version__
@@ -37,7 +38,7 @@ from .arrow import AsyncRecordBatchReader
 from .common import VEC
 from .rerankers.base import Reranker
 from .rerankers.linear_combination import LinearCombinationReranker
-from .util import safe_import_pandas
+from .util import fs_from_uri, safe_import_pandas

 if TYPE_CHECKING:
     import PIL
@@ -665,6 +666,14 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):

         # get the index path
         index_path = self._table._get_fts_index_path()
+
+        # Check that we are on local filesystem
+        fs, _path = fs_from_uri(index_path)
+        if not isinstance(fs, pa_fs.LocalFileSystem):
+            raise NotImplementedError(
+                "Full-text search is only supported on the local filesystem"
+            )
+
         # check if the index exist
         if not Path(index_path).exists():
             raise FileNotFoundError(
@@ -94,7 +94,7 @@ class RemoteDBConnection(DBConnection):
             yield item

     @override
-    def open_table(self, name: str) -> Table:
+    def open_table(self, name: str, *, index_cache_size: Optional[int] = None) -> Table:
         """Open a Lance Table in the database.

         Parameters
@@ -110,6 +110,12 @@ class RemoteDBConnection(DBConnection):

         self._client.mount_retry_adapter_for_table(name)

+        if index_cache_size is not None:
+            logging.info(
+                "index_cache_size is ignored in LanceDb Cloud"
+                " (there is no local cache to configure)"
+            )
+
         # check if table exists
         if self._table_cache.get(name) is None:
             self._client.post(f"/v1/table/{name}/describe/")
@@ -279,7 +285,25 @@ class RemoteDBConnection(DBConnection):
         self._client.post(
             f"/v1/table/{name}/drop/",
         )
-        self._table_cache.pop(name)
+        self._table_cache.pop(name, default=None)
+
+    @override
+    def rename_table(self, cur_name: str, new_name: str):
+        """Rename a table in the database.
+
+        Parameters
+        ----------
+        cur_name: str
+            The current name of the table.
+        new_name: str
+            The new name of the table.
+        """
+        self._client.post(
+            f"/v1/table/{cur_name}/rename/",
+            data={"new_table_name": new_name},
+        )
+        self._table_cache.pop(cur_name, default=None)
+        self._table_cache[new_name] = True

     async def close(self):
         """Close the connection to the database."""
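`rename_table` is implemented here only for the remote (LanceDB Cloud) connection; the local base class still raises `NotImplementedError`. A hedged sketch of cloud usage, where the URI, API key, and table names are placeholders:

```python
import lancedb

db = lancedb.connect("db://my-project", api_key="sk-...")
db.rename_table("events_staging", "events")
# Subsequent opens resolve the new name via the refreshed table cache.
tbl = db.open_table("events")
```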
@@ -72,7 +72,7 @@ class RemoteTable(Table):
         return resp

     def index_stats(self, index_uuid: str):
-        """List all the indices on the table"""
+        """List all the stats of a specified index"""
         resp = self._conn._client.post(
             f"/v1/table/{self._name}/index/{index_uuid}/stats/"
         )
@@ -806,6 +806,7 @@ class _LanceLatestDatasetRef(_LanceDatasetRef):
     """Reference to the latest version of a LanceDataset."""

     uri: str
+    index_cache_size: Optional[int] = None
     read_consistency_interval: Optional[timedelta] = None
     last_consistency_check: Optional[float] = None
     _dataset: Optional[LanceDataset] = None
@@ -813,7 +814,9 @@ class _LanceLatestDatasetRef(_LanceDatasetRef):
     @property
     def dataset(self) -> LanceDataset:
         if not self._dataset:
-            self._dataset = lance.dataset(self.uri)
+            self._dataset = lance.dataset(
+                self.uri, index_cache_size=self.index_cache_size
+            )
             self.last_consistency_check = time.monotonic()
         elif self.read_consistency_interval is not None:
             now = time.monotonic()
@@ -842,12 +845,15 @@ class _LanceLatestDatasetRef(_LanceDatasetRef):
 class _LanceTimeTravelRef(_LanceDatasetRef):
     uri: str
     version: int
+    index_cache_size: Optional[int] = None
     _dataset: Optional[LanceDataset] = None

     @property
     def dataset(self) -> LanceDataset:
         if not self._dataset:
-            self._dataset = lance.dataset(self.uri, version=self.version)
+            self._dataset = lance.dataset(
+                self.uri, version=self.version, index_cache_size=self.index_cache_size
+            )
         return self._dataset

     @dataset.setter
@@ -884,6 +890,8 @@ class LanceTable(Table):
         connection: "LanceDBConnection",
         name: str,
         version: Optional[int] = None,
+        *,
+        index_cache_size: Optional[int] = None,
     ):
         self._conn = connection
         self.name = name
@@ -892,11 +900,13 @@ class LanceTable(Table):
             self._ref = _LanceTimeTravelRef(
                 uri=self._dataset_uri,
                 version=version,
+                index_cache_size=index_cache_size,
             )
         else:
             self._ref = _LanceLatestDatasetRef(
                 uri=self._dataset_uri,
                 read_consistency_interval=connection.read_consistency_interval,
+                index_cache_size=index_cache_size,
             )

     @classmethod
@@ -1199,6 +1209,11 @@ class LanceTable(Table):
                 raise ValueError("Index already exists. Use replace=True to overwrite.")
             fs.delete_dir(path)

+        if not isinstance(fs, pa_fs.LocalFileSystem):
+            raise NotImplementedError(
+                "Full-text search is only supported on the local filesystem"
+            )
+
         index = create_index(
             self._get_fts_index_path(),
             field_names,
@@ -368,6 +368,15 @@ async def test_create_exist_ok_async(tmp_path):
     # await db.create_table("test", schema=bad_schema, exist_ok=True)

+
+def test_open_table_sync(tmp_path):
+    db = lancedb.connect(tmp_path)
+    db.create_table("test", data=[{"id": 0}])
+    assert db.open_table("test").count_rows() == 1
+    assert db.open_table("test", index_cache_size=0).count_rows() == 1
+    with pytest.raises(FileNotFoundError, match="does not exist"):
+        db.open_table("does_not_exist")
+
+
 @pytest.mark.asyncio
 async def test_open_table(tmp_path):
     db = await lancedb.connect_async(tmp_path)
@@ -397,6 +406,10 @@ async def test_open_table(tmp_path):
         }
     )

+    # No way to verify this yet, but at least make sure we
+    # can pass the parameter
+    await db.open_table("test", index_cache_size=0)
+
     with pytest.raises(ValueError, match="was not found"):
         await db.open_table("does_not_exist")
@@ -45,7 +45,9 @@ except Exception:


 @pytest.mark.slow
-@pytest.mark.parametrize("alias", ["sentence-transformers", "openai", "huggingface"])
+@pytest.mark.parametrize(
+    "alias", ["sentence-transformers", "openai", "huggingface", "ollama"]
+)
 def test_basic_text_embeddings(alias, tmp_path):
     db = lancedb.connect(tmp_path)
     registry = get_registry()
@@ -213,7 +213,7 @@ def test_syntax(table):
     # https://github.com/lancedb/lancedb/issues/769
     table.create_fts_index("text")
     with pytest.raises(ValueError, match="Syntax Error"):
-        table.search("they could have been dogs OR cats").limit(10).to_list()
+        table.search("they could have been dogs OR").limit(10).to_list()

     # these should work
@@ -134,17 +134,21 @@ impl Connection {
         })
     }

-    #[pyo3(signature = (name, storage_options = None))]
+    #[pyo3(signature = (name, storage_options = None, index_cache_size = None))]
     pub fn open_table(
         self_: PyRef<'_, Self>,
         name: String,
         storage_options: Option<HashMap<String, String>>,
+        index_cache_size: Option<u32>,
     ) -> PyResult<&PyAny> {
         let inner = self_.get_inner()?.clone();
         let mut builder = inner.open_table(name);
         if let Some(storage_options) = storage_options {
             builder = builder.storage_options(storage_options);
         }
+        if let Some(index_cache_size) = index_cache_size {
+            builder = builder.index_cache_size(index_cache_size);
+        }
         future_into_py(self_.py(), async move {
             let table = builder.execute().await.infer_error()?;
             Ok(Table::new(table))
@@ -35,21 +35,16 @@ impl<T> PythonErrorExt<T> for std::result::Result<T, LanceError> {
         match &self {
             Ok(_) => Ok(self.unwrap()),
             Err(err) => match err {
-                LanceError::InvalidInput { .. } => self.value_error(),
-                LanceError::InvalidTableName { .. } => self.value_error(),
-                LanceError::TableNotFound { .. } => self.value_error(),
-                LanceError::Schema { .. } => self.value_error(),
+                LanceError::InvalidInput { .. }
+                | LanceError::InvalidTableName { .. }
+                | LanceError::TableNotFound { .. }
+                | LanceError::Schema { .. } => self.value_error(),
                 LanceError::CreateDir { .. } => self.os_error(),
-                LanceError::TableAlreadyExists { .. } => self.runtime_error(),
                 LanceError::ObjectStore { .. } => Err(PyIOError::new_err(err.to_string())),
-                LanceError::Lance { .. } => self.runtime_error(),
-                LanceError::Runtime { .. } => self.runtime_error(),
-                LanceError::Http { .. } => self.runtime_error(),
-                LanceError::Arrow { .. } => self.runtime_error(),
                 LanceError::NotSupported { .. } => {
                     Err(PyNotImplementedError::new_err(err.to_string()))
                 }
-                LanceError::Other { .. } => self.runtime_error(),
+                _ => self.runtime_error(),
             },
         }
     }
release_process.md (new file, 44 lines)
@@ -0,0 +1,44 @@
+# Release process
+
+There are five total packages we release. Three are the `lancedb` packages
+for Python, Rust, and Node.js. The other two are the legacy `vectordb`
+packages for Rust and Node.js.
+
+The Python package is versioned and released separately from the Rust and Node.js
+ones. For Rust and Node.js, the release process is shared between `lancedb` and
+`vectordb` for now.
+
+## Breaking changes
+
+We try to avoid breaking changes, but sometimes they are necessary. When there
+are breaking changes, we will increment the minor version. (This is valid
+semantic versioning because we are still in `0.x` versions.)
+
+When a PR makes a breaking change, the PR author should mark the PR using the
+conventional commit markers: either an exclamation mark after the type
+(such as `feat!: change signature of func`) or `BREAKING CHANGE` in the
+body of the PR. A CI job will add a `breaking-change` label to the PR, which is
+what CI will ultimately use to determine whether the minor version should be
+incremented.
+
+A CI job will validate that if a `breaking-change` label is added, the minor
+version is incremented in the `Cargo.toml` and `pyproject.toml` files. The only
+exception is if it has already been incremented since the last stable release.
+
+**It is the responsibility of the PR author to increment the minor version when
+appropriate.**
+
+Some things that are considered breaking changes:
+
+* Upgrading `lance` to a new minor version. Minor version bumps in Lance are
+  considered breaking changes during `0.x` releases. This can change behavior
+  in LanceDB.
+* Upgrading a dependency pin that is in the Rust API. In particular, upgrading
+  `DataFusion` and `Arrow` are breaking changes. Changes to dependencies that
+  are not exposed in our public API are not considered breaking changes.
+* Changing the signature of a public function or method.
+* Removing a public function or method.
+
+We do make exceptions for APIs that are marked as experimental. These are APIs
+that are under active development and not in major use. Such changes should not
+receive the `breaking-change` label.
@@ -1,6 +1,6 @@
 [package]
 name = "lancedb-node"
-version = "0.4.17"
+version = "0.4.20"
 description = "Serverless, low-latency vector database for AI applications"
 license.workspace = true
 edition.workspace = true
@@ -59,7 +59,7 @@ fn database_new(mut cx: FunctionContext) -> JsResult<JsPromise> {
     for handle in storage_options_js {
         let obj = handle.downcast::<JsArray, _>(&mut cx).unwrap();
         let key = obj.get::<JsString, _, _>(&mut cx, 0)?.value(&mut cx);
-        let value = obj.get::<JsString, _, _>(&mut cx, 0)?.value(&mut cx);
+        let value = obj.get::<JsString, _, _>(&mut cx, 1)?.value(&mut cx);

         storage_options.push((key, value));
     }
@@ -1,6 +1,6 @@
 [package]
 name = "lancedb"
-version = "0.4.17"
+version = "0.4.20"
 edition.workspace = true
 description = "LanceDB: A serverless, low-latency vector database for AI applications"
 license.workspace = true
@@ -40,6 +40,8 @@ serde = { version = "^1" }
 serde_json = { version = "1" }
 # For remote feature
 reqwest = { version = "0.11.24", features = ["gzip", "json"], optional = true }
+polars-arrow = { version = ">=0.37", optional = true }
+polars = { version = ">=0.37", optional = true }

 [dev-dependencies]
 tempfile = "3.5.0"
@@ -52,7 +54,8 @@ aws-sdk-kms = { version = "1.0" }
 aws-config = { version = "1.0" }

 [features]
-default = ["remote"]
+default = []
 remote = ["dep:reqwest"]
 fp16kernels = ["lance-linalg/fp16kernels"]
 s3-test = []
+polars = ["dep:polars-arrow", "dep:polars"]
@@ -14,10 +14,12 @@

 use std::{pin::Pin, sync::Arc};

-pub use arrow_array;
 pub use arrow_schema;
 use futures::{Stream, StreamExt};

+#[cfg(feature = "polars")]
+use {crate::polars_arrow_convertors, polars::frame::ArrowChunk, polars::prelude::DataFrame};
+
 use crate::error::Result;

 /// An iterator of batches that also has a schema
@@ -114,8 +116,183 @@ pub trait IntoArrow {
     fn into_arrow(self) -> Result<Box<dyn arrow_array::RecordBatchReader + Send>>;
 }

+pub type BoxedRecordBatchReader = Box<dyn arrow_array::RecordBatchReader + Send>;
+
 impl<T: arrow_array::RecordBatchReader + Send + 'static> IntoArrow for T {
     fn into_arrow(self) -> Result<Box<dyn arrow_array::RecordBatchReader + Send>> {
         Ok(Box::new(self))
     }
 }

+impl<S: Stream<Item = Result<arrow_array::RecordBatch>>> SimpleRecordBatchStream<S> {
+    pub fn new(stream: S, schema: Arc<arrow_schema::Schema>) -> Self {
+        Self { schema, stream }
+    }
+}
+
+#[cfg(feature = "polars")]
+/// An iterator of record batches formed from a Polars DataFrame.
+pub struct PolarsDataFrameRecordBatchReader {
+    chunks: std::vec::IntoIter<ArrowChunk>,
+    arrow_schema: Arc<arrow_schema::Schema>,
+}
+
+#[cfg(feature = "polars")]
+impl PolarsDataFrameRecordBatchReader {
+    /// Creates a new `PolarsDataFrameRecordBatchReader` from a given Polars DataFrame.
+    /// If the input dataframe does not have aligned chunks, this function undergoes
+    /// the costly operation of reallocating each series as a single contiguous chunk.
+    pub fn new(mut df: DataFrame) -> Result<Self> {
+        df.align_chunks();
+        let arrow_schema =
+            polars_arrow_convertors::convert_polars_df_schema_to_arrow_rb_schema(df.schema())?;
+        Ok(Self {
+            chunks: df
+                .iter_chunks(polars_arrow_convertors::POLARS_ARROW_FLAVOR)
+                .collect::<Vec<ArrowChunk>>()
+                .into_iter(),
+            arrow_schema,
+        })
+    }
+}
+
+#[cfg(feature = "polars")]
+impl Iterator for PolarsDataFrameRecordBatchReader {
+    type Item = std::result::Result<arrow_array::RecordBatch, arrow_schema::ArrowError>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.chunks.next().map(|chunk| {
+            let columns: std::result::Result<Vec<arrow_array::ArrayRef>, arrow_schema::ArrowError> =
+                chunk
+                    .into_arrays()
+                    .into_iter()
+                    .zip(self.arrow_schema.fields.iter())
+                    .map(|(polars_array, arrow_field)| {
+                        polars_arrow_convertors::convert_polars_arrow_array_to_arrow_rs_array(
+                            polars_array,
+                            arrow_field.data_type().clone(),
+                        )
+                    })
+                    .collect();
+            arrow_array::RecordBatch::try_new(self.arrow_schema.clone(), columns?)
+        })
+    }
+}
+
+#[cfg(feature = "polars")]
+impl arrow_array::RecordBatchReader for PolarsDataFrameRecordBatchReader {
+    fn schema(&self) -> Arc<arrow_schema::Schema> {
+        self.arrow_schema.clone()
+    }
+}
+
+/// A trait for converting the result of a LanceDB query into a Polars DataFrame with aligned
+/// chunks. The resulting Polars DataFrame will have aligned chunks, but the series's
+/// chunks are not guaranteed to be contiguous.
+#[cfg(feature = "polars")]
+pub trait IntoPolars {
+    fn into_polars(self) -> impl std::future::Future<Output = Result<DataFrame>> + Send;
+}
+
+#[cfg(feature = "polars")]
+impl IntoPolars for SendableRecordBatchStream {
+    async fn into_polars(mut self) -> Result<DataFrame> {
+        let polars_schema =
+            polars_arrow_convertors::convert_arrow_rb_schema_to_polars_df_schema(&self.schema())?;
+        let mut acc_df: DataFrame = DataFrame::from(&polars_schema);
+        while let Some(record_batch) = self.next().await {
+            let new_df = polars_arrow_convertors::convert_arrow_rb_to_polars_df(
+                &record_batch?,
+                &polars_schema,
+            )?;
+            acc_df = acc_df.vstack(&new_df)?;
+        }
+        Ok(acc_df)
+    }
+}
+
+#[cfg(all(test, feature = "polars"))]
+mod tests {
+    use super::SendableRecordBatchStream;
+    use crate::arrow::{
+        IntoArrow, IntoPolars, PolarsDataFrameRecordBatchReader, SimpleRecordBatchStream,
+    };
+    use polars::prelude::{DataFrame, NamedFrom, Series};
+
+    fn get_record_batch_reader_from_polars() -> Box<dyn arrow_array::RecordBatchReader + Send> {
+        let mut string_series = Series::new("string", &["ab"]);
+        let mut int_series = Series::new("int", &[1]);
+        let mut float_series = Series::new("float", &[1.0]);
+        let df1 = DataFrame::new(vec![string_series, int_series, float_series]).unwrap();
+
+        string_series = Series::new("string", &["bc"]);
+        int_series = Series::new("int", &[2]);
+        float_series = Series::new("float", &[2.0]);
+        let df2 = DataFrame::new(vec![string_series, int_series, float_series]).unwrap();
+
+        PolarsDataFrameRecordBatchReader::new(df1.vstack(&df2).unwrap())
+            .unwrap()
+            .into_arrow()
+            .unwrap()
+    }
+
+    #[test]
+    fn from_polars_to_arrow() {
+        let record_batch_reader = get_record_batch_reader_from_polars();
+        let schema = record_batch_reader.schema();
+
+        // Test schema conversion
+        assert_eq!(
+            schema
+                .fields
+                .iter()
+                .map(|field| (field.name().as_str(), field.data_type()))
+                .collect::<Vec<_>>(),
+            vec![
+                ("string", &arrow_schema::DataType::LargeUtf8),
+                ("int", &arrow_schema::DataType::Int32),
+                ("float", &arrow_schema::DataType::Float64)
+            ]
+        );
+        let record_batches: Vec<arrow_array::RecordBatch> =
+            record_batch_reader.map(|result| result.unwrap()).collect();
+        assert_eq!(record_batches.len(), 2);
+        assert_eq!(schema, record_batches[0].schema());
+        assert_eq!(record_batches[0].schema(), record_batches[1].schema());
+
+        // Test number of rows
+        assert_eq!(record_batches[0].num_rows(), 1);
+        assert_eq!(record_batches[1].num_rows(), 1);
+    }
+
+    #[tokio::test]
+    async fn from_arrow_to_polars() {
+        let record_batch_reader = get_record_batch_reader_from_polars();
+        let schema = record_batch_reader.schema();
+        let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
+            schema: schema.clone(),
+            stream: futures::stream::iter(
+                record_batch_reader
+                    .into_iter()
+                    .map(|r| r.map_err(Into::into)),
+            ),
+        });
+        let df = stream.into_polars().await.unwrap();
+
+        // Test number of chunks and rows
+        assert_eq!(df.n_chunks(), 2);
+        assert_eq!(df.height(), 2);
+
+        // Test schema conversion
+        assert_eq!(
+            df.schema()
+                .into_iter()
+                .map(|(name, datatype)| (name.to_string(), datatype))
+                .collect::<Vec<_>>(),
+            vec![
+                ("string".to_string(), polars::prelude::DataType::String),
+                ("int".to_owned(), polars::prelude::DataType::Int32),
+                ("float".to_owned(), polars::prelude::DataType::Float64)
+            ]
+        );
+    }
+}
@@ -27,12 +27,18 @@ use object_store::{aws::AwsCredential, local::LocalFileSystem};
|
|||||||
use snafu::prelude::*;
|
use snafu::prelude::*;
|
||||||
|
|
||||||
use crate::arrow::IntoArrow;
|
use crate::arrow::IntoArrow;
|
||||||
|
use crate::embeddings::{
|
||||||
|
EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry, MemoryRegistry, WithEmbeddings,
|
||||||
|
};
|
||||||
use crate::error::{CreateDirSnafu, Error, InvalidTableNameSnafu, Result};
|
use crate::error::{CreateDirSnafu, Error, InvalidTableNameSnafu, Result};
|
||||||
use crate::io::object_store::MirroringObjectStoreWrapper;
|
use crate::io::object_store::MirroringObjectStoreWrapper;
|
||||||
use crate::table::{NativeTable, WriteOptions};
|
use crate::table::{NativeTable, TableDefinition, WriteOptions};
|
||||||
use crate::utils::validate_table_name;
|
use crate::utils::validate_table_name;
|
||||||
use crate::Table;
|
use crate::Table;
|
||||||
|
|
||||||
|
#[cfg(feature = "remote")]
|
||||||
|
use log::warn;
|
||||||
|
|
||||||
pub const LANCE_FILE_EXTENSION: &str = "lance";
|
pub const LANCE_FILE_EXTENSION: &str = "lance";
|
||||||
|
|
||||||
pub type TableBuilderCallback = Box<dyn FnOnce(OpenTableBuilder) -> OpenTableBuilder + Send>;
|
pub type TableBuilderCallback = Box<dyn FnOnce(OpenTableBuilder) -> OpenTableBuilder + Send>;
|
||||||
@@ -130,9 +136,10 @@ pub struct CreateTableBuilder<const HAS_DATA: bool, T: IntoArrow> {
|
|||||||
parent: Arc<dyn ConnectionInternal>,
|
parent: Arc<dyn ConnectionInternal>,
|
||||||
pub(crate) name: String,
|
pub(crate) name: String,
|
||||||
pub(crate) data: Option<T>,
|
pub(crate) data: Option<T>,
|
||||||
pub(crate) schema: Option<SchemaRef>,
|
|
||||||
pub(crate) mode: CreateTableMode,
|
pub(crate) mode: CreateTableMode,
|
||||||
pub(crate) write_options: WriteOptions,
|
pub(crate) write_options: WriteOptions,
|
||||||
|
pub(crate) table_definition: Option<TableDefinition>,
|
||||||
|
pub(crate) embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Builder methods that only apply when we have initial data
|
// Builder methods that only apply when we have initial data
|
||||||
@@ -142,9 +149,10 @@ impl<T: IntoArrow> CreateTableBuilder<true, T> {
|
|||||||
parent,
|
parent,
|
||||||
name,
|
name,
|
||||||
data: Some(data),
|
data: Some(data),
|
||||||
schema: None,
|
|
||||||
mode: CreateTableMode::default(),
|
mode: CreateTableMode::default(),
|
||||||
write_options: WriteOptions::default(),
|
write_options: WriteOptions::default(),
|
||||||
|
table_definition: None,
|
||||||
|
embeddings: Vec::new(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -172,24 +180,43 @@ impl<T: IntoArrow> CreateTableBuilder<true, T> {
|
|||||||
parent: self.parent,
|
parent: self.parent,
|
||||||
name: self.name,
|
name: self.name,
|
||||||
data: None,
|
data: None,
|
||||||
schema: self.schema,
|
table_definition: self.table_definition,
|
||||||
mode: self.mode,
|
mode: self.mode,
|
||||||
write_options: self.write_options,
|
write_options: self.write_options,
|
||||||
|
embeddings: self.embeddings,
|
||||||
};
|
};
|
||||||
Ok((data, builder))
|
Ok((data, builder))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn add_embedding(mut self, definition: EmbeddingDefinition) -> Result<Self> {
|
||||||
|
// Early verification of the embedding name
|
||||||
|
let embedding_func = self
|
||||||
|
.parent
|
||||||
|
.embedding_registry()
|
||||||
|
.get(&definition.embedding_name)
|
||||||
|
.ok_or_else(|| Error::EmbeddingFunctionNotFound {
|
||||||
|
name: definition.embedding_name.to_string(),
|
||||||
|
reason: "No embedding function found in the connection's embedding_registry"
|
||||||
|
.to_string(),
|
||||||
|
})?;
|
||||||
|
|
||||||
|
self.embeddings.push((definition, embedding_func));
|
||||||
|
Ok(self)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Builder methods that only apply when we do not have initial data
|
// Builder methods that only apply when we do not have initial data
|
||||||
impl CreateTableBuilder<false, NoData> {
|
impl CreateTableBuilder<false, NoData> {
|
||||||
fn new(parent: Arc<dyn ConnectionInternal>, name: String, schema: SchemaRef) -> Self {
|
fn new(parent: Arc<dyn ConnectionInternal>, name: String, schema: SchemaRef) -> Self {
|
||||||
|
let table_definition = TableDefinition::new_from_schema(schema);
|
||||||
Self {
|
Self {
|
||||||
parent,
|
parent,
|
||||||
name,
|
name,
|
||||||
data: None,
|
data: None,
|
||||||
schema: Some(schema),
|
table_definition: Some(table_definition),
|
||||||
mode: CreateTableMode::default(),
|
mode: CreateTableMode::default(),
|
||||||
write_options: WriteOptions::default(),
|
write_options: WriteOptions::default(),
|
||||||
|
embeddings: Vec::new(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -347,6 +374,7 @@ impl OpenTableBuilder {
|
|||||||
pub(crate) trait ConnectionInternal:
|
pub(crate) trait ConnectionInternal:
|
||||||
Send + Sync + std::fmt::Debug + std::fmt::Display + 'static
|
Send + Sync + std::fmt::Debug + std::fmt::Display + 'static
|
||||||
{
|
{
|
||||||
|
fn embedding_registry(&self) -> &dyn EmbeddingRegistry;
|
||||||
async fn table_names(&self, options: TableNamesBuilder) -> Result<Vec<String>>;
|
async fn table_names(&self, options: TableNamesBuilder) -> Result<Vec<String>>;
|
||||||
async fn do_create_table(
|
async fn do_create_table(
|
||||||
&self,
|
&self,
|
||||||
@@ -363,7 +391,7 @@ pub(crate) trait ConnectionInternal:
|
|||||||
) -> Result<Table> {
|
) -> Result<Table> {
|
||||||
let batches = Box::new(RecordBatchIterator::new(
|
let batches = Box::new(RecordBatchIterator::new(
|
||||||
vec![],
|
vec![],
|
||||||
options.schema.as_ref().unwrap().clone(),
|
options.table_definition.clone().unwrap().schema.clone(),
|
||||||
));
|
));
|
||||||
self.do_create_table(options, batches).await
|
self.do_create_table(options, batches).await
|
||||||
}
|
}
|
||||||
@@ -450,6 +478,13 @@ impl Connection {
|
|||||||
pub async fn drop_db(&self) -> Result<()> {
|
pub async fn drop_db(&self) -> Result<()> {
|
||||||
self.internal.drop_db().await
|
self.internal.drop_db().await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get the in-memory embedding registry.
|
||||||
|
/// It's important to note that the embedding registry is not persisted across connections.
|
||||||
|
/// So if a table contains embeddings, you will need to make sure that you are using a connection that has the same embedding functions registered
|
||||||
|
pub fn embedding_registry(&self) -> &dyn EmbeddingRegistry {
|
||||||
|
self.internal.embedding_registry()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
@@ -483,6 +518,7 @@ pub struct ConnectBuilder {
|
|||||||
/// consistency only applies to read operations. Write operations are
|
/// consistency only applies to read operations. Write operations are
|
||||||
/// always consistent.
|
/// always consistent.
|
||||||
read_consistency_interval: Option<std::time::Duration>,
|
read_consistency_interval: Option<std::time::Duration>,
|
||||||
|
embedding_registry: Option<Arc<dyn EmbeddingRegistry>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ConnectBuilder {
|
impl ConnectBuilder {
|
||||||
@@ -495,6 +531,7 @@ impl ConnectBuilder {
|
|||||||
host_override: None,
|
host_override: None,
|
||||||
read_consistency_interval: None,
|
read_consistency_interval: None,
|
||||||
storage_options: HashMap::new(),
|
storage_options: HashMap::new(),
|
||||||
|
embedding_registry: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -513,6 +550,12 @@ impl ConnectBuilder {
|
|||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Provide a custom [`EmbeddingRegistry`] to use for this connection.
|
||||||
|
pub fn embedding_registry(mut self, registry: Arc<dyn EmbeddingRegistry>) -> Self {
|
||||||
|
self.embedding_registry = Some(registry);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
/// [`AwsCredential`] to use when connecting to S3.
|
/// [`AwsCredential`] to use when connecting to S3.
|
||||||
#[deprecated(note = "Pass through storage_options instead")]
|
#[deprecated(note = "Pass through storage_options instead")]
|
||||||
pub fn aws_creds(mut self, aws_creds: AwsCredential) -> Self {
|
pub fn aws_creds(mut self, aws_creds: AwsCredential) -> Self {
|
||||||
@@ -579,6 +622,7 @@ impl ConnectBuilder {
         let api_key = self.api_key.ok_or_else(|| Error::InvalidInput {
             message: "An api_key is required when connecting to LanceDb Cloud".to_string(),
         })?;
+        warn!("The rust implementation of the remote client is not yet ready for use.");
         let internal = Arc::new(crate::remote::db::RemoteDatabase::try_new(
             &self.uri,
             &api_key,
@@ -638,6 +682,7 @@ struct Database {
 
     // Storage options to be inherited by tables created from this connection
     storage_options: HashMap<String, String>,
+    embedding_registry: Arc<dyn EmbeddingRegistry>,
 }
 
 impl std::fmt::Display for Database {
@@ -671,7 +716,12 @@ impl Database {
         // TODO: pass params regardless of OS
         match parse_res {
             Ok(url) if url.scheme().len() == 1 && cfg!(windows) => {
-                Self::open_path(uri, options.read_consistency_interval).await
+                Self::open_path(
+                    uri,
+                    options.read_consistency_interval,
+                    options.embedding_registry.clone(),
+                )
+                .await
             }
             Ok(mut url) => {
                 // iter thru the query params and extract the commit store param
@@ -741,6 +791,10 @@ impl Database {
                     None => None,
                 };
 
+                let embedding_registry = options
+                    .embedding_registry
+                    .clone()
+                    .unwrap_or_else(|| Arc::new(MemoryRegistry::new()));
                 Ok(Self {
                     uri: table_base_uri,
                     query_string,
@@ -749,20 +803,33 @@ impl Database {
                     store_wrapper: write_store_wrapper,
                     read_consistency_interval: options.read_consistency_interval,
                     storage_options,
+                    embedding_registry,
                 })
             }
-            Err(_) => Self::open_path(uri, options.read_consistency_interval).await,
+            Err(_) => {
+                Self::open_path(
+                    uri,
+                    options.read_consistency_interval,
+                    options.embedding_registry.clone(),
+                )
+                .await
+            }
         }
     }
 
     async fn open_path(
         path: &str,
         read_consistency_interval: Option<std::time::Duration>,
+        embedding_registry: Option<Arc<dyn EmbeddingRegistry>>,
     ) -> Result<Self> {
         let (object_store, base_path) = ObjectStore::from_uri(path).await?;
         if object_store.is_local() {
             Self::try_create_dir(path).context(CreateDirSnafu { path })?;
         }
 
+        let embedding_registry =
+            embedding_registry.unwrap_or_else(|| Arc::new(MemoryRegistry::new()));
+
         Ok(Self {
             uri: path.to_string(),
             query_string: None,
@@ -771,6 +838,7 @@ impl Database {
             store_wrapper: None,
             read_consistency_interval,
             storage_options: HashMap::new(),
+            embedding_registry,
         })
     }
 
@@ -811,6 +879,9 @@ impl Database {
 
 #[async_trait::async_trait]
 impl ConnectionInternal for Database {
+    fn embedding_registry(&self) -> &dyn EmbeddingRegistry {
+        self.embedding_registry.as_ref()
+    }
     async fn table_names(&self, options: TableNamesBuilder) -> Result<Vec<String>> {
         let mut f = self
             .object_store
@@ -847,7 +918,7 @@ impl ConnectionInternal for Database {
         data: Box<dyn RecordBatchReader + Send>,
     ) -> Result<Table> {
         let table_uri = self.table_uri(&options.name)?;
+        let embedding_registry = self.embedding_registry.clone();
         // Inherit storage options from the connection
         let storage_options = options
             .write_options
@@ -862,6 +933,11 @@ impl ConnectionInternal for Database {
                 storage_options.insert(key.clone(), value.clone());
             }
         }
+        let data = if options.embeddings.is_empty() {
+            data
+        } else {
+            Box::new(WithEmbeddings::new(data, options.embeddings))
+        };
+
         let mut write_params = options.write_options.lance_write_params.unwrap_or_default();
         if matches!(&options.mode, CreateTableMode::Overwrite) {
@@ -878,7 +954,10 @@ impl ConnectionInternal for Database {
             )
             .await
         {
-            Ok(table) => Ok(Table::new(Arc::new(table))),
+            Ok(table) => Ok(Table::new_with_embedding_registry(
+                Arc::new(table),
+                embedding_registry,
+            )),
             Err(Error::TableAlreadyExists { name }) => match options.mode {
                 CreateTableMode::Create => Err(Error::TableAlreadyExists { name }),
                 CreateTableMode::ExistOk(callback) => {
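The create-table path above is what the public builder API drives; a sketch of the caller side, modeled on the shape of the integration tests at the end of this change (`create_some_records` is a helper defined there):

```rust
use lancedb::embeddings::EmbeddingDefinition;

// Inside an async function returning lancedb::Result<()>.
let tbl = db
    .create_table("test", create_some_records()?)
    // Embed the "text" column with the registered function "embed_fun",
    // writing the vectors to a new "embeddings" column.
    .add_embedding(EmbeddingDefinition::new(
        "text",
        "embed_fun",
        Some("embeddings"),
    ))?
    .execute()
    .await?;
```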
@@ -909,12 +988,23 @@ impl ConnectionInternal for Database {
             }
         }
 
+        // Some ReadParams are exposed in the OpenTableBuilder, but we also
+        // let the user provide their own ReadParams.
+        //
+        // If we have a user provided ReadParams use that
+        // If we don't then start with the default ReadParams and customize it with
+        // the options from the OpenTableBuilder
+        let read_params = options.lance_read_params.unwrap_or_else(|| ReadParams {
+            index_cache_size: options.index_cache_size as usize,
+            ..Default::default()
+        });
+
         let native_table = Arc::new(
             NativeTable::open_with_params(
                 &table_uri,
                 &options.name,
                 self.store_wrapper.clone(),
-                options.lance_read_params,
+                Some(read_params),
                 self.read_consistency_interval,
             )
             .await?,
@@ -1032,7 +1122,6 @@ mod tests {
     }
 
     #[tokio::test]
-    #[ignore = "this can't pass due to https://github.com/lancedb/lancedb/issues/1019, enable it after the bug fixed"]
     async fn test_open_table() {
         let tmp_dir = tempdir().unwrap();
         let uri = tmp_dir.path().to_str().unwrap();

307 rust/lancedb/src/embeddings.rs Normal file
@@ -0,0 +1,307 @@
// Copyright 2024 LanceDB Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use lance::arrow::RecordBatchExt;
use std::{
    borrow::Cow,
    collections::{HashMap, HashSet},
    sync::{Arc, RwLock},
};

use arrow_array::{Array, RecordBatch, RecordBatchReader};
use arrow_schema::{DataType, Field, SchemaBuilder};
// use async_trait::async_trait;
use serde::{Deserialize, Serialize};

use crate::{
    error::Result,
    table::{ColumnDefinition, ColumnKind, TableDefinition},
    Error,
};

/// Trait for embedding functions
///
/// An embedding function is a function that is applied to a column of input data
/// to produce an "embedding" of that input. This embedding is then stored in the
/// database alongside (or instead of) the original input.
///
/// An "embedding" is often a lower-dimensional representation of the input data.
/// For example, sentence-transformers can be used to embed sentences into a 768-dimensional
/// vector space. This is useful for tasks like similarity search, where we want to find
/// similar sentences to a query sentence.
///
/// To use an embedding function you must first register it with the `EmbeddingsRegistry`.
/// Then you can define it on a column in the table schema. That embedding will then be used
/// to embed the data in that column.
pub trait EmbeddingFunction: std::fmt::Debug + Send + Sync {
    fn name(&self) -> &str;
    /// The type of the input data
    fn source_type(&self) -> Result<Cow<DataType>>;
    /// The type of the output data
    /// This should **always** match the output of the `embed` function
    fn dest_type(&self) -> Result<Cow<DataType>>;
    /// Embed the input
    fn embed(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>>;
}

/// Defines an embedding from input data into a lower-dimensional space
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub struct EmbeddingDefinition {
    /// The name of the column in the input data
    pub source_column: String,
    /// The name of the embedding column, if not specified
    /// it will be the source column with `_embedding` appended
    pub dest_column: Option<String>,
    /// The name of the embedding function to apply
    pub embedding_name: String,
}

impl EmbeddingDefinition {
    pub fn new<S: Into<String>>(source_column: S, embedding_name: S, dest: Option<S>) -> Self {
        Self {
            source_column: source_column.into(),
            dest_column: dest.map(|d| d.into()),
            embedding_name: embedding_name.into(),
        }
    }
}

/// A registry of embedding
pub trait EmbeddingRegistry: Send + Sync + std::fmt::Debug {
    /// Return the names of all registered embedding functions
    fn functions(&self) -> HashSet<String>;
    /// Register a new [`EmbeddingFunction`]
    /// Returns an error if the function can not be registered
    fn register(&self, name: &str, function: Arc<dyn EmbeddingFunction>) -> Result<()>;
    /// Get an embedding function by name
    fn get(&self, name: &str) -> Option<Arc<dyn EmbeddingFunction>>;
}

/// A [`EmbeddingRegistry`] that uses in-memory [`HashMap`]s
#[derive(Debug, Default, Clone)]
pub struct MemoryRegistry {
    functions: Arc<RwLock<HashMap<String, Arc<dyn EmbeddingFunction>>>>,
}

impl EmbeddingRegistry for MemoryRegistry {
    fn functions(&self) -> HashSet<String> {
        self.functions.read().unwrap().keys().cloned().collect()
    }
    fn register(&self, name: &str, function: Arc<dyn EmbeddingFunction>) -> Result<()> {
        self.functions
            .write()
            .unwrap()
            .insert(name.to_string(), function);

        Ok(())
    }

    fn get(&self, name: &str) -> Option<Arc<dyn EmbeddingFunction>> {
        self.functions.read().unwrap().get(name).cloned()
    }
}

impl MemoryRegistry {
    /// Create a new `MemoryRegistry`
    pub fn new() -> Self {
        Self::default()
    }
}

/// A record batch reader that has embeddings applied to it
/// This is a wrapper around another record batch reader that applies an embedding function
/// when reading from the record batch
pub struct WithEmbeddings<R: RecordBatchReader> {
    inner: R,
    embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
}

/// A record batch that might have embeddings applied to it.
pub enum MaybeEmbedded<R: RecordBatchReader> {
    /// The record batch reader has embeddings applied to it
    Yes(WithEmbeddings<R>),
    /// The record batch reader does not have embeddings applied to it
    /// The inner record batch reader is returned as-is
    No(R),
}

impl<R: RecordBatchReader> MaybeEmbedded<R> {
    /// Create a new RecordBatchReader with embeddings applied to it if the table definition
    /// specifies an embedding column and the registry contains an embedding function with that name
    /// Otherwise, this is a no-op and the inner RecordBatchReader is returned.
    pub fn try_new(
        inner: R,
        table_definition: TableDefinition,
        registry: Option<Arc<dyn EmbeddingRegistry>>,
    ) -> Result<Self> {
        if let Some(registry) = registry {
            let mut embeddings = Vec::with_capacity(table_definition.column_definitions.len());
            for cd in table_definition.column_definitions.iter() {
                if let ColumnKind::Embedding(embedding_def) = &cd.kind {
                    match registry.get(&embedding_def.embedding_name) {
                        Some(func) => {
                            embeddings.push((embedding_def.clone(), func));
                        }
                        None => {
                            return Err(Error::EmbeddingFunctionNotFound {
                                name: embedding_def.embedding_name.to_string(),
                                reason: format!(
                                    "Table was defined with an embedding column `{}` but no embedding function was found with that name within the registry.",
                                    embedding_def.embedding_name
                                ),
                            });
                        }
                    }
                }
            }

            if !embeddings.is_empty() {
                return Ok(Self::Yes(WithEmbeddings { inner, embeddings }));
            }
        };

        // No embeddings to apply
        Ok(Self::No(inner))
    }
}

impl<R: RecordBatchReader> WithEmbeddings<R> {
    pub fn new(
        inner: R,
        embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
    ) -> Self {
        Self { inner, embeddings }
    }
}

impl<R: RecordBatchReader> WithEmbeddings<R> {
    fn dest_fields(&self) -> Result<Vec<Field>> {
        let schema = self.inner.schema();
        self.embeddings
            .iter()
            .map(|(ed, func)| {
                let src_field = schema.field_with_name(&ed.source_column).unwrap();

                let field_name = ed
                    .dest_column
                    .clone()
                    .unwrap_or_else(|| format!("{}_embedding", &ed.source_column));
                Ok(Field::new(
                    field_name,
                    func.dest_type()?.into_owned(),
                    src_field.is_nullable(),
                ))
            })
            .collect()
    }

    fn column_defs(&self) -> Vec<ColumnDefinition> {
        let base_schema = self.inner.schema();
        base_schema
            .fields()
            .iter()
            .map(|_| ColumnDefinition {
                kind: ColumnKind::Physical,
            })
            .chain(self.embeddings.iter().map(|(ed, _)| ColumnDefinition {
                kind: ColumnKind::Embedding(ed.clone()),
            }))
            .collect::<Vec<_>>()
    }

    pub fn table_definition(&self) -> Result<TableDefinition> {
        let base_schema = self.inner.schema();

        let output_fields = self.dest_fields()?;
        let column_definitions = self.column_defs();

        let mut sb: SchemaBuilder = base_schema.as_ref().into();
        sb.extend(output_fields);

        let schema = Arc::new(sb.finish());
        Ok(TableDefinition {
            schema,
            column_definitions,
        })
    }
}

impl<R: RecordBatchReader> Iterator for MaybeEmbedded<R> {
    type Item = std::result::Result<RecordBatch, arrow_schema::ArrowError>;
    fn next(&mut self) -> Option<Self::Item> {
        match self {
            Self::Yes(inner) => inner.next(),
            Self::No(inner) => inner.next(),
        }
    }
}

impl<R: RecordBatchReader> RecordBatchReader for MaybeEmbedded<R> {
    fn schema(&self) -> Arc<arrow_schema::Schema> {
        match self {
            Self::Yes(inner) => inner.schema(),
            Self::No(inner) => inner.schema(),
        }
    }
}

impl<R: RecordBatchReader> Iterator for WithEmbeddings<R> {
    type Item = std::result::Result<RecordBatch, arrow_schema::ArrowError>;

    fn next(&mut self) -> Option<Self::Item> {
        let batch = self.inner.next()?;
        match batch {
            Ok(mut batch) => {
                // todo: parallelize this
                for (fld, func) in self.embeddings.iter() {
                    let src_column = batch.column_by_name(&fld.source_column).unwrap();
                    let embedding = match func.embed(src_column.clone()) {
                        Ok(embedding) => embedding,
                        Err(e) => {
                            return Some(Err(arrow_schema::ArrowError::ComputeError(format!(
                                "Error computing embedding: {}",
                                e
                            ))))
                        }
                    };
                    let dst_field_name = fld
                        .dest_column
                        .clone()
                        .unwrap_or_else(|| format!("{}_embedding", &fld.source_column));

                    let dst_field = Field::new(
                        dst_field_name,
                        embedding.data_type().clone(),
                        embedding.nulls().is_some(),
                    );

                    match batch.try_with_column(dst_field.clone(), embedding) {
                        Ok(b) => batch = b,
                        Err(e) => return Some(Err(e)),
                    };
                }
                Some(Ok(batch))
            }
            Err(e) => Some(Err(e)),
        }
    }
}

impl<R: RecordBatchReader> RecordBatchReader for WithEmbeddings<R> {
    fn schema(&self) -> Arc<arrow_schema::Schema> {
        self.table_definition()
            .expect("table definition should be infallible at this point")
            .into_rich_schema()
    }
}
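To make the trait concrete, here is a minimal sketch of a custom `EmbeddingFunction` registered in a `MemoryRegistry`. It is modeled on the `MockEmbed` test helper further down; a real implementation would call an actual embedding model instead of returning zeros:

```rust
use std::{borrow::Cow, sync::Arc};
use arrow_array::{Array, Float32Array};
use arrow_schema::DataType;
use lancedb::embeddings::{EmbeddingFunction, EmbeddingRegistry, MemoryRegistry};
use lancedb::Result;

#[derive(Debug)]
struct ZeroEmbed; // toy function: embeds every input value as a single 0.0

impl EmbeddingFunction for ZeroEmbed {
    fn name(&self) -> &str {
        "zero_embed"
    }
    fn source_type(&self) -> Result<Cow<DataType>> {
        Ok(Cow::Owned(DataType::Utf8))
    }
    fn dest_type(&self) -> Result<Cow<DataType>> {
        // Must always match what `embed` actually returns.
        Ok(Cow::Owned(DataType::Float32))
    }
    fn embed(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
        Ok(Arc::new(Float32Array::from(vec![0.0; source.len()])))
    }
}

// Inside a function returning lancedb::Result<()>:
let registry = MemoryRegistry::new();
registry.register("zero_embed", Arc::new(ZeroEmbed))?;
assert!(registry.get("zero_embed").is_some());
```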
@@ -26,6 +26,9 @@ pub enum Error {
     InvalidInput { message: String },
     #[snafu(display("Table '{name}' was not found"))]
     TableNotFound { name: String },
+    #[snafu(display("Embedding function '{name}' was not found. : {reason}"))]
+    EmbeddingFunctionNotFound { name: String, reason: String },
+
     #[snafu(display("Table '{name}' already exists"))]
     TableAlreadyExists { name: String },
     #[snafu(display("Unable to created lance dataset at {path}: {source}"))]
@@ -112,3 +115,13 @@ impl From<url::ParseError> for Error {
         }
     }
 }
+
+#[cfg(feature = "polars")]
+impl From<polars::prelude::PolarsError> for Error {
+    fn from(source: polars::prelude::PolarsError) -> Self {
+        Self::Other {
+            message: "Error in Polars DataFrame integration.".to_string(),
+            source: Some(Box::new(source)),
+        }
+    }
+}
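A sketch of how a caller can detect the new variant; the pattern mirrors the assertions in the integration tests at the end of this change:

```rust
use lancedb::Error;

// `res` is assumed to be a lancedb::Result<_> from a call that needs a
// registered embedding function (e.g. add_embedding or Table::add).
match res {
    Err(Error::EmbeddingFunctionNotFound { name, reason }) => {
        eprintln!("embedding function `{name}` is missing: {reason}");
    }
    Err(other) => return Err(other),
    Ok(_) => {}
}
```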
@@ -46,10 +46,18 @@ impl VectorIndex {
     }
 }
 
+#[derive(Debug, Deserialize)]
+pub struct VectorIndexMetadata {
+    pub metric_type: String,
+    pub index_type: String,
+}
+
 #[derive(Debug, Deserialize)]
 pub struct VectorIndexStatistics {
     pub num_indexed_rows: usize,
     pub num_unindexed_rows: usize,
+    pub index_type: String,
+    pub indices: Vec<VectorIndexMetadata>,
 }
 
 /// Builder for an IVF PQ index.
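Since both structs derive `Deserialize`, an index-statistics blob can be decoded directly with serde_json. A sketch with a hypothetical payload (the exact JSON Lance emits may differ, but the field names follow the struct definitions above):

```rust
// Hypothetical stats JSON for illustration only.
let raw = r#"{
    "num_indexed_rows": 1000,
    "num_unindexed_rows": 0,
    "index_type": "IVF_PQ",
    "indices": [{"metric_type": "l2", "index_type": "IVF_PQ"}]
}"#;
let stats: VectorIndexStatistics = serde_json::from_str(raw).unwrap();
assert_eq!(stats.indices[0].metric_type, "l2");
```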
@@ -350,8 +350,16 @@ mod test {
 
     #[tokio::test]
     async fn test_e2e() {
-        let dir1 = tempfile::tempdir().unwrap().into_path();
-        let dir2 = tempfile::tempdir().unwrap().into_path();
+        let dir1 = tempfile::tempdir()
+            .unwrap()
+            .into_path()
+            .canonicalize()
+            .unwrap();
+        let dir2 = tempfile::tempdir()
+            .unwrap()
+            .into_path()
+            .canonicalize()
+            .unwrap();
 
         let secondary_store = LocalFileSystem::new_with_prefix(dir2.to_str().unwrap()).unwrap();
         let object_store_wrapper = Arc::new(MirroringObjectStoreWrapper {
@@ -34,6 +34,16 @@
 //! cargo install lancedb
 //! ```
 //!
+//! ## Crate Features
+//!
+//! ### Experimental Features
+//!
+//! These features are not enabled by default. They are experimental or in-development features that
+//! are not yet ready to be released.
+//!
+//! - `remote` - Enable remote client to connect to LanceDB cloud. This is not yet fully implemented
+//!   and should not be enabled.
+//!
 //! ### Quick Start
 //!
 //! #### Connect to a database.
@@ -184,10 +194,13 @@
 pub mod arrow;
 pub mod connection;
 pub mod data;
+pub mod embeddings;
 pub mod error;
 pub mod index;
 pub mod io;
 pub mod ipc;
+#[cfg(feature = "polars")]
+mod polars_arrow_convertors;
 pub mod query;
 #[cfg(feature = "remote")]
 pub(crate) mod remote;

123 rust/lancedb/src/polars_arrow_convertors.rs Normal file
@@ -0,0 +1,123 @@
/// Polars and LanceDB both use Arrow for their in memory-representation, but use
/// different Rust Arrow implementations. LanceDB uses the arrow-rs crate and
/// Polars uses the polars-arrow crate.
///
/// This crate defines zero-copy conversions (of the underlying buffers)
/// between polars-arrow and arrow-rs using the C FFI.
///
/// The polars-arrow does implement conversions to and from arrow-rs, but
/// requires a feature flagged dependency on arrow-rs. The version of arrow-rs
/// depended on by polars-arrow and LanceDB may not be compatible,
/// which necessitates using the C FFI.
use crate::error::Result;
use polars::prelude::{DataFrame, Series};
use std::{mem, sync::Arc};

/// When interpreting Polars dataframes as polars-arrow record batches,
/// one must decide whether to use Arrow string/binary view types
/// instead of the standard Arrow string/binary types.
/// For now, we will not use string view types because conversions
/// for string view types from polars-arrow to arrow-rs are not yet implemented.
/// See: https://lists.apache.org/thread/w88tpz76ox8h3rxkjl4so6rg3f1rv7wt for the
/// differences in the types.
pub const POLARS_ARROW_FLAVOR: bool = false;
const IS_ARRAY_NULLABLE: bool = true;

/// Converts a Polars DataFrame schema to an Arrow RecordBatch schema.
pub fn convert_polars_df_schema_to_arrow_rb_schema(
    polars_df_schema: polars::prelude::Schema,
) -> Result<Arc<arrow_schema::Schema>> {
    let arrow_fields: Result<Vec<arrow_schema::Field>> = polars_df_schema
        .into_iter()
        .map(|(name, df_dtype)| {
            let polars_arrow_dtype = df_dtype.to_arrow(POLARS_ARROW_FLAVOR);
            let polars_field =
                polars_arrow::datatypes::Field::new(name, polars_arrow_dtype, IS_ARRAY_NULLABLE);
            convert_polars_arrow_field_to_arrow_rs_field(polars_field)
        })
        .collect();
    Ok(Arc::new(arrow_schema::Schema::new(arrow_fields?)))
}

/// Converts an Arrow RecordBatch schema to a Polars DataFrame schema.
pub fn convert_arrow_rb_schema_to_polars_df_schema(
    arrow_schema: &arrow_schema::Schema,
) -> Result<polars::prelude::Schema> {
    let polars_df_fields: Result<Vec<polars::prelude::Field>> = arrow_schema
        .fields()
        .iter()
        .map(|arrow_rs_field| {
            let polars_arrow_field = convert_arrow_rs_field_to_polars_arrow_field(arrow_rs_field)?;
            Ok(polars::prelude::Field::new(
                arrow_rs_field.name(),
                polars::datatypes::DataType::from(polars_arrow_field.data_type()),
            ))
        })
        .collect();
    Ok(polars::prelude::Schema::from_iter(polars_df_fields?))
}

/// Converts an Arrow RecordBatch to a Polars DataFrame, using a provided Polars DataFrame schema.
pub fn convert_arrow_rb_to_polars_df(
    arrow_rb: &arrow::record_batch::RecordBatch,
    polars_schema: &polars::prelude::Schema,
) -> Result<DataFrame> {
    let mut columns: Vec<Series> = Vec::with_capacity(arrow_rb.num_columns());

    for (i, column) in arrow_rb.columns().iter().enumerate() {
        let polars_df_dtype = polars_schema.try_get_at_index(i)?.1;
        let polars_arrow_dtype = polars_df_dtype.to_arrow(POLARS_ARROW_FLAVOR);
        let polars_array =
            convert_arrow_rs_array_to_polars_arrow_array(column, polars_arrow_dtype)?;
        columns.push(Series::from_arrow(
            polars_schema.try_get_at_index(i)?.0,
            polars_array,
        )?);
    }

    Ok(DataFrame::from_iter(columns))
}

/// Converts a polars-arrow Arrow array to an arrow-rs Arrow array.
pub fn convert_polars_arrow_array_to_arrow_rs_array(
    polars_array: Box<dyn polars_arrow::array::Array>,
    arrow_datatype: arrow_schema::DataType,
) -> std::result::Result<arrow_array::ArrayRef, arrow_schema::ArrowError> {
    let polars_c_array = polars_arrow::ffi::export_array_to_c(polars_array);
    let arrow_c_array = unsafe { mem::transmute(polars_c_array) };
    Ok(arrow_array::make_array(unsafe {
        arrow::ffi::from_ffi_and_data_type(arrow_c_array, arrow_datatype)
    }?))
}

/// Converts an arrow-rs Arrow array to a polars-arrow Arrow array.
fn convert_arrow_rs_array_to_polars_arrow_array(
    arrow_rs_array: &Arc<dyn arrow_array::Array>,
    polars_arrow_dtype: polars::datatypes::ArrowDataType,
) -> Result<Box<dyn polars_arrow::array::Array>> {
    let arrow_c_array = arrow::ffi::FFI_ArrowArray::new(&arrow_rs_array.to_data());
    let polars_c_array = unsafe { mem::transmute(arrow_c_array) };
    Ok(unsafe { polars_arrow::ffi::import_array_from_c(polars_c_array, polars_arrow_dtype) }?)
}

fn convert_polars_arrow_field_to_arrow_rs_field(
    polars_arrow_field: polars_arrow::datatypes::Field,
) -> Result<arrow_schema::Field> {
    let polars_c_schema = polars_arrow::ffi::export_field_to_c(&polars_arrow_field);
    let arrow_c_schema: arrow::ffi::FFI_ArrowSchema = unsafe { mem::transmute(polars_c_schema) };
    let arrow_rs_dtype = arrow_schema::DataType::try_from(&arrow_c_schema)?;
    Ok(arrow_schema::Field::new(
        polars_arrow_field.name,
        arrow_rs_dtype,
        IS_ARRAY_NULLABLE,
    ))
}

fn convert_arrow_rs_field_to_polars_arrow_field(
    arrow_rs_field: &arrow_schema::Field,
) -> Result<polars_arrow::datatypes::Field> {
    let arrow_rs_dtype = arrow_rs_field.data_type();
    let arrow_c_schema = arrow::ffi::FFI_ArrowSchema::try_from(arrow_rs_dtype)?;
    let polars_c_schema: polars_arrow::ffi::ArrowSchema = unsafe { mem::transmute(arrow_c_schema) };
    Ok(unsafe { polars_arrow::ffi::import_field_from_c(&polars_c_schema) }?)
}
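A sketch of round-tripping a schema through the converters above. It assumes the `polars` feature is enabled and that the code runs inside this crate (the module is private, so external callers do not see these functions directly); note the `From<PolarsError>` impl added in this change makes the `?` on `df!` work:

```rust
use polars::prelude::*;

// Inside a function returning crate::error::Result<()>.
// Build a tiny DataFrame, derive its arrow-rs schema, and convert back.
let df = df!("id" => &[1i32, 2, 3])?;
let arrow_schema = convert_polars_df_schema_to_arrow_rb_schema(df.schema())?;
let polars_schema = convert_arrow_rb_schema_to_polars_df_schema(&arrow_schema)?;
assert_eq!(arrow_schema.fields().len(), polars_schema.len());
```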
@@ -23,6 +23,7 @@ use tokio::task::spawn_blocking;
 use crate::connection::{
     ConnectionInternal, CreateTableBuilder, NoData, OpenTableBuilder, TableNamesBuilder,
 };
+use crate::embeddings::EmbeddingRegistry;
 use crate::error::Result;
 use crate::Table;
 
@@ -87,14 +88,16 @@ impl ConnectionInternal for RemoteDatabase {
             .await
             .unwrap()?;
 
-        self.client
-            .post(&format!("/v1/table/{}/create", options.name))
+        let rsp = self
+            .client
+            .post(&format!("/v1/table/{}/create/", options.name))
             .body(data_buffer)
             .header(CONTENT_TYPE, ARROW_STREAM_CONTENT_TYPE)
             // This is currently expected by LanceDb cloud but will be removed soon.
             .header("x-request-id", "na")
             .send()
             .await?;
+        self.client.check_response(rsp).await?;
 
         Ok(Table::new(Arc::new(RemoteTable::new(
             self.client.clone(),
@@ -113,4 +116,8 @@ impl ConnectionInternal for RemoteDatabase {
     async fn drop_db(&self) -> Result<()> {
         todo!()
     }
+
+    fn embedding_registry(&self) -> &dyn EmbeddingRegistry {
+        todo!()
+    }
 }
@@ -10,7 +10,7 @@ use crate::{
     query::{Query, QueryExecutionOptions, VectorQuery},
     table::{
         merge::MergeInsertBuilder, AddDataBuilder, NativeTable, OptimizeAction, OptimizeStats,
-        TableInternal, UpdateBuilder,
+        TableDefinition, TableInternal, UpdateBuilder,
     },
 };
 
@@ -120,4 +120,7 @@ impl TableInternal for RemoteTable {
     async fn list_indices(&self) -> Result<Vec<IndexConfig>> {
         todo!()
     }
+    async fn table_definition(&self) -> Result<TableDefinition> {
+        todo!()
+    }
 }
@@ -41,10 +41,12 @@ use lance::io::WrappingObjectStore;
 use lance_index::IndexType;
 use lance_index::{optimize::OptimizeOptions, DatasetIndexExt};
 use log::info;
+use serde::{Deserialize, Serialize};
 use snafu::whatever;
 
 use crate::arrow::IntoArrow;
 use crate::connection::NoData;
+use crate::embeddings::{EmbeddingDefinition, EmbeddingRegistry, MaybeEmbedded, MemoryRegistry};
 use crate::error::{Error, Result};
 use crate::index::vector::{IvfPqIndexBuilder, VectorIndex, VectorIndexStatistics};
 use crate::index::IndexConfig;
@@ -63,6 +65,79 @@ use self::merge::MergeInsertBuilder;
 pub(crate) mod dataset;
 pub mod merge;
 
+/// Defines the type of column
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub enum ColumnKind {
+    /// Columns populated by data from the user (this is the most common case)
+    Physical,
+    /// Columns populated by applying an embedding function to the input
+    Embedding(EmbeddingDefinition),
+}
+
+/// Defines a column in a table
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ColumnDefinition {
+    /// The source of the column data
+    pub kind: ColumnKind,
+}
+
+#[derive(Debug, Clone)]
+pub struct TableDefinition {
+    pub column_definitions: Vec<ColumnDefinition>,
+    pub schema: SchemaRef,
+}
+
+impl TableDefinition {
+    pub fn new(schema: SchemaRef, column_definitions: Vec<ColumnDefinition>) -> Self {
+        Self {
+            column_definitions,
+            schema,
+        }
+    }
+
+    pub fn new_from_schema(schema: SchemaRef) -> Self {
+        let column_definitions = schema
+            .fields()
+            .iter()
+            .map(|_| ColumnDefinition {
+                kind: ColumnKind::Physical,
+            })
+            .collect();
+        Self::new(schema, column_definitions)
+    }
+
+    pub fn try_from_rich_schema(schema: SchemaRef) -> Result<Self> {
+        let column_definitions = schema.metadata.get("lancedb::column_definitions");
+        if let Some(column_definitions) = column_definitions {
+            let column_definitions: Vec<ColumnDefinition> =
+                serde_json::from_str(column_definitions).map_err(|e| Error::Runtime {
+                    message: format!("Failed to deserialize column definitions: {}", e),
+                })?;
+            Ok(Self::new(schema, column_definitions))
+        } else {
+            let column_definitions = schema
+                .fields()
+                .iter()
+                .map(|_| ColumnDefinition {
+                    kind: ColumnKind::Physical,
+                })
+                .collect();
+            Ok(Self::new(schema, column_definitions))
+        }
+    }
+
+    pub fn into_rich_schema(self) -> SchemaRef {
+        // We have full control over the structure of column definitions. This should
+        // not fail, except for a bug
+        let lancedb_metadata = serde_json::to_string(&self.column_definitions).unwrap();
+        let mut schema_with_metadata = (*self.schema).clone();
+        schema_with_metadata
+            .metadata
+            .insert("lancedb::column_definitions".to_string(), lancedb_metadata);
+        Arc::new(schema_with_metadata)
+    }
+}
+
 /// Optimize the dataset.
 ///
 /// Similar to `VACUUM` in PostgreSQL, it offers different options to
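A short sketch of the metadata round trip these helpers implement: column definitions are serialized into the schema's metadata under the `lancedb::column_definitions` key and recovered later. It assumes an arrow-rs `Schema` in scope and a function returning `lancedb::Result<()>`:

```rust
use std::sync::Arc;
use arrow_schema::{DataType, Field, Schema};

// Plain schema -> TableDefinition -> "rich" schema with serialized column
// definitions in its metadata -> TableDefinition again.
let schema = Arc::new(Schema::new(vec![Field::new("text", DataType::Utf8, true)]));
let def = TableDefinition::new_from_schema(schema);
let rich = def.into_rich_schema();
assert!(rich.metadata.contains_key("lancedb::column_definitions"));
let roundtrip = TableDefinition::try_from_rich_schema(rich)?;
assert_eq!(roundtrip.column_definitions.len(), 1);
```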
@@ -132,6 +207,7 @@ pub struct AddDataBuilder<T: IntoArrow> {
     pub(crate) data: T,
     pub(crate) mode: AddDataMode,
     pub(crate) write_options: WriteOptions,
+    embedding_registry: Option<Arc<dyn EmbeddingRegistry>>,
 }
 
 impl<T: IntoArrow> std::fmt::Debug for AddDataBuilder<T> {
@@ -163,6 +239,7 @@ impl<T: IntoArrow> AddDataBuilder<T> {
             mode: self.mode,
             parent: self.parent,
             write_options: self.write_options,
+            embedding_registry: self.embedding_registry,
         };
         parent.add(without_data, data).await
     }
@@ -280,6 +357,7 @@ pub(crate) trait TableInternal: std::fmt::Display + std::fmt::Debug + Send + Syn
     async fn checkout(&self, version: u64) -> Result<()>;
     async fn checkout_latest(&self) -> Result<()>;
     async fn restore(&self) -> Result<()>;
+    async fn table_definition(&self) -> Result<TableDefinition>;
 }
 
 /// A Table is a collection of strong typed Rows.
@@ -288,6 +366,7 @@ pub(crate) trait TableInternal: std::fmt::Display + std::fmt::Debug + Send + Syn
 #[derive(Clone)]
 pub struct Table {
     inner: Arc<dyn TableInternal>,
+    embedding_registry: Arc<dyn EmbeddingRegistry>,
 }
 
 impl std::fmt::Display for Table {
@@ -298,7 +377,20 @@ impl std::fmt::Display for Table {
 
 impl Table {
     pub(crate) fn new(inner: Arc<dyn TableInternal>) -> Self {
-        Self { inner }
+        Self {
+            inner,
+            embedding_registry: Arc::new(MemoryRegistry::new()),
+        }
+    }
+
+    pub(crate) fn new_with_embedding_registry(
+        inner: Arc<dyn TableInternal>,
+        embedding_registry: Arc<dyn EmbeddingRegistry>,
+    ) -> Self {
+        Self {
+            inner,
+            embedding_registry,
+        }
     }
 
     /// Cast as [`NativeTable`], or return None it if is not a [`NativeTable`].
@@ -340,6 +432,7 @@ impl Table {
             data: batches,
             mode: AddDataMode::Append,
             write_options: WriteOptions::default(),
+            embedding_registry: Some(self.embedding_registry.clone()),
         }
     }
 
@@ -743,11 +836,10 @@ impl Table {
 
 impl From<NativeTable> for Table {
     fn from(table: NativeTable) -> Self {
-        Self {
-            inner: Arc::new(table),
-        }
+        Self::new(Arc::new(table))
     }
 }
 
 /// A table in a LanceDB database.
 #[derive(Debug, Clone)]
 pub struct NativeTable {
@@ -918,7 +1010,6 @@ impl NativeTable {
             Some(wrapper) => params.patch_with_store_wrapper(wrapper)?,
             None => params,
         };
-
         let storage_options = params
             .store_params
             .clone()
@@ -1061,6 +1152,26 @@ impl NativeTable {
         }
     }
 
+    pub async fn get_index_type(&self, index_uuid: &str) -> Result<Option<String>> {
+        match self.load_index_stats(index_uuid).await? {
+            Some(stats) => Ok(Some(stats.index_type)),
+            None => Ok(None),
+        }
+    }
+
+    pub async fn get_distance_type(&self, index_uuid: &str) -> Result<Option<String>> {
+        match self.load_index_stats(index_uuid).await? {
+            Some(stats) => Ok(Some(
+                stats
+                    .indices
+                    .iter()
+                    .map(|i| i.metric_type.clone())
+                    .collect(),
+            )),
+            None => Ok(None),
+        }
+    }
+
     pub async fn load_indices(&self) -> Result<Vec<VectorIndex>> {
         let dataset = self.dataset.get().await?;
         let (indices, mf) = futures::try_join!(dataset.load_indices(), dataset.latest_manifest())?;
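A minimal sketch of the new accessors in use, assuming you already hold a `NativeTable` and an index UUID string (`table` and `index_uuid` here are placeholders, inside an async function returning `lancedb::Result<()>`):

```rust
// `table: &NativeTable` and `index_uuid: &str` are assumed in scope.
if let Some(index_type) = table.get_index_type(index_uuid).await? {
    println!("index type: {index_type}");
}
if let Some(distance_type) = table.get_distance_type(index_uuid).await? {
    println!("distance type: {distance_type}");
}
```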
@@ -1322,6 +1433,11 @@ impl TableInternal for NativeTable {
         Ok(Arc::new(Schema::from(&lance_schema)))
     }
 
+    async fn table_definition(&self) -> Result<TableDefinition> {
+        let schema = self.schema().await?;
+        TableDefinition::try_from_rich_schema(schema)
+    }
+
     async fn count_rows(&self, filter: Option<String>) -> Result<usize> {
         Ok(self.dataset.get().await?.count_rows(filter).await?)
     }
@@ -1331,6 +1447,9 @@ impl TableInternal for NativeTable {
         add: AddDataBuilder<NoData>,
         data: Box<dyn RecordBatchReader + Send>,
     ) -> Result<()> {
+        let data =
+            MaybeEmbedded::try_new(data, self.table_definition().await?, add.embedding_registry)?;
+
         let mut lance_params = add.write_options.lance_write_params.unwrap_or(WriteParams {
             mode: match add.mode {
                 AddDataMode::Append => WriteMode::Append,
@@ -1358,8 +1477,8 @@ impl TableInternal for NativeTable {
         };
 
         self.dataset.ensure_mutable().await?;
 
         let dataset = Dataset::write(data, &self.uri, Some(lance_params)).await?;
 
         self.dataset.set_latest(dataset).await;
         Ok(())
     }

320 rust/lancedb/tests/embedding_registry_test.rs Normal file
@@ -0,0 +1,320 @@
use std::{
    borrow::Cow,
    collections::{HashMap, HashSet},
    iter::repeat,
    sync::Arc,
};

use arrow::buffer::NullBuffer;
use arrow_array::{
    Array, FixedSizeListArray, Float32Array, Int32Array, RecordBatch, RecordBatchIterator,
    StringArray,
};
use arrow_schema::{DataType, Field, Schema};
use futures::StreamExt;
use lancedb::{
    arrow::IntoArrow,
    connect,
    embeddings::{EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry},
    query::ExecutableQuery,
    Error, Result,
};

#[tokio::test]
async fn test_custom_func() -> Result<()> {
    let tempdir = tempfile::tempdir().unwrap();
    let tempdir = tempdir.path().to_str().unwrap();
    let db = connect(tempdir).execute().await?;
    let embed_fun = MockEmbed::new("embed_fun".to_string(), 1);
    db.embedding_registry()
        .register("embed_fun", Arc::new(embed_fun.clone()))?;

    let tbl = db
        .create_table("test", create_some_records()?)
        .add_embedding(EmbeddingDefinition::new(
            "text",
            &embed_fun.name,
            Some("embeddings"),
        ))?
        .execute()
        .await?;
    let mut res = tbl.query().execute().await?;
    while let Some(Ok(batch)) = res.next().await {
        let embeddings = batch.column_by_name("embeddings");
        assert!(embeddings.is_some());
        let embeddings = embeddings.unwrap();
        assert_eq!(embeddings.data_type(), embed_fun.dest_type()?.as_ref());
    }
    // now make sure the embeddings are applied when
    // we add new records too
    tbl.add(create_some_records()?).execute().await?;
    let mut res = tbl.query().execute().await?;
    while let Some(Ok(batch)) = res.next().await {
        let embeddings = batch.column_by_name("embeddings");
        assert!(embeddings.is_some());
        let embeddings = embeddings.unwrap();
        assert_eq!(embeddings.data_type(), embed_fun.dest_type()?.as_ref());
    }
    Ok(())
}

#[tokio::test]
async fn test_custom_registry() -> Result<()> {
    let tempdir = tempfile::tempdir().unwrap();
    let tempdir = tempdir.path().to_str().unwrap();

    let db = connect(tempdir)
        .embedding_registry(Arc::new(MyRegistry::default()))
        .execute()
        .await?;

    let tbl = db
        .create_table("test", create_some_records()?)
        .add_embedding(EmbeddingDefinition::new(
            "text",
            "func_1",
            Some("embeddings"),
        ))?
        .execute()
        .await?;
    let mut res = tbl.query().execute().await?;
    while let Some(Ok(batch)) = res.next().await {
        let embeddings = batch.column_by_name("embeddings");
        assert!(embeddings.is_some());
        let embeddings = embeddings.unwrap();
        assert_eq!(
            embeddings.data_type(),
            MockEmbed::new("func_1".to_string(), 1)
                .dest_type()?
                .as_ref()
        );
    }
    Ok(())
}

#[tokio::test]
async fn test_multiple_embeddings() -> Result<()> {
    let tempdir = tempfile::tempdir().unwrap();
    let tempdir = tempdir.path().to_str().unwrap();

    let db = connect(tempdir).execute().await?;
    let func_1 = MockEmbed::new("func_1".to_string(), 1);
    let func_2 = MockEmbed::new("func_2".to_string(), 10);
    db.embedding_registry()
        .register(&func_1.name, Arc::new(func_1.clone()))?;
    db.embedding_registry()
        .register(&func_2.name, Arc::new(func_2.clone()))?;

    let tbl = db
        .create_table("test", create_some_records()?)
        .add_embedding(EmbeddingDefinition::new(
            "text",
            &func_1.name,
            Some("first_embeddings"),
        ))?
        .add_embedding(EmbeddingDefinition::new(
            "text",
            &func_2.name,
            Some("second_embeddings"),
        ))?
        .execute()
        .await?;
    let mut res = tbl.query().execute().await?;
    while let Some(Ok(batch)) = res.next().await {
        let embeddings = batch.column_by_name("first_embeddings");
        assert!(embeddings.is_some());
        let second_embeddings = batch.column_by_name("second_embeddings");
        assert!(second_embeddings.is_some());

        let embeddings = embeddings.unwrap();
        assert_eq!(embeddings.data_type(), func_1.dest_type()?.as_ref());

        let second_embeddings = second_embeddings.unwrap();
        assert_eq!(second_embeddings.data_type(), func_2.dest_type()?.as_ref());
    }

    // now make sure the embeddings are applied when
    // we add new records too
    tbl.add(create_some_records()?).execute().await?;
    let mut res = tbl.query().execute().await?;
    while let Some(Ok(batch)) = res.next().await {
        let embeddings = batch.column_by_name("first_embeddings");
        assert!(embeddings.is_some());
        let second_embeddings = batch.column_by_name("second_embeddings");
        assert!(second_embeddings.is_some());

        let embeddings = embeddings.unwrap();
        assert_eq!(embeddings.data_type(), func_1.dest_type()?.as_ref());

        let second_embeddings = second_embeddings.unwrap();
        assert_eq!(second_embeddings.data_type(), func_2.dest_type()?.as_ref());
    }
    Ok(())
}

#[tokio::test]
async fn test_no_func_in_registry() -> Result<()> {
    let tempdir = tempfile::tempdir().unwrap();
    let tempdir = tempdir.path().to_str().unwrap();

    let db = connect(tempdir).execute().await?;

    let res = db
        .create_table("test", create_some_records()?)
        .add_embedding(EmbeddingDefinition::new(
            "text",
            "some_func",
            Some("first_embeddings"),
        ));
    assert!(res.is_err());
    assert!(matches!(
        res.err().unwrap(),
        Error::EmbeddingFunctionNotFound { .. }
    ));

    Ok(())
}

#[tokio::test]
async fn test_no_func_in_registry_on_add() -> Result<()> {
    let tempdir = tempfile::tempdir().unwrap();
    let tempdir = tempdir.path().to_str().unwrap();

    let db = connect(tempdir).execute().await?;
    db.embedding_registry().register(
        "some_func",
        Arc::new(MockEmbed::new("some_func".to_string(), 1)),
    )?;

    db.create_table("test", create_some_records()?)
        .add_embedding(EmbeddingDefinition::new(
            "text",
            "some_func",
            Some("first_embeddings"),
        ))?
        .execute()
        .await?;

    let db = connect(tempdir).execute().await?;

    let tbl = db.open_table("test").execute().await?;
    // This should fail because 'tbl' is expecting "some_func" to be in the registry
    let res = tbl.add(create_some_records()?).execute().await;
    assert!(res.is_err());
    assert!(matches!(
        res.unwrap_err(),
        crate::Error::EmbeddingFunctionNotFound { .. }
    ));

    Ok(())
}

fn create_some_records() -> Result<impl IntoArrow> {
    const TOTAL: usize = 2;

    let schema = Arc::new(Schema::new(vec![
        Field::new("id", DataType::Int32, false),
        Field::new("text", DataType::Utf8, true),
    ]));

    // Create a RecordBatch stream.
    let batches = RecordBatchIterator::new(
        vec![RecordBatch::try_new(
            schema.clone(),
            vec![
                Arc::new(Int32Array::from_iter_values(0..TOTAL as i32)),
                Arc::new(StringArray::from_iter(
                    repeat(Some("hello world".to_string())).take(TOTAL),
                )),
            ],
        )
        .unwrap()]
        .into_iter()
        .map(Ok),
        schema.clone(),
    );
    Ok(Box::new(batches))
}

#[derive(Debug)]
struct MyRegistry {
    functions: HashMap<String, Arc<dyn EmbeddingFunction>>,
}
impl Default for MyRegistry {
    fn default() -> Self {
        let funcs: Vec<Arc<dyn EmbeddingFunction>> = vec![
            Arc::new(MockEmbed::new("func_1".to_string(), 1)),
            Arc::new(MockEmbed::new("func_2".to_string(), 10)),
        ];
        Self {
            functions: funcs
                .into_iter()
                .map(|f| (f.name().to_string(), f))
                .collect(),
        }
    }
}

/// a mock registry that only has one function called `embed_fun`
impl EmbeddingRegistry for MyRegistry {
    fn functions(&self) -> HashSet<String> {
        self.functions.keys().cloned().collect()
    }

    fn register(&self, _name: &str, _function: Arc<dyn EmbeddingFunction>) -> Result<()> {
        Err(Error::Other {
            message: "MyRegistry is read-only".to_string(),
            source: None,
        })
    }

    fn get(&self, name: &str) -> Option<Arc<dyn EmbeddingFunction>> {
        self.functions.get(name).cloned()
    }
}

#[derive(Debug, Clone)]
struct MockEmbed {
    source_type: DataType,
    dest_type: DataType,
    name: String,
    dim: usize,
}

impl MockEmbed {
    pub fn new(name: String, dim: usize) -> Self {
        Self {
            source_type: DataType::Utf8,
            dest_type: DataType::new_fixed_size_list(DataType::Float32, dim as _, true),
            name,
            dim,
        }
    }
}

impl EmbeddingFunction for MockEmbed {
    fn name(&self) -> &str {
        &self.name
    }
    fn source_type(&self) -> Result<Cow<DataType>> {
        Ok(Cow::Borrowed(&self.source_type))
    }
    fn dest_type(&self) -> Result<Cow<DataType>> {
        Ok(Cow::Borrowed(&self.dest_type))
    }
    fn embed(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
        // We can't use the FixedSizeListBuilder here because it always adds a null bitmap
        // and we want to explicitly work with non-nullable arrays.
        let len = source.len();
        let inner = Arc::new(Float32Array::from(vec![Some(1.0); len * self.dim]));
        let field = Field::new("item", inner.data_type().clone(), false);
        let arr = FixedSizeListArray::new(
            Arc::new(field),
            self.dim as _,
            inner,
            Some(NullBuffer::new_valid(len)),
        );

        Ok(Arc::new(arr))
    }
}