Mirror of https://github.com/lancedb/lancedb.git (synced 2025-12-23 05:19:58 +00:00)

# Compare commits: python-v0. ... python-v0.

162 commits
The commit list in this mirror preserves only abbreviated SHAs; the Author and Date columns were not captured. In the order listed:

f79295c697, 381fad9b65, 055bf91d3e, 050f0086b8, 10fa23e0d6, 43d9fc28b0, f45f0d0431, b9e3c36d82, 3cd7dd3375, 12d4ce4cfe, 3d1f102087, 81afd8a42f, c2aa03615a, d2c6759e7f, 94fb9f364a, fbff244ed8, 7e7466d224, cceaf27d79, 7a15337e03, 96c66fd087, 0579303602, 75edb8756c, 88283110f4, b3a637fdeb, ce24457531, 087fe6343d, ab8cbe62dd, f076bb41f4, 902fb83d54, 779118339f, 03b62599d7, 4c999fb651, 6d23d32ab5, 704cec34e1, a300a238db, a41ff1df0a, 77b005d849, 167fccc427, 2bffbcefa5, 905552f993, e4898c9313, cab36d94b2, b64252d4fd, 6fc006072c, d4bb59b542, 6b2dd6de51, dbccd9e4f1, b12ebfed4c, 1dadb2aefa, eb9784d7f2, ba755626cc, 7760799cb8, 4beb2d2877, a00b8595d1, 9c8314b4fd, c625b6f2b2, bec8fe6547, dc1150c011, afaefc6264, cb70ff8cee, cbb5a841b1, c72f6770fd, e5a80a5e86, 8d0a7fad1f, b80d4d0134, 9645fe52c2, b77314168d, e08d45e090, 2e3ddb8382, 627ca4c810, f8dae4ffe9, 9eb6119468, 59b57e30ed, fec8d58f06, 84ded9d678, 65696d9713, e2f2ea32e4, d5f2eca754, 7fa455a8a5, 8f42b5874e, 274f19f560, fbcbc75b5b, 008f389bd0, 91af6518d9, af6819762c, 7acece493d, 20e017fedc, 74e578b3c8, d92d9eb3d2, b6cdce7bc9, 316b406265, 8825c7c1dd, 81c85ff702, 570f2154d5, 0525c055fc, 38d11291da, 258e682574, d7afa600b8, 5c7303ab2e, 5895ef4039, 0528cd858a, 6582f43422, 5c7f63388d, d0bc671cac, d37e17593d, cb726d370e, 23ee132546, 7fa090d330, 07bc1c5397, d7a9dbb9fc, 00487afc7d, 1902d65aad, c4fbb65b8e, 875ed7ae6f, 95a46a57ba, 51561e31a0, 7b19120578, 745c34a6a9, db8fa2454d, a67a7b4b42, 496846e532, dadcfebf8e, 67033dbd7f, 05a85cfc2a, 40c5d3d72b, 198f0f80c6, e3f2fd3892, f401ccc599, 81b59139f8, 1026781ab6, 9c699b8cd9, 34bec59bc3, a5fbbf0d66, b42721167b, 543dec9ff0, 04f962f6b0, 19e896ff69, 272e4103b2, 75c257ebb6, 9ee152eb42, c9ae1b1737, 89dc80c42a, 7b020ac799, 529e774bbb, 7c12239305, d83424d6b4, 8bf89f887c, b2160b2304, 1bb82597be, e4eee38b3c, 64fc2be503, dc8054e90d, 1684940946, 695813463c, ed594b0f76, cee2b5ea42, f315f9665a, 5deb26bc8b, 3cc670ac38, 4ade3e31e2, a222d2cd91, 508e621f3d
## Version-bump config (file header not captured in the mirror)

```diff
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.19.1-beta.0"
+current_version = "0.21.2-beta.1"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
    (?P<minor>0|[1-9]\\d*)\\.
```
## .github/workflows/cargo-publish.yml (vendored, 10 lines changed)

```diff
@@ -5,8 +5,8 @@ on:
     tags-ignore:
       # We don't publish pre-releases for Rust. Crates.io is just a source
       # distribution, so we don't need to publish pre-releases.
-      - 'v*-beta*'
-      - '*-v*' # for example, python-vX.Y.Z
+      - "v*-beta*"
+      - "*-v*" # for example, python-vX.Y.Z

 env:
   # This env var is used by Swatinem/rust-cache@v2 for the cache
@@ -19,6 +19,8 @@ env:
 jobs:
   build:
     runs-on: ubuntu-22.04
+    permissions:
+      id-token: write
     timeout-minutes: 30
     # Only runs on tags that matches the make-release action
     if: startsWith(github.ref, 'refs/tags/v')
@@ -31,6 +33,8 @@ jobs:
         run: |
           sudo apt update
           sudo apt install -y protobuf-compiler libssl-dev
+      - uses: rust-lang/crates-io-auth-action@v1
+        id: auth
       - name: Publish the package
         run: |
-          cargo publish -p lancedb --all-features --token ${{ secrets.CARGO_REGISTRY_TOKEN }}
+          cargo publish -p lancedb --all-features --token ${{ steps.auth.outputs.token }}
```
## .github/workflows/java.yml (vendored, 7 lines changed)

```diff
@@ -35,6 +35,9 @@ jobs:
       - uses: Swatinem/rust-cache@v2
         with:
           workspaces: java/core/lancedb-jni
+      - uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          components: rustfmt
       - name: Run cargo fmt
         run: cargo fmt --check
         working-directory: ./java/core/lancedb-jni
@@ -68,6 +71,9 @@ jobs:
       - uses: Swatinem/rust-cache@v2
         with:
           workspaces: java/core/lancedb-jni
+      - uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          components: rustfmt
       - name: Run cargo fmt
         run: cargo fmt --check
         working-directory: ./java/core/lancedb-jni
@@ -110,4 +116,3 @@ jobs:
             -Djdk.reflect.useDirectMethodHandle=false \
             -Dio.netty.tryReflectionSetAccessible=true"
           JAVA_HOME=$JAVA_17 mvn clean test
-
```
## .github/workflows/make-release-commit.yml (vendored, 9 lines changed)

```diff
@@ -84,6 +84,7 @@ jobs:
         run: |
           pip install bump-my-version PyGithub packaging
           bash ci/bump_version.sh ${{ inputs.type }} ${{ inputs.bump-minor }} v $COMMIT_BEFORE_BUMP
+          bash ci/update_lockfiles.sh --amend
       - name: Push new version tag
         if: ${{ !inputs.dry_run }}
         uses: ad-m/github-push-action@master
@@ -92,11 +93,3 @@ jobs:
           github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
           branch: ${{ github.ref }}
           tags: true
-      - uses: ./.github/workflows/update_package_lock
-        if: ${{ !inputs.dry_run && inputs.other }}
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: ./.github/workflows/update_package_lock_nodejs
-        if: ${{ !inputs.dry_run && inputs.other }}
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
```
## .github/workflows/nodejs.yml (vendored, 5 lines changed)

```diff
@@ -47,6 +47,9 @@ jobs:
         run: |
           sudo apt update
           sudo apt install -y protobuf-compiler libssl-dev
+      - uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          components: rustfmt, clippy
       - name: Lint
         run: |
           cargo fmt --all -- --check
@@ -113,7 +116,7 @@ jobs:
           set -e
           npm ci
           npm run docs
-          if ! git diff --exit-code; then
+          if ! git diff --exit-code -- . ':(exclude)Cargo.lock'; then
            echo "Docs need to be updated"
            echo "Run 'npm run docs', fix any warnings, and commit the changes."
            exit 1
```
## .github/workflows/npm-publish.yml (vendored, 34 lines changed)

```diff
@@ -505,6 +505,8 @@ jobs:
     name: vectordb NPM Publish
     needs: [node, node-macos, node-linux-gnu, node-windows]
     runs-on: ubuntu-latest
+    permissions:
+      contents: write
     # Only runs on tags that matches the make-release action
     if: startsWith(github.ref, 'refs/tags/v')
     steps:
@@ -537,6 +539,20 @@ jobs:
         # We need to deprecate the old package to avoid confusion.
         # Each time we publish a new version, it gets undeprecated.
         run: npm deprecate vectordb "Use @lancedb/lancedb instead."
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          ref: main
+      - name: Update package-lock.json
+        run: |
+          git config user.name 'Lance Release'
+          git config user.email 'lance-dev@lancedb.com'
+          bash ci/update_lockfiles.sh
+      - name: Push new commit
+        uses: ad-m/github-push-action@master
+        with:
+          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
+          branch: main
       - name: Notify Slack Action
         uses: ravsamhq/notify-slack-action@2.3.0
         if: ${{ always() }}
@@ -546,21 +562,3 @@ jobs:
           notification_title: "{workflow} is failing"
         env:
           SLACK_WEBHOOK_URL: ${{ secrets.ACTION_MONITORING_SLACK }}
-
-  update-package-lock:
-    if: startsWith(github.ref, 'refs/tags/v')
-    needs: [release]
-    runs-on: ubuntu-latest
-    permissions:
-      contents: write
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          ref: main
-          token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
-          fetch-depth: 0
-          lfs: true
-      - uses: ./.github/workflows/update_package_lock
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
```
## .github/workflows/python.yml (vendored, 1 line changed)

```diff
@@ -228,6 +228,7 @@ jobs:
       - name: Install lancedb
         run: |
           pip install "pydantic<2"
+          pip install pyarrow==16
           pip install --extra-index-url https://pypi.fury.io/lancedb/ -e .[tests]
           pip install tantivy
       - name: Run tests
```
## .github/workflows/run_tests/action.yml (vendored, 4 lines changed)

```diff
@@ -24,8 +24,8 @@ runs:
     - name: pytest (with integration)
       shell: bash
       if: ${{ inputs.integration == 'true' }}
-      run: pytest -m "not slow" -x -v --durations=30 python/python/tests
+      run: pytest -m "not slow" -vv --durations=30 python/python/tests
     - name: pytest (no integration tests)
       shell: bash
       if: ${{ inputs.integration != 'true' }}
-      run: pytest -m "not slow and not s3_test" -x -v --durations=30 python/python/tests
+      run: pytest -m "not slow and not s3_test" -vv --durations=30 python/python/tests
```
## .github/workflows/rust.yml (vendored, 7 lines changed)

```diff
@@ -40,6 +40,9 @@ jobs:
       with:
         fetch-depth: 0
         lfs: true
+    - uses: actions-rust-lang/setup-rust-toolchain@v1
+      with:
+        components: rustfmt, clippy
     - uses: Swatinem/rust-cache@v2
       with:
         workspaces: rust
@@ -160,8 +163,8 @@ jobs:
     strategy:
       matrix:
         target:
-          - x86_64-pc-windows-msvc
-          - aarch64-pc-windows-msvc
+          - x86_64-pc-windows-msvc
+          - aarch64-pc-windows-msvc
     defaults:
       run:
         working-directory: rust/lancedb
```

(The removed and added target lines are textually identical here; the difference was likely whitespace only, which the mirror stripped.)
## .github/workflows/update_package_lock/action.yml (vendored, deleted, 33 lines)

```diff
@@ -1,33 +0,0 @@
-name: update_package_lock
-description: "Update node's package.lock"
-
-inputs:
-  github_token:
-    required: true
-    description: "github token for the repo"
-
-runs:
-  using: "composite"
-  steps:
-    - uses: actions/setup-node@v3
-      with:
-        node-version: 20
-    - name: Set git configs
-      shell: bash
-      run: |
-        git config user.name 'Lance Release'
-        git config user.email 'lance-dev@lancedb.com'
-    - name: Update package-lock.json file
-      working-directory: ./node
-      run: |
-        npm install
-        git add package-lock.json
-        git commit -m "Updating package-lock.json"
-      shell: bash
-    - name: Push changes
-      if: ${{ inputs.dry_run }} == "false"
-      uses: ad-m/github-push-action@master
-      with:
-        github_token: ${{ inputs.github_token }}
-        branch: main
-        tags: true
```
## .github/workflows/update_package_lock_nodejs/action.yml (deleted, 33 lines; path inferred from the action name and the reference in make-release-commit.yml)

```diff
@@ -1,33 +0,0 @@
-name: update_package_lock_nodejs
-description: "Update nodejs's package.lock"
-
-inputs:
-  github_token:
-    required: true
-    description: "github token for the repo"
-
-runs:
-  using: "composite"
-  steps:
-    - uses: actions/setup-node@v3
-      with:
-        node-version: 20
-    - name: Set git configs
-      shell: bash
-      run: |
-        git config user.name 'Lance Release'
-        git config user.email 'lance-dev@lancedb.com'
-    - name: Update package-lock.json file
-      working-directory: ./nodejs
-      run: |
-        npm install
-        git add package-lock.json
-        git commit -m "Updating package-lock.json"
-      shell: bash
-    - name: Push changes
-      if: ${{ inputs.dry_run }} == "false"
-      uses: ad-m/github-push-action@master
-      with:
-        github_token: ${{ inputs.github_token }}
-        branch: main
-        tags: true
```
## CLAUDE.md (new file, 24 lines)

````markdown
LanceDB is a database designed for retrieval, including vector, full-text, and hybrid search.
It is a wrapper around Lance. There are two backends: local (in-process like SQLite) and
remote (against LanceDB Cloud).

The core of LanceDB is written in Rust. There are bindings in Python, Typescript, and Java.

Project layout:

* `rust/lancedb`: The LanceDB core Rust implementation.
* `python`: The Python bindings, using PyO3.
* `nodejs`: The Typescript bindings, using napi-rs
* `java`: The Java bindings

(`rust/ffi` and `node/` are for a deprecated package. You can ignore them.)

Common commands:

* Check for compiler errors: `cargo check --features remote --tests --examples`
* Run tests: `cargo test --features remote --tests`
* Run specific test: `cargo test --features remote -p <package_name> --test <test_name>`
* Lint: `cargo clippy --features remote --tests --examples`
* Format: `cargo fmt --all`

Before committing changes, run formatting.
````
## Cargo.lock (generated, 2172 lines changed)

Diff suppressed because it is too large.
## Cargo.toml (55 lines changed)

```diff
@@ -21,55 +21,52 @@ categories = ["database-implementations"]
 rust-version = "1.78.0"

 [workspace.dependencies]
-lance = { "version" = "=0.27.0", "features" = ["dynamodb"], tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-io = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-index = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-linalg = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-table = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-testing = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-datafusion = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-encoding = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
+lance = { "version" = "=0.32.0", "features" = ["dynamodb"] }
+lance-io = "=0.32.0"
+lance-index = "=0.32.0"
+lance-linalg = "=0.32.0"
+lance-table = "=0.32.0"
+lance-testing = "=0.32.0"
+lance-datafusion = "=0.32.0"
+lance-encoding = "=0.32.0"
 # Note that this one does not include pyarrow
-arrow = { version = "54.1", optional = false }
-arrow-array = "54.1"
-arrow-data = "54.1"
-arrow-ipc = "54.1"
-arrow-ord = "54.1"
-arrow-schema = "54.1"
-arrow-arith = "54.1"
-arrow-cast = "54.1"
+arrow = { version = "55.1", optional = false }
+arrow-array = "55.1"
+arrow-data = "55.1"
+arrow-ipc = "55.1"
+arrow-ord = "55.1"
+arrow-schema = "55.1"
+arrow-arith = "55.1"
+arrow-cast = "55.1"
 async-trait = "0"
-datafusion = { version = "46.0", default-features = false }
-datafusion-catalog = "46.0"
-datafusion-common = { version = "46.0", default-features = false }
-datafusion-execution = "46.0"
-datafusion-expr = "46.0"
-datafusion-physical-plan = "46.0"
+datafusion = { version = "48.0", default-features = false }
+datafusion-catalog = "48.0"
+datafusion-common = { version = "48.0", default-features = false }
+datafusion-execution = "48.0"
+datafusion-expr = "48.0"
+datafusion-physical-plan = "48.0"
 env_logger = "0.11"
-half = { "version" = "=2.4.1", default-features = false, features = [
+half = { "version" = "2.6.0", default-features = false, features = [
     "num-traits",
 ] }
 futures = "0"
 log = "0.4"
 moka = { version = "0.12", features = ["future"] }
-object_store = "0.11.0"
+object_store = "0.12.0"
 pin-project = "1.0.7"
 snafu = "0.8"
 url = "2"
 num-traits = "0.2"
-rand = "0.8"
+rand = "0.9"
 regex = "1.10"
 lazy_static = "1"
 semver = "1.0.25"

 # Temporary pins to work around downstream issues
 # https://github.com/apache/arrow-rs/commit/2fddf85afcd20110ce783ed5b4cdeb82293da30b
-chrono = "=0.4.39"
+chrono = "=0.4.41"
 # https://github.com/RustCrypto/formats/issues/1684
 base64ct = "=1.6.0"

 # Workaround for: https://github.com/eira-fransham/crunchy/issues/13
 crunchy = "=0.2.2"

 # Workaround for: https://github.com/Lokathor/bytemuck/issues/306
 bytemuck_derive = ">=1.8.1, <1.9.0"
```
## README.md (129 lines changed)

The mirror flattened this side-by-side diff, so old and new README lines appear interleaved below, in the order captured, without removed/added markers:

````
<a href="https://cloud.lancedb.com" target="_blank">
<img src="https://github.com/user-attachments/assets/92dad0a2-2a37-4ce1-b783-0d1b4f30a00c" alt="LanceDB Cloud Public Beta" width="100%" style="max-width: 100%;">
</a>

<div align="center">
<p align="center">

<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://github.com/user-attachments/assets/ac270358-333e-4bea-a132-acefaa94040e">
<source media="(prefers-color-scheme: light)" srcset="https://github.com/user-attachments/assets/b864d814-0d29-4784-8fd9-807297c758c0">
<img alt="LanceDB Logo" src="https://github.com/user-attachments/assets/b864d814-0d29-4784-8fd9-807297c758c0" width=300>
</picture>
[](https://lancedb.com)
[](https://lancedb.com/)
[](https://blog.lancedb.com/)
[](https://discord.gg/zMM32dvNtd)
[](https://twitter.com/lancedb)
[](https://www.linkedin.com/company/lancedb/)

**Search More, Manage Less**

<a href='https://github.com/lancedb/vectordb-recipes/tree/main' target="_blank"><img alt='LanceDB' src='https://img.shields.io/badge/VectorDB_Recipes-100000?style=for-the-badge&logo=LanceDB&logoColor=white&labelColor=645cfb&color=645cfb'/></a>
<a href='https://lancedb.github.io/lancedb/' target="_blank"><img alt='lancdb' src='https://img.shields.io/badge/DOCS-100000?style=for-the-badge&logo=lancdb&logoColor=white&labelColor=645cfb&color=645cfb'/></a>
[](https://blog.lancedb.com/)
[](https://discord.gg/zMM32dvNtd)
[](https://twitter.com/lancedb)
[](https://gurubase.io/g/lancedb)
<img src="docs/src/assets/lancedb.png" alt="LanceDB" width="50%">

</p>
# **The Multimodal AI Lakehouse**

<img max-width="750px" alt="LanceDB Multimodal Search" src="https://github.com/lancedb/lancedb/assets/917119/09c5afc5-7816-4687-bae4-f2ca194426ec">
[**How to Install** ](#how-to-install) ✦ [**Detailed Documentation**](https://lancedb.github.io/lancedb/) ✦ [**Tutorials and Recipes**](https://github.com/lancedb/vectordb-recipes/tree/main) ✦ [**Contributors**](#contributors)

**The ultimate multimodal data platform for AI/ML applications.**

LanceDB is designed for fast, scalable, and production-ready vector search. It is built on top of the Lance columnar format. You can store, index, and search over petabytes of multimodal data and vectors with ease.
LanceDB is a central location where developers can build, train and analyze their AI workloads.

</p>
</div>

<hr />
<br>

LanceDB is an open-source database for vector-search built with persistent storage, which greatly simplifies retrieval, filtering and management of embeddings.
## **Demo: Multimodal Search by Keyword, Vector or with SQL**
<img max-width="750px" alt="LanceDB Multimodal Search" src="https://github.com/lancedb/lancedb/assets/917119/09c5afc5-7816-4687-bae4-f2ca194426ec">

The key features of LanceDB include:
## **Star LanceDB to get updates!**

* Production-scale vector search with no servers to manage.
<details>
<summary>⭐ Click here ⭐ to see how fast we're growing!</summary>
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=lancedb/lancedb&theme=dark&type=Date">
<img width="100%" src="https://api.star-history.com/svg?repos=lancedb/lancedb&theme=dark&type=Date">
</picture>
</details>

* Store, query and filter vectors, metadata and multi-modal data (text, images, videos, point clouds, and more).
## **Key Features**:

* Support for vector similarity search, full-text search and SQL.
- **Fast Vector Search**: Search billions of vectors in milliseconds with state-of-the-art indexing.
- **Comprehensive Search**: Support for vector similarity search, full-text search and SQL.
- **Multimodal Support**: Store, query and filter vectors, metadata and multimodal data (text, images, videos, point clouds, and more).
- **Advanced Features**: Zero-copy, automatic versioning, manage versions of your data without needing extra infrastructure. GPU support in building vector index.

* Native Python and Javascript/Typescript support.
### **Products**:
- **Open Source & Local**: 100% open source, runs locally or in your cloud. No vendor lock-in.
- **Cloud and Enterprise**: Production-scale vector search with no servers to manage. Complete data sovereignty and security.

* Zero-copy, automatic versioning, manage versions of your data without needing extra infrastructure.
### **Ecosystem**:
- **Columnar Storage**: Built on the Lance columnar format for efficient storage and analytics.
- **Seamless Integration**: Python, Node.js, Rust, and REST APIs for easy integration. Native Python and Javascript/Typescript support.
- **Rich Ecosystem**: Integrations with [**LangChain** 🦜️🔗](https://python.langchain.com/docs/integrations/vectorstores/lancedb/), [**LlamaIndex** 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way.

* GPU support in building vector index(*).
## **How to Install**:

* Ecosystem integrations with [LangChain 🦜️🔗](https://python.langchain.com/docs/integrations/vectorstores/lancedb/), [LlamaIndex 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way.
Follow the [Quickstart](https://lancedb.github.io/lancedb/basic/) doc to set up LanceDB locally.

LanceDB's core is written in Rust 🦀 and is built using <a href="https://github.com/lancedb/lance">Lance</a>, an open-source columnar format designed for performant ML workloads.
**API & SDK:** We also support Python, Typescript and Rust SDKs

## Quick Start
| Interface | Documentation |
|-----------|---------------|
| Python SDK | https://lancedb.github.io/lancedb/python/python/ |
| Typescript SDK | https://lancedb.github.io/lancedb/js/globals/ |
| Rust SDK | https://docs.rs/lancedb/latest/lancedb/index.html |
| REST API | https://docs.lancedb.com/api-reference/introduction |

**Javascript**
```shell
npm install @lancedb/lancedb
```
## **Join Us and Contribute**

```javascript
import * as lancedb from "@lancedb/lancedb";
We welcome contributions from everyone! Whether you're a developer, researcher, or just someone who wants to help out.

const db = await lancedb.connect("data/sample-lancedb");
const table = await db.createTable("vectors", [
  { id: 1, vector: [0.1, 0.2], item: "foo", price: 10 },
  { id: 2, vector: [1.1, 1.2], item: "bar", price: 50 },
], {mode: 'overwrite'});
If you have any suggestions or feature requests, please feel free to open an issue on GitHub or discuss it on our [**Discord**](https://discord.gg/G5DcmnZWKB) server.

[**Check out the GitHub Issues**](https://github.com/lancedb/lancedb/issues) if you would like to work on the features that are planned for the future. If you have any suggestions or feature requests, please feel free to open an issue on GitHub.

## **Contributors**

<a href="https://github.com/lancedb/lancedb/graphs/contributors">
<img src="https://contrib.rocks/image?repo=lancedb/lancedb" />
</a>


const query = table.vectorSearch([0.1, 0.3]).limit(2);
const results = await query.toArray();
## **Stay in Touch With Us**
<div align="center">

// You can also search for rows by specific criteria without involving a vector search.
const rowsByCriteria = await table.query().where("price >= 10").toArray();
```
</br>

**Python**
```shell
pip install lancedb
```
[](https://lancedb.com/)
[](https://blog.lancedb.com/)
[](https://discord.gg/zMM32dvNtd)
[](https://twitter.com/lancedb)
[](https://www.linkedin.com/company/lancedb/)

```python
import lancedb

uri = "data/sample-lancedb"
db = lancedb.connect(uri)
table = db.create_table("my_table",
                        data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
                              {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}])
result = table.search([100, 100]).limit(2).to_pandas()
```

## Blogs, Tutorials & Videos
* 📈 <a href="https://blog.lancedb.com/benchmarking-random-access-in-lance/">2000x better performance with Lance over Parquet</a>
* 🤖 <a href="https://github.com/lancedb/vectordb-recipes/tree/main/examples/Youtube-Search-QA-Bot">Build a question and answer bot with LanceDB</a>
</div>
````
## ci/set_lance_version.py (new file, 188 lines)

```python
import argparse
import sys
import json


def run_command(command: str) -> str:
    """
    Run a shell command and return stdout as a string.
    If exit code is not 0, raise an exception with the stderr output.
    """
    import subprocess

    result = subprocess.run(command, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        raise Exception(f"Command failed with error: {result.stderr.strip()}")
    return result.stdout.strip()


def get_latest_stable_version() -> str:
    version_line = run_command("cargo info lance | grep '^version:'")
    version = version_line.split(" ")[1].strip()
    return version


def get_latest_preview_version() -> str:
    lance_tags = run_command(
        "git ls-remote --tags https://github.com/lancedb/lance.git | grep 'refs/tags/v[0-9beta.-]\\+$'"
    ).splitlines()
    lance_tags = (
        tag.split("refs/tags/")[1]
        for tag in lance_tags
        if "refs/tags/" in tag and "beta" in tag
    )
    from packaging.version import Version

    latest = max(
        (tag[1:] for tag in lance_tags if tag.startswith("v")), key=lambda t: Version(t)
    )
    return str(latest)


def extract_features(line: str) -> list:
    """
    Extracts the features from a line in Cargo.toml.
    Example: 'lance = { "version" = "=0.29.0", "features" = ["dynamodb"] }'
    Returns: ['dynamodb']
    """
    import re

    match = re.search(r'"features"\s*=\s*\[\s*(.*?)\s*\]', line, re.DOTALL)
    if match:
        features_str = match.group(1)
        return [f.strip('"') for f in features_str.split(",") if len(f) > 0]
    return []


def update_cargo_toml(line_updater):
    """
    Updates the Cargo.toml file by applying the line_updater function to each line.
    The line_updater function should take a line as input and return the updated line.
    """
    with open("Cargo.toml", "r") as f:
        lines = f.readlines()

    new_lines = []
    lance_line = ""
    is_parsing_lance_line = False
    for line in lines:
        if line.startswith("lance"):
            # Update the line using the provided function
            if line.strip().endswith("}"):
                new_lines.append(line_updater(line))
            else:
                lance_line = line
                is_parsing_lance_line = True
        elif is_parsing_lance_line:
            lance_line += line
            if line.strip().endswith("}"):
                new_lines.append(line_updater(lance_line))
                lance_line = ""
                is_parsing_lance_line = False
            else:
                print("doesn't end with }:", line)
        else:
            # Keep the line unchanged
            new_lines.append(line)

    with open("Cargo.toml", "w") as f:
        f.writelines(new_lines)


def set_stable_version(version: str):
    """
    Sets lines to
    lance = { "version" = "=0.29.0", "features" = ["dynamodb"] }
    lance-io = "=0.29.0"
    ...
    """

    def line_updater(line: str) -> str:
        package_name = line.split("=", maxsplit=1)[0].strip()
        features = extract_features(line)
        if features:
            return f'{package_name} = {{ "version" = "={version}", "features" = {json.dumps(features)} }}\n'
        else:
            return f'{package_name} = "={version}"\n'

    update_cargo_toml(line_updater)


def set_preview_version(version: str):
    """
    Sets lines to
    lance = { "version" = "=0.29.0", "features" = ["dynamodb"], tag = "v0.29.0-beta.2", git="https://github.com/lancedb/lance.git" }
    lance-io = { version = "=0.29.0", tag = "v0.29.0-beta.2", git="https://github.com/lancedb/lance.git" }
    ...
    """

    def line_updater(line: str) -> str:
        package_name = line.split("=", maxsplit=1)[0].strip()
        features = extract_features(line)
        base_version = version.split("-")[0]  # Get the base version without beta suffix
        if features:
            return f'{package_name} = {{ "version" = "={base_version}", "features" = {json.dumps(features)}, "tag" = "v{version}", "git" = "https://github.com/lancedb/lance.git" }}\n'
        else:
            return f'{package_name} = {{ "version" = "={base_version}", "tag" = "v{version}", "git" = "https://github.com/lancedb/lance.git" }}\n'

    update_cargo_toml(line_updater)


def set_local_version():
    """
    Sets lines to
    lance = { path = "../lance/rust/lance", features = ["dynamodb"] }
    lance-io = { path = "../lance/rust/lance-io" }
    ...
    """

    def line_updater(line: str) -> str:
        package_name = line.split("=", maxsplit=1)[0].strip()
        features = extract_features(line)
        if features:
            return f'{package_name} = {{ "path" = "../lance/rust/{package_name}", "features" = {json.dumps(features)} }}\n'
        else:
            return f'{package_name} = {{ "path" = "../lance/rust/{package_name}" }}\n'

    update_cargo_toml(line_updater)


parser = argparse.ArgumentParser(description="Set the version of the Lance package.")
parser.add_argument(
    "version",
    type=str,
    help="The version to set for the Lance package. Use 'stable' for the latest stable version, 'preview' for latest preview version, or a specific version number (e.g., '0.1.0'). You can also specify 'local' to use a local path.",
)
args = parser.parse_args()

if args.version == "stable":
    latest_stable_version = get_latest_stable_version()
    print(
        f"Found latest stable version: \033[1mv{latest_stable_version}\033[0m",
        file=sys.stderr,
    )
    set_stable_version(latest_stable_version)
elif args.version == "preview":
    latest_preview_version = get_latest_preview_version()
    print(
        f"Found latest preview version: \033[1mv{latest_preview_version}\033[0m",
        file=sys.stderr,
    )
    set_preview_version(latest_preview_version)
elif args.version == "local":
    set_local_version()
else:
    # Parse the version number.
    version = args.version
    # Ignore initial v if present.
    if version.startswith("v"):
        version = version[1:]

    if "beta" in version:
        set_preview_version(version)
    else:
        set_stable_version(version)

print("Updating lockfiles...", file=sys.stderr, end="")
run_command("cargo metadata > /dev/null")
print(" done.", file=sys.stderr)
```
## ci/update_lockfiles.sh (new executable file, 30 lines)

```bash
#!/usr/bin/env bash
set -euo pipefail

AMEND=false

for arg in "$@"; do
    if [[ "$arg" == "--amend" ]]; then
        AMEND=true
    fi
done

# This updates the lockfile without building
cargo metadata --quiet > /dev/null

pushd nodejs || exit 1
npm install --package-lock-only --silent
popd
pushd node || exit 1
npm install --package-lock-only --silent
popd

if git diff --quiet --exit-code; then
    echo "No lockfile changes to commit; skipping amend."
elif $AMEND; then
    git add Cargo.lock nodejs/package-lock.json node/package-lock.json
    git commit --amend --no-edit
else
    git add Cargo.lock nodejs/package-lock.json node/package-lock.json
    git commit -m "Update lockfiles"
fi
```
## Docs navigation (likely mkdocs.yml; file header not captured)

```diff
@@ -193,6 +193,7 @@ nav:
       - Pandas and PyArrow: python/pandas_and_pyarrow.md
       - Polars: python/polars_arrow.md
       - DuckDB: python/duckdb.md
+      - Datafusion: python/datafusion.md
       - LangChain:
         - LangChain 🔗: integrations/langchain.md
         - LangChain demo: notebooks/langchain_demo.ipynb
@@ -205,6 +206,7 @@ nav:
       - PromptTools: integrations/prompttools.md
       - dlt: integrations/dlt.md
       - phidata: integrations/phidata.md
+      - Genkit: integrations/genkit.md
   - 🎯 Examples:
       - Overview: examples/index.md
       - 🐍 Python:
@@ -247,6 +249,7 @@ nav:
       - Data management: concepts/data_management.md
   - Guides:
       - Working with tables: guides/tables.md
+      - Working with SQL: guides/sql_querying.md
       - Building an ANN index: ann_indexes.md
       - Vector Search: search.md
       - Full-text search (native): fts.md
@@ -323,6 +326,7 @@ nav:
       - Pandas and PyArrow: python/pandas_and_pyarrow.md
       - Polars: python/polars_arrow.md
       - DuckDB: python/duckdb.md
+      - Datafusion: python/datafusion.md
       - LangChain 🦜️🔗↗: integrations/langchain.md
       - LangChain.js 🦜️🔗↗: https://js.langchain.com/docs/integrations/vectorstores/lancedb
       - LlamaIndex 🦙↗: integrations/llamaIndex.md
@@ -331,6 +335,7 @@ nav:
       - PromptTools: integrations/prompttools.md
       - dlt: integrations/dlt.md
       - phidata: integrations/phidata.md
+      - Genkit: integrations/genkit.md
   - Examples:
       - examples/index.md
       - 🐍 Python:
```
## docs/overrides/partials/main.html (new file, 5 lines)

```html
{% extends "base.html" %}

{% block announce %}
📚 Starting June 1st, 2025, please use <a href="https://lancedb.github.io/documentation" target="_blank" rel="noopener noreferrer">lancedb.github.io/documentation</a> for the latest docs.
{% endblock %}
```
## docs/package-lock.json (generated, 12 lines changed)

```diff
@@ -19,7 +19,7 @@
     },
     "../node": {
       "name": "vectordb",
-      "version": "0.12.0",
+      "version": "0.21.2-beta.0",
       "cpu": [
         "x64",
         "arm64"
@@ -65,11 +65,11 @@
         "uuid": "^9.0.0"
       },
       "optionalDependencies": {
-        "@lancedb/vectordb-darwin-arm64": "0.12.0",
-        "@lancedb/vectordb-darwin-x64": "0.12.0",
-        "@lancedb/vectordb-linux-arm64-gnu": "0.12.0",
-        "@lancedb/vectordb-linux-x64-gnu": "0.12.0",
-        "@lancedb/vectordb-win32-x64-msvc": "0.12.0"
+        "@lancedb/vectordb-darwin-arm64": "0.21.2-beta.0",
+        "@lancedb/vectordb-darwin-x64": "0.21.2-beta.0",
+        "@lancedb/vectordb-linux-arm64-gnu": "0.21.2-beta.0",
+        "@lancedb/vectordb-linux-x64-gnu": "0.21.2-beta.0",
+        "@lancedb/vectordb-win32-x64-msvc": "0.21.2-beta.0"
       },
       "peerDependencies": {
         "@apache-arrow/ts": "^14.0.2",
```
## ANN index guide (likely docs/src/ann_indexes.md; file header not captured)

```diff
@@ -291,7 +291,7 @@ Product quantization can lead to approximately `16 * sizeof(float32) / 1 = 64` t

 `num_partitions` is used to decide how many partitions the first level `IVF` index uses.
 Higher number of partitions could lead to more efficient I/O during queries and better accuracy, but it takes much more time to train.
-On `SIFT-1M` dataset, our benchmark shows that keeping each partition 1K-4K rows lead to a good latency / recall.
+On `SIFT-1M` dataset, our benchmark shows that keeping each partition 4K-8K rows lead to a good latency / recall.

 `num_sub_vectors` specifies how many Product Quantization (PQ) short codes to generate on each vector. The number should be a factor of the vector dimension. Because
 PQ is a lossy compression of the original vector, a higher `num_sub_vectors` usually results in
```
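To make the two parameters concrete, here is a minimal sketch of building an IVF-PQ index with the Python SDK. The table name, partition count, and sub-vector count are illustrative choices, not the benchmark's recommendation above:

```python
import numpy as np
import lancedb

db = lancedb.connect("data/sample-lancedb")

# Toy data: 10_000 rows of 128-dim vectors. With 256 partitions that is
# only ~39 rows per partition; real datasets would aim for the 4K-8K
# rows-per-partition range the guide describes.
data = [{"vector": np.random.rand(128).tolist(), "id": i} for i in range(10_000)]
table = db.create_table("ivf_pq_demo", data=data)

# num_sub_vectors must evenly divide the vector dimension:
# 128 dims / 16 sub-vectors = 8 floats per PQ code.
table.create_index(
    metric="L2",
    num_partitions=256,
    num_sub_vectors=16,
)
```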
## docs/src/assets/hero-header.png (new binary file, 1.7 MiB; not shown)

## docs/src/assets/lancedb.png (new binary file, 40 KiB; not shown)
## docs/src/guides/sql_querying.md (new file, 60 lines)

````markdown
# SQL Querying

You can use DuckDB and Apache Datafusion to query your LanceDB tables using SQL.
This guide will show how to query Lance tables using both.

We will re-use the dataset [created previously](./tables.md):

```python
import lancedb

db = lancedb.connect("data/sample-lancedb")
data = [
    {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
    {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}
]
table = db.create_table("pd_table", data=data)
```

## Querying a LanceDB Table with DuckDB

The `to_lance` method converts the LanceDB table to a `LanceDataset`, which is accessible to DuckDB through the Arrow compatibility layer.
To query the resulting Lance dataset in DuckDB, all you need to do is reference the dataset by the same name in your SQL query.

```python
import duckdb

arrow_table = table.to_lance()

duckdb.query("SELECT * FROM arrow_table")
```

| vector      | item | price |
| ----------- | ---- | ----- |
| [3.1, 4.1]  | foo  | 10.0  |
| [5.9, 26.5] | bar  | 20.0  |

## Querying a LanceDB Table with Apache Datafusion

Have the required imports in place before doing any querying.

=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
    --8<-- "python/python/tests/docs/test_guide_tables.py:import-session-context"
    --8<-- "python/python/tests/docs/test_guide_tables.py:import-ffi-dataset"
    ```

Register the table created with the Datafusion session context.

=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_guide_tables.py:lance_sql_basic"
    ```

| vector      | item | price |
| ----------- | ---- | ----- |
| [3.1, 4.1]  | foo  | 10.0  |
| [5.9, 26.5] | bar  | 20.0  |
````
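The snippet includes above are not expanded in this view, so here is a rough sketch of what the Datafusion registration step looks like. It assumes the `FFILanceTableProvider` exported by the `lance` Python package, so treat the exact import and constructor arguments as illustrative for your installed version:

```python
import lancedb
from datafusion import SessionContext
from lance import FFILanceTableProvider  # assumed export; check your lance version

db = lancedb.connect("data/sample-lancedb")
table = db.open_table("pd_table")

# Wrap the underlying Lance dataset as a DataFusion table provider.
ctx = SessionContext()
provider = FFILanceTableProvider(
    table.to_lance(), with_row_id=True, with_row_addr=True
)
ctx.register_table_provider("pd_table", provider)

# Standard SQL over the Lance data.
ctx.sql("SELECT item, price FROM pd_table WHERE price >= 10").show()
```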
## Table update examples (likely docs/src/guides/tables.md; file header not captured)

The removed and added lines in these two hunks are textually identical; the change was likely indentation only, which the mirror stripped:

````diff
@@ -765,7 +765,7 @@ This can be used to update zero to all rows depending on how many rows match the
     ];
     const tbl = await db.createTable("my_table", data)

-    await tbl.update({
+    await tbl.update({
       values: { vector: [10, 10] },
       where: "x = 2"
     });
@@ -787,9 +787,9 @@ This can be used to update zero to all rows depending on how many rows match the
     ];
     const tbl = await db.createTable("my_table", data)

-    await tbl.update({
-      where: "x = 2",
-      values: { vector: [10, 10] }
+    await tbl.update({
+      where: "x = 2",
+      values: { vector: [10, 10] }
     });
    ```
````
## docs/src/integrations/genkit.md (new file, 183 lines)

````markdown
### genkitx-lancedb

This is a LanceDB plugin for the Genkit framework. It allows you to use LanceDB for ingesting and retrieving data with Genkit.



### Installation
```bash
pnpm install genkitx-lancedb
```

### Usage

Adding the LanceDB plugin to your Genkit instance:

```ts
import { lancedbIndexerRef, lancedb, lancedbRetrieverRef, WriteMode } from 'genkitx-lancedb';
import { textEmbedding004, vertexAI } from '@genkit-ai/vertexai';
import { gemini } from '@genkit-ai/vertexai';
import { z, genkit } from 'genkit';
import { Document } from 'genkit/retriever';
import { chunk } from 'llm-chunk';
import { readFile } from 'fs/promises';
import path from 'path';
import pdf from 'pdf-parse/lib/pdf-parse';

const ai = genkit({
  plugins: [
    // vertexAI provides the textEmbedding004 embedder
    vertexAI(),

    // the local vector store requires an embedder to translate from text to vector
    lancedb([
      {
        dbUri: '.db', // optional lancedb uri, default to .db
        tableName: 'table', // optional table name, default to table
        embedder: textEmbedding004,
      },
    ]),
  ],
});
```

You can run this app with the following command:
```bash
genkit start -- tsx --watch src/index.ts
```

This adds LanceDB as a retriever and indexer to the Genkit instance. You can see it in the GUI view:
<img width="1710" alt="Screenshot 2025-05-11 at 7 21 05 PM" src="https://github.com/user-attachments/assets/e752f7f4-785b-4797-a11e-72ab06a531b7" />

**Testing retrieval on a sample table**
Let's see the raw retrieval results.

<img width="1710" alt="Screenshot 2025-05-11 at 7 21 05 PM" src="https://github.com/user-attachments/assets/b8d356ed-8421-4790-8fc0-d6af563b9657" />
On running this query, you'll see 5 results fetched from the LanceDB table, where each result looks something like this:
<img width="1417" alt="Screenshot 2025-05-11 at 7 21 18 PM" src="https://github.com/user-attachments/assets/77429525-36e2-4da6-a694-e58c1cf9eb83" />

## Creating a custom RAG flow

Now that we've seen how you can use LanceDB in a Genkit pipeline, let's refine the flow and create a RAG. A RAG flow consists of an indexer and a retriever whose outputs are post-processed and fed into an LLM for the final response.

### Creating custom indexer flows
You can also create custom indexer flows, utilizing more options and features provided by LanceDB.

```ts
export const menuPdfIndexer = lancedbIndexerRef({
  // Using all defaults, for dbUri, tableName, and embedder, etc
});

const chunkingConfig = {
  minLength: 1000,
  maxLength: 2000,
  splitter: 'sentence',
  overlap: 100,
  delimiters: '',
} as any;


async function extractTextFromPdf(filePath: string) {
  const pdfFile = path.resolve(filePath);
  const dataBuffer = await readFile(pdfFile);
  const data = await pdf(dataBuffer);
  return data.text;
}

export const indexMenu = ai.defineFlow(
  {
    name: 'indexMenu',
    inputSchema: z.string().describe('PDF file path'),
    outputSchema: z.void(),
  },
  async (filePath: string) => {
    filePath = path.resolve(filePath);

    // Read the pdf.
    const pdfTxt = await ai.run('extract-text', () =>
      extractTextFromPdf(filePath)
    );

    // Divide the pdf text into segments.
    const chunks = await ai.run('chunk-it', async () =>
      chunk(pdfTxt, chunkingConfig)
    );

    // Convert chunks of text into documents to store in the index.
    const documents = chunks.map((text) => {
      return Document.fromText(text, { filePath });
    });

    // Add documents to the index.
    await ai.index({
      indexer: menuPdfIndexer,
      documents,
      options: {
        writeMode: WriteMode.Overwrite,
      } as any
    });
  }
);
```

<img width="1316" alt="Screenshot 2025-05-11 at 8 35 56 PM" src="https://github.com/user-attachments/assets/e2a20ce4-d1d0-4fa2-9a84-f2cc26e3a29f" />

In your console, you can see the logs:

<img width="511" alt="Screenshot 2025-05-11 at 7 19 14 PM" src="https://github.com/user-attachments/assets/243f26c5-ed38-40b6-b661-002f40f0423a" />

### Creating custom retriever flows
You can also create custom retriever flows, utilizing more options and features provided by LanceDB.
```ts
export const menuRetriever = lancedbRetrieverRef({
  tableName: "table", // Use the same table name as the indexer.
  displayName: "Menu", // Use a custom display name.
});

export const menuQAFlow = ai.defineFlow(
  { name: "Menu", inputSchema: z.string(), outputSchema: z.string() },
  async (input: string) => {
    // retrieve relevant documents
    const docs = await ai.retrieve({
      retriever: menuRetriever,
      query: input,
      options: {
        k: 3,
      },
    });

    const extractedContent = docs.map(doc => {
      if (doc.content && Array.isArray(doc.content) && doc.content.length > 0) {
        if (doc.content[0].media && doc.content[0].media.url) {
          return doc.content[0].media.url;
        }
      }
      return "No content found";
    });

    console.log("Extracted content:", extractedContent);

    const { text } = await ai.generate({
      model: gemini('gemini-2.0-flash'),
      prompt: `
You are acting as a helpful AI assistant that can answer
questions about the food available on the menu at Genkit Grub Pub.

Use only the context provided to answer the question.
If you don't know, do not make up an answer.
Do not add or change items on the menu.

Context:
${extractedContent.join('\n\n')}

Question: ${input}`,
      docs,
    });

    return text;
  }
);
```
Now, using our retrieval flow, we can ask questions about the ingested PDF:
<img width="1306" alt="Screenshot 2025-05-11 at 7 18 45 PM" src="https://github.com/user-attachments/assets/86c66b13-7c12-4d5f-9d81-ae36bfb1c346" />
````
## docs/src/js/classes/BooleanQuery.md (new file, 53 lines)

````markdown
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / BooleanQuery

# Class: BooleanQuery

Represents a full-text query interface.
This interface defines the structure and behavior for full-text queries,
including methods to retrieve the query type and convert the query to a dictionary format.

## Implements

- [`FullTextQuery`](../interfaces/FullTextQuery.md)

## Constructors

### new BooleanQuery()

```ts
new BooleanQuery(queries): BooleanQuery
```

Creates an instance of BooleanQuery.

#### Parameters

* **queries**: [[`Occur`](../enumerations/Occur.md), [`FullTextQuery`](../interfaces/FullTextQuery.md)][]
An array of (Occur, FullTextQuery) pairs to combine.
Occur specifies whether the query must match, or should match.

#### Returns

[`BooleanQuery`](BooleanQuery.md)

## Methods

### queryType()

```ts
queryType(): FullTextQueryType
```

The type of the full-text query.

#### Returns

[`FullTextQueryType`](../enumerations/FullTextQueryType.md)

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)
````
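A short usage sketch of combining full-text queries. It leans on the `MatchQuery` class and `Occur` enum documented alongside this page; the table and column names are illustrative:

```ts
import { BooleanQuery, MatchQuery, Occur } from "@lancedb/lancedb";

// Documents must mention "pasta" and should (but need not) mention "vegan".
const query = new BooleanQuery([
  [Occur.Must, new MatchQuery("pasta", "description")],
  [Occur.Should, new MatchQuery("vegan", "description")],
]);

const results = await table.search(query).limit(10).toArray();
```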
## docs/src/js/classes/MatchQuery.md (file header not captured)

```diff
@@ -40,6 +40,8 @@ Creates an instance of MatchQuery.
   - `boost`: The boost factor for the query (default is 1.0).
   - `fuzziness`: The fuzziness level for the query (default is 0).
   - `maxExpansions`: The maximum number of terms to consider for fuzzy matching (default is 50).
+  - `operator`: The logical operator to use for combining terms in the query (default is "OR").
+  - `prefixLength`: The number of beginning characters being unchanged for fuzzy matching.

 * **options.boost?**: `number`
@@ -47,6 +49,10 @@ Creates an instance of MatchQuery.

 * **options.maxExpansions?**: `number`

+* **options.operator?**: [`Operator`](../enumerations/Operator.md)
+
+* **options.prefixLength?**: `number`
+
 #### Returns

 [`MatchQuery`](MatchQuery.md)
```
## docs/src/js/classes/MergeInsertBuilder.md (file header not captured)

````diff
@@ -33,20 +33,22 @@ Construct a MergeInsertBuilder. __Internal use only.__
 ### execute()

 ```ts
-execute(data): Promise<void>
+execute(data, execOptions?): Promise<MergeResult>
 ```

 Executes the merge insert operation

-Nothing is returned but the `Table` is updated
-
 #### Parameters

 * **data**: [`Data`](../type-aliases/Data.md)

+* **execOptions?**: `Partial`<[`WriteExecutionOptions`](../interfaces/WriteExecutionOptions.md)>
+
 #### Returns

-`Promise`<`void`>
+`Promise`<[`MergeResult`](../interfaces/MergeResult.md)>
+
+the merge result

 ***
````
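For context, a hedged sketch of how the updated `execute` might be used; the builder methods shown and the `version` field on `MergeResult` are assumptions based on the surrounding docs:

```ts
const result = await table
  .mergeInsert("id")              // match rows on the "id" column
  .whenMatchedUpdateAll()         // update rows that already exist
  .whenNotMatchedInsertAll()      // insert rows that don't
  .execute([{ id: 1, vector: [0.1, 0.2], item: "foo" }]);

// execute now resolves to a MergeResult instead of void,
// carrying the new table version.
console.log(result.version);
```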
## docs/src/js/classes/MultiMatchQuery.md (file header not captured)

```diff
@@ -38,9 +38,12 @@ Creates an instance of MultiMatchQuery.
 * **options?**
   Optional parameters for the multi-match query.
   - `boosts`: An array of boost factors for each column (default is 1.0 for all).
+  - `operator`: The logical operator to use for combining terms in the query (default is "OR").

 * **options.boosts?**: `number`[]

+* **options.operator?**: [`Operator`](../enumerations/Operator.md)
+
 #### Returns

 [`MultiMatchQuery`](MultiMatchQuery.md)
```
## docs/src/js/classes/PhraseQuery.md (file header not captured)

````diff
@@ -19,7 +19,10 @@ including methods to retrieve the query type and convert the query to a dictiona
 ### new PhraseQuery()

 ```ts
-new PhraseQuery(query, column): PhraseQuery
+new PhraseQuery(
+   query,
+   column,
+   options?): PhraseQuery
 ```

 Creates an instance of `PhraseQuery`.
@@ -32,6 +35,12 @@ Creates an instance of `PhraseQuery`.
 * **column**: `string`
   The name of the column to search within.

+* **options?**
+  Optional parameters for the phrase query.
+  - `slop`: The maximum number of intervening unmatched positions allowed between words in the phrase (default is 0).
+
+* **options.slop?**: `number`
+
 #### Returns

 [`PhraseQuery`](PhraseQuery.md)
````
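A quick sketch of the new `slop` option; the table and column names are illustrative:

```ts
import { PhraseQuery } from "@lancedb/lancedb";

// With slop = 1, the phrase "quick fox" also matches "quick brown fox":
// one unmatched word may sit between the phrase terms.
const query = new PhraseQuery("quick fox", "text", { slop: 1 });
const hits = await table.search(query).limit(5).toArray();
```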
## docs/src/js/classes/Session.md (new file, 84 lines)

````markdown
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / Session

# Class: Session

A session for managing caches and object stores across LanceDB operations.

Sessions allow you to configure cache sizes for index and metadata caches,
which can significantly impact performance for large datasets.

## Constructors

### new Session()

```ts
new Session(indexCacheSizeBytes?, metadataCacheSizeBytes?): Session
```

Create a new session with custom cache sizes.

# Parameters

- `index_cache_size_bytes`: The size of the index cache in bytes.
  Defaults to 6GB if not specified.
- `metadata_cache_size_bytes`: The size of the metadata cache in bytes.
  Defaults to 1GB if not specified.

#### Parameters

* **indexCacheSizeBytes?**: `null` \| `bigint`

* **metadataCacheSizeBytes?**: `null` \| `bigint`

#### Returns

[`Session`](Session.md)

## Methods

### approxNumItems()

```ts
approxNumItems(): number
```

Get the approximate number of items cached in the session.

#### Returns

`number`

***

### sizeBytes()

```ts
sizeBytes(): bigint
```

Get the current size of the session caches in bytes.

#### Returns

`bigint`

***

### default()

```ts
static default(): Session
```

Create a session with default cache sizes.

This is equivalent to creating a session with 6GB index cache
and 1GB metadata cache.

#### Returns

[`Session`](Session.md)
````
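A hedged usage sketch. Whether `connect` accepts a `session` option depends on your SDK version, so treat the connection line as an assumption:

```ts
import * as lancedb from "@lancedb/lancedb";
import { Session } from "@lancedb/lancedb";

// 8 GiB index cache, 2 GiB metadata cache (illustrative sizes).
const session = new Session(
  8n * 1024n * 1024n * 1024n,
  2n * 1024n * 1024n * 1024n,
);

// Assumption: recent versions accept a session in the connection options.
const db = await lancedb.connect("data/sample-lancedb", { session });

console.log(`cache: ${session.sizeBytes()} bytes, ~${session.approxNumItems()} items`);
```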
@@ -40,7 +40,7 @@ Returns the name of the table

### add()

```ts
abstract add(data, options?): Promise<void>
abstract add(data, options?): Promise<AddResult>
```

Insert records into this Table.

@@ -54,14 +54,17 @@ Insert records into this Table.

#### Returns

`Promise`<`void`>
`Promise`<[`AddResult`](../interfaces/AddResult.md)>

A promise that resolves to an object
containing the new version number of the table

***

### addColumns()

```ts
abstract addColumns(newColumnTransforms): Promise<void>
abstract addColumns(newColumnTransforms): Promise<AddColumnsResult>
```

Add new columns with defined values.

@@ -76,14 +79,17 @@ Add new columns with defined values.

#### Returns

`Promise`<`void`>
`Promise`<[`AddColumnsResult`](../interfaces/AddColumnsResult.md)>

A promise that resolves to an object
containing the new version number of the table after adding the columns.

***

### alterColumns()

```ts
abstract alterColumns(columnAlterations): Promise<void>
abstract alterColumns(columnAlterations): Promise<AlterColumnsResult>
```

Alter the name or nullability of columns.

@@ -96,7 +102,10 @@ Alter the name or nullability of columns.

#### Returns

`Promise`<`void`>
`Promise`<[`AlterColumnsResult`](../interfaces/AlterColumnsResult.md)>

A promise that resolves to an object
containing the new version number of the table after altering the columns.

***

@@ -252,7 +261,7 @@ await table.createIndex("my_float_col");

### delete()

```ts
abstract delete(predicate): Promise<void>
abstract delete(predicate): Promise<DeleteResult>
```

Delete the rows that satisfy the predicate.

@@ -263,7 +272,10 @@ Delete the rows that satisfy the predicate.

#### Returns

`Promise`<`void`>
`Promise`<[`DeleteResult`](../interfaces/DeleteResult.md)>

A promise that resolves to an object
containing the new version number of the table

***

@@ -284,7 +296,7 @@ Return a brief description of the table

### dropColumns()

```ts
abstract dropColumns(columnNames): Promise<void>
abstract dropColumns(columnNames): Promise<DropColumnsResult>
```

Drop one or more columns from the dataset

@@ -303,7 +315,10 @@ then call ``cleanup_files`` to remove the old files.

#### Returns

`Promise`<`void`>
`Promise`<[`DropColumnsResult`](../interfaces/DropColumnsResult.md)>

A promise that resolves to an object
containing the new version number of the table after dropping the columns.

***
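Taken together, the mutating `Table` methods above now resolve to small result objects instead of `void`. A minimal sketch (assuming `table` is an open table; the data is hypothetical):

```ts
// Each write produces a new table version, which the result object reports.
const { version: afterAdd } = await table.add([{ id: 1, vector: [0.1, 0.2] }]);
const { version: afterDelete } = await table.delete("id = 1");
console.log(`add -> version ${afterAdd}, delete -> version ${afterDelete}`);
```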
@@ -597,7 +612,7 @@ of the given query

#### Parameters

* **query**: `string` \| [`IntoVector`](../type-aliases/IntoVector.md) \| [`FullTextQuery`](../interfaces/FullTextQuery.md)
* **query**: `string` \| [`IntoVector`](../type-aliases/IntoVector.md) \| [`MultiVector`](../type-aliases/MultiVector.md) \| [`FullTextQuery`](../interfaces/FullTextQuery.md)

the query, a vector or string

* **queryType?**: `string`

@@ -678,7 +693,7 @@ Return the table as an arrow table

#### update(opts)

```ts
abstract update(opts): Promise<void>
abstract update(opts): Promise<UpdateResult>
```

Update existing records in the Table

@@ -689,7 +704,10 @@ Update existing records in the Table

##### Returns

`Promise`<`void`>
`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>

A promise that resolves to an object containing
the number of rows updated and the new version number

##### Example

@@ -700,7 +718,7 @@ table.update({where:"x = 2", values:{"vector": [10, 10]}})

#### update(opts)

```ts
abstract update(opts): Promise<void>
abstract update(opts): Promise<UpdateResult>
```

Update existing records in the Table

@@ -711,7 +729,10 @@ Update existing records in the Table

##### Returns

`Promise`<`void`>
`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>

A promise that resolves to an object containing
the number of rows updated and the new version number

##### Example

@@ -722,7 +743,7 @@ table.update({where:"x = 2", valuesSql:{"x": "x + 1"}})

#### update(updates, options)

```ts
abstract update(updates, options?): Promise<void>
abstract update(updates, options?): Promise<UpdateResult>
```

Update existing records in the Table

@@ -745,10 +766,6 @@ repeatedly calling this method.

* **updates**: `Record`<`string`, `string`> \| `Map`<`string`, `string`>

the columns to update

Keys in the map should specify the name of the column to update.
Values in the map provide the new value of the column. These can
be SQL literal strings (e.g. "7" or "'foo'") or they can be expressions
based on the row being updated (e.g. "my_col + 1")

* **options?**: `Partial`<[`UpdateOptions`](../interfaces/UpdateOptions.md)>

additional options to control

@@ -756,7 +773,15 @@ repeatedly calling this method.

##### Returns

`Promise`<`void`>
`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>

A promise that resolves to an object
containing the number of rows updated and the new version number

Keys in the map should specify the name of the column to update.
Values in the map provide the new value of the column. These can
be SQL literal strings (e.g. "7" or "'foo'") or they can be expressions
based on the row being updated (e.g. "my_col + 1")

***

@@ -774,7 +799,7 @@ by `query`.

#### Parameters

* **vector**: [`IntoVector`](../type-aliases/IntoVector.md)
* **vector**: [`IntoVector`](../type-aliases/IntoVector.md) \| [`MultiVector`](../type-aliases/MultiVector.md)

#### Returns
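A minimal sketch of the new return value, based on the `update` example above:

```ts
// update() now resolves to an UpdateResult instead of void.
const res = await table.update({ where: "x = 2", valuesSql: { x: "x + 1" } });
console.log(`${res.rowsUpdated} rows updated; table is now at version ${res.version}`);
```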
@@ -386,6 +386,53 @@ called then every valid row from the table will be returned.

***

### maximumNprobes()

```ts
maximumNprobes(maximumNprobes): VectorQuery
```

Set the maximum number of probes used.

This controls the maximum number of partitions that will be searched. If this
number is greater than minimumNprobes then the excess partitions will _only_ be
searched if we have not found enough results. This can be useful when there is
a narrow filter to allow these queries to spend more time searching and avoid
potential false negatives.

#### Parameters

* **maximumNprobes**: `number`

#### Returns

[`VectorQuery`](VectorQuery.md)

***

### minimumNprobes()

```ts
minimumNprobes(minimumNprobes): VectorQuery
```

Set the minimum number of probes used.

This controls the minimum number of partitions that will be searched. This
parameter will impact every query against a vector index, regardless of the
filter. See `nprobes` for more details. Higher values will increase recall
but will also increase latency.

#### Parameters

* **minimumNprobes**: `number`

#### Returns

[`VectorQuery`](VectorQuery.md)

***

### nprobes()

```ts
@@ -413,6 +460,10 @@ For best results we recommend tuning this parameter with a benchmark against
your actual data to find the smallest possible value that will still give
you the desired recall.

For more fine-grained control over behavior when you have a very narrow filter
you can use `minimumNprobes` and `maximumNprobes`. This method sets both
the minimum and maximum to the same value.

#### Parameters

* **nprobes**: `number`
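A short sketch combining the probe setters (assuming `table.search` returns a `VectorQuery` over an IVF-indexed column; the filter and query vector are hypothetical):

```ts
const rows = await table
  .search([0.1, 0.2, 0.3])
  .minimumNprobes(20)   // always search at least 20 partitions
  .maximumNprobes(100)  // search up to 100 when a narrow filter starves results
  .where("category = 'sighting'")
  .limit(10)
  .toArray();
```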
@@ -15,6 +15,14 @@ Enum representing the types of full-text queries supported.

## Enumeration Members

### Boolean

```ts
Boolean: "boolean";
```

***

### Boost

```ts
docs/src/js/enumerations/Occur.md (new file, +37)
@@ -0,0 +1,37 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / Occur

# Enumeration: Occur

Enum representing the occurrence of terms in full-text queries.

- `Must`: The term must be present in the document.
- `Should`: The term should contribute to the document score, but is not required.
- `MustNot`: The term must not be present in the document.

## Enumeration Members

### Must

```ts
Must: "MUST";
```

***

### MustNot

```ts
MustNot: "MUST_NOT";
```

***

### Should

```ts
Should: "SHOULD";
```
docs/src/js/enumerations/Operator.md (new file, +28)
@@ -0,0 +1,28 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / Operator

# Enumeration: Operator

Enum representing the logical operators used in full-text queries.

- `And`: All terms must match.
- `Or`: At least one term must match.

## Enumeration Members

### And

```ts
And: "AND";
```

***

### Or

```ts
Or: "OR";
```
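For reference, the members of both enums are plain string values, so they can be logged or compared directly (assuming both enums are exported from the package root, as the `globals` listing below indicates):

```ts
import { Occur, Operator } from "@lancedb/lancedb";

console.log(Occur.Must);    // "MUST"
console.log(Occur.MustNot); // "MUST_NOT"
console.log(Operator.And);  // "AND"
console.log(Operator.Or);   // "OR"
```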
@@ -6,10 +6,13 @@

# Function: connect()

## connect(uri, options)
## connect(uri, options, session)

```ts
function connect(uri, options?): Promise<Connection>
function connect(
  uri,
  options?,
  session?): Promise<Connection>
```

Connect to a LanceDB instance at the given URI.

@@ -29,6 +32,8 @@ Accepted formats:

* **options?**: `Partial`<[`ConnectionOptions`](../interfaces/ConnectionOptions.md)>
The options to use when connecting to the database

* **session?**: [`Session`](../classes/Session.md)

### Returns

`Promise`<[`Connection`](../classes/Connection.md)>

@@ -77,7 +82,7 @@ Accepted formats:

[ConnectionOptions](../interfaces/ConnectionOptions.md) for more details on the URI format.

### Example
### Examples

```ts
const conn = await connect({
@@ -85,3 +90,11 @@ const conn = await connect({
  storageOptions: {timeout: "60s"}
});
```

```ts
const session = Session.default();
const conn = await connect({
  uri: "/path/to/database",
  session: session
});
```
@@ -12,9 +12,12 @@

## Enumerations

- [FullTextQueryType](enumerations/FullTextQueryType.md)
- [Occur](enumerations/Occur.md)
- [Operator](enumerations/Operator.md)

## Classes

- [BooleanQuery](classes/BooleanQuery.md)
- [BoostQuery](classes/BoostQuery.md)
- [Connection](classes/Connection.md)
- [Index](classes/Index.md)
@@ -26,6 +29,7 @@
- [Query](classes/Query.md)
- [QueryBase](classes/QueryBase.md)
- [RecordBatchIterator](classes/RecordBatchIterator.md)
- [Session](classes/Session.md)
- [Table](classes/Table.md)
- [TagContents](classes/TagContents.md)
- [Tags](classes/Tags.md)
@@ -34,13 +38,18 @@

## Interfaces

- [AddColumnsResult](interfaces/AddColumnsResult.md)
- [AddColumnsSql](interfaces/AddColumnsSql.md)
- [AddDataOptions](interfaces/AddDataOptions.md)
- [AddResult](interfaces/AddResult.md)
- [AlterColumnsResult](interfaces/AlterColumnsResult.md)
- [ClientConfig](interfaces/ClientConfig.md)
- [ColumnAlteration](interfaces/ColumnAlteration.md)
- [CompactionStats](interfaces/CompactionStats.md)
- [ConnectionOptions](interfaces/ConnectionOptions.md)
- [CreateTableOptions](interfaces/CreateTableOptions.md)
- [DeleteResult](interfaces/DeleteResult.md)
- [DropColumnsResult](interfaces/DropColumnsResult.md)
- [ExecutableQuery](interfaces/ExecutableQuery.md)
- [FragmentStatistics](interfaces/FragmentStatistics.md)
- [FragmentSummaryStats](interfaces/FragmentSummaryStats.md)
@@ -54,6 +63,7 @@
- [IndexStatistics](interfaces/IndexStatistics.md)
- [IvfFlatOptions](interfaces/IvfFlatOptions.md)
- [IvfPqOptions](interfaces/IvfPqOptions.md)
- [MergeResult](interfaces/MergeResult.md)
- [OpenTableOptions](interfaces/OpenTableOptions.md)
- [OptimizeOptions](interfaces/OptimizeOptions.md)
- [OptimizeStats](interfaces/OptimizeStats.md)
@@ -64,7 +74,9 @@
- [TableStatistics](interfaces/TableStatistics.md)
- [TimeoutConfig](interfaces/TimeoutConfig.md)
- [UpdateOptions](interfaces/UpdateOptions.md)
- [UpdateResult](interfaces/UpdateResult.md)
- [Version](interfaces/Version.md)
- [WriteExecutionOptions](interfaces/WriteExecutionOptions.md)

## Type Aliases

@@ -73,6 +85,7 @@
- [FieldLike](type-aliases/FieldLike.md)
- [IntoSql](type-aliases/IntoSql.md)
- [IntoVector](type-aliases/IntoVector.md)
- [MultiVector](type-aliases/MultiVector.md)
- [RecordBatchLike](type-aliases/RecordBatchLike.md)
- [SchemaLike](type-aliases/SchemaLike.md)
- [TableLike](type-aliases/TableLike.md)
docs/src/js/interfaces/AddColumnsResult.md (new file, +15)

@@ -0,0 +1,15 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / AddColumnsResult

# Interface: AddColumnsResult

## Properties

### version

```ts
version: number;
```
docs/src/js/interfaces/AddResult.md (new file, +15)

@@ -0,0 +1,15 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / AddResult

# Interface: AddResult

## Properties

### version

```ts
version: number;
```
docs/src/js/interfaces/AlterColumnsResult.md (new file, +15)

@@ -0,0 +1,15 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / AlterColumnsResult

# Interface: AlterColumnsResult

## Properties

### version

```ts
version: number;
```
@@ -70,6 +70,17 @@ Defaults to 'us-east-1'.

***

### session?

```ts
optional session: Session;
```

(For LanceDB OSS only): the session to use for this connection. Holds
shared caches and other session-specific state.

***

### storageOptions?

```ts
docs/src/js/interfaces/DeleteResult.md (new file, +15)

@@ -0,0 +1,15 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / DeleteResult

# Interface: DeleteResult

## Properties

### version

```ts
version: number;
```
docs/src/js/interfaces/DropColumnsResult.md (new file, +15)

@@ -0,0 +1,15 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / DropColumnsResult

# Interface: DropColumnsResult

## Properties

### version

```ts
version: number;
```
@@ -23,7 +23,7 @@ whether to remove punctuation

### baseTokenizer?

```ts
optional baseTokenizer: "raw" | "simple" | "whitespace";
optional baseTokenizer: "raw" | "simple" | "whitespace" | "ngram";
```

The tokenizer to use when building the index.

@@ -71,6 +71,36 @@ tokens longer than this length will be ignored

***

### ngramMaxLength?

```ts
optional ngramMaxLength: number;
```

The maximum length of the ngrams to generate.

***

### ngramMinLength?

```ts
optional ngramMinLength: number;
```

The minimum length of the ngrams to generate.

***

### prefixOnly?

```ts
optional prefixOnly: boolean;
```

Whether to only generate ngrams from the prefix of each token (ngram tokenizer only).

***

### removeStopWords?

```ts
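A sketch of how the new ngram options might be passed when building a full-text index (assuming the `Index.fts` helper accepts the tokenizer options documented above; the column name is hypothetical):

```ts
import { Index } from "@lancedb/lancedb";

// Tokenize "description" into 3- to 5-character ngrams for substring search.
await table.createIndex("description", {
  config: Index.fts({
    baseTokenizer: "ngram",
    ngramMinLength: 3,
    ngramMaxLength: 5,
    prefixOnly: false, // emit ngrams from every position, not just token starts
  }),
});
```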
docs/src/js/interfaces/MergeResult.md (new file, +39)

@@ -0,0 +1,39 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / MergeResult

# Interface: MergeResult

## Properties

### numDeletedRows

```ts
numDeletedRows: number;
```

***

### numInsertedRows

```ts
numInsertedRows: number;
```

***

### numUpdatedRows

```ts
numUpdatedRows: number;
```

***

### version

```ts
version: number;
```
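A minimal sketch of where `MergeResult` comes from (assuming the `mergeInsert` builder on `Table`, whose `execute()` now resolves to this interface):

```ts
// Upsert on "id": update matching rows, insert the rest.
const res = await table
  .mergeInsert("id")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute([{ id: 1, item: "foo" }]);

console.log(res.numInsertedRows, res.numUpdatedRows, res.numDeletedRows, res.version);
```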
@@ -8,7 +8,7 @@

## Properties

### indexCacheSize?
### ~~indexCacheSize?~~

```ts
optional indexCacheSize: number;
@@ -16,6 +16,11 @@ optional indexCacheSize: number;

Set the size of the index cache, specified as a number of entries

#### Deprecated

Use session-level cache configuration instead.
Create a Session with custom cache sizes and pass it to the connect() function.

The exact meaning of an "entry" will depend on the type of index:
- IVF: there is one entry for each IVF partition
- BTREE: there is one entry for the entire index
@@ -24,10 +24,10 @@ The default is 7 days
// Delete all versions older than 1 day
const olderThan = new Date();
olderThan.setDate(olderThan.getDate() - 1);
tbl.cleanupOlderVersions(olderThan);
tbl.optimize({cleanupOlderThan: olderThan});

// Delete all versions except the current version
tbl.cleanupOlderVersions(new Date());
tbl.optimize({cleanupOlderThan: new Date()});
```

***
docs/src/js/interfaces/UpdateResult.md (new file, +23)

@@ -0,0 +1,23 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / UpdateResult

# Interface: UpdateResult

## Properties

### rowsUpdated

```ts
rowsUpdated: number;
```

***

### version

```ts
version: number;
```
docs/src/js/interfaces/WriteExecutionOptions.md (new file, +26)

@@ -0,0 +1,26 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / WriteExecutionOptions

# Interface: WriteExecutionOptions

## Properties

### timeoutMs?

```ts
optional timeoutMs: number;
```

Maximum time to run the operation before cancelling it.

By default, there is a 30-second timeout that is only enforced after the
first attempt. This is to prevent spending too long retrying to resolve
conflicts. For example, if a write attempt takes 20 seconds and fails,
the second attempt will be cancelled after 10 seconds, hitting the
30-second timeout. However, a write that takes one hour and succeeds on the
first attempt will not be cancelled.

When this is set, the timeout is enforced on all attempts, including the first.
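A hypothetical sketch of passing the timeout to a write (this diff does not show which methods accept `WriteExecutionOptions`, so the plumbing below is an assumption):

```ts
// Assumed: write methods take WriteExecutionOptions alongside their own options.
// Enforce a 10-second budget on every attempt, including the first.
await table.add(rows, { timeoutMs: 10_000 });
```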
docs/src/js/type-aliases/MultiVector.md (new file, +11)

@@ -0,0 +1,11 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / MultiVector

# Type Alias: MultiVector

```ts
type MultiVector: IntoVector[];
```
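A minimal sketch of the alias in use, based on the updated `search()` signature above, which now accepts a `MultiVector` (an array of query vectors) for multivector columns:

```ts
// Two query vectors submitted together as one MultiVector.
const queries = [
  [0.1, 0.2, 0.3],
  [0.4, 0.5, 0.6],
];
const hits = await table.search(queries).limit(10).toArray();
```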
@@ -428,7 +428,7 @@
"\n",
"**Why?** \n",
"Embedding the UFO dataset and ingesting it into LanceDB takes **~2 hours on a T4 GPU**. To save time: \n",
"- **Use the pre-prepared table with index created ** (provided below) to proceed directly to step7: search. \n",
"- **Use the pre-prepared table with index created** (provided below) to proceed directly to **Step 7**: search. \n",
"- **Step 5a** contains the full ingestion code for reference (run it only if necessary). \n",
"- **Step 6** contains the details on creating the index on the multivector column"
]
docs/src/python/datafusion.md (new file, +53)
@@ -0,0 +1,53 @@
# Apache Datafusion

In Python, LanceDB tables can also be queried with [Apache Datafusion](https://datafusion.apache.org/), an extensible query engine written in Rust that uses Apache Arrow as its in-memory format. This means you can write complex SQL queries to analyze your data in LanceDB.

This integration is done via [Datafusion FFI](https://docs.rs/datafusion-ffi/latest/datafusion_ffi/), which provides a native integration between LanceDB and Datafusion.
The Datafusion FFI pushes column selections and basic filters down to LanceDB, reducing the amount of data scanned when executing your query. Additionally, the integration streams data from LanceDB tables, which makes larger-than-memory aggregations possible.

We can demonstrate this by first installing `datafusion` and `lancedb`.

```shell
pip install datafusion lancedb
```

We will re-use the dataset [created previously](./pandas_and_pyarrow.md):

```python
import lancedb

from datafusion import SessionContext
from lance import FFILanceTableProvider

db = lancedb.connect("data/sample-lancedb")
data = [
    {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
    {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}
]
lance_table = db.create_table("lance_table", data)

ctx = SessionContext()

ffi_lance_table = FFILanceTableProvider(
    lance_table.to_lance(), with_row_id=True, with_row_addr=True
)
ctx.register_table_provider("ffi_lance_table", ffi_lance_table)
```

The `to_lance` method converts the LanceDB table to a `LanceDataset`, which is accessible to Datafusion through the Datafusion FFI integration layer.
To query the resulting Lance dataset in Datafusion, you first register the dataset with Datafusion (as above) and then reference it by the same name in your SQL query.

```python
ctx.table("ffi_lance_table")
ctx.sql("SELECT * FROM ffi_lance_table")
```

```
┌─────────────┬─────────┬────────┬─────────────────┬─────────────────┐
│   vector    │  item   │ price  │     _rowid      │    _rowaddr     │
│   float[]   │ varchar │ double │ bigint unsigned │ bigint unsigned │
├─────────────┼─────────┼────────┼─────────────────┼─────────────────┤
│ [3.1, 4.1]  │ foo     │  10.0  │        0        │        0        │
│ [5.9, 26.5] │ bar     │  20.0  │        1        │        1        │
└─────────────┴─────────┴────────┴─────────────────┴─────────────────┘
```
@@ -30,7 +30,8 @@ excluded_globs = [
    "../src/rag/advanced_techniques/*.md",
    "../src/guides/scalar_index.md",
    "../src/guides/storage.md",
    "../src/search.md"
    "../src/search.md",
    "../src/guides/sql_querying.md",
]

python_prefix = "py"
@@ -7,3 +7,4 @@ tantivy==0.20.1
--extra-index-url https://download.pytorch.org/whl/cpu
torch
polars>=0.19, <=1.3.0
datafusion
java/.mvn/wrapper/maven-wrapper.properties (new vendored file, +19)
@@ -0,0 +1,19 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
wrapperVersion=3.3.2
distributionType=only-script
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.9/apache-maven-3.9.9-bin.zip
java/README.md (new file, +37)
@@ -0,0 +1,37 @@
# LanceDB Java SDK

## Configuration and Initialization

### LanceDB Cloud

For LanceDB Cloud, use the simplified builder API:

```java
import com.lancedb.lance.namespace.LanceRestNamespace;
import com.lancedb.lancedb.LanceDbRestNamespaces;

// If your DB url is db://example-db, then your database here is example-db
LanceRestNamespace namespace = LanceDbRestNamespaces.builder()
    .apiKey("your_lancedb_cloud_api_key")
    .database("your_database_name")
    .build();
```

### LanceDB Enterprise

For Enterprise deployments, use your VPC endpoint:

```java
LanceRestNamespace namespace = LanceDbRestNamespaces.builder()
    .apiKey("your_lancedb_enterprise_api_key")
    .database("your-top-dir") // Your top-level folder under your cloud bucket, e.g. s3://your-bucket/your-top-dir/
    .hostOverride("http://<vpc_endpoint_dns_name>:80")
    .build();
```

## Development

Build:

```shell
./mvnw install
```
@@ -19,7 +19,7 @@ lancedb = { path = "../../../rust/lancedb" }
lance = { workspace = true }
arrow = { workspace = true, features = ["ffi"] }
arrow-schema.workspace = true
tokio = "1.23"
tokio = "1.46"
jni = "0.21.1"
snafu.workspace = true
lazy_static.workspace = true
@@ -8,18 +8,24 @@
  <parent>
    <groupId>com.lancedb</groupId>
    <artifactId>lancedb-parent</artifactId>
    <version>0.19.1-beta.0</version>
    <version>0.21.2-beta.1</version>
    <relativePath>../pom.xml</relativePath>
  </parent>

  <artifactId>lancedb-core</artifactId>
  <name>LanceDB Core</name>
  <name>${project.artifactId}</name>
  <description>LanceDB Core</description>
  <packaging>jar</packaging>
  <properties>
    <rust.release.build>false</rust.release.build>
  </properties>

  <dependencies>
    <dependency>
      <groupId>com.lancedb</groupId>
      <artifactId>lance-namespace-core</artifactId>
      <version>0.0.1</version>
    </dependency>
    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-vector</artifactId>
java/lance-namespace/pom.xml (new file, +26)
@@ -0,0 +1,26 @@
<?xml version="1.0" encoding="UTF-8"?>

<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <parent>
    <groupId>com.lancedb</groupId>
    <artifactId>lancedb-parent</artifactId>
    <version>0.21.2-beta.1</version>
    <relativePath>../pom.xml</relativePath>
  </parent>

  <artifactId>lancedb-lance-namespace</artifactId>
  <name>${project.artifactId}</name>
  <description>LanceDB Java Integration with Lance Namespace</description>
  <packaging>jar</packaging>

  <dependencies>
    <dependency>
      <groupId>com.lancedb</groupId>
      <artifactId>lance-namespace-core</artifactId>
    </dependency>
  </dependencies>
</project>
@@ -0,0 +1,146 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.lancedb.lancedb;

import com.lancedb.lance.namespace.LanceRestNamespace;
import com.lancedb.lance.namespace.client.apache.ApiClient;

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

/** Util class to help construct a {@link LanceRestNamespace} for LanceDB. */
public class LanceDbRestNamespaces {
  private static final String DEFAULT_REGION = "us-east-1";
  private static final String CLOUD_URL_PATTERN = "https://%s.%s.api.lancedb.com";

  private String apiKey;
  private String database;
  private Optional<String> hostOverride = Optional.empty();
  private Optional<String> region = Optional.empty();
  private Map<String, String> additionalConfig = new HashMap<>();

  private LanceDbRestNamespaces() {}

  /**
   * Create a new builder instance.
   *
   * @return A new LanceDbRestNamespaces builder
   */
  public static LanceDbRestNamespaces builder() {
    return new LanceDbRestNamespaces();
  }

  /**
   * Set the API key (required).
   *
   * @param apiKey The LanceDB API key
   * @return This builder
   */
  public LanceDbRestNamespaces apiKey(String apiKey) {
    if (apiKey == null || apiKey.trim().isEmpty()) {
      throw new IllegalArgumentException("API key cannot be null or empty");
    }
    this.apiKey = apiKey;
    return this;
  }

  /**
   * Set the database name (required).
   *
   * @param database The database name
   * @return This builder
   */
  public LanceDbRestNamespaces database(String database) {
    if (database == null || database.trim().isEmpty()) {
      throw new IllegalArgumentException("Database cannot be null or empty");
    }
    this.database = database;
    return this;
  }

  /**
   * Set a custom host override (optional). When set, this overrides the default LanceDB Cloud URL
   * construction. Use this for LanceDB Enterprise deployments.
   *
   * @param hostOverride The complete base URL (e.g., "http://your-vpc-endpoint:80")
   * @return This builder
   */
  public LanceDbRestNamespaces hostOverride(String hostOverride) {
    this.hostOverride = Optional.ofNullable(hostOverride);
    return this;
  }

  /**
   * Set the region for LanceDB Cloud (optional). Defaults to "us-east-1" if not specified. This is
   * ignored when hostOverride is set.
   *
   * @param region The AWS region (e.g., "us-east-1", "eu-west-1")
   * @return This builder
   */
  public LanceDbRestNamespaces region(String region) {
    this.region = Optional.ofNullable(region);
    return this;
  }

  /**
   * Add additional configuration parameters.
   *
   * @param key The configuration key
   * @param value The configuration value
   * @return This builder
   */
  public LanceDbRestNamespaces config(String key, String value) {
    this.additionalConfig.put(key, value);
    return this;
  }

  /**
   * Build the LanceRestNamespace instance.
   *
   * @return A configured LanceRestNamespace
   * @throws IllegalStateException if required parameters are missing
   */
  public LanceRestNamespace build() {
    // Validate required fields
    if (apiKey == null) {
      throw new IllegalStateException("API key is required");
    }
    if (database == null) {
      throw new IllegalStateException("Database is required");
    }

    // Build configuration map
    Map<String, String> config = new HashMap<>(additionalConfig);
    config.put("headers.x-lancedb-database", database);
    config.put("headers.x-api-key", apiKey);

    // Determine base URL
    String baseUrl;
    if (hostOverride.isPresent()) {
      baseUrl = hostOverride.get();
      config.put("host_override", hostOverride.get());
    } else {
      String effectiveRegion = region.orElse(DEFAULT_REGION);
      baseUrl = String.format(CLOUD_URL_PATTERN, database, effectiveRegion);
      config.put("region", effectiveRegion);
    }

    // Create and configure ApiClient
    ApiClient apiClient = new ApiClient();
    apiClient.setBasePath(baseUrl);

    return new LanceRestNamespace(apiClient, config);
  }
}
java/mvnw (new vendored executable, +259)
@@ -0,0 +1,259 @@
#!/bin/sh
# ----------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ----------------------------------------------------------------------------

# ----------------------------------------------------------------------------
# Apache Maven Wrapper startup batch script, version 3.3.2
#
# Optional ENV vars
# -----------------
# JAVA_HOME - location of a JDK home dir, required when download maven via java source
# MVNW_REPOURL - repo url base for downloading maven distribution
# MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven
# MVNW_VERBOSE - true: enable verbose log; debug: trace the mvnw script; others: silence the output
# ----------------------------------------------------------------------------

set -euf
[ "${MVNW_VERBOSE-}" != debug ] || set -x

# OS specific support.
native_path() { printf %s\\n "$1"; }
case "$(uname)" in
CYGWIN* | MINGW*)
  [ -z "${JAVA_HOME-}" ] || JAVA_HOME="$(cygpath --unix "$JAVA_HOME")"
  native_path() { cygpath --path --windows "$1"; }
  ;;
esac

# set JAVACMD and JAVACCMD
set_java_home() {
  # For Cygwin and MinGW, ensure paths are in Unix format before anything is touched
  if [ -n "${JAVA_HOME-}" ]; then
    if [ -x "$JAVA_HOME/jre/sh/java" ]; then
      # IBM's JDK on AIX uses strange locations for the executables
      JAVACMD="$JAVA_HOME/jre/sh/java"
      JAVACCMD="$JAVA_HOME/jre/sh/javac"
    else
      JAVACMD="$JAVA_HOME/bin/java"
      JAVACCMD="$JAVA_HOME/bin/javac"

      if [ ! -x "$JAVACMD" ] || [ ! -x "$JAVACCMD" ]; then
        echo "The JAVA_HOME environment variable is not defined correctly, so mvnw cannot run." >&2
        echo "JAVA_HOME is set to \"$JAVA_HOME\", but \"\$JAVA_HOME/bin/java\" or \"\$JAVA_HOME/bin/javac\" does not exist." >&2
        return 1
      fi
    fi
  else
    JAVACMD="$(
      'set' +e
      'unset' -f command 2>/dev/null
      'command' -v java
    )" || :
    JAVACCMD="$(
      'set' +e
      'unset' -f command 2>/dev/null
      'command' -v javac
    )" || :

    if [ ! -x "${JAVACMD-}" ] || [ ! -x "${JAVACCMD-}" ]; then
      echo "The java/javac command does not exist in PATH nor is JAVA_HOME set, so mvnw cannot run." >&2
      return 1
    fi
  fi
}

# hash string like Java String::hashCode
hash_string() {
  str="${1:-}" h=0
  while [ -n "$str" ]; do
    char="${str%"${str#?}"}"
    h=$(((h * 31 + $(LC_CTYPE=C printf %d "'$char")) % 4294967296))
    str="${str#?}"
  done
  printf %x\\n $h
}

verbose() { :; }
[ "${MVNW_VERBOSE-}" != true ] || verbose() { printf %s\\n "${1-}"; }

die() {
  printf %s\\n "$1" >&2
  exit 1
}

trim() {
  # MWRAPPER-139:
  #   Trims trailing and leading whitespace, carriage returns, tabs, and linefeeds.
  #   Needed for removing poorly interpreted newline sequences when running in more
  #   exotic environments such as mingw bash on Windows.
  printf "%s" "${1}" | tr -d '[:space:]'
}

# parse distributionUrl and optional distributionSha256Sum, requires .mvn/wrapper/maven-wrapper.properties
while IFS="=" read -r key value; do
  case "${key-}" in
  distributionUrl) distributionUrl=$(trim "${value-}") ;;
  distributionSha256Sum) distributionSha256Sum=$(trim "${value-}") ;;
  esac
done <"${0%/*}/.mvn/wrapper/maven-wrapper.properties"
[ -n "${distributionUrl-}" ] || die "cannot read distributionUrl property in ${0%/*}/.mvn/wrapper/maven-wrapper.properties"

case "${distributionUrl##*/}" in
maven-mvnd-*bin.*)
  MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/
  case "${PROCESSOR_ARCHITECTURE-}${PROCESSOR_ARCHITEW6432-}:$(uname -a)" in
  *AMD64:CYGWIN* | *AMD64:MINGW*) distributionPlatform=windows-amd64 ;;
  :Darwin*x86_64) distributionPlatform=darwin-amd64 ;;
  :Darwin*arm64) distributionPlatform=darwin-aarch64 ;;
  :Linux*x86_64*) distributionPlatform=linux-amd64 ;;
  *)
    echo "Cannot detect native platform for mvnd on $(uname)-$(uname -m), use pure java version" >&2
    distributionPlatform=linux-amd64
    ;;
  esac
  distributionUrl="${distributionUrl%-bin.*}-$distributionPlatform.zip"
  ;;
maven-mvnd-*) MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ ;;
*) MVN_CMD="mvn${0##*/mvnw}" _MVNW_REPO_PATTERN=/org/apache/maven/ ;;
esac

# apply MVNW_REPOURL and calculate MAVEN_HOME
# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-<version>,maven-mvnd-<version>-<platform>}/<hash>
[ -z "${MVNW_REPOURL-}" ] || distributionUrl="$MVNW_REPOURL$_MVNW_REPO_PATTERN${distributionUrl#*"$_MVNW_REPO_PATTERN"}"
distributionUrlName="${distributionUrl##*/}"
distributionUrlNameMain="${distributionUrlName%.*}"
distributionUrlNameMain="${distributionUrlNameMain%-bin}"
MAVEN_USER_HOME="${MAVEN_USER_HOME:-${HOME}/.m2}"
MAVEN_HOME="${MAVEN_USER_HOME}/wrapper/dists/${distributionUrlNameMain-}/$(hash_string "$distributionUrl")"

exec_maven() {
  unset MVNW_VERBOSE MVNW_USERNAME MVNW_PASSWORD MVNW_REPOURL || :
  exec "$MAVEN_HOME/bin/$MVN_CMD" "$@" || die "cannot exec $MAVEN_HOME/bin/$MVN_CMD"
}

if [ -d "$MAVEN_HOME" ]; then
  verbose "found existing MAVEN_HOME at $MAVEN_HOME"
  exec_maven "$@"
fi

case "${distributionUrl-}" in
*?-bin.zip | *?maven-mvnd-?*-?*.zip) ;;
*) die "distributionUrl is not valid, must match *-bin.zip or maven-mvnd-*.zip, but found '${distributionUrl-}'" ;;
esac

# prepare tmp dir
if TMP_DOWNLOAD_DIR="$(mktemp -d)" && [ -d "$TMP_DOWNLOAD_DIR" ]; then
  clean() { rm -rf -- "$TMP_DOWNLOAD_DIR"; }
  trap clean HUP INT TERM EXIT
else
  die "cannot create temp dir"
fi

mkdir -p -- "${MAVEN_HOME%/*}"

# Download and Install Apache Maven
verbose "Couldn't find MAVEN_HOME, downloading and installing it ..."
verbose "Downloading from: $distributionUrl"
verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName"

# select .zip or .tar.gz
if ! command -v unzip >/dev/null; then
  distributionUrl="${distributionUrl%.zip}.tar.gz"
  distributionUrlName="${distributionUrl##*/}"
fi

# verbose opt
__MVNW_QUIET_WGET=--quiet __MVNW_QUIET_CURL=--silent __MVNW_QUIET_UNZIP=-q __MVNW_QUIET_TAR=''
[ "${MVNW_VERBOSE-}" != true ] || __MVNW_QUIET_WGET='' __MVNW_QUIET_CURL='' __MVNW_QUIET_UNZIP='' __MVNW_QUIET_TAR=v

# normalize http auth
case "${MVNW_PASSWORD:+has-password}" in
'') MVNW_USERNAME='' MVNW_PASSWORD='' ;;
has-password) [ -n "${MVNW_USERNAME-}" ] || MVNW_USERNAME='' MVNW_PASSWORD='' ;;
esac

if [ -z "${MVNW_USERNAME-}" ] && command -v wget >/dev/null; then
  verbose "Found wget ... using wget"
  wget ${__MVNW_QUIET_WGET:+"$__MVNW_QUIET_WGET"} "$distributionUrl" -O "$TMP_DOWNLOAD_DIR/$distributionUrlName" || die "wget: Failed to fetch $distributionUrl"
elif [ -z "${MVNW_USERNAME-}" ] && command -v curl >/dev/null; then
  verbose "Found curl ... using curl"
  curl ${__MVNW_QUIET_CURL:+"$__MVNW_QUIET_CURL"} -f -L -o "$TMP_DOWNLOAD_DIR/$distributionUrlName" "$distributionUrl" || die "curl: Failed to fetch $distributionUrl"
elif set_java_home; then
  verbose "Falling back to use Java to download"
  javaSource="$TMP_DOWNLOAD_DIR/Downloader.java"
  targetZip="$TMP_DOWNLOAD_DIR/$distributionUrlName"
  cat >"$javaSource" <<-END
public class Downloader extends java.net.Authenticator
{
protected java.net.PasswordAuthentication getPasswordAuthentication()
{
return new java.net.PasswordAuthentication( System.getenv( "MVNW_USERNAME" ), System.getenv( "MVNW_PASSWORD" ).toCharArray() );
}
public static void main( String[] args ) throws Exception
{
setDefault( new Downloader() );
java.nio.file.Files.copy( java.net.URI.create( args[0] ).toURL().openStream(), java.nio.file.Paths.get( args[1] ).toAbsolutePath().normalize() );
}
}
END
  # For Cygwin/MinGW, switch paths to Windows format before running javac and java
  verbose " - Compiling Downloader.java ..."
  "$(native_path "$JAVACCMD")" "$(native_path "$javaSource")" || die "Failed to compile Downloader.java"
  verbose " - Running Downloader.java ..."
  "$(native_path "$JAVACMD")" -cp "$(native_path "$TMP_DOWNLOAD_DIR")" Downloader "$distributionUrl" "$(native_path "$targetZip")"
fi

# If specified, validate the SHA-256 sum of the Maven distribution zip file
if [ -n "${distributionSha256Sum-}" ]; then
  distributionSha256Result=false
  if [ "$MVN_CMD" = mvnd.sh ]; then
    echo "Checksum validation is not supported for maven-mvnd." >&2
    echo "Please disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2
    exit 1
  elif command -v sha256sum >/dev/null; then
    if echo "$distributionSha256Sum  $TMP_DOWNLOAD_DIR/$distributionUrlName" | sha256sum -c >/dev/null 2>&1; then
      distributionSha256Result=true
    fi
  elif command -v shasum >/dev/null; then
    if echo "$distributionSha256Sum  $TMP_DOWNLOAD_DIR/$distributionUrlName" | shasum -a 256 -c >/dev/null 2>&1; then
      distributionSha256Result=true
    fi
  else
    echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." >&2
    echo "Please install either command, or disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2
    exit 1
  fi
  if [ $distributionSha256Result = false ]; then
    echo "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised." >&2
    echo "If you updated your Maven version, you need to update the specified distributionSha256Sum property." >&2
    exit 1
  fi
fi

# unzip and move
if command -v unzip >/dev/null; then
  unzip ${__MVNW_QUIET_UNZIP:+"$__MVNW_QUIET_UNZIP"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -d "$TMP_DOWNLOAD_DIR" || die "failed to unzip"
else
  tar xzf${__MVNW_QUIET_TAR:+"$__MVNW_QUIET_TAR"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -C "$TMP_DOWNLOAD_DIR" || die "failed to untar"
fi
printf %s\\n "$distributionUrl" >"$TMP_DOWNLOAD_DIR/$distributionUrlNameMain/mvnw.url"
mv -- "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" "$MAVEN_HOME" || [ -d "$MAVEN_HOME" ] || die "fail to move MAVEN_HOME"

clean || :
exec_maven "$@"
java/pom.xml (14 changes)
@@ -6,11 +6,10 @@

  <groupId>com.lancedb</groupId>
  <artifactId>lancedb-parent</artifactId>
  <version>0.19.1-beta.0</version>
  <version>0.21.2-beta.1</version>
  <packaging>pom</packaging>

  <name>LanceDB Parent</name>
  <description>LanceDB vector database Java API</description>
  <name>${project.artifactId}</name>
  <description>LanceDB Java SDK Parent POM</description>
  <url>http://lancedb.com/</url>

  <developers>
@@ -29,6 +28,7 @@
  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <arrow.version>15.0.0</arrow.version>
    <lance-namespace.version>0.0.1</lance-namespace.version>
    <spotless.skip>false</spotless.skip>
    <spotless.version>2.30.0</spotless.version>
    <spotless.java.googlejavaformat.version>1.7</spotless.java.googlejavaformat.version>
@@ -52,6 +52,7 @@

  <modules>
    <module>core</module>
    <module>lance-namespace</module>
  </modules>

  <scm>
@@ -62,6 +63,11 @@

  <dependencyManagement>
    <dependencies>
      <dependency>
        <groupId>com.lancedb</groupId>
        <artifactId>lance-namespace-core</artifactId>
        <version>${lance-namespace.version}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.arrow</groupId>
        <artifactId>arrow-vector</artifactId>
node/package-lock.json (generated, 49 changes)
@@ -1,12 +1,12 @@
{
  "name": "vectordb",
  "version": "0.19.1-beta.0",
  "version": "0.21.2-beta.1",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "vectordb",
      "version": "0.19.1-beta.0",
      "version": "0.21.2-beta.1",
      "cpu": [
        "x64",
        "arm64"
@@ -52,11 +52,11 @@
        "uuid": "^9.0.0"
      },
      "optionalDependencies": {
        "@lancedb/vectordb-darwin-arm64": "0.19.1-beta.0",
        "@lancedb/vectordb-darwin-x64": "0.19.1-beta.0",
        "@lancedb/vectordb-linux-arm64-gnu": "0.19.1-beta.0",
        "@lancedb/vectordb-linux-x64-gnu": "0.19.1-beta.0",
        "@lancedb/vectordb-win32-x64-msvc": "0.19.1-beta.0"
        "@lancedb/vectordb-darwin-arm64": "0.21.2-beta.1",
        "@lancedb/vectordb-darwin-x64": "0.21.2-beta.1",
        "@lancedb/vectordb-linux-arm64-gnu": "0.21.2-beta.1",
        "@lancedb/vectordb-linux-x64-gnu": "0.21.2-beta.1",
        "@lancedb/vectordb-win32-x64-msvc": "0.21.2-beta.1"
      },
      "peerDependencies": {
        "@apache-arrow/ts": "^14.0.2",
@@ -327,65 +327,60 @@
      }
    },
    "node_modules/@lancedb/vectordb-darwin-arm64": {
      "version": "0.19.1-beta.0",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.19.1-beta.0.tgz",
      "integrity": "sha512-LXdyFLniwciIslw487JRmYB+PpuwBhq4jT9hNxKKMrnXfBvUEQ8HN9kxkLFy+pwFaY1wmFH0EfujnXlt8qXgqA==",
      "version": "0.21.2-beta.1",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.21.2-beta.1.tgz",
      "integrity": "sha512-7QXVJNTei7PMuXRyyc+F3WGiudRNq9HfeOaMmMOJJpuCAO0zLq1pM9DCl5aPF5MddrodPHJxi+IWV+iAFH7zcg==",
      "cpu": [
        "arm64"
      ],
      "license": "Apache-2.0",
      "optional": true,
      "os": [
        "darwin"
      ]
    },
    "node_modules/@lancedb/vectordb-darwin-x64": {
      "version": "0.19.1-beta.0",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.19.1-beta.0.tgz",
      "integrity": "sha512-kVZzc6j778vg3AftFdu8pSg/3J9YHCID70bf3QpMnl43b9ZSl4Sh8WNVYWiqfwOAElD+kOaTq9MC4f7jkW/DEQ==",
      "version": "0.21.2-beta.1",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.21.2-beta.1.tgz",
      "integrity": "sha512-M/TWcJ3WVc6DNFgG/lWI7L5tQ05IF3WoWuZfRfbbimGhRvY7xf1O3uOt+jMcNJCa5mHFGCg2SZDA8mebd/mL7g==",
      "cpu": [
        "x64"
      ],
      "license": "Apache-2.0",
      "optional": true,
      "os": [
        "darwin"
      ]
    },
    "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
      "version": "0.19.1-beta.0",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.19.1-beta.0.tgz",
      "integrity": "sha512-EOrAwqUkCXBp7GVwzhyUoFIUbV+7eXDbEHo6mHgZN6E1SOAgBuPiwRqDlqA4uCYU8YhZJZabXdiLTP+EuSjpgw==",
      "version": "0.21.2-beta.1",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.21.2-beta.1.tgz",
      "integrity": "sha512-OEsM9znf9DDmdwGuTg2EVu+ebwuWQ1lCx0cYy4+hNy3ntolwMC39ePg2H9WD9SsEnQ2vcGJgBJTQLPKgXww+iQ==",
      "cpu": [
        "arm64"
      ],
      "license": "Apache-2.0",
      "optional": true,
      "os": [
        "linux"
      ]
    },
    "node_modules/@lancedb/vectordb-linux-x64-gnu": {
      "version": "0.19.1-beta.0",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.19.1-beta.0.tgz",
      "integrity": "sha512-Xs7wlM6LJS4/jps9Vi1TMtR9kYedzShZQ1sepJzn2cuI1jnbCrlLsFSMUfjhxy7/0+E8QO1f7cBO6gwOTunrog==",
      "version": "0.21.2-beta.1",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.21.2-beta.1.tgz",
      "integrity": "sha512-7FTq/O1zNzD71rgX2PEVmkct4jk2wc+ADU3rss+0VqoBSO9XeMqZEVD2WgZWuSTg6bYai//FHGDHSaknHBNsdw==",
      "cpu": [
        "x64"
      ],
      "license": "Apache-2.0",
      "optional": true,
      "os": [
        "linux"
      ]
    },
    "node_modules/@lancedb/vectordb-win32-x64-msvc": {
      "version": "0.19.1-beta.0",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.19.1-beta.0.tgz",
      "integrity": "sha512-tS9g2S4hxdsQfVd6S6N+HuX1Mt4iJEQjpHmmfz+WEfdc5JP6eyKoqqQzFw2oL3TJAG7HXfD88/24nCUUCHDKoA==",
      "version": "0.21.2-beta.1",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.21.2-beta.1.tgz",
      "integrity": "sha512-mN1p/J0kdqy6MrlKtmA8set/PibqFPyytQJFAuxSLXC/rwD7vgqUCt0SI0zVWPGG7J5Y65kvdc99l7Yl7lJtwQ==",
      "cpu": [
        "x64"
      ],
      "license": "Apache-2.0",
      "optional": true,
      "os": [
        "win32"
@@ -1,6 +1,6 @@
{
  "name": "vectordb",
  "version": "0.19.1-beta.0",
  "version": "0.21.2-beta.1",
  "description": " Serverless, low-latency vector database for AI applications",
  "private": false,
  "main": "dist/index.js",
@@ -89,10 +89,10 @@
    }
  },
  "optionalDependencies": {
    "@lancedb/vectordb-darwin-x64": "0.19.1-beta.0",
    "@lancedb/vectordb-darwin-arm64": "0.19.1-beta.0",
    "@lancedb/vectordb-linux-x64-gnu": "0.19.1-beta.0",
    "@lancedb/vectordb-linux-arm64-gnu": "0.19.1-beta.0",
    "@lancedb/vectordb-win32-x64-msvc": "0.19.1-beta.0"
    "@lancedb/vectordb-darwin-x64": "0.21.2-beta.1",
    "@lancedb/vectordb-darwin-arm64": "0.21.2-beta.1",
    "@lancedb/vectordb-linux-x64-gnu": "0.21.2-beta.1",
    "@lancedb/vectordb-linux-arm64-gnu": "0.21.2-beta.1",
    "@lancedb/vectordb-win32-x64-msvc": "0.21.2-beta.1"
  }
}
@@ -49,7 +49,7 @@ describe('LanceDB Mirrored Store Integration test', function () {
it('s3://...?mirroredStore=... param is processed correctly', async function () {
this.timeout(600000)

const dir = tmpdir()
const dir = await fs.promises.mkdtemp(path.join(tmpdir(), 'lancedb-mirror-'))
console.log(dir)
const conn = await lancedb.connect({ uri: `s3://lancedb-integtest?mirroredStore=${dir}`, storageOptions: { allowHttp: 'true' } })
const data = Array(200).fill({ vector: Array(128).fill(1.0), id: 0 })
@@ -63,118 +63,93 @@ describe('LanceDB Mirrored Store Integration test', function () {
const t = await conn.createTable(tableName, data, { writeMode: lancedb.WriteMode.Overwrite })

const mirroredPath = path.join(dir, `${tableName}.lance`)
fs.readdir(mirroredPath, { withFileTypes: true }, (err, files) => {
if (err != null) throw err
// there should be three dirs
assert.equal(files.length, 3)
assert.isTrue(files[0].isDirectory())
assert.isTrue(files[1].isDirectory())

fs.readdir(path.join(mirroredPath, '_transactions'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].name.endsWith('.txn'))
})
const files = await fs.promises.readdir(mirroredPath, { withFileTypes: true })
// there should be three dirs
assert.equal(files.length, 3, 'files after table creation')
assert.isTrue(files[0].isDirectory())
assert.isTrue(files[1].isDirectory())

fs.readdir(path.join(mirroredPath, '_versions'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].name.endsWith('.manifest'))
})
const transactionFiles = await fs.promises.readdir(path.join(mirroredPath, '_transactions'), { withFileTypes: true })
assert.equal(transactionFiles.length, 1, 'transactionFiles after table creation')
assert.isTrue(transactionFiles[0].name.endsWith('.txn'))

fs.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].name.endsWith('.lance'))
})
})
const versionFiles = await fs.promises.readdir(path.join(mirroredPath, '_versions'), { withFileTypes: true })
assert.equal(versionFiles.length, 1, 'versionFiles after table creation')
assert.isTrue(versionFiles[0].name.endsWith('.manifest'))

const dataFiles = await fs.promises.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true })
assert.equal(dataFiles.length, 1, 'dataFiles after table creation')
assert.isTrue(dataFiles[0].name.endsWith('.lance'))

// try create index and check if it's mirrored
await t.createIndex({ column: 'vector', type: 'ivf_pq' })

fs.readdir(mirroredPath, { withFileTypes: true }, (err, files) => {
if (err != null) throw err
// there should be four dirs
assert.equal(files.length, 4)
assert.isTrue(files[0].isDirectory())
assert.isTrue(files[1].isDirectory())
assert.isTrue(files[2].isDirectory())
const filesAfterIndex = await fs.promises.readdir(mirroredPath, { withFileTypes: true })
// there should be four dirs
assert.equal(filesAfterIndex.length, 4, 'filesAfterIndex')
assert.isTrue(filesAfterIndex[0].isDirectory())
assert.isTrue(filesAfterIndex[1].isDirectory())
assert.isTrue(filesAfterIndex[2].isDirectory())

// Two TXs now
fs.readdir(path.join(mirroredPath, '_transactions'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 2)
assert.isTrue(files[0].name.endsWith('.txn'))
assert.isTrue(files[1].name.endsWith('.txn'))
})
// Two TXs now
const transactionFilesAfterIndex = await fs.promises.readdir(path.join(mirroredPath, '_transactions'), { withFileTypes: true })
assert.equal(transactionFilesAfterIndex.length, 2, 'transactionFilesAfterIndex')
assert.isTrue(transactionFilesAfterIndex[0].name.endsWith('.txn'))
assert.isTrue(transactionFilesAfterIndex[1].name.endsWith('.txn'))

fs.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].name.endsWith('.lance'))
})
const dataFilesAfterIndex = await fs.promises.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true })
assert.equal(dataFilesAfterIndex.length, 1, 'dataFilesAfterIndex')
assert.isTrue(dataFilesAfterIndex[0].name.endsWith('.lance'))

fs.readdir(path.join(mirroredPath, '_indices'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].isDirectory())
const indicesFiles = await fs.promises.readdir(path.join(mirroredPath, '_indices'), { withFileTypes: true })
assert.equal(indicesFiles.length, 1, 'indicesFiles')
assert.isTrue(indicesFiles[0].isDirectory())

fs.readdir(path.join(mirroredPath, '_indices', files[0].name), { withFileTypes: true }, (err, files) => {
if (err != null) throw err

assert.equal(files.length, 1)
assert.isTrue(files[0].isFile())
assert.isTrue(files[0].name.endsWith('.idx'))
})
})
})
const indexFiles = await fs.promises.readdir(path.join(mirroredPath, '_indices', indicesFiles[0].name), { withFileTypes: true })
console.log(`DEBUG indexFiles in ${indicesFiles[0].name}:`, indexFiles.map(f => `${f.name} (${f.isFile() ? 'file' : 'dir'})`))
assert.equal(indexFiles.length, 2, 'indexFiles')
const fileNames = indexFiles.map(f => f.name).sort()
assert.isTrue(fileNames.includes('auxiliary.idx'), 'auxiliary.idx should be present')
assert.isTrue(fileNames.includes('index.idx'), 'index.idx should be present')
assert.isTrue(indexFiles.every(f => f.isFile()), 'all index files should be files')

// try delete and check if it's mirrored
await t.delete('id = 0')

fs.readdir(mirroredPath, { withFileTypes: true }, (err, files) => {
if (err != null) throw err
// there should be five dirs
assert.equal(files.length, 5)
assert.isTrue(files[0].isDirectory())
assert.isTrue(files[1].isDirectory())
assert.isTrue(files[2].isDirectory())
assert.isTrue(files[3].isDirectory())
assert.isTrue(files[4].isDirectory())
const filesAfterDelete = await fs.promises.readdir(mirroredPath, { withFileTypes: true })
// there should be five dirs
assert.equal(filesAfterDelete.length, 5, 'filesAfterDelete')
assert.isTrue(filesAfterDelete[0].isDirectory())
assert.isTrue(filesAfterDelete[1].isDirectory())
assert.isTrue(filesAfterDelete[2].isDirectory())
assert.isTrue(filesAfterDelete[3].isDirectory())
assert.isTrue(filesAfterDelete[4].isDirectory())

// Three TXs now
fs.readdir(path.join(mirroredPath, '_transactions'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 3)
assert.isTrue(files[0].name.endsWith('.txn'))
assert.isTrue(files[1].name.endsWith('.txn'))
})
// Three TXs now
const transactionFilesAfterDelete = await fs.promises.readdir(path.join(mirroredPath, '_transactions'), { withFileTypes: true })
assert.equal(transactionFilesAfterDelete.length, 3, 'transactionFilesAfterDelete')
assert.isTrue(transactionFilesAfterDelete[0].name.endsWith('.txn'))
assert.isTrue(transactionFilesAfterDelete[1].name.endsWith('.txn'))

fs.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].name.endsWith('.lance'))
})
const dataFilesAfterDelete = await fs.promises.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true })
assert.equal(dataFilesAfterDelete.length, 1, 'dataFilesAfterDelete')
assert.isTrue(dataFilesAfterDelete[0].name.endsWith('.lance'))

fs.readdir(path.join(mirroredPath, '_indices'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].isDirectory())
const indicesFilesAfterDelete = await fs.promises.readdir(path.join(mirroredPath, '_indices'), { withFileTypes: true })
assert.equal(indicesFilesAfterDelete.length, 1, 'indicesFilesAfterDelete')
assert.isTrue(indicesFilesAfterDelete[0].isDirectory())

fs.readdir(path.join(mirroredPath, '_indices', files[0].name), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
const indexFilesAfterDelete = await fs.promises.readdir(path.join(mirroredPath, '_indices', indicesFilesAfterDelete[0].name), { withFileTypes: true })
console.log(`DEBUG indexFilesAfterDelete in ${indicesFilesAfterDelete[0].name}:`, indexFilesAfterDelete.map(f => `${f.name} (${f.isFile() ? 'file' : 'dir'})`))
assert.equal(indexFilesAfterDelete.length, 2, 'indexFilesAfterDelete')
const fileNamesAfterDelete = indexFilesAfterDelete.map(f => f.name).sort()
assert.isTrue(fileNamesAfterDelete.includes('auxiliary.idx'), 'auxiliary.idx should be present after delete')
assert.isTrue(fileNamesAfterDelete.includes('index.idx'), 'index.idx should be present after delete')
assert.isTrue(indexFilesAfterDelete.every(f => f.isFile()), 'all index files should be files after delete')

assert.equal(files.length, 1)
assert.isTrue(files[0].isFile())
assert.isTrue(files[0].name.endsWith('.idx'))
})
})

fs.readdir(path.join(mirroredPath, '_deletions'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].name.endsWith('.arrow'))
})
})
const deletionFiles = await fs.promises.readdir(path.join(mirroredPath, '_deletions'), { withFileTypes: true })
assert.equal(deletionFiles.length, 1, 'deletionFiles')
assert.isTrue(deletionFiles[0].name.endsWith('.arrow'))
})
})
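Editor's note: the test above exercises the `mirroredStore` query parameter, which mirrors everything written to the object store into a local directory. A minimal connection sketch under the same API; the package import (`vectordb`, the legacy Node bindings that the platform packages above belong to), bucket name, and mirror path are assumptions for illustration:

import * as fs from 'fs'
import * as os from 'os'
import * as path from 'path'
import * as lancedb from 'vectordb'

async function openMirrored () {
  // Mirror all writes into a scratch directory alongside the S3 store.
  const mirrorDir = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'lancedb-mirror-'))
  return lancedb.connect({
    uri: `s3://my-bucket?mirroredStore=${mirrorDir}`,
    storageOptions: { allowHttp: 'true' }
  })
}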
nodejs/CLAUDE.md (new file, 13 lines)
@@ -0,0 +1,13 @@
These are the TypeScript bindings of LanceDB.
The core Rust library is in the `../rust/lancedb` directory, the Rust binding
code is in the `src/` directory, and the TypeScript bindings are in
the `lancedb/` directory.

Whenever you change the Rust code, you will need to recompile: `npm run build`.

Common commands:
* Build: `npm run build`
* Lint: `npm run lint`
* Fix lints: `npm run lint-fix`
* Test: `npm test`
* Run single test file: `npm test __test__/arrow.test.ts`
@@ -1,7 +1,7 @@
[package]
name = "lancedb-nodejs"
edition.workspace = true
version = "0.19.1-beta.0"
version = "0.21.2-beta.1"
license.workspace = true
description.workspace = true
repository.workspace = true
@@ -30,6 +30,7 @@ log.workspace = true

# Workaround for build failure until we can fix it.
aws-lc-sys = "=0.28.0"
aws-lc-rs = "=1.13.0"

[build-dependencies]
napi-build = "2.1"
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

import { Schema } from "apache-arrow";
import { Bool, Field, Int32, List, Schema, Struct, Utf8 } from "apache-arrow";

import * as arrow15 from "apache-arrow-15";
import * as arrow16 from "apache-arrow-16";
@@ -11,10 +11,12 @@ import * as arrow18 from "apache-arrow-18";
import {
convertToTable,
fromBufferToRecordBatch,
fromDataToBuffer,
fromRecordBatchToBuffer,
fromTableToBuffer,
makeArrowTable,
makeEmptyTable,
tableFromIPC,
} from "../lancedb/arrow";
import {
EmbeddingFunction,
@@ -374,6 +376,276 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
expect(table2.numRows).toBe(4);
expect(table2.schema).toEqual(schema);
});

it("will handle missing columns in schema alignment when using embeddings", async function () {
const schema = new Schema(
[
new Field("domain", new Utf8(), true),
new Field("name", new Utf8(), true),
new Field("description", new Utf8(), true),
],
new Map([["embedding_functions", JSON.stringify([])]]),
);

const data = [
{ domain: "google.com", name: "Google" },
{ domain: "facebook.com", name: "Facebook" },
];

const table = await convertToTable(data, undefined, { schema });

expect(table.numCols).toBe(3);
expect(table.numRows).toBe(2);

const descriptionColumn = table.getChild("description");
expect(descriptionColumn).toBeDefined();
expect(descriptionColumn?.nullCount).toBe(2);
expect(descriptionColumn?.toArray()).toEqual([null, null]);

expect(table.getChild("domain")?.toArray()).toEqual([
"google.com",
"facebook.com",
]);
expect(table.getChild("name")?.toArray()).toEqual([
"Google",
"Facebook",
]);
});

it("will handle completely missing nested struct columns", async function () {
const schema = new Schema(
[
new Field("id", new Utf8(), true),
new Field("name", new Utf8(), true),
new Field(
"metadata",
new Struct([
new Field("version", new Int32(), true),
new Field("author", new Utf8(), true),
new Field(
"tags",
new List(new Field("item", new Utf8(), true)),
true,
),
]),
true,
),
],
new Map([["embedding_functions", JSON.stringify([])]]),
);

const data = [
{ id: "doc1", name: "Document 1" },
{ id: "doc2", name: "Document 2" },
];

const table = await convertToTable(data, undefined, { schema });

expect(table.numCols).toBe(3);
expect(table.numRows).toBe(2);

const buf = await fromTableToBuffer(table);
const retrievedTable = tableFromIPC(buf);

const rows = [];
for (let i = 0; i < retrievedTable.numRows; i++) {
rows.push(retrievedTable.get(i));
}

expect(rows[0].metadata.version).toBe(null);
expect(rows[0].metadata.author).toBe(null);
expect(rows[0].metadata.tags).toBe(null);
expect(rows[0].id).toBe("doc1");
expect(rows[0].name).toBe("Document 1");
});

it("will handle partially missing nested struct fields", async function () {
const schema = new Schema(
[
new Field("id", new Utf8(), true),
new Field(
"metadata",
new Struct([
new Field("version", new Int32(), true),
new Field("author", new Utf8(), true),
new Field("created_at", new Utf8(), true),
]),
true,
),
],
new Map([["embedding_functions", JSON.stringify([])]]),
);

const data = [
{ id: "doc1", metadata: { version: 1, author: "Alice" } },
{ id: "doc2", metadata: { version: 2 } },
];

const table = await convertToTable(data, undefined, { schema });

expect(table.numCols).toBe(2);
expect(table.numRows).toBe(2);

const metadataColumn = table.getChild("metadata");
expect(metadataColumn).toBeDefined();
expect(metadataColumn?.type.toString()).toBe(
"Struct<{version:Int32, author:Utf8, created_at:Utf8}>",
);
});

it("will handle multiple levels of nested structures", async function () {
const schema = new Schema(
[
new Field("id", new Utf8(), true),
new Field(
"config",
new Struct([
new Field("database", new Utf8(), true),
new Field(
"connection",
new Struct([
new Field("host", new Utf8(), true),
new Field("port", new Int32(), true),
new Field(
"ssl",
new Struct([
new Field("enabled", new Bool(), true),
new Field("cert_path", new Utf8(), true),
]),
true,
),
]),
true,
),
]),
true,
),
],
new Map([["embedding_functions", JSON.stringify([])]]),
);

const data = [
{
id: "config1",
config: {
database: "postgres",
connection: { host: "localhost" },
},
},
{
id: "config2",
config: { database: "mysql" },
},
{
id: "config3",
},
];

const table = await convertToTable(data, undefined, { schema });

expect(table.numCols).toBe(2);
expect(table.numRows).toBe(3);

const configColumn = table.getChild("config");
expect(configColumn).toBeDefined();
expect(configColumn?.type.toString()).toBe(
"Struct<{database:Utf8, connection:Struct<{host:Utf8, port:Int32, ssl:Struct<{enabled:Bool, cert_path:Utf8}>}>}>",
);
});

it("will handle missing columns in Arrow table input when using embeddings", async function () {
const incompleteTable = makeArrowTable([
{ domain: "google.com", name: "Google" },
{ domain: "facebook.com", name: "Facebook" },
]);

const schema = new Schema(
[
new Field("domain", new Utf8(), true),
new Field("name", new Utf8(), true),
new Field("description", new Utf8(), true),
],
new Map([["embedding_functions", JSON.stringify([])]]),
);

const buf = await fromDataToBuffer(incompleteTable, undefined, schema);

expect(buf.byteLength).toBeGreaterThan(0);

const retrievedTable = tableFromIPC(buf);
expect(retrievedTable.numCols).toBe(3);
expect(retrievedTable.numRows).toBe(2);

const descriptionColumn = retrievedTable.getChild("description");
expect(descriptionColumn).toBeDefined();
expect(descriptionColumn?.nullCount).toBe(2);
expect(descriptionColumn?.toArray()).toEqual([null, null]);

expect(retrievedTable.getChild("domain")?.toArray()).toEqual([
"google.com",
"facebook.com",
]);
expect(retrievedTable.getChild("name")?.toArray()).toEqual([
"Google",
"Facebook",
]);
});

it("should correctly retain values in nested struct fields", async function () {
const testData = [
{
id: "doc1",
vector: [1, 2, 3],
metadata: {
filePath: "/path/to/file1.ts",
startLine: 10,
endLine: 20,
text: "function test() { return true; }",
},
},
{
id: "doc2",
vector: [4, 5, 6],
metadata: {
filePath: "/path/to/file2.ts",
startLine: 30,
endLine: 40,
text: "function test2() { return false; }",
},
},
];

const table = makeArrowTable(testData);

const metadataField = table.schema.fields.find(
(f) => f.name === "metadata",
);
expect(metadataField).toBeDefined();
// biome-ignore lint/suspicious/noExplicitAny: accessing fields in different Arrow versions
const childNames = metadataField?.type.children.map((c: any) => c.name);
expect(childNames).toEqual([
"filePath",
"startLine",
"endLine",
"text",
]);

const buf = await fromTableToBuffer(table);
const retrievedTable = tableFromIPC(buf);

const rows = [];
for (let i = 0; i < retrievedTable.numRows; i++) {
rows.push(retrievedTable.get(i));
}

const firstRow = rows[0];
expect(firstRow.id).toBe("doc1");
expect(firstRow.vector.toJSON()).toEqual([1, 2, 3]);
expect(firstRow.metadata.filePath).toBe("/path/to/file1.ts");
expect(firstRow.metadata.startLine).toBe(10);
expect(firstRow.metadata.endLine).toBe(20);
expect(firstRow.metadata.text).toBe("function test() { return true; }");
});
});

class DummyEmbedding extends EmbeddingFunction<string> {
@@ -527,14 +799,14 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
).rejects.toThrow("column vector was missing");
});

it("will provide a nice error if run twice", async function () {
it("will skip embedding application if already applied", async function () {
const records = sampleRecords();
const table = await convertToTable(records, dummyEmbeddingConfig);

// fromTableToBuffer will try to apply the embeddings again
await expect(
fromTableToBuffer(table, dummyEmbeddingConfig),
).rejects.toThrow("already existed");
// but should skip since the column already has non-null values
const result = await fromTableToBuffer(table, dummyEmbeddingConfig);
expect(result.byteLength).toBeGreaterThan(0);
});
});
nodejs/__test__/session.test.ts (new file, 46 lines)
@@ -0,0 +1,46 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

import * as tmp from "tmp";
import { Session, connect } from "../lancedb";

describe("Session", () => {
let tmpDir: tmp.DirResult;
beforeEach(() => {
tmpDir = tmp.dirSync({ unsafeCleanup: true });
});
afterEach(() => tmpDir.removeCallback());

it("should configure cache sizes and work with database operations", async () => {
// Create session with small cache limits for testing
const indexCacheSize = BigInt(1024 * 1024); // 1MB
const metadataCacheSize = BigInt(512 * 1024); // 512KB

const session = new Session(indexCacheSize, metadataCacheSize);

// Record initial cache state
const initialCacheSize = session.sizeBytes();
const initialCacheItems = session.approxNumItems();

// Test session works with database connection
const db = await connect({ uri: tmpDir.name, session: session });

// Create and use a table to exercise the session
const data = Array.from({ length: 100 }, (_, i) => ({
id: i,
text: `item ${i}`,
}));
const table = await db.createTable("test", data);
const results = await table.query().limit(5).toArray();

expect(results).toHaveLength(5);

// Verify cache usage increased after operations
const finalCacheSize = session.sizeBytes();
const finalCacheItems = session.approxNumItems();

expect(finalCacheSize).toBeGreaterThan(initialCacheSize); // Cache should have grown
expect(finalCacheItems).toBeGreaterThanOrEqual(initialCacheItems); // Items should not decrease
expect(initialCacheSize).toBeLessThan(indexCacheSize + metadataCacheSize); // Within limits
});
});
@@ -33,7 +33,13 @@ import {
register,
} from "../lancedb/embedding";
import { Index } from "../lancedb/indices";
import { instanceOfFullTextQuery } from "../lancedb/query";
import {
BooleanQuery,
Occur,
Operator,
instanceOfFullTextQuery,
} from "../lancedb/query";
import exp = require("constants");

describe.each([arrow15, arrow16, arrow17, arrow18])(
"Given a table",
@@ -95,7 +101,9 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
});

it("should overwrite data if asked", async () => {
await table.add([{ id: 1 }, { id: 2 }]);
const addRes = await table.add([{ id: 1 }, { id: 2 }]);
expect(addRes).toHaveProperty("version");
expect(addRes.version).toBe(2);
await table.add([{ id: 1 }], { mode: "overwrite" });
await expect(table.countRows()).resolves.toBe(1);
});
@@ -111,7 +119,11 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
await table.add([{ id: 1 }]);
expect(await table.countRows("id == 1")).toBe(1);
expect(await table.countRows("id == 7")).toBe(0);
await table.update({ id: "7" });
const updateRes = await table.update({ id: "7" });
expect(updateRes).toHaveProperty("version");
expect(updateRes.version).toBe(3);
expect(updateRes).toHaveProperty("rowsUpdated");
expect(updateRes.rowsUpdated).toBe(1);
expect(await table.countRows("id == 1")).toBe(0);
expect(await table.countRows("id == 7")).toBe(1);
await table.add([{ id: 2 }]);
@@ -338,11 +350,17 @@ describe("merge insert", () => {
{ a: 3, b: "y" },
{ a: 4, b: "z" },
];
await table
const mergeInsertRes = await table
.mergeInsert("a")
.whenMatchedUpdateAll()
.whenNotMatchedInsertAll()
.execute(newData);
.execute(newData, { timeoutMs: 10_000 });
expect(mergeInsertRes).toHaveProperty("version");
expect(mergeInsertRes.version).toBe(2);
expect(mergeInsertRes.numInsertedRows).toBe(1);
expect(mergeInsertRes.numUpdatedRows).toBe(2);
expect(mergeInsertRes.numDeletedRows).toBe(0);

const expected = [
{ a: 1, b: "a" },
{ a: 2, b: "x" },
@@ -350,9 +368,9 @@ describe("merge insert", () => {
{ a: 4, b: "z" },
];

expect(
JSON.parse(JSON.stringify((await table.toArrow()).toArray())),
).toEqual(expected);
const result = (await table.toArrow()).toArray().sort((a, b) => a.a - b.a);

expect(result.map((row) => ({ ...row }))).toEqual(expected);
});
test("conditional update", async () => {
const newData = [
@@ -360,10 +378,12 @@ describe("merge insert", () => {
{ a: 3, b: "y" },
{ a: 4, b: "z" },
];
await table
const mergeInsertRes = await table
.mergeInsert("a")
.whenMatchedUpdateAll({ where: "target.b = 'b'" })
.execute(newData);
expect(mergeInsertRes).toHaveProperty("version");
expect(mergeInsertRes.version).toBe(2);

const expected = [
{ a: 1, b: "a" },
@@ -448,6 +468,20 @@ describe("merge insert", () => {
res = res.sort((a, b) => a.a - b.a);
expect(res).toEqual(expected);
});

test("timeout", async () => {
const newData = [
{ a: 2, b: "x" },
{ a: 4, b: "z" },
];
await expect(
table
.mergeInsert("a")
.whenMatchedUpdateAll()
.whenNotMatchedInsertAll()
.execute(newData, { timeoutMs: 0 }),
).rejects.toThrow("merge insert timed out");
});
});

describe("When creating an index", () => {
@@ -525,6 +559,32 @@ describe("When creating an index", () => {
rst = await tbl.query().limit(2).offset(1).nearestTo(queryVec).toArrow();
expect(rst.numRows).toBe(1);

// test nprobes
rst = await tbl.query().nearestTo(queryVec).limit(2).nprobes(50).toArrow();
expect(rst.numRows).toBe(2);
rst = await tbl
.query()
.nearestTo(queryVec)
.limit(2)
.minimumNprobes(15)
.toArrow();
expect(rst.numRows).toBe(2);
rst = await tbl
.query()
.nearestTo(queryVec)
.limit(2)
.minimumNprobes(10)
.maximumNprobes(20)
.toArrow();
expect(rst.numRows).toBe(2);

expect(() => tbl.query().nearestTo(queryVec).minimumNprobes(0)).toThrow(
"Invalid input, minimum_nprobes must be greater than 0",
);
expect(() => tbl.query().nearestTo(queryVec).maximumNprobes(5)).toThrow(
"Invalid input, maximum_nprobes must be greater than minimum_nprobes",
);

await tbl.dropIndex("vec_idx");
const indices2 = await tbl.listIndices();
expect(indices2.length).toBe(0);
@@ -1023,15 +1083,19 @@ describe("schema evolution", function () {
{ id: 1n, vector: [0.1, 0.2] },
]);
// Can create a non-nullable column only through addColumns at the moment.
await table.addColumns([
const addColumnsRes = await table.addColumns([
{ name: "price", valueSql: "cast(10.0 as double)" },
]);
expect(addColumnsRes).toHaveProperty("version");
expect(addColumnsRes.version).toBe(2);
expect(await table.schema()).toEqual(schema);

await table.alterColumns([
const alterColumnsRes = await table.alterColumns([
{ path: "id", rename: "new_id" },
{ path: "price", nullable: true },
]);
expect(alterColumnsRes).toHaveProperty("version");
expect(alterColumnsRes.version).toBe(3);

const expectedSchema = new Schema([
new Field("new_id", new Int64(), true),
@@ -1149,7 +1213,9 @@ describe("schema evolution", function () {
const table = await con.createTable("vectors", [
{ id: 1n, vector: [0.1, 0.2] },
]);
await table.dropColumns(["vector"]);
const dropColumnsRes = await table.dropColumns(["vector"]);
expect(dropColumnsRes).toHaveProperty("version");
expect(dropColumnsRes.version).toBe(2);

const expectedSchema = new Schema([new Field("id", new Int64(), true)]);
expect(await table.schema()).toEqual(expectedSchema);
@@ -1266,6 +1332,32 @@ describe("when dealing with tags", () => {
await table.checkoutLatest();
expect(await table.version()).toBe(4);
});

it("can checkout and restore tags", async () => {
const conn = await connect(tmpDir.name, {
readConsistencyInterval: 0,
});

const table = await conn.createTable("my_table", [
{ id: 1n, vector: [0.1, 0.2] },
]);
expect(await table.version()).toBe(1);
expect(await table.countRows()).toBe(1);
const tagsManager = await table.tags();
const tag1 = "tag1";
await tagsManager.create(tag1, 1);
await table.add([{ id: 2n, vector: [0.3, 0.4] }]);
const tag2 = "tag2";
await tagsManager.create(tag2, 2);
expect(await table.version()).toBe(2);
await table.checkout(tag1);
expect(await table.version()).toBe(1);
await table.restore();
expect(await table.version()).toBe(3);
expect(await table.countRows()).toBe(1);
await table.add([{ id: 3n, vector: [0.5, 0.6] }]);
expect(await table.countRows()).toBe(2);
});
});

describe("when optimizing a dataset", () => {
@@ -1445,7 +1537,9 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
];
const table = await db.createTable("test", data);
await table.createIndex("text", {
config: Index.fts(),
config: Index.fts({
withPosition: true,
}),
});

const results = await table.search("lance").toArray();
@@ -1468,6 +1562,18 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(

const results = await table.search("hello").toArray();
expect(results[0].text).toBe(data[0].text);

const results2 = await table
.search(new MatchQuery("hello world", "text"))
.toArray();
expect(results2.length).toBe(2);

const results3 = await table
.search(
new MatchQuery("hello world", "text", { operator: Operator.And }),
)
.toArray();
expect(results3.length).toBe(1);
});

test("full text search without lowercase", async () => {
@@ -1498,7 +1604,9 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
];
const table = await db.createTable("test", data);
await table.createIndex("text", {
config: Index.fts(),
config: Index.fts({
withPosition: true,
}),
});

const results = await table.search("world").toArray();
@@ -1542,6 +1650,114 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
expect(resultSet.has("fob")).toBe(true);
expect(resultSet.has("fo")).toBe(true);
expect(resultSet.has("food")).toBe(true);

const prefixResults = await table
.search(
new MatchQuery("foo", "text", { fuzziness: 3, prefixLength: 3 }),
)
.toArray();
expect(prefixResults.length).toBe(2);
const resultSet2 = new Set(prefixResults.map((r) => r.text));
expect(resultSet2.has("foo")).toBe(true);
expect(resultSet2.has("food")).toBe(true);
});

test("full text search boolean query", async () => {
const db = await connect(tmpDir.name);
const data = [
{ text: "The cat and dog are playing" },
{ text: "The cat is sleeping" },
{ text: "The dog is barking" },
{ text: "The dog chases the cat" },
];
const table = await db.createTable("test", data);
await table.createIndex("text", {
config: Index.fts({ withPosition: false }),
});

const shouldResults = await table
.search(
new BooleanQuery([
[Occur.Should, new MatchQuery("cat", "text")],
[Occur.Should, new MatchQuery("dog", "text")],
]),
)
.toArray();
expect(shouldResults.length).toBe(4);

const mustResults = await table
.search(
new BooleanQuery([
[Occur.Must, new MatchQuery("cat", "text")],
[Occur.Must, new MatchQuery("dog", "text")],
]),
)
.toArray();
expect(mustResults.length).toBe(2);

const mustNotResults = await table
.search(
new BooleanQuery([
[Occur.Must, new MatchQuery("cat", "text")],
[Occur.MustNot, new MatchQuery("dog", "text")],
]),
)
.toArray();
expect(mustNotResults.length).toBe(1);
});

test("full text search ngram", async () => {
const db = await connect(tmpDir.name);
const data = [
{ text: "hello world", vector: [0.1, 0.2, 0.3] },
{ text: "lance database", vector: [0.4, 0.5, 0.6] },
{ text: "lance is cool", vector: [0.7, 0.8, 0.9] },
];
const table = await db.createTable("test", data);
await table.createIndex("text", {
config: Index.fts({ baseTokenizer: "ngram" }),
});

const results = await table.search("lan").toArray();
expect(results.length).toBe(2);
const resultSet = new Set(results.map((r) => r.text));
expect(resultSet.has("lance database")).toBe(true);
expect(resultSet.has("lance is cool")).toBe(true);

const results2 = await table.search("nce").toArray(); // spellchecker:disable-line
expect(results2.length).toBe(2);
const resultSet2 = new Set(results2.map((r) => r.text));
expect(resultSet2.has("lance database")).toBe(true);
expect(resultSet2.has("lance is cool")).toBe(true);

// the default min_ngram_length is 3, so "la" should not match
const results3 = await table.search("la").toArray();
expect(results3.length).toBe(0);

// test setting min_ngram_length and prefix_only
await table.createIndex("text", {
config: Index.fts({
baseTokenizer: "ngram",
ngramMinLength: 2,
prefixOnly: true,
}),
replace: true,
});

const results4 = await table.search("lan").toArray();
expect(results4.length).toBe(2);
const resultSet4 = new Set(results4.map((r) => r.text));
expect(resultSet4.has("lance database")).toBe(true);
expect(resultSet4.has("lance is cool")).toBe(true);

const results5 = await table.search("nce").toArray(); // spellchecker:disable-line
expect(results5.length).toBe(0);

const results6 = await table.search("la").toArray();
expect(results6.length).toBe(2);
const resultSet6 = new Set(results6.map((r) => r.text));
expect(resultSet6.has("lance database")).toBe(true);
expect(resultSet6.has("lance is cool")).toBe(true);
});

test.each([
@@ -1647,4 +1863,43 @@ describe("column name options", () => {
expect(results[0].query_index).toBe(0);
expect(results[1].query_index).toBe(1);
});

test("index and search multivectors", async () => {
const db = await connect(tmpDir.name);
const data = [];
// generate 256 random multivectors
for (let i = 0; i < 256; i++) {
data.push({
multivector: Array.from({ length: 10 }, () =>
Array(2).fill(Math.random()),
),
});
}
const table = await db.createTable("multivectors", data, {
schema: new Schema([
new Field(
"multivector",
new List(
new Field(
"item",
new FixedSizeList(2, new Field("item", new Float32())),
),
),
),
]),
});

const results = await table.search(data[0].multivector).limit(10).toArray();
expect(results.length).toBe(10);

await table.createIndex("multivector", {
config: Index.ivfPq({ numPartitions: 2, distanceType: "cosine" }),
});

const results2 = await table
.search(data[0].multivector)
.limit(10)
.toArray();
expect(results2.length).toBe(10);
});
});
@@ -107,6 +107,20 @@ export type IntoVector =
| number[]
| Promise<Float32Array | Float64Array | number[]>;

export type MultiVector = IntoVector[];

export function isMultiVector(value: unknown): value is MultiVector {
return Array.isArray(value) && isIntoVector(value[0]);
}

export function isIntoVector(value: unknown): value is IntoVector {
return (
value instanceof Float32Array ||
value instanceof Float64Array ||
(Array.isArray(value) && !Array.isArray(value[0]))
);
}

export function isArrowTable(value: object): value is TableLike {
if (value instanceof ArrowTable) return true;
return "schema" in value && "batches" in value;
@@ -417,7 +431,9 @@ function inferSchema(
} else {
const inferredType = inferType(value, path, opts);
if (inferredType === undefined) {
throw new Error(`Failed to infer data type for field ${path.join(".")} at row ${rowI}. \
throw new Error(`Failed to infer data type for field ${path.join(
".",
)} at row ${rowI}. \
Consider providing an explicit schema.`);
}
pathTree.set(path, inferredType);
@@ -639,8 +655,9 @@ function transposeData(
): Vector {
if (field.type instanceof Struct) {
const childFields = field.type.children;
const fullPath = [...path, field.name];
const childVectors = childFields.map((child) => {
return transposeData(data, child, [...path, child.name]);
return transposeData(data, child, fullPath);
});
const structData = makeData({
type: field.type,
@@ -652,7 +669,14 @@ function transposeData(
const values = data.map((datum) => {
let current: unknown = datum;
for (const key of valuesPath) {
if (isObject(current) && Object.hasOwn(current, key)) {
if (current == null) {
return null;
}

if (
isObject(current) &&
(Object.hasOwn(current, key) || key in current)
) {
current = current[key];
} else {
return null;
@@ -791,11 +815,17 @@ async function applyEmbeddingsFromMetadata(
`Cannot apply embedding function because the source column '${functionEntry.sourceColumn}' was not present in the data`,
);
}

// Check if destination column exists and handle accordingly
if (columns[destColumn] !== undefined) {
throw new Error(
`Attempt to apply embeddings to table failed because column ${destColumn} already existed`,
);
const existingColumn = columns[destColumn];
// If the column exists but is all null, we can fill it with embeddings
if (existingColumn.nullCount !== existingColumn.length) {
// Column has non-null values, skip embedding application
continue;
}
}

if (table.batches.length > 1) {
throw new Error(
"Internal error: `makeArrowTable` unexpectedly created a table with more than one batch",
@@ -823,6 +853,15 @@ async function applyEmbeddingsFromMetadata(
const vector = makeVector(vectors, destType);
columns[destColumn] = vector;
}

// Add any missing columns from the schema as null vectors
for (const field of schema.fields) {
if (!(field.name in columns)) {
const nullValues = new Array(table.numRows).fill(null);
columns[field.name] = makeVector(nullValues, field.type);
}
}

const newTable = new ArrowTable(columns);
return alignTable(newTable, schema);
}
@@ -895,11 +934,23 @@ async function applyEmbeddings<T>(
);
}
} else {
// Check if destination column exists and handle accordingly
if (Object.prototype.hasOwnProperty.call(newColumns, destColumn)) {
throw new Error(
`Attempt to apply embeddings to table failed because column ${destColumn} already existed`,
);
const existingColumn = newColumns[destColumn];
// If the column exists but is all null, we can fill it with embeddings
if (existingColumn.nullCount !== existingColumn.length) {
// Column has non-null values, skip embedding application and return table as-is
let newTable = new ArrowTable(newColumns);
if (schema != null) {
newTable = alignTable(newTable, schema as Schema);
}
return new ArrowTable(
new Schema(newTable.schema.fields, schemaMetadata),
newTable.batches,
);
}
}

if (table.batches.length > 1) {
throw new Error(
"Internal error: `makeArrowTable` unexpectedly created a table with more than one batch",
@@ -959,7 +1010,21 @@ export async function convertToTable(
embeddings?: EmbeddingFunctionConfig,
makeTableOptions?: Partial<MakeArrowTableOptions>,
): Promise<ArrowTable> {
const table = makeArrowTable(data, makeTableOptions);
let processedData = data;

// If we have a schema with embedding metadata, we need to preprocess the data
// to ensure all nested fields are present
if (
makeTableOptions?.schema &&
makeTableOptions.schema.metadata?.has("embedding_functions")
) {
processedData = ensureNestedFieldsExist(
data,
makeTableOptions.schema as Schema,
);
}

const table = makeArrowTable(processedData, makeTableOptions);
return await applyEmbeddings(table, embeddings, makeTableOptions?.schema);
}

@@ -1052,7 +1117,16 @@ export async function fromDataToBuffer(
schema = sanitizeSchema(schema);
}
if (isArrowTable(data)) {
return fromTableToBuffer(sanitizeTable(data), embeddings, schema);
const table = sanitizeTable(data);
// If we have a schema with embedding functions, we need to ensure all columns exist
// before applying embeddings, since applyEmbeddingsFromMetadata expects all columns
// to be present in the table
if (schema && schema.metadata?.has("embedding_functions")) {
const alignedTable = alignTableToSchema(table, schema);
return fromTableToBuffer(alignedTable, embeddings, schema);
} else {
return fromTableToBuffer(table, embeddings, schema);
}
} else {
const table = await convertToTable(data, embeddings, { schema });
return fromTableToBuffer(table);
@@ -1121,7 +1195,7 @@ function alignBatch(batch: RecordBatch, schema: Schema): RecordBatch {
type: new Struct(schema.fields),
length: batch.numRows,
nullCount: batch.nullCount,
children: alignedChildren,
children: alignedChildren as unknown as ArrowData<DataType>[],
});
return new RecordBatch(schema, newData);
}
@@ -1193,6 +1267,79 @@ function validateSchemaEmbeddings(
return new Schema(fields, schema.metadata);
}
/**
* Ensures that all nested fields defined in the schema exist in the data,
* filling missing fields with null values.
*/
export function ensureNestedFieldsExist(
data: Array<Record<string, unknown>>,
schema: Schema,
): Array<Record<string, unknown>> {
return data.map((row) => {
const completeRow: Record<string, unknown> = {};

for (const field of schema.fields) {
if (field.name in row) {
if (
field.type.constructor.name === "Struct" &&
row[field.name] !== null &&
row[field.name] !== undefined
) {
// Handle nested struct
const nestedValue = row[field.name] as Record<string, unknown>;
completeRow[field.name] = ensureStructFieldsExist(
nestedValue,
field.type,
);
} else {
// Non-struct field or null struct value
completeRow[field.name] = row[field.name];
}
} else {
// Field is missing from the data - set to null
completeRow[field.name] = null;
}
}

return completeRow;
});
}

/**
* Recursively ensures that all fields in a struct type exist in the data,
* filling missing fields with null values.
*/
function ensureStructFieldsExist(
data: Record<string, unknown>,
structType: Struct,
): Record<string, unknown> {
const completeStruct: Record<string, unknown> = {};

for (const childField of structType.children) {
if (childField.name in data) {
if (
childField.type.constructor.name === "Struct" &&
data[childField.name] !== null &&
data[childField.name] !== undefined
) {
// Recursively handle nested struct
completeStruct[childField.name] = ensureStructFieldsExist(
data[childField.name] as Record<string, unknown>,
childField.type,
);
} else {
// Non-struct field or null struct value
completeStruct[childField.name] = data[childField.name];
}
} else {
// Field is missing - set to null
completeStruct[childField.name] = null;
}
}

return completeStruct;
}

interface JsonDataType {
type: string;
fields?: JsonField[];
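Editor's sketch of how the two helpers above behave (the schema and rows are illustrative, not from the changeset): a row that omits a schema field comes back with that field set to null, and present structs are filled out recursively.

import { Field, Schema, Struct, Utf8 } from "apache-arrow";
import { ensureNestedFieldsExist } from "../lancedb/arrow";

const schema = new Schema([
  new Field("id", new Utf8(), true),
  new Field("metadata", new Struct([new Field("author", new Utf8(), true)]), true),
]);

// Top-level field entirely absent: filled with null.
const rows = ensureNestedFieldsExist([{ id: "doc1" }], schema);
// rows[0] => { id: "doc1", metadata: null }

// Struct present but empty: its children are filled with null.
const partial = ensureNestedFieldsExist([{ id: "doc2", metadata: {} }], schema);
// partial[0] => { id: "doc2", metadata: { author: null } }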
@@ -1326,3 +1473,64 @@ function fieldToJson(field: Field): JsonField {
metadata: field.metadata,
};
}

function alignTableToSchema(
table: ArrowTable,
targetSchema: Schema,
): ArrowTable {
const existingColumns = new Map<string, Vector>();

// Map existing columns
for (const field of table.schema.fields) {
existingColumns.set(field.name, table.getChild(field.name)!);
}

// Create vectors for all fields in target schema
const alignedColumns: Record<string, Vector> = {};

for (const field of targetSchema.fields) {
if (existingColumns.has(field.name)) {
// Column exists, use it
alignedColumns[field.name] = existingColumns.get(field.name)!;
} else {
// Column missing, create null vector
alignedColumns[field.name] = createNullVector(field, table.numRows);
}
}

// Create new table with aligned schema and columns
return new ArrowTable(targetSchema, alignedColumns);
}

function createNullVector(field: Field, numRows: number): Vector {
if (field.type.constructor.name === "Struct") {
// For struct types, create a struct with null fields
const structType = field.type as Struct;
const childVectors = structType.children.map((childField) =>
createNullVector(childField, numRows),
);

// Create struct data
const structData = makeData({
type: structType,
length: numRows,
nullCount: 0,
children: childVectors.map((v) => v.data[0]),
});

return arrowMakeVector(structData);
} else {
// For other types, create a vector of nulls
const nullBitmap = new Uint8Array(Math.ceil(numRows / 8));
// All bits are 0, meaning all values are null

const data = makeData({
type: field.type,
length: numRows,
nullCount: numRows,
nullBitmap,
});

return arrowMakeVector(data);
}
}
@@ -85,6 +85,9 @@ export interface OpenTableOptions {
/**
* Set the size of the index cache, specified as a number of entries
*
* @deprecated Use session-level cache configuration instead.
* Create a Session with custom cache sizes and pass it to the connect() function.
*
* The exact meaning of an "entry" will depend on the type of index:
* - IVF: there is one entry for each IVF partition
* - BTREE: there is one entry for the entire index
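Editor's note: since the per-table `indexCacheSize` option is now deprecated, here is a minimal sketch of the session-level replacement, based on the `Session` constructor and `connect` overload used elsewhere in this changeset. The package import path, cache sizes, and database path are illustrative assumptions.

import { Session, connect } from "@lancedb/lancedb"; // import path is an assumption

// Constructor arguments are byte budgets: (indexCacheSize, metadataCacheSize).
const session = new Session(
  BigInt(1024 * 1024 * 1024), // 1 GiB index cache (illustrative)
  BigInt(256 * 1024 * 1024), // 256 MiB metadata cache (illustrative)
);
const db = await connect({ uri: "/path/to/database", session });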
@@ -10,6 +10,7 @@ import {
import {
ConnectionOptions,
Connection as LanceDbConnection,
Session,
} from "./native.js";

export {
@@ -28,6 +29,13 @@ export {
FragmentSummaryStats,
Tags,
TagContents,
MergeResult,
AddResult,
AddColumnsResult,
AlterColumnsResult,
DeleteResult,
DropColumnsResult,
UpdateResult,
} from "./native.js";

export {
@@ -44,6 +52,8 @@ export {
OpenTableOptions,
} from "./connection";

export { Session } from "./native.js";

export {
ExecutableQuery,
Query,
@@ -57,7 +67,10 @@ export {
PhraseQuery,
BoostQuery,
MultiMatchQuery,
BooleanQuery,
FullTextQueryType,
Operator,
Occur,
} from "./query";

export {
@@ -79,7 +92,7 @@ export {
ColumnAlteration,
} from "./table";

export { MergeInsertBuilder } from "./merge";
export { MergeInsertBuilder, WriteExecutionOptions } from "./merge";

export * as embedding from "./embedding";
export * as rerankers from "./rerankers";
@@ -90,6 +103,7 @@ export {
RecordBatchLike,
DataLike,
IntoVector,
MultiVector,
} from "./arrow";
export { IntoSql, packBits } from "./util";
@@ -120,6 +134,7 @@ export { IntoSql, packBits } from "./util";
export async function connect(
uri: string,
options?: Partial<ConnectionOptions>,
session?: Session,
): Promise<Connection>;
/**
* Connect to a LanceDB instance at the given URI.
@@ -138,31 +153,43 @@ export async function connect(
* storageOptions: {timeout: "60s"}
* });
* ```
*
* @example
* ```ts
* const session = Session.default();
* const conn = await connect({
* uri: "/path/to/database",
* session: session
* });
* ```
*/
export async function connect(
options: Partial<ConnectionOptions> & { uri: string },
): Promise<Connection>;
export async function connect(
uriOrOptions: string | (Partial<ConnectionOptions> & { uri: string }),
options: Partial<ConnectionOptions> = {},
options?: Partial<ConnectionOptions>,
): Promise<Connection> {
let uri: string | undefined;
let finalOptions: Partial<ConnectionOptions> = {};

if (typeof uriOrOptions !== "string") {
const { uri: uri_, ...opts } = uriOrOptions;
uri = uri_;
options = opts;
finalOptions = opts;
} else {
uri = uriOrOptions;
finalOptions = options || {};
}

if (!uri) {
throw new Error("uri is required");
}

options = (options as ConnectionOptions) ?? {};
(<ConnectionOptions>options).storageOptions = cleanseStorageOptions(
(<ConnectionOptions>options).storageOptions,
finalOptions = (finalOptions as ConnectionOptions) ?? {};
(<ConnectionOptions>finalOptions).storageOptions = cleanseStorageOptions(
(<ConnectionOptions>finalOptions).storageOptions,
);
const nativeConn = await LanceDbConnection.new(uri, options);
const nativeConn = await LanceDbConnection.new(uri, finalOptions);
return new LocalConnection(nativeConn);
}
@@ -439,7 +439,7 @@ export interface FtsOptions {
*
* "raw" - Raw tokenizer. This tokenizer does not split the text into tokens and indexes the entire text as a single token.
*/
baseTokenizer?: "simple" | "whitespace" | "raw";
baseTokenizer?: "simple" | "whitespace" | "raw" | "ngram";

/**
* language for stemming and stop words
@@ -472,6 +472,21 @@ export interface FtsOptions {
* whether to remove punctuation
*/
asciiFolding?: boolean;

/**
* ngram min length
*/
ngramMinLength?: number;

/**
* ngram max length
*/
ngramMaxLength?: number;

/**
* whether to only index the prefix of the token for ngram tokenizer
*/
prefixOnly?: boolean;
}
export class Index {
@@ -608,6 +623,9 @@ export class Index {
options?.stem,
options?.removeStopWords,
options?.asciiFolding,
options?.ngramMinLength,
options?.ngramMaxLength,
options?.prefixOnly,
),
);
}
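Editor's sketch of the new ngram options flowing through `Index.fts`, mirroring the "full text search ngram" test earlier in this diff. It assumes an existing `table` with a `text` column:

await table.createIndex("text", {
  config: Index.fts({
    baseTokenizer: "ngram",
    ngramMinLength: 2, // the default minimum is 3
    prefixOnly: true, // only match from the start of each token
  }),
  replace: true,
});
const hits = await table.search("la").toArray();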
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
import { Data, Schema, fromDataToBuffer } from "./arrow";
import { NativeMergeInsertBuilder } from "./native";
import { MergeResult, NativeMergeInsertBuilder } from "./native";

/** A builder used to create and run a merge insert operation */
export class MergeInsertBuilder {
@@ -73,9 +73,12 @@ export class MergeInsertBuilder {
/**
* Executes the merge insert operation
*
* Nothing is returned but the `Table` is updated
* @returns {Promise<MergeResult>} the merge result
*/
async execute(data: Data): Promise<void> {
async execute(
data: Data,
execOptions?: Partial<WriteExecutionOptions>,
): Promise<MergeResult> {
let schema: Schema;
if (this.#schema instanceof Promise) {
schema = await this.#schema;
@@ -83,7 +86,28 @@ export class MergeInsertBuilder {
} else {
schema = this.#schema;
}

if (execOptions?.timeoutMs !== undefined) {
this.#native.setTimeout(execOptions.timeoutMs);
}

const buffer = await fromDataToBuffer(data, undefined, schema);
await this.#native.execute(buffer);
return await this.#native.execute(buffer);
}
}

export interface WriteExecutionOptions {
/**
* Maximum time to run the operation before cancelling it.
*
* By default, there is a 30-second timeout that is only enforced after the
* first attempt. This is to prevent spending too long retrying to resolve
* conflicts. For example, if a write attempt takes 20 seconds and fails,
* the second attempt will be cancelled after 10 seconds, hitting the
* 30-second timeout. However, a write that takes one hour and succeeds on the
* first attempt will not be cancelled.
*
* When this is set, the timeout is enforced on all attempts, including the first.
*/
timeoutMs?: number;
}
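Editor's sketch of the new timeout knob, following the merge-insert test earlier in this diff; the key column and `newRows` are illustrative:

const result = await table
  .mergeInsert("id")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute(newRows, { timeoutMs: 10_000 }); // enforced on every attempt, including the first
// MergeResult reports the new table version and per-kind row counts.
console.log(result.version, result.numInsertedRows, result.numUpdatedRows, result.numDeletedRows);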
@@ -448,6 +448,10 @@ export class VectorQuery extends QueryBase<NativeVectorQuery> {
* For best results we recommend tuning this parameter with a benchmark against
* your actual data to find the smallest possible value that will still give
* you the desired recall.
*
* For more fine-grained control over behavior when you have a very narrow filter
* you can use `minimumNprobes` and `maximumNprobes`. This method sets both
* the minimum and maximum to the same value.
*/
nprobes(nprobes: number): VectorQuery {
super.doCall((inner) => inner.nprobes(nprobes));
@@ -455,6 +459,33 @@ export class VectorQuery extends QueryBase<NativeVectorQuery> {
return this;
}

/**
* Set the minimum number of probes used.
*
* This controls the minimum number of partitions that will be searched. This
* parameter will impact every query against a vector index, regardless of the
* filter. See `nprobes` for more details. Higher values will increase recall
* but will also increase latency.
*/
minimumNprobes(minimumNprobes: number): VectorQuery {
super.doCall((inner) => inner.minimumNprobes(minimumNprobes));
return this;
}

/**
* Set the maximum number of probes used.
*
* This controls the maximum number of partitions that will be searched. If this
* number is greater than minimumNprobes then the excess partitions will _only_ be
* searched if we have not found enough results. This can be useful when there is
* a narrow filter to allow these queries to spend more time searching and avoid
* potential false negatives.
*/
maximumNprobes(maximumNprobes: number): VectorQuery {
super.doCall((inner) => inner.maximumNprobes(maximumNprobes));
return this;
}
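Editor's sketch combining the two knobs on a filtered vector query, per the docs above; the table, query vector, and filter expression are illustrative:

const rst = await tbl
  .query()
  .nearestTo(queryVec)
  .where("category = 'rare'") // narrow filter: few rows survive per partition
  .minimumNprobes(10) // always search at least 10 partitions
  .maximumNprobes(40) // widen up to 40 only if too few results were found
  .limit(10)
  .toArrow();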
/*
* Set the distance range to use
*
@@ -762,6 +793,31 @@ export enum FullTextQueryType {
MatchPhrase = "match_phrase",
Boost = "boost",
MultiMatch = "multi_match",
Boolean = "boolean",
}

/**
* Enum representing the logical operators used in full-text queries.
*
* - `And`: All terms must match.
* - `Or`: At least one term must match.
*/
export enum Operator {
And = "AND",
Or = "OR",
}

/**
* Enum representing the occurrence of terms in full-text queries.
*
* - `Must`: The term must be present in the document.
* - `Should`: The term should contribute to the document score, but is not required.
* - `MustNot`: The term must not be present in the document.
*/
export enum Occur {
Should = "SHOULD",
Must = "MUST",
MustNot = "MUST_NOT",
}
/**
|
||||
@@ -791,6 +847,7 @@ export function instanceOfFullTextQuery(obj: any): obj is FullTextQuery {
export class MatchQuery implements FullTextQuery {
  /** @ignore */
  public readonly inner: JsFullTextQuery;

  /**
   * Creates an instance of MatchQuery.
   *
@@ -800,6 +857,8 @@ export class MatchQuery implements FullTextQuery {
   * - `boost`: The boost factor for the query (default is 1.0).
   * - `fuzziness`: The fuzziness level for the query (default is 0).
   * - `maxExpansions`: The maximum number of terms to consider for fuzzy matching (default is 50).
   * - `operator`: The logical operator to use for combining terms in the query (default is "OR").
   * - `prefixLength`: The number of beginning characters left unchanged for fuzzy matching.
   */
  constructor(
    query: string,
@@ -808,6 +867,8 @@ export class MatchQuery implements FullTextQuery {
      boost?: number;
      fuzziness?: number;
      maxExpansions?: number;
      operator?: Operator;
      prefixLength?: number;
    },
  ) {
    let fuzziness = options?.fuzziness;
@@ -820,6 +881,8 @@ export class MatchQuery implements FullTextQuery {
      options?.boost ?? 1.0,
      fuzziness,
      options?.maxExpansions ?? 50,
      options?.operator ?? Operator.Or,
      options?.prefixLength ?? 0,
    );
  }
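A short sketch of the new `MatchQuery` options; the column name and data are invented:

```ts
// Tolerate up to 2 edits per term, require the first 3 characters to
// match exactly, and require *all* terms to be present (AND).
const hits = await tbl
  .search(
    new MatchQuery("swiss alps", "description", {
      fuzziness: 2,
      prefixLength: 3,
      operator: Operator.And,
    }),
  )
  .limit(5)
  .toArray();
```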
@@ -836,9 +899,11 @@ export class PhraseQuery implements FullTextQuery {
   *
   * @param query - The phrase to search for in the specified column.
   * @param column - The name of the column to search within.
   * @param options - Optional parameters for the phrase query.
   * - `slop`: The maximum number of intervening unmatched positions allowed between words in the phrase (default is 0).
   */
  constructor(query: string, column: string) {
    this.inner = JsFullTextQuery.phraseQuery(query, column);
  constructor(query: string, column: string, options?: { slop?: number }) {
    this.inner = JsFullTextQuery.phraseQuery(query, column, options?.slop ?? 0);
  }

  queryType(): FullTextQueryType {
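The effect of `slop`, sketched against a hypothetical `text` column:

```ts
// slop: 1 matches "fox jumps" and also "fox quickly jumps",
// since one unmatched position may sit between the phrase words.
const phrased = await tbl
  .search(new PhraseQuery("fox jumps", "text", { slop: 1 }))
  .limit(5)
  .toArray();
```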
@@ -889,18 +954,21 @@ export class MultiMatchQuery implements FullTextQuery {
   * @param columns - An array of column names to search within.
   * @param options - Optional parameters for the multi-match query.
   * - `boosts`: An array of boost factors for each column (default is 1.0 for all).
   * - `operator`: The logical operator to use for combining terms in the query (default is "OR").
   */
  constructor(
    query: string,
    columns: string[],
    options?: {
      boosts?: number[];
      operator?: Operator;
    },
  ) {
    this.inner = JsFullTextQuery.multiMatchQuery(
      query,
      columns,
      options?.boosts,
      options?.operator ?? Operator.Or,
    );
  }

@@ -908,3 +976,23 @@ export class MultiMatchQuery implements FullTextQuery {
    return FullTextQueryType.MultiMatch;
  }
}
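A hedged sketch weighting one column above another; the column names are illustrative:

```ts
// Search both columns, scoring "title" matches twice as high.
const ranked = await tbl
  .search(
    new MultiMatchQuery("vector database", ["title", "body"], {
      boosts: [2.0, 1.0],
      operator: Operator.Or,
    }),
  )
  .limit(10)
  .toArray();
```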

export class BooleanQuery implements FullTextQuery {
  /** @ignore */
  public readonly inner: JsFullTextQuery;
  /**
   * Creates an instance of BooleanQuery.
   *
   * @param queries - An array of (Occur, FullTextQuery) pairs to combine.
   * Occur specifies whether each sub-query must, should, or must not match.
   */
  constructor(queries: [Occur, FullTextQuery][]) {
    this.inner = JsFullTextQuery.booleanQuery(
      queries.map(([occur, query]) => [occur, query.inner]),
    );
  }

  queryType(): FullTextQueryType {
    return FullTextQueryType.Boolean;
  }
}
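Combining the pieces above: a hedged sketch of a boolean full-text query over invented columns:

```ts
// Require "lancedb", prefer docs mentioning "tutorial", exclude drafts.
const combined = new BooleanQuery([
  [Occur.Must, new MatchQuery("lancedb", "body")],
  [Occur.Should, new MatchQuery("tutorial", "title")],
  [Occur.MustNot, new MatchQuery("draft", "status")],
]);
const docs = await tbl.search(combined).limit(10).toArray();
```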
@@ -6,9 +6,11 @@ import {
  Data,
  DataType,
  IntoVector,
  MultiVector,
  Schema,
  dataTypeToJson,
  fromDataToBuffer,
  isMultiVector,
  tableFromIPC,
} from "./arrow";

@@ -16,12 +18,18 @@ import { EmbeddingFunctionConfig, getRegistry } from "./embedding/registry";
import { IndexOptions } from "./indices";
import { MergeInsertBuilder } from "./merge";
import {
  AddColumnsResult,
  AddColumnsSql,
  AddResult,
  AlterColumnsResult,
  DeleteResult,
  DropColumnsResult,
  IndexConfig,
  IndexStatistics,
  OptimizeStats,
  TableStatistics,
  Tags,
  UpdateResult,
  Table as _NativeTable,
} from "./native";
import {
@@ -69,10 +77,10 @@ export interface OptimizeOptions {
   * // Delete all versions older than 1 day
   * const olderThan = new Date();
   * olderThan.setDate(olderThan.getDate() - 1);
   * tbl.cleanupOlderVersions(olderThan);
   * tbl.optimize({cleanupOlderThan: olderThan});
   *
   * // Delete all versions except the current version
   * tbl.cleanupOlderVersions(new Date());
   * tbl.optimize({cleanupOlderThan: new Date()});
   */
  cleanupOlderThan: Date;
  deleteUnverified: boolean;
@@ -126,12 +134,19 @@ export abstract class Table {
  /**
   * Insert records into this Table.
   * @param {Data} data Records to be inserted into the Table
   * @returns {Promise<AddResult>} A promise that resolves to an object
   * containing the new version number of the table
   */
  abstract add(data: Data, options?: Partial<AddDataOptions>): Promise<void>;
  abstract add(
    data: Data,
    options?: Partial<AddDataOptions>,
  ): Promise<AddResult>;
  /**
   * Update existing records in the Table
   * @param opts.values The values to update. The keys are the column names and the values
   * are the values to set.
   * @returns {Promise<UpdateResult>} A promise that resolves to an object containing
   * the number of rows updated and the new version number
   * @example
   * ```ts
   * table.update({where:"x = 2", values:{"vector": [10, 10]}})
@@ -141,11 +156,13 @@ export abstract class Table {
    opts: {
      values: Map<string, IntoSql> | Record<string, IntoSql>;
    } & Partial<UpdateOptions>,
  ): Promise<void>;
  ): Promise<UpdateResult>;
  /**
   * Update existing records in the Table
   * @param opts.valuesSql The values to update. The keys are the column names and the values
   * are the values to set. The values are SQL expressions.
   * @returns {Promise<UpdateResult>} A promise that resolves to an object containing
   * the number of rows updated and the new version number
   * @example
   * ```ts
   * table.update({where:"x = 2", valuesSql:{"x": "x + 1"}})
@@ -155,7 +172,7 @@
    opts: {
      valuesSql: Map<string, string> | Record<string, string>;
    } & Partial<UpdateOptions>,
  ): Promise<void>;
  ): Promise<UpdateResult>;
  /**
   * Update existing records in the Table
   *
@@ -173,6 +190,8 @@
   * repeatedly calling this method.
   * @param {Map<string, string> | Record<string, string>} updates - the
   * columns to update
   * @returns {Promise<UpdateResult>} A promise that resolves to an object
   * containing the number of rows updated and the new version number
   *
   * Keys in the map should specify the name of the column to update.
   * Values in the map provide the new value of the column. These can
@@ -184,12 +203,16 @@
  abstract update(
    updates: Map<string, string> | Record<string, string>,
    options?: Partial<UpdateOptions>,
  ): Promise<void>;
  ): Promise<UpdateResult>;

  /** Count the total number of rows in the dataset. */
  abstract countRows(filter?: string): Promise<number>;
  /** Delete the rows that satisfy the predicate. */
  abstract delete(predicate: string): Promise<void>;
  /**
   * Delete the rows that satisfy the predicate.
   * @returns {Promise<DeleteResult>} A promise that resolves to an object
   * containing the new version number of the table
   */
  abstract delete(predicate: string): Promise<DeleteResult>;
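Because every mutating call now reports the resulting table version, callers can track versions without a separate `version()` round trip. A hedged sketch with invented rows (note napi exposes the Rust `rows_updated` field as `rowsUpdated`):

```ts
const { version: v1 } = await tbl.add([{ id: 1, vector: [0.1, 0.2] }]);
const upd = await tbl.update({ where: "id = 1", valuesSql: { id: "id + 1" } });
console.log(`updated ${upd.rowsUpdated} rows, now at version ${upd.version}`);
const { version: v2 } = await tbl.delete("id = 2");
```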
  /**
   * Create an index to speed up queries.
   *
@@ -325,7 +348,7 @@
   * if the query is a string and no embedding function is defined, it will be treated as a full text search query
   */
  abstract search(
    query: string | IntoVector | FullTextQuery,
    query: string | IntoVector | MultiVector | FullTextQuery,
    queryType?: string,
    ftsColumns?: string | string[],
  ): VectorQuery | Query;
@@ -336,22 +359,30 @@
   * is the same thing as calling `nearestTo` on the builder returned
   * by `query`. @see {@link Query#nearestTo} for more details.
   */
  abstract vectorSearch(vector: IntoVector): VectorQuery;
  abstract vectorSearch(vector: IntoVector | MultiVector): VectorQuery;
  /**
   * Add new columns with defined values.
   * @param {AddColumnsSql[]} newColumnTransforms pairs of column names and
   * the SQL expression to use to calculate the value of the new column. These
   * expressions will be evaluated for each row in the table, and can
   * reference existing columns in the table.
   * @returns {Promise<AddColumnsResult>} A promise that resolves to an object
   * containing the new version number of the table after adding the columns.
   */
  abstract addColumns(newColumnTransforms: AddColumnsSql[]): Promise<void>;
  abstract addColumns(
    newColumnTransforms: AddColumnsSql[],
  ): Promise<AddColumnsResult>;

  /**
   * Alter the name or nullability of columns.
   * @param {ColumnAlteration[]} columnAlterations One or more alterations to
   * apply to columns.
   * @returns {Promise<AlterColumnsResult>} A promise that resolves to an object
   * containing the new version number of the table after altering the columns.
   */
  abstract alterColumns(columnAlterations: ColumnAlteration[]): Promise<void>;
  abstract alterColumns(
    columnAlterations: ColumnAlteration[],
  ): Promise<AlterColumnsResult>;
  /**
   * Drop one or more columns from the dataset
   *
@@ -362,8 +393,10 @@
   * @param {string[]} columnNames The names of the columns to drop. These can
   * be nested column references (e.g. "a.b.c") or top-level column names
   * (e.g. "a").
   * @returns {Promise<DropColumnsResult>} A promise that resolves to an object
   * containing the new version number of the table after dropping the columns.
   */
  abstract dropColumns(columnNames: string[]): Promise<void>;
  abstract dropColumns(columnNames: string[]): Promise<DropColumnsResult>;
  /** Retrieve the version of the table */

  abstract version(): Promise<number>;
@@ -529,12 +562,12 @@ export class LocalTable extends Table {
    return tbl.schema;
  }

  async add(data: Data, options?: Partial<AddDataOptions>): Promise<void> {
  async add(data: Data, options?: Partial<AddDataOptions>): Promise<AddResult> {
    const mode = options?.mode ?? "append";
    const schema = await this.schema();

    const buffer = await fromDataToBuffer(data, undefined, schema);
    await this.inner.add(buffer, mode);
    return await this.inner.add(buffer, mode);
  }

  async update(
@@ -547,7 +580,7 @@
      valuesSql: Map<string, string> | Record<string, string>;
    } & Partial<UpdateOptions>),
    options?: Partial<UpdateOptions>,
  ) {
  ): Promise<UpdateResult> {
    const isValues =
      "values" in optsOrUpdates && typeof optsOrUpdates.values !== "string";
    const isValuesSql =
@@ -594,15 +627,15 @@
      columns = Object.entries(optsOrUpdates as Record<string, string>);
      predicate = options?.where;
    }
    await this.inner.update(predicate, columns);
    return await this.inner.update(predicate, columns);
  }

  async countRows(filter?: string): Promise<number> {
    return await this.inner.countRows(filter);
  }

  async delete(predicate: string): Promise<void> {
    await this.inner.delete(predicate);
  async delete(predicate: string): Promise<DeleteResult> {
    return await this.inner.delete(predicate);
  }

  async createIndex(column: string, options?: Partial<IndexOptions>) {
@@ -637,7 +670,7 @@
  }

  search(
    query: string | IntoVector | FullTextQuery,
    query: string | IntoVector | MultiVector | FullTextQuery,
    queryType: string = "auto",
    ftsColumns?: string | string[],
  ): VectorQuery | Query {
@@ -684,17 +717,29 @@
    return this.query().nearestTo(queryPromise);
  }

  vectorSearch(vector: IntoVector): VectorQuery {
  vectorSearch(vector: IntoVector | MultiVector): VectorQuery {
    if (isMultiVector(vector)) {
      const query = this.query().nearestTo(vector[0]);
      for (const v of vector.slice(1)) {
        query.addQueryVector(v);
      }
      return query;
    }

    return this.query().nearestTo(vector);
  }
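A sketch of the multi-vector path; it assumes, as the loop above suggests, that a `MultiVector` is simply an array of query vectors:

```ts
// Search with several query vectors at once; the extra vectors are
// attached via addQueryVector and results are ranked across all of them.
const multi = await tbl
  .vectorSearch([
    [0.1, 0.2, 0.3],
    [0.4, 0.5, 0.6],
  ])
  .limit(10)
  .toArray();
```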

  // TODO: Support BatchUDF

  async addColumns(newColumnTransforms: AddColumnsSql[]): Promise<void> {
    await this.inner.addColumns(newColumnTransforms);
  async addColumns(
    newColumnTransforms: AddColumnsSql[],
  ): Promise<AddColumnsResult> {
    return await this.inner.addColumns(newColumnTransforms);
  }

  async alterColumns(columnAlterations: ColumnAlteration[]): Promise<void> {
  async alterColumns(
    columnAlterations: ColumnAlteration[],
  ): Promise<AlterColumnsResult> {
    const processedAlterations = columnAlterations.map((alteration) => {
      if (typeof alteration.dataType === "string") {
        return {
@@ -715,11 +760,11 @@
      }
    });

    await this.inner.alterColumns(processedAlterations);
    return await this.inner.alterColumns(processedAlterations);
  }

  async dropColumns(columnNames: string[]): Promise<void> {
    await this.inner.dropColumns(columnNames);
  async dropColumns(columnNames: string[]): Promise<DropColumnsResult> {
    return await this.inner.dropColumns(columnNames);
  }

  async version(): Promise<number> {
@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-arm64",
  "version": "0.19.1-beta.0",
  "version": "0.21.2-beta.1",
  "os": ["darwin"],
  "cpu": ["arm64"],
  "main": "lancedb.darwin-arm64.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-x64",
  "version": "0.19.1-beta.0",
  "version": "0.21.2-beta.1",
  "os": ["darwin"],
  "cpu": ["x64"],
  "main": "lancedb.darwin-x64.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-arm64-gnu",
  "version": "0.19.1-beta.0",
  "version": "0.21.2-beta.1",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "lancedb.linux-arm64-gnu.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-arm64-musl",
  "version": "0.19.1-beta.0",
  "version": "0.21.2-beta.1",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "lancedb.linux-arm64-musl.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-x64-gnu",
  "version": "0.19.1-beta.0",
  "version": "0.21.2-beta.1",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "lancedb.linux-x64-gnu.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-x64-musl",
  "version": "0.19.1-beta.0",
  "version": "0.21.2-beta.1",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "lancedb.linux-x64-musl.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-win32-arm64-msvc",
  "version": "0.19.1-beta.0",
  "version": "0.21.2-beta.1",
  "os": [
    "win32"
  ],

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-win32-x64-msvc",
  "version": "0.19.1-beta.0",
  "version": "0.21.2-beta.1",
  "os": ["win32"],
  "cpu": ["x64"],
  "main": "lancedb.win32-x64-msvc.node",
nodejs/package-lock.json (generated):
@@ -1,12 +1,12 @@
{
  "name": "@lancedb/lancedb",
  "version": "0.19.1-beta.0",
  "version": "0.21.2-beta.1",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "@lancedb/lancedb",
      "version": "0.19.1-beta.0",
      "version": "0.21.2-beta.1",
      "cpu": [
        "x64",
        "arm64"

@@ -11,7 +11,7 @@
    "ann"
  ],
  "private": false,
  "version": "0.19.1-beta.0",
  "version": "0.21.2-beta.1",
  "main": "dist/index.js",
  "exports": {
    ".": "./dist/index.js",
@@ -74,6 +74,10 @@ impl Connection {
            builder = builder.host_override(&host_override);
        }

        if let Some(session) = options.session {
            builder = builder.session(session.inner.clone());
        }

        Ok(Self::inner_new(builder.execute().await.default_error()?))
    }

@@ -123,34 +123,44 @@ impl Index {
        stem: Option<bool>,
        remove_stop_words: Option<bool>,
        ascii_folding: Option<bool>,
        ngram_min_length: Option<u32>,
        ngram_max_length: Option<u32>,
        prefix_only: Option<bool>,
    ) -> Self {
        let mut opts = FtsIndexBuilder::default();
        let mut tokenizer_configs = opts.tokenizer_configs.clone();
        if let Some(with_position) = with_position {
            opts = opts.with_position(with_position);
        }
        if let Some(base_tokenizer) = base_tokenizer {
            tokenizer_configs = tokenizer_configs.base_tokenizer(base_tokenizer);
            opts = opts.base_tokenizer(base_tokenizer);
        }
        if let Some(language) = language {
            tokenizer_configs = tokenizer_configs.language(&language).unwrap();
            opts = opts.language(&language).unwrap();
        }
        if let Some(max_token_length) = max_token_length {
            tokenizer_configs = tokenizer_configs.max_token_length(Some(max_token_length as usize));
            opts = opts.max_token_length(Some(max_token_length as usize));
        }
        if let Some(lower_case) = lower_case {
            tokenizer_configs = tokenizer_configs.lower_case(lower_case);
            opts = opts.lower_case(lower_case);
        }
        if let Some(stem) = stem {
            tokenizer_configs = tokenizer_configs.stem(stem);
            opts = opts.stem(stem);
        }
        if let Some(remove_stop_words) = remove_stop_words {
            tokenizer_configs = tokenizer_configs.remove_stop_words(remove_stop_words);
            opts = opts.remove_stop_words(remove_stop_words);
        }
        if let Some(ascii_folding) = ascii_folding {
            tokenizer_configs = tokenizer_configs.ascii_folding(ascii_folding);
            opts = opts.ascii_folding(ascii_folding);
        }
        if let Some(ngram_min_length) = ngram_min_length {
            opts = opts.ngram_min_length(ngram_min_length);
        }
        if let Some(ngram_max_length) = ngram_max_length {
            opts = opts.ngram_max_length(ngram_max_length);
        }
        if let Some(prefix_only) = prefix_only {
            opts = opts.ngram_prefix_only(prefix_only);
        }
        opts.tokenizer_configs = tokenizer_configs;

        Self {
            inner: Mutex::new(Some(LanceDbIndex::FTS(opts))),
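On the JS side these tokenizer knobs surface through `Index.fts`; a hedged sketch, assuming the option names mirror the Rust parameters in camelCase:

```ts
import { Index } from "@lancedb/lancedb";

// Hypothetical 3-gram full-text index, useful for substring-style
// matching on short codes; option names assumed from the Rust binding.
await tbl.createIndex("text", {
  config: Index.fts({
    baseTokenizer: "ngram",
    ngramMinLength: 3,
    ngramMaxLength: 3,
    prefixOnly: false,
  }),
});
```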
@@ -14,6 +14,7 @@ pub mod merge;
mod query;
pub mod remote;
mod rerankers;
mod session;
mod table;
mod util;

@@ -34,6 +35,9 @@ pub struct ConnectionOptions {
    ///
    /// The available options are described at https://lancedb.github.io/lancedb/guides/storage/
    pub storage_options: Option<HashMap<String, String>>,
    /// (For LanceDB OSS only): the session to use for this connection. Holds
    /// shared caches and other session-specific state.
    pub session: Option<session::Session>,

    /// (For LanceDB cloud only): configuration for the remote HTTP client.
    pub client_config: Option<remote::ClientConfig>,
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use std::time::Duration;

use lancedb::{arrow::IntoArrow, ipc::ipc_file_to_batches, table::merge::MergeInsertBuilder};
use napi::bindgen_prelude::*;
use napi_derive::napi;

use crate::error::convert_error;
use crate::{error::convert_error, table::MergeResult};

#[napi]
#[derive(Clone)]
@@ -36,8 +38,13 @@ impl NativeMergeInsertBuilder {
        this
    }

    #[napi]
    pub fn set_timeout(&mut self, timeout: u32) {
        self.inner.timeout(Duration::from_millis(timeout as u64));
    }

    #[napi(catch_unwind)]
    pub async fn execute(&self, buf: Buffer) -> napi::Result<()> {
    pub async fn execute(&self, buf: Buffer) -> napi::Result<MergeResult> {
        let data = ipc_file_to_batches(buf.to_vec())
            .and_then(IntoArrow::into_arrow)
            .map_err(|e| {
@@ -46,12 +53,13 @@ impl NativeMergeInsertBuilder {

        let this = self.clone();

        this.inner.execute(data).await.map_err(|e| {
        let res = this.inner.execute(data).await.map_err(|e| {
            napi::Error::from_reason(format!(
                "Failed to execute merge insert: {}",
                convert_error(&e)
            ))
        })
        })?;
        Ok(res.into())
    }
}
@@ -4,7 +4,8 @@
use std::sync::Arc;

use lancedb::index::scalar::{
    BoostQuery, FtsQuery, FullTextSearchQuery, MatchQuery, MultiMatchQuery, PhraseQuery,
    BooleanQuery, BoostQuery, FtsQuery, FullTextSearchQuery, MatchQuery, MultiMatchQuery, Occur,
    Operator, PhraseQuery,
};
use lancedb::query::ExecutableQuery;
use lancedb::query::Query as LanceDbQuery;
@@ -177,6 +178,31 @@ impl VectorQuery {
        self.inner = self.inner.clone().nprobes(nprobe as usize);
    }

    #[napi]
    pub fn minimum_nprobes(&mut self, minimum_nprobe: u32) -> napi::Result<()> {
        self.inner = self
            .inner
            .clone()
            .minimum_nprobes(minimum_nprobe as usize)
            .default_error()?;
        Ok(())
    }

    #[napi]
    pub fn maximum_nprobes(&mut self, maximum_nprobes: u32) -> napi::Result<()> {
        let maximum_nprobes = if maximum_nprobes == 0 {
            None
        } else {
            Some(maximum_nprobes as usize)
        };
        self.inner = self
            .inner
            .clone()
            .maximum_nprobes(maximum_nprobes)
            .default_error()?;
        Ok(())
    }

    #[napi]
    pub fn distance_range(&mut self, lower_bound: Option<f64>, upper_bound: Option<f64>) {
        // napi doesn't support f32, so we have to convert to f32
@@ -308,6 +334,8 @@ impl JsFullTextQuery {
        boost: f64,
        fuzziness: Option<u32>,
        max_expansions: u32,
        operator: String,
        prefix_length: u32,
    ) -> napi::Result<Self> {
        Ok(Self {
            inner: MatchQuery::new(query)
@@ -315,14 +343,23 @@ impl JsFullTextQuery {
                .with_boost(boost as f32)
                .with_fuzziness(fuzziness)
                .with_max_expansions(max_expansions as usize)
                .with_operator(
                    Operator::try_from(operator.as_str()).map_err(|e| {
                        napi::Error::from_reason(format!("Invalid operator: {}", e))
                    })?,
                )
                .with_prefix_length(prefix_length)
                .into(),
        })
    }

    #[napi(factory)]
    pub fn phrase_query(query: String, column: String) -> napi::Result<Self> {
    pub fn phrase_query(query: String, column: String, slop: u32) -> napi::Result<Self> {
        Ok(Self {
            inner: PhraseQuery::new(query).with_column(Some(column)).into(),
            inner: PhraseQuery::new(query)
                .with_column(Some(column))
                .with_slop(slop)
                .into(),
        })
    }

@@ -348,6 +385,7 @@ impl JsFullTextQuery {
        query: String,
        columns: Vec<String>,
        boosts: Option<Vec<f64>>,
        operator: String,
    ) -> napi::Result<Self> {
        let q = match boosts {
            Some(boosts) => MultiMatchQuery::try_new(query, columns)
@@ -358,7 +396,37 @@ impl JsFullTextQuery {
                napi::Error::from_reason(format!("Failed to create multi match query: {}", e))
            })?;

        Ok(Self { inner: q.into() })
        let operator = Operator::try_from(operator.as_str()).map_err(|e| {
            napi::Error::from_reason(format!("Invalid operator for multi match query: {}", e))
        })?;

        Ok(Self {
            inner: q.with_operator(operator).into(),
        })
    }

    #[napi(factory)]
    pub fn boolean_query(queries: Vec<(String, &JsFullTextQuery)>) -> napi::Result<Self> {
        let mut sub_queries = Vec::with_capacity(queries.len());
        for (occur, q) in queries {
            let occur = Occur::try_from(occur.as_str())
                .map_err(|e| napi::Error::from_reason(e.to_string()))?;
            sub_queries.push((occur, q.inner.clone()));
        }
        Ok(Self {
            inner: BooleanQuery::new(sub_queries).into(),
        })
    }

    #[napi(getter)]
    pub fn query_type(&self) -> String {
        match self.inner {
            FtsQuery::Match(_) => "match".to_string(),
            FtsQuery::Phrase(_) => "phrase".to_string(),
            FtsQuery::Boost(_) => "boost".to_string(),
            FtsQuery::MultiMatch(_) => "multi_match".to_string(),
            FtsQuery::Boolean(_) => "boolean".to_string(),
        }
    }
}
nodejs/src/session.rs (new file):
@@ -0,0 +1,102 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use std::sync::Arc;

use lancedb::{ObjectStoreRegistry, Session as LanceSession};
use napi::bindgen_prelude::*;
use napi_derive::*;

/// A session for managing caches and object stores across LanceDB operations.
///
/// Sessions allow you to configure cache sizes for index and metadata caches,
/// which can significantly impact memory use and performance. They can
/// also be re-used across multiple connections to share the same cache state.
#[napi]
#[derive(Clone)]
pub struct Session {
    pub(crate) inner: Arc<LanceSession>,
}

impl std::fmt::Debug for Session {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Session")
            .field("size_bytes", &self.inner.size_bytes())
            .field("approx_num_items", &self.inner.approx_num_items())
            .finish()
    }
}

#[napi]
impl Session {
    /// Create a new session with custom cache sizes.
    ///
    /// # Parameters
    ///
    /// - `index_cache_size_bytes`: The size of the index cache in bytes.
    ///   Index data is stored in memory in this cache to speed up queries.
    ///   Defaults to 6GB if not specified.
    /// - `metadata_cache_size_bytes`: The size of the metadata cache in bytes.
    ///   The metadata cache stores file metadata and schema information in memory.
    ///   This cache improves scan and write performance.
    ///   Defaults to 1GB if not specified.
    #[napi(constructor)]
    pub fn new(
        index_cache_size_bytes: Option<BigInt>,
        metadata_cache_size_bytes: Option<BigInt>,
    ) -> napi::Result<Self> {
        let index_cache_size = index_cache_size_bytes
            .map(|size| size.get_u64().1 as usize)
            .unwrap_or(6 * 1024 * 1024 * 1024); // 6GB default

        let metadata_cache_size = metadata_cache_size_bytes
            .map(|size| size.get_u64().1 as usize)
            .unwrap_or(1024 * 1024 * 1024); // 1GB default

        let session = LanceSession::new(
            index_cache_size,
            metadata_cache_size,
            Arc::new(ObjectStoreRegistry::default()),
        );

        Ok(Self {
            inner: Arc::new(session),
        })
    }

    /// Create a session with default cache sizes.
    ///
    /// This is equivalent to creating a session with 6GB index cache
    /// and 1GB metadata cache.
    #[napi(factory)]
    pub fn default() -> Self {
        Self {
            inner: Arc::new(LanceSession::default()),
        }
    }

    /// Get the current size of the session caches in bytes.
    #[napi]
    pub fn size_bytes(&self) -> BigInt {
        BigInt::from(self.inner.size_bytes())
    }

    /// Get the approximate number of items cached in the session.
    #[napi]
    pub fn approx_num_items(&self) -> u32 {
        self.inner.approx_num_items() as u32
    }
}

// Implement FromNapiValue for Session to work with napi(object)
impl napi::bindgen_prelude::FromNapiValue for Session {
    unsafe fn from_napi_value(
        env: napi::sys::napi_env,
        napi_val: napi::sys::napi_value,
    ) -> napi::Result<Self> {
        let object: napi::bindgen_prelude::ClassInstance<Session> =
            napi::bindgen_prelude::ClassInstance::from_napi_value(env, napi_val)?;
        let copy = object.clone();
        Ok(copy)
    }
}
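From JS, the session can be shared across connections. A hedged sketch, assuming `Session` is re-exported and `connect` accepts it in its options as `ConnectionOptions.session` above suggests:

```ts
import { connect, Session } from "@lancedb/lancedb";

// Cap the index cache at 2 GiB and the metadata cache at 256 MiB,
// then share one cache state across two connections.
const session = new Session(
  BigInt(2 * 1024 * 1024 * 1024),
  BigInt(256 * 1024 * 1024),
);
const dbA = await connect("data/db-a", { session });
const dbB = await connect("data/db-b", { session });
console.log(session.sizeBytes(), session.approxNumItems());
```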
@@ -75,7 +75,7 @@ impl Table {
    }

    #[napi(catch_unwind)]
    pub async fn add(&self, buf: Buffer, mode: String) -> napi::Result<()> {
    pub async fn add(&self, buf: Buffer, mode: String) -> napi::Result<AddResult> {
        let batches = ipc_file_to_batches(buf.to_vec())
            .map_err(|e| napi::Error::from_reason(format!("Failed to read IPC file: {}", e)))?;
        let mut op = self.inner_ref()?.add(batches);
@@ -88,7 +88,8 @@ impl Table {
            return Err(napi::Error::from_reason(format!("Invalid mode: {}", mode)));
        };

        op.execute().await.default_error()
        let res = op.execute().await.default_error()?;
        Ok(res.into())
    }

    #[napi(catch_unwind)]
@@ -101,8 +102,9 @@ impl Table {
    }

    #[napi(catch_unwind)]
    pub async fn delete(&self, predicate: String) -> napi::Result<()> {
        self.inner_ref()?.delete(&predicate).await.default_error()
    pub async fn delete(&self, predicate: String) -> napi::Result<DeleteResult> {
        let res = self.inner_ref()?.delete(&predicate).await.default_error()?;
        Ok(res.into())
    }

    #[napi(catch_unwind)]
@@ -168,7 +170,7 @@ impl Table {
        &self,
        only_if: Option<String>,
        columns: Vec<(String, String)>,
    ) -> napi::Result<u64> {
    ) -> napi::Result<UpdateResult> {
        let mut op = self.inner_ref()?.update();
        if let Some(only_if) = only_if {
            op = op.only_if(only_if);
@@ -176,7 +178,8 @@ impl Table {
        for (column_name, value) in columns {
            op = op.column(column_name, value);
        }
        op.execute().await.default_error()
        let res = op.execute().await.default_error()?;
        Ok(res.into())
    }

    #[napi(catch_unwind)]
@@ -190,21 +193,28 @@ impl Table {
    }

    #[napi(catch_unwind)]
    pub async fn add_columns(&self, transforms: Vec<AddColumnsSql>) -> napi::Result<()> {
    pub async fn add_columns(
        &self,
        transforms: Vec<AddColumnsSql>,
    ) -> napi::Result<AddColumnsResult> {
        let transforms = transforms
            .into_iter()
            .map(|sql| (sql.name, sql.value_sql))
            .collect::<Vec<_>>();
        let transforms = NewColumnTransform::SqlExpressions(transforms);
        self.inner_ref()?
        let res = self
            .inner_ref()?
            .add_columns(transforms, None)
            .await
            .default_error()?;
        Ok(())
        Ok(res.into())
    }

    #[napi(catch_unwind)]
    pub async fn alter_columns(&self, alterations: Vec<ColumnAlteration>) -> napi::Result<()> {
    pub async fn alter_columns(
        &self,
        alterations: Vec<ColumnAlteration>,
    ) -> napi::Result<AlterColumnsResult> {
        for alteration in &alterations {
            if alteration.rename.is_none()
                && alteration.nullable.is_none()
@@ -221,21 +231,23 @@ impl Table {
            .collect::<std::result::Result<Vec<_>, String>>()
            .map_err(napi::Error::from_reason)?;

        self.inner_ref()?
        let res = self
            .inner_ref()?
            .alter_columns(&alterations)
            .await
            .default_error()?;
        Ok(())
        Ok(res.into())
    }

    #[napi(catch_unwind)]
    pub async fn drop_columns(&self, columns: Vec<String>) -> napi::Result<()> {
    pub async fn drop_columns(&self, columns: Vec<String>) -> napi::Result<DropColumnsResult> {
        let col_refs = columns.iter().map(String::as_str).collect::<Vec<_>>();
        self.inner_ref()?
        let res = self
            .inner_ref()?
            .drop_columns(&col_refs)
            .await
            .default_error()?;
        Ok(())
        Ok(res.into())
    }

    #[napi(catch_unwind)]
@@ -642,6 +654,105 @@ pub struct Version {
    pub metadata: HashMap<String, String>,
}

#[napi(object)]
pub struct UpdateResult {
    pub rows_updated: i64,
    pub version: i64,
}

impl From<lancedb::table::UpdateResult> for UpdateResult {
    fn from(value: lancedb::table::UpdateResult) -> Self {
        Self {
            rows_updated: value.rows_updated as i64,
            version: value.version as i64,
        }
    }
}

#[napi(object)]
pub struct AddResult {
    pub version: i64,
}

impl From<lancedb::table::AddResult> for AddResult {
    fn from(value: lancedb::table::AddResult) -> Self {
        Self {
            version: value.version as i64,
        }
    }
}

#[napi(object)]
pub struct DeleteResult {
    pub version: i64,
}

impl From<lancedb::table::DeleteResult> for DeleteResult {
    fn from(value: lancedb::table::DeleteResult) -> Self {
        Self {
            version: value.version as i64,
        }
    }
}

#[napi(object)]
pub struct MergeResult {
    pub version: i64,
    pub num_inserted_rows: i64,
    pub num_updated_rows: i64,
    pub num_deleted_rows: i64,
}

impl From<lancedb::table::MergeResult> for MergeResult {
    fn from(value: lancedb::table::MergeResult) -> Self {
        Self {
            version: value.version as i64,
            num_inserted_rows: value.num_inserted_rows as i64,
            num_updated_rows: value.num_updated_rows as i64,
            num_deleted_rows: value.num_deleted_rows as i64,
        }
    }
}
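These `#[napi(object)]` structs arrive in JS with camelCase fields. A hedged sketch of consuming a merge result; `freshRows` is placeholder data:

```ts
const res = await tbl
  .mergeInsert("id")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute(freshRows); // freshRows: placeholder record array
console.log(
  `v${res.version}: +${res.numInsertedRows} ~${res.numUpdatedRows} -${res.numDeletedRows}`,
);
```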
#[napi(object)]
pub struct AddColumnsResult {
    pub version: i64,
}

impl From<lancedb::table::AddColumnsResult> for AddColumnsResult {
    fn from(value: lancedb::table::AddColumnsResult) -> Self {
        Self {
            version: value.version as i64,
        }
    }
}

#[napi(object)]
pub struct AlterColumnsResult {
    pub version: i64,
}

impl From<lancedb::table::AlterColumnsResult> for AlterColumnsResult {
    fn from(value: lancedb::table::AlterColumnsResult) -> Self {
        Self {
            version: value.version as i64,
        }
    }
}

#[napi(object)]
pub struct DropColumnsResult {
    pub version: i64,
}

impl From<lancedb::table::DropColumnsResult> for DropColumnsResult {
    fn from(value: lancedb::table::DropColumnsResult) -> Self {
        Self {
            version: value.version as i64,
        }
    }
}

#[napi]
pub struct TagContents {
    pub version: i64,
@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.22.1-beta.1"
current_version = "0.24.2"
parse = """(?x)
    (?P<major>0|[1-9]\\d*)\\.
    (?P<minor>0|[1-9]\\d*)\\.
python/CLAUDE.md (new file):
@@ -0,0 +1,19 @@
These are the Python bindings of LanceDB.
The core Rust library is in the `../rust/lancedb` directory, the Rust binding
code is in the `src/` directory, and the Python bindings are in the `lancedb/` directory.

Common commands:

* Build: `make develop`
* Format: `make format`
* Lint: `make check`
* Fix lints: `make fix`
* Test: `make test`
* Doc test: `make doctest`

Before committing changes, run lints and then formatting.

When you change the Rust code, you will need to recompile the Python bindings: `make develop`.

When you export new types from Rust to Python, you must manually update `python/lancedb/_lancedb.pyi`
with the corresponding type hints. You can run `pyright` to check for type errors in the Python code.
@@ -1,6 +1,6 @@
[package]
name = "lancedb-python"
version = "0.22.1-beta.1"
version = "0.24.2"
edition.workspace = true
description = "Python bindings for LanceDB"
license.workspace = true
@@ -14,11 +14,11 @@ name = "_lancedb"
crate-type = ["cdylib"]

[dependencies]
arrow = { version = "54.1", features = ["pyarrow"] }
arrow = { version = "55.1", features = ["pyarrow"] }
lancedb = { path = "../rust/lancedb", default-features = false }
env_logger.workspace = true
pyo3 = { version = "0.23", features = ["extension-module", "abi3-py39"] }
pyo3-async-runtimes = { version = "0.23", features = [
pyo3 = { version = "0.24", features = ["extension-module", "abi3-py39"] }
pyo3-async-runtimes = { version = "0.24", features = [
    "attributes",
    "tokio-runtime",
] }
@@ -27,7 +27,7 @@ futures.workspace = true
tokio = { version = "1.40", features = ["sync"] }

[build-dependencies]
pyo3-build-config = { version = "0.23", features = [
pyo3-build-config = { version = "0.24", features = [
    "extension-module",
    "abi3-py39",
] }

@@ -7,7 +7,7 @@ dependencies = [
    "numpy",
    "overrides>=0.7",
    "packaging",
    "pyarrow>=14",
    "pyarrow>=16",
    "pydantic>=1.10",
    "tqdm>=4.27.0",
]
@@ -60,6 +60,7 @@ tests = [
    "pyarrow-stubs",
    "pylance>=0.25",
    "requests",
    "datafusion",
]
dev = [
    "ruff",
@@ -84,8 +85,8 @@ embeddings = [
    "boto3>=1.28.57",
    "awscli>=1.29.57",
    "botocore>=1.31.57",
    "ollama",
    "ibm-watsonx-ai>=1.1.2",
    'ibm-watsonx-ai>=1.1.2; python_version >= "3.10"',
    "ollama>=0.3.0",
]
azure = ["adlfs>=2024.2.0"]