Mirror of https://github.com/lancedb/lancedb.git (synced 2025-12-23 05:19:58 +00:00)

Compare commits: python-v0. ... python-v0.
236 commits (SHA1):

ce24457531, 087fe6343d, ab8cbe62dd, f076bb41f4, 902fb83d54, 779118339f, 03b62599d7, 4c999fb651, 6d23d32ab5, 704cec34e1, a300a238db, a41ff1df0a, 77b005d849, 167fccc427, 2bffbcefa5, 905552f993, e4898c9313, cab36d94b2, b64252d4fd, 6fc006072c, d4bb59b542, 6b2dd6de51, dbccd9e4f1, b12ebfed4c, 1dadb2aefa, eb9784d7f2, ba755626cc, 7760799cb8, 4beb2d2877, a00b8595d1, 9c8314b4fd, c625b6f2b2, bec8fe6547, dc1150c011, afaefc6264, cb70ff8cee, cbb5a841b1, c72f6770fd, e5a80a5e86, 8d0a7fad1f, b80d4d0134, 9645fe52c2, b77314168d, e08d45e090, 2e3ddb8382, 627ca4c810, f8dae4ffe9, 9eb6119468, 59b57e30ed, fec8d58f06, 84ded9d678, 65696d9713, e2f2ea32e4, d5f2eca754, 7fa455a8a5, 8f42b5874e, 274f19f560, fbcbc75b5b, 008f389bd0, 91af6518d9, af6819762c, 7acece493d, 20e017fedc, 74e578b3c8, d92d9eb3d2, b6cdce7bc9, 316b406265, 8825c7c1dd, 81c85ff702, 570f2154d5, 0525c055fc, 38d11291da, 258e682574, d7afa600b8, 5c7303ab2e, 5895ef4039, 0528cd858a, 6582f43422, 5c7f63388d, d0bc671cac, d37e17593d, cb726d370e, 23ee132546, 7fa090d330, 07bc1c5397, d7a9dbb9fc, 00487afc7d, 1902d65aad, c4fbb65b8e, 875ed7ae6f, 95a46a57ba, 51561e31a0, 7b19120578, 745c34a6a9, db8fa2454d, a67a7b4b42, 496846e532, dadcfebf8e, 67033dbd7f, 05a85cfc2a, 40c5d3d72b, 198f0f80c6, e3f2fd3892, f401ccc599, 81b59139f8, 1026781ab6, 9c699b8cd9, 34bec59bc3, a5fbbf0d66, b42721167b, 543dec9ff0, 04f962f6b0, 19e896ff69, 272e4103b2, 75c257ebb6, 9ee152eb42, c9ae1b1737, 89dc80c42a, 7b020ac799, 529e774bbb, 7c12239305, d83424d6b4, 8bf89f887c, b2160b2304, 1bb82597be, e4eee38b3c, 64fc2be503, dc8054e90d, 1684940946, 695813463c, ed594b0f76, cee2b5ea42, f315f9665a, 5deb26bc8b, 3cc670ac38, 4ade3e31e2, a222d2cd91, 508e621f3d, a1a0472f3f, 3425a6d339, af54e0ce06, 089905fe8f, 554939e5d2, 7a13814922, e9f25f6a12, 419a433244, a9311c4dc0, 178bcf9c90, b9be092cb1, e8c0c52315, a60fa0d3b7, 726d629b9b, b493f56dee, a8b5ad7e74, f8f6264883, d8517117f1, ab66dd5ed2, cbb9a7877c, b7fc223535, 1fdaf7a1a4, d11819c90c, 9b902272f1, 8c0622fa2c, 2191f948c3, acc3b03004, 7f091b8c8e, c19bdd9a24, dad0ff5cd2, a705621067, 39614fdb7d, 96d534d4bc, 5051d30d09, db853c4041, 76d1d22bdc, d8746c61c6, 1a66df2627, 44670076c1, 92f0b16e46, 1620ba3508, 3ae90dde80, 4f07fea6df, 3d7d82cf86, edc4e40a7b, ca3806a02f, 35cff12e31, c6c20cb2bd, 26080ee4c1, ef3a2b5357, c42a201389, 24e42ccd4d, 8a50944061, 40e066bc7c, b3ad105fa0, 6e701d3e1b, 2248aa9508, a6fa69ab89, b3a4efd587, 4708b60bb1, 080ea2f9a4, 32fdde23f8, c44e5c046c, f23aa0a793, 83fc2b1851, 56aa133ee6, 27d9e5c596, ec8271931f, 6c6966600c, 2e170c3c7b, fd92e651d1, c298482ee1, d59f64b5a3, 30ed8c4c43, 4a2cdbf299, 657843d9e9, 1cd76b8498, a38f784081, 647dee4e94, 0844c2dd64, fd2692295c, d4ea50fba1, 0d42297cf8, a6d4125cbf, 5c32a99e61, cefaa75b24, bd62c2384f, f0bc08c0d7, e52ac79c69, f091f57594, a997fd4108, 1486514ccc, a505bc3965, c1738250a3, 1ee63984f5, 2eb2c8862a, 4ea8e178d3, e4485a630e
```diff
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.19.0-beta.0"
+current_version = "0.21.1"
 parse = """(?x)
 (?P<major>0|[1-9]\\d*)\\.
 (?P<minor>0|[1-9]\\d*)\\.
```
.github/workflows/docs.yml (13 changed lines)

```diff
@@ -18,17 +18,24 @@ concurrency:
   group: "pages"
   cancel-in-progress: true
 
+env:
+  # This reduces the disk space needed for the build
+  RUSTFLAGS: "-C debuginfo=0"
+  # according to: https://matklad.github.io/2021/09/04/fast-rust-builds.html
+  # CI builds are faster with incremental disabled.
+  CARGO_INCREMENTAL: "0"
+
 jobs:
   # Single deploy job since we're just deploying
   build:
     environment:
       name: github-pages
       url: ${{ steps.deployment.outputs.page_url }}
-    runs-on: buildjet-8vcpu-ubuntu-2204
+    runs-on: ubuntu-24.04
     steps:
       - name: Checkout
         uses: actions/checkout@v4
-      - name: Install dependecies needed for ubuntu
+      - name: Install dependencies needed for ubuntu
         run: |
           sudo apt install -y protobuf-compiler libssl-dev
           rustup update && rustup default
@@ -38,6 +45,7 @@ jobs:
           python-version: "3.10"
           cache: "pip"
           cache-dependency-path: "docs/requirements.txt"
+      - uses: Swatinem/rust-cache@v2
       - name: Build Python
         working-directory: python
         run: |
@@ -49,7 +57,6 @@ jobs:
           node-version: 20
           cache: 'npm'
          cache-dependency-path: node/package-lock.json
-      - uses: Swatinem/rust-cache@v2
       - name: Install node dependencies
         working-directory: node
         run: |
```
.github/workflows/java.yml (7 changed lines)

```diff
@@ -35,6 +35,9 @@ jobs:
       - uses: Swatinem/rust-cache@v2
         with:
           workspaces: java/core/lancedb-jni
+      - uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          components: rustfmt
       - name: Run cargo fmt
         run: cargo fmt --check
         working-directory: ./java/core/lancedb-jni
@@ -68,6 +71,9 @@ jobs:
       - uses: Swatinem/rust-cache@v2
         with:
           workspaces: java/core/lancedb-jni
+      - uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          components: rustfmt
       - name: Run cargo fmt
         run: cargo fmt --check
         working-directory: ./java/core/lancedb-jni
@@ -110,4 +116,3 @@ jobs:
           -Djdk.reflect.useDirectMethodHandle=false \
           -Dio.netty.tryReflectionSetAccessible=true"
         JAVA_HOME=$JAVA_17 mvn clean test
-
```
.github/workflows/make-release-commit.yml (9 changed lines)

```diff
@@ -84,6 +84,7 @@ jobs:
        run: |
          pip install bump-my-version PyGithub packaging
          bash ci/bump_version.sh ${{ inputs.type }} ${{ inputs.bump-minor }} v $COMMIT_BEFORE_BUMP
+          bash ci/update_lockfiles.sh --amend
      - name: Push new version tag
        if: ${{ !inputs.dry_run }}
        uses: ad-m/github-push-action@master
@@ -92,11 +93,3 @@ jobs:
          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
          branch: ${{ github.ref }}
          tags: true
-      - uses: ./.github/workflows/update_package_lock
-        if: ${{ !inputs.dry_run && inputs.other }}
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: ./.github/workflows/update_package_lock_nodejs
-        if: ${{ !inputs.dry_run && inputs.other }}
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/nodejs.yml (5 changed lines)

```diff
@@ -47,6 +47,9 @@ jobs:
        run: |
          sudo apt update
          sudo apt install -y protobuf-compiler libssl-dev
+      - uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          components: rustfmt, clippy
      - name: Lint
        run: |
          cargo fmt --all -- --check
@@ -113,7 +116,7 @@ jobs:
          set -e
          npm ci
          npm run docs
-          if ! git diff --exit-code; then
+          if ! git diff --exit-code -- . ':(exclude)Cargo.lock'; then
            echo "Docs need to be updated"
            echo "Run 'npm run docs', fix any warnings, and commit the changes."
            exit 1
```
.github/workflows/npm-publish.yml (74 changed lines)

```diff
@@ -18,6 +18,7 @@ on:
     # This should trigger a dry run (we skip the final publish step)
     paths:
       - .github/workflows/npm-publish.yml
+      - Cargo.toml # Change in dependency frequently breaks builds
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
@@ -130,29 +131,24 @@ jobs:
            set -e &&
            apt-get update &&
            apt-get install -y protobuf-compiler pkg-config
 
-        # TODO: re-enable x64 musl builds. I could not figure out why, but it
-        # consistently made GHA runners non-responsive at the end of build. Example:
-        # https://github.com/lancedb/lancedb/actions/runs/13980431071/job/39144319470?pr=2250
-
-        # - target: x86_64-unknown-linux-musl
-        #   # This one seems to need some extra memory
-        #   host: ubuntu-2404-8x-x64
-        #   # https://github.com/napi-rs/napi-rs/blob/main/alpine.Dockerfile
-        #   docker: ghcr.io/napi-rs/napi-rs/nodejs-rust:lts-alpine
-        #   features: ","
-        #   pre_build: |-
-        #     set -e &&
-        #     apk add protobuf-dev curl &&
-        #     ln -s /usr/lib/gcc/x86_64-alpine-linux-musl/14.2.0/crtbeginS.o /usr/lib/crtbeginS.o &&
-        #     ln -s /usr/lib/libgcc_s.so /usr/lib/libgcc.so
-
+        - target: x86_64-unknown-linux-musl
+          # This one seems to need some extra memory
+          host: ubuntu-2404-8x-x64
+          # https://github.com/napi-rs/napi-rs/blob/main/alpine.Dockerfile
+          docker: ghcr.io/napi-rs/napi-rs/nodejs-rust:lts-alpine
+          features: fp16kernels
+          pre_build: |-
+            set -e &&
+            apk add protobuf-dev curl &&
+            ln -s /usr/lib/gcc/x86_64-alpine-linux-musl/14.2.0/crtbeginS.o /usr/lib/crtbeginS.o &&
+            ln -s /usr/lib/libgcc_s.so /usr/lib/libgcc.so &&
+            CC=gcc &&
+            CXX=g++
         - target: aarch64-unknown-linux-gnu
           host: ubuntu-2404-8x-x64
           # https://github.com/napi-rs/napi-rs/blob/main/debian-aarch64.Dockerfile
           docker: ghcr.io/napi-rs/napi-rs/nodejs-rust:lts-debian-aarch64
-          # TODO: enable fp16kernels after https://github.com/lancedb/lance/pull/3559
-          features: ","
+          features: "fp16kernels"
           pre_build: |-
             set -e &&
             apt-get update &&
@@ -170,8 +166,8 @@ jobs:
             set -e &&
             apk add protobuf-dev &&
             rustup target add aarch64-unknown-linux-musl &&
-            export CC="/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc" &&
-            export CXX="/aarch64-linux-musl-cross/bin/aarch64-linux-musl-g++"
+            export CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc &&
+            export CXX_aarch64_unknown_linux_musl=aarch64-linux-musl-g++
     name: build - ${{ matrix.settings.target }}
     runs-on: ${{ matrix.settings.host }}
     defaults:
@@ -509,6 +505,8 @@ jobs:
     name: vectordb NPM Publish
     needs: [node, node-macos, node-linux-gnu, node-windows]
     runs-on: ubuntu-latest
+    permissions:
+      contents: write
     # Only runs on tags that matches the make-release action
     if: startsWith(github.ref, 'refs/tags/v')
     steps:
@@ -536,9 +534,25 @@ jobs:
            npm publish $PUBLISH_ARGS $filename
          done
      - name: Deprecate
        env:
          NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
+        # We need to deprecate the old package to avoid confusion.
+        # Each time we publish a new version, it gets undeprecated.
        run: npm deprecate vectordb "Use @lancedb/lancedb instead."
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          ref: main
+      - name: Update package-lock.json
+        run: |
+          git config user.name 'Lance Release'
+          git config user.email 'lance-dev@lancedb.com'
+          bash ci/update_lockfiles.sh
+      - name: Push new commit
+        uses: ad-m/github-push-action@master
+        with:
+          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
+          branch: main
      - name: Notify Slack Action
        uses: ravsamhq/notify-slack-action@2.3.0
        if: ${{ always() }}
@@ -548,21 +562,3 @@ jobs:
          notification_title: "{workflow} is failing"
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.ACTION_MONITORING_SLACK }}
-
-  update-package-lock:
-    if: startsWith(github.ref, 'refs/tags/v')
-    needs: [release]
-    runs-on: ubuntu-latest
-    permissions:
-      contents: write
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          ref: main
-          token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
-          fetch-depth: 0
-          lfs: true
-      - uses: ./.github/workflows/update_package_lock
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/pypi-publish.yml (1 changed line)

```diff
@@ -8,6 +8,7 @@ on:
     # This should trigger a dry run (we skip the final publish step)
     paths:
       - .github/workflows/pypi-publish.yml
+      - Cargo.toml # Change in dependency frequently breaks builds
 
 jobs:
   linux:
```
.github/workflows/python.yml (5 changed lines)

```diff
@@ -136,9 +136,9 @@ jobs:
      - uses: ./.github/workflows/run_tests
        with:
          integration: true
-      - name: Test without pylance
+      - name: Test without pylance or pandas
        run: |
-          pip uninstall -y pylance
+          pip uninstall -y pylance pandas
          pytest -vv python/tests/test_table.py
      # Make sure wheels are not included in the Rust cache
      - name: Delete wheels
@@ -228,6 +228,7 @@ jobs:
      - name: Install lancedb
        run: |
          pip install "pydantic<2"
+          pip install pyarrow==16
          pip install --extra-index-url https://pypi.fury.io/lancedb/ -e .[tests]
          pip install tantivy
      - name: Run tests
```
.github/workflows/run_tests/action.yml (4 changed lines)

```diff
@@ -24,8 +24,8 @@ runs:
    - name: pytest (with integration)
      shell: bash
      if: ${{ inputs.integration == 'true' }}
-      run: pytest -m "not slow" -x -v --durations=30 python/python/tests
+      run: pytest -m "not slow" -vv --durations=30 python/python/tests
    - name: pytest (no integration tests)
      shell: bash
      if: ${{ inputs.integration != 'true' }}
-      run: pytest -m "not slow and not s3_test" -x -v --durations=30 python/python/tests
+      run: pytest -m "not slow and not s3_test" -vv --durations=30 python/python/tests
```
.github/workflows/rust.yml (7 changed lines)

```diff
@@ -40,6 +40,9 @@ jobs:
        with:
          fetch-depth: 0
          lfs: true
+      - uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          components: rustfmt, clippy
      - uses: Swatinem/rust-cache@v2
        with:
          workspaces: rust
@@ -160,8 +163,8 @@ jobs:
    strategy:
      matrix:
        target:
-         - x86_64-pc-windows-msvc
-         - aarch64-pc-windows-msvc
+          - x86_64-pc-windows-msvc
+          - aarch64-pc-windows-msvc
    defaults:
      run:
        working-directory: rust/lancedb
```
.github/workflows/update_package_lock/action.yml (33 lines; file deleted)

```diff
@@ -1,33 +0,0 @@
-name: update_package_lock
-description: "Update node's package.lock"
-
-inputs:
-  github_token:
-    required: true
-    description: "github token for the repo"
-
-runs:
-  using: "composite"
-  steps:
-    - uses: actions/setup-node@v3
-      with:
-        node-version: 20
-    - name: Set git configs
-      shell: bash
-      run: |
-        git config user.name 'Lance Release'
-        git config user.email 'lance-dev@lancedb.com'
-    - name: Update package-lock.json file
-      working-directory: ./node
-      run: |
-        npm install
-        git add package-lock.json
-        git commit -m "Updating package-lock.json"
-      shell: bash
-    - name: Push changes
-      if: ${{ inputs.dry_run }} == "false"
-      uses: ad-m/github-push-action@master
-      with:
-        github_token: ${{ inputs.github_token }}
-        branch: main
-        tags: true
```
.github/workflows/update_package_lock_nodejs/action.yml (33 lines; file deleted)

```diff
@@ -1,33 +0,0 @@
-name: update_package_lock_nodejs
-description: "Update nodejs's package.lock"
-
-inputs:
-  github_token:
-    required: true
-    description: "github token for the repo"
-
-runs:
-  using: "composite"
-  steps:
-    - uses: actions/setup-node@v3
-      with:
-        node-version: 20
-    - name: Set git configs
-      shell: bash
-      run: |
-        git config user.name 'Lance Release'
-        git config user.email 'lance-dev@lancedb.com'
-    - name: Update package-lock.json file
-      working-directory: ./nodejs
-      run: |
-        npm install
-        git add package-lock.json
-        git commit -m "Updating package-lock.json"
-      shell: bash
-    - name: Push changes
-      if: ${{ inputs.dry_run }} == "false"
-      uses: ad-m/github-push-action@master
-      with:
-        github_token: ${{ inputs.github_token }}
-        branch: main
-        tags: true
```
Cargo.lock (2183 changed lines; generated file, diff suppressed because it is too large)
Cargo.toml (57 changed lines)

```diff
@@ -21,57 +21,54 @@ categories = ["database-implementations"]
 rust-version = "1.78.0"
 
 [workspace.dependencies]
-lance = { "version" = "=0.25.3", "features" = [
+lance = { "version" = "=0.31.2", "features" = [
   "dynamodb",
-], tag = "v0.25.3-beta.1", git = "https://github.com/lancedb/lance" }
-lance-io = { version = "=0.25.3", tag = "v0.25.3-beta.1", git = "https://github.com/lancedb/lance" }
-lance-index = { version = "=0.25.3", tag = "v0.25.3-beta.1", git = "https://github.com/lancedb/lance" }
-lance-linalg = { version = "=0.25.3", tag = "v0.25.3-beta.1", git = "https://github.com/lancedb/lance" }
-lance-table = { version = "=0.25.3", tag = "v0.25.3-beta.1", git = "https://github.com/lancedb/lance" }
-lance-testing = { version = "=0.25.3", tag = "v0.25.3-beta.1", git = "https://github.com/lancedb/lance" }
-lance-datafusion = { version = "=0.25.3", tag = "v0.25.3-beta.1", git = "https://github.com/lancedb/lance" }
-lance-encoding = { version = "=0.25.3", tag = "v0.25.3-beta.1", git = "https://github.com/lancedb/lance" }
+], "tag" = "v0.31.2-beta.3", "git" = "https://github.com/lancedb/lance.git" }
+lance-io = { "version" = "=0.31.2", "tag" = "v0.31.2-beta.3", "git" = "https://github.com/lancedb/lance.git" }
+lance-index = { "version" = "=0.31.2", "tag" = "v0.31.2-beta.3", "git" = "https://github.com/lancedb/lance.git" }
+lance-linalg = { "version" = "=0.31.2", "tag" = "v0.31.2-beta.3", "git" = "https://github.com/lancedb/lance.git" }
+lance-table = { "version" = "=0.31.2", "tag" = "v0.31.2-beta.3", "git" = "https://github.com/lancedb/lance.git" }
+lance-testing = { "version" = "=0.31.2", "tag" = "v0.31.2-beta.3", "git" = "https://github.com/lancedb/lance.git" }
+lance-datafusion = { "version" = "=0.31.2", "tag" = "v0.31.2-beta.3", "git" = "https://github.com/lancedb/lance.git" }
+lance-encoding = { "version" = "=0.31.2", "tag" = "v0.31.2-beta.3", "git" = "https://github.com/lancedb/lance.git" }
 # Note that this one does not include pyarrow
-arrow = { version = "54.1", optional = false }
-arrow-array = "54.1"
-arrow-data = "54.1"
-arrow-ipc = "54.1"
-arrow-ord = "54.1"
-arrow-schema = "54.1"
-arrow-arith = "54.1"
-arrow-cast = "54.1"
+arrow = { version = "55.1", optional = false }
+arrow-array = "55.1"
+arrow-data = "55.1"
+arrow-ipc = "55.1"
+arrow-ord = "55.1"
+arrow-schema = "55.1"
+arrow-arith = "55.1"
+arrow-cast = "55.1"
 async-trait = "0"
-datafusion = { version = "46.0", default-features = false }
-datafusion-catalog = "46.0"
-datafusion-common = { version = "46.0", default-features = false }
-datafusion-execution = "46.0"
-datafusion-expr = "46.0"
-datafusion-physical-plan = "46.0"
+datafusion = { version = "48.0", default-features = false }
+datafusion-catalog = "48.0"
+datafusion-common = { version = "48.0", default-features = false }
+datafusion-execution = "48.0"
+datafusion-expr = "48.0"
+datafusion-physical-plan = "48.0"
 env_logger = "0.11"
-half = { "version" = "=2.4.1", default-features = false, features = [
+half = { "version" = "2.6.0", default-features = false, features = [
   "num-traits",
 ] }
 futures = "0"
 log = "0.4"
 moka = { version = "0.12", features = ["future"] }
-object_store = "0.11.0"
+object_store = "0.12.0"
 pin-project = "1.0.7"
 snafu = "0.8"
 url = "2"
 num-traits = "0.2"
-rand = "0.8"
+rand = "0.9"
 regex = "1.10"
 lazy_static = "1"
 semver = "1.0.25"
 
 # Temporary pins to work around downstream issues
 # https://github.com/apache/arrow-rs/commit/2fddf85afcd20110ce783ed5b4cdeb82293da30b
-chrono = "=0.4.39"
+chrono = "=0.4.41"
 # https://github.com/RustCrypto/formats/issues/1684
 base64ct = "=1.6.0"
 
 # Workaround for: https://github.com/eira-fransham/crunchy/issues/13
 crunchy = "=0.2.2"
 
 # Workaround for: https://github.com/Lokathor/bytemuck/issues/306
 bytemuck_derive = ">=1.8.1, <1.9.0"
```
README.md (129 changed lines)

````diff
@@ -1,94 +1,97 @@
+<a href="https://cloud.lancedb.com" target="_blank">
+<img src="https://github.com/user-attachments/assets/92dad0a2-2a37-4ce1-b783-0d1b4f30a00c" alt="LanceDB Cloud Public Beta" width="100%" style="max-width: 100%;">
+</a>
+
 <div align="center">
 <p align="center">
 
+<picture>
+<source media="(prefers-color-scheme: dark)" srcset="https://github.com/user-attachments/assets/ac270358-333e-4bea-a132-acefaa94040e">
+<source media="(prefers-color-scheme: light)" srcset="https://github.com/user-attachments/assets/b864d814-0d29-4784-8fd9-807297c758c0">
+<img alt="LanceDB Logo" src="https://github.com/user-attachments/assets/b864d814-0d29-4784-8fd9-807297c758c0" width=300>
+</picture>
-[](https://lancedb.com)
+[](https://lancedb.com/)
+[](https://blog.lancedb.com/)
+[](https://discord.gg/zMM32dvNtd)
+[](https://twitter.com/lancedb)
+[](https://www.linkedin.com/company/lancedb/)
 
+**Search More, Manage Less**
+
-<a href='https://github.com/lancedb/vectordb-recipes/tree/main' target="_blank"><img alt='LanceDB' src='https://img.shields.io/badge/VectorDB_Recipes-100000?style=for-the-badge&logo=LanceDB&logoColor=white&labelColor=645cfb&color=645cfb'/></a>
-<a href='https://lancedb.github.io/lancedb/' target="_blank"><img alt='lancdb' src='https://img.shields.io/badge/DOCS-100000?style=for-the-badge&logo=lancdb&logoColor=white&labelColor=645cfb&color=645cfb'/></a>
-[](https://blog.lancedb.com/)
-[](https://discord.gg/zMM32dvNtd)
-[](https://twitter.com/lancedb)
-[](https://gurubase.io/g/lancedb)
-<img src="docs/src/assets/lancedb.png" alt="LanceDB" width="50%">
 
 </p>
+# **The Multimodal AI Lakehouse**
 
-<img max-width="750px" alt="LanceDB Multimodal Search" src="https://github.com/lancedb/lancedb/assets/917119/09c5afc5-7816-4687-bae4-f2ca194426ec">
+[**How to Install** ](#how-to-install) ✦ [**Detailed Documentation**](https://lancedb.github.io/lancedb/) ✦ [**Tutorials and Recipes**](https://github.com/lancedb/vectordb-recipes/tree/main) ✦ [**Contributors**](#contributors)
 
+**The ultimate multimodal data platform for AI/ML applications.**
 
+LanceDB is designed for fast, scalable, and production-ready vector search. It is built on top of the Lance columnar format. You can store, index, and search over petabytes of multimodal data and vectors with ease.
+LanceDB is a central location where developers can build, train and analyze their AI workloads.
 
 </p>
 </div>
 
 <hr />
 <br>
 
-LanceDB is an open-source database for vector-search built with persistent storage, which greatly simplifies retrieval, filtering and management of embeddings.
+## **Demo: Multimodal Search by Keyword, Vector or with SQL**
+<img max-width="750px" alt="LanceDB Multimodal Search" src="https://github.com/lancedb/lancedb/assets/917119/09c5afc5-7816-4687-bae4-f2ca194426ec">
 
-The key features of LanceDB include:
+## **Star LanceDB to get updates!**
 
-* Production-scale vector search with no servers to manage.
+<details>
+<summary>⭐ Click here ⭐ to see how fast we're growing!</summary>
+<picture>
+  <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=lancedb/lancedb&theme=dark&type=Date">
+  <img width="100%" src="https://api.star-history.com/svg?repos=lancedb/lancedb&theme=dark&type=Date">
+</picture>
+</details>
 
-* Store, query and filter vectors, metadata and multi-modal data (text, images, videos, point clouds, and more).
+## **Key Features**:
 
-* Support for vector similarity search, full-text search and SQL.
+- **Fast Vector Search**: Search billions of vectors in milliseconds with state-of-the-art indexing.
+- **Comprehensive Search**: Support for vector similarity search, full-text search and SQL.
+- **Multimodal Support**: Store, query and filter vectors, metadata and multimodal data (text, images, videos, point clouds, and more).
+- **Advanced Features**: Zero-copy, automatic versioning, manage versions of your data without needing extra infrastructure. GPU support in building vector index.
 
-* Native Python and Javascript/Typescript support.
+### **Products**:
+- **Open Source & Local**: 100% open source, runs locally or in your cloud. No vendor lock-in.
+- **Cloud and Enterprise**: Production-scale vector search with no servers to manage. Complete data sovereignty and security.
 
-* Zero-copy, automatic versioning, manage versions of your data without needing extra infrastructure.
+### **Ecosystem**:
+- **Columnar Storage**: Built on the Lance columnar format for efficient storage and analytics.
+- **Seamless Integration**: Python, Node.js, Rust, and REST APIs for easy integration. Native Python and Javascript/Typescript support.
+- **Rich Ecosystem**: Integrations with [**LangChain** 🦜️🔗](https://python.langchain.com/docs/integrations/vectorstores/lancedb/), [**LlamaIndex** 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way.
 
-* GPU support in building vector index(*).
+## **How to Install**:
 
-* Ecosystem integrations with [LangChain 🦜️🔗](https://python.langchain.com/docs/integrations/vectorstores/lancedb/), [LlamaIndex 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way.
+Follow the [Quickstart](https://lancedb.github.io/lancedb/basic/) doc to set up LanceDB locally.
 
-LanceDB's core is written in Rust 🦀 and is built using <a href="https://github.com/lancedb/lance">Lance</a>, an open-source columnar format designed for performant ML workloads.
+**API & SDK:** We also support Python, Typescript and Rust SDKs
 
-## Quick Start
+| Interface | Documentation |
+|-----------|---------------|
+| Python SDK | https://lancedb.github.io/lancedb/python/python/ |
+| Typescript SDK | https://lancedb.github.io/lancedb/js/globals/ |
+| Rust SDK | https://docs.rs/lancedb/latest/lancedb/index.html |
+| REST API | https://docs.lancedb.com/api-reference/introduction |
 
-**Javascript**
-```shell
-npm install @lancedb/lancedb
-```
+## **Join Us and Contribute**
 
-```javascript
-import * as lancedb from "@lancedb/lancedb";
+We welcome contributions from everyone! Whether you're a developer, researcher, or just someone who wants to help out.
 
-const db = await lancedb.connect("data/sample-lancedb");
-const table = await db.createTable("vectors", [
-    { id: 1, vector: [0.1, 0.2], item: "foo", price: 10 },
-    { id: 2, vector: [1.1, 1.2], item: "bar", price: 50 },
-], {mode: 'overwrite'});
+If you have any suggestions or feature requests, please feel free to open an issue on GitHub or discuss it on our [**Discord**](https://discord.gg/G5DcmnZWKB) server.
 
+[**Check out the GitHub Issues**](https://github.com/lancedb/lancedb/issues) if you would like to work on the features that are planned for the future. If you have any suggestions or feature requests, please feel free to open an issue on GitHub.
 
+## **Contributors**
+
+<a href="https://github.com/lancedb/lancedb/graphs/contributors">
+  <img src="https://contrib.rocks/image?repo=lancedb/lancedb" />
+</a>
 
-const query = table.vectorSearch([0.1, 0.3]).limit(2);
-const results = await query.toArray();
+## **Stay in Touch With Us**
+<div align="center">
 
-// You can also search for rows by specific criteria without involving a vector search.
-const rowsByCriteria = await table.query().where("price >= 10").toArray();
-```
+</br>
 
-**Python**
-```shell
-pip install lancedb
-```
+[](https://lancedb.com/)
+[](https://blog.lancedb.com/)
+[](https://discord.gg/zMM32dvNtd)
+[](https://twitter.com/lancedb)
+[](https://www.linkedin.com/company/lancedb/)
 
-```python
-import lancedb
 
-uri = "data/sample-lancedb"
-db = lancedb.connect(uri)
-table = db.create_table("my_table",
-                        data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
-                              {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}])
-result = table.search([100, 100]).limit(2).to_pandas()
-```
 
-## Blogs, Tutorials & Videos
-* 📈 <a href="https://blog.lancedb.com/benchmarking-random-access-in-lance/">2000x better performance with Lance over Parquet</a>
-* 🤖 <a href="https://github.com/lancedb/vectordb-recipes/tree/main/examples/Youtube-Search-QA-Bot">Build a question and answer bot with LanceDB</a>
+</div>
````
ci/set_lance_version.py (new file, 188 lines)

```python
import argparse
import sys
import json


def run_command(command: str) -> str:
    """
    Run a shell command and return stdout as a string.
    If exit code is not 0, raise an exception with the stderr output.
    """
    import subprocess

    result = subprocess.run(command, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        raise Exception(f"Command failed with error: {result.stderr.strip()}")
    return result.stdout.strip()


def get_latest_stable_version() -> str:
    version_line = run_command("cargo info lance | grep '^version:'")
    version = version_line.split(" ")[1].strip()
    return version


def get_latest_preview_version() -> str:
    lance_tags = run_command(
        "git ls-remote --tags https://github.com/lancedb/lance.git | grep 'refs/tags/v[0-9beta.-]\\+$'"
    ).splitlines()
    lance_tags = (
        tag.split("refs/tags/")[1]
        for tag in lance_tags
        if "refs/tags/" in tag and "beta" in tag
    )
    from packaging.version import Version

    latest = max(
        (tag[1:] for tag in lance_tags if tag.startswith("v")), key=lambda t: Version(t)
    )
    return str(latest)


def extract_features(line: str) -> list:
    """
    Extracts the features from a line in Cargo.toml.
    Example: 'lance = { "version" = "=0.29.0", "features" = ["dynamodb"] }'
    Returns: ['dynamodb']
    """
    import re

    match = re.search(r'"features"\s*=\s*\[\s*(.*?)\s*\]', line, re.DOTALL)
    if match:
        features_str = match.group(1)
        return [f.strip('"') for f in features_str.split(",") if len(f) > 0]
    return []


def update_cargo_toml(line_updater):
    """
    Updates the Cargo.toml file by applying the line_updater function to each line.
    The line_updater function should take a line as input and return the updated line.
    """
    with open("Cargo.toml", "r") as f:
        lines = f.readlines()

    new_lines = []
    lance_line = ""
    is_parsing_lance_line = False
    for line in lines:
        if line.startswith("lance"):
            # Update the line using the provided function
            if line.strip().endswith("}"):
                new_lines.append(line_updater(line))
            else:
                lance_line = line
                is_parsing_lance_line = True
        elif is_parsing_lance_line:
            lance_line += line
            if line.strip().endswith("}"):
                new_lines.append(line_updater(lance_line))
                lance_line = ""
                is_parsing_lance_line = False
            else:
                print("doesn't end with }:", line)
        else:
            # Keep the line unchanged
            new_lines.append(line)

    with open("Cargo.toml", "w") as f:
        f.writelines(new_lines)


def set_stable_version(version: str):
    """
    Sets lines to
    lance = { "version" = "=0.29.0", "features" = ["dynamodb"] }
    lance-io = "=0.29.0"
    ...
    """

    def line_updater(line: str) -> str:
        package_name = line.split("=", maxsplit=1)[0].strip()
        features = extract_features(line)
        if features:
            return f'{package_name} = {{ "version" = "={version}", "features" = {json.dumps(features)} }}\n'
        else:
            return f'{package_name} = "={version}"\n'

    update_cargo_toml(line_updater)


def set_preview_version(version: str):
    """
    Sets lines to
    lance = { "version" = "=0.29.0", "features" = ["dynamodb"], tag = "v0.29.0-beta.2", git="https://github.com/lancedb/lance.git" }
    lance-io = { version = "=0.29.0", tag = "v0.29.0-beta.2", git="https://github.com/lancedb/lance.git" }
    ...
    """

    def line_updater(line: str) -> str:
        package_name = line.split("=", maxsplit=1)[0].strip()
        features = extract_features(line)
        base_version = version.split("-")[0]  # Get the base version without beta suffix
        if features:
            return f'{package_name} = {{ "version" = "={base_version}", "features" = {json.dumps(features)}, "tag" = "v{version}", "git" = "https://github.com/lancedb/lance.git" }}\n'
        else:
            return f'{package_name} = {{ "version" = "={base_version}", "tag" = "v{version}", "git" = "https://github.com/lancedb/lance.git" }}\n'

    update_cargo_toml(line_updater)


def set_local_version():
    """
    Sets lines to
    lance = { path = "../lance/rust/lance", features = ["dynamodb"] }
    lance-io = { path = "../lance/rust/lance-io" }
    ...
    """

    def line_updater(line: str) -> str:
        package_name = line.split("=", maxsplit=1)[0].strip()
        features = extract_features(line)
        if features:
            return f'{package_name} = {{ "path" = "../lance/rust/{package_name}", "features" = {json.dumps(features)} }}\n'
        else:
            return f'{package_name} = {{ "path" = "../lance/rust/{package_name}" }}\n'

    update_cargo_toml(line_updater)


parser = argparse.ArgumentParser(description="Set the version of the Lance package.")
parser.add_argument(
    "version",
    type=str,
    help="The version to set for the Lance package. Use 'stable' for the latest stable version, 'preview' for latest preview version, or a specific version number (e.g., '0.1.0'). You can also specify 'local' to use a local path.",
)
args = parser.parse_args()

if args.version == "stable":
    latest_stable_version = get_latest_stable_version()
    print(
        f"Found latest stable version: \033[1mv{latest_stable_version}\033[0m",
        file=sys.stderr,
    )
    set_stable_version(latest_stable_version)
elif args.version == "preview":
    latest_preview_version = get_latest_preview_version()
    print(
        f"Found latest preview version: \033[1mv{latest_preview_version}\033[0m",
        file=sys.stderr,
    )
    set_preview_version(latest_preview_version)
elif args.version == "local":
    set_local_version()
else:
    # Parse the version number.
    version = args.version
    # Ignore initial v if present.
    if version.startswith("v"):
        version = version[1:]

    if "beta" in version:
        set_preview_version(version)
    else:
        set_stable_version(version)

print("Updating lockfiles...", file=sys.stderr, end="")
run_command("cargo metadata > /dev/null")
print(" done.", file=sys.stderr)
```
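Judging from the argparse interface above, the script would be invoked from the repository root (where it expects to find `Cargo.toml`), e.g. `python ci/set_lance_version.py preview` to pin the workspace to the latest Lance beta tag, `python ci/set_lance_version.py v0.31.2-beta.3` to pin a specific tag (a leading `v` is ignored), or `python ci/set_lance_version.py local` to point the `lance-*` dependencies at a sibling checkout at `../lance`.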
ci/update_lockfiles.sh (new executable file, 30 lines)

```bash
#!/usr/bin/env bash
set -euo pipefail

AMEND=false

for arg in "$@"; do
  if [[ "$arg" == "--amend" ]]; then
    AMEND=true
  fi
done

# This updates the lockfile without building
cargo metadata --quiet > /dev/null

pushd nodejs || exit 1
npm install --package-lock-only --silent
popd
pushd node || exit 1
npm install --package-lock-only --silent
popd

if git diff --quiet --exit-code; then
  echo "No lockfile changes to commit; skipping amend."
elif $AMEND; then
  git add Cargo.lock nodejs/package-lock.json node/package-lock.json
  git commit --amend --no-edit
else
  git add Cargo.lock nodejs/package-lock.json node/package-lock.json
  git commit -m "Update lockfiles"
fi
```
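As wired into the workflow diffs above, `make-release-commit.yml` runs `bash ci/update_lockfiles.sh --amend` right after the version bump so the regenerated `Cargo.lock` and npm lockfiles are folded into the bump commit, while `npm-publish.yml` calls it without `--amend`, producing a standalone "Update lockfiles" commit that is then pushed to `main`.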
```diff
@@ -2,7 +2,7 @@
 
 LanceDB docs are deployed to https://lancedb.github.io/lancedb/.
 
-Docs is built and deployed automatically by [Github Actions](.github/workflows/docs.yml)
+Docs is built and deployed automatically by [Github Actions](../.github/workflows/docs.yml)
 whenever a commit is pushed to the `main` branch. So it is possible for the docs to show
 unreleased features.
```
```diff
@@ -193,6 +193,7 @@ nav:
       - Pandas and PyArrow: python/pandas_and_pyarrow.md
       - Polars: python/polars_arrow.md
       - DuckDB: python/duckdb.md
+      - Datafusion: python/datafusion.md
       - LangChain:
         - LangChain 🔗: integrations/langchain.md
         - LangChain demo: notebooks/langchain_demo.ipynb
@@ -205,6 +206,7 @@ nav:
       - PromptTools: integrations/prompttools.md
       - dlt: integrations/dlt.md
       - phidata: integrations/phidata.md
+      - Genkit: integrations/genkit.md
       - 🎯 Examples:
         - Overview: examples/index.md
         - 🐍 Python:
@@ -247,6 +249,7 @@ nav:
       - Data management: concepts/data_management.md
   - Guides:
       - Working with tables: guides/tables.md
+      - Working with SQL: guides/sql_querying.md
       - Building an ANN index: ann_indexes.md
       - Vector Search: search.md
       - Full-text search (native): fts.md
@@ -323,6 +326,7 @@ nav:
       - Pandas and PyArrow: python/pandas_and_pyarrow.md
       - Polars: python/polars_arrow.md
       - DuckDB: python/duckdb.md
+      - Datafusion: python/datafusion.md
       - LangChain 🦜️🔗↗: integrations/langchain.md
       - LangChain.js 🦜️🔗↗: https://js.langchain.com/docs/integrations/vectorstores/lancedb
       - LlamaIndex 🦙↗: integrations/llamaIndex.md
@@ -331,6 +335,7 @@ nav:
       - PromptTools: integrations/prompttools.md
       - dlt: integrations/dlt.md
       - phidata: integrations/phidata.md
+      - Genkit: integrations/genkit.md
       - Examples:
         - examples/index.md
         - 🐍 Python:
```
docs/overrides/partials/main.html (new file, 5 lines)

```html
{% extends "base.html" %}

{% block announce %}
📚 Starting June 1st, 2025, please use <a href="https://lancedb.github.io/documentation" target="_blank" rel="noopener noreferrer">lancedb.github.io/documentation</a> for the latest docs.
{% endblock %}
```
```diff
@@ -291,7 +291,7 @@ Product quantization can lead to approximately `16 * sizeof(float32) / 1 = 64` t
 
 `num_partitions` is used to decide how many partitions the first level `IVF` index uses.
 Higher number of partitions could lead to more efficient I/O during queries and better accuracy, but it takes much more time to train.
-On `SIFT-1M` dataset, our benchmark shows that keeping each partition 1K-4K rows lead to a good latency / recall.
+On `SIFT-1M` dataset, our benchmark shows that keeping each partition 4K-8K rows lead to a good latency / recall.
 
 `num_sub_vectors` specifies how many Product Quantization (PQ) short codes to generate on each vector. The number should be a factor of the vector dimension. Because
 PQ is a lossy compression of the original vector, a higher `num_sub_vectors` usually results in
```
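To make the revised tuning guidance concrete, here is a minimal sketch using LanceDB's Python sync API; the table name, row count, and 1536-dim vectors are hypothetical, and the keyword arguments follow `Table.create_index`:

```python
import lancedb

db = lancedb.connect("data/sample-lancedb")
table = db.open_table("my_table")  # hypothetical table of ~1M 1536-dim vectors

# ~1M rows / 256 partitions ≈ 4K rows per partition, inside the recommended
# 4K-8K range. num_sub_vectors must divide the vector dimension:
# 1536 / 96 = 16 floats compressed into each PQ code.
table.create_index(
    metric="L2",
    num_partitions=256,
    num_sub_vectors=96,
)
```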
docs/src/assets/hero-header.png (new binary file, 1.7 MiB; not shown)
docs/src/assets/lancedb.png (new binary file, 40 KiB; not shown)
docs/src/guides/sql_querying.md (new file, 68 lines)

````markdown
You can use DuckDB and Apache Datafusion to query your LanceDB tables using SQL.
This guide will show how to query Lance tables using both.

We will re-use the dataset [created previously](./pandas_and_pyarrow.md):

```python
import lancedb

db = lancedb.connect("data/sample-lancedb")
data = [
    {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
    {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}
]
table = db.create_table("pd_table", data=data)
```

## Querying a LanceDB Table with DuckDB

The `to_lance` method converts the LanceDB table to a `LanceDataset`, which is accessible to DuckDB through the Arrow compatibility layer.
To query the resulting Lance dataset in DuckDB, all you need to do is reference the dataset by the same name in your SQL query.

```python
import duckdb

arrow_table = table.to_lance()

duckdb.query("SELECT * FROM arrow_table")
```

```
┌─────────────┬─────────┬────────┐
│   vector    │  item   │ price  │
│   float[]   │ varchar │ double │
├─────────────┼─────────┼────────┤
│ [3.1, 4.1]  │ foo     │   10.0 │
│ [5.9, 26.5] │ bar     │   20.0 │
└─────────────┴─────────┴────────┘
```

## Querying a LanceDB Table with Apache Datafusion

Have the required imports before doing any querying.

=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
    --8<-- "python/python/tests/docs/test_guide_tables.py:import-session-context"
    --8<-- "python/python/tests/docs/test_guide_tables.py:import-ffi-dataset"
    ```

Register the table created with the Datafusion session context.

=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_guide_tables.py:lance_sql_basic"
    ```

```
┌─────────────┬─────────┬────────┐
│   vector    │  item   │ price  │
│   float[]   │ varchar │ double │
├─────────────┼─────────┼────────┤
│ [3.1, 4.1]  │ foo     │   10.0 │
│ [5.9, 26.5] │ bar     │   20.0 │
└─────────────┴─────────┴────────┘
```
````
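The `--8<--` includes above pull snippets from the test suite and are not rendered in this view. As a rough sketch of what the registration step looks like — assuming the `datafusion` Python package, whose `SessionContext.register_dataset` accepts the PyArrow-compatible dataset returned by `to_lance()`:

```python
import lancedb
from datafusion import SessionContext

db = lancedb.connect("data/sample-lancedb")
table = db.open_table("pd_table")

ctx = SessionContext()
# to_lance() exposes the underlying Lance dataset; DataFusion scans it
# through the Arrow dataset interface under the registered name.
ctx.register_dataset("pd_table", table.to_lance())
ctx.sql("SELECT item, price FROM pd_table WHERE price >= 10").show()
```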
```diff
@@ -342,7 +342,7 @@ For **read and write access**, LanceDB will need a policy such as:
         "Action": [
           "s3:PutObject",
           "s3:GetObject",
-          "s3:DeleteObject",
+          "s3:DeleteObject"
         ],
         "Resource": "arn:aws:s3:::<bucket>/<prefix>/*"
       },
@@ -374,7 +374,7 @@ For **read-only access**, LanceDB will need a policy such as:
       {
         "Effect": "Allow",
         "Action": [
-          "s3:GetObject",
+          "s3:GetObject"
         ],
         "Resource": "arn:aws:s3:::<bucket>/<prefix>/*"
       },
```
````diff
@@ -765,7 +765,10 @@ This can be used to update zero to all rows depending on how many rows match the
     ];
     const tbl = await db.createTable("my_table", data)
 
-    await tbl.update({vector: [10, 10]}, { where: "x = 2"})
+    await tbl.update({
+      values: { vector: [10, 10] },
+      where: "x = 2"
+    });
     ```
 
 === "vectordb (deprecated)"
@@ -784,7 +787,10 @@ This can be used to update zero to all rows depending on how many rows match the
     ];
     const tbl = await db.createTable("my_table", data)
 
-    await tbl.update({ where: "x = 2", values: {vector: [10, 10]} })
+    await tbl.update({
+      where: "x = 2",
+      values: { vector: [10, 10] }
+    });
     ```
 
 #### Updating using a sql query
````
```diff
@@ -1001,11 +1007,9 @@ In LanceDB OSS, users can set the `read_consistency_interval` parameter on conne
 
 There are three possible settings for `read_consistency_interval`:
 
-1. **Unset**: The database does not check for updates to tables made by other processes. This setting is suitable for applications where the data does not change during the lifetime of the table reference.
-2. **Zero seconds (Strong consistency)**: The database checks for updates on every read. This provides the strongest consistency guarantees, ensuring that all clients see the latest committed data. However, it has the most overhead. This setting is suitable when consistency matters more than having high QPS. For best performance, combine this setting with the storage option `new_table_enable_v2_manifest_paths` set to `true`.
-3. **Custom interval (Eventual consistency, the default)**: The database checks for updates at a custom interval. By default, this is every 5 seconds. This provides eventual consistency, allowing for some lag between write and read operations. Performance wise, this is a middle ground between strong consistency and no consistency check. This setting is suitable for applications where immediate consistency is not critical, but clients should see updated data eventually.
-
-You can always force a synchronization by calling `checkout_latest()` / `checkoutLatest()` on a table.
+1. **Unset (default)**: The database does not check for updates to tables made by other processes. This provides the best query performance, but means that clients may not see the most up-to-date data. This setting is suitable for applications where the data does not change during the lifetime of the table reference.
+2. **Zero seconds (Strong consistency)**: The database checks for updates on every read. This provides the strongest consistency guarantees, ensuring that all clients see the latest committed data. However, it has the most overhead. This setting is suitable when consistency matters more than having high QPS.
+3. **Custom interval (Eventual consistency)**: The database checks for updates at a custom interval, such as every 5 seconds. This provides eventual consistency, allowing for some lag between write and read operations. Performance wise, this is a middle ground between strong consistency and no consistency check. This setting is suitable for applications where immediate consistency is not critical, but clients should see updated data eventually.
 
 !!! tip "Consistency in LanceDB Cloud"
```
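A minimal sketch of the three settings with the Python sync API (the URI and table name are hypothetical; `read_consistency_interval` takes a `datetime.timedelta`):

```python
from datetime import timedelta

import lancedb

# Strong consistency: check for updates on every read.
db = lancedb.connect("data/sample-lancedb", read_consistency_interval=timedelta(0))

# Eventual consistency: check for updates at most every 5 seconds.
db = lancedb.connect("data/sample-lancedb", read_consistency_interval=timedelta(seconds=5))

# Unset (default): never check automatically; sync manually when needed.
db = lancedb.connect("data/sample-lancedb")
tbl = db.open_table("my_table")
tbl.checkout_latest()
```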
````diff
@@ -1043,21 +1047,7 @@ You can always force a synchronization by calling `checkout_latest()` / `checkou
     --8<-- "python/python/tests/docs/test_guide_tables.py:table_async_eventual_consistency"
     ```
 
-For no consistency, use `None`:
-
-=== "Sync API"
-
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:table_no_consistency"
-    ```
-
-=== "Async API"
-
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:table_async_no_consistency"
-    ```
-
-To manually check for updates you can use `checkout_latest`:
+By default, a `Table` will never check for updates from other writers. To manually check for updates you can use `checkout_latest`:
 
 === "Sync API"
 
@@ -1075,25 +1065,15 @@ You can always force a synchronization by calling `checkout_latest()` / `checkou
 To set strong consistency, use `0`:
 
 ```ts
---8<-- "nodejs/examples/basic.test.ts:table_strong_consistency"
+const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 0 });
+const tbl = await db.openTable("my_table");
 ```
 
 For eventual consistency, specify the update interval as seconds:
 
 ```ts
---8<-- "nodejs/examples/basic.test.ts:table_eventual_consistency"
-```
-
-For no consistency, use `null`:
-
-```ts
---8<-- "nodejs/examples/basic.test.ts:table_no_consistency"
-```
-
-To manually check for updates you can use `checkoutLatest`:
-
-```ts
---8<-- "nodejs/examples/basic.test.ts:table_checkout_latest"
+const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 5 });
+const tbl = await db.openTable("my_table");
 ```
 
 <!-- Node doesn't yet support the version time travel: https://github.com/lancedb/lancedb/issues/1007
````
183
docs/src/integrations/genkit.md
Normal file
183
docs/src/integrations/genkit.md
Normal file
@@ -0,0 +1,183 @@
|
||||
### genkitx-lancedb
|
||||
This is a lancedb plugin for genkit framework. It allows you to use LanceDB for ingesting and rereiving data using genkit framework.
|
||||
|
||||

|
||||
|
||||
### Installation
|
||||
```bash
|
||||
pnpm install genkitx-lancedb
|
||||
```
|
||||
|
||||
### Usage
|
||||
|
||||
Adding LanceDB plugin to your genkit instance.
|
||||
|
||||
```ts
|
||||
import { lancedbIndexerRef, lancedb, lancedbRetrieverRef, WriteMode } from 'genkitx-lancedb';
|
||||
import { textEmbedding004, vertexAI } from '@genkit-ai/vertexai';
|
||||
import { gemini } from '@genkit-ai/vertexai';
|
||||
import { z, genkit } from 'genkit';
|
||||
import { Document } from 'genkit/retriever';
|
||||
import { chunk } from 'llm-chunk';
|
||||
import { readFile } from 'fs/promises';
|
||||
import path from 'path';
|
||||
import pdf from 'pdf-parse/lib/pdf-parse';
|
||||
|
||||
const ai = genkit({
|
||||
plugins: [
|
||||
// vertexAI provides the textEmbedding004 embedder
|
||||
vertexAI(),
|
||||
|
||||
// the local vector store requires an embedder to translate from text to vector
|
||||
lancedb([
|
||||
{
|
||||
dbUri: '.db', // optional lancedb uri, default to .db
|
||||
tableName: 'table', // optional table name, default to table
|
||||
embedder: textEmbedding004,
|
||||
},
|
||||
]),
|
||||
],
|
||||
});
|
||||
```
|
||||
|
||||
You can run this app with the following command:
|
||||
```bash
|
||||
genkit start -- tsx --watch src/index.ts
|
||||
```
|
||||
|
||||
This'll add LanceDB as a retriever and indexer to the genkit instance. You can see it in the GUI view
|
||||
<img width="1710" alt="Screenshot 2025-05-11 at 7 21 05 PM" src="https://github.com/user-attachments/assets/e752f7f4-785b-4797-a11e-72ab06a531b7" />
|
||||
|
||||
**Testing retrieval on a sample table**
|
||||
Let's see the raw retrieval results
|
||||
|
||||
<img width="1710" alt="Screenshot 2025-05-11 at 7 21 05 PM" src="https://github.com/user-attachments/assets/b8d356ed-8421-4790-8fc0-d6af563b9657" />
|
||||
On running this query, you'll 5 results fetched from the lancedb table, where each result looks something like this:
|
||||
<img width="1417" alt="Screenshot 2025-05-11 at 7 21 18 PM" src="https://github.com/user-attachments/assets/77429525-36e2-4da6-a694-e58c1cf9eb83" />
|
||||
|
||||
|
||||
|
||||
## Creating a custom RAG flow
|
||||
|
||||
Now that we've seen how you can use LanceDB for in a genkit pipeline, let's refine the flow and create a RAG. A RAG flow will consist of an index and a retreiver with its outputs postprocessed an fed into an LLM for final response
|
||||
|
||||
### Creating custom indexer flows
|
||||
You can also create custom indexer flows, utilizing more options and features provided by LanceDB.
|
||||
|
||||
```ts
|
||||
export const menuPdfIndexer = lancedbIndexerRef({
|
||||
// Using all defaults, for dbUri, tableName, and embedder, etc
|
||||
});
|
||||
|
||||
const chunkingConfig = {
|
||||
minLength: 1000,
|
||||
maxLength: 2000,
|
||||
splitter: 'sentence',
|
||||
overlap: 100,
|
||||
delimiters: '',
|
||||
} as any;
|
||||
|
||||
|
||||
async function extractTextFromPdf(filePath: string) {
|
||||
const pdfFile = path.resolve(filePath);
|
||||
const dataBuffer = await readFile(pdfFile);
|
||||
const data = await pdf(dataBuffer);
|
||||
return data.text;
|
||||
}
|
||||
|
||||
export const indexMenu = ai.defineFlow(
|
||||
{
|
||||
name: 'indexMenu',
|
||||
inputSchema: z.string().describe('PDF file path'),
|
||||
outputSchema: z.void(),
|
||||
},
|
||||
async (filePath: string) => {
|
||||
filePath = path.resolve(filePath);
|
||||
|
||||
// Read the pdf.
|
||||
const pdfTxt = await ai.run('extract-text', () =>
|
||||
extractTextFromPdf(filePath)
|
||||
);
|
||||
|
||||
// Divide the pdf text into segments.
|
||||
const chunks = await ai.run('chunk-it', async () =>
|
||||
chunk(pdfTxt, chunkingConfig)
|
||||
);
|
||||
|
||||
// Convert chunks of text into documents to store in the index.
|
||||
const documents = chunks.map((text) => {
|
||||
return Document.fromText(text, { filePath });
|
||||
});
|
||||
|
||||
// Add documents to the index.
|
||||
await ai.index({
|
||||
indexer: menuPdfIndexer,
|
||||
documents,
|
||||
options: {
|
||||
writeMode: WriteMode.Overwrite,
|
||||
} as any
|
||||
});
|
||||
}
|
||||
);
|
||||
```
|
||||
|
||||
<img width="1316" alt="Screenshot 2025-05-11 at 8 35 56 PM" src="https://github.com/user-attachments/assets/e2a20ce4-d1d0-4fa2-9a84-f2cc26e3a29f" />
|
||||
|
||||
In your console, you can see the logs
|
||||
|
||||
<img width="511" alt="Screenshot 2025-05-11 at 7 19 14 PM" src="https://github.com/user-attachments/assets/243f26c5-ed38-40b6-b661-002f40f0423a" />
|
||||
|
||||
### Creating custom retriever flows

You can also create custom retriever flows, utilizing more options and features provided by LanceDB.

```ts
// Assumes `ai`, `z`, `lancedbRetrieverRef`, and `gemini` are imported as earlier in this guide.
export const menuRetriever = lancedbRetrieverRef({
  tableName: "table", // Use the same table name as the indexer.
  displayName: "Menu", // Use a custom display name.
});

export const menuQAFlow = ai.defineFlow(
  { name: "Menu", inputSchema: z.string(), outputSchema: z.string() },
  async (input: string) => {
    // Retrieve relevant documents.
    const docs = await ai.retrieve({
      retriever: menuRetriever,
      query: input,
      options: {
        k: 3,
      },
    });

    // Extract the stored content from each retrieved document.
    const extractedContent = docs.map(doc => {
      if (doc.content && Array.isArray(doc.content) && doc.content.length > 0) {
        if (doc.content[0].media && doc.content[0].media.url) {
          return doc.content[0].media.url;
        }
      }
      return "No content found";
    });

    console.log("Extracted content:", extractedContent);

    const { text } = await ai.generate({
      model: gemini('gemini-2.0-flash'),
      prompt: `
      You are acting as a helpful AI assistant that can answer
      questions about the food available on the menu at Genkit Grub Pub.

      Use only the context provided to answer the question.
      If you don't know, do not make up an answer.
      Do not add or change items on the menu.

      Context:
      ${extractedContent.join('\n\n')}

      Question: ${input}`,
      docs,
    });

    return text;
  }
);
```
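As with the indexer, the flow is directly callable; a minimal sketch (the question is just an example):

```ts
// Ask a question against the indexed menu.
const answer = await menuQAFlow("What desserts are on the menu?");
console.log(answer);
```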
Now, using our retrieval flow, we can ask questions about the ingested PDF:

<img width="1306" alt="Screenshot 2025-05-11 at 7 18 45 PM" src="https://github.com/user-attachments/assets/86c66b13-7c12-4d5f-9d81-ae36bfb1c346" />
docs/src/js/classes/BooleanQuery.md (new file, 53 lines)
@@ -0,0 +1,53 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / BooleanQuery

# Class: BooleanQuery

Represents a full-text query interface.
This interface defines the structure and behavior for full-text queries,
including methods to retrieve the query type and convert the query to a dictionary format.

## Implements

- [`FullTextQuery`](../interfaces/FullTextQuery.md)

## Constructors

### new BooleanQuery()

```ts
new BooleanQuery(queries): BooleanQuery
```

Creates an instance of BooleanQuery.

#### Parameters

* **queries**: [[`Occur`](../enumerations/Occur.md), [`FullTextQuery`](../interfaces/FullTextQuery.md)][]
An array of (Occur, FullTextQuery) pairs to combine.
Occur specifies whether the sub-query must match or should match.

#### Returns

[`BooleanQuery`](BooleanQuery.md)

## Methods

### queryType()

```ts
queryType(): FullTextQueryType
```

The type of the full-text query.

#### Returns

[`FullTextQueryType`](../enumerations/FullTextQueryType.md)

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)
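As a hedged sketch of how `BooleanQuery` composes with `Occur` (the database path, table, column name, and query terms below are illustrative; it assumes an open table with an FTS index on `text`):

```ts
import { BooleanQuery, MatchQuery, Occur, connect } from "@lancedb/lancedb";

const db = await connect("data/sample-lancedb");
const table = await db.openTable("table");

// Documents must match "pasta" and are scored higher if they also match "spicy".
const query = new BooleanQuery([
  [Occur.Must, new MatchQuery("pasta", "text")],
  [Occur.Should, new MatchQuery("spicy", "text")],
]);
const results = await table.search(query).limit(5).toArray();
```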
@@ -22,10 +22,13 @@ including methods to retrieve the query type and convert the query to a dictiona
|
||||
new BoostQuery(
|
||||
positive,
|
||||
negative,
|
||||
negativeBoost): BoostQuery
|
||||
options?): BoostQuery
|
||||
```
|
||||
|
||||
Creates an instance of BoostQuery.
|
||||
The boost returns documents that match the positive query,
|
||||
but penalizes those that match the negative query.
|
||||
the penalty is controlled by the `negativeBoost` parameter.
|
||||
|
||||
#### Parameters
|
||||
|
||||
@@ -35,8 +38,11 @@ Creates an instance of BoostQuery.
|
||||
* **negative**: [`FullTextQuery`](../interfaces/FullTextQuery.md)
|
||||
The negative query that reduces the relevance score.
|
||||
|
||||
* **negativeBoost**: `number`
|
||||
The factor by which the negative query reduces the score.
|
||||
* **options?**
|
||||
Optional parameters for the boost query.
|
||||
- `negativeBoost`: The boost factor for the negative query (default is 0.0).
|
||||
|
||||
* **options.negativeBoost?**: `number`
|
||||
|
||||
#### Returns
|
||||
|
||||
@@ -50,6 +56,8 @@ Creates an instance of BoostQuery.
|
||||
queryType(): FullTextQueryType
|
||||
```
|
||||
|
||||
The type of the full-text query.
|
||||
|
||||
#### Returns
|
||||
|
||||
[`FullTextQueryType`](../enumerations/FullTextQueryType.md)
|
||||
@@ -57,19 +65,3 @@ queryType(): FullTextQueryType
|
||||
#### Implementation of
|
||||
|
||||
[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)
|
||||
|
||||
***
|
||||
|
||||
### toDict()
|
||||
|
||||
```ts
|
||||
toDict(): Record<string, unknown>
|
||||
```
|
||||
|
||||
#### Returns
|
||||
|
||||
`Record`<`string`, `unknown`>
|
||||
|
||||
#### Implementation of
|
||||
|
||||
[`FullTextQuery`](../interfaces/FullTextQuery.md).[`toDict`](../interfaces/FullTextQuery.md#todict)
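A hedged sketch of the options-based constructor (the table, column, terms, and boost value are illustrative, assuming an FTS index on `text`):

```ts
import { BoostQuery, MatchQuery } from "@lancedb/lancedb";

// Prefer documents matching "dessert" while penalizing ones matching "sugar-free".
const boosted = new BoostQuery(
  new MatchQuery("dessert", "text"),
  new MatchQuery("sugar-free", "text"),
  { negativeBoost: 0.2 }, // hypothetical value; the default is 0.0 per the docs above
);
const rows = await table.search(boosted).limit(5).toArray();
```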
|
||||
|
||||
@@ -22,9 +22,7 @@ including methods to retrieve the query type and convert the query to a dictiona
|
||||
new MatchQuery(
|
||||
query,
|
||||
column,
|
||||
boost,
|
||||
fuzziness,
|
||||
maxExpansions): MatchQuery
|
||||
options?): MatchQuery
|
||||
```
|
||||
|
||||
Creates an instance of MatchQuery.
|
||||
@@ -37,14 +35,20 @@ Creates an instance of MatchQuery.
|
||||
* **column**: `string`
|
||||
The name of the column to search within.
|
||||
|
||||
* **boost**: `number` = `1.0`
|
||||
(Optional) The boost factor to influence the relevance score of this query. Default is `1.0`.
|
||||
* **options?**
|
||||
Optional parameters for the match query.
|
||||
- `boost`: The boost factor for the query (default is 1.0).
|
||||
- `fuzziness`: The fuzziness level for the query (default is 0).
|
||||
- `maxExpansions`: The maximum number of terms to consider for fuzzy matching (default is 50).
|
||||
- `operator`: The logical operator to use for combining terms in the query (default is "OR").
|
||||
|
||||
* **fuzziness**: `number` = `0`
|
||||
(Optional) The allowed edit distance for fuzzy matching. Default is `0`.
|
||||
* **options.boost?**: `number`
|
||||
|
||||
* **maxExpansions**: `number` = `50`
|
||||
(Optional) The maximum number of terms to consider for fuzzy matching. Default is `50`.
|
||||
* **options.fuzziness?**: `number`
|
||||
|
||||
* **options.maxExpansions?**: `number`
|
||||
|
||||
* **options.operator?**: [`Operator`](../enumerations/Operator.md)
|
||||
|
||||
#### Returns
|
||||
|
||||
@@ -58,6 +62,8 @@ Creates an instance of MatchQuery.
|
||||
queryType(): FullTextQueryType
|
||||
```
|
||||
|
||||
The type of the full-text query.
|
||||
|
||||
#### Returns
|
||||
|
||||
[`FullTextQueryType`](../enumerations/FullTextQueryType.md)
|
||||
@@ -65,19 +71,3 @@ queryType(): FullTextQueryType
|
||||
#### Implementation of
|
||||
|
||||
[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)
|
||||
|
||||
***
|
||||
|
||||
### toDict()
|
||||
|
||||
```ts
|
||||
toDict(): Record<string, unknown>
|
||||
```
|
||||
|
||||
#### Returns
|
||||
|
||||
`Record`<`string`, `unknown`>
|
||||
|
||||
#### Implementation of
|
||||
|
||||
[`FullTextQuery`](../interfaces/FullTextQuery.md).[`toDict`](../interfaces/FullTextQuery.md#todict)
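A hedged sketch of the new options object (table, column, and values illustrative):

```ts
import { MatchQuery, Operator } from "@lancedb/lancedb";

// Fuzzy match on `text`: tolerate one edit per term and require all terms to match.
const match = new MatchQuery("pizzza margherita", "text", {
  fuzziness: 1,
  maxExpansions: 50,
  operator: Operator.And,
});
const hits = await table.search(match).limit(10).toArray();
```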
|
||||
|
||||
@@ -33,20 +33,22 @@ Construct a MergeInsertBuilder. __Internal use only.__
|
||||
### execute()
|
||||
|
||||
```ts
|
||||
execute(data): Promise<void>
|
||||
execute(data, execOptions?): Promise<MergeResult>
|
||||
```
|
||||
|
||||
Executes the merge insert operation
|
||||
|
||||
The `Table` is updated and a merge result describing the operation is returned
|
||||
|
||||
#### Parameters
|
||||
|
||||
* **data**: [`Data`](../type-aliases/Data.md)
|
||||
|
||||
* **execOptions?**: `Partial`<[`WriteExecutionOptions`](../interfaces/WriteExecutionOptions.md)>
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`void`>
|
||||
`Promise`<[`MergeResult`](../interfaces/MergeResult.md)>
|
||||
|
||||
the merge result
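Based on the updated signature, a minimal sketch (assuming an open `table` and an array of records `newData`):

```ts
// Upsert on key column "a"; the result reports row counts and the new table version.
const mergeResult = await table
  .mergeInsert("a")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute(newData, { timeoutMs: 10_000 });
console.log(mergeResult.numInsertedRows, mergeResult.numUpdatedRows, mergeResult.version);
```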
|
||||
|
||||
***
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ including methods to retrieve the query type and convert the query to a dictiona
|
||||
new MultiMatchQuery(
|
||||
query,
|
||||
columns,
|
||||
boosts): MultiMatchQuery
|
||||
options?): MultiMatchQuery
|
||||
```
|
||||
|
||||
Creates an instance of MultiMatchQuery.
|
||||
@@ -35,10 +35,14 @@ Creates an instance of MultiMatchQuery.
|
||||
* **columns**: `string`[]
|
||||
An array of column names to search within.
|
||||
|
||||
* **boosts**: `number`[] = `...`
|
||||
(Optional) An array of boost factors corresponding to each column. Default is an array of 1.0 for each column.
|
||||
The `boosts` array should have the same length as `columns`. If not provided, all columns will have a default boost of 1.0.
|
||||
If the length of `boosts` is less than `columns`, it will be padded with 1.0s.
|
||||
* **options?**
|
||||
Optional parameters for the multi-match query.
|
||||
- `boosts`: An array of boost factors for each column (default is 1.0 for all).
|
||||
- `operator`: The logical operator to use for combining terms in the query (default is "OR").
|
||||
|
||||
* **options.boosts?**: `number`[]
|
||||
|
||||
* **options.operator?**: [`Operator`](../enumerations/Operator.md)
|
||||
|
||||
#### Returns
|
||||
|
||||
@@ -52,6 +56,8 @@ Creates an instance of MultiMatchQuery.
|
||||
queryType(): FullTextQueryType
|
||||
```
|
||||
|
||||
The type of the full-text query.
|
||||
|
||||
#### Returns
|
||||
|
||||
[`FullTextQueryType`](../enumerations/FullTextQueryType.md)
|
||||
@@ -59,19 +65,3 @@ queryType(): FullTextQueryType
|
||||
#### Implementation of
|
||||
|
||||
[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)
|
||||
|
||||
***
|
||||
|
||||
### toDict()
|
||||
|
||||
```ts
|
||||
toDict(): Record<string, unknown>
|
||||
```
|
||||
|
||||
#### Returns
|
||||
|
||||
`Record`<`string`, `unknown`>
|
||||
|
||||
#### Implementation of
|
||||
|
||||
[`FullTextQuery`](../interfaces/FullTextQuery.md).[`toDict`](../interfaces/FullTextQuery.md#todict)
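A hedged sketch (the column names are illustrative, assuming FTS indices on both):

```ts
import { MultiMatchQuery } from "@lancedb/lancedb";

// Search two columns, weighting `title` matches twice as heavily as `body`.
const multi = new MultiMatchQuery("wine pairing", ["title", "body"], {
  boosts: [2.0, 1.0],
});
const found = await table.search(multi).limit(5).toArray();
```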
|
||||
|
||||
@@ -19,7 +19,10 @@ including methods to retrieve the query type and convert the query to a dictiona
|
||||
### new PhraseQuery()
|
||||
|
||||
```ts
|
||||
new PhraseQuery(query, column): PhraseQuery
|
||||
new PhraseQuery(
|
||||
query,
|
||||
column,
|
||||
options?): PhraseQuery
|
||||
```
|
||||
|
||||
Creates an instance of `PhraseQuery`.
|
||||
@@ -32,6 +35,12 @@ Creates an instance of `PhraseQuery`.
|
||||
* **column**: `string`
|
||||
The name of the column to search within.
|
||||
|
||||
* **options?**
|
||||
Optional parameters for the phrase query.
|
||||
- `slop`: The maximum number of intervening unmatched positions allowed between words in the phrase (default is 0).
|
||||
|
||||
* **options.slop?**: `number`
|
||||
|
||||
#### Returns
|
||||
|
||||
[`PhraseQuery`](PhraseQuery.md)
|
||||
@@ -44,6 +53,8 @@ Creates an instance of `PhraseQuery`.
|
||||
queryType(): FullTextQueryType
|
||||
```
|
||||
|
||||
The type of the full-text query.
|
||||
|
||||
#### Returns
|
||||
|
||||
[`FullTextQueryType`](../enumerations/FullTextQueryType.md)
|
||||
@@ -51,19 +62,3 @@ queryType(): FullTextQueryType
|
||||
#### Implementation of
|
||||
|
||||
[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)
|
||||
|
||||
***
|
||||
|
||||
### toDict()
|
||||
|
||||
```ts
|
||||
toDict(): Record<string, unknown>
|
||||
```
|
||||
|
||||
#### Returns
|
||||
|
||||
`Record`<`string`, `unknown`>
|
||||
|
||||
#### Implementation of
|
||||
|
||||
[`FullTextQuery`](../interfaces/FullTextQuery.md).[`toDict`](../interfaces/FullTextQuery.md#todict)
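A hedged sketch of the `slop` option (table, column, and phrase illustrative):

```ts
import { PhraseQuery } from "@lancedb/lancedb";

// Match the words as a phrase, allowing up to one intervening token between them.
const phrase = new PhraseQuery("grilled salmon", "text", { slop: 1 });
const matches = await table.search(phrase).limit(5).toArray();
```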
|
||||
|
||||
@@ -40,7 +40,7 @@ Returns the name of the table
|
||||
### add()
|
||||
|
||||
```ts
|
||||
abstract add(data, options?): Promise<void>
|
||||
abstract add(data, options?): Promise<AddResult>
|
||||
```
|
||||
|
||||
Insert records into this Table.
|
||||
@@ -54,14 +54,17 @@ Insert records into this Table.
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`void`>
|
||||
`Promise`<[`AddResult`](../interfaces/AddResult.md)>
|
||||
|
||||
A promise that resolves to an object
|
||||
containing the new version number of the table
|
||||
|
||||
***
|
||||
|
||||
### addColumns()
|
||||
|
||||
```ts
|
||||
abstract addColumns(newColumnTransforms): Promise<void>
|
||||
abstract addColumns(newColumnTransforms): Promise<AddColumnsResult>
|
||||
```
|
||||
|
||||
Add new columns with defined values.
|
||||
@@ -76,14 +79,17 @@ Add new columns with defined values.
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`void`>
|
||||
`Promise`<[`AddColumnsResult`](../interfaces/AddColumnsResult.md)>
|
||||
|
||||
A promise that resolves to an object
|
||||
containing the new version number of the table after adding the columns.
|
||||
|
||||
***
|
||||
|
||||
### alterColumns()
|
||||
|
||||
```ts
|
||||
abstract alterColumns(columnAlterations): Promise<void>
|
||||
abstract alterColumns(columnAlterations): Promise<AlterColumnsResult>
|
||||
```
|
||||
|
||||
Alter the name or nullability of columns.
|
||||
@@ -96,7 +102,10 @@ Alter the name or nullability of columns.
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`void`>
|
||||
`Promise`<[`AlterColumnsResult`](../interfaces/AlterColumnsResult.md)>
|
||||
|
||||
A promise that resolves to an object
|
||||
containing the new version number of the table after altering the columns.
|
||||
|
||||
***
|
||||
|
||||
@@ -117,8 +126,8 @@ wish to return to standard mode, call `checkoutLatest`.
|
||||
|
||||
#### Parameters
|
||||
|
||||
* **version**: `number`
|
||||
The version to checkout
|
||||
* **version**: `string` \| `number`
|
||||
The version to checkout; this can be a version number or a tag name
|
||||
|
||||
#### Returns
|
||||
|
||||
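A minimal sketch of the widened signature (the tag name is hypothetical and assumes a tag created via `table.tags()`):

```ts
await table.checkout(1);      // checkout by version number, as before
await table.checkout("v1");   // checkout by tag name
await table.checkoutLatest(); // return to standard mode
```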
@@ -252,7 +261,7 @@ await table.createIndex("my_float_col");
|
||||
### delete()
|
||||
|
||||
```ts
|
||||
abstract delete(predicate): Promise<void>
|
||||
abstract delete(predicate): Promise<DeleteResult>
|
||||
```
|
||||
|
||||
Delete the rows that satisfy the predicate.
|
||||
@@ -263,7 +272,10 @@ Delete the rows that satisfy the predicate.
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`void`>
|
||||
`Promise`<[`DeleteResult`](../interfaces/DeleteResult.md)>
|
||||
|
||||
A promise that resolves to an object
|
||||
containing the new version number of the table
|
||||
|
||||
***
|
||||
|
||||
@@ -284,7 +296,7 @@ Return a brief description of the table
|
||||
### dropColumns()
|
||||
|
||||
```ts
|
||||
abstract dropColumns(columnNames): Promise<void>
|
||||
abstract dropColumns(columnNames): Promise<DropColumnsResult>
|
||||
```
|
||||
|
||||
Drop one or more columns from the dataset
|
||||
@@ -303,7 +315,10 @@ then call ``cleanup_files`` to remove the old files.
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`void`>
|
||||
`Promise`<[`DropColumnsResult`](../interfaces/DropColumnsResult.md)>
|
||||
|
||||
A promise that resolves to an object
|
||||
containing the new version number of the table after dropping the columns.
|
||||
|
||||
***
|
||||
|
||||
@@ -454,6 +469,28 @@ Modeled after ``VACUUM`` in PostgreSQL.
|
||||
|
||||
***
|
||||
|
||||
### prewarmIndex()
|
||||
|
||||
```ts
|
||||
abstract prewarmIndex(name): Promise<void>
|
||||
```
|
||||
|
||||
Prewarm an index in the table.

This will load the index into memory. This may reduce the cold-start time for
future queries. If the index does not fit in the cache then this call may be
wasteful.

#### Parameters

* **name**: `string`
The name of the index.
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`void`>
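For example (the index name is hypothetical):

```ts
// Load the index into memory ahead of queries to reduce cold-start latency.
await table.prewarmIndex("vec_idx");
```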
|
||||
|
||||
***
|
||||
|
||||
### query()
|
||||
|
||||
```ts
|
||||
@@ -575,7 +612,7 @@ of the given query
|
||||
|
||||
#### Parameters
|
||||
|
||||
* **query**: `string` \| [`IntoVector`](../type-aliases/IntoVector.md)
|
||||
* **query**: `string` \| [`IntoVector`](../type-aliases/IntoVector.md) \| [`FullTextQuery`](../interfaces/FullTextQuery.md)
|
||||
the query, a vector or string
|
||||
|
||||
* **queryType?**: `string`
|
||||
@@ -593,6 +630,50 @@ of the given query
|
||||
|
||||
***
|
||||
|
||||
### stats()
|
||||
|
||||
```ts
|
||||
abstract stats(): Promise<TableStatistics>
|
||||
```
|
||||
|
||||
Returns table and fragment statistics
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<[`TableStatistics`](../interfaces/TableStatistics.md)>
|
||||
|
||||
The table and fragment statistics
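A minimal sketch of reading the statistics:

```ts
const stats = await table.stats();
console.log(stats.numRows, stats.totalBytes, stats.fragmentStats.numFragments);
```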
|
||||
|
||||
***
|
||||
|
||||
### tags()
|
||||
|
||||
```ts
|
||||
abstract tags(): Promise<Tags>
|
||||
```
|
||||
|
||||
Get a tags manager for this table.
|
||||
|
||||
Tags allow you to label specific versions of a table with a human-readable name.
|
||||
The returned tags manager can be used to list, create, update, or delete tags.
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<[`Tags`](Tags.md)>
|
||||
|
||||
A tags manager for this table
|
||||
|
||||
#### Example
|
||||
|
||||
```typescript
|
||||
const tagsManager = await table.tags();
|
||||
await tagsManager.create("v1", 1);
|
||||
const tags = await tagsManager.list();
|
||||
console.log(tags); // { "v1": { version: 1, manifestSize: ... } }
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
### toArrow()
|
||||
|
||||
```ts
|
||||
@@ -612,7 +693,7 @@ Return the table as an arrow table
|
||||
#### update(opts)
|
||||
|
||||
```ts
|
||||
abstract update(opts): Promise<void>
|
||||
abstract update(opts): Promise<UpdateResult>
|
||||
```
|
||||
|
||||
Update existing records in the Table
|
||||
@@ -623,7 +704,10 @@ Update existing records in the Table
|
||||
|
||||
##### Returns
|
||||
|
||||
`Promise`<`void`>
|
||||
`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>
|
||||
|
||||
A promise that resolves to an object containing
|
||||
the number of rows updated and the new version number
|
||||
|
||||
##### Example
|
||||
|
||||
@@ -634,7 +718,7 @@ table.update({where:"x = 2", values:{"vector": [10, 10]}})
|
||||
#### update(opts)
|
||||
|
||||
```ts
|
||||
abstract update(opts): Promise<void>
|
||||
abstract update(opts): Promise<UpdateResult>
|
||||
```
|
||||
|
||||
Update existing records in the Table
|
||||
@@ -645,7 +729,10 @@ Update existing records in the Table
|
||||
|
||||
##### Returns
|
||||
|
||||
`Promise`<`void`>
|
||||
`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>
|
||||
|
||||
A promise that resolves to an object containing
|
||||
the number of rows updated and the new version number
|
||||
|
||||
##### Example
|
||||
|
||||
@@ -656,7 +743,7 @@ table.update({where:"x = 2", valuesSql:{"x": "x + 1"}})
|
||||
#### update(updates, options)
|
||||
|
||||
```ts
|
||||
abstract update(updates, options?): Promise<void>
|
||||
abstract update(updates, options?): Promise<UpdateResult>
|
||||
```
|
||||
|
||||
Update existing records in the Table
|
||||
@@ -679,10 +766,6 @@ repeatedly calling this method.
|
||||
* **updates**: `Record`<`string`, `string`> \| `Map`<`string`, `string`>
|
||||
the
|
||||
columns to update
|
||||
Keys in the map should specify the name of the column to update.
|
||||
Values in the map provide the new value of the column. These can
|
||||
be SQL literal strings (e.g. "7" or "'foo'") or they can be expressions
|
||||
based on the row being updated (e.g. "my_col + 1")
|
||||
|
||||
* **options?**: `Partial`<[`UpdateOptions`](../interfaces/UpdateOptions.md)>
|
||||
additional options to control
|
||||
@@ -690,7 +773,15 @@ repeatedly calilng this method.
|
||||
|
||||
##### Returns
|
||||
|
||||
`Promise`<`void`>
|
||||
`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>
|
||||
|
||||
A promise that resolves to an object
|
||||
containing the number of rows updated and the new version number
|
||||
|
||||
Keys in the map should specify the name of the column to update.
|
||||
Values in the map provide the new value of the column. These can
|
||||
be SQL literal strings (e.g. "7" or "'foo'") or they can be expressions
|
||||
based on the row being updated (e.g. "my_col + 1")
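With the new return type, the result can be inspected directly; a minimal sketch:

```ts
// Set column `id` to the SQL literal 7 on every row; inspect the result.
const updateResult = await table.update({ id: "7" });
console.log(updateResult.rowsUpdated, updateResult.version);
```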
|
||||
|
||||
***
|
||||
|
||||
@@ -731,3 +822,26 @@ Retrieve the version of the table
|
||||
#### Returns
|
||||
|
||||
`Promise`<`number`>
|
||||
|
||||
***
|
||||
|
||||
### waitForIndex()
|
||||
|
||||
```ts
|
||||
abstract waitForIndex(indexNames, timeoutSeconds): Promise<void>
|
||||
```
|
||||
|
||||
Waits for asynchronous indexing to complete on the table.
|
||||
|
||||
#### Parameters
|
||||
|
||||
* **indexNames**: `string`[]
|
||||
The names of the indices to wait for
|
||||
|
||||
* **timeoutSeconds**: `number`
|
||||
The number of seconds to wait before timing out
|
||||
This will raise an error if the indices are not created and fully indexed within the timeout.
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`void`>
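A minimal sketch (the index and column names are illustrative):

```ts
// Create an index, then block until it is fully built or 5 seconds elapse.
await table.createIndex("vec");
const indices = await table.listIndices();
await table.waitForIndex([indices[0].name], 5);
```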
|
||||
|
||||
docs/src/js/classes/TagContents.md (new file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / TagContents
|
||||
|
||||
# Class: TagContents
|
||||
|
||||
## Constructors
|
||||
|
||||
### new TagContents()
|
||||
|
||||
```ts
|
||||
new TagContents(): TagContents
|
||||
```
|
||||
|
||||
#### Returns
|
||||
|
||||
[`TagContents`](TagContents.md)
|
||||
|
||||
## Properties
|
||||
|
||||
### manifestSize
|
||||
|
||||
```ts
|
||||
manifestSize: number;
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
### version
|
||||
|
||||
```ts
|
||||
version: number;
|
||||
```
|
||||
docs/src/js/classes/Tags.md (new file, 99 lines)
@@ -0,0 +1,99 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / Tags
|
||||
|
||||
# Class: Tags
|
||||
|
||||
## Constructors
|
||||
|
||||
### new Tags()
|
||||
|
||||
```ts
|
||||
new Tags(): Tags
|
||||
```
|
||||
|
||||
#### Returns
|
||||
|
||||
[`Tags`](Tags.md)
|
||||
|
||||
## Methods
|
||||
|
||||
### create()
|
||||
|
||||
```ts
|
||||
create(tag, version): Promise<void>
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
|
||||
* **tag**: `string`
|
||||
|
||||
* **version**: `number`
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`void`>
|
||||
|
||||
***
|
||||
|
||||
### delete()
|
||||
|
||||
```ts
|
||||
delete(tag): Promise<void>
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
|
||||
* **tag**: `string`
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`void`>
|
||||
|
||||
***
|
||||
|
||||
### getVersion()
|
||||
|
||||
```ts
|
||||
getVersion(tag): Promise<number>
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
|
||||
* **tag**: `string`
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`number`>
|
||||
|
||||
***
|
||||
|
||||
### list()
|
||||
|
||||
```ts
|
||||
list(): Promise<Record<string, TagContents>>
|
||||
```
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`Record`<`string`, [`TagContents`](TagContents.md)>>
|
||||
|
||||
***
|
||||
|
||||
### update()
|
||||
|
||||
```ts
|
||||
update(tag, version): Promise<void>
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
|
||||
* **tag**: `string`
|
||||
|
||||
* **version**: `number`
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`void`>
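A hedged sketch of how the methods compose (the tag names are hypothetical):

```ts
const tags = await table.tags();
await tags.create("v1", 1);                  // label version 1 as "v1"
const version = await tags.getVersion("v1"); // => 1
await tags.update("v1", 2);                  // move the tag to version 2
await tags.delete("v1");
```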
|
||||
@@ -15,6 +15,14 @@ Enum representing the types of full-text queries supported.
|
||||
|
||||
## Enumeration Members
|
||||
|
||||
### Boolean
|
||||
|
||||
```ts
|
||||
Boolean: "boolean";
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
### Boost
|
||||
|
||||
```ts
|
||||
|
||||
docs/src/js/enumerations/Occur.md (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / Occur
|
||||
|
||||
# Enumeration: Occur
|
||||
|
||||
Enum representing the occurrence of terms in full-text queries.
|
||||
|
||||
- `Must`: The term must be present in the document.
|
||||
- `Should`: The term should contribute to the document score, but is not required.
|
||||
|
||||
## Enumeration Members
|
||||
|
||||
### Must
|
||||
|
||||
```ts
|
||||
Must: "MUST";
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
### Should
|
||||
|
||||
```ts
|
||||
Should: "SHOULD";
|
||||
```
|
||||
docs/src/js/enumerations/Operator.md (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / Operator
|
||||
|
||||
# Enumeration: Operator
|
||||
|
||||
Enum representing the logical operators used in full-text queries.
|
||||
|
||||
- `And`: All terms must match.
|
||||
- `Or`: At least one term must match.
|
||||
|
||||
## Enumeration Members
|
||||
|
||||
### And
|
||||
|
||||
```ts
|
||||
And: "AND";
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
### Or
|
||||
|
||||
```ts
|
||||
Or: "OR";
|
||||
```
|
||||
@@ -12,9 +12,12 @@
|
||||
## Enumerations
|
||||
|
||||
- [FullTextQueryType](enumerations/FullTextQueryType.md)
|
||||
- [Occur](enumerations/Occur.md)
|
||||
- [Operator](enumerations/Operator.md)
|
||||
|
||||
## Classes
|
||||
|
||||
- [BooleanQuery](classes/BooleanQuery.md)
|
||||
- [BoostQuery](classes/BoostQuery.md)
|
||||
- [Connection](classes/Connection.md)
|
||||
- [Index](classes/Index.md)
|
||||
@@ -27,19 +30,28 @@
|
||||
- [QueryBase](classes/QueryBase.md)
|
||||
- [RecordBatchIterator](classes/RecordBatchIterator.md)
|
||||
- [Table](classes/Table.md)
|
||||
- [TagContents](classes/TagContents.md)
|
||||
- [Tags](classes/Tags.md)
|
||||
- [VectorColumnOptions](classes/VectorColumnOptions.md)
|
||||
- [VectorQuery](classes/VectorQuery.md)
|
||||
|
||||
## Interfaces
|
||||
|
||||
- [AddColumnsResult](interfaces/AddColumnsResult.md)
|
||||
- [AddColumnsSql](interfaces/AddColumnsSql.md)
|
||||
- [AddDataOptions](interfaces/AddDataOptions.md)
|
||||
- [AddResult](interfaces/AddResult.md)
|
||||
- [AlterColumnsResult](interfaces/AlterColumnsResult.md)
|
||||
- [ClientConfig](interfaces/ClientConfig.md)
|
||||
- [ColumnAlteration](interfaces/ColumnAlteration.md)
|
||||
- [CompactionStats](interfaces/CompactionStats.md)
|
||||
- [ConnectionOptions](interfaces/ConnectionOptions.md)
|
||||
- [CreateTableOptions](interfaces/CreateTableOptions.md)
|
||||
- [DeleteResult](interfaces/DeleteResult.md)
|
||||
- [DropColumnsResult](interfaces/DropColumnsResult.md)
|
||||
- [ExecutableQuery](interfaces/ExecutableQuery.md)
|
||||
- [FragmentStatistics](interfaces/FragmentStatistics.md)
|
||||
- [FragmentSummaryStats](interfaces/FragmentSummaryStats.md)
|
||||
- [FtsOptions](interfaces/FtsOptions.md)
|
||||
- [FullTextQuery](interfaces/FullTextQuery.md)
|
||||
- [FullTextSearchOptions](interfaces/FullTextSearchOptions.md)
|
||||
@@ -50,6 +62,7 @@
|
||||
- [IndexStatistics](interfaces/IndexStatistics.md)
|
||||
- [IvfFlatOptions](interfaces/IvfFlatOptions.md)
|
||||
- [IvfPqOptions](interfaces/IvfPqOptions.md)
|
||||
- [MergeResult](interfaces/MergeResult.md)
|
||||
- [OpenTableOptions](interfaces/OpenTableOptions.md)
|
||||
- [OptimizeOptions](interfaces/OptimizeOptions.md)
|
||||
- [OptimizeStats](interfaces/OptimizeStats.md)
|
||||
@@ -57,9 +70,12 @@
|
||||
- [RemovalStats](interfaces/RemovalStats.md)
|
||||
- [RetryConfig](interfaces/RetryConfig.md)
|
||||
- [TableNamesOptions](interfaces/TableNamesOptions.md)
|
||||
- [TableStatistics](interfaces/TableStatistics.md)
|
||||
- [TimeoutConfig](interfaces/TimeoutConfig.md)
|
||||
- [UpdateOptions](interfaces/UpdateOptions.md)
|
||||
- [UpdateResult](interfaces/UpdateResult.md)
|
||||
- [Version](interfaces/Version.md)
|
||||
- [WriteExecutionOptions](interfaces/WriteExecutionOptions.md)
|
||||
|
||||
## Type Aliases
|
||||
|
||||
|
||||
docs/src/js/interfaces/AddColumnsResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / AddColumnsResult
|
||||
|
||||
# Interface: AddColumnsResult
|
||||
|
||||
## Properties
|
||||
|
||||
### version
|
||||
|
||||
```ts
|
||||
version: number;
|
||||
```
|
||||
docs/src/js/interfaces/AddResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / AddResult
|
||||
|
||||
# Interface: AddResult
|
||||
|
||||
## Properties
|
||||
|
||||
### version
|
||||
|
||||
```ts
|
||||
version: number;
|
||||
```
|
||||
docs/src/js/interfaces/AlterColumnsResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / AlterColumnsResult
|
||||
|
||||
# Interface: AlterColumnsResult
|
||||
|
||||
## Properties
|
||||
|
||||
### version
|
||||
|
||||
```ts
|
||||
version: number;
|
||||
```
|
||||
@@ -44,7 +44,7 @@ for testing purposes.
|
||||
### readConsistencyInterval?
|
||||
|
||||
```ts
|
||||
optional readConsistencyInterval: null | number;
|
||||
optional readConsistencyInterval: number;
|
||||
```
|
||||
|
||||
(For LanceDB OSS only): The interval, in seconds, at which to check for
|
||||
|
||||
docs/src/js/interfaces/DeleteResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / DeleteResult
|
||||
|
||||
# Interface: DeleteResult
|
||||
|
||||
## Properties
|
||||
|
||||
### version
|
||||
|
||||
```ts
|
||||
version: number;
|
||||
```
|
||||
docs/src/js/interfaces/DropColumnsResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / DropColumnsResult
|
||||
|
||||
# Interface: DropColumnsResult
|
||||
|
||||
## Properties
|
||||
|
||||
### version
|
||||
|
||||
```ts
|
||||
version: number;
|
||||
```
|
||||
docs/src/js/interfaces/FragmentStatistics.md (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / FragmentStatistics
|
||||
|
||||
# Interface: FragmentStatistics
|
||||
|
||||
## Properties
|
||||
|
||||
### lengths
|
||||
|
||||
```ts
|
||||
lengths: FragmentSummaryStats;
|
||||
```
|
||||
|
||||
Statistics on the number of rows in the table fragments
|
||||
|
||||
***
|
||||
|
||||
### numFragments
|
||||
|
||||
```ts
|
||||
numFragments: number;
|
||||
```
|
||||
|
||||
The number of fragments in the table
|
||||
|
||||
***
|
||||
|
||||
### numSmallFragments
|
||||
|
||||
```ts
|
||||
numSmallFragments: number;
|
||||
```
|
||||
|
||||
The number of uncompacted fragments in the table
|
||||
docs/src/js/interfaces/FragmentSummaryStats.md (new file, 77 lines)
@@ -0,0 +1,77 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / FragmentSummaryStats
|
||||
|
||||
# Interface: FragmentSummaryStats
|
||||
|
||||
## Properties
|
||||
|
||||
### max
|
||||
|
||||
```ts
|
||||
max: number;
|
||||
```
|
||||
|
||||
The number of rows in the fragment with the most rows
|
||||
|
||||
***
|
||||
|
||||
### mean
|
||||
|
||||
```ts
|
||||
mean: number;
|
||||
```
|
||||
|
||||
The mean number of rows in the fragments
|
||||
|
||||
***
|
||||
|
||||
### min
|
||||
|
||||
```ts
|
||||
min: number;
|
||||
```
|
||||
|
||||
The number of rows in the fragment with the fewest rows
|
||||
|
||||
***
|
||||
|
||||
### p25
|
||||
|
||||
```ts
|
||||
p25: number;
|
||||
```
|
||||
|
||||
The 25th percentile of number of rows in the fragments
|
||||
|
||||
***
|
||||
|
||||
### p50
|
||||
|
||||
```ts
|
||||
p50: number;
|
||||
```
|
||||
|
||||
The 50th percentile of number of rows in the fragments
|
||||
|
||||
***
|
||||
|
||||
### p75
|
||||
|
||||
```ts
|
||||
p75: number;
|
||||
```
|
||||
|
||||
The 75th percentile of number of rows in the fragments
|
||||
|
||||
***
|
||||
|
||||
### p99
|
||||
|
||||
```ts
|
||||
p99: number;
|
||||
```
|
||||
|
||||
The 99th percentile of number of rows in the fragments
|
||||
@@ -18,18 +18,8 @@ including methods to retrieve the query type and convert the query to a dictiona
|
||||
queryType(): FullTextQueryType
|
||||
```
|
||||
|
||||
The type of the full-text query.
|
||||
|
||||
#### Returns
|
||||
|
||||
[`FullTextQueryType`](../enumerations/FullTextQueryType.md)
|
||||
|
||||
***
|
||||
|
||||
### toDict()
|
||||
|
||||
```ts
|
||||
toDict(): Record<string, unknown>
|
||||
```
|
||||
|
||||
#### Returns
|
||||
|
||||
`Record`<`string`, `unknown`>
|
||||
|
||||
@@ -39,3 +39,11 @@ and the same name, then an error will be returned. This is true even if
|
||||
that index is out of date.
|
||||
|
||||
The default is true
|
||||
|
||||
***
|
||||
|
||||
### waitTimeoutSeconds?
|
||||
|
||||
```ts
|
||||
optional waitTimeoutSeconds: number;
|
||||
```
|
||||
|
||||
docs/src/js/interfaces/MergeResult.md (new file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / MergeResult
|
||||
|
||||
# Interface: MergeResult
|
||||
|
||||
## Properties
|
||||
|
||||
### numDeletedRows
|
||||
|
||||
```ts
|
||||
numDeletedRows: number;
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
### numInsertedRows
|
||||
|
||||
```ts
|
||||
numInsertedRows: number;
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
### numUpdatedRows
|
||||
|
||||
```ts
|
||||
numUpdatedRows: number;
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
### version
|
||||
|
||||
```ts
|
||||
version: number;
|
||||
```
|
||||
@@ -20,3 +20,13 @@ The maximum number of rows to return in a single batch
|
||||
|
||||
Batches may have fewer rows if the underlying data is stored
|
||||
in smaller chunks.
|
||||
|
||||
***
|
||||
|
||||
### timeoutMs?
|
||||
|
||||
```ts
|
||||
optional timeoutMs: number;
|
||||
```
|
||||
|
||||
Timeout for query execution in milliseconds
|
||||
|
||||
docs/src/js/interfaces/TableStatistics.md (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / TableStatistics
|
||||
|
||||
# Interface: TableStatistics
|
||||
|
||||
## Properties
|
||||
|
||||
### fragmentStats
|
||||
|
||||
```ts
|
||||
fragmentStats: FragmentStatistics;
|
||||
```
|
||||
|
||||
Statistics on table fragments
|
||||
|
||||
***
|
||||
|
||||
### numIndices
|
||||
|
||||
```ts
|
||||
numIndices: number;
|
||||
```
|
||||
|
||||
The number of indices in the table
|
||||
|
||||
***
|
||||
|
||||
### numRows
|
||||
|
||||
```ts
|
||||
numRows: number;
|
||||
```
|
||||
|
||||
The number of rows in the table
|
||||
|
||||
***
|
||||
|
||||
### totalBytes
|
||||
|
||||
```ts
|
||||
totalBytes: number;
|
||||
```
|
||||
|
||||
The total number of bytes in the table
|
||||
docs/src/js/interfaces/UpdateResult.md (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / UpdateResult
|
||||
|
||||
# Interface: UpdateResult
|
||||
|
||||
## Properties
|
||||
|
||||
### rowsUpdated
|
||||
|
||||
```ts
|
||||
rowsUpdated: number;
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
### version
|
||||
|
||||
```ts
|
||||
version: number;
|
||||
```
|
||||
docs/src/js/interfaces/WriteExecutionOptions.md (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||
|
||||
***
|
||||
|
||||
[@lancedb/lancedb](../globals.md) / WriteExecutionOptions
|
||||
|
||||
# Interface: WriteExecutionOptions
|
||||
|
||||
## Properties
|
||||
|
||||
### timeoutMs?
|
||||
|
||||
```ts
|
||||
optional timeoutMs: number;
|
||||
```
|
||||
|
||||
Maximum time to run the operation before cancelling it.
|
||||
|
||||
By default, there is a 30-second timeout that is only enforced after the
|
||||
first attempt. This is to prevent spending too long retrying to resolve
|
||||
conflicts. For example, if a write attempt takes 20 seconds and fails,
|
||||
the second attempt will be cancelled after 10 seconds, hitting the
|
||||
30-second timeout. However, a write that takes one hour and succeeds on the
|
||||
first attempt will not be cancelled.
|
||||
|
||||
When this is set, the timeout is enforced on all attempts, including the first.
|
||||
@@ -428,7 +428,7 @@
|
||||
"\n",
|
||||
"**Why?** \n",
|
||||
"Embedding the UFO dataset and ingesting it into LanceDB takes **~2 hours on a T4 GPU**. To save time: \n",
|
||||
"- **Use the pre-prepared table with index created ** (provided below) to proceed directly to step7: search. \n",
|
||||
"- **Use the pre-prepared table with index created** (provided below) to proceed directly to **Step 7**: search. \n",
|
||||
"- **Step 5a** contains the full ingestion code for reference (run it only if necessary). \n",
|
||||
"- **Step 6** contains the details on creating the index on the multivector column"
|
||||
]
|
||||
|
||||
docs/src/python/datafusion.md (new file, 53 lines)
@@ -0,0 +1,53 @@
|
||||
# Apache DataFusion

In Python, LanceDB tables can also be queried with [Apache DataFusion](https://datafusion.apache.org/), an extensible query engine written in Rust that uses Apache Arrow as its in-memory format. This means you can write complex SQL queries to analyze your data in LanceDB.

This integration is done via [Datafusion FFI](https://docs.rs/datafusion-ffi/latest/datafusion_ffi/), which provides a native integration between LanceDB and DataFusion.
The DataFusion FFI allows column selections and basic filters to be pushed down to LanceDB, reducing the amount of data scanned when executing your query. Additionally, the integration can stream data from LanceDB tables, which enables larger-than-memory aggregations.

We can demonstrate this by first installing `datafusion` and `lancedb`.

```shell
pip install datafusion lancedb
```

We will re-use the dataset [created previously](./pandas_and_pyarrow.md):

```python
import lancedb

from datafusion import SessionContext
from lance import FFILanceTableProvider

db = lancedb.connect("data/sample-lancedb")
data = [
    {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
    {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}
]
lance_table = db.create_table("lance_table", data)

ctx = SessionContext()

ffi_lance_table = FFILanceTableProvider(
    lance_table.to_lance(), with_row_id=True, with_row_addr=True
)
ctx.register_table_provider("ffi_lance_table", ffi_lance_table)
```

The `to_lance` method converts the LanceDB table to a `LanceDataset`, which is accessible to DataFusion through the DataFusion FFI integration layer.
To query the resulting Lance dataset in DataFusion, you first need to register the dataset with DataFusion and then reference it by the same name in your SQL query.

```python
ctx.table("ffi_lance_table")
ctx.sql("SELECT * FROM ffi_lance_table")
```

```
┌─────────────┬─────────┬────────┬─────────────────┬─────────────────┐
│ vector      │ item    │ price  │ _rowid          │ _rowaddr        │
│ float[]     │ varchar │ double │ bigint unsigned │ bigint unsigned │
├─────────────┼─────────┼────────┼─────────────────┼─────────────────┤
│ [3.1, 4.1]  │ foo     │ 10.0   │ 0               │ 0               │
│ [5.9, 26.5] │ bar     │ 20.0   │ 1               │ 1               │
└─────────────┴─────────┴────────┴─────────────────┴─────────────────┘
```
@@ -11,7 +11,6 @@ likely that someone who knows the answer will see your question.
|
||||
## Common issues
|
||||
|
||||
* Multiprocessing with `fork` is not supported. You should use `spawn` instead.
|
||||
* Data returned by queries may not reflect the most recent writes, depending on configuration. LanceDB uses eventual consistency by default. See [consistency](/docs/src/guides/tables.md#consistency) for more information.
|
||||
|
||||
## Enabling logging
|
||||
|
||||
|
||||
@@ -7,3 +7,4 @@ tantivy==0.20.1
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||
torch
|
||||
polars>=0.19, <=1.3.0
|
||||
datafusion
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
<parent>
|
||||
<groupId>com.lancedb</groupId>
|
||||
<artifactId>lancedb-parent</artifactId>
|
||||
<version>0.19.0-beta.0</version>
|
||||
<version>0.21.1-final.0</version>
|
||||
<relativePath>../pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
<groupId>com.lancedb</groupId>
|
||||
<artifactId>lancedb-parent</artifactId>
|
||||
<version>0.19.0-beta.0</version>
|
||||
<version>0.21.1-final.0</version>
|
||||
<packaging>pom</packaging>
|
||||
|
||||
<name>LanceDB Parent</name>
|
||||
|
||||
node/package-lock.json (generated, 51 lines changed)
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "vectordb",
|
||||
"version": "0.19.0-beta.0",
|
||||
"version": "0.21.1",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "vectordb",
|
||||
"version": "0.19.0-beta.0",
|
||||
"version": "0.21.1",
|
||||
"cpu": [
|
||||
"x64",
|
||||
"arm64"
|
||||
@@ -52,11 +52,11 @@
|
||||
"uuid": "^9.0.0"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@lancedb/vectordb-darwin-arm64": "0.19.0-beta.0",
|
||||
"@lancedb/vectordb-darwin-x64": "0.19.0-beta.0",
|
||||
"@lancedb/vectordb-linux-arm64-gnu": "0.19.0-beta.0",
|
||||
"@lancedb/vectordb-linux-x64-gnu": "0.19.0-beta.0",
|
||||
"@lancedb/vectordb-win32-x64-msvc": "0.19.0-beta.0"
|
||||
"@lancedb/vectordb-darwin-arm64": "0.21.1",
|
||||
"@lancedb/vectordb-darwin-x64": "0.21.1",
|
||||
"@lancedb/vectordb-linux-arm64-gnu": "0.21.1",
|
||||
"@lancedb/vectordb-linux-x64-gnu": "0.21.1",
|
||||
"@lancedb/vectordb-win32-x64-msvc": "0.21.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@apache-arrow/ts": "^14.0.2",
|
||||
@@ -327,9 +327,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@lancedb/vectordb-darwin-arm64": {
|
||||
"version": "0.19.0-beta.0",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.19.0-beta.0.tgz",
|
||||
"integrity": "sha512-J+A7OKq6pXdAkkU4H2hH4ZxAjPnjPIEZQHiR0Bt2NFrs97Gn9+YOkA3AXuLIdMVKq0O4CXvf/W/yulTZzn73ag==",
|
||||
"version": "0.21.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.21.1.tgz",
|
||||
"integrity": "sha512-eXeOKgK5s7MSKDzA7Hl4/9E2X8tWWMNV7UJiFdwxrUcop86tM5ePBi8tApRnaQ3wBXrs99XTVBJ7+j+2gzilVA==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -340,9 +340,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@lancedb/vectordb-darwin-x64": {
|
||||
"version": "0.19.0-beta.0",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.19.0-beta.0.tgz",
|
||||
"integrity": "sha512-cs7wAhVQYBu4PzSAQ3di/OqB9iMpBMLL+/b5Kxw42XojZixH5am0G7xdx14JzuappNHWCn52GiaqBCh6zREImg==",
|
||||
"version": "0.21.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.21.1.tgz",
|
||||
"integrity": "sha512-vLoPWfg7OPw5vazLH5/YD/yQkZiTiPniuQgsH+xTodRfLf926lny53G7LQ6nFXNKIzX/jYKtg7AfMU8IcDLSEQ==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -353,9 +353,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@lancedb/vectordb-linux-arm64-gnu": {
|
||||
"version": "0.19.0-beta.0",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.19.0-beta.0.tgz",
|
||||
"integrity": "sha512-jKd+WoTIkN6W7edG7I+itj+HtbwTdCuisGZB7TSKBKtYxtfY1q7nA9igb3kNoDQOMphhqNrR1RuzRPfvE08/Zg==",
|
||||
"version": "0.21.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.21.1.tgz",
|
||||
"integrity": "sha512-IMAxtXj5aHCv9peziN77IxQpkYFj83KvI8zQCHzbMMXv7BspkhAd0PaUViqHqtTf2TUHjYQ66a7clZrEn+xQuQ==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -366,9 +366,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@lancedb/vectordb-linux-x64-gnu": {
|
||||
"version": "0.19.0-beta.0",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.19.0-beta.0.tgz",
|
||||
"integrity": "sha512-pdGOBaYS/SLaF/UYT+uu29mR/V33/EWkq1zxl0OOzVveB08iQmw0NWnYDoEgT4BoPo4F59r2HOPCfMK2rqWG7w==",
|
||||
"version": "0.21.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.21.1.tgz",
|
||||
"integrity": "sha512-9oPOxBsYGngIhtC/oC+fQ9V0w9mgFuj2Wyler8f5UYQdiAutsTNyOUA+XjtcROjVZrZ5oUeIrvOQSte9BbpRTg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -379,9 +379,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@lancedb/vectordb-win32-x64-msvc": {
|
||||
"version": "0.19.0-beta.0",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.19.0-beta.0.tgz",
|
||||
"integrity": "sha512-zlXaZm+/ES4zXaEzmSd4LA5zIO88Kl3oKcR5crAaObQY9B3lZVWLh0w/knA+L+Nwg8Ixo81vStBqDVde+RJm1w==",
|
||||
"version": "0.21.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.21.1.tgz",
|
||||
"integrity": "sha512-XqDXFLfdjNpDZ5jaqLerdx+sDU4YLuPK3VF4TowwcOlWDrUtI/L1lAyCaKxcyz1qE3VGuZvhNU89N5ioEICb4Q==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -1184,9 +1184,10 @@
|
||||
}
|
||||
},
|
||||
"node_modules/axios": {
|
||||
"version": "1.7.7",
|
||||
"resolved": "https://registry.npmjs.org/axios/-/axios-1.7.7.tgz",
|
||||
"integrity": "sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q==",
|
||||
"version": "1.8.4",
|
||||
"resolved": "https://registry.npmjs.org/axios/-/axios-1.8.4.tgz",
|
||||
"integrity": "sha512-eBSYY4Y68NNlHbHBMdeDmKNtDgXWhQsJcGqzO3iLUM0GraQFSS9cVgPX5I9b3lbdFKyYoAEGAZF1DwhTaljNAw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"follow-redirects": "^1.15.6",
|
||||
"form-data": "^4.0.0",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "vectordb",
|
||||
"version": "0.19.0-beta.0",
|
||||
"version": "0.21.1",
|
||||
"description": " Serverless, low-latency vector database for AI applications",
|
||||
"private": false,
|
||||
"main": "dist/index.js",
|
||||
@@ -89,10 +89,10 @@
|
||||
}
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@lancedb/vectordb-darwin-x64": "0.19.0-beta.0",
|
||||
"@lancedb/vectordb-darwin-arm64": "0.19.0-beta.0",
|
||||
"@lancedb/vectordb-linux-x64-gnu": "0.19.0-beta.0",
|
||||
"@lancedb/vectordb-linux-arm64-gnu": "0.19.0-beta.0",
|
||||
"@lancedb/vectordb-win32-x64-msvc": "0.19.0-beta.0"
|
||||
"@lancedb/vectordb-darwin-x64": "0.21.1",
|
||||
"@lancedb/vectordb-darwin-arm64": "0.21.1",
|
||||
"@lancedb/vectordb-linux-x64-gnu": "0.21.1",
|
||||
"@lancedb/vectordb-linux-arm64-gnu": "0.21.1",
|
||||
"@lancedb/vectordb-win32-x64-msvc": "0.21.1"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -110,7 +110,7 @@ describe('LanceDB Mirrored Store Integration test', function () {
|
||||
|
||||
fs.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true }, (err, files) => {
|
||||
if (err != null) throw err
|
||||
assert.equal(files.length, 1, `Found files: ${files.map(f => f.name)}`)
|
||||
assert.equal(files.length, 1)
|
||||
assert.isTrue(files[0].name.endsWith('.lance'))
|
||||
})
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "lancedb-nodejs"
|
||||
edition.workspace = true
|
||||
version = "0.19.0-beta.0"
|
||||
version = "0.21.1"
|
||||
license.workspace = true
|
||||
description.workspace = true
|
||||
repository.workspace = true
|
||||
@@ -28,6 +28,10 @@ napi-derive = "2.16.4"
|
||||
lzma-sys = { version = "*", features = ["static"] }
|
||||
log.workspace = true
|
||||
|
||||
# Workaround for build failure until we can fix it.
|
||||
aws-lc-sys = "=0.28.0"
|
||||
aws-lc-rs = "=1.13.0"
|
||||
|
||||
[build-dependencies]
|
||||
napi-build = "2.1"
|
||||
|
||||
|
||||
@@ -374,6 +374,71 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
|
||||
expect(table2.numRows).toBe(4);
|
||||
expect(table2.schema).toEqual(schema);
|
||||
});
|
||||
|
||||
it("should correctly retain values in nested struct fields", async function () {
|
||||
// Define test data with nested struct
|
||||
const testData = [
|
||||
{
|
||||
id: "doc1",
|
||||
vector: [1, 2, 3],
|
||||
metadata: {
|
||||
filePath: "/path/to/file1.ts",
|
||||
startLine: 10,
|
||||
endLine: 20,
|
||||
text: "function test() { return true; }",
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "doc2",
|
||||
vector: [4, 5, 6],
|
||||
metadata: {
|
||||
filePath: "/path/to/file2.ts",
|
||||
startLine: 30,
|
||||
endLine: 40,
|
||||
text: "function test2() { return false; }",
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
// Create Arrow table from the data
|
||||
const table = makeArrowTable(testData);
|
||||
|
||||
// Verify schema has the nested struct fields
|
||||
const metadataField = table.schema.fields.find(
|
||||
(f) => f.name === "metadata",
|
||||
);
|
||||
expect(metadataField).toBeDefined();
|
||||
// biome-ignore lint/suspicious/noExplicitAny: accessing fields in different Arrow versions
|
||||
const childNames = metadataField?.type.children.map((c: any) => c.name);
|
||||
expect(childNames).toEqual([
|
||||
"filePath",
|
||||
"startLine",
|
||||
"endLine",
|
||||
"text",
|
||||
]);
|
||||
|
||||
// Convert to buffer and back (simulating storage and retrieval)
|
||||
const buf = await fromTableToBuffer(table);
|
||||
const retrievedTable = tableFromIPC(buf);
|
||||
|
||||
// Verify the retrieved table has the same structure
|
||||
const rows = [];
|
||||
for (let i = 0; i < retrievedTable.numRows; i++) {
|
||||
rows.push(retrievedTable.get(i));
|
||||
}
|
||||
|
||||
// Check values in the first row
|
||||
const firstRow = rows[0];
|
||||
expect(firstRow.id).toBe("doc1");
|
||||
expect(firstRow.vector.toJSON()).toEqual([1, 2, 3]);
|
||||
|
||||
// Verify metadata values are preserved (this is where the bug is)
|
||||
expect(firstRow.metadata).toBeDefined();
|
||||
expect(firstRow.metadata.filePath).toBe("/path/to/file1.ts");
|
||||
expect(firstRow.metadata.startLine).toBe(10);
|
||||
expect(firstRow.metadata.endLine).toBe(20);
|
||||
expect(firstRow.metadata.text).toBe("function test() { return true; }");
|
||||
});
|
||||
});
|
||||
|
||||
class DummyEmbedding extends EmbeddingFunction<string> {
|
||||
@@ -527,14 +592,14 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
|
||||
).rejects.toThrow("column vector was missing");
|
||||
});
|
||||
|
||||
it("will provide a nice error if run twice", async function () {
|
||||
it("will skip embedding application if already applied", async function () {
|
||||
const records = sampleRecords();
|
||||
const table = await convertToTable(records, dummyEmbeddingConfig);
|
||||
|
||||
// fromTableToBuffer will try and apply the embeddings again
|
||||
await expect(
|
||||
fromTableToBuffer(table, dummyEmbeddingConfig),
|
||||
).rejects.toThrow("already existed");
|
||||
// but should skip since the column already has non-null values
|
||||
const result = await fromTableToBuffer(table, dummyEmbeddingConfig);
|
||||
expect(result.byteLength).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ describe("when connecting", () => {
|
||||
it("should connect", async () => {
|
||||
const db = await connect(tmpDir.name);
|
||||
expect(db.display()).toBe(
|
||||
`ListingDatabase(uri=${tmpDir.name}, read_consistency_interval=5s)`,
|
||||
`ListingDatabase(uri=${tmpDir.name}, read_consistency_interval=None)`,
|
||||
);
|
||||
});
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ import * as arrow16 from "apache-arrow-16";
|
||||
import * as arrow17 from "apache-arrow-17";
|
||||
import * as arrow18 from "apache-arrow-18";
|
||||
|
||||
import { Table, connect } from "../lancedb";
|
||||
import { MatchQuery, PhraseQuery, Table, connect } from "../lancedb";
|
||||
import {
|
||||
Table as ArrowTable,
|
||||
Field,
|
||||
@@ -33,6 +33,13 @@ import {
|
||||
register,
|
||||
} from "../lancedb/embedding";
|
||||
import { Index } from "../lancedb/indices";
|
||||
import {
|
||||
BooleanQuery,
|
||||
Occur,
|
||||
Operator,
|
||||
instanceOfFullTextQuery,
|
||||
} from "../lancedb/query";
|
||||
import exp = require("constants");
|
||||
|
||||
describe.each([arrow15, arrow16, arrow17, arrow18])(
|
||||
"Given a table",
|
||||
@@ -58,7 +65,7 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
|
||||
|
||||
it("be displayable", async () => {
|
||||
expect(table.display()).toMatch(
|
||||
/NativeTable\(some_table, uri=.*, read_consistency_interval=5s\)/,
|
||||
/NativeTable\(some_table, uri=.*, read_consistency_interval=None\)/,
|
||||
);
|
||||
table.close();
|
||||
expect(table.display()).toBe("ClosedTable(some_table)");
|
||||
@@ -70,8 +77,33 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
|
||||
await expect(table.countRows()).resolves.toBe(3);
|
||||
});
|
||||
|
||||
it("should overwrite data if asked", async () => {
|
||||
it("should show table stats", async () => {
|
||||
await table.add([{ id: 1 }, { id: 2 }]);
|
||||
await table.add([{ id: 1 }]);
|
||||
await expect(table.stats()).resolves.toEqual({
|
||||
fragmentStats: {
|
||||
lengths: {
|
||||
max: 2,
|
||||
mean: 1,
|
||||
min: 1,
|
||||
p25: 1,
|
||||
p50: 2,
|
||||
p75: 2,
|
||||
p99: 2,
|
||||
},
|
||||
numFragments: 2,
|
||||
numSmallFragments: 2,
|
||||
},
|
||||
numIndices: 0,
|
||||
numRows: 3,
|
||||
totalBytes: 24,
|
||||
});
|
||||
});
|
||||
|
||||
it("should overwrite data if asked", async () => {
|
||||
const addRes = await table.add([{ id: 1 }, { id: 2 }]);
|
||||
expect(addRes).toHaveProperty("version");
|
||||
expect(addRes.version).toBe(2);
|
||||
await table.add([{ id: 1 }], { mode: "overwrite" });
|
||||
await expect(table.countRows()).resolves.toBe(1);
|
||||
});
|
||||
@@ -87,7 +119,11 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
|
||||
await table.add([{ id: 1 }]);
|
||||
expect(await table.countRows("id == 1")).toBe(1);
|
||||
expect(await table.countRows("id == 7")).toBe(0);
|
||||
await table.update({ id: "7" });
|
||||
const updateRes = await table.update({ id: "7" });
|
||||
expect(updateRes).toHaveProperty("version");
|
||||
expect(updateRes.version).toBe(3);
|
||||
expect(updateRes).toHaveProperty("rowsUpdated");
|
||||
expect(updateRes.rowsUpdated).toBe(1);
|
||||
expect(await table.countRows("id == 1")).toBe(0);
|
||||
expect(await table.countRows("id == 7")).toBe(1);
|
||||
await table.add([{ id: 2 }]);
|
||||
@@ -314,11 +350,17 @@ describe("merge insert", () => {
       { a: 3, b: "y" },
       { a: 4, b: "z" },
     ];
-    await table
+    const mergeInsertRes = await table
       .mergeInsert("a")
       .whenMatchedUpdateAll()
       .whenNotMatchedInsertAll()
-      .execute(newData);
+      .execute(newData, { timeoutMs: 10_000 });
+    expect(mergeInsertRes).toHaveProperty("version");
+    expect(mergeInsertRes.version).toBe(2);
+    expect(mergeInsertRes.numInsertedRows).toBe(1);
+    expect(mergeInsertRes.numUpdatedRows).toBe(2);
+    expect(mergeInsertRes.numDeletedRows).toBe(0);

     const expected = [
       { a: 1, b: "a" },
       { a: 2, b: "x" },
@@ -326,9 +368,9 @@ describe("merge insert", () => {
       { a: 4, b: "z" },
     ];

-    expect(
-      JSON.parse(JSON.stringify((await table.toArrow()).toArray())),
-    ).toEqual(expected);
+    const result = (await table.toArrow()).toArray().sort((a, b) => a.a - b.a);
+
+    expect(result.map((row) => ({ ...row }))).toEqual(expected);
   });
   test("conditional update", async () => {
     const newData = [
@@ -336,10 +378,12 @@ describe("merge insert", () => {
       { a: 3, b: "y" },
       { a: 4, b: "z" },
     ];
-    await table
+    const mergeInsertRes = await table
       .mergeInsert("a")
       .whenMatchedUpdateAll({ where: "target.b = 'b'" })
       .execute(newData);
+    expect(mergeInsertRes).toHaveProperty("version");
+    expect(mergeInsertRes.version).toBe(2);

     const expected = [
       { a: 1, b: "a" },
@@ -424,6 +468,20 @@ describe("merge insert", () => {
     res = res.sort((a, b) => a.a - b.a);
     expect(res).toEqual(expected);
   });

+  test("timeout", async () => {
+    const newData = [
+      { a: 2, b: "x" },
+      { a: 4, b: "z" },
+    ];
+    await expect(
+      table
+        .mergeInsert("a")
+        .whenMatchedUpdateAll()
+        .whenNotMatchedInsertAll()
+        .execute(newData, { timeoutMs: 0 }),
+    ).rejects.toThrow("merge insert timed out");
+  });
 });
describe("When creating an index", () => {
|
||||
@@ -501,11 +559,46 @@ describe("When creating an index", () => {
|
||||
rst = await tbl.query().limit(2).offset(1).nearestTo(queryVec).toArrow();
|
||||
expect(rst.numRows).toBe(1);
|
||||
|
||||
// test nprobes
|
||||
rst = await tbl.query().nearestTo(queryVec).limit(2).nprobes(50).toArrow();
|
||||
expect(rst.numRows).toBe(2);
|
||||
rst = await tbl
|
||||
.query()
|
||||
.nearestTo(queryVec)
|
||||
.limit(2)
|
||||
.minimumNprobes(15)
|
||||
.toArrow();
|
||||
expect(rst.numRows).toBe(2);
|
||||
rst = await tbl
|
||||
.query()
|
||||
.nearestTo(queryVec)
|
||||
.limit(2)
|
||||
.minimumNprobes(10)
|
||||
.maximumNprobes(20)
|
||||
.toArrow();
|
||||
expect(rst.numRows).toBe(2);
|
||||
|
||||
expect(() => tbl.query().nearestTo(queryVec).minimumNprobes(0)).toThrow(
|
||||
"Invalid input, minimum_nprobes must be greater than 0",
|
||||
);
|
||||
expect(() => tbl.query().nearestTo(queryVec).maximumNprobes(5)).toThrow(
|
||||
"Invalid input, maximum_nprobes must be greater than minimum_nprobes",
|
||||
);
|
||||
|
||||
await tbl.dropIndex("vec_idx");
|
||||
const indices2 = await tbl.listIndices();
|
||||
expect(indices2.length).toBe(0);
|
||||
});
|
||||
|
||||
it("should wait for index readiness", async () => {
|
||||
// Create an index and then wait for it to be ready
|
||||
await tbl.createIndex("vec");
|
||||
const indices = await tbl.listIndices();
|
||||
expect(indices.length).toBeGreaterThan(0);
|
||||
const idxName = indices[0].name;
|
||||
await expect(tbl.waitForIndex([idxName], 5)).resolves.toBeUndefined();
|
||||
});
|
||||
|
||||
it("should search with distance range", async () => {
|
||||
await tbl.createIndex("vec");
|
||||
|
||||
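A sketch of how the knobs exercised above might be used together in application code; `tbl` and `queryVec` are assumed to exist already, and the numbers are illustrative:

// nprobes(n) pins the searched partition count; the min/max pair lets a
// narrowly filtered query search extra partitions only when results run short.
const adaptive = await tbl
  .query()
  .nearestTo(queryVec)
  .limit(10)
  .minimumNprobes(10)
  .maximumNprobes(40)
  .toArrow();

// waitForIndex blocks until asynchronous indexing completes or times out.
await tbl.createIndex("vec");
const [index] = await tbl.listIndices();
await tbl.waitForIndex([index.name], 30);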
@@ -823,6 +916,7 @@ describe("When creating an index", () => {
     // Only build index over v1
     await tbl.createIndex("vec", {
       config: Index.ivfPq({ numPartitions: 2, numSubVectors: 2 }),
+      waitTimeoutSeconds: 30,
     });

     const rst = await tbl
@@ -867,6 +961,44 @@ describe("When creating an index", () => {
   });
 });

+describe("When querying a table", () => {
+  let tmpDir: tmp.DirResult;
+  beforeEach(() => {
+    tmpDir = tmp.dirSync({ unsafeCleanup: true });
+  });
+  afterEach(() => tmpDir.removeCallback());
+
+  it("should throw an error when timeout is reached", async () => {
+    const db = await connect(tmpDir.name);
+    const data = makeArrowTable([
+      { text: "a", vector: [0.1, 0.2] },
+      { text: "b", vector: [0.3, 0.4] },
+    ]);
+    const table = await db.createTable("test", data);
+    await table.createIndex("text", { config: Index.fts() });
+
+    await expect(
+      table.query().where("text != 'a'").toArray({ timeoutMs: 0 }),
+    ).rejects.toThrow("Query timeout");
+
+    await expect(
+      table.query().nearestTo([0.0, 0.0]).toArrow({ timeoutMs: 0 }),
+    ).rejects.toThrow("Query timeout");
+
+    await expect(
+      table.search("a", "fts").toArray({ timeoutMs: 0 }),
+    ).rejects.toThrow("Query timeout");
+
+    await expect(
+      table
+        .query()
+        .nearestToText("a")
+        .nearestTo([0.0, 0.0])
+        .toArrow({ timeoutMs: 0 }),
+    ).rejects.toThrow("Query timeout");
+  });
+});
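The tests above exercise the new per-call timeout; a rough sketch of guarding a slow scan with it (the 5-second budget is arbitrary, and `table` is assumed to be open):

try {
  // toArray()/toArrow() accept QueryExecutionOptions, including timeoutMs.
  const rows = await table
    .query()
    .where("text != 'a'")
    .toArray({ timeoutMs: 5_000 });
  console.log(rows.length);
} catch (err) {
  // Rejects with a "Query timeout" error once the budget is exhausted.
  console.error(err);
}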

 describe("Read consistency interval", () => {
   let tmpDir: tmp.DirResult;
   beforeEach(() => {
@@ -951,15 +1083,19 @@ describe("schema evolution", function () {
       { id: 1n, vector: [0.1, 0.2] },
     ]);
     // Can create a non-nullable column only through addColumns at the moment.
-    await table.addColumns([
+    const addColumnsRes = await table.addColumns([
       { name: "price", valueSql: "cast(10.0 as double)" },
     ]);
+    expect(addColumnsRes).toHaveProperty("version");
+    expect(addColumnsRes.version).toBe(2);
     expect(await table.schema()).toEqual(schema);

-    await table.alterColumns([
+    const alterColumnsRes = await table.alterColumns([
       { path: "id", rename: "new_id" },
       { path: "price", nullable: true },
     ]);
+    expect(alterColumnsRes).toHaveProperty("version");
+    expect(alterColumnsRes.version).toBe(3);

     const expectedSchema = new Schema([
       new Field("new_id", new Int64(), true),
@@ -1077,7 +1213,9 @@ describe("schema evolution", function () {
     const table = await con.createTable("vectors", [
       { id: 1n, vector: [0.1, 0.2] },
     ]);
-    await table.dropColumns(["vector"]);
+    const dropColumnsRes = await table.dropColumns(["vector"]);
+    expect(dropColumnsRes).toHaveProperty("version");
+    expect(dropColumnsRes.version).toBe(2);

     const expectedSchema = new Schema([new Field("id", new Int64(), true)]);
     expect(await table.schema()).toEqual(expectedSchema);
@@ -1129,6 +1267,99 @@ describe("when dealing with versioning", () => {
   });
 });

describe("when dealing with tags", () => {
|
||||
let tmpDir: tmp.DirResult;
|
||||
beforeEach(() => {
|
||||
tmpDir = tmp.dirSync({ unsafeCleanup: true });
|
||||
});
|
||||
afterEach(() => {
|
||||
tmpDir.removeCallback();
|
||||
});
|
||||
|
||||
it("can manage tags", async () => {
|
||||
const conn = await connect(tmpDir.name, {
|
||||
readConsistencyInterval: 0,
|
||||
});
|
||||
|
||||
const table = await conn.createTable("my_table", [
|
||||
{ id: 1n, vector: [0.1, 0.2] },
|
||||
]);
|
||||
expect(await table.version()).toBe(1);
|
||||
|
||||
await table.add([{ id: 2n, vector: [0.3, 0.4] }]);
|
||||
expect(await table.version()).toBe(2);
|
||||
|
||||
const tagsManager = await table.tags();
|
||||
|
||||
const initialTags = await tagsManager.list();
|
||||
expect(Object.keys(initialTags).length).toBe(0);
|
||||
|
||||
const tag1 = "tag1";
|
||||
await tagsManager.create(tag1, 1);
|
||||
expect(await tagsManager.getVersion(tag1)).toBe(1);
|
||||
|
||||
const tagsAfterFirst = await tagsManager.list();
|
||||
expect(Object.keys(tagsAfterFirst).length).toBe(1);
|
||||
expect(tagsAfterFirst).toHaveProperty(tag1);
|
||||
expect(tagsAfterFirst[tag1].version).toBe(1);
|
||||
|
||||
await tagsManager.create("tag2", 2);
|
||||
expect(await tagsManager.getVersion("tag2")).toBe(2);
|
||||
|
||||
const tagsAfterSecond = await tagsManager.list();
|
||||
expect(Object.keys(tagsAfterSecond).length).toBe(2);
|
||||
expect(tagsAfterSecond).toHaveProperty(tag1);
|
||||
expect(tagsAfterSecond[tag1].version).toBe(1);
|
||||
expect(tagsAfterSecond).toHaveProperty("tag2");
|
||||
expect(tagsAfterSecond["tag2"].version).toBe(2);
|
||||
|
||||
await table.add([{ id: 3n, vector: [0.5, 0.6] }]);
|
||||
await tagsManager.update(tag1, 3);
|
||||
expect(await tagsManager.getVersion(tag1)).toBe(3);
|
||||
|
||||
await tagsManager.delete("tag2");
|
||||
const tagsAfterDelete = await tagsManager.list();
|
||||
expect(Object.keys(tagsAfterDelete).length).toBe(1);
|
||||
expect(tagsAfterDelete).toHaveProperty(tag1);
|
||||
expect(tagsAfterDelete[tag1].version).toBe(3);
|
||||
|
||||
await table.add([{ id: 4n, vector: [0.7, 0.8] }]);
|
||||
expect(await table.version()).toBe(4);
|
||||
|
||||
await table.checkout(tag1);
|
||||
expect(await table.version()).toBe(3);
|
||||
|
||||
await table.checkoutLatest();
|
||||
expect(await table.version()).toBe(4);
|
||||
});
|
||||
|
||||
it("can checkout and restore tags", async () => {
|
||||
const conn = await connect(tmpDir.name, {
|
||||
readConsistencyInterval: 0,
|
||||
});
|
||||
|
||||
const table = await conn.createTable("my_table", [
|
||||
{ id: 1n, vector: [0.1, 0.2] },
|
||||
]);
|
||||
expect(await table.version()).toBe(1);
|
||||
expect(await table.countRows()).toBe(1);
|
||||
const tagsManager = await table.tags();
|
||||
const tag1 = "tag1";
|
||||
await tagsManager.create(tag1, 1);
|
||||
await table.add([{ id: 2n, vector: [0.3, 0.4] }]);
|
||||
const tag2 = "tag2";
|
||||
await tagsManager.create(tag2, 2);
|
||||
expect(await table.version()).toBe(2);
|
||||
await table.checkout(tag1);
|
||||
expect(await table.version()).toBe(1);
|
||||
await table.restore();
|
||||
expect(await table.version()).toBe(3);
|
||||
expect(await table.countRows()).toBe(1);
|
||||
await table.add([{ id: 3n, vector: [0.5, 0.6] }]);
|
||||
expect(await table.countRows()).toBe(2);
|
||||
});
|
||||
});
|
||||
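A condensed sketch of the tag workflow these two tests walk through, assuming a freshly created `table`:

const tags = await table.tags();

await tags.create("v1", await table.version()); // label the current version
console.log(await tags.getVersion("v1"));

await table.add([{ id: 2n, vector: [0.3, 0.4] }]);
await tags.update("v1", await table.version()); // move the label forward

await table.checkout("v1"); // checkout() accepts a tag as well as a number
await table.checkoutLatest();

await tags.delete("v1");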

 describe("when optimizing a dataset", () => {
   let tmpDir: tmp.DirResult;
   let table: Table;
@@ -1264,6 +1495,58 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
     const results = await table.search("hello").toArray();
     expect(results[0].text).toBe(data[0].text);

+    const query = new MatchQuery("goodbye", "text");
+    expect(instanceOfFullTextQuery(query)).toBe(true);
+    const results2 = await table
+      .search(new MatchQuery("goodbye", "text"))
+      .toArray();
+    expect(results2[0].text).toBe(data[1].text);
   });

+  test("prewarm full text search index", async () => {
+    const db = await connect(tmpDir.name);
+    const data = [
+      { text: ["lance database", "the", "search"], vector: [0.1, 0.2, 0.3] },
+      { text: ["lance database"], vector: [0.4, 0.5, 0.6] },
+      { text: ["lance", "search"], vector: [0.7, 0.8, 0.9] },
+      { text: ["database", "search"], vector: [1.0, 1.1, 1.2] },
+      { text: ["unrelated", "doc"], vector: [1.3, 1.4, 1.5] },
+    ];
+    const table = await db.createTable("test", data);
+    await table.createIndex("text", {
+      config: Index.fts(),
+    });
+
+    // For the moment, we just confirm we can call prewarmIndex without error
+    // and still search it afterwards
+    await table.prewarmIndex("text_idx");
+
+    const results = await table.search("lance").toArray();
+    expect(results.length).toBe(3);
+  });
+
+  test("full text index on list", async () => {
+    const db = await connect(tmpDir.name);
+    const data = [
+      { text: ["lance database", "the", "search"], vector: [0.1, 0.2, 0.3] },
+      { text: ["lance database"], vector: [0.4, 0.5, 0.6] },
+      { text: ["lance", "search"], vector: [0.7, 0.8, 0.9] },
+      { text: ["database", "search"], vector: [1.0, 1.1, 1.2] },
+      { text: ["unrelated", "doc"], vector: [1.3, 1.4, 1.5] },
+    ];
+    const table = await db.createTable("test", data);
+    await table.createIndex("text", {
+      config: Index.fts({
+        withPosition: true,
+      }),
+    });
+
+    const results = await table.search("lance").toArray();
+    expect(results.length).toBe(3);
+
+    const results2 = await table.search('"lance database"').toArray();
+    expect(results2.length).toBe(2);
+  });

   test("full text search without positions", async () => {
@@ -1279,6 +1562,18 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
     const results = await table.search("hello").toArray();
     expect(results[0].text).toBe(data[0].text);
+
+    const results2 = await table
+      .search(new MatchQuery("hello world", "text"))
+      .toArray();
+    expect(results2.length).toBe(2);
+
+    const results3 = await table
+      .search(
+        new MatchQuery("hello world", "text", { operator: Operator.And }),
+      )
+      .toArray();
+    expect(results3.length).toBe(1);
   });

   test("full text search without lowercase", async () => {
@@ -1309,13 +1604,160 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
     ];
     const table = await db.createTable("test", data);
     await table.createIndex("text", {
-      config: Index.fts(),
+      config: Index.fts({
+        withPosition: true,
+      }),
     });

     const results = await table.search("world").toArray();
     expect(results.length).toBe(2);
     const phraseResults = await table.search('"hello world"').toArray();
     expect(phraseResults.length).toBe(1);
+    const phraseResults2 = await table
+      .search(new PhraseQuery("hello world", "text"))
+      .toArray();
+    expect(phraseResults2.length).toBe(1);
   });

+  test("full text search fuzzy query", async () => {
+    const db = await connect(tmpDir.name);
+    const data = [
+      { text: "fa", vector: [0.1, 0.2, 0.3] },
+      { text: "fo", vector: [0.4, 0.5, 0.6] },
+      { text: "fob", vector: [0.4, 0.5, 0.6] },
+      { text: "focus", vector: [0.4, 0.5, 0.6] },
+      { text: "foo", vector: [0.4, 0.5, 0.6] },
+      { text: "food", vector: [0.4, 0.5, 0.6] },
+      { text: "foul", vector: [0.4, 0.5, 0.6] },
+    ];
+    const table = await db.createTable("test", data);
+    await table.createIndex("text", {
+      config: Index.fts(),
+    });
+
+    const results = await table
+      .search(new MatchQuery("foo", "text"))
+      .toArray();
+    expect(results.length).toBe(1);
+    expect(results[0].text).toBe("foo");
+
+    const fuzzyResults = await table
+      .search(new MatchQuery("foo", "text", { fuzziness: 1 }))
+      .toArray();
+    expect(fuzzyResults.length).toBe(4);
+    const resultSet = new Set(fuzzyResults.map((r) => r.text));
+    expect(resultSet.has("foo")).toBe(true);
+    expect(resultSet.has("fob")).toBe(true);
+    expect(resultSet.has("fo")).toBe(true);
+    expect(resultSet.has("food")).toBe(true);
+
+    const prefixResults = await table
+      .search(
+        new MatchQuery("foo", "text", { fuzziness: 3, prefixLength: 3 }),
+      )
+      .toArray();
+    expect(prefixResults.length).toBe(2);
+    const resultSet2 = new Set(prefixResults.map((r) => r.text));
+    expect(resultSet2.has("foo")).toBe(true);
+    expect(resultSet2.has("food")).toBe(true);
+  });
+
+  test("full text search boolean query", async () => {
+    const db = await connect(tmpDir.name);
+    const data = [
+      { text: "The cat and dog are playing" },
+      { text: "The cat is sleeping" },
+      { text: "The dog is barking" },
+      { text: "The dog chases the cat" },
+    ];
+    const table = await db.createTable("test", data);
+    await table.createIndex("text", {
+      config: Index.fts({ withPosition: false }),
+    });
+
+    const shouldResults = await table
+      .search(
+        new BooleanQuery([
+          [Occur.Should, new MatchQuery("cat", "text")],
+          [Occur.Should, new MatchQuery("dog", "text")],
+        ]),
+      )
+      .toArray();
+    expect(shouldResults.length).toBe(4);
+
+    const mustResults = await table
+      .search(
+        new BooleanQuery([
+          [Occur.Must, new MatchQuery("cat", "text")],
+          [Occur.Must, new MatchQuery("dog", "text")],
+        ]),
+      )
+      .toArray();
+    expect(mustResults.length).toBe(2);
+
+    const mustNotResults = await table
+      .search(
+        new BooleanQuery([
+          [Occur.Must, new MatchQuery("cat", "text")],
+          [Occur.MustNot, new MatchQuery("dog", "text")],
+        ]),
+      )
+      .toArray();
+    expect(mustNotResults.length).toBe(1);
+  });
+
+  test("full text search ngram", async () => {
+    const db = await connect(tmpDir.name);
+    const data = [
+      { text: "hello world", vector: [0.1, 0.2, 0.3] },
+      { text: "lance database", vector: [0.4, 0.5, 0.6] },
+      { text: "lance is cool", vector: [0.7, 0.8, 0.9] },
+    ];
+    const table = await db.createTable("test", data);
+    await table.createIndex("text", {
+      config: Index.fts({ baseTokenizer: "ngram" }),
+    });
+
+    const results = await table.search("lan").toArray();
+    expect(results.length).toBe(2);
+    const resultSet = new Set(results.map((r) => r.text));
+    expect(resultSet.has("lance database")).toBe(true);
+    expect(resultSet.has("lance is cool")).toBe(true);
+
+    const results2 = await table.search("nce").toArray(); // spellchecker:disable-line
+    expect(results2.length).toBe(2);
+    const resultSet2 = new Set(results2.map((r) => r.text));
+    expect(resultSet2.has("lance database")).toBe(true);
+    expect(resultSet2.has("lance is cool")).toBe(true);
+
+    // the default min_ngram_length is 3, so "la" should not match
+    const results3 = await table.search("la").toArray();
+    expect(results3.length).toBe(0);
+
+    // test setting min_ngram_length and prefix_only
+    await table.createIndex("text", {
+      config: Index.fts({
+        baseTokenizer: "ngram",
+        ngramMinLength: 2,
+        prefixOnly: true,
+      }),
+      replace: true,
+    });
+
+    const results4 = await table.search("lan").toArray();
+    expect(results4.length).toBe(2);
+    const resultSet4 = new Set(results4.map((r) => r.text));
+    expect(resultSet4.has("lance database")).toBe(true);
+    expect(resultSet4.has("lance is cool")).toBe(true);
+
+    const results5 = await table.search("nce").toArray(); // spellchecker:disable-line
+    expect(results5.length).toBe(0);
+
+    const results6 = await table.search("la").toArray();
+    expect(results6.length).toBe(2);
+    const resultSet6 = new Set(results6.map((r) => r.text));
+    expect(resultSet6.has("lance database")).toBe(true);
+    expect(resultSet6.has("lance is cool")).toBe(true);
+  });
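The ngram test maps directly onto the new FtsOptions fields; a sketch of the two index configurations it builds (table and column names assumed):

// Default ngram tokenizer: matches substrings of length >= 3 anywhere in a token.
await table.createIndex("text", {
  config: Index.fts({ baseTokenizer: "ngram" }),
});

// Shorter grams, prefix-only matching, replacing the previous index.
await table.createIndex("text", {
  config: Index.fts({
    baseTokenizer: "ngram",
    ngramMinLength: 2,
    prefixOnly: true,
  }),
  replace: true,
});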
   test.each([

@@ -202,35 +202,5 @@ test("basic table examples", async () => {
     // --8<-- [end:create_f16_table]
     await db.dropTable("f16_tbl");
   }
-  const uri = databaseDir;
-  await db.createTable("my_table", [{ id: 1 }, { id: 2 }]);
-  {
-    // --8<-- [start:table_strong_consistency]
-    const db = await lancedb.connect({ uri, readConsistencyInterval: 0 });
-    const tbl = await db.openTable("my_table");
-    // --8<-- [end:table_strong_consistency]
-  }
-  {
-    // --8<-- [start:table_eventual_consistency]
-    const db = await lancedb.connect({ uri, readConsistencyInterval: 5 });
-    const tbl = await db.openTable("my_table");
-    // --8<-- [end:table_eventual_consistency]
-  }
-  {
-    // --8<-- [start:table_no_consistency]
-    const db = await lancedb.connect({ uri, readConsistencyInterval: null });
-    const tbl = await db.openTable("my_table");
-    // --8<-- [end:table_no_consistency]
-  }
-  {
-    // --8<-- [start:table_checkout_latest]
-    const tbl = await db.openTable("my_table");
-
-    // (Other writes happen to test_table_async from another process)
-
-    // Check for updates
-    tbl.checkoutLatest();
-    // --8<-- [end:table_checkout_latest]
-  }
 });
 });
@@ -417,7 +417,9 @@ function inferSchema(
   } else {
     const inferredType = inferType(value, path, opts);
     if (inferredType === undefined) {
-      throw new Error(`Failed to infer data type for field ${path.join(".")} at row ${rowI}. \
+      throw new Error(`Failed to infer data type for field ${path.join(
+        ".",
+      )} at row ${rowI}. \
 Consider providing an explicit schema.`);
     }
     pathTree.set(path, inferredType);
@@ -639,8 +641,9 @@ function transposeData(
 ): Vector {
   if (field.type instanceof Struct) {
     const childFields = field.type.children;
+    const fullPath = [...path, field.name];
     const childVectors = childFields.map((child) => {
-      return transposeData(data, child, [...path, child.name]);
+      return transposeData(data, child, fullPath);
     });
     const structData = makeData({
       type: field.type,
@@ -652,7 +655,14 @@ function transposeData(
     const values = data.map((datum) => {
       let current: unknown = datum;
       for (const key of valuesPath) {
-        if (isObject(current) && Object.hasOwn(current, key)) {
+        if (current == null) {
+          return null;
+        }
+
+        if (
+          isObject(current) &&
+          (Object.hasOwn(current, key) || key in current)
+        ) {
           current = current[key];
         } else {
           return null;
@@ -791,11 +801,17 @@ async function applyEmbeddingsFromMetadata(
       `Cannot apply embedding function because the source column '${functionEntry.sourceColumn}' was not present in the data`,
     );
   }

+  // Check if destination column exists and handle accordingly
   if (columns[destColumn] !== undefined) {
-    throw new Error(
-      `Attempt to apply embeddings to table failed because column ${destColumn} already existed`,
-    );
+    const existingColumn = columns[destColumn];
+    // If the column exists but is all null, we can fill it with embeddings
+    if (existingColumn.nullCount !== existingColumn.length) {
+      // Column has non-null values, skip embedding application
+      continue;
+    }
   }

   if (table.batches.length > 1) {
     throw new Error(
       "Internal error: `makeArrowTable` unexpectedly created a table with more than one batch",
@@ -895,11 +911,23 @@ async function applyEmbeddings<T>(
     );
   }
 } else {
+  // Check if destination column exists and handle accordingly
   if (Object.prototype.hasOwnProperty.call(newColumns, destColumn)) {
-    throw new Error(
-      `Attempt to apply embeddings to table failed because column ${destColumn} already existed`,
-    );
+    const existingColumn = newColumns[destColumn];
+    // If the column exists but is all null, we can fill it with embeddings
+    if (existingColumn.nullCount !== existingColumn.length) {
+      // Column has non-null values, skip embedding application and return table as-is
+      let newTable = new ArrowTable(newColumns);
+      if (schema != null) {
+        newTable = alignTable(newTable, schema as Schema);
+      }
+      return new ArrowTable(
+        new Schema(newTable.schema.fields, schemaMetadata),
+        newTable.batches,
+      );
+    }
   }

   if (table.batches.length > 1) {
     throw new Error(
       "Internal error: `makeArrowTable` unexpectedly created a table with more than one batch",

@@ -23,6 +23,18 @@ export {
   OptimizeStats,
   CompactionStats,
   RemovalStats,
+  TableStatistics,
+  FragmentStatistics,
+  FragmentSummaryStats,
+  Tags,
+  TagContents,
+  MergeResult,
+  AddResult,
+  AddColumnsResult,
+  AlterColumnsResult,
+  DeleteResult,
+  DropColumnsResult,
+  UpdateResult,
 } from "./native.js";

 export {
@@ -52,7 +64,10 @@ export {
   PhraseQuery,
   BoostQuery,
   MultiMatchQuery,
+  BooleanQuery,
   FullTextQueryType,
+  Operator,
+  Occur,
 } from "./query";

 export {
@@ -74,7 +89,7 @@ export {
   ColumnAlteration,
 } from "./table";

-export { MergeInsertBuilder } from "./merge";
+export { MergeInsertBuilder, WriteExecutionOptions } from "./merge";

 export * as embedding from "./embedding";
 export * as rerankers from "./rerankers";

@@ -439,7 +439,7 @@ export interface FtsOptions {
   *
   * "raw" - Raw tokenizer. This tokenizer does not split the text into tokens and indexes the entire text as a single token.
   */
-  baseTokenizer?: "simple" | "whitespace" | "raw";
+  baseTokenizer?: "simple" | "whitespace" | "raw" | "ngram";

  /**
   * language for stemming and stop words
@@ -472,6 +472,21 @@ export interface FtsOptions {
   * whether to remove punctuation
   */
  asciiFolding?: boolean;
+
+  /**
+   * ngram min length
+   */
+  ngramMinLength?: number;
+
+  /**
+   * ngram max length
+   */
+  ngramMaxLength?: number;
+
+  /**
+   * whether to only index the prefix of the token for ngram tokenizer
+   */
+  prefixOnly?: boolean;
 }

 export class Index {
@@ -608,6 +623,9 @@ export class Index {
         options?.stem,
         options?.removeStopWords,
         options?.asciiFolding,
+        options?.ngramMinLength,
+        options?.ngramMaxLength,
+        options?.prefixOnly,
       ),
     );
   }
@@ -681,4 +699,6 @@ export interface IndexOptions {
   * The default is true
   */
  replace?: boolean;
+
+  waitTimeoutSeconds?: number;
 }

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 import { Data, Schema, fromDataToBuffer } from "./arrow";
-import { NativeMergeInsertBuilder } from "./native";
+import { MergeResult, NativeMergeInsertBuilder } from "./native";

 /** A builder used to create and run a merge insert operation */
 export class MergeInsertBuilder {
@@ -73,9 +73,12 @@ export class MergeInsertBuilder {
   /**
    * Executes the merge insert operation
    *
-   * Nothing is returned but the `Table` is updated
+   * @returns {Promise<MergeResult>} the merge result
    */
-  async execute(data: Data): Promise<void> {
+  async execute(
+    data: Data,
+    execOptions?: Partial<WriteExecutionOptions>,
+  ): Promise<MergeResult> {
     let schema: Schema;
     if (this.#schema instanceof Promise) {
       schema = await this.#schema;
@@ -83,7 +86,28 @@ export class MergeInsertBuilder {
     } else {
       schema = this.#schema;
     }
+
+    if (execOptions?.timeoutMs !== undefined) {
+      this.#native.setTimeout(execOptions.timeoutMs);
+    }
+
     const buffer = await fromDataToBuffer(data, undefined, schema);
-    await this.#native.execute(buffer);
+    return await this.#native.execute(buffer);
   }
 }

+export interface WriteExecutionOptions {
+  /**
+   * Maximum time to run the operation before cancelling it.
+   *
+   * By default, there is a 30-second timeout that is only enforced after the
+   * first attempt. This is to prevent spending too long retrying to resolve
+   * conflicts. For example, if a write attempt takes 20 seconds and fails,
+   * the second attempt will be cancelled after 10 seconds, hitting the
+   * 30-second timeout. However, a write that takes one hour and succeeds on the
+   * first attempt will not be cancelled.
+   *
+   * When this is set, the timeout is enforced on all attempts, including the first.
+   */
+  timeoutMs?: number;
+}
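Given the semantics documented on WriteExecutionOptions, a caller who wants the timeout enforced on every attempt, including the first, might write (names and values illustrative):

const result = await table
  .mergeInsert("id")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute(newRows, { timeoutMs: 30_000 });

console.log(result.version, result.numInsertedRows, result.numUpdatedRows);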
@@ -11,6 +11,7 @@ import {
 } from "./arrow";
 import { type IvfPqOptions } from "./indices";
 import {
+  JsFullTextQuery,
   RecordBatchIterator as NativeBatchIterator,
   Query as NativeQuery,
   Table as NativeTable,
@@ -63,7 +64,7 @@ class RecordBatchIterable<
   // biome-ignore lint/suspicious/noExplicitAny: skip
   [Symbol.asyncIterator](): AsyncIterator<RecordBatch<any>, any, undefined> {
     return new RecordBatchIterator(
-      this.inner.execute(this.options?.maxBatchLength),
+      this.inner.execute(this.options?.maxBatchLength, this.options?.timeoutMs),
     );
   }
 }
@@ -79,6 +80,11 @@ export interface QueryExecutionOptions {
   * in smaller chunks.
   */
  maxBatchLength?: number;
+
+  /**
+   * Timeout for query execution in milliseconds
+   */
+  timeoutMs?: number;
 }

 /**
@@ -172,9 +178,7 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
         columns: columns,
       });
     } else {
-      // If query is a FullTextQuery object, convert it to a dict
-      const queryObj = query.toDict();
-      inner.fullTextSearch(queryObj);
+      inner.fullTextSearch({ query: query.inner });
     }
   });
   return this;
@@ -283,9 +287,11 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
     options?: Partial<QueryExecutionOptions>,
   ): Promise<NativeBatchIterator> {
     if (this.inner instanceof Promise) {
-      return this.inner.then((inner) => inner.execute(options?.maxBatchLength));
+      return this.inner.then((inner) =>
+        inner.execute(options?.maxBatchLength, options?.timeoutMs),
+      );
     } else {
-      return this.inner.execute(options?.maxBatchLength);
+      return this.inner.execute(options?.maxBatchLength, options?.timeoutMs);
     }
   }
@@ -442,6 +448,10 @@ export class VectorQuery extends QueryBase<NativeVectorQuery> {
   * For best results we recommend tuning this parameter with a benchmark against
   * your actual data to find the smallest possible value that will still give
   * you the desired recall.
+   *
+   * For more fine grained control over behavior when you have a very narrow filter
+   * you can use `minimumNprobes` and `maximumNprobes`. This method sets both
+   * the minimum and maximum to the same value.
   */
  nprobes(nprobes: number): VectorQuery {
    super.doCall((inner) => inner.nprobes(nprobes));
@@ -449,6 +459,33 @@ export class VectorQuery extends QueryBase<NativeVectorQuery> {
    return this;
  }

+  /**
+   * Set the minimum number of probes used.
+   *
+   * This controls the minimum number of partitions that will be searched. This
+   * parameter will impact every query against a vector index, regardless of the
+   * filter. See `nprobes` for more details. Higher values will increase recall
+   * but will also increase latency.
+   */
+  minimumNprobes(minimumNprobes: number): VectorQuery {
+    super.doCall((inner) => inner.minimumNprobes(minimumNprobes));
+    return this;
+  }
+
+  /**
+   * Set the maximum number of probes used.
+   *
+   * This controls the maximum number of partitions that will be searched. If this
+   * number is greater than minimumNprobes then the excess partitions will _only_ be
+   * searched if we have not found enough results. This can be useful when there is
+   * a narrow filter to allow these queries to spend more time searching and avoid
+   * potential false negatives.
+   */
+  maximumNprobes(maximumNprobes: number): VectorQuery {
+    super.doCall((inner) => inner.maximumNprobes(maximumNprobes));
+    return this;
+  }
+
  /*
   * Set the distance range to use
   *
@@ -736,8 +773,7 @@ export class Query extends QueryBase<NativeQuery> {
       columns: columns,
     });
   } else {
-    const queryObj = query.toDict();
-    inner.fullTextSearch(queryObj);
+    inner.fullTextSearch({ query: query.inner });
   }
 });
 return this;
@@ -757,6 +793,31 @@ export enum FullTextQueryType {
   MatchPhrase = "match_phrase",
   Boost = "boost",
   MultiMatch = "multi_match",
+  Boolean = "boolean",
 }

+/**
+ * Enum representing the logical operators used in full-text queries.
+ *
+ * - `And`: All terms must match.
+ * - `Or`: At least one term must match.
+ */
+export enum Operator {
+  And = "AND",
+  Or = "OR",
+}
+
+/**
+ * Enum representing the occurrence of terms in full-text queries.
+ *
+ * - `Must`: The term must be present in the document.
+ * - `Should`: The term should contribute to the document score, but is not required.
+ * - `MustNot`: The term must not be present in the document.
+ */
+export enum Occur {
+  Should = "SHOULD",
+  Must = "MUST",
+  MustNot = "MUST_NOT",
+}
+
 /**
@@ -765,130 +826,173 @@ export enum FullTextQueryType {
  * including methods to retrieve the query type and convert the query to a dictionary format.
  */
 export interface FullTextQuery {
+  /**
+   * Returns the inner query object.
+   * This is the underlying query object used by the database engine.
+   * @ignore
+   */
+  inner: JsFullTextQuery;
+
   /**
    * The type of the full-text query.
    */
   queryType(): FullTextQueryType;
-  toDict(): Record<string, unknown>;
 }

+// biome-ignore lint/suspicious/noExplicitAny: we want any here
+export function instanceOfFullTextQuery(obj: any): obj is FullTextQuery {
+  return obj != null && obj.inner instanceof JsFullTextQuery;
+}
+
 export class MatchQuery implements FullTextQuery {
+  /** @ignore */
+  public readonly inner: JsFullTextQuery;
+
   /**
    * Creates an instance of MatchQuery.
    *
    * @param query - The text query to search for.
    * @param column - The name of the column to search within.
-   * @param boost - (Optional) The boost factor to influence the relevance score of this query. Default is `1.0`.
-   * @param fuzziness - (Optional) The allowed edit distance for fuzzy matching. Default is `0`.
-   * @param maxExpansions - (Optional) The maximum number of terms to consider for fuzzy matching. Default is `50`.
+   * @param options - Optional parameters for the match query.
+   * - `boost`: The boost factor for the query (default is 1.0).
+   * - `fuzziness`: The fuzziness level for the query (default is 0).
+   * - `maxExpansions`: The maximum number of terms to consider for fuzzy matching (default is 50).
+   * - `operator`: The logical operator to use for combining terms in the query (default is "OR").
+   * - `prefixLength`: The number of beginning characters being unchanged for fuzzy matching.
    */
   constructor(
-    private query: string,
-    private column: string,
-    private boost: number = 1.0,
-    private fuzziness: number = 0,
-    private maxExpansions: number = 50,
-  ) {}
+    query: string,
+    column: string,
+    options?: {
+      boost?: number;
+      fuzziness?: number;
+      maxExpansions?: number;
+      operator?: Operator;
+      prefixLength?: number;
+    },
+  ) {
+    let fuzziness = options?.fuzziness;
+    if (fuzziness === undefined) {
+      fuzziness = 0;
+    }
+    this.inner = JsFullTextQuery.matchQuery(
+      query,
+      column,
+      options?.boost ?? 1.0,
+      fuzziness,
+      options?.maxExpansions ?? 50,
+      options?.operator ?? Operator.Or,
+      options?.prefixLength ?? 0,
+    );
+  }

   queryType(): FullTextQueryType {
     return FullTextQueryType.Match;
   }
-
-  toDict(): Record<string, unknown> {
-    return {
-      [this.queryType()]: {
-        [this.column]: {
-          query: this.query,
-          boost: this.boost,
-          fuzziness: this.fuzziness,
-          // biome-ignore lint/style/useNamingConvention: use underscore for consistency with the other APIs
-          max_expansions: this.maxExpansions,
-        },
-      },
-    };
-  }
 }

 export class PhraseQuery implements FullTextQuery {
+  /** @ignore */
+  public readonly inner: JsFullTextQuery;
   /**
    * Creates an instance of `PhraseQuery`.
    *
    * @param query - The phrase to search for in the specified column.
    * @param column - The name of the column to search within.
+   * @param options - Optional parameters for the phrase query.
+   * - `slop`: The maximum number of intervening unmatched positions allowed between words in the phrase (default is 0).
    */
-  constructor(
-    private query: string,
-    private column: string,
-  ) {}
+  constructor(query: string, column: string, options?: { slop?: number }) {
+    this.inner = JsFullTextQuery.phraseQuery(query, column, options?.slop ?? 0);
+  }

   queryType(): FullTextQueryType {
     return FullTextQueryType.MatchPhrase;
   }
-
-  toDict(): Record<string, unknown> {
-    return {
-      [this.queryType()]: {
-        [this.column]: this.query,
-      },
-    };
-  }
 }

 export class BoostQuery implements FullTextQuery {
+  /** @ignore */
+  public readonly inner: JsFullTextQuery;
   /**
    * Creates an instance of BoostQuery.
    * The boost returns documents that match the positive query,
    * but penalizes those that match the negative query.
    * the penalty is controlled by the `negativeBoost` parameter.
    *
    * @param positive - The positive query that boosts the relevance score.
    * @param negative - The negative query that reduces the relevance score.
-   * @param negativeBoost - The factor by which the negative query reduces the score.
+   * @param options - Optional parameters for the boost query.
+   * - `negativeBoost`: The boost factor for the negative query (default is 0.0).
    */
   constructor(
-    private positive: FullTextQuery,
-    private negative: FullTextQuery,
-    private negativeBoost: number,
-  ) {}
+    positive: FullTextQuery,
+    negative: FullTextQuery,
+    options?: {
+      negativeBoost?: number;
+    },
+  ) {
+    this.inner = JsFullTextQuery.boostQuery(
+      positive.inner,
+      negative.inner,
+      options?.negativeBoost,
+    );
+  }

   queryType(): FullTextQueryType {
     return FullTextQueryType.Boost;
   }
-
-  toDict(): Record<string, unknown> {
-    return {
-      [this.queryType()]: {
-        positive: this.positive.toDict(),
-        negative: this.negative.toDict(),
-        // biome-ignore lint/style/useNamingConvention: use underscore for consistency with the other APIs
-        negative_boost: this.negativeBoost,
-      },
-    };
-  }
 }

 export class MultiMatchQuery implements FullTextQuery {
+  /** @ignore */
+  public readonly inner: JsFullTextQuery;
   /**
    * Creates an instance of MultiMatchQuery.
    *
    * @param query - The text query to search for across multiple columns.
    * @param columns - An array of column names to search within.
-   * @param boosts - (Optional) An array of boost factors corresponding to each column. Default is an array of 1.0 for each column.
-   *
-   * The `boosts` array should have the same length as `columns`. If not provided, all columns will have a default boost of 1.0.
-   * If the length of `boosts` is less than `columns`, it will be padded with 1.0s.
+   * @param options - Optional parameters for the multi-match query.
+   * - `boosts`: An array of boost factors for each column (default is 1.0 for all).
+   * - `operator`: The logical operator to use for combining terms in the query (default is "OR").
    */
   constructor(
-    private query: string,
-    private columns: string[],
-    private boosts: number[] = columns.map(() => 1.0),
-  ) {}
+    query: string,
+    columns: string[],
+    options?: {
+      boosts?: number[];
+      operator?: Operator;
+    },
+  ) {
+    this.inner = JsFullTextQuery.multiMatchQuery(
+      query,
+      columns,
+      options?.boosts,
+      options?.operator ?? Operator.Or,
+    );
+  }

   queryType(): FullTextQueryType {
     return FullTextQueryType.MultiMatch;
   }
-
-  toDict(): Record<string, unknown> {
-    return {
-      [this.queryType()]: {
-        query: this.query,
-        columns: this.columns,
-        boost: this.boosts,
-      },
-    };
-  }
 }
+
+export class BooleanQuery implements FullTextQuery {
+  /** @ignore */
+  public readonly inner: JsFullTextQuery;
+  /**
+   * Creates an instance of BooleanQuery.
+   *
+   * @param queries - An array of (Occur, FullTextQuery objects) to combine.
+   * Occur specifies whether the query must match, or should match.
+   */
+  constructor(queries: [Occur, FullTextQuery][]) {
+    this.inner = JsFullTextQuery.booleanQuery(
+      queries.map(([occur, query]) => [occur, query.inner]),
+    );
+  }
+
+  queryType(): FullTextQueryType {
+    return FullTextQueryType.Boolean;
+  }
+}
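Putting the reworked constructors together, a sketch of composing the option-based query objects (the column name is assumed):

import {
  BooleanQuery,
  MatchQuery,
  Occur,
  Operator,
  PhraseQuery,
} from "@lancedb/lancedb";

// Fuzzy match requiring every term, tolerating one edit per term.
const match = new MatchQuery("hello wrld", "text", {
  fuzziness: 1,
  operator: Operator.And,
});

// Exact phrase allowing one intervening word.
const phrase = new PhraseQuery("hello world", "text", { slop: 1 });

// Must match the fuzzy query, must not match the phrase.
const combined = new BooleanQuery([
  [Occur.Must, match],
  [Occur.MustNot, phrase],
]);

const hits = await table.search(combined).toArray();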
@@ -16,13 +16,26 @@ import { EmbeddingFunctionConfig, getRegistry } from "./embedding/registry";
 import { IndexOptions } from "./indices";
 import { MergeInsertBuilder } from "./merge";
 import {
+  AddColumnsResult,
   AddColumnsSql,
+  AddResult,
+  AlterColumnsResult,
+  DeleteResult,
+  DropColumnsResult,
   IndexConfig,
   IndexStatistics,
   OptimizeStats,
+  TableStatistics,
+  Tags,
+  UpdateResult,
   Table as _NativeTable,
 } from "./native";
-import { Query, VectorQuery } from "./query";
+import {
+  FullTextQuery,
+  Query,
+  VectorQuery,
+  instanceOfFullTextQuery,
+} from "./query";
 import { sanitizeType } from "./sanitize";
 import { IntoSql, toSQL } from "./util";
 export { IndexConfig } from "./native";
@@ -62,10 +75,10 @@ export interface OptimizeOptions {
   * // Delete all versions older than 1 day
   * const olderThan = new Date();
   * olderThan.setDate(olderThan.getDate() - 1));
-  * tbl.cleanupOlderVersions(olderThan);
+  * tbl.optimize({cleanupOlderThan: olderThan});
   *
   * // Delete all versions except the current version
-  * tbl.cleanupOlderVersions(new Date());
+  * tbl.optimize({cleanupOlderThan: new Date()});
   */
  cleanupOlderThan: Date;
  deleteUnverified: boolean;
@@ -119,12 +132,19 @@ export abstract class Table {
  /**
   * Insert records into this Table.
   * @param {Data} data Records to be inserted into the Table
+   * @returns {Promise<AddResult>} A promise that resolves to an object
+   * containing the new version number of the table
   */
-  abstract add(data: Data, options?: Partial<AddDataOptions>): Promise<void>;
+  abstract add(
+    data: Data,
+    options?: Partial<AddDataOptions>,
+  ): Promise<AddResult>;
  /**
   * Update existing records in the Table
   * @param opts.values The values to update. The keys are the column names and the values
   * are the values to set.
+   * @returns {Promise<UpdateResult>} A promise that resolves to an object containing
+   * the number of rows updated and the new version number
   * @example
   * ```ts
   * table.update({where:"x = 2", values:{"vector": [10, 10]}})
@@ -134,11 +154,13 @@ export abstract class Table {
    opts: {
      values: Map<string, IntoSql> | Record<string, IntoSql>;
    } & Partial<UpdateOptions>,
-  ): Promise<void>;
+  ): Promise<UpdateResult>;
  /**
   * Update existing records in the Table
   * @param opts.valuesSql The values to update. The keys are the column names and the values
   * are the values to set. The values are SQL expressions.
+   * @returns {Promise<UpdateResult>} A promise that resolves to an object containing
+   * the number of rows updated and the new version number
   * @example
   * ```ts
   * table.update({where:"x = 2", valuesSql:{"x": "x + 1"}})
@@ -148,7 +170,7 @@ export abstract class Table {
    opts: {
      valuesSql: Map<string, string> | Record<string, string>;
    } & Partial<UpdateOptions>,
-  ): Promise<void>;
+  ): Promise<UpdateResult>;
  /**
   * Update existing records in the Table
   *
@@ -166,6 +188,8 @@ export abstract class Table {
   * repeatedly calilng this method.
   * @param {Map<string, string> | Record<string, string>} updates - the
   * columns to update
+   * @returns {Promise<UpdateResult>} A promise that resolves to an object
+   * containing the number of rows updated and the new version number
   *
   * Keys in the map should specify the name of the column to update.
   * Values in the map provide the new value of the column. These can
@@ -177,12 +201,16 @@ export abstract class Table {
  abstract update(
    updates: Map<string, string> | Record<string, string>,
    options?: Partial<UpdateOptions>,
-  ): Promise<void>;
+  ): Promise<UpdateResult>;

  /** Count the total number of rows in the dataset. */
  abstract countRows(filter?: string): Promise<number>;
-  /** Delete the rows that satisfy the predicate. */
-  abstract delete(predicate: string): Promise<void>;
+  /**
+   * Delete the rows that satisfy the predicate.
+   * @returns {Promise<DeleteResult>} A promise that resolves to an object
+   * containing the new version number of the table
+   */
+  abstract delete(predicate: string): Promise<DeleteResult>;
  /**
   * Create an index to speed up queries.
   *
@@ -230,6 +258,30 @@ export abstract class Table {
   */
  abstract dropIndex(name: string): Promise<void>;

+  /**
+   * Prewarm an index in the table.
+   *
+   * @param name The name of the index.
+   *
+   * This will load the index into memory. This may reduce the cold-start time for
+   * future queries. If the index does not fit in the cache then this call may be
+   * wasteful.
+   */
+  abstract prewarmIndex(name: string): Promise<void>;
+
+  /**
+   * Waits for asynchronous indexing to complete on the table.
+   *
+   * @param indexNames The name of the indices to wait for
+   * @param timeoutSeconds The number of seconds to wait before timing out
+   *
+   * This will raise an error if the indices are not created and fully indexed within the timeout.
+   */
+  abstract waitForIndex(
+    indexNames: string[],
+    timeoutSeconds: number,
+  ): Promise<void>;
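As the prewarmIndex doc comment notes, prewarming trades cache space for cold-start latency; one plausible pattern, assuming the indices fit in cache:

// Load every index into memory before serving the first query.
const indices = await table.listIndices();
for (const idx of indices) {
  await table.prewarmIndex(idx.name);
}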

  /**
   * Create a {@link Query} Builder.
   *
@@ -294,7 +346,7 @@ export abstract class Table {
   * if the query is a string and no embedding function is defined, it will be treated as a full text search query
   */
  abstract search(
-    query: string | IntoVector,
+    query: string | IntoVector | FullTextQuery,
    queryType?: string,
    ftsColumns?: string | string[],
  ): VectorQuery | Query;
@@ -312,15 +364,23 @@ export abstract class Table {
   * the SQL expression to use to calculate the value of the new column. These
   * expressions will be evaluated for each row in the table, and can
   * reference existing columns in the table.
+   * @returns {Promise<AddColumnsResult>} A promise that resolves to an object
+   * containing the new version number of the table after adding the columns.
   */
-  abstract addColumns(newColumnTransforms: AddColumnsSql[]): Promise<void>;
+  abstract addColumns(
+    newColumnTransforms: AddColumnsSql[],
+  ): Promise<AddColumnsResult>;

  /**
   * Alter the name or nullability of columns.
   * @param {ColumnAlteration[]} columnAlterations One or more alterations to
   * apply to columns.
+   * @returns {Promise<AlterColumnsResult>} A promise that resolves to an object
+   * containing the new version number of the table after altering the columns.
   */
-  abstract alterColumns(columnAlterations: ColumnAlteration[]): Promise<void>;
+  abstract alterColumns(
+    columnAlterations: ColumnAlteration[],
+  ): Promise<AlterColumnsResult>;
  /**
   * Drop one or more columns from the dataset
   *
@@ -331,8 +391,10 @@ export abstract class Table {
   * @param {string[]} columnNames The names of the columns to drop. These can
   * be nested column references (e.g. "a.b.c") or top-level column names
   * (e.g. "a").
+   * @returns {Promise<DropColumnsResult>} A promise that resolves to an object
+   * containing the new version number of the table after dropping the columns.
   */
-  abstract dropColumns(columnNames: string[]): Promise<void>;
+  abstract dropColumns(columnNames: string[]): Promise<DropColumnsResult>;
  /** Retrieve the version of the table */

  abstract version(): Promise<number>;
@@ -345,7 +407,7 @@ export abstract class Table {
   *
   * Calling this method will set the table into time-travel mode. If you
   * wish to return to standard mode, call `checkoutLatest`.
-   * @param {number} version The version to checkout
+   * @param {number | string} version The version to checkout, could be version number or tag
   * @example
   * ```typescript
   * import * as lancedb from "@lancedb/lancedb"
@@ -361,7 +423,8 @@ export abstract class Table {
   * console.log(await table.version()); // 2
   * ```
   */
-  abstract checkout(version: number): Promise<void>;
+  abstract checkout(version: number | string): Promise<void>;

  /**
   * Checkout the latest version of the table. _This is an in-place operation._
   *
@@ -375,6 +438,23 @@ export abstract class Table {
   */
  abstract listVersions(): Promise<Version[]>;

+  /**
+   * Get a tags manager for this table.
+   *
+   * Tags allow you to label specific versions of a table with a human-readable name.
+   * The returned tags manager can be used to list, create, update, or delete tags.
+   *
+   * @returns {Tags} A tags manager for this table
+   * @example
+   * ```typescript
+   * const tagsManager = await table.tags();
+   * await tagsManager.create("v1", 1);
+   * const tags = await tagsManager.list();
+   * console.log(tags); // { "v1": { version: 1, manifestSize: ... } }
+   * ```
+   */
+  abstract tags(): Promise<Tags>;
+
  /**
   * Restore the table to the currently checked out version
   *
@@ -434,6 +514,13 @@ export abstract class Table {
   * Use {@link Table.listIndices} to find the names of the indices.
   */
  abstract indexStats(name: string): Promise<IndexStatistics | undefined>;

+  /** Returns table and fragment statistics
+   *
+   * @returns {TableStatistics} The table and fragment statistics
+   *
+   */
+  abstract stats(): Promise<TableStatistics>;
 }
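A sketch of reading the new statistics surface, e.g. to decide when compaction is worthwhile (the threshold is illustrative):

const stats = await table.stats();
console.log(stats.numRows, stats.totalBytes, stats.numIndices);

// Many small fragments is a hint that optimize() would help.
if (stats.fragmentStats.numSmallFragments > 8) {
  await table.optimize();
}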
export class LocalTable extends Table {
|
||||
@@ -473,12 +560,12 @@ export class LocalTable extends Table {
|
||||
return tbl.schema;
|
||||
}
|
||||
|
||||
async add(data: Data, options?: Partial<AddDataOptions>): Promise<void> {
|
||||
async add(data: Data, options?: Partial<AddDataOptions>): Promise<AddResult> {
|
||||
const mode = options?.mode ?? "append";
|
||||
const schema = await this.schema();
|
||||
|
||||
const buffer = await fromDataToBuffer(data, undefined, schema);
|
||||
await this.inner.add(buffer, mode);
|
||||
return await this.inner.add(buffer, mode);
|
||||
}
|
||||
|
||||
async update(
|
||||
@@ -491,7 +578,7 @@ export class LocalTable extends Table {
|
||||
valuesSql: Map<string, string> | Record<string, string>;
|
||||
} & Partial<UpdateOptions>),
|
||||
options?: Partial<UpdateOptions>,
|
||||
) {
|
||||
): Promise<UpdateResult> {
|
||||
const isValues =
|
||||
"values" in optsOrUpdates && typeof optsOrUpdates.values !== "string";
|
||||
const isValuesSql =
|
||||
@@ -538,38 +625,54 @@ export class LocalTable extends Table {
|
||||
columns = Object.entries(optsOrUpdates as Record<string, string>);
|
||||
predicate = options?.where;
|
||||
}
|
||||
await this.inner.update(predicate, columns);
|
||||
return await this.inner.update(predicate, columns);
|
||||
}
|
||||
|
||||
async countRows(filter?: string): Promise<number> {
|
||||
return await this.inner.countRows(filter);
|
||||
}
|
||||
|
||||
async delete(predicate: string): Promise<void> {
|
||||
await this.inner.delete(predicate);
|
||||
async delete(predicate: string): Promise<DeleteResult> {
|
||||
return await this.inner.delete(predicate);
|
||||
}
|
||||
|
||||
async createIndex(column: string, options?: Partial<IndexOptions>) {
|
||||
// Bit of a hack to get around the fact that TS has no package-scope.
|
||||
// biome-ignore lint/suspicious/noExplicitAny: skip
|
||||
const nativeIndex = (options?.config as any)?.inner;
|
||||
await this.inner.createIndex(nativeIndex, column, options?.replace);
|
||||
await this.inner.createIndex(
|
||||
nativeIndex,
|
||||
column,
|
||||
options?.replace,
|
||||
options?.waitTimeoutSeconds,
|
||||
);
|
||||
}
|
||||
|
||||
async dropIndex(name: string): Promise<void> {
|
||||
await this.inner.dropIndex(name);
|
||||
}
|
||||
|
||||
async prewarmIndex(name: string): Promise<void> {
|
||||
await this.inner.prewarmIndex(name);
|
||||
}
|
||||
|
||||
async waitForIndex(
|
||||
indexNames: string[],
|
||||
timeoutSeconds: number,
|
||||
): Promise<void> {
|
||||
await this.inner.waitForIndex(indexNames, timeoutSeconds);
|
||||
}
|
||||
|
||||
query(): Query {
|
||||
return new Query(this.inner);
|
||||
}
|
||||
|
||||
search(
|
||||
query: string | IntoVector,
|
||||
query: string | IntoVector | FullTextQuery,
|
||||
queryType: string = "auto",
|
||||
ftsColumns?: string | string[],
|
||||
): VectorQuery | Query {
|
||||
if (typeof query !== "string") {
|
||||
if (typeof query !== "string" && !instanceOfFullTextQuery(query)) {
|
||||
if (queryType === "fts") {
|
||||
throw new Error("Cannot perform full text search on a vector query");
|
||||
}
|
||||
@@ -585,7 +688,10 @@ export class LocalTable extends Table {
|
||||
|
||||
// The query type is auto or vector
|
||||
// fall back to full text search if no embedding functions are defined and the query is a string
|
||||
if (queryType === "auto" && getRegistry().length() === 0) {
|
||||
if (
|
||||
queryType === "auto" &&
|
||||
(getRegistry().length() === 0 || instanceOfFullTextQuery(query))
|
||||
) {
|
||||
return this.query().fullTextSearch(query, {
|
||||
columns: ftsColumns,
|
||||
});
|
||||
@@ -615,11 +721,15 @@ export class LocalTable extends Table {
|
||||
|
||||
// TODO: Support BatchUDF
|
||||
|
||||
async addColumns(newColumnTransforms: AddColumnsSql[]): Promise<void> {
|
||||
await this.inner.addColumns(newColumnTransforms);
|
||||
async addColumns(
|
||||
newColumnTransforms: AddColumnsSql[],
|
||||
): Promise<AddColumnsResult> {
|
||||
return await this.inner.addColumns(newColumnTransforms);
|
||||
}
|
||||
|
||||
async alterColumns(columnAlterations: ColumnAlteration[]): Promise<void> {
|
||||
async alterColumns(
|
||||
columnAlterations: ColumnAlteration[],
|
||||
): Promise<AlterColumnsResult> {
|
||||
const processedAlterations = columnAlterations.map((alteration) => {
|
||||
if (typeof alteration.dataType === "string") {
|
||||
return {
|
||||
@@ -640,19 +750,22 @@ export class LocalTable extends Table {
|
||||
}
|
||||
});
|
||||
|
||||
await this.inner.alterColumns(processedAlterations);
|
||||
return await this.inner.alterColumns(processedAlterations);
|
||||
}
|
||||
|
||||
async dropColumns(columnNames: string[]): Promise<void> {
|
||||
await this.inner.dropColumns(columnNames);
|
||||
async dropColumns(columnNames: string[]): Promise<DropColumnsResult> {
|
||||
return await this.inner.dropColumns(columnNames);
|
||||
}
|
||||
|
||||
async version(): Promise<number> {
|
||||
return await this.inner.version();
|
||||
}
|
||||
|
||||
async checkout(version: number): Promise<void> {
|
||||
await this.inner.checkout(version);
|
||||
async checkout(version: number | string): Promise<void> {
|
||||
if (typeof version === "string") {
|
||||
return this.inner.checkoutTag(version);
|
||||
}
|
||||
return this.inner.checkout(version);
|
||||
}
  async checkoutLatest(): Promise<void> {

@@ -671,6 +784,10 @@ export class LocalTable extends Table {
    await this.inner.restore();
  }

+ async tags(): Promise<Tags> {
+   return await this.inner.tags();
+ }

  async optimize(options?: Partial<OptimizeOptions>): Promise<OptimizeStats> {
    let cleanupOlderThanMs;
    if (

@@ -701,6 +818,11 @@ export class LocalTable extends Table {
    }
    return stats;
  }

+ async stats(): Promise<TableStatistics> {
+   return await this.inner.stats();
+ }

  mergeInsert(on: string | string[]): MergeInsertBuilder {
    on = Array.isArray(on) ? on : [on];
    return new MergeInsertBuilder(this.inner.mergeInsert(on), this.schema());
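The schema-evolution calls above no longer return void; each reports the table version it committed. A sketch against a hypothetical "docs" table, with field names following the AddColumnsResult/AlterColumnsResult/DropColumnsResult objects defined later in this diff:

import * as lancedb from "@lancedb/lancedb";

async function evolveSchema() {
  const db = await lancedb.connect("./data");
  const tbl = await db.openTable("docs");

  const added = await tbl.addColumns([{ name: "doubled", valueSql: "x * 2" }]);
  const altered = await tbl.alterColumns([{ path: "doubled", rename: "x2" }]);
  const dropped = await tbl.dropColumns(["x2"]);

  // Each mutation commits a new table version.
  console.log(added.version, altered.version, dropped.version);
}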
nodejs/npm/*/package.json (8 files)

Each platform-specific npm package receives the same one-line change:

@@ -1,6 +1,6 @@
- "version": "0.19.0-beta.0",
+ "version": "0.21.1",

applied to @lancedb/lancedb-darwin-arm64, @lancedb/lancedb-darwin-x64,
@lancedb/lancedb-linux-arm64-gnu, @lancedb/lancedb-linux-arm64-musl,
@lancedb/lancedb-linux-x64-gnu, @lancedb/lancedb-linux-x64-musl,
@lancedb/lancedb-win32-arm64-msvc, and @lancedb/lancedb-win32-x64-msvc.
The surrounding "name", "os", "cpu", and "main" fields are unchanged context.
252 nodejs/package-lock.json (generated)

The generated lockfile tracks the package bump ("version": "0.19.0-beta.0" -> "0.21.1",
in both the root entry and the "" package record) plus routine dev-dependency churn:
the @babel packages (code-frame, helper-string-parser, helper-validator-identifier,
helpers, parser, template, types) move from their 7.22/7.23 releases to 7.25/7.27
releases, which drops @babel/highlight and its vendored chalk 2.x chain (ansi-styles,
color-convert, color-name, escape-string-regexp, has-flag, supports-color) in favor
of js-tokens and picocolors, and removes to-fast-properties; axios moves 1.7.7 -> 1.8.4.
@@ -11,7 +11,7 @@
    "ann"
  ],
  "private": false,
- "version": "0.19.0-beta.0",
+ "version": "0.21.1",
  "main": "dist/index.js",
  "exports": {
    ".": "./dist/index.js",

@@ -29,6 +29,7 @@
    "aarch64-apple-darwin",
    "x86_64-unknown-linux-gnu",
    "aarch64-unknown-linux-gnu",
    "x86_64-unknown-linux-musl",
    "aarch64-unknown-linux-musl",
    "x86_64-pc-windows-msvc",
    "aarch64-pc-windows-msvc"
@@ -48,16 +48,8 @@ impl Connection {
    pub async fn new(uri: String, options: ConnectionOptions) -> napi::Result<Self> {
        let mut builder = ConnectBuilder::new(&uri);
        if let Some(interval) = options.read_consistency_interval {
-           match interval {
-               Either::A(seconds) => {
-                   builder = builder.read_consistency_interval(Some(
-                       std::time::Duration::from_secs_f64(seconds),
-                   ));
-               }
-               Either::B(_) => {
-                   builder = builder.read_consistency_interval(None);
-               }
-           }
+           builder =
+               builder.read_consistency_interval(std::time::Duration::from_secs_f64(interval));
        }
        if let Some(storage_options) = options.storage_options {
            for (key, value) in storage_options {
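With the Either<f64, Null> wrapper gone, the binding takes the interval as a plain number of seconds; omitting the option skips consistency checks entirely. A usage sketch from the TypeScript side (the option name mirrors the Rust field):

import * as lancedb from "@lancedb/lancedb";

async function connections() {
  // Strong consistency: every read checks for commits from other processes.
  const strong = await lancedb.connect("./data", { readConsistencyInterval: 0 });

  // Eventual consistency: re-check at most every 5 seconds.
  const eventual = await lancedb.connect("./data", { readConsistencyInterval: 5 });

  // Option omitted: consistency is never re-checked after open (the default).
  const unchecked = await lancedb.connect("./data");
}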
@@ -123,34 +123,44 @@ impl Index {
        stem: Option<bool>,
        remove_stop_words: Option<bool>,
        ascii_folding: Option<bool>,
+       ngram_min_length: Option<u32>,
+       ngram_max_length: Option<u32>,
+       prefix_only: Option<bool>,
    ) -> Self {
        let mut opts = FtsIndexBuilder::default();
-       let mut tokenizer_configs = opts.tokenizer_configs.clone();
        if let Some(with_position) = with_position {
            opts = opts.with_position(with_position);
        }
        if let Some(base_tokenizer) = base_tokenizer {
-           tokenizer_configs = tokenizer_configs.base_tokenizer(base_tokenizer);
+           opts = opts.base_tokenizer(base_tokenizer);
        }
        if let Some(language) = language {
-           tokenizer_configs = tokenizer_configs.language(&language).unwrap();
+           opts = opts.language(&language).unwrap();
        }
        if let Some(max_token_length) = max_token_length {
-           tokenizer_configs = tokenizer_configs.max_token_length(Some(max_token_length as usize));
+           opts = opts.max_token_length(Some(max_token_length as usize));
        }
        if let Some(lower_case) = lower_case {
-           tokenizer_configs = tokenizer_configs.lower_case(lower_case);
+           opts = opts.lower_case(lower_case);
        }
        if let Some(stem) = stem {
-           tokenizer_configs = tokenizer_configs.stem(stem);
+           opts = opts.stem(stem);
        }
        if let Some(remove_stop_words) = remove_stop_words {
-           tokenizer_configs = tokenizer_configs.remove_stop_words(remove_stop_words);
+           opts = opts.remove_stop_words(remove_stop_words);
        }
        if let Some(ascii_folding) = ascii_folding {
-           tokenizer_configs = tokenizer_configs.ascii_folding(ascii_folding);
+           opts = opts.ascii_folding(ascii_folding);
        }
+       if let Some(ngram_min_length) = ngram_min_length {
+           opts = opts.ngram_min_length(ngram_min_length);
+       }
+       if let Some(ngram_max_length) = ngram_max_length {
+           opts = opts.ngram_max_length(ngram_max_length);
+       }
+       if let Some(prefix_only) = prefix_only {
+           opts = opts.ngram_prefix_only(prefix_only);
+       }
-       opts.tokenizer_configs = tokenizer_configs;

        Self {
            inner: Mutex::new(Some(LanceDbIndex::FTS(opts))),
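The three new tokenizer knobs surface roughly like this on the TypeScript side; the camelCase option names are assumed to mirror the Rust parameters above.

import * as lancedb from "@lancedb/lancedb";
import { Index } from "@lancedb/lancedb";

async function buildNgramIndex() {
  const db = await lancedb.connect("./data");
  const tbl = await db.openTable("docs");

  // An n-gram tokenizer suited to substring and prefix matching.
  await tbl.createIndex("text", {
    config: Index.fts({
      baseTokenizer: "ngram",
      ngramMinLength: 3,   // new option
      ngramMaxLength: 5,   // new option
      prefixOnly: false,   // new option: index only token prefixes when true
    }),
  });
}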
@@ -4,7 +4,6 @@
use std::collections::HashMap;

use env_logger::Env;
- use napi::{bindgen_prelude::Null, Either};
use napi_derive::*;

mod connection;

@@ -19,6 +18,7 @@ mod table;
mod util;

#[napi(object)]
+ #[derive(Debug)]
pub struct ConnectionOptions {
    /// (For LanceDB OSS only): The interval, in seconds, at which to check for
    /// updates to the table from other processes. If None, then consistency is not

@@ -29,7 +29,7 @@ pub struct ConnectionOptions {
    /// has passed since the last check, then the table will be checked for updates.
    /// Note: this consistency only applies to read operations. Write operations are
    /// always consistent.
-   pub read_consistency_interval: Option<Either<f64, Null>>,
+   pub read_consistency_interval: Option<f64>,
    /// (For LanceDB OSS only): configuration for object storage.
    ///
    /// The available options are described at https://lancedb.github.io/lancedb/guides/storage/
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

+ use std::time::Duration;
+
use lancedb::{arrow::IntoArrow, ipc::ipc_file_to_batches, table::merge::MergeInsertBuilder};
use napi::bindgen_prelude::*;
use napi_derive::napi;

- use crate::error::convert_error;
+ use crate::{error::convert_error, table::MergeResult};

#[napi]
#[derive(Clone)]

@@ -36,8 +38,13 @@ impl NativeMergeInsertBuilder {
        this
    }

+   #[napi]
+   pub fn set_timeout(&mut self, timeout: u32) {
+       self.inner.timeout(Duration::from_millis(timeout as u64));
+   }

    #[napi(catch_unwind)]
-   pub async fn execute(&self, buf: Buffer) -> napi::Result<()> {
+   pub async fn execute(&self, buf: Buffer) -> napi::Result<MergeResult> {
        let data = ipc_file_to_batches(buf.to_vec())
            .and_then(IntoArrow::into_arrow)
            .map_err(|e| {

@@ -46,12 +53,13 @@ impl NativeMergeInsertBuilder {

        let this = self.clone();

-       this.inner.execute(data).await.map_err(|e| {
+       let res = this.inner.execute(data).await.map_err(|e| {
            napi::Error::from_reason(format!(
                "Failed to execute merge insert: {}",
                convert_error(&e)
            ))
-       })
+       })?;
+       Ok(res.into())
    }
}
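execute() on a merge insert now resolves to a MergeResult instead of void, so callers can see what the upsert actually did. A sketch against a hypothetical table keyed on "id"; the camelCase field names follow napi's conversion of the MergeResult struct defined later in this diff.

import * as lancedb from "@lancedb/lancedb";

async function upsert() {
  const db = await lancedb.connect("./data");
  const tbl = await db.openTable("docs");

  const res = await tbl
    .mergeInsert("id")
    .whenMatchedUpdateAll()
    .whenNotMatchedInsertAll()
    .execute([{ id: 1, text: "updated" }, { id: 999, text: "new" }]);

  // MergeResult: committed version plus per-category row counts.
  console.log(res.version, res.numInsertedRows, res.numUpdatedRows, res.numDeletedRows);
}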
@@ -3,7 +3,10 @@

use std::sync::Arc;

- use lancedb::index::scalar::{FtsQuery, FullTextSearchQuery, MatchQuery, PhraseQuery};
+ use lancedb::index::scalar::{
+     BooleanQuery, BoostQuery, FtsQuery, FullTextSearchQuery, MatchQuery, MultiMatchQuery, Occur,
+     Operator, PhraseQuery,
+ };
use lancedb::query::ExecutableQuery;
use lancedb::query::Query as LanceDbQuery;
use lancedb::query::QueryBase;

@@ -18,7 +21,7 @@ use crate::error::NapiErrorExt;
use crate::iterator::RecordBatchIterator;
use crate::rerankers::Reranker;
use crate::rerankers::RerankerCallbacks;
- use crate::util::{parse_distance_type, parse_fts_query};
+ use crate::util::parse_distance_type;

#[napi]
pub struct Query {

@@ -38,51 +41,8 @@ impl Query {
    }

    #[napi]
-   pub fn full_text_search(&mut self, query: napi::JsUnknown) -> napi::Result<()> {
-       let query = unsafe { query.cast::<napi::JsObject>() };
-       let query = if let Some(query_text) = query.get::<_, String>("query").transpose() {
-           let mut query_text = query_text?;
-           let columns = query.get::<_, Option<Vec<String>>>("columns")?.flatten();
-
-           let is_phrase =
-               query_text.len() >= 2 && query_text.starts_with('"') && query_text.ends_with('"');
-           let is_multi_match = columns.as_ref().map(|cols| cols.len() > 1).unwrap_or(false);
-
-           if is_phrase {
-               // Remove the surrounding quotes for phrase queries
-               query_text = query_text[1..query_text.len() - 1].to_string();
-           }
-
-           let query: FtsQuery = match (is_phrase, is_multi_match) {
-               (false, _) => MatchQuery::new(query_text).into(),
-               (true, false) => PhraseQuery::new(query_text).into(),
-               (true, true) => {
-                   return Err(napi::Error::from_reason(
-                       "Phrase queries cannot be used with multiple columns.",
-                   ));
-               }
-           };
-           let mut query = FullTextSearchQuery::new_query(query);
-           if let Some(cols) = columns {
-               if !cols.is_empty() {
-                   query = query.with_columns(&cols).map_err(|e| {
-                       napi::Error::from_reason(format!(
-                           "Failed to set full text search columns: {}",
-                           e
-                       ))
-                   })?;
-               }
-           }
-           query
-       } else if let Some(query) = query.get::<_, napi::JsObject>("query")? {
-           let query = parse_fts_query(&query)?;
-           FullTextSearchQuery::new_query(query)
-       } else {
-           return Err(napi::Error::from_reason(
-               "Invalid full text search query object".to_string(),
-           ));
-       };
-
+   pub fn full_text_search(&mut self, query: napi::JsObject) -> napi::Result<()> {
+       let query = parse_fts_query(query)?;
        self.inner = self.inner.clone().full_text_search(query);
        Ok(())
    }

@@ -131,11 +91,15 @@ impl Query {
    pub async fn execute(
        &self,
        max_batch_length: Option<u32>,
+       timeout_ms: Option<u32>,
    ) -> napi::Result<RecordBatchIterator> {
        let mut execution_opts = QueryExecutionOptions::default();
        if let Some(max_batch_length) = max_batch_length {
            execution_opts.max_batch_length = max_batch_length;
        }
+       if let Some(timeout_ms) = timeout_ms {
+           execution_opts.timeout = Some(std::time::Duration::from_millis(timeout_ms as u64))
+       }
        let inner_stream = self
            .inner
            .execute_with_options(execution_opts)

@@ -214,6 +178,31 @@ impl VectorQuery {
        self.inner = self.inner.clone().nprobes(nprobe as usize);
    }

+   #[napi]
+   pub fn minimum_nprobes(&mut self, minimum_nprobe: u32) -> napi::Result<()> {
+       self.inner = self
+           .inner
+           .clone()
+           .minimum_nprobes(minimum_nprobe as usize)
+           .default_error()?;
+       Ok(())
+   }
+
+   #[napi]
+   pub fn maximum_nprobes(&mut self, maximum_nprobes: u32) -> napi::Result<()> {
+       let maximum_nprobes = if maximum_nprobes == 0 {
+           None
+       } else {
+           Some(maximum_nprobes as usize)
+       };
+       self.inner = self
+           .inner
+           .clone()
+           .maximum_nprobes(maximum_nprobes)
+           .default_error()?;
+       Ok(())
+   }
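minimum_nprobes and maximum_nprobes bound how many IVF partitions a vector search visits, with 0 for the maximum lifting the cap per the conversion above. The camelCase method names on the TypeScript VectorQuery are assumed to mirror the Rust ones:

import * as lancedb from "@lancedb/lancedb";

async function boundedSearch(vector: number[]) {
  const db = await lancedb.connect("./data");
  const tbl = await db.openTable("docs");

  const rows = await tbl
    .query()
    .nearestTo(vector)
    .minimumNprobes(20)  // always probe at least 20 partitions
    .maximumNprobes(50)  // but never more than 50 (0 would mean "no cap")
    .limit(10)
    .toArray();
  console.log(rows.length);
}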
    #[napi]
    pub fn distance_range(&mut self, lower_bound: Option<f64>, upper_bound: Option<f64>) {
        // napi doesn't support f32, so we have to convert to f32

@@ -239,51 +228,8 @@ impl VectorQuery {
    }

    #[napi]
-   pub fn full_text_search(&mut self, query: napi::JsUnknown) -> napi::Result<()> {
-       // (drops the same inline string/object parsing block removed from
-       // Query::full_text_search above)
+   pub fn full_text_search(&mut self, query: napi::JsObject) -> napi::Result<()> {
+       let query = parse_fts_query(query)?;
        self.inner = self.inner.clone().full_text_search(query);
        Ok(())
    }

@@ -330,11 +276,15 @@ impl VectorQuery {
    pub async fn execute(
        &self,
        max_batch_length: Option<u32>,
+       timeout_ms: Option<u32>,
    ) -> napi::Result<RecordBatchIterator> {
        let mut execution_opts = QueryExecutionOptions::default();
        if let Some(max_batch_length) = max_batch_length {
            execution_opts.max_batch_length = max_batch_length;
        }
+       if let Some(timeout_ms) = timeout_ms {
+           execution_opts.timeout = Some(std::time::Duration::from_millis(timeout_ms as u64))
+       }
        let inner_stream = self
            .inner
            .execute_with_options(execution_opts)

@@ -368,3 +318,158 @@ impl VectorQuery {
        })
    }
}

#[napi]
#[derive(Debug, Clone)]
pub struct JsFullTextQuery {
    pub(crate) inner: FtsQuery,
}

#[napi]
impl JsFullTextQuery {
    #[napi(factory)]
    pub fn match_query(
        query: String,
        column: String,
        boost: f64,
        fuzziness: Option<u32>,
        max_expansions: u32,
        operator: String,
        prefix_length: u32,
    ) -> napi::Result<Self> {
        Ok(Self {
            inner: MatchQuery::new(query)
                .with_column(Some(column))
                .with_boost(boost as f32)
                .with_fuzziness(fuzziness)
                .with_max_expansions(max_expansions as usize)
                .with_operator(
                    Operator::try_from(operator.as_str()).map_err(|e| {
                        napi::Error::from_reason(format!("Invalid operator: {}", e))
                    })?,
                )
                .with_prefix_length(prefix_length)
                .into(),
        })
    }

    #[napi(factory)]
    pub fn phrase_query(query: String, column: String, slop: u32) -> napi::Result<Self> {
        Ok(Self {
            inner: PhraseQuery::new(query)
                .with_column(Some(column))
                .with_slop(slop)
                .into(),
        })
    }

    #[napi(factory)]
    #[allow(clippy::use_self)] // NAPI doesn't allow Self here but clippy reports it
    pub fn boost_query(
        positive: &JsFullTextQuery,
        negative: &JsFullTextQuery,
        negative_boost: Option<f64>,
    ) -> napi::Result<Self> {
        Ok(Self {
            inner: BoostQuery::new(
                positive.inner.clone(),
                negative.inner.clone(),
                negative_boost.map(|v| v as f32),
            )
            .into(),
        })
    }

    #[napi(factory)]
    pub fn multi_match_query(
        query: String,
        columns: Vec<String>,
        boosts: Option<Vec<f64>>,
        operator: String,
    ) -> napi::Result<Self> {
        let q = match boosts {
            Some(boosts) => MultiMatchQuery::try_new(query, columns)
                .and_then(|q| q.try_with_boosts(boosts.into_iter().map(|v| v as f32).collect())),
            None => MultiMatchQuery::try_new(query, columns),
        }
        .map_err(|e| {
            napi::Error::from_reason(format!("Failed to create multi match query: {}", e))
        })?;

        let operator = Operator::try_from(operator.as_str()).map_err(|e| {
            napi::Error::from_reason(format!("Invalid operator for multi match query: {}", e))
        })?;

        Ok(Self {
            inner: q.with_operator(operator).into(),
        })
    }

    #[napi(factory)]
    pub fn boolean_query(queries: Vec<(String, &JsFullTextQuery)>) -> napi::Result<Self> {
        let mut sub_queries = Vec::with_capacity(queries.len());
        for (occur, q) in queries {
            let occur = Occur::try_from(occur.as_str())
                .map_err(|e| napi::Error::from_reason(e.to_string()))?;
            sub_queries.push((occur, q.inner.clone()));
        }
        Ok(Self {
            inner: BooleanQuery::new(sub_queries).into(),
        })
    }

    #[napi(getter)]
    pub fn query_type(&self) -> String {
        match self.inner {
            FtsQuery::Match(_) => "match".to_string(),
            FtsQuery::Phrase(_) => "phrase".to_string(),
            FtsQuery::Boost(_) => "boost".to_string(),
            FtsQuery::MultiMatch(_) => "multi_match".to_string(),
            FtsQuery::Boolean(_) => "boolean".to_string(),
        }
    }
}
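These factories are what the TypeScript FTS query classes compile down to. A sketch composing them, assuming the package exports MatchQuery, PhraseQuery, and BoostQuery wrappers over JsFullTextQuery and that BoostQuery takes an options object; the export names and option shape are assumptions.

import * as lancedb from "@lancedb/lancedb";
import { BoostQuery, MatchQuery, PhraseQuery } from "@lancedb/lancedb"; // exports assumed

async function compoundFts() {
  const db = await lancedb.connect("./data");
  const tbl = await db.openTable("docs");

  // Prefer documents matching "puppy" while demoting ones that phrase-match "hot dog".
  const q = new BoostQuery(
    new MatchQuery("puppy", "text"),
    new PhraseQuery("hot dog", "text"),
    { negativeBoost: 0.3 }, // option shape assumed
  );
  const rows = await tbl.search(q).limit(10).toArray();
  console.log(rows.length);
}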
fn parse_fts_query(query: napi::JsObject) -> napi::Result<FullTextSearchQuery> {
    if let Ok(Some(query)) = query.get::<_, &JsFullTextQuery>("query") {
        Ok(FullTextSearchQuery::new_query(query.inner.clone()))
    } else if let Ok(Some(query_text)) = query.get::<_, String>("query") {
        let mut query_text = query_text;
        let columns = query.get::<_, Option<Vec<String>>>("columns")?.flatten();

        let is_phrase =
            query_text.len() >= 2 && query_text.starts_with('"') && query_text.ends_with('"');
        let is_multi_match = columns.as_ref().map(|cols| cols.len() > 1).unwrap_or(false);

        if is_phrase {
            // Remove the surrounding quotes for phrase queries
            query_text = query_text[1..query_text.len() - 1].to_string();
        }

        let query: FtsQuery = match (is_phrase, is_multi_match) {
            (false, _) => MatchQuery::new(query_text).into(),
            (true, false) => PhraseQuery::new(query_text).into(),
            (true, true) => {
                return Err(napi::Error::from_reason(
                    "Phrase queries cannot be used with multiple columns.",
                ));
            }
        };
        let mut query = FullTextSearchQuery::new_query(query);
        if let Some(cols) = columns {
            if !cols.is_empty() {
                query = query.with_columns(&cols).map_err(|e| {
                    napi::Error::from_reason(format!(
                        "Failed to set full text search columns: {}",
                        e
                    ))
                })?;
            }
        }
        Ok(query)
    } else {
        Err(napi::Error::from_reason(
            "Invalid full text search query object".to_string(),
        ))
    }
}
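For plain-string queries the fallback keeps the old heuristics: a double-quoted string becomes a PhraseQuery with the quotes stripped, anything else a MatchQuery, and a phrase query combined with multiple columns is rejected. From TypeScript that behavior is reachable through the string form of fullTextSearch, as the table code earlier in this diff calls it:

import * as lancedb from "@lancedb/lancedb";

// String fallback mapping (illustrative):
//   'puppy'      -> MatchQuery("puppy")
//   '"hot dog"'  -> PhraseQuery("hot dog"), surrounding quotes stripped
//   '"hot dog"' over two or more columns -> error
async function phraseSearch() {
  const db = await lancedb.connect("./data");
  const tbl = await db.openTable("docs");
  const hits = await tbl.query().fullTextSearch('"hot dog"', { columns: ["text"] }).toArray();
  console.log(hits.length);
}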
@@ -75,7 +75,7 @@ impl Table {
    }

    #[napi(catch_unwind)]
-   pub async fn add(&self, buf: Buffer, mode: String) -> napi::Result<()> {
+   pub async fn add(&self, buf: Buffer, mode: String) -> napi::Result<AddResult> {
        let batches = ipc_file_to_batches(buf.to_vec())
            .map_err(|e| napi::Error::from_reason(format!("Failed to read IPC file: {}", e)))?;
        let mut op = self.inner_ref()?.add(batches);

@@ -88,7 +88,8 @@ impl Table {
            return Err(napi::Error::from_reason(format!("Invalid mode: {}", mode)));
        };

-       op.execute().await.default_error()
+       let res = op.execute().await.default_error()?;
+       Ok(res.into())
    }

    #[napi(catch_unwind)]

@@ -101,8 +102,9 @@ impl Table {
    }

    #[napi(catch_unwind)]
-   pub async fn delete(&self, predicate: String) -> napi::Result<()> {
-       self.inner_ref()?.delete(&predicate).await.default_error()
+   pub async fn delete(&self, predicate: String) -> napi::Result<DeleteResult> {
+       let res = self.inner_ref()?.delete(&predicate).await.default_error()?;
+       Ok(res.into())
    }

    #[napi(catch_unwind)]

@@ -111,6 +113,7 @@ impl Table {
        index: Option<&Index>,
        column: String,
        replace: Option<bool>,
+       wait_timeout_s: Option<i64>,
    ) -> napi::Result<()> {
        let lancedb_index = if let Some(index) = index {
            index.consume()?

@@ -121,6 +124,10 @@ impl Table {
        if let Some(replace) = replace {
            builder = builder.replace(replace);
        }
+       if let Some(timeout) = wait_timeout_s {
+           builder =
+               builder.wait_timeout(std::time::Duration::from_secs(timeout.try_into().unwrap()));
+       }
        builder.execute().await.default_error()
    }

@@ -132,12 +139,38 @@ impl Table {
            .default_error()
    }

+   #[napi(catch_unwind)]
+   pub async fn prewarm_index(&self, index_name: String) -> napi::Result<()> {
+       self.inner_ref()?
+           .prewarm_index(&index_name)
+           .await
+           .default_error()
+   }
+
+   #[napi(catch_unwind)]
+   pub async fn wait_for_index(&self, index_names: Vec<String>, timeout_s: i64) -> Result<()> {
+       let timeout = std::time::Duration::from_secs(timeout_s.try_into().unwrap());
+       let index_names: Vec<&str> = index_names.iter().map(|s| s.as_str()).collect();
+       let slice: &[&str] = &index_names;
+
+       self.inner_ref()?
+           .wait_for_index(slice, timeout)
+           .await
+           .default_error()
+   }
+
+   #[napi(catch_unwind)]
+   pub async fn stats(&self) -> Result<TableStatistics> {
+       let stats = self.inner_ref()?.stats().await.default_error()?;
+       Ok(stats.into())
+   }

    #[napi(catch_unwind)]
    pub async fn update(
        &self,
        only_if: Option<String>,
        columns: Vec<(String, String)>,
-   ) -> napi::Result<u64> {
+   ) -> napi::Result<UpdateResult> {
        let mut op = self.inner_ref()?.update();
        if let Some(only_if) = only_if {
            op = op.only_if(only_if);

@@ -145,7 +178,8 @@ impl Table {
        for (column_name, value) in columns {
            op = op.column(column_name, value);
        }
-       op.execute().await.default_error()
+       let res = op.execute().await.default_error()?;
+       Ok(res.into())
    }

    #[napi(catch_unwind)]

@@ -159,21 +193,28 @@ impl Table {
    }

    #[napi(catch_unwind)]
-   pub async fn add_columns(&self, transforms: Vec<AddColumnsSql>) -> napi::Result<()> {
+   pub async fn add_columns(
+       &self,
+       transforms: Vec<AddColumnsSql>,
+   ) -> napi::Result<AddColumnsResult> {
        let transforms = transforms
            .into_iter()
            .map(|sql| (sql.name, sql.value_sql))
            .collect::<Vec<_>>();
        let transforms = NewColumnTransform::SqlExpressions(transforms);
-       self.inner_ref()?
+       let res = self
+           .inner_ref()?
            .add_columns(transforms, None)
            .await
            .default_error()?;
-       Ok(())
+       Ok(res.into())
    }

    #[napi(catch_unwind)]
-   pub async fn alter_columns(&self, alterations: Vec<ColumnAlteration>) -> napi::Result<()> {
+   pub async fn alter_columns(
+       &self,
+       alterations: Vec<ColumnAlteration>,
+   ) -> napi::Result<AlterColumnsResult> {
        for alteration in &alterations {
            if alteration.rename.is_none()
                && alteration.nullable.is_none()

@@ -190,21 +231,23 @@ impl Table {
            .collect::<std::result::Result<Vec<_>, String>>()
            .map_err(napi::Error::from_reason)?;

-       self.inner_ref()?
+       let res = self
+           .inner_ref()?
            .alter_columns(&alterations)
            .await
            .default_error()?;
-       Ok(())
+       Ok(res.into())
    }

    #[napi(catch_unwind)]
-   pub async fn drop_columns(&self, columns: Vec<String>) -> napi::Result<()> {
+   pub async fn drop_columns(&self, columns: Vec<String>) -> napi::Result<DropColumnsResult> {
        let col_refs = columns.iter().map(String::as_str).collect::<Vec<_>>();
-       self.inner_ref()?
+       let res = self
+           .inner_ref()?
            .drop_columns(&col_refs)
            .await
            .default_error()?;
-       Ok(())
+       Ok(res.into())
    }

    #[napi(catch_unwind)]

@@ -224,6 +267,14 @@ impl Table {
            .default_error()
    }

+   #[napi(catch_unwind)]
+   pub async fn checkout_tag(&self, tag: String) -> napi::Result<()> {
+       self.inner_ref()?
+           .checkout_tag(tag.as_str())
+           .await
+           .default_error()
+   }

    #[napi(catch_unwind)]
    pub async fn checkout_latest(&self) -> napi::Result<()> {
        self.inner_ref()?.checkout_latest().await.default_error()

@@ -256,6 +307,13 @@ impl Table {
        self.inner_ref()?.restore().await.default_error()
    }

+   #[napi(catch_unwind)]
+   pub async fn tags(&self) -> napi::Result<Tags> {
+       Ok(Tags {
+           inner: self.inner_ref()?.clone(),
+       })
+   }

    #[napi(catch_unwind)]
    pub async fn optimize(
        &self,

@@ -515,9 +573,257 @@ impl From<lancedb::index::IndexStatistics> for IndexStatistics {
    }
}

#[napi(object)]
pub struct TableStatistics {
    /// The total number of bytes in the table
    pub total_bytes: i64,

    /// The number of rows in the table
    pub num_rows: i64,

    /// The number of indices in the table
    pub num_indices: i64,

    /// Statistics on table fragments
    pub fragment_stats: FragmentStatistics,
}

#[napi(object)]
pub struct FragmentStatistics {
    /// The number of fragments in the table
    pub num_fragments: i64,

    /// The number of uncompacted fragments in the table
    pub num_small_fragments: i64,

    /// Statistics on the number of rows in the table fragments
    pub lengths: FragmentSummaryStats,
}

#[napi(object)]
pub struct FragmentSummaryStats {
    /// The number of rows in the fragment with the fewest rows
    pub min: i64,

    /// The number of rows in the fragment with the most rows
    pub max: i64,

    /// The mean number of rows in the fragments
    pub mean: i64,

    /// The 25th percentile of number of rows in the fragments
    pub p25: i64,

    /// The 50th percentile of number of rows in the fragments
    pub p50: i64,

    /// The 75th percentile of number of rows in the fragments
    pub p75: i64,

    /// The 99th percentile of number of rows in the fragments
    pub p99: i64,
}

impl From<lancedb::table::TableStatistics> for TableStatistics {
    fn from(v: lancedb::table::TableStatistics) -> Self {
        Self {
            total_bytes: v.total_bytes as i64,
            num_rows: v.num_rows as i64,
            num_indices: v.num_indices as i64,
            fragment_stats: FragmentStatistics {
                num_fragments: v.fragment_stats.num_fragments as i64,
                num_small_fragments: v.fragment_stats.num_small_fragments as i64,
                lengths: FragmentSummaryStats {
                    min: v.fragment_stats.lengths.min as i64,
                    max: v.fragment_stats.lengths.max as i64,
                    mean: v.fragment_stats.lengths.mean as i64,
                    p25: v.fragment_stats.lengths.p25 as i64,
                    p50: v.fragment_stats.lengths.p50 as i64,
                    p75: v.fragment_stats.lengths.p75 as i64,
                    p99: v.fragment_stats.lengths.p99 as i64,
                },
            },
        }
    }
}
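From JavaScript these statistics land as plain objects, with napi converting the snake_case fields to camelCase. A sketch reading them:

import * as lancedb from "@lancedb/lancedb";

async function reportStats() {
  const db = await lancedb.connect("./data");
  const tbl = await db.openTable("docs");

  const stats = await tbl.stats();
  console.log(`rows=${stats.numRows} bytes=${stats.totalBytes} indices=${stats.numIndices}`);

  const frags = stats.fragmentStats;
  console.log(`fragments=${frags.numFragments} (small: ${frags.numSmallFragments})`);
  console.log(`rows/fragment p50=${frags.lengths.p50} p99=${frags.lengths.p99}`);
}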
#[napi(object)]
pub struct Version {
    pub version: i64,
    pub timestamp: i64,
    pub metadata: HashMap<String, String>,
}

#[napi(object)]
pub struct UpdateResult {
    pub rows_updated: i64,
    pub version: i64,
}

impl From<lancedb::table::UpdateResult> for UpdateResult {
    fn from(value: lancedb::table::UpdateResult) -> Self {
        Self {
            rows_updated: value.rows_updated as i64,
            version: value.version as i64,
        }
    }
}

#[napi(object)]
pub struct AddResult {
    pub version: i64,
}

impl From<lancedb::table::AddResult> for AddResult {
    fn from(value: lancedb::table::AddResult) -> Self {
        Self {
            version: value.version as i64,
        }
    }
}

#[napi(object)]
pub struct DeleteResult {
    pub version: i64,
}

impl From<lancedb::table::DeleteResult> for DeleteResult {
    fn from(value: lancedb::table::DeleteResult) -> Self {
        Self {
            version: value.version as i64,
        }
    }
}

#[napi(object)]
pub struct MergeResult {
    pub version: i64,
    pub num_inserted_rows: i64,
    pub num_updated_rows: i64,
    pub num_deleted_rows: i64,
}

impl From<lancedb::table::MergeResult> for MergeResult {
    fn from(value: lancedb::table::MergeResult) -> Self {
        Self {
            version: value.version as i64,
            num_inserted_rows: value.num_inserted_rows as i64,
            num_updated_rows: value.num_updated_rows as i64,
            num_deleted_rows: value.num_deleted_rows as i64,
        }
    }
}

#[napi(object)]
pub struct AddColumnsResult {
    pub version: i64,
}

impl From<lancedb::table::AddColumnsResult> for AddColumnsResult {
    fn from(value: lancedb::table::AddColumnsResult) -> Self {
        Self {
            version: value.version as i64,
        }
    }
}

#[napi(object)]
pub struct AlterColumnsResult {
    pub version: i64,
}

impl From<lancedb::table::AlterColumnsResult> for AlterColumnsResult {
    fn from(value: lancedb::table::AlterColumnsResult) -> Self {
        Self {
            version: value.version as i64,
        }
    }
}

#[napi(object)]
pub struct DropColumnsResult {
    pub version: i64,
}

impl From<lancedb::table::DropColumnsResult> for DropColumnsResult {
    fn from(value: lancedb::table::DropColumnsResult) -> Self {
        Self {
            version: value.version as i64,
        }
    }
}
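All the mutation entry points now share one pattern: the Rust result converts into a small #[napi(object)] carrying the committed version, plus rowsUpdated for updates and the row counts for merges. From the JS side that looks roughly like this (table name hypothetical):

import * as lancedb from "@lancedb/lancedb";

async function trackVersions() {
  const db = await lancedb.connect("./data");
  const tbl = await db.openTable("docs");

  const add = await tbl.add([{ id: 1, text: "hello" }]);
  const upd = await tbl.update({ values: { text: "hi" }, where: "id = 1" });
  const del = await tbl.delete("id = 1");

  // Every write returns the version it committed, so a writer can chain
  // deterministic time-travel reads, e.g. tbl.checkout(add.version).
  console.log(add.version, upd.version, upd.rowsUpdated, del.version);
}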
#[napi]
pub struct TagContents {
    pub version: i64,
    pub manifest_size: i64,
}

#[napi]
pub struct Tags {
    inner: LanceDbTable,
}

#[napi]
impl Tags {
    #[napi]
    pub async fn list(&self) -> napi::Result<HashMap<String, TagContents>> {
        let rust_tags = self.inner.tags().await.default_error()?;
        let tag_list = rust_tags.as_ref().list().await.default_error()?;
        let tag_contents = tag_list
            .into_iter()
            .map(|(k, v)| {
                (
                    k,
                    TagContents {
                        version: v.version as i64,
                        manifest_size: v.manifest_size as i64,
                    },
                )
            })
            .collect();

        Ok(tag_contents)
    }

    #[napi]
    pub async fn get_version(&self, tag: String) -> napi::Result<i64> {
        let rust_tags = self.inner.tags().await.default_error()?;
        rust_tags
            .as_ref()
            .get_version(tag.as_str())
            .await
            .map(|v| v as i64)
            .default_error()
    }

    #[napi]
    pub async unsafe fn create(&mut self, tag: String, version: i64) -> napi::Result<()> {
        let mut rust_tags = self.inner.tags().await.default_error()?;
        rust_tags
            .as_mut()
            .create(tag.as_str(), version as u64)
            .await
            .default_error()
    }

    #[napi]
    pub async unsafe fn delete(&mut self, tag: String) -> napi::Result<()> {
        let mut rust_tags = self.inner.tags().await.default_error()?;
        rust_tags
            .as_mut()
            .delete(tag.as_str())
            .await
            .default_error()
    }

    #[napi]
    pub async unsafe fn update(&mut self, tag: String, version: i64) -> napi::Result<()> {
        let mut rust_tags = self.inner.tags().await.default_error()?;
        rust_tags
            .as_mut()
            .update(tag.as_str(), version as u64)
            .await
            .default_error()
    }
}
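Putting the new Tags handle together with the tag-aware checkout, the intended lifecycle looks like this (tag and table names hypothetical; napi exposes get_version as getVersion):

import * as lancedb from "@lancedb/lancedb";

async function tagLifecycle() {
  const db = await lancedb.connect("./data");
  const tbl = await db.openTable("docs");

  const tags = await tbl.tags();
  const version = await tbl.version();

  await tags.create("v1.0.0", version);      // pin the current version
  console.log(await tags.list());            // { "v1.0.0": { version, manifestSize } }
  console.log(await tags.getVersion("v1.0.0"));

  await tbl.checkout("v1.0.0");              // strings route to checkoutTag
  await tags.update("v1.0.0", version);      // repoint an existing tag
  await tags.delete("v1.0.0");
}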
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

- use lancedb::index::scalar::{BoostQuery, FtsQuery, MatchQuery, MultiMatchQuery, PhraseQuery};
use lancedb::DistanceType;

pub fn parse_distance_type(distance_type: impl AsRef<str>) -> napi::Result<DistanceType> {

@@ -16,144 +15,3 @@ pub fn parse_distance_type(distance_type: impl AsRef<str>) -> napi::Result<Dista
        ))),
    }
}
-
- pub fn parse_fts_query(query: &napi::JsObject) -> napi::Result<FtsQuery> {
-     let query_type = query
-         .get_property_names()?
-         .get_element::<napi::JsString>(0)?;
-     let query_type = query_type.into_utf8()?.into_owned()?;
-     let query_value =
-         query
-             .get::<_, napi::JsObject>(&query_type)?
-             .ok_or(napi::Error::from_reason(format!(
-                 "query value {} not found",
-                 query_type
-             )))?;
-
-     match query_type.as_str() {
-         "match" => {
-             let column = query_value
-                 .get_property_names()?
-                 .get_element::<napi::JsString>(0)?
-                 .into_utf8()?
-                 .into_owned()?;
-             let params =
-                 query_value
-                     .get::<_, napi::JsObject>(&column)?
-                     .ok_or(napi::Error::from_reason(format!(
-                         "column {} not found",
-                         column
-                     )))?;
-
-             let query = params
-                 .get::<_, napi::JsString>("query")?
-                 .ok_or(napi::Error::from_reason("query not found"))?
-                 .into_utf8()?
-                 .into_owned()?;
-             let boost = params
-                 .get::<_, napi::JsNumber>("boost")?
-                 .ok_or(napi::Error::from_reason("boost not found"))?
-                 .get_double()? as f32;
-             let fuzziness = params
-                 .get::<_, napi::JsNumber>("fuzziness")?
-                 .map(|f| f.get_uint32())
-                 .transpose()?;
-             let max_expansions = params
-                 .get::<_, napi::JsNumber>("max_expansions")?
-                 .ok_or(napi::Error::from_reason("max_expansions not found"))?
-                 .get_uint32()? as usize;
-
-             let query = MatchQuery::new(query)
-                 .with_column(Some(column))
-                 .with_boost(boost)
-                 .with_fuzziness(fuzziness)
-                 .with_max_expansions(max_expansions);
-             Ok(query.into())
-         }
-
-         "match_phrase" => {
-             let column = query_value
-                 .get_property_names()?
-                 .get_element::<napi::JsString>(0)?
-                 .into_utf8()?
-                 .into_owned()?;
-             let query = query_value
-                 .get::<_, napi::JsString>(&column)?
-                 .ok_or(napi::Error::from_reason(format!(
-                     "column {} not found",
-                     column
-                 )))?
-                 .into_utf8()?
-                 .into_owned()?;
-
-             let query = PhraseQuery::new(query).with_column(Some(column));
-             Ok(query.into())
-         }
-
-         "boost" => {
-             let positive = query_value
-                 .get::<_, napi::JsObject>("positive")?
-                 .ok_or(napi::Error::from_reason("positive not found"))?;
-
-             let negative = query_value
-                 .get::<_, napi::JsObject>("negative")?
-                 .ok_or(napi::Error::from_reason("negative not found"))?;
-             let negative_boost = query_value
-                 .get::<_, napi::JsNumber>("negative_boost")?
-                 .ok_or(napi::Error::from_reason("negative_boost not found"))?
-                 .get_double()? as f32;
-
-             let positive = parse_fts_query(&positive)?;
-             let negative = parse_fts_query(&negative)?;
-             let query = BoostQuery::new(positive, negative, Some(negative_boost));
-             Ok(query.into())
-         }
-
-         "multi_match" => {
-             let query = query_value
-                 .get::<_, napi::JsString>("query")?
-                 .ok_or(napi::Error::from_reason("query not found"))?
-                 .into_utf8()?
-                 .into_owned()?;
-             let columns_array = query_value
-                 .get::<_, napi::JsTypedArray>("columns")?
-                 .ok_or(napi::Error::from_reason("columns not found"))?;
-             let columns_num = columns_array.get_array_length()?;
-             let mut columns = Vec::with_capacity(columns_num as usize);
-             for i in 0..columns_num {
-                 let column = columns_array
-                     .get_element::<napi::JsString>(i)?
-                     .into_utf8()?
-                     .into_owned()?;
-                 columns.push(column);
-             }
-             let boost_array = query_value
-                 .get::<_, napi::JsTypedArray>("boost")?
-                 .ok_or(napi::Error::from_reason("boost not found"))?;
-             if boost_array.get_array_length()? != columns_num {
-                 return Err(napi::Error::from_reason(format!(
-                     "boost array length ({}) does not match columns length ({})",
-                     boost_array.get_array_length()?,
-                     columns_num
-                 )));
-             }
-             let mut boost = Vec::with_capacity(columns_num as usize);
-             for i in 0..columns_num {
-                 let b = boost_array.get_element::<napi::JsNumber>(i)?.get_double()? as f32;
-                 boost.push(b);
-             }
-
-             let query =
-                 MultiMatchQuery::try_new_with_boosts(query, columns, boost).map_err(|e| {
-                     napi::Error::from_reason(format!("Error creating MultiMatchQuery: {}", e))
-                 })?;
-
-             Ok(query.into())
-         }
-
-         _ => Err(napi::Error::from_reason(format!(
-             "Unsupported query type: {}",
-             query_type
-         ))),
-     }
- }
@@ -1,5 +1,5 @@
[tool.bumpversion]
- current_version = "0.22.0-beta.1"
+ current_version = "0.24.2-beta.0"
parse = """(?x)
    (?P<major>0|[1-9]\\d*)\\.
    (?P<minor>0|[1-9]\\d*)\\.
@@ -1,6 +1,6 @@
[package]
name = "lancedb-python"
- version = "0.22.0-beta.1"
+ version = "0.24.2-beta.0"
edition.workspace = true
description = "Python bindings for LanceDB"
license.workspace = true

@@ -14,11 +14,11 @@ name = "_lancedb"
crate-type = ["cdylib"]

[dependencies]
- arrow = { version = "54.1", features = ["pyarrow"] }
+ arrow = { version = "55.1", features = ["pyarrow"] }
lancedb = { path = "../rust/lancedb", default-features = false }
env_logger.workspace = true
- pyo3 = { version = "0.23", features = ["extension-module", "abi3-py39"] }
- pyo3-async-runtimes = { version = "0.23", features = [
+ pyo3 = { version = "0.24", features = ["extension-module", "abi3-py39"] }
+ pyo3-async-runtimes = { version = "0.24", features = [
    "attributes",
    "tokio-runtime",
] }

@@ -27,7 +27,7 @@ futures.workspace = true
tokio = { version = "1.40", features = ["sync"] }

[build-dependencies]
- pyo3-build-config = { version = "0.23", features = [
+ pyo3-build-config = { version = "0.24", features = [
    "extension-module",
    "abi3-py39",
] }
@@ -4,11 +4,12 @@ name = "lancedb"
dynamic = ["version"]
dependencies = [
    "deprecation",
-   "tqdm>=4.27.0",
-   "pyarrow>=14",
-   "pydantic>=1.10",
-   "packaging",
    "numpy",
    "overrides>=0.7",
+   "packaging",
+   "pyarrow>=16",
+   "pydantic>=1.10",
+   "tqdm>=4.27.0",
]
description = "lancedb"
authors = [{ name = "LanceDB Devs", email = "dev@lancedb.com" }]

@@ -42,6 +43,9 @@ classifiers = [
repository = "https://github.com/lancedb/lancedb"

[project.optional-dependencies]
+ pylance = [
+     "pylance>=0.25",
+ ]
tests = [
    "aiohttp",
    "boto3",

@@ -54,7 +58,9 @@ tests = [
    "polars>=0.19, <=1.3.0",
    "tantivy",
    "pyarrow-stubs",
-   "pylance>=0.23.2",
+   "pylance>=0.25",
    "requests",
+   "datafusion",
]
dev = [
    "ruff",

@@ -72,13 +78,14 @@ embeddings = [
    "pillow",
    "open-clip-torch",
    "cohere",
+   "colpali-engine>=0.3.10",
    "huggingface_hub",
    "InstructorEmbedding",
    "google.generativeai",
    "boto3>=1.28.57",
    "awscli>=1.29.57",
    "botocore>=1.31.57",
-   "ollama",
+   "ollama>=0.3.0",
    "ibm-watsonx-ai>=1.1.2",
]
azure = ["adlfs>=2024.2.0"]
@@ -26,7 +26,7 @@ def connect(
     api_key: Optional[str] = None,
     region: str = "us-east-1",
     host_override: Optional[str] = None,
-    read_consistency_interval: Optional[timedelta] = timedelta(seconds=5),
+    read_consistency_interval: Optional[timedelta] = None,
     request_thread_pool: Optional[Union[int, ThreadPoolExecutor]] = None,
     client_config: Union[ClientConfig, Dict[str, Any], None] = None,
     storage_options: Optional[Dict[str, str]] = None,
@@ -49,8 +49,9 @@ def connect(
     read_consistency_interval: timedelta, default None
         (For LanceDB OSS only)
         The interval at which to check for updates to the table from other
-        processes. If None, then consistency is not checked. For strong consistency,
-        set this to zero seconds. Then every read will check for updates from other
+        processes. If None, then consistency is not checked. For performance
+        reasons, this is the default. For strong consistency, set this to
+        zero seconds. Then every read will check for updates from other
         processes. As a compromise, you can set this to a non-zero timedelta
         for eventual consistency. If more than that interval has passed since
         the last check, then the table will be checked for updates. Note: this
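The default read_consistency_interval drops from five seconds to None, trading cross-process freshness for speed. A sketch of the three modes the docstring describes (the local path is illustrative):

from datetime import timedelta

import lancedb

# Default: no consistency checks between processes (fastest).
db = lancedb.connect("/tmp/sample-db")

# Strong consistency: every read checks for updates from other processes.
db_strong = lancedb.connect("/tmp/sample-db", read_consistency_interval=timedelta(0))

# Eventual consistency: re-check at most every 5 seconds (the old default).
db_eventual = lancedb.connect("/tmp/sample-db", read_consistency_interval=timedelta(seconds=5))
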
@@ -121,7 +122,7 @@ async def connect_async(
     api_key: Optional[str] = None,
     region: str = "us-east-1",
     host_override: Optional[str] = None,
-    read_consistency_interval: Optional[timedelta] = timedelta(seconds=5),
+    read_consistency_interval: Optional[timedelta] = None,
     client_config: Optional[Union[ClientConfig, Dict[str, Any]]] = None,
     storage_options: Optional[Dict[str, str]] = None,
 ) -> AsyncConnection:
@@ -142,8 +143,9 @@ async def connect_async(
     read_consistency_interval: timedelta, default None
         (For LanceDB OSS only)
         The interval at which to check for updates to the table from other
-        processes. If None, then consistency is not checked. For strong consistency,
-        set this to zero seconds. Then every read will check for updates from other
+        processes. If None, then consistency is not checked. For performance
+        reasons, this is the default. For strong consistency, set this to
+        zero seconds. Then every read will check for updates from other
        processes. As a compromise, you can set this to a non-zero timedelta
         for eventual consistency. If more than that interval has passed since
         the last check, then the table will be checked for updates. Note: this
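connect_async gets the same new default; the async form (illustrative path again):

import asyncio
from datetime import timedelta

import lancedb

async def main():
    # Same consistency semantics as the sync example above.
    db = await lancedb.connect_async(
        "/tmp/sample-db", read_consistency_interval=timedelta(seconds=5)
    )
    print(await db.table_names())

asyncio.run(main())
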
@@ -1,4 +1,5 @@
-from typing import Dict, List, Optional, Tuple, Any, Union, Literal
+from datetime import timedelta
+from typing import Dict, List, Optional, Tuple, Any, TypedDict, Union, Literal

 import pyarrow as pa
@@ -35,8 +36,10 @@ class Table:
     async def schema(self) -> pa.Schema: ...
     async def add(
         self, data: pa.RecordBatchReader, mode: Literal["append", "overwrite"]
-    ) -> None: ...
-    async def update(self, updates: Dict[str, str], where: Optional[str]) -> None: ...
+    ) -> AddResult: ...
+    async def update(
+        self, updates: Dict[str, str], where: Optional[str]
+    ) -> UpdateResult: ...
     async def count_rows(self, filter: Optional[str]) -> int: ...
     async def create_index(
         self,
@@ -46,23 +49,34 @@ class Table:
     ): ...
     async def list_versions(self) -> List[Dict[str, Any]]: ...
     async def version(self) -> int: ...
-    async def checkout(self, version: int): ...
+    async def checkout(self, version: Union[int, str]): ...
     async def checkout_latest(self): ...
-    async def restore(self, version: Optional[int] = None): ...
+    async def restore(self, version: Optional[Union[int, str]] = None): ...
     async def list_indices(self) -> list[IndexConfig]: ...
-    async def delete(self, filter: str): ...
-    async def add_columns(self, columns: list[tuple[str, str]]) -> None: ...
-    async def add_columns_with_schema(self, schema: pa.Schema) -> None: ...
-    async def alter_columns(self, columns: list[dict[str, Any]]) -> None: ...
+    async def delete(self, filter: str) -> DeleteResult: ...
+    async def add_columns(self, columns: list[tuple[str, str]]) -> AddColumnsResult: ...
+    async def add_columns_with_schema(self, schema: pa.Schema) -> AddColumnsResult: ...
+    async def alter_columns(
+        self, columns: list[dict[str, Any]]
+    ) -> AlterColumnsResult: ...
     async def optimize(
         self,
         *,
         cleanup_since_ms: Optional[int] = None,
         delete_unverified: Optional[bool] = None,
     ) -> OptimizeStats: ...
+    @property
+    def tags(self) -> Tags: ...
     def query(self) -> Query: ...
     def vector_search(self) -> VectorQuery: ...

+class Tags:
+    async def list(self) -> Dict[str, Tag]: ...
+    async def get_version(self, tag: str) -> int: ...
+    async def create(self, tag: str, version: int): ...
+    async def delete(self, tag: str): ...
+    async def update(self, tag: str, version: int): ...
+
 class IndexConfig:
     index_type: str
     columns: List[str]
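Mutations on the native Table now return small result objects carrying the new table version instead of None, and a Tags handle is exposed as a property. A hedged sketch against the high-level async table API that wraps these stubs, assuming it forwards the same shapes:

async def demo(table):
    add_res = await table.add([{"id": 1, "vector": [0.1, 0.2]}])
    print("version after add:", add_res.version)           # AddResult

    upd = await table.update({"id": 2}, where="id = 1")
    print(upd.rows_updated, "rows; version", upd.version)   # UpdateResult

    # Tags pin a human-readable name to a table version, like a git tag.
    await table.tags.create("v1", await table.version())
    print(await table.tags.list())
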
@@ -94,7 +108,9 @@ class Query:
     def postfilter(self): ...
     def nearest_to(self, query_vec: pa.Array) -> VectorQuery: ...
     def nearest_to_text(self, query: dict) -> FTSQuery: ...
-    async def execute(self, max_batch_length: Optional[int]) -> RecordBatchStream: ...
+    async def execute(
+        self, max_batch_length: Optional[int], timeout: Optional[timedelta]
+    ) -> RecordBatchStream: ...
     async def explain_plan(self, verbose: Optional[bool]) -> str: ...
     async def analyze_plan(self) -> str: ...
     def to_query_request(self) -> PyQueryRequest: ...
@@ -110,7 +126,9 @@ class FTSQuery:
     def get_query(self) -> str: ...
     def add_query_vector(self, query_vec: pa.Array) -> None: ...
     def nearest_to(self, query_vec: pa.Array) -> HybridQuery: ...
-    async def execute(self, max_batch_length: Optional[int]) -> RecordBatchStream: ...
+    async def execute(
+        self, max_batch_length: Optional[int], timeout: Optional[timedelta]
+    ) -> RecordBatchStream: ...
     def to_query_request(self) -> PyQueryRequest: ...

 class VectorQuery:
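Query execution (plain, FTS, and vector alike) now threads through an optional timeout. Exactly how this surfaces on the public async API is an assumption here; a sketch:

from datetime import timedelta

async def first_page(table):
    # Assumption: the public async query accepts the timeout when collecting
    # results; the stub-level execute() above takes it as a second argument.
    return await table.query().limit(10).to_arrow(timeout=timedelta(seconds=30))
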
@@ -125,6 +143,8 @@ class VectorQuery:
     def postfilter(self): ...
     def refine_factor(self, refine_factor: int): ...
     def nprobes(self, nprobes: int): ...
+    def minimum_nprobes(self, minimum_nprobes: int): ...
+    def maximum_nprobes(self, maximum_nprobes: int): ...
     def bypass_vector_index(self): ...
     def nearest_to_text(self, query: dict) -> HybridQuery: ...
     def to_query_request(self) -> PyQueryRequest: ...
@@ -140,6 +160,8 @@ class HybridQuery:
     def distance_type(self, distance_type: str): ...
     def refine_factor(self, refine_factor: int): ...
     def nprobes(self, nprobes: int): ...
+    def minimum_nprobes(self, minimum_nprobes: int): ...
+    def maximum_nprobes(self, maximum_nprobes: int): ...
     def bypass_vector_index(self): ...
     def to_vector_query(self) -> VectorQuery: ...
     def to_fts_query(self) -> FTSQuery: ...
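minimum_nprobes and maximum_nprobes split the old single nprobes knob: the search always probes at least the minimum number of IVF partitions and widens toward the maximum only when needed (for example when a filter starves the result set). Builder-style usage, sketched from the stubs above and assuming the public query builder mirrors these names:

def build_query(table):
    # Probe at least 20 partitions; allow up to 100 if too few results match.
    return (
        table.query()
        .nearest_to([0.1, 0.2, 0.3])
        .minimum_nprobes(20)
        .maximum_nprobes(100)
    )
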
@@ -147,23 +169,21 @@
     def get_with_row_id(self) -> bool: ...
     def to_query_request(self) -> PyQueryRequest: ...

-class PyFullTextSearchQuery:
-    columns: Optional[List[str]]
-    query: str
-    limit: Optional[int]
-    wand_factor: Optional[float]
+class FullTextQuery:
+    pass

 class PyQueryRequest:
     limit: Optional[int]
     offset: Optional[int]
     filter: Optional[Union[str, bytes]]
-    full_text_search: Optional[PyFullTextSearchQuery]
+    full_text_search: Optional[FullTextQuery]
     select: Optional[Union[str, List[str]]]
     fast_search: Optional[bool]
     with_row_id: Optional[bool]
     column: Optional[str]
     query_vector: Optional[List[pa.Array]]
     nprobes: Optional[int]
+    minimum_nprobes: Optional[int]
+    maximum_nprobes: Optional[int]
     lower_bound: Optional[float]
     upper_bound: Optional[float]
     ef: Optional[int]
@@ -190,3 +210,32 @@ class RemovalStats:
 class OptimizeStats:
     compaction: CompactionStats
     prune: RemovalStats
+
+class Tag(TypedDict):
+    version: int
+    manifest_size: int
+
+class AddResult:
+    version: int
+
+class DeleteResult:
+    version: int
+
+class UpdateResult:
+    rows_updated: int
+    version: int
+
+class MergeResult:
+    version: int
+    num_updated_rows: int
+    num_inserted_rows: int
+    num_deleted_rows: int
+
+class AddColumnsResult:
+    version: int
+
+class AlterColumnsResult:
+    version: int
+
+class DropColumnsResult:
+    version: int
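MergeResult pairs the new version with per-category row counts. A sketch of reading it from merge_insert, the existing upsert builder (exact call shape assumed from the current sync API):

def upsert(table, new_rows):
    res = (
        table.merge_insert("id")
        .when_matched_update_all()
        .when_not_matched_insert_all()
        .execute(new_rows)
    )
    print(
        f"v{res.version}: {res.num_updated_rows} updated, "
        f"{res.num_inserted_rows} inserted, {res.num_deleted_rows} deleted"
    )
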
@@ -9,7 +9,7 @@ import numpy as np
 import pyarrow as pa
 import pyarrow.dataset

-from .dependencies import pandas as pd
+from .dependencies import _check_for_pandas, pandas as pd

 DATA = Union[List[dict], "pd.DataFrame", pa.Table, Iterable[pa.RecordBatch]]
 VEC = Union[list, np.ndarray, pa.Array, pa.ChunkedArray]
@@ -63,7 +63,7 @@ def data_to_reader(
     data: DATA, schema: Optional[pa.Schema] = None
 ) -> pa.RecordBatchReader:
     """Convert various types of input into a RecordBatchReader"""
-    if pd is not None and isinstance(data, pd.DataFrame):
+    if _check_for_pandas(data) and isinstance(data, pd.DataFrame):
         return pa.Table.from_pandas(data, schema=schema).to_reader()
     elif isinstance(data, pa.Table):
         return data.to_reader()
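The old pd is not None test forces the lazy pandas proxy to resolve even when data is not a DataFrame; _check_for_pandas defers any pandas import until the caller actually hands one over. Roughly (an illustrative reimplementation, not the actual helper in lancedb.dependencies):

import sys

def _check_for_pandas_sketch(obj) -> bool:
    # If pandas was never imported, obj cannot be a DataFrame.
    pd_mod = sys.modules.get("pandas")
    return pd_mod is not None and isinstance(obj, pd_mod.DataFrame)
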
@@ -6,7 +6,6 @@ from __future__ import annotations

 from abc import abstractmethod
 from pathlib import Path
-from datetime import timedelta
 from typing import TYPE_CHECKING, Dict, Iterable, List, Literal, Optional, Union

 from lancedb.embeddings.registry import EmbeddingFunctionRegistry
@@ -33,6 +32,7 @@ import deprecation
 if TYPE_CHECKING:
     import pyarrow as pa
     from .pydantic import LanceModel
+    from datetime import timedelta

     from ._lancedb import Connection as LanceDbConnection
     from .common import DATA, URI
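With the default now None, timedelta is needed only in annotations, so its import can move under TYPE_CHECKING; from __future__ import annotations keeps the hints lazy. The general pattern:

from __future__ import annotations

from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    from datetime import timedelta  # evaluated only by type checkers

def configure(read_consistency_interval: Optional[timedelta] = None) -> None:
    # With lazy annotations, "timedelta" stays a string at runtime,
    # so no runtime import is needed once the default is None.
    ...
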
@@ -318,8 +318,9 @@ class LanceDBConnection(DBConnection):
         The root uri of the database.
     read_consistency_interval: timedelta, default None
         The interval at which to check for updates to the table from other
-        processes. If None, then consistency is not checked. For strong consistency,
-        set this to zero seconds. Then every read will check for updates from other
+        processes. If None, then consistency is not checked. For performance
+        reasons, this is the default. For strong consistency, set this to
+        zero seconds. Then every read will check for updates from other
         processes. As a compromise, you can set this to a non-zero timedelta
         for eventual consistency. If more than that interval has passed since
         the last check, then the table will be checked for updates. Note: this
@@ -351,7 +352,7 @@
         self,
         uri: URI,
         *,
-        read_consistency_interval: Optional[timedelta] = timedelta(seconds=5),
+        read_consistency_interval: Optional[timedelta] = None,
         storage_options: Optional[Dict[str, str]] = None,
     ):
         if not isinstance(uri, Path):
@@ -19,3 +19,4 @@ from .imagebind import ImageBindEmbeddings
 from .jinaai import JinaEmbeddings
 from .watsonx import WatsonxEmbeddings
 from .voyageai import VoyageAIEmbeddingFunction
+from .colpali import ColPaliEmbeddings
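ColPaliEmbeddings is now re-exported from lancedb.embeddings; like the other embedding functions it should be reachable through the registry. A sketch (the registry key "colpali" is an assumption):

from lancedb.embeddings import EmbeddingFunctionRegistry

registry = EmbeddingFunctionRegistry.get_instance()
colpali = registry.get("colpali").create()  # key name assumed
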
Some files were not shown because too many files have changed in this diff.