Mirror of https://github.com/lancedb/lancedb.git
Synced 2025-12-26 06:39:57 +00:00

Compare commits: python-v0. ... python-v0. (5 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | b67f13f642 |  |
|  | 2f12d67469 |  |
|  | 8d7cc29abb |  |
|  | a4404e9e18 |  |
|  | 077e5bb586 |  |
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.19.1-beta.5"
+current_version = "0.18.2-beta.1"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
    (?P<minor>0|[1-9]\\d*)\\.
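The `parse` value is a verbose (`(?x)`) regular expression that bump-my-version uses to split a version string into named parts; only the major/minor portion is visible in this hunk. A rough TypeScript equivalent of that prefix match, for illustration only:

```ts
// Hypothetical illustration of the version-parse pattern above;
// JS uses (?<name>...) where the TOML pattern uses (?P<name>...).
const parse = /^(?<major>0|[1-9]\d*)\.(?<minor>0|[1-9]\d*)\./;
const m = "0.19.1-beta.5".match(parse);
console.log(m?.groups); // { major: "0", minor: "19" }
```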
.github/workflows/docs.yml (vendored, 13 changed lines)

@@ -18,24 +18,17 @@ concurrency:
   group: "pages"
   cancel-in-progress: true
 
-env:
-  # This reduces the disk space needed for the build
-  RUSTFLAGS: "-C debuginfo=0"
-  # according to: https://matklad.github.io/2021/09/04/fast-rust-builds.html
-  # CI builds are faster with incremental disabled.
-  CARGO_INCREMENTAL: "0"
-
 jobs:
   # Single deploy job since we're just deploying
   build:
     environment:
       name: github-pages
       url: ${{ steps.deployment.outputs.page_url }}
-    runs-on: ubuntu-24.04
+    runs-on: buildjet-8vcpu-ubuntu-2204
     steps:
       - name: Checkout
         uses: actions/checkout@v4
-      - name: Install dependencies needed for ubuntu
+      - name: Install dependecies needed for ubuntu
         run: |
           sudo apt install -y protobuf-compiler libssl-dev
           rustup update && rustup default
@@ -45,7 +38,6 @@ jobs:
           python-version: "3.10"
           cache: "pip"
           cache-dependency-path: "docs/requirements.txt"
-      - uses: Swatinem/rust-cache@v2
       - name: Build Python
         working-directory: python
         run: |
@@ -57,6 +49,7 @@ jobs:
           node-version: 20
           cache: 'npm'
           cache-dependency-path: node/package-lock.json
+      - uses: Swatinem/rust-cache@v2
       - name: Install node dependencies
         working-directory: node
         run: |
.github/workflows/java-publish.yml (vendored, 6 changed lines)

@@ -43,7 +43,7 @@ jobs:
       - uses: Swatinem/rust-cache@v2
       - uses: actions-rust-lang/setup-rust-toolchain@v1
         with:
-          toolchain: "1.81.0"
+          toolchain: "1.79.0"
           cache-workspaces: "./java/core/lancedb-jni"
           # Disable full debug symbol generation to speed up CI build and keep memory down
           # "1" means line tables only, which is useful for panic tracebacks.
@@ -97,7 +97,7 @@ jobs:
       - name: Dry run
         if: github.event_name == 'pull_request'
         run: |
-          mvn --batch-mode -DskipTests -Drust.release.build=true package
+          mvn --batch-mode -DskipTests package
       - name: Set github
         run: |
           git config --global user.email "LanceDB Github Runner"
@@ -108,7 +108,7 @@ jobs:
           echo "use-agent" >> ~/.gnupg/gpg.conf
           echo "pinentry-mode loopback" >> ~/.gnupg/gpg.conf
           export GPG_TTY=$(tty)
-          mvn --batch-mode -DskipTests -Drust.release.build=true -DpushChanges=false -Dgpg.passphrase=${{ secrets.GPG_PASSPHRASE }} deploy -P deploy-to-ossrh
+          mvn --batch-mode -DskipTests -DpushChanges=false -Dgpg.passphrase=${{ secrets.GPG_PASSPHRASE }} deploy -P deploy-to-ossrh
         env:
           SONATYPE_USER: ${{ secrets.SONATYPE_USER }}
           SONATYPE_TOKEN: ${{ secrets.SONATYPE_TOKEN }}
.github/workflows/npm-publish.yml (vendored, 44 changed lines)

@@ -18,7 +18,6 @@ on:
     # This should trigger a dry run (we skip the final publish step)
     paths:
       - .github/workflows/npm-publish.yml
-      - Cargo.toml # Change in dependency frequently breaks builds
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
@@ -131,24 +130,29 @@ jobs:
              set -e &&
              apt-get update &&
              apt-get install -y protobuf-compiler pkg-config
-          - target: x86_64-unknown-linux-musl
-            # This one seems to need some extra memory
-            host: ubuntu-2404-8x-x64
-            # https://github.com/napi-rs/napi-rs/blob/main/alpine.Dockerfile
-            docker: ghcr.io/napi-rs/napi-rs/nodejs-rust:lts-alpine
-            features: fp16kernels
-            pre_build: |-
-              set -e &&
-              apk add protobuf-dev curl &&
-              ln -s /usr/lib/gcc/x86_64-alpine-linux-musl/14.2.0/crtbeginS.o /usr/lib/crtbeginS.o &&
-              ln -s /usr/lib/libgcc_s.so /usr/lib/libgcc.so &&
-              CC=gcc &&
-              CXX=g++
+          # TODO: re-enable x64 musl builds. I could not figure out why, but it
+          # consistently made GHA runners non-responsive at the end of build. Example:
+          # https://github.com/lancedb/lancedb/actions/runs/13980431071/job/39144319470?pr=2250
+          # - target: x86_64-unknown-linux-musl
+          #   # This one seems to need some extra memory
+          #   host: ubuntu-2404-8x-x64
+          #   # https://github.com/napi-rs/napi-rs/blob/main/alpine.Dockerfile
+          #   docker: ghcr.io/napi-rs/napi-rs/nodejs-rust:lts-alpine
+          #   features: ","
+          #   pre_build: |-
+          #     set -e &&
+          #     apk add protobuf-dev curl &&
+          #     ln -s /usr/lib/gcc/x86_64-alpine-linux-musl/14.2.0/crtbeginS.o /usr/lib/crtbeginS.o &&
+          #     ln -s /usr/lib/libgcc_s.so /usr/lib/libgcc.so
+
           - target: aarch64-unknown-linux-gnu
             host: ubuntu-2404-8x-x64
             # https://github.com/napi-rs/napi-rs/blob/main/debian-aarch64.Dockerfile
             docker: ghcr.io/napi-rs/napi-rs/nodejs-rust:lts-debian-aarch64
-            features: "fp16kernels"
+            # TODO: enable fp16kernels after https://github.com/lancedb/lance/pull/3559
+            features: ","
             pre_build: |-
              set -e &&
              apt-get update &&
@@ -166,8 +170,8 @@ jobs:
              set -e &&
              apk add protobuf-dev &&
              rustup target add aarch64-unknown-linux-musl &&
-              export CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc &&
-              export CXX_aarch64_unknown_linux_musl=aarch64-linux-musl-g++
+              export CC="/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc" &&
+              export CXX="/aarch64-linux-musl-cross/bin/aarch64-linux-musl-g++"
     name: build - ${{ matrix.settings.target }}
     runs-on: ${{ matrix.settings.host }}
     defaults:
@@ -531,12 +535,6 @@ jobs:
           for filename in *.tgz; do
             npm publish $PUBLISH_ARGS $filename
           done
-      - name: Deprecate
-        env:
-          NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
-        # We need to deprecate the old package to avoid confusion.
-        # Each time we publish a new version, it gets undeprecated.
-        run: npm deprecate vectordb "Use @lancedb/lancedb instead."
      - name: Notify Slack Action
        uses: ravsamhq/notify-slack-action@2.3.0
        if: ${{ always() }}
|||||||
1
.github/workflows/pypi-publish.yml
vendored
1
.github/workflows/pypi-publish.yml
vendored
@@ -8,7 +8,6 @@ on:
|
|||||||
# This should trigger a dry run (we skip the final publish step)
|
# This should trigger a dry run (we skip the final publish step)
|
||||||
paths:
|
paths:
|
||||||
- .github/workflows/pypi-publish.yml
|
- .github/workflows/pypi-publish.yml
|
||||||
- Cargo.toml # Change in dependency frequently breaks builds
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
linux:
|
linux:
|
||||||
|
|||||||
.github/workflows/python.yml (vendored, 5 changed lines)

@@ -136,9 +136,9 @@ jobs:
      - uses: ./.github/workflows/run_tests
        with:
          integration: true
-      - name: Test without pylance or pandas
+      - name: Test without pylance
        run: |
-          pip uninstall -y pylance pandas
+          pip uninstall -y pylance
          pytest -vv python/tests/test_table.py
      # Make sure wheels are not included in the Rust cache
      - name: Delete wheels
@@ -228,7 +228,6 @@ jobs:
      - name: Install lancedb
        run: |
          pip install "pydantic<2"
-          pip install pyarrow==16
          pip install --extra-index-url https://pypi.fury.io/lancedb/ -e .[tests]
          pip install tantivy
      - name: Run tests
.github/workflows/rust.yml (vendored, 3 changed lines)

@@ -40,9 +40,6 @@ jobs:
        with:
          fetch-depth: 0
          lfs: true
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
-        with:
-          components: rustfmt, clippy
      - uses: Swatinem/rust-cache@v2
        with:
          workspaces: rust
Cargo.lock (generated, 1117 changed lines): diff suppressed because it is too large.
Cargo.toml (33 changed lines)

@@ -21,14 +21,16 @@ categories = ["database-implementations"]
 rust-version = "1.78.0"
 
 [workspace.dependencies]
-lance = { "version" = "=0.27.2", "features" = ["dynamodb"] }
-lance-io = { version = "=0.27.2" }
-lance-index = { version = "=0.27.2" }
-lance-linalg = { version = "=0.27.2" }
-lance-table = { version = "=0.27.2" }
-lance-testing = { version = "=0.27.2" }
-lance-datafusion = { version = "=0.27.2" }
-lance-encoding = { version = "=0.27.2" }
+lance = { "version" = "=0.25.0", "features" = [
+    "dynamodb",
+] }
+lance-io = { version = "=0.25.0" }
+lance-index = { version = "=0.25.0" }
+lance-linalg = { version = "=0.25.0" }
+lance-table = { version = "=0.25.0" }
+lance-testing = { version = "=0.25.0" }
+lance-datafusion = { version = "=0.25.0" }
+lance-encoding = { version = "=0.25.0" }
 # Note that this one does not include pyarrow
 arrow = { version = "54.1", optional = false }
 arrow-array = "54.1"
@@ -39,12 +41,12 @@ arrow-schema = "54.1"
 arrow-arith = "54.1"
 arrow-cast = "54.1"
 async-trait = "0"
-datafusion = { version = "46.0", default-features = false }
-datafusion-catalog = "46.0"
-datafusion-common = { version = "46.0", default-features = false }
-datafusion-execution = "46.0"
-datafusion-expr = "46.0"
-datafusion-physical-plan = "46.0"
+datafusion = { version = "45.0", default-features = false }
+datafusion-catalog = "45.0"
+datafusion-common = { version = "45.0", default-features = false }
+datafusion-execution = "45.0"
+datafusion-expr = "45.0"
+datafusion-physical-plan = "45.0"
 env_logger = "0.11"
 half = { "version" = "=2.4.1", default-features = false, features = [
     "num-traits",
@@ -61,12 +63,15 @@ rand = "0.8"
 regex = "1.10"
 lazy_static = "1"
 semver = "1.0.25"
+
 # Temporary pins to work around downstream issues
 # https://github.com/apache/arrow-rs/commit/2fddf85afcd20110ce783ed5b4cdeb82293da30b
 chrono = "=0.4.39"
 # https://github.com/RustCrypto/formats/issues/1684
 base64ct = "=1.6.0"
+
 # Workaround for: https://github.com/eira-fransham/crunchy/issues/13
 crunchy = "=0.2.2"
+
 # Workaround for: https://github.com/Lokathor/bytemuck/issues/306
 bytemuck_derive = ">=1.8.1, <1.9.0"
@@ -2,7 +2,7 @@
 
 LanceDB docs are deployed to https://lancedb.github.io/lancedb/.
 
-Docs is built and deployed automatically by [Github Actions](../.github/workflows/docs.yml)
+Docs is built and deployed automatically by [Github Actions](.github/workflows/docs.yml)
 whenever a commit is pushed to the `main` branch. So it is possible for the docs to show
 unreleased features.
 
@@ -205,7 +205,6 @@ nav:
       - PromptTools: integrations/prompttools.md
       - dlt: integrations/dlt.md
       - phidata: integrations/phidata.md
-      - Genkit: integrations/genkit.md
   - 🎯 Examples:
       - Overview: examples/index.md
       - 🐍 Python:
@@ -332,7 +331,6 @@ nav:
       - PromptTools: integrations/prompttools.md
       - dlt: integrations/dlt.md
       - phidata: integrations/phidata.md
-      - Genkit: integrations/genkit.md
   - Examples:
       - examples/index.md
      - 🐍 Python:
@@ -342,7 +342,7 @@ For **read and write access**, LanceDB will need a policy such as:
                 "Action": [
                     "s3:PutObject",
                     "s3:GetObject",
-                    "s3:DeleteObject"
+                    "s3:DeleteObject",
                 ],
                 "Resource": "arn:aws:s3:::<bucket>/<prefix>/*"
             },
@@ -374,7 +374,7 @@ For **read-only access**, LanceDB will need a policy such as:
             {
                 "Effect": "Allow",
                 "Action": [
-                    "s3:GetObject"
+                    "s3:GetObject",
                 ],
                 "Resource": "arn:aws:s3:::<bucket>/<prefix>/*"
             },
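One thing to watch in these hunks: the `+` side introduces a trailing comma before the closing `]`, which strict JSON parsers reject (a policy editor may tolerate it, but `JSON.parse` will not). A quick check, for illustration only:

```ts
// Strict JSON rejects the trailing comma introduced on the "+" side:
JSON.parse('["s3:GetObject"]');    // ok
try {
  JSON.parse('["s3:GetObject",]'); // throws SyntaxError
} catch (e) {
  console.error((e as Error).message);
}
```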
@@ -765,10 +765,7 @@ This can be used to update zero to all rows depending on how many rows match the
     ];
     const tbl = await db.createTable("my_table", data)
 
-    await tbl.update({
-      values: { vector: [10, 10] },
-      where: "x = 2"
-    });
+    await tbl.update({vector: [10, 10]}, { where: "x = 2"})
     ```
 
 === "vectordb (deprecated)"
@@ -787,10 +784,7 @@ This can be used to update zero to all rows depending on how many rows match the
     ];
     const tbl = await db.createTable("my_table", data)
 
-    await tbl.update({
-      where: "x = 2",
-      values: { vector: [10, 10] }
-    });
+    await tbl.update({ where: "x = 2", values: {vector: [10, 10]} })
     ```
 
 #### Updating using a sql query
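For reference, the two call shapes shown in these hunks side by side; this sketch assumes a table with `vector` and `x` columns, as in the snippets above:

```ts
// @lancedb/lancedb style (right-hand side of the first hunk):
// column values first, options second.
await tbl.update({ vector: [10, 10] }, { where: "x = 2" });

// vectordb (deprecated) style (right-hand side of the second hunk):
// a single object carrying both `where` and `values`.
await tbl.update({ where: "x = 2", values: { vector: [10, 10] } });
```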
@@ -1,183 +0,0 @@ (entire file removed; its contents are shown below)

### genkitx-lancedb

This is a LanceDB plugin for the Genkit framework. It allows you to use LanceDB for ingesting and retrieving data with Genkit.



### Installation

```bash
pnpm install genkitx-lancedb
```

### Usage

Adding the LanceDB plugin to your Genkit instance:

```ts
import { lancedbIndexerRef, lancedb, lancedbRetrieverRef, WriteMode } from 'genkitx-lancedb';
import { textEmbedding004, vertexAI } from '@genkit-ai/vertexai';
import { gemini } from '@genkit-ai/vertexai';
import { z, genkit } from 'genkit';
import { Document } from 'genkit/retriever';
import { chunk } from 'llm-chunk';
import { readFile } from 'fs/promises';
import path from 'path';
import pdf from 'pdf-parse/lib/pdf-parse';

const ai = genkit({
  plugins: [
    // vertexAI provides the textEmbedding004 embedder
    vertexAI(),

    // the local vector store requires an embedder to translate from text to vector
    lancedb([
      {
        dbUri: '.db', // optional lancedb uri, default to .db
        tableName: 'table', // optional table name, default to table
        embedder: textEmbedding004,
      },
    ]),
  ],
});
```

You can run this app with the following command:

```bash
genkit start -- tsx --watch src/index.ts
```

This will add LanceDB as a retriever and indexer to the Genkit instance. You can see it in the GUI view:

<img width="1710" alt="Screenshot 2025-05-11 at 7 21 05 PM" src="https://github.com/user-attachments/assets/e752f7f4-785b-4797-a11e-72ab06a531b7" />

**Testing retrieval on a sample table**

Let's see the raw retrieval results:

<img width="1710" alt="Screenshot 2025-05-11 at 7 21 05 PM" src="https://github.com/user-attachments/assets/b8d356ed-8421-4790-8fc0-d6af563b9657" />

On running this query, you'll see 5 results fetched from the LanceDB table, where each result looks something like this:

<img width="1417" alt="Screenshot 2025-05-11 at 7 21 18 PM" src="https://github.com/user-attachments/assets/77429525-36e2-4da6-a694-e58c1cf9eb83" />

## Creating a custom RAG flow

Now that we've seen how LanceDB can be used in a Genkit pipeline, let's refine the flow and create a RAG. A RAG flow consists of an indexer and a retriever, with the retriever's output post-processed and fed into an LLM for the final response.

### Creating custom indexer flows
You can also create custom indexer flows, utilizing more options and features provided by LanceDB.

```ts
export const menuPdfIndexer = lancedbIndexerRef({
  // Using all defaults, for dbUri, tableName, and embedder, etc
});

const chunkingConfig = {
  minLength: 1000,
  maxLength: 2000,
  splitter: 'sentence',
  overlap: 100,
  delimiters: '',
} as any;

async function extractTextFromPdf(filePath: string) {
  const pdfFile = path.resolve(filePath);
  const dataBuffer = await readFile(pdfFile);
  const data = await pdf(dataBuffer);
  return data.text;
}

export const indexMenu = ai.defineFlow(
  {
    name: 'indexMenu',
    inputSchema: z.string().describe('PDF file path'),
    outputSchema: z.void(),
  },
  async (filePath: string) => {
    filePath = path.resolve(filePath);

    // Read the pdf.
    const pdfTxt = await ai.run('extract-text', () =>
      extractTextFromPdf(filePath)
    );

    // Divide the pdf text into segments.
    const chunks = await ai.run('chunk-it', async () =>
      chunk(pdfTxt, chunkingConfig)
    );

    // Convert chunks of text into documents to store in the index.
    const documents = chunks.map((text) => {
      return Document.fromText(text, { filePath });
    });

    // Add documents to the index.
    await ai.index({
      indexer: menuPdfIndexer,
      documents,
      options: {
        writeMode: WriteMode.Overwrite,
      } as any
    });
  }
);
```

<img width="1316" alt="Screenshot 2025-05-11 at 8 35 56 PM" src="https://github.com/user-attachments/assets/e2a20ce4-d1d0-4fa2-9a84-f2cc26e3a29f" />

In your console, you can see the logs:

<img width="511" alt="Screenshot 2025-05-11 at 7 19 14 PM" src="https://github.com/user-attachments/assets/243f26c5-ed38-40b6-b661-002f40f0423a" />

### Creating custom retriever flows
You can also create custom retriever flows, utilizing more options and features provided by LanceDB.

```ts
export const menuRetriever = lancedbRetrieverRef({
  tableName: "table", // Use the same table name as the indexer.
  displayName: "Menu", // Use a custom display name.
});

export const menuQAFlow = ai.defineFlow(
  { name: "Menu", inputSchema: z.string(), outputSchema: z.string() },
  async (input: string) => {
    // retrieve relevant documents
    const docs = await ai.retrieve({
      retriever: menuRetriever,
      query: input,
      options: {
        k: 3,
      },
    });

    const extractedContent = docs.map(doc => {
      if (doc.content && Array.isArray(doc.content) && doc.content.length > 0) {
        if (doc.content[0].media && doc.content[0].media.url) {
          return doc.content[0].media.url;
        }
      }
      return "No content found";
    });

    console.log("Extracted content:", extractedContent);

    const { text } = await ai.generate({
      model: gemini('gemini-2.0-flash'),
      prompt: `
You are acting as a helpful AI assistant that can answer
questions about the food available on the menu at Genkit Grub Pub.

Use only the context provided to answer the question.
If you don't know, do not make up an answer.
Do not add or change items on the menu.

Context:
${extractedContent.join('\n\n')}

Question: ${input}`,
      docs,
    });

    return text;
  }
);
```

Now, using our retrieval flow, we can ask questions about the ingested PDF:

<img width="1306" alt="Screenshot 2025-05-11 at 7 18 45 PM" src="https://github.com/user-attachments/assets/86c66b13-7c12-4d5f-9d81-ae36bfb1c346" />
@@ -1,67 +0,0 @@ (entire file removed; its contents are shown below)

[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / BoostQuery

# Class: BoostQuery

Represents a full-text query interface.
This interface defines the structure and behavior for full-text queries,
including methods to retrieve the query type and convert the query to a dictionary format.

## Implements

- [`FullTextQuery`](../interfaces/FullTextQuery.md)

## Constructors

### new BoostQuery()

```ts
new BoostQuery(
   positive,
   negative,
   options?): BoostQuery
```

Creates an instance of BoostQuery.
The boost returns documents that match the positive query,
but penalizes those that match the negative query.
The penalty is controlled by the `negativeBoost` parameter.

#### Parameters

* **positive**: [`FullTextQuery`](../interfaces/FullTextQuery.md)
The positive query that boosts the relevance score.

* **negative**: [`FullTextQuery`](../interfaces/FullTextQuery.md)
The negative query that reduces the relevance score.

* **options?**
Optional parameters for the boost query.
- `negativeBoost`: The boost factor for the negative query (default is 0.0).

* **options.negativeBoost?**: `number`

#### Returns

[`BoostQuery`](BoostQuery.md)

## Methods

### queryType()

```ts
queryType(): FullTextQueryType
```

The type of the full-text query.

#### Returns

[`FullTextQueryType`](../enumerations/FullTextQueryType.md)

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)
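A usage sketch, not taken from the diff: it assumes an open `table` with a full-text-indexed `text` column, and relies on `search()` accepting a `FullTextQuery`, as the `search()` signature later in this diff shows:

```ts
import { MatchQuery, BoostQuery } from "@lancedb/lancedb";

// Prefer documents matching "lakehouse", penalize documents matching
// "legacy"; negativeBoost controls how strong the penalty is.
const boosted = new BoostQuery(
  new MatchQuery("lakehouse", "text"),
  new MatchQuery("legacy", "text"),
  { negativeBoost: 0.5 },
);
const rows = await table.search(boosted).limit(10).toArray();
```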
@@ -1,70 +0,0 @@ (entire file removed; its contents are shown below)

[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / MatchQuery

# Class: MatchQuery

Represents a full-text query interface.
This interface defines the structure and behavior for full-text queries,
including methods to retrieve the query type and convert the query to a dictionary format.

## Implements

- [`FullTextQuery`](../interfaces/FullTextQuery.md)

## Constructors

### new MatchQuery()

```ts
new MatchQuery(
   query,
   column,
   options?): MatchQuery
```

Creates an instance of MatchQuery.

#### Parameters

* **query**: `string`
The text query to search for.

* **column**: `string`
The name of the column to search within.

* **options?**
Optional parameters for the match query.
- `boost`: The boost factor for the query (default is 1.0).
- `fuzziness`: The fuzziness level for the query (default is 0).
- `maxExpansions`: The maximum number of terms to consider for fuzzy matching (default is 50).

* **options.boost?**: `number`

* **options.fuzziness?**: `number`

* **options.maxExpansions?**: `number`

#### Returns

[`MatchQuery`](MatchQuery.md)

## Methods

### queryType()

```ts
queryType(): FullTextQueryType
```

The type of the full-text query.

#### Returns

[`FullTextQueryType`](../enumerations/FullTextQueryType.md)

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)
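A usage sketch, not taken from the diff, assuming an open `table` with a full-text-indexed `text` column:

```ts
import { MatchQuery } from "@lancedb/lancedb";

// Fuzzy match tolerating one edit ("databse" still finds "database");
// maxExpansions caps how many fuzzy term variants are considered.
const q = new MatchQuery("databse", "text", { fuzziness: 1, maxExpansions: 20 });
const rows = await table.search(q).limit(5).toArray();
```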
@@ -33,22 +33,20 @@ Construct a MergeInsertBuilder. __Internal use only.__
 ### execute()
 
 ```ts
-execute(data, execOptions?): Promise<MergeResult>
+execute(data): Promise<void>
 ```
 
 Executes the merge insert operation
 
+Nothing is returned but the `Table` is updated
 
 #### Parameters
 
 * **data**: [`Data`](../type-aliases/Data.md)
 
-* **execOptions?**: `Partial`<[`WriteExecutionOptions`](../interfaces/WriteExecutionOptions.md)>
 
 #### Returns
 
-`Promise`<[`MergeResult`](../interfaces/MergeResult.md)>
+`Promise`<`void`>
 
-the merge result
 
 ***
 
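A usage sketch of the builder, assuming the usual merge-insert chain on an open `table` keyed by an `id` column; on the left-hand API the call resolves to a `MergeResult`, on the right-hand API to `void`:

```ts
// "Upsert": update rows whose id already exists, insert the rest.
const result = await table
  .mergeInsert("id")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute([{ id: 1, vector: [0.1, 0.2] }]);
```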
@@ -1,64 +0,0 @@ (entire file removed; its contents are shown below)

[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / MultiMatchQuery

# Class: MultiMatchQuery

Represents a full-text query interface.
This interface defines the structure and behavior for full-text queries,
including methods to retrieve the query type and convert the query to a dictionary format.

## Implements

- [`FullTextQuery`](../interfaces/FullTextQuery.md)

## Constructors

### new MultiMatchQuery()

```ts
new MultiMatchQuery(
   query,
   columns,
   options?): MultiMatchQuery
```

Creates an instance of MultiMatchQuery.

#### Parameters

* **query**: `string`
The text query to search for across multiple columns.

* **columns**: `string`[]
An array of column names to search within.

* **options?**
Optional parameters for the multi-match query.
- `boosts`: An array of boost factors for each column (default is 1.0 for all).

* **options.boosts?**: `number`[]

#### Returns

[`MultiMatchQuery`](MultiMatchQuery.md)

## Methods

### queryType()

```ts
queryType(): FullTextQueryType
```

The type of the full-text query.

#### Returns

[`FullTextQueryType`](../enumerations/FullTextQueryType.md)

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)
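A usage sketch, not taken from the diff; the `title` and `body` columns are hypothetical:

```ts
import { MultiMatchQuery } from "@lancedb/lancedb";

// Search two columns at once, weighting "title" twice as heavily as "body".
const q = new MultiMatchQuery("vector database", ["title", "body"], {
  boosts: [2.0, 1.0],
});
const rows = await table.search(q).limit(10).toArray();
```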
@@ -1,55 +0,0 @@ (entire file removed; its contents are shown below)

[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / PhraseQuery

# Class: PhraseQuery

Represents a full-text query interface.
This interface defines the structure and behavior for full-text queries,
including methods to retrieve the query type and convert the query to a dictionary format.

## Implements

- [`FullTextQuery`](../interfaces/FullTextQuery.md)

## Constructors

### new PhraseQuery()

```ts
new PhraseQuery(query, column): PhraseQuery
```

Creates an instance of `PhraseQuery`.

#### Parameters

* **query**: `string`
The phrase to search for in the specified column.

* **column**: `string`
The name of the column to search within.

#### Returns

[`PhraseQuery`](PhraseQuery.md)

## Methods

### queryType()

```ts
queryType(): FullTextQueryType
```

The type of the full-text query.

#### Returns

[`FullTextQueryType`](../enumerations/FullTextQueryType.md)

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)
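A usage sketch, not taken from the diff; unlike `MatchQuery`, the terms must appear together as an exact phrase. The `text` column is hypothetical:

```ts
import { PhraseQuery } from "@lancedb/lancedb";

// Only documents containing the exact phrase match.
const q = new PhraseQuery("open source vector database", "text");
const rows = await table.search(q).limit(10).toArray();
```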
@@ -30,53 +30,6 @@ protected inner: Query | Promise<Query>;
 
 ## Methods
 
-### analyzePlan()
-
-```ts
-analyzePlan(): Promise<string>
-```
-
-Executes the query and returns the physical query plan annotated with runtime metrics.
-
-This is useful for debugging and performance analysis, as it shows how the query was executed
-and includes metrics such as elapsed time, rows processed, and I/O statistics.
-
-#### Returns
-
-`Promise`<`string`>
-
-A query execution plan with runtime metrics for each step.
-
-#### Example
-
-```ts
-import * as lancedb from "@lancedb/lancedb"
-
-const db = await lancedb.connect("./.lancedb");
-const table = await db.createTable("my_table", [
-  { vector: [1.1, 0.9], id: "1" },
-]);
-
-const plan = await table.query().nearestTo([0.5, 0.2]).analyzePlan();
-
-Example output (with runtime metrics inlined):
-AnalyzeExec verbose=true, metrics=[]
-  ProjectionExec: expr=[id@3 as id, vector@0 as vector, _distance@2 as _distance], metrics=[output_rows=1, elapsed_compute=3.292µs]
-    Take: columns="vector, _rowid, _distance, (id)", metrics=[output_rows=1, elapsed_compute=66.001µs, batches_processed=1, bytes_read=8, iops=1, requests=1]
-      CoalesceBatchesExec: target_batch_size=1024, metrics=[output_rows=1, elapsed_compute=3.333µs]
-        GlobalLimitExec: skip=0, fetch=10, metrics=[output_rows=1, elapsed_compute=167ns]
-          FilterExec: _distance@2 IS NOT NULL, metrics=[output_rows=1, elapsed_compute=8.542µs]
-            SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], metrics=[output_rows=1, elapsed_compute=63.25µs, row_replacements=1]
-              KNNVectorDistance: metric=l2, metrics=[output_rows=1, elapsed_compute=114.333µs, output_batches=1]
-                LanceScan: uri=/path/to/data, projection=[vector], row_id=true, row_addr=false, ordered=false, metrics=[output_rows=1, elapsed_compute=103.626µs, bytes_read=549, iops=2, requests=2]
-```
-
-#### Inherited from
-
-[`QueryBase`](QueryBase.md).[`analyzePlan`](QueryBase.md#analyzeplan)
-
-***
-
 ### execute()
 
 ```ts
@@ -206,7 +159,7 @@ fullTextSearch(query, options?): this
 
 #### Parameters
 
-* **query**: `string` \| [`FullTextQuery`](../interfaces/FullTextQuery.md)
+* **query**: `string`
 
 * **options?**: `Partial`<[`FullTextSearchOptions`](../interfaces/FullTextSearchOptions.md)>
 
@@ -309,7 +262,7 @@ nearestToText(query, columns?): Query
 
 #### Parameters
 
-* **query**: `string` \| [`FullTextQuery`](../interfaces/FullTextQuery.md)
+* **query**: `string`
 
 * **columns?**: `string`[]
 
@@ -36,49 +36,6 @@ protected inner: NativeQueryType | Promise<NativeQueryType>;
 
 ## Methods
 
-### analyzePlan()
-
-```ts
-analyzePlan(): Promise<string>
-```
-
-Executes the query and returns the physical query plan annotated with runtime metrics.
-
-This is useful for debugging and performance analysis, as it shows how the query was executed
-and includes metrics such as elapsed time, rows processed, and I/O statistics.
-
-#### Returns
-
-`Promise`<`string`>
-
-A query execution plan with runtime metrics for each step.
-
-#### Example
-
-```ts
-import * as lancedb from "@lancedb/lancedb"
-
-const db = await lancedb.connect("./.lancedb");
-const table = await db.createTable("my_table", [
-  { vector: [1.1, 0.9], id: "1" },
-]);
-
-const plan = await table.query().nearestTo([0.5, 0.2]).analyzePlan();
-
-Example output (with runtime metrics inlined):
-AnalyzeExec verbose=true, metrics=[]
-  ProjectionExec: expr=[id@3 as id, vector@0 as vector, _distance@2 as _distance], metrics=[output_rows=1, elapsed_compute=3.292µs]
-    Take: columns="vector, _rowid, _distance, (id)", metrics=[output_rows=1, elapsed_compute=66.001µs, batches_processed=1, bytes_read=8, iops=1, requests=1]
-      CoalesceBatchesExec: target_batch_size=1024, metrics=[output_rows=1, elapsed_compute=3.333µs]
-        GlobalLimitExec: skip=0, fetch=10, metrics=[output_rows=1, elapsed_compute=167ns]
-          FilterExec: _distance@2 IS NOT NULL, metrics=[output_rows=1, elapsed_compute=8.542µs]
-            SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], metrics=[output_rows=1, elapsed_compute=63.25µs, row_replacements=1]
-              KNNVectorDistance: metric=l2, metrics=[output_rows=1, elapsed_compute=114.333µs, output_batches=1]
-                LanceScan: uri=/path/to/data, projection=[vector], row_id=true, row_addr=false, ordered=false, metrics=[output_rows=1, elapsed_compute=103.626µs, bytes_read=549, iops=2, requests=2]
-```
-
-***
-
 ### execute()
 
 ```ts
@@ -192,7 +149,7 @@ fullTextSearch(query, options?): this
 
 #### Parameters
 
-* **query**: `string` \| [`FullTextQuery`](../interfaces/FullTextQuery.md)
+* **query**: `string`
 
 * **options?**: `Partial`<[`FullTextSearchOptions`](../interfaces/FullTextSearchOptions.md)>
 
@@ -40,7 +40,7 @@ Returns the name of the table
 ### add()
 
 ```ts
-abstract add(data, options?): Promise<AddResult>
+abstract add(data, options?): Promise<void>
 ```
 
 Insert records into this Table.
@@ -54,17 +54,14 @@ Insert records into this Table.
 
 #### Returns
 
-`Promise`<[`AddResult`](../interfaces/AddResult.md)>
+`Promise`<`void`>
 
-A promise that resolves to an object
-containing the new version number of the table
 
 ***
 
 ### addColumns()
 
 ```ts
-abstract addColumns(newColumnTransforms): Promise<AddColumnsResult>
+abstract addColumns(newColumnTransforms): Promise<void>
 ```
 
 Add new columns with defined values.
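A sketch of what the richer left-hand return type enables; the `version` field name is an assumption based on the doc text above ("an object containing the new version number of the table"):

```ts
// With the AddResult-returning overload, the new table version is
// observable immediately after the write.
const res = await table.add([{ id: 2, vector: [0.3, 0.4] }]);
console.log(res.version);
```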
@@ -79,17 +76,14 @@ Add new columns with defined values.
 
 #### Returns
 
-`Promise`<[`AddColumnsResult`](../interfaces/AddColumnsResult.md)>
+`Promise`<`void`>
 
-A promise that resolves to an object
-containing the new version number of the table after adding the columns.
 
 ***
 
 ### alterColumns()
 
 ```ts
-abstract alterColumns(columnAlterations): Promise<AlterColumnsResult>
+abstract alterColumns(columnAlterations): Promise<void>
 ```
 
 Alter the name or nullability of columns.
@@ -102,10 +96,7 @@ Alter the name or nullability of columns.
 
 #### Returns
 
-`Promise`<[`AlterColumnsResult`](../interfaces/AlterColumnsResult.md)>
+`Promise`<`void`>
 
-A promise that resolves to an object
-containing the new version number of the table after altering the columns.
 
 ***
 
@@ -126,8 +117,8 @@ wish to return to standard mode, call `checkoutLatest`.
 
 #### Parameters
 
-* **version**: `string` \| `number`
-The version to checkout, could be version number or tag
+* **version**: `number`
+The version to checkout
 
 #### Returns
 
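A sketch of the left-hand overload; the tag name is hypothetical:

```ts
// With the string | number parameter, a table can be checked out either
// by version number or by tag.
await table.checkout(1);      // by version number
await table.checkout("v1");   // by tag
await table.checkoutLatest(); // return to standard mode
```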
@@ -261,7 +252,7 @@ await table.createIndex("my_float_col");
 ### delete()
 
 ```ts
-abstract delete(predicate): Promise<DeleteResult>
+abstract delete(predicate): Promise<void>
 ```
 
 Delete the rows that satisfy the predicate.
@@ -272,10 +263,7 @@ Delete the rows that satisfy the predicate.
 
 #### Returns
 
-`Promise`<[`DeleteResult`](../interfaces/DeleteResult.md)>
+`Promise`<`void`>
 
-A promise that resolves to an object
-containing the new version number of the table
 
 ***
 
@@ -296,7 +284,7 @@ Return a brief description of the table
 ### dropColumns()
 
 ```ts
-abstract dropColumns(columnNames): Promise<DropColumnsResult>
+abstract dropColumns(columnNames): Promise<void>
 ```
 
 Drop one or more columns from the dataset
@@ -315,10 +303,7 @@ then call ``cleanup_files`` to remove the old files.
 
 #### Returns
 
-`Promise`<[`DropColumnsResult`](../interfaces/DropColumnsResult.md)>
+`Promise`<`void`>
 
-A promise that resolves to an object
-containing the new version number of the table after dropping the columns.
 
 ***
 
@@ -469,28 +454,6 @@ Modeled after ``VACUUM`` in PostgreSQL.
 
 ***
 
-### prewarmIndex()
-
-```ts
-abstract prewarmIndex(name): Promise<void>
-```
-
-Prewarm an index in the table.
-
-#### Parameters
-
-* **name**: `string`
-The name of the index.
-This will load the index into memory. This may reduce the cold-start time for
-future queries. If the index does not fit in the cache then this call may be
-wasteful.
-
-#### Returns
-
-`Promise`<`void`>
-
-***
-
 ### query()
 
 ```ts
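A minimal sketch of the removed method; the index name is hypothetical:

```ts
// Load an index into memory ahead of time to cut cold-start latency for
// future queries; wasteful if the index does not fit in the cache.
await table.prewarmIndex("my_vector_idx");
```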
@@ -612,7 +575,7 @@ of the given query
 
 #### Parameters
 
-* **query**: `string` \| [`IntoVector`](../type-aliases/IntoVector.md) \| [`FullTextQuery`](../interfaces/FullTextQuery.md)
+* **query**: `string` \| [`IntoVector`](../type-aliases/IntoVector.md)
 the query, a vector or string
 
 * **queryType?**: `string`
@@ -630,50 +593,6 @@ of the given query
 
 ***
 
-### stats()
-
-```ts
-abstract stats(): Promise<TableStatistics>
-```
-
-Returns table and fragment statistics
-
-#### Returns
-
-`Promise`<[`TableStatistics`](../interfaces/TableStatistics.md)>
-
-The table and fragment statistics
-
-***
-
-### tags()
-
-```ts
-abstract tags(): Promise<Tags>
-```
-
-Get a tags manager for this table.
-
-Tags allow you to label specific versions of a table with a human-readable name.
-The returned tags manager can be used to list, create, update, or delete tags.
-
-#### Returns
-
-`Promise`<[`Tags`](Tags.md)>
-
-A tags manager for this table
-
-#### Example
-
-```typescript
-const tagsManager = await table.tags();
-await tagsManager.create("v1", 1);
-const tags = await tagsManager.list();
-console.log(tags); // { "v1": { version: 1, manifestSize: ... } }
-```
-
-***
-
 ### toArrow()
 
 ```ts
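A minimal sketch of the removed `stats()` method; the diff does not show the `TableStatistics` fields, so the result is logged as-is:

```ts
// Table and fragment statistics, useful alongside the tags example above.
const stats = await table.stats();
console.log(stats);
```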
@@ -693,7 +612,7 @@ Return the table as an arrow table
 #### update(opts)
 
 ```ts
-abstract update(opts): Promise<UpdateResult>
+abstract update(opts): Promise<void>
 ```
 
 Update existing records in the Table
@@ -704,10 +623,7 @@ Update existing records in the Table
 
 ##### Returns
 
-`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>
+`Promise`<`void`>
 
-A promise that resolves to an object containing
-the number of rows updated and the new version number
 
 ##### Example
 
@@ -718,7 +634,7 @@ table.update({where:"x = 2", values:{"vector": [10, 10]}})
 #### update(opts)
 
 ```ts
-abstract update(opts): Promise<UpdateResult>
+abstract update(opts): Promise<void>
 ```
 
 Update existing records in the Table
@@ -729,10 +645,7 @@ Update existing records in the Table
 
 ##### Returns
 
-`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>
+`Promise`<`void`>
 
-A promise that resolves to an object containing
-the number of rows updated and the new version number
 
 ##### Example
 
@@ -743,7 +656,7 @@ table.update({where:"x = 2", valuesSql:{"x": "x + 1"}})
 #### update(updates, options)
 
 ```ts
-abstract update(updates, options?): Promise<UpdateResult>
+abstract update(updates, options?): Promise<void>
 ```
 
 Update existing records in the Table
@@ -766,6 +679,10 @@ repeatedly calilng this method.
 * **updates**: `Record`<`string`, `string`> \| `Map`<`string`, `string`>
 the
 columns to update
+Keys in the map should specify the name of the column to update.
+Values in the map provide the new value of the column. These can
+be SQL literal strings (e.g. "7" or "'foo'") or they can be expressions
+based on the row being updated (e.g. "my_col + 1")
 
 * **options?**: `Partial`<[`UpdateOptions`](../interfaces/UpdateOptions.md)>
 additional options to control
@@ -773,15 +690,7 @@ repeatedly calilng this method.
 
 ##### Returns
 
-`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>
+`Promise`<`void`>
 
-A promise that resolves to an object
-containing the number of rows updated and the new version number
 
-Keys in the map should specify the name of the column to update.
-Values in the map provide the new value of the column. These can
-be SQL literal strings (e.g. "7" or "'foo'") or they can be expressions
-based on the row being updated (e.g. "my_col + 1")
 
 ***
 
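A sketch of the left-hand `UpdateResult` overload; the field names are assumptions based on the doc text ("the number of rows updated and the new version number"):

```ts
// SQL-expression update: keys name the columns, values are SQL strings
// evaluated per matching row.
const res = await table.update({ x: "x + 1" }, { where: "x = 2" });
console.log(res.rowsUpdated, res.version);
```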
@@ -822,26 +731,3 @@ Retrieve the version of the table
 #### Returns
 
 `Promise`<`number`>
 
-***
-
-### waitForIndex()
-
-```ts
-abstract waitForIndex(indexNames, timeoutSeconds): Promise<void>
-```
-
-Waits for asynchronous indexing to complete on the table.
-
-#### Parameters
-
-* **indexNames**: `string`[]
-The name of the indices to wait for
-
-* **timeoutSeconds**: `number`
-The number of seconds to wait before timing out
-This will raise an error if the indices are not created and fully indexed within the timeout.
-
-#### Returns
-
-`Promise`<`void`>
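A minimal sketch of the removed method; the index name is hypothetical:

```ts
// Block until asynchronous indexing completes, erroring if the listed
// indices are not created and fully indexed within 60 seconds.
await table.waitForIndex(["my_vector_idx"], 60);
```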
@@ -1,35 +0,0 @@ (entire file removed; its contents are shown below)

[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / TagContents

# Class: TagContents

## Constructors

### new TagContents()

```ts
new TagContents(): TagContents
```

#### Returns

[`TagContents`](TagContents.md)

## Properties

### manifestSize

```ts
manifestSize: number;
```

***

### version

```ts
version: number;
```
@@ -1,99 +0,0 @@ (entire file removed; its contents are shown below)

[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / Tags

# Class: Tags

## Constructors

### new Tags()

```ts
new Tags(): Tags
```

#### Returns

[`Tags`](Tags.md)

## Methods

### create()

```ts
create(tag, version): Promise<void>
```

#### Parameters

* **tag**: `string`

* **version**: `number`

#### Returns

`Promise`<`void`>

***

### delete()

```ts
delete(tag): Promise<void>
```

#### Parameters

* **tag**: `string`

#### Returns

`Promise`<`void`>

***

### getVersion()

```ts
getVersion(tag): Promise<number>
```

#### Parameters

* **tag**: `string`

#### Returns

`Promise`<`number`>

***

### list()

```ts
list(): Promise<Record<string, TagContents>>
```

#### Returns

`Promise`<`Record`<`string`, [`TagContents`](TagContents.md)>>

***

### update()

```ts
update(tag, version): Promise<void>
```

#### Parameters

* **tag**: `string`

* **version**: `number`

#### Returns

`Promise`<`void`>
@@ -48,53 +48,6 @@ addQueryVector(vector): VectorQuery
 
 ***
 
-### analyzePlan()
-
-```ts
-analyzePlan(): Promise<string>
-```
-
-Executes the query and returns the physical query plan annotated with runtime metrics.
-
-This is useful for debugging and performance analysis, as it shows how the query was executed
-and includes metrics such as elapsed time, rows processed, and I/O statistics.
-
-#### Returns
-
-`Promise`<`string`>
-
-A query execution plan with runtime metrics for each step.
-
-#### Example
-
-```ts
-import * as lancedb from "@lancedb/lancedb"
-
-const db = await lancedb.connect("./.lancedb");
-const table = await db.createTable("my_table", [
-  { vector: [1.1, 0.9], id: "1" },
-]);
-
-const plan = await table.query().nearestTo([0.5, 0.2]).analyzePlan();
-
-Example output (with runtime metrics inlined):
-AnalyzeExec verbose=true, metrics=[]
-  ProjectionExec: expr=[id@3 as id, vector@0 as vector, _distance@2 as _distance], metrics=[output_rows=1, elapsed_compute=3.292µs]
-    Take: columns="vector, _rowid, _distance, (id)", metrics=[output_rows=1, elapsed_compute=66.001µs, batches_processed=1, bytes_read=8, iops=1, requests=1]
-      CoalesceBatchesExec: target_batch_size=1024, metrics=[output_rows=1, elapsed_compute=3.333µs]
-        GlobalLimitExec: skip=0, fetch=10, metrics=[output_rows=1, elapsed_compute=167ns]
-          FilterExec: _distance@2 IS NOT NULL, metrics=[output_rows=1, elapsed_compute=8.542µs]
-            SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], metrics=[output_rows=1, elapsed_compute=63.25µs, row_replacements=1]
-              KNNVectorDistance: metric=l2, metrics=[output_rows=1, elapsed_compute=114.333µs, output_batches=1]
-                LanceScan: uri=/path/to/data, projection=[vector], row_id=true, row_addr=false, ordered=false, metrics=[output_rows=1, elapsed_compute=103.626µs, bytes_read=549, iops=2, requests=2]
-```
-
-#### Inherited from
-
-[`QueryBase`](QueryBase.md).[`analyzePlan`](QueryBase.md#analyzeplan)
-
-***
-
 ### bypassVectorIndex()
 
 ```ts
@@ -347,7 +300,7 @@ fullTextSearch(query, options?): this
 
 #### Parameters
 
-* **query**: `string` \| [`FullTextQuery`](../interfaces/FullTextQuery.md)
+* **query**: `string`
 
 * **options?**: `Partial`<[`FullTextSearchOptions`](../interfaces/FullTextSearchOptions.md)>
 
@@ -1,46 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / FullTextQueryType
-
-# Enumeration: FullTextQueryType
-
-Enum representing the types of full-text queries supported.
-
-- `Match`: Performs a full-text search for terms in the query string.
-- `MatchPhrase`: Searches for an exact phrase match in the text.
-- `Boost`: Boosts the relevance score of specific terms in the query.
-- `MultiMatch`: Searches across multiple fields for the query terms.
-
-## Enumeration Members
-
-### Boost
-
-```ts
-Boost: "boost";
-```
-
-***
-
-### Match
-
-```ts
-Match: "match";
-```
-
-***
-
-### MatchPhrase
-
-```ts
-MatchPhrase: "match_phrase";
-```
-
-***
-
-### MultiMatch
-
-```ts
-MultiMatch: "multi_match";
-```
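For context, a short sketch of how these query types are exercised through the `MatchQuery` and `PhraseQuery` classes that this diff removes; the constructor shape (query text first, then column name) is an assumption:

```ts
import { connect, MatchQuery, PhraseQuery } from "@lancedb/lancedb";

const db = await connect("./.lancedb");
const table = await db.openTable("docs");

// "match": terms may appear anywhere in the indexed column.
const matches = await table
  .query()
  .fullTextSearch(new MatchQuery("puppy dog", "text"))
  .toArray();

// "match_phrase": only the exact phrase matches.
const phrases = await table
  .query()
  .fullTextSearch(new PhraseQuery("puppy dog", "text"))
  .toArray();
```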
@@ -9,48 +9,30 @@
 - [embedding](namespaces/embedding/README.md)
 - [rerankers](namespaces/rerankers/README.md)
 
-## Enumerations
-
-- [FullTextQueryType](enumerations/FullTextQueryType.md)
-
 ## Classes
 
-- [BoostQuery](classes/BoostQuery.md)
 - [Connection](classes/Connection.md)
 - [Index](classes/Index.md)
 - [MakeArrowTableOptions](classes/MakeArrowTableOptions.md)
-- [MatchQuery](classes/MatchQuery.md)
 - [MergeInsertBuilder](classes/MergeInsertBuilder.md)
-- [MultiMatchQuery](classes/MultiMatchQuery.md)
-- [PhraseQuery](classes/PhraseQuery.md)
 - [Query](classes/Query.md)
 - [QueryBase](classes/QueryBase.md)
 - [RecordBatchIterator](classes/RecordBatchIterator.md)
 - [Table](classes/Table.md)
-- [TagContents](classes/TagContents.md)
-- [Tags](classes/Tags.md)
 - [VectorColumnOptions](classes/VectorColumnOptions.md)
 - [VectorQuery](classes/VectorQuery.md)
 
 ## Interfaces
 
-- [AddColumnsResult](interfaces/AddColumnsResult.md)
 - [AddColumnsSql](interfaces/AddColumnsSql.md)
 - [AddDataOptions](interfaces/AddDataOptions.md)
-- [AddResult](interfaces/AddResult.md)
-- [AlterColumnsResult](interfaces/AlterColumnsResult.md)
 - [ClientConfig](interfaces/ClientConfig.md)
 - [ColumnAlteration](interfaces/ColumnAlteration.md)
 - [CompactionStats](interfaces/CompactionStats.md)
 - [ConnectionOptions](interfaces/ConnectionOptions.md)
 - [CreateTableOptions](interfaces/CreateTableOptions.md)
-- [DeleteResult](interfaces/DeleteResult.md)
-- [DropColumnsResult](interfaces/DropColumnsResult.md)
 - [ExecutableQuery](interfaces/ExecutableQuery.md)
-- [FragmentStatistics](interfaces/FragmentStatistics.md)
-- [FragmentSummaryStats](interfaces/FragmentSummaryStats.md)
 - [FtsOptions](interfaces/FtsOptions.md)
-- [FullTextQuery](interfaces/FullTextQuery.md)
 - [FullTextSearchOptions](interfaces/FullTextSearchOptions.md)
 - [HnswPqOptions](interfaces/HnswPqOptions.md)
 - [HnswSqOptions](interfaces/HnswSqOptions.md)
@@ -59,7 +41,6 @@
 - [IndexStatistics](interfaces/IndexStatistics.md)
 - [IvfFlatOptions](interfaces/IvfFlatOptions.md)
 - [IvfPqOptions](interfaces/IvfPqOptions.md)
-- [MergeResult](interfaces/MergeResult.md)
 - [OpenTableOptions](interfaces/OpenTableOptions.md)
 - [OptimizeOptions](interfaces/OptimizeOptions.md)
 - [OptimizeStats](interfaces/OptimizeStats.md)
@@ -67,12 +48,9 @@
 - [RemovalStats](interfaces/RemovalStats.md)
 - [RetryConfig](interfaces/RetryConfig.md)
 - [TableNamesOptions](interfaces/TableNamesOptions.md)
-- [TableStatistics](interfaces/TableStatistics.md)
 - [TimeoutConfig](interfaces/TimeoutConfig.md)
 - [UpdateOptions](interfaces/UpdateOptions.md)
-- [UpdateResult](interfaces/UpdateResult.md)
 - [Version](interfaces/Version.md)
-- [WriteExecutionOptions](interfaces/WriteExecutionOptions.md)
 
 ## Type Aliases
 
@@ -1,15 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / AddColumnsResult
-
-# Interface: AddColumnsResult
-
-## Properties
-
-### version
-
-```ts
-version: number;
-```
@@ -1,15 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / AddResult
-
-# Interface: AddResult
-
-## Properties
-
-### version
-
-```ts
-version: number;
-```
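The result interfaces above all carry the table version produced by the write. A minimal sketch, mirroring the assertions removed from `table.test.ts` later in this diff:

```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("./.lancedb");
const table = await db.createTable("items", [{ id: 0 }]);

// add() resolves to an AddResult; version 1 was the createTable commit.
const addRes = await table.add([{ id: 1 }, { id: 2 }]);
console.log(addRes.version); // 2
```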
@@ -1,15 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / AlterColumnsResult
-
-# Interface: AlterColumnsResult
-
-## Properties
-
-### version
-
-```ts
-version: number;
-```
@@ -1,15 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / DeleteResult
-
-# Interface: DeleteResult
-
-## Properties
-
-### version
-
-```ts
-version: number;
-```
@@ -1,15 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / DropColumnsResult
-
-# Interface: DropColumnsResult
-
-## Properties
-
-### version
-
-```ts
-version: number;
-```
@@ -1,37 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / FragmentStatistics
-
-# Interface: FragmentStatistics
-
-## Properties
-
-### lengths
-
-```ts
-lengths: FragmentSummaryStats;
-```
-
-Statistics on the number of rows in the table fragments
-
-***
-
-### numFragments
-
-```ts
-numFragments: number;
-```
-
-The number of fragments in the table
-
-***
-
-### numSmallFragments
-
-```ts
-numSmallFragments: number;
-```
-
-The number of uncompacted fragments in the table
@@ -1,77 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / FragmentSummaryStats
-
-# Interface: FragmentSummaryStats
-
-## Properties
-
-### max
-
-```ts
-max: number;
-```
-
-The number of rows in the fragment with the most rows
-
-***
-
-### mean
-
-```ts
-mean: number;
-```
-
-The mean number of rows in the fragments
-
-***
-
-### min
-
-```ts
-min: number;
-```
-
-The number of rows in the fragment with the fewest rows
-
-***
-
-### p25
-
-```ts
-p25: number;
-```
-
-The 25th percentile of number of rows in the fragments
-
-***
-
-### p50
-
-```ts
-p50: number;
-```
-
-The 50th percentile of number of rows in the fragments
-
-***
-
-### p75
-
-```ts
-p75: number;
-```
-
-The 75th percentile of number of rows in the fragments
-
-***
-
-### p99
-
-```ts
-p99: number;
-```
-
-The 99th percentile of number of rows in the fragments
@@ -1,25 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / FullTextQuery
-
-# Interface: FullTextQuery
-
-Represents a full-text query interface.
-This interface defines the structure and behavior for full-text queries,
-including methods to retrieve the query type and convert the query to a dictionary format.
-
-## Methods
-
-### queryType()
-
-```ts
-queryType(): FullTextQueryType
-```
-
-The type of the full-text query.
-
-#### Returns
-
-[`FullTextQueryType`](../enumerations/FullTextQueryType.md)
@@ -39,11 +39,3 @@ and the same name, then an error will be returned. This is true even if
 that index is out of date.
 
 The default is true
-
-***
-
-### waitTimeoutSeconds?
-
-```ts
-optional waitTimeoutSeconds: number;
-```
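A sketch of both ways to wait on an index build, grounded in the tests removed later in this diff; the column name `vec` is hypothetical:

```ts
import * as lancedb from "@lancedb/lancedb";
import { Index } from "@lancedb/lancedb";

const db = await lancedb.connect("./.lancedb");
const table = await db.openTable("my_table");

// Block createIndex itself until the index is ready (or 30 s elapse).
await table.createIndex("vec", {
  config: Index.ivfPq({ numPartitions: 2, numSubVectors: 2 }),
  waitTimeoutSeconds: 30,
});

// Or create first, then wait on the named indices explicitly.
const indices = await table.listIndices();
await table.waitForIndex([indices[0].name], 5);
```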
@@ -1,39 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / MergeResult
-
-# Interface: MergeResult
-
-## Properties
-
-### numDeletedRows
-
-```ts
-numDeletedRows: number;
-```
-
-***
-
-### numInsertedRows
-
-```ts
-numInsertedRows: number;
-```
-
-***
-
-### numUpdatedRows
-
-```ts
-numUpdatedRows: number;
-```
-
-***
-
-### version
-
-```ts
-version: number;
-```
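A sketch of where `MergeResult` comes from, following the merge-insert test that this diff removes (assuming a `Table` handle `table` opened as in the earlier sketches):

```ts
const mergeRes = await table
  .mergeInsert("a") // match rows on column "a"
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute([
    { a: 2, b: "x" },
    { a: 4, b: "z" },
  ]);

// Row counts and the new table version, per the interface above.
console.log(mergeRes.numInsertedRows, mergeRes.numUpdatedRows);
console.log(mergeRes.numDeletedRows, mergeRes.version);
```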
@@ -20,13 +20,3 @@ The maximum number of rows to return in a single batch
 
 Batches may have fewer rows if the underlying data is stored
 in smaller chunks.
-
-***
-
-### timeoutMs?
-
-```ts
-optional timeoutMs: number;
-```
-
-Timeout for query execution in milliseconds
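The removed `timeoutMs` option is the per-query counterpart of the write timeout; the tests deleted later in this diff pass it per call. A sketch, assuming a `table` opened as above:

```ts
// With timeoutMs: 0 the call rejects with "Query timeout"; a positive
// value bounds execution instead.
const rows = await table
  .query()
  .where("id > 0")
  .toArray({ timeoutMs: 5_000 });
```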
@@ -1,47 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / TableStatistics
-
-# Interface: TableStatistics
-
-## Properties
-
-### fragmentStats
-
-```ts
-fragmentStats: FragmentStatistics;
-```
-
-Statistics on table fragments
-
-***
-
-### numIndices
-
-```ts
-numIndices: number;
-```
-
-The number of indices in the table
-
-***
-
-### numRows
-
-```ts
-numRows: number;
-```
-
-The number of rows in the table
-
-***
-
-### totalBytes
-
-```ts
-totalBytes: number;
-```
-
-The total number of bytes in the table
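A sketch of reading these statistics, matching the `table.stats()` expectations removed from `table.test.ts` later in this diff (assuming a `table` opened as above):

```ts
const stats = await table.stats();
console.log(stats.numRows, stats.totalBytes, stats.numIndices);
console.log(stats.fragmentStats.numFragments);
console.log(stats.fragmentStats.lengths.min, stats.fragmentStats.lengths.p50);
```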
@@ -1,23 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / UpdateResult
-
-# Interface: UpdateResult
-
-## Properties
-
-### rowsUpdated
-
-```ts
-rowsUpdated: number;
-```
-
-***
-
-### version
-
-```ts
-version: number;
-```
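A sketch matching the update assertions removed from `table.test.ts` later in this diff (assuming a `table` opened as above):

```ts
// update() resolves to an UpdateResult with both fields above.
const updateRes = await table.update({ id: "7" });
console.log(updateRes.rowsUpdated, updateRes.version);
```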
@@ -1,26 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / WriteExecutionOptions
-
-# Interface: WriteExecutionOptions
-
-## Properties
-
-### timeoutMs?
-
-```ts
-optional timeoutMs: number;
-```
-
-Maximum time to run the operation before cancelling it.
-
-By default, there is a 30-second timeout that is only enforced after the
-first attempt. This is to prevent spending too long retrying to resolve
-conflicts. For example, if a write attempt takes 20 seconds and fails,
-the second attempt will be cancelled after 10 seconds, hitting the
-30-second timeout. However, a write that takes one hour and succeeds on the
-first attempt will not be cancelled.
-
-When this is set, the timeout is enforced on all attempts, including the first.
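A sketch of passing these options to a write, taken from the merge-insert call this diff removes (assuming `table` and `newData` as in the earlier sketches); with `timeoutMs` set, the deadline applies to every attempt, including the first:

```ts
await table
  .mergeInsert("a")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute(newData, { timeoutMs: 10_000 });
```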
@@ -35,9 +35,3 @@ print the resolved query plan. You can use the `explain_plan` method to do this:
 * Python Sync: [LanceQueryBuilder.explain_plan][lancedb.query.LanceQueryBuilder.explain_plan]
 * Python Async: [AsyncQueryBase.explain_plan][lancedb.query.AsyncQueryBase.explain_plan]
 * Node @lancedb/lancedb: [LanceQueryBuilder.explainPlan](/lancedb/js/classes/QueryBase/#explainplan)
-
-To understand how a query was actually executed—including metrics like execution time, number of rows processed, I/O stats, and more—use the analyze_plan method. This executes the query and returns a physical execution plan annotated with runtime metrics, making it especially helpful for performance tuning and debugging.
-
-* Python Sync: [LanceQueryBuilder.analyze_plan][lancedb.query.LanceQueryBuilder.analyze_plan]
-* Python Async: [AsyncQueryBase.analyze_plan][lancedb.query.AsyncQueryBase.analyze_plan]
-* Node @lancedb/lancedb: [LanceQueryBuilder.analyzePlan](/lancedb/js/classes/QueryBase/#analyzePlan)
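A side-by-side sketch of the two Node methods listed above; `explainPlan` only plans the query, while `analyzePlan` executes it and inlines runtime metrics (assuming a `table` with a vector column, as in the earlier sketches):

```ts
const planned = await table.query().nearestTo([0.5, 0.2]).explainPlan();
const measured = await table.query().nearestTo([0.5, 0.2]).analyzePlan();
console.log(planned);  // resolved plan, no execution
console.log(measured); // plan annotated with metrics=[...] per operator
```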
@@ -8,16 +8,13 @@
   <parent>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-parent</artifactId>
-    <version>0.19.1-beta.5</version>
+    <version>0.18.2-beta.1</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
   <artifactId>lancedb-core</artifactId>
   <name>LanceDB Core</name>
   <packaging>jar</packaging>
-  <properties>
-    <rust.release.build>false</rust.release.build>
-  </properties>
 
   <dependencies>
     <dependency>
@@ -71,7 +68,7 @@
             </goals>
             <configuration>
               <path>lancedb-jni</path>
-              <release>${rust.release.build}</release>
+              <release>true</release>
               <!-- Copy native libraries to target/classes for runtime access -->
               <copyTo>${project.build.directory}/classes/nativelib</copyTo>
               <copyWithPlatformDir>true</copyWithPlatformDir>
@@ -1,25 +1,16 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright The LanceDB Authors
 package com.lancedb.lancedb;
 
 import io.questdb.jar.jni.JarJniLoader;
 
 import java.io.Closeable;
 import java.util.List;
 import java.util.Optional;
 
-/** Represents LanceDB database. */
+/**
+ * Represents LanceDB database.
+ */
 public class Connection implements Closeable {
   static {
     JarJniLoader.loadLib(Connection.class, "/nativelib", "lancedb_jni");
@@ -27,11 +18,14 @@ public class Connection implements Closeable {
 
   private long nativeConnectionHandle;
 
-  /** Connect to a LanceDB instance. */
+  /**
+   * Connect to a LanceDB instance.
+   */
   public static native Connection connect(String uri);
 
   /**
-   * Get the names of all tables in the database. The names are sorted in ascending order.
+   * Get the names of all tables in the database. The names are sorted in
+   * ascending order.
    *
    * @return the table names
    */
@@ -40,7 +34,8 @@ public class Connection implements Closeable {
   }
 
   /**
-   * Get the names of filtered tables in the database. The names are sorted in ascending order.
+   * Get the names of filtered tables in the database. The names are sorted in
+   * ascending order.
    *
    * @param limit The number of results to return.
    * @return the table names
@@ -50,11 +45,12 @@ public class Connection implements Closeable {
   }
 
   /**
-   * Get the names of filtered tables in the database. The names are sorted in ascending order.
+   * Get the names of filtered tables in the database. The names are sorted in
+   * ascending order.
    *
    * @param startAfter If present, only return names that come lexicographically after the supplied
-   *     value. This can be combined with limit to implement pagination by setting this to the last
-   *     table name from the previous page.
+   *     value. This can be combined with limit to implement pagination
+   *     by setting this to the last table name from the previous page.
    * @return the table names
    */
   public List<String> tableNames(String startAfter) {
@@ -62,11 +58,12 @@ public class Connection implements Closeable {
   }
 
   /**
-   * Get the names of filtered tables in the database. The names are sorted in ascending order.
+   * Get the names of filtered tables in the database. The names are sorted in
+   * ascending order.
    *
    * @param startAfter If present, only return names that come lexicographically after the supplied
-   *     value. This can be combined with limit to implement pagination by setting this to the last
-   *     table name from the previous page.
+   *     value. This can be combined with limit to implement pagination
+   *     by setting this to the last table name from the previous page.
   * @param limit The number of results to return.
   * @return the table names
   */
@@ -75,19 +72,22 @@ public class Connection implements Closeable {
   }
 
   /**
-   * Get the names of filtered tables in the database. The names are sorted in ascending order.
+   * Get the names of filtered tables in the database. The names are sorted in
+   * ascending order.
    *
    * @param startAfter If present, only return names that come lexicographically after the supplied
-   *     value. This can be combined with limit to implement pagination by setting this to the last
-   *     table name from the previous page.
+   *     value. This can be combined with limit to implement pagination
+   *     by setting this to the last table name from the previous page.
    * @param limit The number of results to return.
    * @return the table names
   */
-  public native List<String> tableNames(Optional<String> startAfter, Optional<Integer> limit);
+  public native List<String> tableNames(
+      Optional<String> startAfter, Optional<Integer> limit);
 
   /**
-   * Closes this connection and releases any system resources associated with it. If the connection
-   * is already closed, then invoking this method has no effect.
+   * Closes this connection and releases any system resources associated with it. If
+   * the connection is
+   * already closed, then invoking this method has no effect.
   */
   @Override
   public void close() {
@@ -98,7 +98,8 @@ public class Connection implements Closeable {
   }
 
   /**
-   * Native method to release the Lance connection resources associated with the given handle.
+   * Native method to release the Lance connection resources associated with the
+   * given handle.
    *
    * @param handle The native handle to the connection resource.
   */
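The TypeScript client exposes the same pagination through `TableNamesOptions` (`startAfter`, `limit`), listed among the interfaces earlier in this diff. A minimal sketch, assuming `tableNames` accepts a partial `TableNamesOptions`:

```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("./.lancedb");

// First page, then the page starting after the last name we saw.
const firstPage = await db.tableNames({ limit: 100 });
const nextPage = await db.tableNames({
  startAfter: firstPage[firstPage.length - 1],
  limit: 100,
});
```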
@@ -1,35 +1,27 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright The LanceDB Authors
 package com.lancedb.lancedb;
 
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.io.TempDir;
-
-import java.net.URL;
-import java.nio.file.Path;
-import java.util.List;
-
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
+import java.nio.file.Path;
+import java.util.List;
+import java.net.URL;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
 public class ConnectionTest {
   private static final String[] TABLE_NAMES = {
-    "dataset_version", "new_empty_dataset", "test", "write_stream"
+    "dataset_version",
+    "new_empty_dataset",
+    "test",
+    "write_stream"
   };
 
-  @TempDir static Path tempDir; // Temporary directory for the tests
+  @TempDir
+  static Path tempDir; // Temporary directory for the tests
   private static URL lanceDbURL;
 
   @BeforeAll
@@ -61,21 +53,18 @@ public class ConnectionTest {
   @Test
   void tableNamesStartAfter() {
     try (Connection conn = Connection.connect(lanceDbURL.toString())) {
-      assertTableNamesStartAfter(
-          conn, TABLE_NAMES[0], 3, TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
+      assertTableNamesStartAfter(conn, TABLE_NAMES[0], 3, TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
       assertTableNamesStartAfter(conn, TABLE_NAMES[1], 2, TABLE_NAMES[2], TABLE_NAMES[3]);
       assertTableNamesStartAfter(conn, TABLE_NAMES[2], 1, TABLE_NAMES[3]);
       assertTableNamesStartAfter(conn, TABLE_NAMES[3], 0);
-      assertTableNamesStartAfter(
-          conn, "a_dataset", 4, TABLE_NAMES[0], TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
+      assertTableNamesStartAfter(conn, "a_dataset", 4, TABLE_NAMES[0], TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
      assertTableNamesStartAfter(conn, "o_dataset", 2, TABLE_NAMES[2], TABLE_NAMES[3]);
       assertTableNamesStartAfter(conn, "v_dataset", 1, TABLE_NAMES[3]);
       assertTableNamesStartAfter(conn, "z_dataset", 0);
     }
   }
 
-  private void assertTableNamesStartAfter(
-      Connection conn, String startAfter, int expectedSize, String... expectedNames) {
+  private void assertTableNamesStartAfter(Connection conn, String startAfter, int expectedSize, String... expectedNames) {
     List<String> tableNames = conn.tableNames(startAfter);
     assertEquals(expectedSize, tableNames.size());
     for (int i = 0; i < expectedNames.length; i++) {
java/pom.xml
@@ -6,7 +6,7 @@
 
   <groupId>com.lancedb</groupId>
   <artifactId>lancedb-parent</artifactId>
-  <version>0.19.1-beta.5</version>
+  <version>0.18.2-beta.1</version>
   <packaging>pom</packaging>
 
   <name>LanceDB Parent</name>
@@ -29,25 +29,6 @@
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <arrow.version>15.0.0</arrow.version>
-    <spotless.skip>false</spotless.skip>
-    <spotless.version>2.30.0</spotless.version>
-    <spotless.java.googlejavaformat.version>1.7</spotless.java.googlejavaformat.version>
-    <spotless.delimiter>package</spotless.delimiter>
-    <spotless.license.header>
-      /*
-      * Licensed under the Apache License, Version 2.0 (the "License");
-      * you may not use this file except in compliance with the License.
-      * You may obtain a copy of the License at
-      *
-      * http://www.apache.org/licenses/LICENSE-2.0
-      *
-      * Unless required by applicable law or agreed to in writing, software
-      * distributed under the License is distributed on an "AS IS" BASIS,
-      * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      * See the License for the specific language governing permissions and
-      * limitations under the License.
-      */
-    </spotless.license.header>
   </properties>
 
   <modules>
@@ -146,8 +127,7 @@
         <configuration>
           <configLocation>google_checks.xml</configLocation>
           <consoleOutput>true</consoleOutput>
-          <failsOnError>false</failsOnError>
-          <failOnViolation>false</failOnViolation>
+          <failsOnError>true</failsOnError>
           <violationSeverity>warning</violationSeverity>
           <linkXRef>false</linkXRef>
         </configuration>
@@ -161,10 +141,6 @@
         </execution>
       </executions>
     </plugin>
-    <plugin>
-      <groupId>com.diffplug.spotless</groupId>
-      <artifactId>spotless-maven-plugin</artifactId>
-    </plugin>
   </plugins>
   <pluginManagement>
     <plugins>
@@ -203,54 +179,6 @@
         <artifactId>maven-install-plugin</artifactId>
         <version>2.5.2</version>
       </plugin>
-      <plugin>
-        <groupId>com.diffplug.spotless</groupId>
-        <artifactId>spotless-maven-plugin</artifactId>
-        <version>${spotless.version}</version>
-        <configuration>
-          <skip>${spotless.skip}</skip>
-          <upToDateChecking>
-            <enabled>true</enabled>
-          </upToDateChecking>
-          <java>
-            <includes>
-              <include>src/main/java/**/*.java</include>
-              <include>src/test/java/**/*.java</include>
-            </includes>
-            <googleJavaFormat>
-              <version>${spotless.java.googlejavaformat.version}</version>
-              <style>GOOGLE</style>
-            </googleJavaFormat>
-
-            <importOrder>
-              <order>com.lancedb.lance,,javax,java,\#</order>
-            </importOrder>
-
-            <removeUnusedImports />
-          </java>
-          <scala>
-            <includes>
-              <include>src/main/scala/**/*.scala</include>
-              <include>src/main/scala-*/**/*.scala</include>
-              <include>src/test/scala/**/*.scala</include>
-              <include>src/test/scala-*/**/*.scala</include>
-            </includes>
-          </scala>
-          <licenseHeader>
-            <content>${spotless.license.header}</content>
-            <delimiter>${spotless.delimiter}</delimiter>
-          </licenseHeader>
-        </configuration>
-        <executions>
-          <execution>
-            <id>spotless-check</id>
-            <phase>validate</phase>
-            <goals>
-              <goal>apply</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
     </plugins>
   </pluginManagement>
 </build>
node/package-lock.json (generated)
@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.19.1-beta.5",
+  "version": "0.18.2-beta.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.19.1-beta.5",
+      "version": "0.18.2-beta.0",
       "cpu": [
         "x64",
         "arm64"
@@ -52,11 +52,11 @@
         "uuid": "^9.0.0"
       },
       "optionalDependencies": {
-        "@lancedb/vectordb-darwin-arm64": "0.19.1-beta.5",
-        "@lancedb/vectordb-darwin-x64": "0.19.1-beta.5",
-        "@lancedb/vectordb-linux-arm64-gnu": "0.19.1-beta.5",
-        "@lancedb/vectordb-linux-x64-gnu": "0.19.1-beta.5",
-        "@lancedb/vectordb-win32-x64-msvc": "0.19.1-beta.5"
+        "@lancedb/vectordb-darwin-arm64": "0.18.2-beta.0",
+        "@lancedb/vectordb-darwin-x64": "0.18.2-beta.0",
+        "@lancedb/vectordb-linux-arm64-gnu": "0.18.2-beta.0",
+        "@lancedb/vectordb-linux-x64-gnu": "0.18.2-beta.0",
+        "@lancedb/vectordb-win32-x64-msvc": "0.18.2-beta.0"
       },
       "peerDependencies": {
         "@apache-arrow/ts": "^14.0.2",
@@ -327,9 +327,9 @@
       }
     },
     "node_modules/@lancedb/vectordb-darwin-arm64": {
-      "version": "0.19.1-beta.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.19.1-beta.5.tgz",
-      "integrity": "sha512-9WcTw67We5HYGayDt5jFquGoyAVzFSt/I65ag8+q7H9q4ZYKxeDhgNyQZJ8BmXEvbJtnYtYBSAtTEdFKYMce6w==",
+      "version": "0.18.2-beta.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.18.2-beta.0.tgz",
+      "integrity": "sha512-FzIcElkS6R5I5kU1S5m7yLVTB1Duv1XcmZQtVmYl/JjNlfxS1WTtMzdzMqSBFohDcgU2Tkc5+1FpK1B94dUUbg==",
       "cpu": [
         "arm64"
       ],
@@ -340,9 +340,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-darwin-x64": {
-      "version": "0.19.1-beta.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.19.1-beta.5.tgz",
-      "integrity": "sha512-6Pe3PxEMi0VKGsu5R7IhOxTijUM3b5olRAqhxfcu5ti34gXIPNtu7g+T9lS78LKe+0D0v2BjZEY/JQakIFBNRw==",
+      "version": "0.18.2-beta.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.18.2-beta.0.tgz",
+      "integrity": "sha512-jv+XludfLNBDm1DjdqyghwDMtd4E+ygwycQpkpK72wyZSh6Qytrgq+4dNi/zCZ3UChFLbKbIxrVxv9yENQn2Pg==",
       "cpu": [
         "x64"
       ],
@@ -353,9 +353,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
-      "version": "0.19.1-beta.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.19.1-beta.5.tgz",
-      "integrity": "sha512-VJbBd+Y+6L2SREaOO1OzuUfTPHXyHE4AcsZuM6VMyoeX8k7lPnaA+vNk96o0w4V2KFEAI6o4QPgrRAXmMAzmbg==",
+      "version": "0.18.2-beta.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.18.2-beta.0.tgz",
+      "integrity": "sha512-8/fBpbNYhhpetf/pZv0DyPnQkeAbsiICMyCoRiNu5auvQK4AsGF1XvLWrDi68u9F0GysBKvuatYuGqa/yh+Anw==",
       "cpu": [
         "arm64"
       ],
@@ -366,9 +366,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-linux-x64-gnu": {
-      "version": "0.19.1-beta.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.19.1-beta.5.tgz",
-      "integrity": "sha512-3wS8Zn5NmHoszXfrY4JzMimHoh5LAmVi3pTX4gD+C9kVGoUJcDBP7/CrAbjnAz7VzzAIPmz8kvBuPz8l9X4hjw==",
+      "version": "0.18.2-beta.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.18.2-beta.0.tgz",
+      "integrity": "sha512-7a1Kc/2V2ff4HlLzXyXVdK0Z0VIFUt50v2SBRdlcycJ0NLW9ZqV+9UjB/NAOwMXVgYd7d3rKjACGkQzkpvcyeg==",
       "cpu": [
         "x64"
       ],
@@ -379,9 +379,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-win32-x64-msvc": {
-      "version": "0.19.1-beta.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.19.1-beta.5.tgz",
-      "integrity": "sha512-TemM9cvrPa2jFCjvYmKnrL0DTHegi/+LOQ3No9nPDHie2ka2fM9O2q60fAbYsYz+Mo9aV7MvL49ATbNCyl9MLA==",
+      "version": "0.18.2-beta.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.18.2-beta.0.tgz",
+      "integrity": "sha512-EeCiSf2RtJMESnkIca28GI6rAStYj2q9sVIyNCXpmIZSkJVpfQ3iswHGAbHrEfaPl0J1Re9cnRHLLuqkumwiIQ==",
       "cpu": [
         "x64"
       ],
@@ -1184,10 +1184,9 @@
       }
     },
     "node_modules/axios": {
-      "version": "1.8.4",
-      "resolved": "https://registry.npmjs.org/axios/-/axios-1.8.4.tgz",
-      "integrity": "sha512-eBSYY4Y68NNlHbHBMdeDmKNtDgXWhQsJcGqzO3iLUM0GraQFSS9cVgPX5I9b3lbdFKyYoAEGAZF1DwhTaljNAw==",
-      "license": "MIT",
+      "version": "1.7.7",
+      "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.7.tgz",
+      "integrity": "sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q==",
       "dependencies": {
         "follow-redirects": "^1.15.6",
         "form-data": "^4.0.0",
@@ -1,6 +1,6 @@
 {
   "name": "vectordb",
-  "version": "0.19.1-beta.5",
+  "version": "0.18.2-beta.1",
   "description": " Serverless, low-latency vector database for AI applications",
   "private": false,
   "main": "dist/index.js",
@@ -89,10 +89,10 @@
     }
   },
   "optionalDependencies": {
-    "@lancedb/vectordb-darwin-x64": "0.19.1-beta.5",
-    "@lancedb/vectordb-darwin-arm64": "0.19.1-beta.5",
-    "@lancedb/vectordb-linux-x64-gnu": "0.19.1-beta.5",
-    "@lancedb/vectordb-linux-arm64-gnu": "0.19.1-beta.5",
-    "@lancedb/vectordb-win32-x64-msvc": "0.19.1-beta.5"
+    "@lancedb/vectordb-darwin-x64": "0.18.2-beta.1",
+    "@lancedb/vectordb-darwin-arm64": "0.18.2-beta.1",
+    "@lancedb/vectordb-linux-x64-gnu": "0.18.2-beta.1",
+    "@lancedb/vectordb-linux-arm64-gnu": "0.18.2-beta.1",
+    "@lancedb/vectordb-win32-x64-msvc": "0.18.2-beta.1"
   }
 }
@@ -1,7 +1,7 @@
 [package]
 name = "lancedb-nodejs"
 edition.workspace = true
-version = "0.19.1-beta.5"
+version = "0.18.2-beta.1"
 license.workspace = true
 description.workspace = true
 repository.workspace = true
@@ -28,9 +28,6 @@ napi-derive = "2.16.4"
 lzma-sys = { version = "*", features = ["static"] }
 log.workspace = true
-
-# Workaround for build failure until we can fix it.
-aws-lc-sys = "=0.28.0"
 
 [build-dependencies]
 napi-build = "2.1"
 
@@ -374,71 +374,6 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
     expect(table2.numRows).toBe(4);
     expect(table2.schema).toEqual(schema);
   });
-
-  it("should correctly retain values in nested struct fields", async function () {
-    // Define test data with nested struct
-    const testData = [
-      {
-        id: "doc1",
-        vector: [1, 2, 3],
-        metadata: {
-          filePath: "/path/to/file1.ts",
-          startLine: 10,
-          endLine: 20,
-          text: "function test() { return true; }",
-        },
-      },
-      {
-        id: "doc2",
-        vector: [4, 5, 6],
-        metadata: {
-          filePath: "/path/to/file2.ts",
-          startLine: 30,
-          endLine: 40,
-          text: "function test2() { return false; }",
-        },
-      },
-    ];
-
-    // Create Arrow table from the data
-    const table = makeArrowTable(testData);
-
-    // Verify schema has the nested struct fields
-    const metadataField = table.schema.fields.find(
-      (f) => f.name === "metadata",
-    );
-    expect(metadataField).toBeDefined();
-    // biome-ignore lint/suspicious/noExplicitAny: accessing fields in different Arrow versions
-    const childNames = metadataField?.type.children.map((c: any) => c.name);
-    expect(childNames).toEqual([
-      "filePath",
-      "startLine",
-      "endLine",
-      "text",
-    ]);
-
-    // Convert to buffer and back (simulating storage and retrieval)
-    const buf = await fromTableToBuffer(table);
-    const retrievedTable = tableFromIPC(buf);
-
-    // Verify the retrieved table has the same structure
-    const rows = [];
-    for (let i = 0; i < retrievedTable.numRows; i++) {
-      rows.push(retrievedTable.get(i));
-    }
-
-    // Check values in the first row
-    const firstRow = rows[0];
-    expect(firstRow.id).toBe("doc1");
-    expect(firstRow.vector.toJSON()).toEqual([1, 2, 3]);
-
-    // Verify metadata values are preserved (this is where the bug is)
-    expect(firstRow.metadata).toBeDefined();
-    expect(firstRow.metadata.filePath).toBe("/path/to/file1.ts");
-    expect(firstRow.metadata.startLine).toBe(10);
-    expect(firstRow.metadata.endLine).toBe(20);
-    expect(firstRow.metadata.text).toBe("function test() { return true; }");
-  });
 });
 
 class DummyEmbedding extends EmbeddingFunction<string> {
@@ -10,7 +10,7 @@ import * as arrow16 from "apache-arrow-16";
 import * as arrow17 from "apache-arrow-17";
 import * as arrow18 from "apache-arrow-18";
 
-import { MatchQuery, PhraseQuery, Table, connect } from "../lancedb";
+import { Table, connect } from "../lancedb";
 import {
   Table as ArrowTable,
   Field,
@@ -33,8 +33,6 @@
   register,
 } from "../lancedb/embedding";
 import { Index } from "../lancedb/indices";
-import { instanceOfFullTextQuery } from "../lancedb/query";
-import exp = require("constants");
 
 describe.each([arrow15, arrow16, arrow17, arrow18])(
   "Given a table",
@@ -72,33 +70,8 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
       await expect(table.countRows()).resolves.toBe(3);
     });
 
-    it("should show table stats", async () => {
-      await table.add([{ id: 1 }, { id: 2 }]);
-      await table.add([{ id: 1 }]);
-      await expect(table.stats()).resolves.toEqual({
-        fragmentStats: {
-          lengths: {
-            max: 2,
-            mean: 1,
-            min: 1,
-            p25: 1,
-            p50: 2,
-            p75: 2,
-            p99: 2,
-          },
-          numFragments: 2,
-          numSmallFragments: 2,
-        },
-        numIndices: 0,
-        numRows: 3,
-        totalBytes: 24,
-      });
-    });
-
     it("should overwrite data if asked", async () => {
-      const addRes = await table.add([{ id: 1 }, { id: 2 }]);
-      expect(addRes).toHaveProperty("version");
-      expect(addRes.version).toBe(2);
+      await table.add([{ id: 1 }, { id: 2 }]);
       await table.add([{ id: 1 }], { mode: "overwrite" });
       await expect(table.countRows()).resolves.toBe(1);
     });
@@ -114,11 +87,7 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
       await table.add([{ id: 1 }]);
       expect(await table.countRows("id == 1")).toBe(1);
       expect(await table.countRows("id == 7")).toBe(0);
-      const updateRes = await table.update({ id: "7" });
-      expect(updateRes).toHaveProperty("version");
-      expect(updateRes.version).toBe(3);
-      expect(updateRes).toHaveProperty("rowsUpdated");
-      expect(updateRes.rowsUpdated).toBe(1);
+      await table.update({ id: "7" });
       expect(await table.countRows("id == 1")).toBe(0);
       expect(await table.countRows("id == 7")).toBe(1);
       await table.add([{ id: 2 }]);
@@ -345,17 +314,11 @@ describe("merge insert", () => {
       { a: 3, b: "y" },
       { a: 4, b: "z" },
     ];
-    const mergeInsertRes = await table
+    await table
       .mergeInsert("a")
       .whenMatchedUpdateAll()
       .whenNotMatchedInsertAll()
-      .execute(newData, { timeoutMs: 10_000 });
-    expect(mergeInsertRes).toHaveProperty("version");
-    expect(mergeInsertRes.version).toBe(2);
-    expect(mergeInsertRes.numInsertedRows).toBe(1);
-    expect(mergeInsertRes.numUpdatedRows).toBe(2);
-    expect(mergeInsertRes.numDeletedRows).toBe(0);
+      .execute(newData);
 
     const expected = [
       { a: 1, b: "a" },
       { a: 2, b: "x" },
@@ -373,12 +336,10 @@ describe("merge insert", () => {
       { a: 3, b: "y" },
       { a: 4, b: "z" },
     ];
-    const mergeInsertRes = await table
+    await table
       .mergeInsert("a")
       .whenMatchedUpdateAll({ where: "target.b = 'b'" })
       .execute(newData);
-    expect(mergeInsertRes).toHaveProperty("version");
-    expect(mergeInsertRes.version).toBe(2);
 
     const expected = [
       { a: 1, b: "a" },
@@ -463,20 +424,6 @@ describe("merge insert", () => {
     res = res.sort((a, b) => a.a - b.a);
     expect(res).toEqual(expected);
   });
-
-  test("timeout", async () => {
-    const newData = [
-      { a: 2, b: "x" },
-      { a: 4, b: "z" },
-    ];
-    await expect(
-      table
-        .mergeInsert("a")
-        .whenMatchedUpdateAll()
-        .whenNotMatchedInsertAll()
-        .execute(newData, { timeoutMs: 0 }),
-    ).rejects.toThrow("merge insert timed out");
-  });
 });
 
 describe("When creating an index", () => {
@@ -559,15 +506,6 @@ describe("When creating an index", () => {
     expect(indices2.length).toBe(0);
   });
-
-  it("should wait for index readiness", async () => {
-    // Create an index and then wait for it to be ready
-    await tbl.createIndex("vec");
-    const indices = await tbl.listIndices();
-    expect(indices.length).toBeGreaterThan(0);
-    const idxName = indices[0].name;
-    await expect(tbl.waitForIndex([idxName], 5)).resolves.toBeUndefined();
-  });
 
   it("should search with distance range", async () => {
     await tbl.createIndex("vec");
 
@@ -695,23 +633,6 @@ describe("When creating an index", () => {
     expect(plan2).not.toMatch("LanceScan");
   });
-
-  it("should be able to run analyze plan", async () => {
-    await tbl.createIndex("vec");
-    await tbl.add([
-      {
-        id: 300,
-        vec: Array(32)
-          .fill(1)
-          .map(() => Math.random()),
-        tags: [],
-      },
-    ]);
-
-    const plan = await tbl.query().nearestTo(queryVec).analyzePlan();
-    expect(plan).toMatch("AnalyzeExec");
-    expect(plan).toMatch("metrics=");
-  });
 
   it("should be able to query with row id", async () => {
     const results = await tbl
       .query()
@@ -885,7 +806,6 @@ describe("When creating an index", () => {
|
|||||||
// Only build index over v1
|
// Only build index over v1
|
||||||
await tbl.createIndex("vec", {
|
await tbl.createIndex("vec", {
|
||||||
config: Index.ivfPq({ numPartitions: 2, numSubVectors: 2 }),
|
config: Index.ivfPq({ numPartitions: 2, numSubVectors: 2 }),
|
||||||
waitTimeoutSeconds: 30,
|
|
||||||
});
|
});
|
||||||
|
|
||||||
const rst = await tbl
|
const rst = await tbl
|
||||||
@@ -930,44 +850,6 @@ describe("When creating an index", () => {
   });
 });
 
-describe("When querying a table", () => {
-  let tmpDir: tmp.DirResult;
-  beforeEach(() => {
-    tmpDir = tmp.dirSync({ unsafeCleanup: true });
-  });
-  afterEach(() => tmpDir.removeCallback());
-
-  it("should throw an error when timeout is reached", async () => {
-    const db = await connect(tmpDir.name);
-    const data = makeArrowTable([
-      { text: "a", vector: [0.1, 0.2] },
-      { text: "b", vector: [0.3, 0.4] },
-    ]);
-    const table = await db.createTable("test", data);
-    await table.createIndex("text", { config: Index.fts() });
-
-    await expect(
-      table.query().where("text != 'a'").toArray({ timeoutMs: 0 }),
-    ).rejects.toThrow("Query timeout");
-
-    await expect(
-      table.query().nearestTo([0.0, 0.0]).toArrow({ timeoutMs: 0 }),
-    ).rejects.toThrow("Query timeout");
-
-    await expect(
-      table.search("a", "fts").toArray({ timeoutMs: 0 }),
-    ).rejects.toThrow("Query timeout");
-
-    await expect(
-      table
-        .query()
-        .nearestToText("a")
-        .nearestTo([0.0, 0.0])
-        .toArrow({ timeoutMs: 0 }),
-    ).rejects.toThrow("Query timeout");
-  });
-});
-
 describe("Read consistency interval", () => {
   let tmpDir: tmp.DirResult;
   beforeEach(() => {
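The deleted test exercises the per-query `timeoutMs` budget; `timeoutMs: 0` forces an immediate "Query timeout" rejection, which is what the assertions rely on. A sketch with a realistic budget on the 0.19.x-side API (path and data illustrative):

```ts
import { connect } from "@lancedb/lancedb";

// Sketch only: cap a single query at 5 s.
async function queryWithBudget(): Promise<void> {
  const db = await connect("./data");
  const table = await db.createTable("docs", [
    { text: "a", vector: [0.1, 0.2] },
    { text: "b", vector: [0.3, 0.4] },
  ]);
  const rows = await table
    .query()
    .nearestTo([0.0, 0.0])
    .toArray({ timeoutMs: 5000 }); // rejects with "Query timeout" if exceeded
  console.log(rows.length);
}
```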
@@ -1052,19 +934,15 @@ describe("schema evolution", function () {
       { id: 1n, vector: [0.1, 0.2] },
     ]);
     // Can create a non-nullable column only through addColumns at the moment.
-    const addColumnsRes = await table.addColumns([
+    await table.addColumns([
       { name: "price", valueSql: "cast(10.0 as double)" },
     ]);
-    expect(addColumnsRes).toHaveProperty("version");
-    expect(addColumnsRes.version).toBe(2);
     expect(await table.schema()).toEqual(schema);
 
-    const alterColumnsRes = await table.alterColumns([
+    await table.alterColumns([
       { path: "id", rename: "new_id" },
       { path: "price", nullable: true },
     ]);
-    expect(alterColumnsRes).toHaveProperty("version");
-    expect(alterColumnsRes.version).toBe(3);
 
     const expectedSchema = new Schema([
       new Field("new_id", new Int64(), true),
@@ -1182,9 +1060,7 @@ describe("schema evolution", function () {
     const table = await con.createTable("vectors", [
       { id: 1n, vector: [0.1, 0.2] },
     ]);
-    const dropColumnsRes = await table.dropColumns(["vector"]);
-    expect(dropColumnsRes).toHaveProperty("version");
-    expect(dropColumnsRes.version).toBe(2);
+    await table.dropColumns(["vector"]);
 
     const expectedSchema = new Schema([new Field("id", new Int64(), true)]);
     expect(await table.schema()).toEqual(expectedSchema);
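As the removed assertions show, the 0.19.x-side schema-evolution calls resolve to a result object carrying the new table version rather than `void`. A sketch:

```ts
import { type Table } from "@lancedb/lancedb";

// Sketch only: each call resolves to the post-commit version on the
// 0.19.x side, per the assertions removed above.
async function evolveSchema(table: Table): Promise<void> {
  const added = await table.addColumns([
    { name: "price", valueSql: "cast(10.0 as double)" },
  ]);
  console.log(added.version); // e.g. 2 on a freshly created table

  const altered = await table.alterColumns([{ path: "price", nullable: true }]);
  console.log(altered.version); // bumped again, e.g. 3
}
```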
@@ -1236,99 +1112,6 @@ describe("when dealing with versioning", () => {
   });
 });
 
-describe("when dealing with tags", () => {
-  let tmpDir: tmp.DirResult;
-  beforeEach(() => {
-    tmpDir = tmp.dirSync({ unsafeCleanup: true });
-  });
-  afterEach(() => {
-    tmpDir.removeCallback();
-  });
-
-  it("can manage tags", async () => {
-    const conn = await connect(tmpDir.name, {
-      readConsistencyInterval: 0,
-    });
-
-    const table = await conn.createTable("my_table", [
-      { id: 1n, vector: [0.1, 0.2] },
-    ]);
-    expect(await table.version()).toBe(1);
-
-    await table.add([{ id: 2n, vector: [0.3, 0.4] }]);
-    expect(await table.version()).toBe(2);
-
-    const tagsManager = await table.tags();
-
-    const initialTags = await tagsManager.list();
-    expect(Object.keys(initialTags).length).toBe(0);
-
-    const tag1 = "tag1";
-    await tagsManager.create(tag1, 1);
-    expect(await tagsManager.getVersion(tag1)).toBe(1);
-
-    const tagsAfterFirst = await tagsManager.list();
-    expect(Object.keys(tagsAfterFirst).length).toBe(1);
-    expect(tagsAfterFirst).toHaveProperty(tag1);
-    expect(tagsAfterFirst[tag1].version).toBe(1);
-
-    await tagsManager.create("tag2", 2);
-    expect(await tagsManager.getVersion("tag2")).toBe(2);
-
-    const tagsAfterSecond = await tagsManager.list();
-    expect(Object.keys(tagsAfterSecond).length).toBe(2);
-    expect(tagsAfterSecond).toHaveProperty(tag1);
-    expect(tagsAfterSecond[tag1].version).toBe(1);
-    expect(tagsAfterSecond).toHaveProperty("tag2");
-    expect(tagsAfterSecond["tag2"].version).toBe(2);
-
-    await table.add([{ id: 3n, vector: [0.5, 0.6] }]);
-    await tagsManager.update(tag1, 3);
-    expect(await tagsManager.getVersion(tag1)).toBe(3);
-
-    await tagsManager.delete("tag2");
-    const tagsAfterDelete = await tagsManager.list();
-    expect(Object.keys(tagsAfterDelete).length).toBe(1);
-    expect(tagsAfterDelete).toHaveProperty(tag1);
-    expect(tagsAfterDelete[tag1].version).toBe(3);
-
-    await table.add([{ id: 4n, vector: [0.7, 0.8] }]);
-    expect(await table.version()).toBe(4);
-
-    await table.checkout(tag1);
-    expect(await table.version()).toBe(3);
-
-    await table.checkoutLatest();
-    expect(await table.version()).toBe(4);
-  });
-
-  it("can checkout and restore tags", async () => {
-    const conn = await connect(tmpDir.name, {
-      readConsistencyInterval: 0,
-    });
-
-    const table = await conn.createTable("my_table", [
-      { id: 1n, vector: [0.1, 0.2] },
-    ]);
-    expect(await table.version()).toBe(1);
-    expect(await table.countRows()).toBe(1);
-    const tagsManager = await table.tags();
-    const tag1 = "tag1";
-    await tagsManager.create(tag1, 1);
-    await table.add([{ id: 2n, vector: [0.3, 0.4] }]);
-    const tag2 = "tag2";
-    await tagsManager.create(tag2, 2);
-    expect(await table.version()).toBe(2);
-    await table.checkout(tag1);
-    expect(await table.version()).toBe(1);
-    await table.restore();
-    expect(await table.version()).toBe(3);
-    expect(await table.countRows()).toBe(1);
-    await table.add([{ id: 3n, vector: [0.5, 0.6] }]);
-    expect(await table.countRows()).toBe(2);
-  });
-});
-
 describe("when optimizing a dataset", () => {
   let tmpDir: tmp.DirResult;
   let table: Table;
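The two deleted tests cover the whole tags surface: a tags manager obtained from `table.tags()`, plus its interplay with `checkout`, `restore`, and `checkoutLatest`. A condensed 0.19.x-side sketch (directory and data illustrative):

```ts
import { connect } from "@lancedb/lancedb";

// Sketch only: tag a version, time-travel to it, then return to head.
async function tagAndTravel(): Promise<void> {
  const db = await connect("./data", { readConsistencyInterval: 0 });
  const table = await db.createTable("my_table", [
    { id: 1n, vector: [0.1, 0.2] },
  ]);

  const tags = await table.tags();
  await tags.create("stable", await table.version()); // pin version 1

  await table.add([{ id: 2n, vector: [0.3, 0.4] }]); // now at version 2

  await table.checkout("stable"); // time-travel mode at the tagged version
  await table.restore(); // make the tagged state the new head version
  await table.checkoutLatest(); // leave time-travel mode
}
```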
@@ -1464,56 +1247,6 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
 
     const results = await table.search("hello").toArray();
     expect(results[0].text).toBe(data[0].text);
-
-    const query = new MatchQuery("goodbye", "text");
-    expect(instanceOfFullTextQuery(query)).toBe(true);
-    const results2 = await table
-      .search(new MatchQuery("goodbye", "text"))
-      .toArray();
-    expect(results2[0].text).toBe(data[1].text);
-  });
-
-  test("prewarm full text search index", async () => {
-    const db = await connect(tmpDir.name);
-    const data = [
-      { text: ["lance database", "the", "search"], vector: [0.1, 0.2, 0.3] },
-      { text: ["lance database"], vector: [0.4, 0.5, 0.6] },
-      { text: ["lance", "search"], vector: [0.7, 0.8, 0.9] },
-      { text: ["database", "search"], vector: [1.0, 1.1, 1.2] },
-      { text: ["unrelated", "doc"], vector: [1.3, 1.4, 1.5] },
-    ];
-    const table = await db.createTable("test", data);
-    await table.createIndex("text", {
-      config: Index.fts(),
-    });
-
-    // For the moment, we just confirm we can call prewarmIndex without error
-    // and still search it afterwards
-    await table.prewarmIndex("text_idx");
-
-    const results = await table.search("lance").toArray();
-    expect(results.length).toBe(3);
-  });
-
-  test("full text index on list", async () => {
-    const db = await connect(tmpDir.name);
-    const data = [
-      { text: ["lance database", "the", "search"], vector: [0.1, 0.2, 0.3] },
-      { text: ["lance database"], vector: [0.4, 0.5, 0.6] },
-      { text: ["lance", "search"], vector: [0.7, 0.8, 0.9] },
-      { text: ["database", "search"], vector: [1.0, 1.1, 1.2] },
-      { text: ["unrelated", "doc"], vector: [1.3, 1.4, 1.5] },
-    ];
-    const table = await db.createTable("test", data);
-    await table.createIndex("text", {
-      config: Index.fts(),
-    });
-
-    const results = await table.search("lance").toArray();
-    expect(results.length).toBe(3);
-
-    const results2 = await table.search('"lance database"').toArray();
-    expect(results2.length).toBe(2);
   });
 
   test("full text search without positions", async () => {
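Of the removed tests, "prewarm full text search index" was the only coverage for `Table.prewarmIndex`. A sketch of that 0.19.x-side call; the `text_idx` name follows the `<column>_idx` convention visible in the test:

```ts
import { Index, type Table } from "@lancedb/lancedb";

// Sketch only: load an FTS index into cache ahead of the first query.
// Per the removed docs, this is wasteful if the index exceeds the cache.
async function warmFullTextIndex(table: Table): Promise<void> {
  await table.createIndex("text", { config: Index.fts() });
  await table.prewarmIndex("text_idx");
  const hits = await table.search("lance").toArray();
  console.log(hits.length);
}
```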
@@ -1566,43 +1299,6 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
     expect(results.length).toBe(2);
     const phraseResults = await table.search('"hello world"').toArray();
     expect(phraseResults.length).toBe(1);
-    const phraseResults2 = await table
-      .search(new PhraseQuery("hello world", "text"))
-      .toArray();
-    expect(phraseResults2.length).toBe(1);
-  });
-
-  test("full text search fuzzy query", async () => {
-    const db = await connect(tmpDir.name);
-    const data = [
-      { text: "fa", vector: [0.1, 0.2, 0.3] },
-      { text: "fo", vector: [0.4, 0.5, 0.6] },
-      { text: "fob", vector: [0.4, 0.5, 0.6] },
-      { text: "focus", vector: [0.4, 0.5, 0.6] },
-      { text: "foo", vector: [0.4, 0.5, 0.6] },
-      { text: "food", vector: [0.4, 0.5, 0.6] },
-      { text: "foul", vector: [0.4, 0.5, 0.6] },
-    ];
-    const table = await db.createTable("test", data);
-    await table.createIndex("text", {
-      config: Index.fts(),
-    });
-
-    const results = await table
-      .search(new MatchQuery("foo", "text"))
-      .toArray();
-    expect(results.length).toBe(1);
-    expect(results[0].text).toBe("foo");
-
-    const fuzzyResults = await table
-      .search(new MatchQuery("foo", "text", { fuzziness: 1 }))
-      .toArray();
-    expect(fuzzyResults.length).toBe(4);
-    const resultSet = new Set(fuzzyResults.map((r) => r.text));
-    expect(resultSet.has("foo")).toBe(true);
-    expect(resultSet.has("fob")).toBe(true);
-    expect(resultSet.has("fo")).toBe(true);
-    expect(resultSet.has("food")).toBe(true);
   });
 
   test.each([
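The deleted fuzzy-query test documents the semantics worth keeping in mind: `fuzziness: 1` admits terms within one edit of "foo", so "fo", "fob", and "food" match while "focus" and "foul" do not. Sketch against the 0.19.x-side `MatchQuery`:

```ts
import { MatchQuery, type Table } from "@lancedb/lancedb";

// Sketch only: exact vs. one-edit fuzzy matching on a "text" column.
async function fuzzyDemo(table: Table): Promise<void> {
  const exact = await table.search(new MatchQuery("foo", "text")).toArray();
  const fuzzy = await table
    .search(new MatchQuery("foo", "text", { fuzziness: 1 }))
    .toArray();
  console.log(exact.length, fuzzy.length); // 1 vs. 4 on the test fixture
}
```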
@@ -1650,30 +1346,6 @@ describe("when calling explainPlan", () => {
   });
 });
 
-describe("when calling analyzePlan", () => {
-  let tmpDir: tmp.DirResult;
-  let table: Table;
-  let queryVec: number[];
-  beforeEach(async () => {
-    tmpDir = tmp.dirSync({ unsafeCleanup: true });
-    const con = await connect(tmpDir.name);
-    table = await con.createTable("vectors", [{ id: 1, vector: [1.1, 0.9] }]);
-  });
-
-  afterEach(() => {
-    tmpDir.removeCallback();
-  });
-
-  it("retrieves runtime metrics", async () => {
-    queryVec = Array(2)
-      .fill(1)
-      .map(() => Math.random());
-    const plan = await table.query().nearestTo(queryVec).analyzePlan();
-    console.log("Query Plan:\n", plan); // <--- Print the plan
-    expect(plan).toMatch("AnalyzeExec");
-  });
-});
-
 describe("column name options", () => {
   let tmpDir: tmp.DirResult;
   let table: Table;
@@ -639,9 +639,8 @@ function transposeData(
 ): Vector {
   if (field.type instanceof Struct) {
     const childFields = field.type.children;
-    const fullPath = [...path, field.name];
     const childVectors = childFields.map((child) => {
-      return transposeData(data, child, fullPath);
+      return transposeData(data, child, [...path, child.name]);
     });
     const structData = makeData({
       type: field.type,
@@ -653,14 +652,7 @@ function transposeData(
     const values = data.map((datum) => {
       let current: unknown = datum;
      for (const key of valuesPath) {
-        if (current == null) {
-          return null;
-        }
-
-        if (
-          isObject(current) &&
-          (Object.hasOwn(current, key) || key in current)
-        ) {
+        if (isObject(current) && Object.hasOwn(current, key)) {
          current = current[key];
        } else {
          return null;
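The second hunk is a behavioral narrowing, not just a cleanup: the 0.19.x side first short-circuits on `null`/`undefined` and then accepts own *or* inherited keys (`key in current`), while the 0.18.2 side accepts own properties on non-null objects only. An illustrative side-by-side of the two guards (helper names are mine, not the file's):

```ts
// Illustrative only: the two lookup guards from the hunk above.
const isObject = (v: unknown): v is Record<string, unknown> =>
  typeof v === "object" && v !== null;

// 0.18.2 side: own properties on non-null objects, everything else -> null.
function lookupNarrow(current: unknown, key: string): unknown {
  return isObject(current) && Object.hasOwn(current, key)
    ? current[key]
    : null;
}

// 0.19.x side: explicit null short-circuit, then own OR inherited
// (prototype-chain) keys, so getter-backed rows still resolve.
function lookupWide(current: unknown, key: string): unknown {
  if (current == null) {
    return null;
  }
  return isObject(current) && (Object.hasOwn(current, key) || key in current)
    ? current[key]
    : null;
}
```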
@@ -23,18 +23,6 @@ export {
   OptimizeStats,
   CompactionStats,
   RemovalStats,
-  TableStatistics,
-  FragmentStatistics,
-  FragmentSummaryStats,
-  Tags,
-  TagContents,
-  MergeResult,
-  AddResult,
-  AddColumnsResult,
-  AlterColumnsResult,
-  DeleteResult,
-  DropColumnsResult,
-  UpdateResult,
 } from "./native.js";
 
 export {
@@ -59,12 +47,6 @@ export {
   QueryExecutionOptions,
   FullTextSearchOptions,
   RecordBatchIterator,
-  FullTextQuery,
-  MatchQuery,
-  PhraseQuery,
-  BoostQuery,
-  MultiMatchQuery,
-  FullTextQueryType,
 } from "./query";
 
 export {
@@ -86,7 +68,7 @@ export {
   ColumnAlteration,
 } from "./table";
 
-export { MergeInsertBuilder, WriteExecutionOptions } from "./merge";
+export { MergeInsertBuilder } from "./merge";
 
 export * as embedding from "./embedding";
 export * as rerankers from "./rerankers";
@@ -681,6 +681,4 @@ export interface IndexOptions {
    * The default is true
    */
   replace?: boolean;
-
-  waitTimeoutSeconds?: number;
 }
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 import { Data, Schema, fromDataToBuffer } from "./arrow";
-import { MergeResult, NativeMergeInsertBuilder } from "./native";
+import { NativeMergeInsertBuilder } from "./native";
 
 /** A builder used to create and run a merge insert operation */
 export class MergeInsertBuilder {
@@ -73,12 +73,9 @@ export class MergeInsertBuilder {
   /**
    * Executes the merge insert operation
    *
-   * @returns {Promise<MergeResult>} the merge result
+   * Nothing is returned but the `Table` is updated
    */
-  async execute(
-    data: Data,
-    execOptions?: Partial<WriteExecutionOptions>,
-  ): Promise<MergeResult> {
+  async execute(data: Data): Promise<void> {
     let schema: Schema;
     if (this.#schema instanceof Promise) {
       schema = await this.#schema;
@@ -86,28 +83,7 @@ export class MergeInsertBuilder {
     } else {
       schema = this.#schema;
     }
-
-    if (execOptions?.timeoutMs !== undefined) {
-      this.#native.setTimeout(execOptions.timeoutMs);
-    }
-
     const buffer = await fromDataToBuffer(data, undefined, schema);
-    return await this.#native.execute(buffer);
+    await this.#native.execute(buffer);
   }
 }
-
-export interface WriteExecutionOptions {
-  /**
-   * Maximum time to run the operation before cancelling it.
-   *
-   * By default, there is a 30-second timeout that is only enforced after the
-   * first attempt. This is to prevent spending too long retrying to resolve
-   * conflicts. For example, if a write attempt takes 20 seconds and fails,
-   * the second attempt will be cancelled after 10 seconds, hitting the
-   * 30-second timeout. However, a write that takes one hour and succeeds on the
-   * first attempt will not be cancelled.
-   *
-   * When this is set, the timeout is enforced on all attempts, including the first.
-   */
-  timeoutMs?: number;
-}
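The removed `WriteExecutionOptions` is the write-side analogue of the query `timeoutMs`: per its own docs, the default 30-second budget is enforced only on retries, while an explicit `timeoutMs` caps every attempt including the first. A sketch of the 0.19.x-side `execute(data, execOptions)` signature; the `whenMatched...`/`whenNotMatched...` builder steps are assumed from the broader LanceDB JS API, not shown in this diff:

```ts
import { connect } from "@lancedb/lancedb";

// Sketch only: merge-insert (upsert) with an explicit 10 s budget.
async function upsertItems(): Promise<void> {
  const db = await connect("./data"); // illustrative path
  const table = await db.createTable("items", [{ id: 1, qty: 5 }]);
  const result = await table
    .mergeInsert("id")
    .whenMatchedUpdateAll()
    .whenNotMatchedInsertAll()
    .execute([{ id: 1, qty: 7 }, { id: 2, qty: 3 }], { timeoutMs: 10_000 });
  console.log(result); // resolves to a MergeResult on the 0.19.x side
}
```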
@@ -11,14 +11,12 @@ import {
 } from "./arrow";
 import { type IvfPqOptions } from "./indices";
 import {
-  JsFullTextQuery,
   RecordBatchIterator as NativeBatchIterator,
   Query as NativeQuery,
   Table as NativeTable,
   VectorQuery as NativeVectorQuery,
 } from "./native";
 import { Reranker } from "./rerankers";
 
 export class RecordBatchIterator implements AsyncIterator<RecordBatch> {
   private promisedInner?: Promise<NativeBatchIterator>;
   private inner?: NativeBatchIterator;
@@ -64,7 +62,7 @@ class RecordBatchIterable<
   // biome-ignore lint/suspicious/noExplicitAny: skip
   [Symbol.asyncIterator](): AsyncIterator<RecordBatch<any>, any, undefined> {
     return new RecordBatchIterator(
-      this.inner.execute(this.options?.maxBatchLength, this.options?.timeoutMs),
+      this.inner.execute(this.options?.maxBatchLength),
     );
   }
 }
@@ -80,11 +78,6 @@ export interface QueryExecutionOptions {
    * in smaller chunks.
    */
   maxBatchLength?: number;
-
-  /**
-   * Timeout for query execution in milliseconds
-   */
-  timeoutMs?: number;
 }
 
 /**
@@ -159,7 +152,7 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
   }
 
   fullTextSearch(
-    query: string | FullTextQuery,
+    query: string,
     options?: Partial<FullTextSearchOptions>,
   ): this {
     let columns: string[] | null = null;
@@ -171,16 +164,9 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
       }
     }
 
-    this.doCall((inner: NativeQueryType) => {
-      if (typeof query === "string") {
-        inner.fullTextSearch({
-          query: query,
-          columns: columns,
-        });
-      } else {
-        inner.fullTextSearch({ query: query.inner });
-      }
-    });
+    this.doCall((inner: NativeQueryType) =>
+      inner.fullTextSearch(query, columns),
+    );
     return this;
   }
 
@@ -287,11 +273,9 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
     options?: Partial<QueryExecutionOptions>,
   ): Promise<NativeBatchIterator> {
     if (this.inner instanceof Promise) {
-      return this.inner.then((inner) =>
-        inner.execute(options?.maxBatchLength, options?.timeoutMs),
-      );
+      return this.inner.then((inner) => inner.execute(options?.maxBatchLength));
     } else {
-      return this.inner.execute(options?.maxBatchLength, options?.timeoutMs);
+      return this.inner.execute(options?.maxBatchLength);
     }
   }
 
@@ -364,43 +348,6 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
     return this.inner.explainPlan(verbose);
    }
  }
-
-  /**
-   * Executes the query and returns the physical query plan annotated with runtime metrics.
-   *
-   * This is useful for debugging and performance analysis, as it shows how the query was executed
-   * and includes metrics such as elapsed time, rows processed, and I/O statistics.
-   *
-   * @example
-   * import * as lancedb from "@lancedb/lancedb"
-   *
-   * const db = await lancedb.connect("./.lancedb");
-   * const table = await db.createTable("my_table", [
-   *   { vector: [1.1, 0.9], id: "1" },
-   * ]);
-   *
-   * const plan = await table.query().nearestTo([0.5, 0.2]).analyzePlan();
-   *
-   * Example output (with runtime metrics inlined):
-   * AnalyzeExec verbose=true, metrics=[]
-   * ProjectionExec: expr=[id@3 as id, vector@0 as vector, _distance@2 as _distance], metrics=[output_rows=1, elapsed_compute=3.292µs]
-   * Take: columns="vector, _rowid, _distance, (id)", metrics=[output_rows=1, elapsed_compute=66.001µs, batches_processed=1, bytes_read=8, iops=1, requests=1]
-   * CoalesceBatchesExec: target_batch_size=1024, metrics=[output_rows=1, elapsed_compute=3.333µs]
-   * GlobalLimitExec: skip=0, fetch=10, metrics=[output_rows=1, elapsed_compute=167ns]
-   * FilterExec: _distance@2 IS NOT NULL, metrics=[output_rows=1, elapsed_compute=8.542µs]
-   * SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], metrics=[output_rows=1, elapsed_compute=63.25µs, row_replacements=1]
-   * KNNVectorDistance: metric=l2, metrics=[output_rows=1, elapsed_compute=114.333µs, output_batches=1]
-   * LanceScan: uri=/path/to/data, projection=[vector], row_id=true, row_addr=false, ordered=false, metrics=[output_rows=1, elapsed_compute=103.626µs, bytes_read=549, iops=2, requests=2]
-   *
-   * @returns A query execution plan with runtime metrics for each step.
-   */
-  async analyzePlan(): Promise<string> {
-    if (this.inner instanceof Promise) {
-      return this.inner.then((inner) => inner.analyzePlan());
-    } else {
-      return this.inner.analyzePlan();
-    }
-  }
 }
 
 /**
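The removed `analyzePlan` JSDoc above is self-contained enough to reconstruct usage; a condensed sketch of the 0.19.x-side call:

```ts
import { connect } from "@lancedb/lancedb";

// Sketch only, condensed from the removed JSDoc example above.
async function profileQuery(): Promise<void> {
  const db = await connect("./.lancedb");
  const table = await db.createTable("my_table", [
    { vector: [1.1, 0.9], id: "1" },
  ]);
  const plan = await table.query().nearestTo([0.5, 0.2]).analyzePlan();
  // The returned plan text carries per-operator runtime metrics, e.g.
  // "SortExec: TopK(fetch=10), ... metrics=[output_rows=1, ...]".
  console.log(plan);
}
```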
@@ -734,177 +681,8 @@ export class Query extends QueryBase<NativeQuery> {
     }
   }
 
-  nearestToText(query: string | FullTextQuery, columns?: string[]): Query {
-    this.doCall((inner) => {
-      if (typeof query === "string") {
-        inner.fullTextSearch({
-          query: query,
-          columns: columns,
-        });
-      } else {
-        inner.fullTextSearch({ query: query.inner });
-      }
-    });
+  nearestToText(query: string, columns?: string[]): Query {
+    this.doCall((inner) => inner.fullTextSearch(query, columns));
     return this;
   }
 }
-
-/**
- * Enum representing the types of full-text queries supported.
- *
- * - `Match`: Performs a full-text search for terms in the query string.
- * - `MatchPhrase`: Searches for an exact phrase match in the text.
- * - `Boost`: Boosts the relevance score of specific terms in the query.
- * - `MultiMatch`: Searches across multiple fields for the query terms.
- */
-export enum FullTextQueryType {
-  Match = "match",
-  MatchPhrase = "match_phrase",
-  Boost = "boost",
-  MultiMatch = "multi_match",
-}
-
-/**
- * Represents a full-text query interface.
- * This interface defines the structure and behavior for full-text queries,
- * including methods to retrieve the query type and convert the query to a dictionary format.
- */
-export interface FullTextQuery {
-  /**
-   * Returns the inner query object.
-   * This is the underlying query object used by the database engine.
-   * @ignore
-   */
-  inner: JsFullTextQuery;
-
-  /**
-   * The type of the full-text query.
-   */
-  queryType(): FullTextQueryType;
-}
-
-// biome-ignore lint/suspicious/noExplicitAny: we want any here
-export function instanceOfFullTextQuery(obj: any): obj is FullTextQuery {
-  return obj != null && obj.inner instanceof JsFullTextQuery;
-}
-
-export class MatchQuery implements FullTextQuery {
-  /** @ignore */
-  public readonly inner: JsFullTextQuery;
-  /**
-   * Creates an instance of MatchQuery.
-   *
-   * @param query - The text query to search for.
-   * @param column - The name of the column to search within.
-   * @param options - Optional parameters for the match query.
-   * - `boost`: The boost factor for the query (default is 1.0).
-   * - `fuzziness`: The fuzziness level for the query (default is 0).
-   * - `maxExpansions`: The maximum number of terms to consider for fuzzy matching (default is 50).
-   */
-  constructor(
-    query: string,
-    column: string,
-    options?: {
-      boost?: number;
-      fuzziness?: number;
-      maxExpansions?: number;
-    },
-  ) {
-    let fuzziness = options?.fuzziness;
-    if (fuzziness === undefined) {
-      fuzziness = 0;
-    }
-    this.inner = JsFullTextQuery.matchQuery(
-      query,
-      column,
-      options?.boost ?? 1.0,
-      fuzziness,
-      options?.maxExpansions ?? 50,
-    );
-  }
-
-  queryType(): FullTextQueryType {
-    return FullTextQueryType.Match;
-  }
-}
-
-export class PhraseQuery implements FullTextQuery {
-  /** @ignore */
-  public readonly inner: JsFullTextQuery;
-  /**
-   * Creates an instance of `PhraseQuery`.
-   *
-   * @param query - The phrase to search for in the specified column.
-   * @param column - The name of the column to search within.
-   */
-  constructor(query: string, column: string) {
-    this.inner = JsFullTextQuery.phraseQuery(query, column);
-  }
-
-  queryType(): FullTextQueryType {
-    return FullTextQueryType.MatchPhrase;
-  }
-}
-
-export class BoostQuery implements FullTextQuery {
-  /** @ignore */
-  public readonly inner: JsFullTextQuery;
-  /**
-   * Creates an instance of BoostQuery.
-   * The boost returns documents that match the positive query,
-   * but penalizes those that match the negative query.
-   * the penalty is controlled by the `negativeBoost` parameter.
-   *
-   * @param positive - The positive query that boosts the relevance score.
-   * @param negative - The negative query that reduces the relevance score.
-   * @param options - Optional parameters for the boost query.
-   * - `negativeBoost`: The boost factor for the negative query (default is 0.0).
-   */
-  constructor(
-    positive: FullTextQuery,
-    negative: FullTextQuery,
-    options?: {
-      negativeBoost?: number;
-    },
-  ) {
-    this.inner = JsFullTextQuery.boostQuery(
-      positive.inner,
-      negative.inner,
-      options?.negativeBoost,
-    );
-  }
-
-  queryType(): FullTextQueryType {
-    return FullTextQueryType.Boost;
-  }
-}
-
-export class MultiMatchQuery implements FullTextQuery {
-  /** @ignore */
-  public readonly inner: JsFullTextQuery;
-  /**
-   * Creates an instance of MultiMatchQuery.
-   *
-   * @param query - The text query to search for across multiple columns.
-   * @param columns - An array of column names to search within.
-   * @param options - Optional parameters for the multi-match query.
-   * - `boosts`: An array of boost factors for each column (default is 1.0 for all).
-   */
-  constructor(
-    query: string,
-    columns: string[],
-    options?: {
-      boosts?: number[];
-    },
-  ) {
-    this.inner = JsFullTextQuery.multiMatchQuery(
-      query,
-      columns,
-      options?.boosts,
-    );
-  }
-
-  queryType(): FullTextQueryType {
-    return FullTextQueryType.MultiMatch;
-  }
-}
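Beyond plain `MatchQuery`/`PhraseQuery`, the classes deleted above compose: `MultiMatchQuery` fans one query out across several columns, and `BoostQuery` demotes documents matching a negative query by `negativeBoost`. A 0.19.x-side sketch; the `title`/`body` columns are hypothetical:

```ts
import {
  BoostQuery,
  MatchQuery,
  MultiMatchQuery,
  type Table,
} from "@lancedb/lancedb";

// Sketch only: composite full-text queries over hypothetical columns.
async function composedSearch(table: Table): Promise<void> {
  // Search two columns at once.
  const multi = new MultiMatchQuery("lance database", ["title", "body"]);

  // Match "database" but demote anything that also matches "deprecated".
  const boosted = new BoostQuery(
    new MatchQuery("database", "body"),
    new MatchQuery("deprecated", "body"),
    { negativeBoost: 0.3 },
  );

  console.log((await table.search(multi).toArray()).length);
  console.log((await table.search(boosted).toArray()).length);
}
```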
@@ -16,26 +16,13 @@ import { EmbeddingFunctionConfig, getRegistry } from "./embedding/registry";
 import { IndexOptions } from "./indices";
 import { MergeInsertBuilder } from "./merge";
 import {
-  AddColumnsResult,
   AddColumnsSql,
-  AddResult,
-  AlterColumnsResult,
-  DeleteResult,
-  DropColumnsResult,
   IndexConfig,
   IndexStatistics,
   OptimizeStats,
-  TableStatistics,
-  Tags,
-  UpdateResult,
   Table as _NativeTable,
 } from "./native";
-import {
-  FullTextQuery,
-  Query,
-  VectorQuery,
-  instanceOfFullTextQuery,
-} from "./query";
+import { Query, VectorQuery } from "./query";
 import { sanitizeType } from "./sanitize";
 import { IntoSql, toSQL } from "./util";
 export { IndexConfig } from "./native";
@@ -132,19 +119,12 @@ export abstract class Table {
   /**
    * Insert records into this Table.
    * @param {Data} data Records to be inserted into the Table
-   * @returns {Promise<AddResult>} A promise that resolves to an object
-   * containing the new version number of the table
    */
-  abstract add(
-    data: Data,
-    options?: Partial<AddDataOptions>,
-  ): Promise<AddResult>;
+  abstract add(data: Data, options?: Partial<AddDataOptions>): Promise<void>;
   /**
    * Update existing records in the Table
    * @param opts.values The values to update. The keys are the column names and the values
    * are the values to set.
-   * @returns {Promise<UpdateResult>} A promise that resolves to an object containing
-   * the number of rows updated and the new version number
    * @example
    * ```ts
    * table.update({where:"x = 2", values:{"vector": [10, 10]}})
@@ -154,13 +134,11 @@ export abstract class Table {
     opts: {
       values: Map<string, IntoSql> | Record<string, IntoSql>;
     } & Partial<UpdateOptions>,
-  ): Promise<UpdateResult>;
+  ): Promise<void>;
   /**
    * Update existing records in the Table
    * @param opts.valuesSql The values to update. The keys are the column names and the values
    * are the values to set. The values are SQL expressions.
-   * @returns {Promise<UpdateResult>} A promise that resolves to an object containing
-   * the number of rows updated and the new version number
    * @example
    * ```ts
    * table.update({where:"x = 2", valuesSql:{"x": "x + 1"}})
@@ -170,7 +148,7 @@ export abstract class Table {
     opts: {
       valuesSql: Map<string, string> | Record<string, string>;
     } & Partial<UpdateOptions>,
-  ): Promise<UpdateResult>;
+  ): Promise<void>;
   /**
    * Update existing records in the Table
    *
@@ -188,8 +166,6 @@ export abstract class Table {
    * repeatedly calilng this method.
    * @param {Map<string, string> | Record<string, string>} updates - the
    * columns to update
-   * @returns {Promise<UpdateResult>} A promise that resolves to an object
-   * containing the number of rows updated and the new version number
    *
    * Keys in the map should specify the name of the column to update.
    * Values in the map provide the new value of the column. These can
@@ -201,16 +177,12 @@ export abstract class Table {
   abstract update(
     updates: Map<string, string> | Record<string, string>,
     options?: Partial<UpdateOptions>,
-  ): Promise<UpdateResult>;
+  ): Promise<void>;
 
   /** Count the total number of rows in the dataset. */
   abstract countRows(filter?: string): Promise<number>;
-  /**
-   * Delete the rows that satisfy the predicate.
-   * @returns {Promise<DeleteResult>} A promise that resolves to an object
-   * containing the new version number of the table
-   */
-  abstract delete(predicate: string): Promise<DeleteResult>;
+  /** Delete the rows that satisfy the predicate. */
+  abstract delete(predicate: string): Promise<void>;
   /**
    * Create an index to speed up queries.
    *
@@ -258,30 +230,6 @@ export abstract class Table {
    */
   abstract dropIndex(name: string): Promise<void>;
 
-  /**
-   * Prewarm an index in the table.
-   *
-   * @param name The name of the index.
-   *
-   * This will load the index into memory. This may reduce the cold-start time for
-   * future queries. If the index does not fit in the cache then this call may be
-   * wasteful.
-   */
-  abstract prewarmIndex(name: string): Promise<void>;
-
-  /**
-   * Waits for asynchronous indexing to complete on the table.
-   *
-   * @param indexNames The name of the indices to wait for
-   * @param timeoutSeconds The number of seconds to wait before timing out
-   *
-   * This will raise an error if the indices are not created and fully indexed within the timeout.
-   */
-  abstract waitForIndex(
-    indexNames: string[],
-    timeoutSeconds: number,
-  ): Promise<void>;
-
   /**
    * Create a {@link Query} Builder.
    *
@@ -346,7 +294,7 @@ export abstract class Table {
    * if the query is a string and no embedding function is defined, it will be treated as a full text search query
    */
   abstract search(
-    query: string | IntoVector | FullTextQuery,
+    query: string | IntoVector,
     queryType?: string,
     ftsColumns?: string | string[],
   ): VectorQuery | Query;
@@ -364,23 +312,15 @@ export abstract class Table {
    * the SQL expression to use to calculate the value of the new column. These
    * expressions will be evaluated for each row in the table, and can
    * reference existing columns in the table.
-   * @returns {Promise<AddColumnsResult>} A promise that resolves to an object
-   * containing the new version number of the table after adding the columns.
    */
-  abstract addColumns(
-    newColumnTransforms: AddColumnsSql[],
-  ): Promise<AddColumnsResult>;
+  abstract addColumns(newColumnTransforms: AddColumnsSql[]): Promise<void>;
 
   /**
    * Alter the name or nullability of columns.
    * @param {ColumnAlteration[]} columnAlterations One or more alterations to
    * apply to columns.
-   * @returns {Promise<AlterColumnsResult>} A promise that resolves to an object
-   * containing the new version number of the table after altering the columns.
    */
-  abstract alterColumns(
-    columnAlterations: ColumnAlteration[],
-  ): Promise<AlterColumnsResult>;
+  abstract alterColumns(columnAlterations: ColumnAlteration[]): Promise<void>;
   /**
    * Drop one or more columns from the dataset
    *
@@ -391,10 +331,8 @@ export abstract class Table {
    * @param {string[]} columnNames The names of the columns to drop. These can
    * be nested column references (e.g. "a.b.c") or top-level column names
    * (e.g. "a").
-   * @returns {Promise<DropColumnsResult>} A promise that resolves to an object
-   * containing the new version number of the table after dropping the columns.
    */
-  abstract dropColumns(columnNames: string[]): Promise<DropColumnsResult>;
+  abstract dropColumns(columnNames: string[]): Promise<void>;
   /** Retrieve the version of the table */
 
   abstract version(): Promise<number>;
@@ -407,7 +345,7 @@ export abstract class Table {
    *
    * Calling this method will set the table into time-travel mode. If you
    * wish to return to standard mode, call `checkoutLatest`.
-   * @param {number | string} version The version to checkout, could be version number or tag
+   * @param {number} version The version to checkout
    * @example
    * ```typescript
    * import * as lancedb from "@lancedb/lancedb"
@@ -423,8 +361,7 @@ export abstract class Table {
    * console.log(await table.version()); // 2
    * ```
    */
-  abstract checkout(version: number | string): Promise<void>;
-
+  abstract checkout(version: number): Promise<void>;
   /**
    * Checkout the latest version of the table. _This is an in-place operation._
    *
@@ -438,23 +375,6 @@ export abstract class Table {
    */
   abstract listVersions(): Promise<Version[]>;
 
-  /**
-   * Get a tags manager for this table.
-   *
-   * Tags allow you to label specific versions of a table with a human-readable name.
-   * The returned tags manager can be used to list, create, update, or delete tags.
-   *
-   * @returns {Tags} A tags manager for this table
-   * @example
-   * ```typescript
-   * const tagsManager = await table.tags();
-   * await tagsManager.create("v1", 1);
-   * const tags = await tagsManager.list();
-   * console.log(tags); // { "v1": { version: 1, manifestSize: ... } }
-   * ```
-   */
-  abstract tags(): Promise<Tags>;
-
   /**
    * Restore the table to the currently checked out version
    *
@@ -514,13 +434,6 @@ export abstract class Table {
    * Use {@link Table.listIndices} to find the names of the indices.
    */
   abstract indexStats(name: string): Promise<IndexStatistics | undefined>;
-
-  /** Returns table and fragment statistics
-   *
-   * @returns {TableStatistics} The table and fragment statistics
-   *
-   */
-  abstract stats(): Promise<TableStatistics>;
 }
 
 export class LocalTable extends Table {
@@ -560,12 +473,12 @@ export class LocalTable extends Table {
     return tbl.schema;
   }
 
-  async add(data: Data, options?: Partial<AddDataOptions>): Promise<AddResult> {
+  async add(data: Data, options?: Partial<AddDataOptions>): Promise<void> {
     const mode = options?.mode ?? "append";
     const schema = await this.schema();
 
     const buffer = await fromDataToBuffer(data, undefined, schema);
-    return await this.inner.add(buffer, mode);
+    await this.inner.add(buffer, mode);
   }
 
   async update(
@@ -578,7 +491,7 @@ export class LocalTable extends Table {
       valuesSql: Map<string, string> | Record<string, string>;
     } & Partial<UpdateOptions>),
     options?: Partial<UpdateOptions>,
-  ): Promise<UpdateResult> {
+  ) {
     const isValues =
       "values" in optsOrUpdates && typeof optsOrUpdates.values !== "string";
     const isValuesSql =
@@ -625,54 +538,38 @@ export class LocalTable extends Table {
       columns = Object.entries(optsOrUpdates as Record<string, string>);
       predicate = options?.where;
     }
-    return await this.inner.update(predicate, columns);
+    await this.inner.update(predicate, columns);
   }
 
   async countRows(filter?: string): Promise<number> {
     return await this.inner.countRows(filter);
   }
 
-  async delete(predicate: string): Promise<DeleteResult> {
-    return await this.inner.delete(predicate);
+  async delete(predicate: string): Promise<void> {
+    await this.inner.delete(predicate);
   }
 
   async createIndex(column: string, options?: Partial<IndexOptions>) {
     // Bit of a hack to get around the fact that TS has no package-scope.
     // biome-ignore lint/suspicious/noExplicitAny: skip
     const nativeIndex = (options?.config as any)?.inner;
-    await this.inner.createIndex(
-      nativeIndex,
-      column,
-      options?.replace,
-      options?.waitTimeoutSeconds,
-    );
+    await this.inner.createIndex(nativeIndex, column, options?.replace);
   }
 
   async dropIndex(name: string): Promise<void> {
     await this.inner.dropIndex(name);
   }
 
-  async prewarmIndex(name: string): Promise<void> {
-    await this.inner.prewarmIndex(name);
-  }
-
-  async waitForIndex(
-    indexNames: string[],
-    timeoutSeconds: number,
-  ): Promise<void> {
-    await this.inner.waitForIndex(indexNames, timeoutSeconds);
-  }
-
   query(): Query {
     return new Query(this.inner);
   }
 
   search(
-    query: string | IntoVector | FullTextQuery,
+    query: string | IntoVector,
     queryType: string = "auto",
     ftsColumns?: string | string[],
   ): VectorQuery | Query {
-    if (typeof query !== "string" && !instanceOfFullTextQuery(query)) {
+    if (typeof query !== "string") {
       if (queryType === "fts") {
         throw new Error("Cannot perform full text search on a vector query");
       }
@@ -688,10 +585,7 @@ export class LocalTable extends Table {
 
     // The query type is auto or vector
     // fall back to full text search if no embedding functions are defined and the query is a string
-    if (
-      queryType === "auto" &&
-      (getRegistry().length() === 0 || instanceOfFullTextQuery(query))
-    ) {
+    if (queryType === "auto" && getRegistry().length() === 0) {
       return this.query().fullTextSearch(query, {
         columns: ftsColumns,
       });
@@ -721,15 +615,11 @@ export class LocalTable extends Table {
 
   // TODO: Support BatchUDF
 
-  async addColumns(
-    newColumnTransforms: AddColumnsSql[],
-  ): Promise<AddColumnsResult> {
-    return await this.inner.addColumns(newColumnTransforms);
+  async addColumns(newColumnTransforms: AddColumnsSql[]): Promise<void> {
+    await this.inner.addColumns(newColumnTransforms);
   }
 
-  async alterColumns(
-    columnAlterations: ColumnAlteration[],
-  ): Promise<AlterColumnsResult> {
+  async alterColumns(columnAlterations: ColumnAlteration[]): Promise<void> {
     const processedAlterations = columnAlterations.map((alteration) => {
       if (typeof alteration.dataType === "string") {
         return {
@@ -750,22 +640,19 @@ export class LocalTable extends Table {
       }
     });
 
-    return await this.inner.alterColumns(processedAlterations);
+    await this.inner.alterColumns(processedAlterations);
  }
 
-  async dropColumns(columnNames: string[]): Promise<DropColumnsResult> {
-    return await this.inner.dropColumns(columnNames);
+  async dropColumns(columnNames: string[]): Promise<void> {
+    await this.inner.dropColumns(columnNames);
  }
 
   async version(): Promise<number> {
     return await this.inner.version();
  }
 
-  async checkout(version: number | string): Promise<void> {
-    if (typeof version === "string") {
-      return this.inner.checkoutTag(version);
-    }
-    return this.inner.checkout(version);
+  async checkout(version: number): Promise<void> {
+    await this.inner.checkout(version);
  }
 
   async checkoutLatest(): Promise<void> {
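Note the dispatch being dropped here: the 0.19.x `LocalTable.checkout` accepts either a version number or a tag name and routes strings to the native `checkoutTag`, which is what makes `table.checkout("tag1")` in the deleted tags tests work. Sketch:

```ts
import { type Table } from "@lancedb/lancedb";

// Sketch only: 0.19.x-side checkout accepts a number or a tag string.
async function pinTo(table: Table, target: number | string): Promise<void> {
  await table.checkout(target); // strings route to the native checkoutTag
  console.log(await table.version());
  await table.checkoutLatest(); // leave time-travel mode
}
```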
@@ -784,10 +671,6 @@ export class LocalTable extends Table {
     await this.inner.restore();
   }
 
-  async tags(): Promise<Tags> {
-    return await this.inner.tags();
-  }
-
   async optimize(options?: Partial<OptimizeOptions>): Promise<OptimizeStats> {
     let cleanupOlderThanMs;
     if (
@@ -818,11 +701,6 @@ export class LocalTable extends Table {
     }
     return stats;
   }
 
-  async stats(): Promise<TableStatistics> {
-    return await this.inner.stats();
-  }
-
   mergeInsert(on: string | string[]): MergeInsertBuilder {
     on = Array.isArray(on) ? on : [on];
     return new MergeInsertBuilder(this.inner.mergeInsert(on), this.schema());
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-arm64",
-  "version": "0.19.1-beta.5",
+  "version": "0.18.2-beta.1",
   "os": ["darwin"],
   "cpu": ["arm64"],
   "main": "lancedb.darwin-arm64.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-x64",
-  "version": "0.19.1-beta.5",
+  "version": "0.18.2-beta.1",
   "os": ["darwin"],
   "cpu": ["x64"],
   "main": "lancedb.darwin-x64.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-gnu",
-  "version": "0.19.1-beta.5",
+  "version": "0.18.2-beta.1",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-gnu.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-musl",
-  "version": "0.19.1-beta.5",
+  "version": "0.18.2-beta.1",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-musl.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-gnu",
-  "version": "0.19.1-beta.5",
+  "version": "0.18.2-beta.1",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-gnu.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-musl",
-  "version": "0.19.1-beta.5",
+  "version": "0.18.2-beta.1",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-musl.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-arm64-msvc",
-  "version": "0.19.1-beta.5",
+  "version": "0.18.2-beta.1",
   "os": [
     "win32"
   ],
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-x64-msvc",
-  "version": "0.19.1-beta.5",
+  "version": "0.18.2-beta.1",
   "os": ["win32"],
   "cpu": ["x64"],
   "main": "lancedb.win32-x64-msvc.node",
250
nodejs/package-lock.json
generated
@@ -1,12 +1,12 @@
 {
   "name": "@lancedb/lancedb",
-  "version": "0.19.1-beta.5",
+  "version": "0.18.2-beta.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@lancedb/lancedb",
-      "version": "0.19.1-beta.5",
+      "version": "0.18.2-beta.0",
       "cpu": [
         "x64",
         "arm64"
@@ -2304,20 +2304,89 @@
       }
     },
     "node_modules/@babel/code-frame": {
-      "version": "7.26.2",
-      "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz",
-      "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==",
+      "version": "7.23.5",
+      "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz",
+      "integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==",
       "dev": true,
-      "license": "MIT",
       "dependencies": {
-        "@babel/helper-validator-identifier": "^7.25.9",
-        "js-tokens": "^4.0.0",
-        "picocolors": "^1.0.0"
+        "@babel/highlight": "^7.23.4",
+        "chalk": "^2.4.2"
       },
       "engines": {
         "node": ">=6.9.0"
       }
     },
+    "node_modules/@babel/code-frame/node_modules/ansi-styles": {
+      "version": "3.2.1",
+      "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+      "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+      "dev": true,
+      "dependencies": {
+        "color-convert": "^1.9.0"
+      },
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/@babel/code-frame/node_modules/chalk": {
+      "version": "2.4.2",
+      "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+      "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+      "dev": true,
+      "dependencies": {
+        "ansi-styles": "^3.2.1",
+        "escape-string-regexp": "^1.0.5",
+        "supports-color": "^5.3.0"
+      },
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/@babel/code-frame/node_modules/color-convert": {
+      "version": "1.9.3",
+      "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+      "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+      "dev": true,
+      "dependencies": {
+        "color-name": "1.1.3"
+      }
+    },
+    "node_modules/@babel/code-frame/node_modules/color-name": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+      "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
+      "dev": true
+    },
+    "node_modules/@babel/code-frame/node_modules/escape-string-regexp": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+      "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
+      "dev": true,
+      "engines": {
+        "node": ">=0.8.0"
+      }
+    },
+    "node_modules/@babel/code-frame/node_modules/has-flag": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+      "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
+      "dev": true,
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/@babel/code-frame/node_modules/supports-color": {
+      "version": "5.5.0",
+      "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+      "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+      "dev": true,
+      "dependencies": {
+        "has-flag": "^3.0.0"
+      },
+      "engines": {
+        "node": ">=4"
+      }
+    },
     "node_modules/@babel/compat-data": {
       "version": "7.23.5",
       "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.5.tgz",
@@ -2520,21 +2589,19 @@
       }
     },
     "node_modules/@babel/helper-string-parser": {
-      "version": "7.25.9",
-      "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz",
-      "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==",
+      "version": "7.23.4",
+      "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz",
+      "integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==",
       "dev": true,
-      "license": "MIT",
       "engines": {
         "node": ">=6.9.0"
       }
     },
     "node_modules/@babel/helper-validator-identifier": {
-      "version": "7.25.9",
-      "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz",
-      "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==",
+      "version": "7.22.20",
+      "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz",
+      "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==",
       "dev": true,
-      "license": "MIT",
       "engines": {
         "node": ">=6.9.0"
       }
@@ -2549,28 +2616,109 @@
       }
     },
     "node_modules/@babel/helpers": {
-      "version": "7.27.0",
-      "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.0.tgz",
-      "integrity": "sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg==",
+      "version": "7.23.8",
+      "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.8.tgz",
+      "integrity": "sha512-KDqYz4PiOWvDFrdHLPhKtCThtIcKVy6avWD2oG4GEvyQ+XDZwHD4YQd+H2vNMnq2rkdxsDkU82T+Vk8U/WXHRQ==",
       "dev": true,
-      "license": "MIT",
       "dependencies": {
-        "@babel/template": "^7.27.0",
-        "@babel/types": "^7.27.0"
+        "@babel/template": "^7.22.15",
+        "@babel/traverse": "^7.23.7",
+        "@babel/types": "^7.23.6"
       },
       "engines": {
         "node": ">=6.9.0"
       }
     },
-    "node_modules/@babel/parser": {
-      "version": "7.27.0",
-      "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.0.tgz",
-      "integrity": "sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg==",
+    "node_modules/@babel/highlight": {
+      "version": "7.23.4",
+      "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz",
+      "integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==",
       "dev": true,
-      "license": "MIT",
       "dependencies": {
-        "@babel/types": "^7.27.0"
+        "@babel/helper-validator-identifier": "^7.22.20",
+        "chalk": "^2.4.2",
+        "js-tokens": "^4.0.0"
       },
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/highlight/node_modules/ansi-styles": {
+      "version": "3.2.1",
+      "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+      "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+      "dev": true,
+      "dependencies": {
+        "color-convert": "^1.9.0"
+      },
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/@babel/highlight/node_modules/chalk": {
+      "version": "2.4.2",
+      "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+      "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+      "dev": true,
+      "dependencies": {
+        "ansi-styles": "^3.2.1",
+        "escape-string-regexp": "^1.0.5",
+        "supports-color": "^5.3.0"
+      },
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/@babel/highlight/node_modules/color-convert": {
+      "version": "1.9.3",
+      "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+      "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+      "dev": true,
+      "dependencies": {
+        "color-name": "1.1.3"
+      }
+    },
+    "node_modules/@babel/highlight/node_modules/color-name": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+      "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
+      "dev": true
+    },
+    "node_modules/@babel/highlight/node_modules/escape-string-regexp": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+      "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
+      "dev": true,
+      "engines": {
+        "node": ">=0.8.0"
+      }
+    },
+    "node_modules/@babel/highlight/node_modules/has-flag": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+      "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
+      "dev": true,
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/@babel/highlight/node_modules/supports-color": {
+      "version": "5.5.0",
+      "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+      "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+      "dev": true,
+      "dependencies": {
+        "has-flag": "^3.0.0"
+      },
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/@babel/parser": {
+      "version": "7.23.6",
+      "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.6.tgz",
+      "integrity": "sha512-Z2uID7YJ7oNvAI20O9X0bblw7Qqs8Q2hFy0R9tAfnfLkp5MW0UH9eUvnDSnFwKZ0AvgS1ucqR4KzvVHgnke1VQ==",
+      "dev": true,
       "bin": {
         "parser": "bin/babel-parser.js"
       },
@@ -2756,15 +2904,14 @@
       }
     },
     "node_modules/@babel/template": {
-      "version": "7.27.0",
-      "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.0.tgz",
-      "integrity": "sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA==",
+      "version": "7.22.15",
+      "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz",
+      "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==",
       "dev": true,
-      "license": "MIT",
       "dependencies": {
-        "@babel/code-frame": "^7.26.2",
-        "@babel/parser": "^7.27.0",
-        "@babel/types": "^7.27.0"
+        "@babel/code-frame": "^7.22.13",
+        "@babel/parser": "^7.22.15",
+        "@babel/types": "^7.22.15"
       },
       "engines": {
         "node": ">=6.9.0"
@@ -2801,14 +2948,14 @@
       }
     },
     "node_modules/@babel/types": {
-      "version": "7.27.0",
-      "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.0.tgz",
-      "integrity": "sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg==",
+      "version": "7.23.6",
+      "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.6.tgz",
+      "integrity": "sha512-+uarb83brBzPKN38NX1MkB6vb6+mwvR6amUulqAE7ccQw1pEl+bCia9TbdG1lsnFP7lZySvUn37CHyXQdfTwzg==",
       "dev": true,
-      "license": "MIT",
       "dependencies": {
-        "@babel/helper-string-parser": "^7.25.9",
-        "@babel/helper-validator-identifier": "^7.25.9"
+        "@babel/helper-string-parser": "^7.23.4",
+        "@babel/helper-validator-identifier": "^7.22.20",
+        "to-fast-properties": "^2.0.0"
      },
      "engines": {
        "node": ">=6.9.0"
@@ -5403,11 +5550,10 @@
       "devOptional": true
     },
     "node_modules/axios": {
-      "version": "1.8.4",
-      "resolved": "https://registry.npmjs.org/axios/-/axios-1.8.4.tgz",
-      "integrity": "sha512-eBSYY4Y68NNlHbHBMdeDmKNtDgXWhQsJcGqzO3iLUM0GraQFSS9cVgPX5I9b3lbdFKyYoAEGAZF1DwhTaljNAw==",
+      "version": "1.7.7",
+      "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.7.tgz",
+      "integrity": "sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q==",
       "dev": true,
-      "license": "MIT",
       "dependencies": {
         "follow-redirects": "^1.15.6",
         "form-data": "^4.0.0",
@@ -7723,8 +7869,7 @@
       "version": "4.0.0",
       "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
       "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
-      "dev": true,
-      "license": "MIT"
+      "dev": true
     },
     "node_modules/js-yaml": {
       "version": "3.14.1",
@@ -9215,6 +9360,15 @@
       "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==",
       "dev": true
     },
+    "node_modules/to-fast-properties": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+      "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==",
+      "dev": true,
+      "engines": {
+        "node": ">=4"
+      }
+    },
     "node_modules/to-regex-range": {
       "version": "5.0.1",
       "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
@@ -11,7 +11,7 @@
     "ann"
   ],
   "private": false,
-  "version": "0.19.1-beta.5",
+  "version": "0.18.2-beta.1",
   "main": "dist/index.js",
   "exports": {
     ".": "./dist/index.js",
@@ -29,7 +29,6 @@
       "aarch64-apple-darwin",
       "x86_64-unknown-linux-gnu",
       "aarch64-unknown-linux-gnu",
-      "x86_64-unknown-linux-musl",
      "aarch64-unknown-linux-musl",
      "x86_64-pc-windows-msvc",
      "aarch64-pc-windows-msvc"
@@ -1,13 +1,11 @@
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors

-use std::time::Duration;
-
 use lancedb::{arrow::IntoArrow, ipc::ipc_file_to_batches, table::merge::MergeInsertBuilder};
 use napi::bindgen_prelude::*;
 use napi_derive::napi;

-use crate::{error::convert_error, table::MergeResult};
+use crate::error::convert_error;

 #[napi]
 #[derive(Clone)]
@@ -38,13 +36,8 @@ impl NativeMergeInsertBuilder {
         this
     }

-    #[napi]
-    pub fn set_timeout(&mut self, timeout: u32) {
-        self.inner.timeout(Duration::from_millis(timeout as u64));
-    }
-
     #[napi(catch_unwind)]
-    pub async fn execute(&self, buf: Buffer) -> napi::Result<MergeResult> {
+    pub async fn execute(&self, buf: Buffer) -> napi::Result<()> {
         let data = ipc_file_to_batches(buf.to_vec())
             .and_then(IntoArrow::into_arrow)
             .map_err(|e| {
@@ -53,13 +46,12 @@ impl NativeMergeInsertBuilder {

         let this = self.clone();

-        let res = this.inner.execute(data).await.map_err(|e| {
+        this.inner.execute(data).await.map_err(|e| {
             napi::Error::from_reason(format!(
                 "Failed to execute merge insert: {}",
                 convert_error(&e)
             ))
-        })?;
-        Ok(res.into())
+        })
     }
 }

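The set_timeout binding removed in this hunk forwards a millisecond budget from JavaScript (a u32) into the Rust builder as a Duration, and on the same side execute() resolves to merge statistics instead of undefined. A hedged TypeScript sketch of how a caller sees that (napi-rs converts snake_case to camelCase; whether the wrapper re-exports setTimeout directly is an assumption, and the table handle and rows are illustrative):

const rows = [{ id: 1, vector: [0.5, 0.6] }];
const builder = tbl
  .mergeInsert("id")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll();

// Assumed surface: 30_000 ms becomes Duration::from_millis(30_000) on the Rust side.
builder.setTimeout(30_000);

const result = await builder.execute(rows);
// Mirrors MergeResult { version, num_inserted_rows, num_updated_rows, num_deleted_rows }.
console.log(result.numInsertedRows, result.numUpdatedRows, result.numDeletedRows);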
@@ -3,9 +3,7 @@

 use std::sync::Arc;

-use lancedb::index::scalar::{
-    BoostQuery, FtsQuery, FullTextSearchQuery, MatchQuery, MultiMatchQuery, PhraseQuery,
-};
+use lancedb::index::scalar::FullTextSearchQuery;
 use lancedb::query::ExecutableQuery;
 use lancedb::query::Query as LanceDbQuery;
 use lancedb::query::QueryBase;
@@ -40,10 +38,9 @@ impl Query {
     }

     #[napi]
-    pub fn full_text_search(&mut self, query: napi::JsObject) -> napi::Result<()> {
-        let query = parse_fts_query(query)?;
+    pub fn full_text_search(&mut self, query: String, columns: Option<Vec<String>>) {
+        let query = FullTextSearchQuery::new(query).columns(columns);
         self.inner = self.inner.clone().full_text_search(query);
-        Ok(())
     }

     #[napi]
@@ -90,15 +87,11 @@ impl Query {
     pub async fn execute(
         &self,
         max_batch_length: Option<u32>,
-        timeout_ms: Option<u32>,
     ) -> napi::Result<RecordBatchIterator> {
         let mut execution_opts = QueryExecutionOptions::default();
         if let Some(max_batch_length) = max_batch_length {
             execution_opts.max_batch_length = max_batch_length;
         }
-        if let Some(timeout_ms) = timeout_ms {
-            execution_opts.timeout = Some(std::time::Duration::from_millis(timeout_ms as u64))
-        }
         let inner_stream = self
             .inner
             .execute_with_options(execution_opts)
@@ -121,16 +114,6 @@ impl Query {
             ))
         })
     }
-
-    #[napi(catch_unwind)]
-    pub async fn analyze_plan(&self) -> napi::Result<String> {
-        self.inner.analyze_plan().await.map_err(|e| {
-            napi::Error::from_reason(format!(
-                "Failed to execute analyze plan: {}",
-                convert_error(&e)
-            ))
-        })
-    }
 }

 #[napi]
@@ -202,10 +185,9 @@ impl VectorQuery {
     }

     #[napi]
-    pub fn full_text_search(&mut self, query: napi::JsObject) -> napi::Result<()> {
-        let query = parse_fts_query(query)?;
+    pub fn full_text_search(&mut self, query: String, columns: Option<Vec<String>>) {
+        let query = FullTextSearchQuery::new(query).columns(columns);
         self.inner = self.inner.clone().full_text_search(query);
-        Ok(())
     }

     #[napi]
@@ -250,15 +232,11 @@ impl VectorQuery {
     pub async fn execute(
         &self,
         max_batch_length: Option<u32>,
-        timeout_ms: Option<u32>,
     ) -> napi::Result<RecordBatchIterator> {
         let mut execution_opts = QueryExecutionOptions::default();
         if let Some(max_batch_length) = max_batch_length {
             execution_opts.max_batch_length = max_batch_length;
         }
-        if let Some(timeout_ms) = timeout_ms {
-            execution_opts.timeout = Some(std::time::Duration::from_millis(timeout_ms as u64))
-        }
         let inner_stream = self
             .inner
             .execute_with_options(execution_opts)
@@ -281,127 +259,4 @@ impl VectorQuery {
             ))
         })
     }
-
-    #[napi(catch_unwind)]
-    pub async fn analyze_plan(&self) -> napi::Result<String> {
-        self.inner.analyze_plan().await.map_err(|e| {
-            napi::Error::from_reason(format!(
-                "Failed to execute analyze plan: {}",
-                convert_error(&e)
-            ))
-        })
-    }
-}
-
-#[napi]
-#[derive(Debug, Clone)]
-pub struct JsFullTextQuery {
-    pub(crate) inner: FtsQuery,
-}
-
-#[napi]
-impl JsFullTextQuery {
-    #[napi(factory)]
-    pub fn match_query(
-        query: String,
-        column: String,
-        boost: f64,
-        fuzziness: Option<u32>,
-        max_expansions: u32,
-    ) -> napi::Result<Self> {
-        Ok(Self {
-            inner: MatchQuery::new(query)
-                .with_column(Some(column))
-                .with_boost(boost as f32)
-                .with_fuzziness(fuzziness)
-                .with_max_expansions(max_expansions as usize)
-                .into(),
-        })
-    }
-
-    #[napi(factory)]
-    pub fn phrase_query(query: String, column: String) -> napi::Result<Self> {
-        Ok(Self {
-            inner: PhraseQuery::new(query).with_column(Some(column)).into(),
-        })
-    }
-
-    #[napi(factory)]
-    #[allow(clippy::use_self)] // NAPI doesn't allow Self here but clippy reports it
-    pub fn boost_query(
-        positive: &JsFullTextQuery,
-        negative: &JsFullTextQuery,
-        negative_boost: Option<f64>,
-    ) -> napi::Result<Self> {
-        Ok(Self {
-            inner: BoostQuery::new(
-                positive.inner.clone(),
-                negative.inner.clone(),
-                negative_boost.map(|v| v as f32),
-            )
-            .into(),
-        })
-    }
-
-    #[napi(factory)]
-    pub fn multi_match_query(
-        query: String,
-        columns: Vec<String>,
-        boosts: Option<Vec<f64>>,
-    ) -> napi::Result<Self> {
-        let q = match boosts {
-            Some(boosts) => MultiMatchQuery::try_new(query, columns)
-                .and_then(|q| q.try_with_boosts(boosts.into_iter().map(|v| v as f32).collect())),
-            None => MultiMatchQuery::try_new(query, columns),
-        }
-        .map_err(|e| {
-            napi::Error::from_reason(format!("Failed to create multi match query: {}", e))
-        })?;
-
-        Ok(Self { inner: q.into() })
-    }
-}
-
-fn parse_fts_query(query: napi::JsObject) -> napi::Result<FullTextSearchQuery> {
-    if let Ok(Some(query)) = query.get::<_, &JsFullTextQuery>("query") {
-        Ok(FullTextSearchQuery::new_query(query.inner.clone()))
-    } else if let Ok(Some(query_text)) = query.get::<_, String>("query") {
-        let mut query_text = query_text;
-        let columns = query.get::<_, Option<Vec<String>>>("columns")?.flatten();
-
-        let is_phrase =
-            query_text.len() >= 2 && query_text.starts_with('"') && query_text.ends_with('"');
-        let is_multi_match = columns.as_ref().map(|cols| cols.len() > 1).unwrap_or(false);
-
-        if is_phrase {
-            // Remove the surrounding quotes for phrase queries
-            query_text = query_text[1..query_text.len() - 1].to_string();
-        }
-
-        let query: FtsQuery = match (is_phrase, is_multi_match) {
-            (false, _) => MatchQuery::new(query_text).into(),
-            (true, false) => PhraseQuery::new(query_text).into(),
-            (true, true) => {
-                return Err(napi::Error::from_reason(
-                    "Phrase queries cannot be used with multiple columns.",
-                ));
-            }
-        };
-        let mut query = FullTextSearchQuery::new_query(query);
-        if let Some(cols) = columns {
-            if !cols.is_empty() {
-                query = query.with_columns(&cols).map_err(|e| {
-                    napi::Error::from_reason(format!(
-                        "Failed to set full text search columns: {}",
-                        e
-                    ))
-                })?;
-            }
-        }
-        Ok(query)
-    } else {
-        Err(napi::Error::from_reason(
-            "Invalid full text search query object".to_string(),
-        ))
-    }
-}
 }
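The two full_text_search signatures in this file correspond to two JavaScript call shapes: a plain query string with an optional column list, and a structured query object that parse_fts_query unpacks into match, phrase, boost, or multi-match queries (a string wrapped in double quotes is promoted to a phrase query, and phrase queries refuse multiple columns). A sketch of the string shape as a caller might write it (a minimal sketch against the @lancedb/lancedb query API; the table and column names are illustrative):

import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("data/sample-lancedb");
const tbl = await db.openTable("my_table");

// A bare string is parsed as a match query over the given columns.
const matches = await tbl
  .query()
  .fullTextSearch("golden retriever", { columns: ["description"] })
  .limit(10)
  .toArray();

// Wrapping the string in double quotes turns it into a phrase query,
// which must target a single column.
const phrase = await tbl
  .query()
  .fullTextSearch('"golden retriever"', { columns: ["description"] })
  .limit(10)
  .toArray();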
@@ -75,7 +75,7 @@ impl Table {
     }

     #[napi(catch_unwind)]
-    pub async fn add(&self, buf: Buffer, mode: String) -> napi::Result<AddResult> {
+    pub async fn add(&self, buf: Buffer, mode: String) -> napi::Result<()> {
         let batches = ipc_file_to_batches(buf.to_vec())
             .map_err(|e| napi::Error::from_reason(format!("Failed to read IPC file: {}", e)))?;
         let mut op = self.inner_ref()?.add(batches);
@@ -88,8 +88,7 @@ impl Table {
             return Err(napi::Error::from_reason(format!("Invalid mode: {}", mode)));
         };

-        let res = op.execute().await.default_error()?;
-        Ok(res.into())
+        op.execute().await.default_error()
     }

     #[napi(catch_unwind)]
@@ -102,9 +101,8 @@ impl Table {
     }

     #[napi(catch_unwind)]
-    pub async fn delete(&self, predicate: String) -> napi::Result<DeleteResult> {
-        let res = self.inner_ref()?.delete(&predicate).await.default_error()?;
-        Ok(res.into())
+    pub async fn delete(&self, predicate: String) -> napi::Result<()> {
+        self.inner_ref()?.delete(&predicate).await.default_error()
     }

     #[napi(catch_unwind)]
@@ -113,7 +111,6 @@ impl Table {
         index: Option<&Index>,
         column: String,
         replace: Option<bool>,
-        wait_timeout_s: Option<i64>,
     ) -> napi::Result<()> {
         let lancedb_index = if let Some(index) = index {
             index.consume()?
@@ -124,10 +121,6 @@ impl Table {
         if let Some(replace) = replace {
             builder = builder.replace(replace);
         }
-        if let Some(timeout) = wait_timeout_s {
-            builder =
-                builder.wait_timeout(std::time::Duration::from_secs(timeout.try_into().unwrap()));
-        }
         builder.execute().await.default_error()
     }

@@ -139,38 +132,12 @@ impl Table {
         .default_error()
     }

-    #[napi(catch_unwind)]
-    pub async fn prewarm_index(&self, index_name: String) -> napi::Result<()> {
-        self.inner_ref()?
-            .prewarm_index(&index_name)
-            .await
-            .default_error()
-    }
-
-    #[napi(catch_unwind)]
-    pub async fn wait_for_index(&self, index_names: Vec<String>, timeout_s: i64) -> Result<()> {
-        let timeout = std::time::Duration::from_secs(timeout_s.try_into().unwrap());
-        let index_names: Vec<&str> = index_names.iter().map(|s| s.as_str()).collect();
-        let slice: &[&str] = &index_names;
-
-        self.inner_ref()?
-            .wait_for_index(slice, timeout)
-            .await
-            .default_error()
-    }
-
-    #[napi(catch_unwind)]
-    pub async fn stats(&self) -> Result<TableStatistics> {
-        let stats = self.inner_ref()?.stats().await.default_error()?;
-        Ok(stats.into())
-    }
-
     #[napi(catch_unwind)]
     pub async fn update(
         &self,
         only_if: Option<String>,
         columns: Vec<(String, String)>,
-    ) -> napi::Result<UpdateResult> {
+    ) -> napi::Result<u64> {
         let mut op = self.inner_ref()?.update();
         if let Some(only_if) = only_if {
             op = op.only_if(only_if);
@@ -178,8 +145,7 @@ impl Table {
         for (column_name, value) in columns {
             op = op.column(column_name, value);
         }
-        let res = op.execute().await.default_error()?;
-        Ok(res.into())
+        op.execute().await.default_error()
     }

     #[napi(catch_unwind)]
@@ -193,28 +159,21 @@ impl Table {
     }

     #[napi(catch_unwind)]
-    pub async fn add_columns(
-        &self,
-        transforms: Vec<AddColumnsSql>,
-    ) -> napi::Result<AddColumnsResult> {
+    pub async fn add_columns(&self, transforms: Vec<AddColumnsSql>) -> napi::Result<()> {
         let transforms = transforms
             .into_iter()
             .map(|sql| (sql.name, sql.value_sql))
             .collect::<Vec<_>>();
         let transforms = NewColumnTransform::SqlExpressions(transforms);
-        let res = self
-            .inner_ref()?
+        self.inner_ref()?
             .add_columns(transforms, None)
             .await
             .default_error()?;
-        Ok(res.into())
+        Ok(())
     }

     #[napi(catch_unwind)]
-    pub async fn alter_columns(
-        &self,
-        alterations: Vec<ColumnAlteration>,
-    ) -> napi::Result<AlterColumnsResult> {
+    pub async fn alter_columns(&self, alterations: Vec<ColumnAlteration>) -> napi::Result<()> {
         for alteration in &alterations {
             if alteration.rename.is_none()
                 && alteration.nullable.is_none()
@@ -231,23 +190,21 @@ impl Table {
             .collect::<std::result::Result<Vec<_>, String>>()
             .map_err(napi::Error::from_reason)?;

-        let res = self
-            .inner_ref()?
+        self.inner_ref()?
             .alter_columns(&alterations)
             .await
             .default_error()?;
-        Ok(res.into())
+        Ok(())
     }

     #[napi(catch_unwind)]
-    pub async fn drop_columns(&self, columns: Vec<String>) -> napi::Result<DropColumnsResult> {
+    pub async fn drop_columns(&self, columns: Vec<String>) -> napi::Result<()> {
         let col_refs = columns.iter().map(String::as_str).collect::<Vec<_>>();
-        let res = self
-            .inner_ref()?
+        self.inner_ref()?
             .drop_columns(&col_refs)
             .await
             .default_error()?;
-        Ok(res.into())
+        Ok(())
     }

     #[napi(catch_unwind)]
@@ -267,14 +224,6 @@ impl Table {
         .default_error()
     }

-    #[napi(catch_unwind)]
-    pub async fn checkout_tag(&self, tag: String) -> napi::Result<()> {
-        self.inner_ref()?
-            .checkout_tag(tag.as_str())
-            .await
-            .default_error()
-    }
-
     #[napi(catch_unwind)]
     pub async fn checkout_latest(&self) -> napi::Result<()> {
         self.inner_ref()?.checkout_latest().await.default_error()
@@ -307,13 +256,6 @@ impl Table {
         self.inner_ref()?.restore().await.default_error()
     }

-    #[napi(catch_unwind)]
-    pub async fn tags(&self) -> napi::Result<Tags> {
-        Ok(Tags {
-            inner: self.inner_ref()?.clone(),
-        })
-    }
-
     #[napi(catch_unwind)]
     pub async fn optimize(
         &self,
@@ -573,257 +515,9 @@ impl From<lancedb::index::IndexStatistics> for IndexStatistics {
     }
 }

-#[napi(object)]
-pub struct TableStatistics {
-    /// The total number of bytes in the table
-    pub total_bytes: i64,
-
-    /// The number of rows in the table
-    pub num_rows: i64,
-
-    /// The number of indices in the table
-    pub num_indices: i64,
-
-    /// Statistics on table fragments
-    pub fragment_stats: FragmentStatistics,
-}
-
-#[napi(object)]
-pub struct FragmentStatistics {
-    /// The number of fragments in the table
-    pub num_fragments: i64,
-
-    /// The number of uncompacted fragments in the table
-    pub num_small_fragments: i64,
-
-    /// Statistics on the number of rows in the table fragments
-    pub lengths: FragmentSummaryStats,
-}
-
-#[napi(object)]
-pub struct FragmentSummaryStats {
-    /// The number of rows in the fragment with the fewest rows
-    pub min: i64,
-
-    /// The number of rows in the fragment with the most rows
-    pub max: i64,
-
-    /// The mean number of rows in the fragments
-    pub mean: i64,
-
-    /// The 25th percentile of number of rows in the fragments
-    pub p25: i64,
-
-    /// The 50th percentile of number of rows in the fragments
-    pub p50: i64,
-
-    /// The 75th percentile of number of rows in the fragments
-    pub p75: i64,
-
-    /// The 99th percentile of number of rows in the fragments
-    pub p99: i64,
-}
-
-impl From<lancedb::table::TableStatistics> for TableStatistics {
-    fn from(v: lancedb::table::TableStatistics) -> Self {
-        Self {
-            total_bytes: v.total_bytes as i64,
-            num_rows: v.num_rows as i64,
-            num_indices: v.num_indices as i64,
-            fragment_stats: FragmentStatistics {
-                num_fragments: v.fragment_stats.num_fragments as i64,
-                num_small_fragments: v.fragment_stats.num_small_fragments as i64,
-                lengths: FragmentSummaryStats {
-                    min: v.fragment_stats.lengths.min as i64,
-                    max: v.fragment_stats.lengths.max as i64,
-                    mean: v.fragment_stats.lengths.mean as i64,
-                    p25: v.fragment_stats.lengths.p25 as i64,
-                    p50: v.fragment_stats.lengths.p50 as i64,
-                    p75: v.fragment_stats.lengths.p75 as i64,
-                    p99: v.fragment_stats.lengths.p99 as i64,
-                },
-            },
-        }
-    }
-}
-
 #[napi(object)]
 pub struct Version {
     pub version: i64,
     pub timestamp: i64,
     pub metadata: HashMap<String, String>,
 }
-
-#[napi(object)]
-pub struct UpdateResult {
-    pub rows_updated: i64,
-    pub version: i64,
-}
-
-impl From<lancedb::table::UpdateResult> for UpdateResult {
-    fn from(value: lancedb::table::UpdateResult) -> Self {
-        Self {
-            rows_updated: value.rows_updated as i64,
-            version: value.version as i64,
-        }
-    }
-}
-
-#[napi(object)]
-pub struct AddResult {
-    pub version: i64,
-}
-
-impl From<lancedb::table::AddResult> for AddResult {
-    fn from(value: lancedb::table::AddResult) -> Self {
-        Self {
-            version: value.version as i64,
-        }
-    }
-}
-
-#[napi(object)]
-pub struct DeleteResult {
-    pub version: i64,
-}
-
-impl From<lancedb::table::DeleteResult> for DeleteResult {
-    fn from(value: lancedb::table::DeleteResult) -> Self {
-        Self {
-            version: value.version as i64,
-        }
-    }
-}
-
-#[napi(object)]
-pub struct MergeResult {
-    pub version: i64,
-    pub num_inserted_rows: i64,
-    pub num_updated_rows: i64,
-    pub num_deleted_rows: i64,
-}
-
-impl From<lancedb::table::MergeResult> for MergeResult {
-    fn from(value: lancedb::table::MergeResult) -> Self {
-        Self {
-            version: value.version as i64,
-            num_inserted_rows: value.num_inserted_rows as i64,
-            num_updated_rows: value.num_updated_rows as i64,
-            num_deleted_rows: value.num_deleted_rows as i64,
-        }
-    }
-}
-
-#[napi(object)]
-pub struct AddColumnsResult {
-    pub version: i64,
-}
-
-impl From<lancedb::table::AddColumnsResult> for AddColumnsResult {
-    fn from(value: lancedb::table::AddColumnsResult) -> Self {
-        Self {
-            version: value.version as i64,
-        }
-    }
-}
-
-#[napi(object)]
-pub struct AlterColumnsResult {
-    pub version: i64,
-}
-
-impl From<lancedb::table::AlterColumnsResult> for AlterColumnsResult {
-    fn from(value: lancedb::table::AlterColumnsResult) -> Self {
-        Self {
-            version: value.version as i64,
-        }
-    }
-}
-
-#[napi(object)]
-pub struct DropColumnsResult {
-    pub version: i64,
-}
-
-impl From<lancedb::table::DropColumnsResult> for DropColumnsResult {
-    fn from(value: lancedb::table::DropColumnsResult) -> Self {
-        Self {
-            version: value.version as i64,
-        }
-    }
-}
-
-#[napi]
-pub struct TagContents {
-    pub version: i64,
-    pub manifest_size: i64,
-}
-
-#[napi]
-pub struct Tags {
-    inner: LanceDbTable,
-}
-
-#[napi]
-impl Tags {
-    #[napi]
-    pub async fn list(&self) -> napi::Result<HashMap<String, TagContents>> {
-        let rust_tags = self.inner.tags().await.default_error()?;
-        let tag_list = rust_tags.as_ref().list().await.default_error()?;
-        let tag_contents = tag_list
-            .into_iter()
-            .map(|(k, v)| {
-                (
-                    k,
-                    TagContents {
-                        version: v.version as i64,
-                        manifest_size: v.manifest_size as i64,
-                    },
-                )
-            })
-            .collect();
-
-        Ok(tag_contents)
-    }
-
-    #[napi]
-    pub async fn get_version(&self, tag: String) -> napi::Result<i64> {
-        let rust_tags = self.inner.tags().await.default_error()?;
-        rust_tags
-            .as_ref()
-            .get_version(tag.as_str())
-            .await
-            .map(|v| v as i64)
-            .default_error()
-    }
-
-    #[napi]
-    pub async unsafe fn create(&mut self, tag: String, version: i64) -> napi::Result<()> {
-        let mut rust_tags = self.inner.tags().await.default_error()?;
-        rust_tags
-            .as_mut()
-            .create(tag.as_str(), version as u64)
-            .await
-            .default_error()
-    }
-
-    #[napi]
-    pub async unsafe fn delete(&mut self, tag: String) -> napi::Result<()> {
-        let mut rust_tags = self.inner.tags().await.default_error()?;
-        rust_tags
-            .as_mut()
-            .delete(tag.as_str())
-            .await
-            .default_error()
-    }
-
-    #[napi]
-    pub async unsafe fn update(&mut self, tag: String, version: i64) -> napi::Result<()> {
-        let mut rust_tags = self.inner.tags().await.default_error()?;
-        rust_tags
-            .as_mut()
-            .update(tag.as_str(), version as u64)
-            .await
-            .default_error()
-    }
-}
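The Tags wrapper removed above is a thin handle over the table's tag registry: each method reopens the underlying tags() accessor and forwards to list, get_version, create, delete, or update, converting versions between i64 and u64 at the boundary. On the side of the compare that carries it, a JavaScript caller would see the napi-converted camelCase methods; a hedged TypeScript sketch (the table handle is illustrative):

// Pin a human-readable tag to a table version, then resolve it later.
const tags = await tbl.tags();
await tags.create("prod", 42);                // tag version 42 as "prod"
console.log(await tags.getVersion("prod"));   // -> 42
const all = await tags.list();                // { prod: { version, manifestSize } }
await tags.update("prod", 43);                // move the tag forward
await tags.delete("prod");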
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.22.1"
+current_version = "0.21.2"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.

@@ -1,6 +1,6 @@
 [package]
 name = "lancedb-python"
-version = "0.22.1"
+version = "0.21.2"
 edition.workspace = true
 description = "Python bindings for LanceDB"
 license.workspace = true
@@ -4,12 +4,11 @@ name = "lancedb"
 dynamic = ["version"]
 dependencies = [
     "deprecation",
-    "numpy",
-    "overrides>=0.7",
-    "packaging",
-    "pyarrow>=16",
-    "pydantic>=1.10",
     "tqdm>=4.27.0",
+    "pyarrow>=14",
+    "pydantic>=1.10",
+    "packaging",
+    "overrides>=0.7",
 ]
 description = "lancedb"
 authors = [{ name = "LanceDB Devs", email = "dev@lancedb.com" }]
@@ -43,9 +42,6 @@ classifiers = [
 repository = "https://github.com/lancedb/lancedb"

 [project.optional-dependencies]
-pylance = [
-    "pylance>=0.25",
-]
 tests = [
     "aiohttp",
     "boto3",
@@ -58,8 +54,7 @@ tests = [
     "polars>=0.19, <=1.3.0",
     "tantivy",
     "pyarrow-stubs",
-    "pylance>=0.25",
-    "requests",
+    "pylance>=0.23.2",
 ]
 dev = [
     "ruff",
@@ -77,7 +72,6 @@ embeddings = [
     "pillow",
     "open-clip-torch",
     "cohere",
-    "colpali-engine>=0.3.10",
     "huggingface_hub",
     "InstructorEmbedding",
     "google.generativeai",
@@ -1,5 +1,4 @@
-from datetime import timedelta
-from typing import Dict, List, Optional, Tuple, Any, TypedDict, Union, Literal
+from typing import Dict, List, Optional, Tuple, Any, Union, Literal

 import pyarrow as pa

@@ -36,10 +35,8 @@ class Table:
     async def schema(self) -> pa.Schema: ...
     async def add(
         self, data: pa.RecordBatchReader, mode: Literal["append", "overwrite"]
-    ) -> AddResult: ...
-    async def update(
-        self, updates: Dict[str, str], where: Optional[str]
-    ) -> UpdateResult: ...
+    ) -> None: ...
+    async def update(self, updates: Dict[str, str], where: Optional[str]) -> None: ...
     async def count_rows(self, filter: Optional[str]) -> int: ...
     async def create_index(
         self,
@@ -49,34 +46,22 @@ class Table:
     ): ...
     async def list_versions(self) -> List[Dict[str, Any]]: ...
     async def version(self) -> int: ...
-    async def checkout(self, version: Union[int, str]): ...
+    async def checkout(self, version: int): ...
     async def checkout_latest(self): ...
-    async def restore(self, version: Optional[Union[int, str]] = None): ...
+    async def restore(self): ...
     async def list_indices(self) -> list[IndexConfig]: ...
-    async def delete(self, filter: str) -> DeleteResult: ...
-    async def add_columns(self, columns: list[tuple[str, str]]) -> AddColumnsResult: ...
-    async def add_columns_with_schema(self, schema: pa.Schema) -> AddColumnsResult: ...
-    async def alter_columns(
-        self, columns: list[dict[str, Any]]
-    ) -> AlterColumnsResult: ...
+    async def delete(self, filter: str): ...
+    async def add_columns(self, columns: list[tuple[str, str]]) -> None: ...
+    async def alter_columns(self, columns: list[dict[str, Any]]) -> None: ...
     async def optimize(
         self,
         *,
         cleanup_since_ms: Optional[int] = None,
         delete_unverified: Optional[bool] = None,
     ) -> OptimizeStats: ...
-    @property
-    def tags(self) -> Tags: ...
     def query(self) -> Query: ...
     def vector_search(self) -> VectorQuery: ...

-class Tags:
-    async def list(self) -> Dict[str, Tag]: ...
-    async def get_version(self, tag: str) -> int: ...
-    async def create(self, tag: str, version: int): ...
-    async def delete(self, tag: str): ...
-    async def update(self, tag: str, version: int): ...
-
 class IndexConfig:
     index_type: str
     columns: List[str]
@@ -108,11 +93,7 @@ class Query:
     def postfilter(self): ...
     def nearest_to(self, query_vec: pa.Array) -> VectorQuery: ...
     def nearest_to_text(self, query: dict) -> FTSQuery: ...
-    async def execute(
-        self, max_batch_length: Optional[int], timeout: Optional[timedelta]
-    ) -> RecordBatchStream: ...
-    async def explain_plan(self, verbose: Optional[bool]) -> str: ...
-    async def analyze_plan(self) -> str: ...
+    async def execute(self, max_batch_length: Optional[int]) -> RecordBatchStream: ...
     def to_query_request(self) -> PyQueryRequest: ...

 class FTSQuery:
@@ -126,9 +107,8 @@ class FTSQuery:
     def get_query(self) -> str: ...
     def add_query_vector(self, query_vec: pa.Array) -> None: ...
     def nearest_to(self, query_vec: pa.Array) -> HybridQuery: ...
-    async def execute(
-        self, max_batch_length: Optional[int], timeout: Optional[timedelta]
-    ) -> RecordBatchStream: ...
+    async def execute(self, max_batch_length: Optional[int]) -> RecordBatchStream: ...
+    async def explain_plan(self) -> str: ...
     def to_query_request(self) -> PyQueryRequest: ...

 class VectorQuery:
@@ -208,32 +188,3 @@ class RemovalStats:
 class OptimizeStats:
     compaction: CompactionStats
     prune: RemovalStats
-
-class Tag(TypedDict):
-    version: int
-    manifest_size: int
-
-class AddResult:
-    version: int
-
-class DeleteResult:
-    version: int
-
-class UpdateResult:
-    rows_updated: int
-    version: int
-
-class MergeResult:
-    version: int
-    num_updated_rows: int
-    num_inserted_rows: int
-    num_deleted_rows: int
-
-class AddColumnsResult:
-    version: int
-
-class AlterColumnsResult:
-    version: int
-
-class DropColumnsResult:
-    version: int
@@ -9,7 +9,7 @@ import numpy as np
 import pyarrow as pa
 import pyarrow.dataset

-from .dependencies import _check_for_pandas, pandas as pd
+from .dependencies import pandas as pd

 DATA = Union[List[dict], "pd.DataFrame", pa.Table, Iterable[pa.RecordBatch]]
 VEC = Union[list, np.ndarray, pa.Array, pa.ChunkedArray]
@@ -63,7 +63,7 @@ def data_to_reader(
     data: DATA, schema: Optional[pa.Schema] = None
 ) -> pa.RecordBatchReader:
     """Convert various types of input into a RecordBatchReader"""
-    if _check_for_pandas(data) and isinstance(data, pd.DataFrame):
+    if pd is not None and isinstance(data, pd.DataFrame):
         return pa.Table.from_pandas(data, schema=schema).to_reader()
     elif isinstance(data, pa.Table):
         return data.to_reader()
@@ -19,4 +19,3 @@ from .imagebind import ImageBindEmbeddings
 from .jinaai import JinaEmbeddings
 from .watsonx import WatsonxEmbeddings
 from .voyageai import VoyageAIEmbeddingFunction
-from .colpali import ColPaliEmbeddings
@@ -1,255 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# SPDX-FileCopyrightText: Copyright The LanceDB Authors
-
-
-from functools import lru_cache
-from typing import List, Union, Optional, Any
-import numpy as np
-import io
-
-from ..util import attempt_import_or_raise
-from .base import EmbeddingFunction
-from .registry import register
-from .utils import TEXT, IMAGES, is_flash_attn_2_available
-
-
-@register("colpali")
-class ColPaliEmbeddings(EmbeddingFunction):
-    """
-    An embedding function that uses the ColPali engine for
-    multimodal multi-vector embeddings.
-
-    This embedding function supports ColQwen2.5 models, producing multivector outputs
-    for both text and image inputs. The output embeddings are lists of vectors, each
-    vector being 128-dimensional by default, represented as List[List[float]].
-
-    Parameters
-    ----------
-    model_name : str
-        The name of the model to use (e.g., "Metric-AI/ColQwen2.5-3b-multilingual-v1.0")
-    device : str
-        The device for inference (default "cuda:0").
-    dtype : str
-        Data type for model weights (default "bfloat16").
-    use_token_pooling : bool
-        Whether to use token pooling to reduce embedding size (default True).
-    pool_factor : int
-        Factor to reduce sequence length if token pooling is enabled (default 2).
-    quantization_config : Optional[BitsAndBytesConfig]
-        Quantization configuration for the model. (default None, bitsandbytes needed)
-    batch_size : int
-        Batch size for processing inputs (default 2).
-    """
-
-    model_name: str = "Metric-AI/ColQwen2.5-3b-multilingual-v1.0"
-    device: str = "auto"
-    dtype: str = "bfloat16"
-    use_token_pooling: bool = True
-    pool_factor: int = 2
-    quantization_config: Optional[Any] = None
-    batch_size: int = 2
-
-    _model = None
-    _processor = None
-    _token_pooler = None
-    _vector_dim = None
-
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        (
-            self._model,
-            self._processor,
-            self._token_pooler,
-        ) = self._load_model(
-            self.model_name,
-            self.dtype,
-            self.device,
-            self.use_token_pooling,
-            self.quantization_config,
-        )
-
-    @staticmethod
-    @lru_cache(maxsize=1)
-    def _load_model(
-        model_name: str,
-        dtype: str,
-        device: str,
-        use_token_pooling: bool,
-        quantization_config: Optional[Any],
-    ):
-        """
-        Initialize and cache the ColPali model, processor, and token pooler.
-        """
-        torch = attempt_import_or_raise("torch", "torch")
-        transformers = attempt_import_or_raise("transformers", "transformers")
-        colpali_engine = attempt_import_or_raise("colpali_engine", "colpali_engine")
-        from colpali_engine.compression.token_pooling import HierarchicalTokenPooler
-
-        if quantization_config is not None:
-            if not isinstance(quantization_config, transformers.BitsAndBytesConfig):
-                raise ValueError("quantization_config must be a BitsAndBytesConfig")
-
-        if dtype == "bfloat16":
-            torch_dtype = torch.bfloat16
-        elif dtype == "float16":
-            torch_dtype = torch.float16
-        elif dtype == "float64":
-            torch_dtype = torch.float64
-        else:
-            torch_dtype = torch.float32
-
-        model = colpali_engine.models.ColQwen2_5.from_pretrained(
-            model_name,
-            torch_dtype=torch_dtype,
-            device_map=device,
-            quantization_config=quantization_config
-            if quantization_config is not None
-            else None,
-            attn_implementation="flash_attention_2"
-            if is_flash_attn_2_available()
-            else None,
-        ).eval()
-        processor = colpali_engine.models.ColQwen2_5_Processor.from_pretrained(
-            model_name
-        )
-        token_pooler = HierarchicalTokenPooler() if use_token_pooling else None
-        return model, processor, token_pooler
-
-    def ndims(self):
-        """
-        Return the dimension of a vector in the multivector output (e.g., 128).
-        """
-        torch = attempt_import_or_raise("torch", "torch")
-        if self._vector_dim is None:
-            dummy_query = "test"
-            batch_queries = self._processor.process_queries([dummy_query]).to(
-                self._model.device
-            )
-            with torch.no_grad():
-                query_embeddings = self._model(**batch_queries)
-
-            if self.use_token_pooling and self._token_pooler is not None:
-                query_embeddings = self._token_pooler.pool_embeddings(
-                    query_embeddings,
-                    pool_factor=self.pool_factor,
-                    padding=True,
-                    padding_side=self._processor.tokenizer.padding_side,
-                )
-
-            self._vector_dim = query_embeddings[0].shape[-1]
-        return self._vector_dim
-
-    def _process_embeddings(self, embeddings):
-        """
-        Format model embeddings into List[List[float]].
-        Use token pooling if enabled.
-        """
-        torch = attempt_import_or_raise("torch", "torch")
-        if self.use_token_pooling and self._token_pooler is not None:
-            embeddings = self._token_pooler.pool_embeddings(
-                embeddings,
-                pool_factor=self.pool_factor,
-                padding=True,
-                padding_side=self._processor.tokenizer.padding_side,
-            )
-
-        if isinstance(embeddings, torch.Tensor):
-            tensors = embeddings.detach().cpu()
-            if tensors.dtype == torch.bfloat16:
-                tensors = tensors.to(torch.float32)
-            return (
-                tensors.numpy()
-                .astype(np.float64 if self.dtype == "float64" else np.float32)
-                .tolist()
-            )
-        return []
-
-    def generate_text_embeddings(self, text: TEXT) -> List[List[List[float]]]:
-        """
-        Generate embeddings for text input.
-        """
-        torch = attempt_import_or_raise("torch", "torch")
-        text = self.sanitize_input(text)
-        all_embeddings = []
-
-        for i in range(0, len(text), self.batch_size):
-            batch_text = text[i : i + self.batch_size]
-            batch_queries = self._processor.process_queries(batch_text).to(
-                self._model.device
-            )
-            with torch.no_grad():
-                query_embeddings = self._model(**batch_queries)
-            all_embeddings.extend(self._process_embeddings(query_embeddings))
-        return all_embeddings
-
-    def _prepare_images(self, images: IMAGES) -> List:
-        """
-        Convert image inputs to PIL Images.
-        """
-        PIL = attempt_import_or_raise("PIL", "pillow")
-        requests = attempt_import_or_raise("requests", "requests")
-        images = self.sanitize_input(images)
-        pil_images = []
-        try:
-            for image in images:
-                if isinstance(image, str):
-                    if image.startswith(("http://", "https://")):
-                        response = requests.get(image, timeout=10)
-                        response.raise_for_status()
-                        pil_images.append(PIL.Image.open(io.BytesIO(response.content)))
-                    else:
-                        with PIL.Image.open(image) as im:
-                            pil_images.append(im.copy())
-                elif isinstance(image, bytes):
-                    pil_images.append(PIL.Image.open(io.BytesIO(image)))
-                else:
-                    # Assume it's a PIL Image; will raise if invalid
-                    pil_images.append(image)
-        except Exception as e:
-            raise ValueError(f"Failed to process image: {e}")
-
-        return pil_images
-
-    def generate_image_embeddings(self, images: IMAGES) -> List[List[List[float]]]:
-        """
-        Generate embeddings for a batch of images.
-        """
-        torch = attempt_import_or_raise("torch", "torch")
-        pil_images = self._prepare_images(images)
-        all_embeddings = []
-
-        for i in range(0, len(pil_images), self.batch_size):
-            batch_images = pil_images[i : i + self.batch_size]
-            batch_images = self._processor.process_images(batch_images).to(
-                self._model.device
-            )
-            with torch.no_grad():
-                image_embeddings = self._model(**batch_images)
-            all_embeddings.extend(self._process_embeddings(image_embeddings))
-        return all_embeddings
-
-    def compute_query_embeddings(
-        self, query: Union[str, IMAGES], *args, **kwargs
-    ) -> List[List[List[float]]]:
-        """
-        Compute embeddings for a single user query (text only).
-        """
-        if not isinstance(query, str):
-            raise ValueError(
-                "Query must be a string, image to image search is not supported"
-            )
-        return self.generate_text_embeddings([query])
-
-    def compute_source_embeddings(
-        self, images: IMAGES, *args, **kwargs
-    ) -> List[List[List[float]]]:
-        """
-        Compute embeddings for a batch of source images.
-
-        Parameters
-        ----------
-        images : Union[str, bytes, List, pa.Array, pa.ChunkedArray, np.ndarray]
-            Batch of images (paths, URLs, bytes, or PIL Images).
-        """
-        images = self.sanitize_input(images)
-        return self.generate_image_embeddings(images)
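For reference, a hedged sketch of how the removed class was used through the embedding registry; it assumes lancedb 0.19 with torch, colpali_engine, and pillow installed, and the file names are placeholders:

    from lancedb.embeddings import EmbeddingFunctionRegistry

    registry = EmbeddingFunctionRegistry.get_instance()
    colpali = registry.get("colpali").create(batch_size=2)  # name from @register("colpali")

    # Sources are images; queries must be text (image-to-image search raises)
    doc_embs = colpali.compute_source_embeddings(["page1.png", "page2.png"])
    query_embs = colpali.compute_query_embeddings("a red bicycle")
    print(colpali.ndims())  # per-vector dimension of the multivector output, e.g. 128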
@@ -18,7 +18,6 @@ import numpy as np
 import pyarrow as pa
 
 from ..dependencies import pandas as pd
-from ..util import attempt_import_or_raise
 
 
 # ruff: noqa: PERF203
@@ -276,12 +275,3 @@ def url_retrieve(url: str):
 def api_key_not_found_help(provider):
     logging.error("Could not find API key for %s", provider)
     raise ValueError(f"Please set the {provider.upper()}_API_KEY environment variable.")
-
-
-def is_flash_attn_2_available():
-    try:
-        attempt_import_or_raise("flash_attn", "flash_attn")
-
-        return True
-    except ImportError:
-        return False
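The removed `is_flash_attn_2_available` is one instance of a generic optional-dependency probe; a standalone sketch of the same idea, independent of lancedb:

    def is_available(module_name: str) -> bool:
        # Probe an optional dependency without failing at import time
        try:
            __import__(module_name)
            return True
        except ImportError:
            return False

    use_flash_attn = is_available("flash_attn")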
@@ -1,12 +1,9 @@
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright The LanceDB Authors
-import base64
-import os
-from typing import ClassVar, TYPE_CHECKING, List, Union, Any
-
-from pathlib import Path
-from urllib.parse import urlparse
-from io import BytesIO
+import os
+from typing import ClassVar, TYPE_CHECKING, List, Union
 
 import numpy as np
 import pyarrow as pa
@@ -14,100 +11,12 @@ import pyarrow as pa
 from ..util import attempt_import_or_raise
 from .base import EmbeddingFunction
 from .registry import register
-from .utils import api_key_not_found_help, IMAGES, TEXT
+from .utils import api_key_not_found_help, IMAGES
 
 if TYPE_CHECKING:
     import PIL
 
 
-def is_valid_url(text):
-    try:
-        parsed = urlparse(text)
-        return bool(parsed.scheme) and bool(parsed.netloc)
-    except Exception:
-        return False
-
-
-def transform_input(input_data: Union[str, bytes, Path]):
-    PIL = attempt_import_or_raise("PIL", "pillow")
-    if isinstance(input_data, str):
-        if is_valid_url(input_data):
-            content = {"type": "image_url", "image_url": input_data}
-        else:
-            content = {"type": "text", "text": input_data}
-    elif isinstance(input_data, PIL.Image.Image):
-        buffered = BytesIO()
-        input_data.save(buffered, format="JPEG")
-        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
-        content = {
-            "type": "image_base64",
-            "image_base64": "data:image/jpeg;base64," + img_str,
-        }
-    elif isinstance(input_data, bytes):
-        img = PIL.Image.open(BytesIO(input_data))
-        buffered = BytesIO()
-        img.save(buffered, format="JPEG")
-        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
-        content = {
-            "type": "image_base64",
-            "image_base64": "data:image/jpeg;base64," + img_str,
-        }
-    elif isinstance(input_data, Path):
-        img = PIL.Image.open(input_data)
-        buffered = BytesIO()
-        img.save(buffered, format="JPEG")
-        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
-        content = {
-            "type": "image_base64",
-            "image_base64": "data:image/jpeg;base64," + img_str,
-        }
-    else:
-        raise ValueError("Each input should be either str, bytes, Path or Image.")
-
-    return {"content": [content]}
-
-
-def sanitize_multimodal_input(inputs: Union[TEXT, IMAGES]) -> List[Any]:
-    """
-    Sanitize the input to the embedding function.
-    """
-    PIL = attempt_import_or_raise("PIL", "pillow")
-    if isinstance(inputs, (str, bytes, Path, PIL.Image.Image)):
-        inputs = [inputs]
-    elif isinstance(inputs, pa.Array):
-        inputs = inputs.to_pylist()
-    elif isinstance(inputs, pa.ChunkedArray):
-        inputs = inputs.combine_chunks().to_pylist()
-    else:
-        raise ValueError(
-            f"Input type {type(inputs)} not allowed with multimodal model."
-        )
-
-    if not all(isinstance(x, (str, bytes, Path, PIL.Image.Image)) for x in inputs):
-        raise ValueError("Each input should be either str, bytes, Path or Image.")
-
-    return [transform_input(i) for i in inputs]
-
-
-def sanitize_text_input(inputs: TEXT) -> List[str]:
-    """
-    Sanitize the input to the embedding function.
-    """
-    if isinstance(inputs, str):
-        inputs = [inputs]
-    elif isinstance(inputs, pa.Array):
-        inputs = inputs.to_pylist()
-    elif isinstance(inputs, pa.ChunkedArray):
-        inputs = inputs.combine_chunks().to_pylist()
-    else:
-        raise ValueError(f"Input type {type(inputs)} not allowed with text model.")
-
-    if not all(isinstance(x, str) for x in inputs):
-        raise ValueError("Each input should be str.")
-
-    return inputs
-
-
 @register("voyageai")
 class VoyageAIEmbeddingFunction(EmbeddingFunction):
     """
@@ -165,11 +74,6 @@ class VoyageAIEmbeddingFunction(EmbeddingFunction):
         ]
     multimodal_embedding_models: list = ["voyage-multimodal-3"]
 
-    def _is_multimodal_model(self, model_name: str):
-        return (
-            model_name in self.multimodal_embedding_models or "multimodal" in model_name
-        )
-
     def ndims(self):
         if self.name == "voyage-3-lite":
             return 512
@@ -181,12 +85,55 @@ class VoyageAIEmbeddingFunction(EmbeddingFunction):
             "voyage-finance-2",
             "voyage-multilingual-2",
             "voyage-law-2",
-            "voyage-multimodal-3",
         ]:
             return 1024
         else:
             raise ValueError(f"Model {self.name} not supported")
 
+    def sanitize_input(self, images: IMAGES) -> Union[List[bytes], np.ndarray]:
+        """
+        Sanitize the input to the embedding function.
+        """
+        if isinstance(images, (str, bytes)):
+            images = [images]
+        elif isinstance(images, pa.Array):
+            images = images.to_pylist()
+        elif isinstance(images, pa.ChunkedArray):
+            images = images.combine_chunks().to_pylist()
+        return images
+
+    def generate_text_embeddings(self, text: str, **kwargs) -> np.ndarray:
+        """
+        Get the embeddings for the given texts
+
+        Parameters
+        ----------
+        texts: list[str] or np.ndarray (of str)
+            The texts to embed
+        input_type: Optional[str]
+
+        truncation: Optional[bool]
+        """
+        client = VoyageAIEmbeddingFunction._get_client()
+        if self.name in self.text_embedding_models:
+            rs = client.embed(texts=[text], model=self.name, **kwargs)
+        elif self.name in self.multimodal_embedding_models:
+            rs = client.multimodal_embed(inputs=[[text]], model=self.name, **kwargs)
+        else:
+            raise ValueError(
+                f"Model {self.name} not supported to generate text embeddings"
+            )
+
+        return rs.embeddings[0]
+
+    def generate_image_embedding(
+        self, image: "PIL.Image.Image", **kwargs
+    ) -> np.ndarray:
+        rs = VoyageAIEmbeddingFunction._get_client().multimodal_embed(
+            inputs=[[image]], model=self.name, **kwargs
+        )
+        return rs.embeddings[0]
+
     def compute_query_embeddings(
         self, query: Union[str, "PIL.Image.Image"], *args, **kwargs
     ) -> List[np.ndarray]:
@@ -197,52 +144,23 @@ class VoyageAIEmbeddingFunction(EmbeddingFunction):
         ----------
         query : Union[str, PIL.Image.Image]
             The query to embed. A query can be either text or an image.
-
-        Returns
-        -------
-        List[np.array]: the list of embeddings
         """
-        client = VoyageAIEmbeddingFunction._get_client()
-        if self._is_multimodal_model(self.name):
-            result = client.multimodal_embed(
-                inputs=[[query]], model=self.name, input_type="query", **kwargs
-            )
+        if isinstance(query, str):
+            return [self.generate_text_embeddings(query, input_type="query")]
         else:
-            result = client.embed(
-                texts=[query], model=self.name, input_type="query", **kwargs
-            )
-
-        return [result.embeddings[0]]
+            PIL = attempt_import_or_raise("PIL", "pillow")
+            if isinstance(query, PIL.Image.Image):
+                return [self.generate_image_embedding(query, input_type="query")]
+            else:
+                raise TypeError("Only text PIL images supported as query")
 
     def compute_source_embeddings(
-        self, inputs: Union[TEXT, IMAGES], *args, **kwargs
+        self, images: IMAGES, *args, **kwargs
     ) -> List[np.array]:
-        """
-        Compute the embeddings for the inputs
-
-        Parameters
-        ----------
-        inputs : Union[TEXT, IMAGES]
-            The inputs to embed. The input can be either str, bytes, Path (to an image),
-            PIL.Image or list of these.
-
-        Returns
-        -------
-        List[np.array]: the list of embeddings
-        """
-        client = VoyageAIEmbeddingFunction._get_client()
-        if self._is_multimodal_model(self.name):
-            inputs = sanitize_multimodal_input(inputs)
-            result = client.multimodal_embed(
-                inputs=inputs, model=self.name, input_type="document", **kwargs
-            )
-        else:
-            inputs = sanitize_text_input(inputs)
-            result = client.embed(
-                texts=inputs, model=self.name, input_type="document", **kwargs
-            )
-
-        return result.embeddings
+        images = self.sanitize_input(images)
+        return [
+            self.generate_image_embedding(img, input_type="document") for img in images
+        ]
 
     @staticmethod
     def _get_client():
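A hedged sketch of the 0.18-side call pattern after this rewrite (assumes the voyageai package and a VOYAGE_API_KEY in the environment; file names are placeholders):

    from lancedb.embeddings import EmbeddingFunctionRegistry

    registry = EmbeddingFunctionRegistry.get_instance()
    voyage = registry.get("voyageai").create(name="voyage-multimodal-3")

    # In this version, sources are images; queries may be text or a PIL image
    doc_embs = voyage.compute_source_embeddings(["cat.jpg", "dog.jpg"])
    query_embs = voyage.compute_query_embeddings("a sleeping cat")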
@@ -4,14 +4,10 @@
 
 from __future__ import annotations
 
-from datetime import timedelta
 from typing import TYPE_CHECKING, List, Optional
 
 if TYPE_CHECKING:
     from .common import DATA
-    from ._lancedb import (
-        MergeInsertResult,
-    )
 
 
 class LanceMergeInsertBuilder(object):
@@ -32,7 +28,6 @@ class LanceMergeInsertBuilder(object):
         self._when_not_matched_insert_all = False
         self._when_not_matched_by_source_delete = False
         self._when_not_matched_by_source_condition = None
-        self._timeout = None
 
     def when_matched_update_all(
         self, *, where: Optional[str] = None
@@ -83,8 +78,7 @@ class LanceMergeInsertBuilder(object):
         new_data: DATA,
         on_bad_vectors: str = "error",
         fill_value: float = 0.0,
-        timeout: Optional[timedelta] = None,
-    ) -> MergeInsertResult:
+    ):
         """
         Executes the merge insert operation
 
@@ -101,24 +95,5 @@ class LanceMergeInsertBuilder(object):
             One of "error", "drop", "fill".
         fill_value: float, default 0.
             The value to use when filling vectors. Only used if on_bad_vectors="fill".
-        timeout: Optional[timedelta], default None
-            Maximum time to run the operation before cancelling it.
-
-            By default, there is a 30-second timeout that is only enforced after the
-            first attempt. This is to prevent spending too long retrying to resolve
-            conflicts. For example, if a write attempt takes 20 seconds and fails,
-            the second attempt will be cancelled after 10 seconds, hitting the
-            30-second timeout. However, a write that takes one hour and succeeds on the
-            first attempt will not be cancelled.
-
-            When this is set, the timeout is enforced on all attempts, including
-            the first.
-
-        Returns
-        -------
-        MergeInsertResult
-            version: the new version number of the table after doing merge insert.
         """
-        if timeout is not None:
-            self._timeout = timeout
         return self._table._do_merge(self, new_data, on_bad_vectors, fill_value)
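On the 0.19 side, the timeout documented above is passed straight to execute(); a minimal sketch, assuming an already-open table `tbl` keyed on "id" (hypothetical):

    from datetime import timedelta

    new_data = [{"id": 1, "name": "updated"}]  # illustrative
    result = (
        tbl.merge_insert("id")  # `tbl` is an assumed, already-open table
        .when_matched_update_all()
        .when_not_matched_insert_all()
        .execute(new_data, timeout=timedelta(seconds=60))  # enforced on every attempt
    )
    print(result.version)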
@@ -152,104 +152,6 @@ def Vector(
     return FixedSizeList
 
 
-def MultiVector(
-    dim: int, value_type: pa.DataType = pa.float32(), nullable: bool = True
-) -> Type:
-    """Pydantic MultiVector Type for multi-vector embeddings.
-
-    This type represents a list of vectors, each with the same dimension.
-    Useful for models that produce multiple embeddings per input, like ColPali.
-
-    Parameters
-    ----------
-    dim : int
-        The dimension of each vector in the multi-vector.
-    value_type : pyarrow.DataType, optional
-        The value type of the vectors, by default pa.float32()
-    nullable : bool, optional
-        Whether the multi-vector is nullable, by default it is True.
-
-    Examples
-    --------
-
-    >>> import pydantic
-    >>> from lancedb.pydantic import MultiVector
-    ...
-    >>> class MyModel(pydantic.BaseModel):
-    ...     id: int
-    ...     text: str
-    ...     embeddings: MultiVector(128)  # List of 128-dimensional vectors
-    >>> schema = pydantic_to_schema(MyModel)
-    >>> assert schema == pa.schema([
-    ...     pa.field("id", pa.int64(), False),
-    ...     pa.field("text", pa.utf8(), False),
-    ...     pa.field("embeddings", pa.list_(pa.list_(pa.float32(), 128)))
-    ... ])
-    """
-
-    class MultiVectorList(list, FixedSizeListMixin):
-        def __repr__(self):
-            return f"MultiVector(dim={dim})"
-
-        @staticmethod
-        def nullable() -> bool:
-            return nullable
-
-        @staticmethod
-        def dim() -> int:
-            return dim
-
-        @staticmethod
-        def value_arrow_type() -> pa.DataType:
-            return value_type
-
-        @staticmethod
-        def is_multi_vector() -> bool:
-            return True
-
-        @classmethod
-        def __get_pydantic_core_schema__(
-            cls, _source_type: Any, _handler: pydantic.GetCoreSchemaHandler
-        ) -> CoreSchema:
-            return core_schema.no_info_after_validator_function(
-                cls,
-                core_schema.list_schema(
-                    items_schema=core_schema.list_schema(
-                        min_length=dim,
-                        max_length=dim,
-                        items_schema=core_schema.float_schema(),
-                    ),
-                ),
-            )
-
-        @classmethod
-        def __get_validators__(cls) -> Generator[Callable, None, None]:
-            yield cls.validate
-
-        # For pydantic v1
-        @classmethod
-        def validate(cls, v):
-            if not isinstance(v, (list, range)):
-                raise TypeError("A list of vectors is needed")
-            for vec in v:
-                if not isinstance(vec, (list, range, np.ndarray)) or len(vec) != dim:
-                    raise TypeError(f"Each vector must be a list of {dim} numbers")
-            return cls(v)
-
-        if PYDANTIC_VERSION.major < 2:
-
-            @classmethod
-            def __modify_schema__(cls, field_schema: Dict[str, Any]):
-                field_schema["items"] = {
-                    "type": "array",
-                    "items": {"type": "number"},
-                    "minItems": dim,
-                    "maxItems": dim,
-                }
-
-    return MultiVectorList
-
-
 def _py_type_to_arrow_type(py_type: Type[Any], field: FieldInfo) -> pa.DataType:
     """Convert a field with native Python type to Arrow data type.
@@ -304,9 +206,6 @@ def _pydantic_type_to_arrow_type(tp: Any, field: FieldInfo) -> pa.DataType:
         fields = _pydantic_model_to_fields(tp)
         return pa.struct(fields)
     if issubclass(tp, FixedSizeListMixin):
-        if getattr(tp, "is_multi_vector", lambda: False)():
-            return pa.list_(pa.list_(tp.value_arrow_type(), tp.dim()))
-        # For regular Vector
         return pa.list_(tp.value_arrow_type(), tp.dim())
     return _py_type_to_arrow_type(tp, field)
@@ -415,7 +314,6 @@ class LanceModel(pydantic.BaseModel):
     >>> table.add([
     ...     TestModel(name="test", vector=[1.0, 2.0])
     ... ])
-    AddResult(version=2)
     >>> table.search([0., 0.]).limit(1).to_pydantic(TestModel)
     [TestModel(name='test', vector=FixedSizeList(dim=2))]
     """
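The branch removed in the second hunk above is what separates the two Arrow layouts; a pyarrow-only sketch of the resulting types, independent of lancedb:

    import pyarrow as pa

    dim = 128
    vector_type = pa.list_(pa.float32(), dim)                 # Vector: fixed-size list
    multivector_type = pa.list_(pa.list_(pa.float32(), dim))  # MultiVector: list of fixed-size lists
    print(vector_type)
    print(multivector_type)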
File diff suppressed because it is too large
@@ -7,16 +7,7 @@ from functools import cached_property
 from typing import Dict, Iterable, List, Optional, Union, Literal
 import warnings
 
-from lancedb._lancedb import (
-    AddColumnsResult,
-    AddResult,
-    AlterColumnsResult,
-    DeleteResult,
-    DropColumnsResult,
-    IndexConfig,
-    MergeResult,
-    UpdateResult,
-)
+from lancedb._lancedb import IndexConfig
 from lancedb.embeddings.base import EmbeddingFunctionConfig
 from lancedb.index import FTS, BTree, Bitmap, HnswPq, HnswSq, IvfFlat, IvfPq, LabelList
 from lancedb.remote.db import LOOP
@@ -27,7 +18,7 @@ from lancedb.merge import LanceMergeInsertBuilder
 from lancedb.embeddings import EmbeddingFunctionRegistry
 
 from ..query import LanceVectorQueryBuilder, LanceQueryBuilder
-from ..table import AsyncTable, IndexStatistics, Query, Table, Tags
+from ..table import AsyncTable, IndexStatistics, Query, Table
 
 
 class RemoteTable(Table):
@@ -47,6 +38,9 @@ class RemoteTable(Table):
     def __repr__(self) -> str:
         return f"RemoteTable({self.db_name}.{self.name})"
 
+    def __len__(self) -> int:
+        self.count_rows(None)
+
     @property
     def schema(self) -> pa.Schema:
         """The [Arrow Schema](https://arrow.apache.org/docs/python/api/datatypes.html#)
@@ -60,10 +54,6 @@ class RemoteTable(Table):
         """Get the current version of the table"""
         return LOOP.run(self._table.version())
 
-    @property
-    def tags(self) -> Tags:
-        return Tags(self._table)
-
     @cached_property
     def embedding_functions(self) -> Dict[str, EmbeddingFunctionConfig]:
         """
@@ -91,15 +81,12 @@ class RemoteTable(Table):
         """to_pandas() is not yet supported on LanceDB cloud."""
         return NotImplementedError("to_pandas() is not yet supported on LanceDB cloud.")
 
-    def checkout(self, version: Union[int, str]):
+    def checkout(self, version: int):
         return LOOP.run(self._table.checkout(version))
 
     def checkout_latest(self):
         return LOOP.run(self._table.checkout_latest())
 
-    def restore(self, version: Optional[Union[int, str]] = None):
-        return LOOP.run(self._table.restore(version))
-
     def list_indices(self) -> Iterable[IndexConfig]:
         """List all the indices on the table"""
         return LOOP.run(self._table.list_indices())
@@ -114,7 +101,6 @@ class RemoteTable(Table):
         index_type: Literal["BTREE", "BITMAP", "LABEL_LIST", "scalar"] = "scalar",
         *,
         replace: bool = False,
-        wait_timeout: timedelta = None,
     ):
         """Creates a scalar index
         Parameters
@@ -137,18 +123,13 @@ class RemoteTable(Table):
         else:
             raise ValueError(f"Unknown index type: {index_type}")
 
-        LOOP.run(
-            self._table.create_index(
-                column, config=config, replace=replace, wait_timeout=wait_timeout
-            )
-        )
+        LOOP.run(self._table.create_index(column, config=config, replace=replace))
 
     def create_fts_index(
         self,
         column: str,
         *,
         replace: bool = False,
-        wait_timeout: timedelta = None,
         with_position: bool = True,
         # tokenizer configs:
         base_tokenizer: str = "simple",
@@ -169,11 +150,7 @@ class RemoteTable(Table):
             remove_stop_words=remove_stop_words,
             ascii_folding=ascii_folding,
         )
-        LOOP.run(
-            self._table.create_index(
-                column, config=config, replace=replace, wait_timeout=wait_timeout
-            )
-        )
+        LOOP.run(self._table.create_index(column, config=config, replace=replace))
 
     def create_index(
         self,
@@ -185,7 +162,6 @@ class RemoteTable(Table):
         replace: Optional[bool] = None,
         accelerator: Optional[str] = None,
         index_type="vector",
-        wait_timeout: Optional[timedelta] = None,
     ):
         """Create an index on the table.
         Currently, the only parameters that matter are
@@ -257,11 +233,7 @@ class RemoteTable(Table):
             " 'IVF_FLAT', 'IVF_PQ', 'IVF_HNSW_PQ', 'IVF_HNSW_SQ'"
         )
 
-        LOOP.run(
-            self._table.create_index(
-                vector_column_name, config=config, wait_timeout=wait_timeout
-            )
-        )
+        LOOP.run(self._table.create_index(vector_column_name, config=config))
 
     def add(
         self,
@@ -269,7 +241,7 @@ class RemoteTable(Table):
         mode: str = "append",
         on_bad_vectors: str = "error",
         fill_value: float = 0.0,
-    ) -> AddResult:
+    ) -> int:
         """Add more data to the [Table](Table). It has the same API signature as
         the OSS version.
@@ -292,12 +264,8 @@ class RemoteTable(Table):
         fill_value: float, default 0.
             The value to use when filling vectors. Only used if on_bad_vectors="fill".
-
-        Returns
-        -------
-        AddResult
-            An object containing the new version number of the table after adding data.
         """
-        return LOOP.run(
+        LOOP.run(
             self._table.add(
                 data, mode=mode, on_bad_vectors=on_bad_vectors, fill_value=fill_value
             )
@@ -384,15 +352,9 @@ class RemoteTable(Table):
         )
 
     def _execute_query(
-        self,
-        query: Query,
-        *,
-        batch_size: Optional[int] = None,
-        timeout: Optional[timedelta] = None,
+        self, query: Query, batch_size: Optional[int] = None
     ) -> pa.RecordBatchReader:
-        async_iter = LOOP.run(
-            self._table._execute_query(query, batch_size=batch_size, timeout=timeout)
-        )
+        async_iter = LOOP.run(self._table._execute_query(query, batch_size=batch_size))
 
         def iter_sync():
             try:
@@ -403,12 +365,6 @@ class RemoteTable(Table):
 
         return pa.RecordBatchReader.from_batches(async_iter.schema, iter_sync())
 
-    def _explain_plan(self, query: Query, verbose: Optional[bool] = False) -> str:
-        return LOOP.run(self._table._explain_plan(query, verbose))
-
-    def _analyze_plan(self, query: Query) -> str:
-        return LOOP.run(self._table._analyze_plan(query))
-
     def merge_insert(self, on: Union[str, Iterable[str]]) -> LanceMergeInsertBuilder:
         """Returns a [`LanceMergeInsertBuilder`][lancedb.merge.LanceMergeInsertBuilder]
         that can be used to create a "merge insert" operation.
@@ -423,12 +379,10 @@ class RemoteTable(Table):
         new_data: DATA,
         on_bad_vectors: str,
         fill_value: float,
-    ) -> MergeResult:
-        return LOOP.run(
-            self._table._do_merge(merge, new_data, on_bad_vectors, fill_value)
-        )
+    ):
+        LOOP.run(self._table._do_merge(merge, new_data, on_bad_vectors, fill_value))
 
-    def delete(self, predicate: str) -> DeleteResult:
+    def delete(self, predicate: str):
         """Delete rows from the table.
 
         This can be used to delete a single row, many rows, all rows, or
@@ -443,11 +397,6 @@ class RemoteTable(Table):
 
         The filter must not be empty, or it will error.
 
-        Returns
-        -------
-        DeleteResult
-            An object containing the new version number of the table after deletion.
-
         Examples
         --------
         >>> import lancedb
@@ -480,7 +429,7 @@ class RemoteTable(Table):
         x vector _distance  # doctest: +SKIP
         0  2  [3.0, 4.0]  85.0  # doctest: +SKIP
         """
-        return LOOP.run(self._table.delete(predicate))
+        LOOP.run(self._table.delete(predicate))
 
     def update(
         self,
@@ -488,7 +437,7 @@ class RemoteTable(Table):
         values: Optional[dict] = None,
         *,
         values_sql: Optional[Dict[str, str]] = None,
-    ) -> UpdateResult:
+    ):
         """
         This can be used to update zero to all rows depending on how many
         rows match the where clause.
@@ -506,12 +455,6 @@ class RemoteTable(Table):
             reference existing columns. For example, {"x": "x + 1"} will increment
             the x column by 1.
 
-        Returns
-        -------
-        UpdateResult
-            - rows_updated: The number of rows that were updated
-            - version: The new version number of the table after the update
-
         Examples
         --------
         >>> import lancedb
@@ -536,7 +479,7 @@ class RemoteTable(Table):
         2  2  [10.0, 10.0]  # doctest: +SKIP
 
         """
-        return LOOP.run(
+        LOOP.run(
             self._table.update(where=where, updates=values, updates_sql=values_sql)
         )
@@ -584,28 +527,18 @@ class RemoteTable(Table):
     def count_rows(self, filter: Optional[str] = None) -> int:
         return LOOP.run(self._table.count_rows(filter))
 
-    def add_columns(self, transforms: Dict[str, str]) -> AddColumnsResult:
+    def add_columns(self, transforms: Dict[str, str]):
         return LOOP.run(self._table.add_columns(transforms))
 
-    def alter_columns(
-        self, *alterations: Iterable[Dict[str, str]]
-    ) -> AlterColumnsResult:
+    def alter_columns(self, *alterations: Iterable[Dict[str, str]]):
         return LOOP.run(self._table.alter_columns(*alterations))
 
-    def drop_columns(self, columns: Iterable[str]) -> DropColumnsResult:
+    def drop_columns(self, columns: Iterable[str]):
         return LOOP.run(self._table.drop_columns(columns))
 
     def drop_index(self, index_name: str):
         return LOOP.run(self._table.drop_index(index_name))
 
-    def wait_for_index(
-        self, index_names: Iterable[str], timeout: timedelta = timedelta(seconds=300)
-    ):
-        return LOOP.run(self._table.wait_for_index(index_names, timeout))
-
-    def stats(self):
-        return LOOP.run(self._table.stats())
-
     def uses_v2_manifest_paths(self) -> bool:
         raise NotImplementedError(
             "uses_v2_manifest_paths() is not supported on the LanceDB Cloud"
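The removed `wait_timeout`/`wait_for_index` pair is how 0.19 callers block on remote index builds; a hedged sketch against an assumed RemoteTable `tbl` (the column and index names are illustrative):

    from datetime import timedelta

    # Block until the scalar index is built, up to the given timeout
    tbl.create_scalar_index("id", index_type="BTREE", wait_timeout=timedelta(minutes=5))

    # Or create asynchronously, then poll by index name ("vector_idx" is a guess)
    tbl.create_index(metric="cosine", vector_column_name="vector")
    tbl.wait_for_index(["vector_idx"], timeout=timedelta(seconds=300))
    print(tbl.stats())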
@@ -47,9 +47,6 @@ class AnswerdotaiRerankers(Reranker):
         )
 
     def _rerank(self, result_set: pa.Table, query: str):
-        result_set = self._handle_empty_results(result_set)
-        if len(result_set) == 0:
-            return result_set
         docs = result_set[self.column].to_pylist()
         doc_ids = list(range(len(docs)))
         result = self.reranker.rank(query, docs, doc_ids=doc_ids)
@@ -86,6 +83,7 @@ class AnswerdotaiRerankers(Reranker):
         vector_results = self._rerank(vector_results, query)
         if self.score == "relevance":
             vector_results = vector_results.drop_columns(["_distance"])
+
         vector_results = vector_results.sort_by([("_relevance_score", "descending")])
         return vector_results
 
@@ -93,5 +91,7 @@ class AnswerdotaiRerankers(Reranker):
         fts_results = self._rerank(fts_results, query)
         if self.score == "relevance":
             fts_results = fts_results.drop_columns(["_score"])
+
         fts_results = fts_results.sort_by([("_relevance_score", "descending")])
+
         return fts_results
@@ -65,16 +65,6 @@ class Reranker(ABC):
             f"{self.__class__.__name__} does not implement rerank_vector"
         )
 
-    def _handle_empty_results(self, results: pa.Table):
-        """
-        Helper method to handle empty FTS results consistently
-        """
-        if len(results) > 0:
-            return results
-        return results.append_column(
-            "_relevance_score", pa.array([], type=pa.float32())
-        )
-
     def rerank_fts(
         self,
         query: str,
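The helper removed above guards a subtle invariant: reranking an empty table must still produce a `_relevance_score` column, or the later sort_by fails. A standalone sketch of the same pattern:

    import pyarrow as pa

    def handle_empty_results(results: pa.Table) -> pa.Table:
        # Append an empty float32 score column so sort_by("_relevance_score") stays valid
        if len(results) > 0:
            return results
        return results.append_column("_relevance_score", pa.array([], type=pa.float32()))

    empty = pa.table({"text": pa.array([], type=pa.string())})
    print(handle_empty_results(empty).column_names)  # ['text', '_relevance_score']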
@@ -62,9 +62,6 @@ class CohereReranker(Reranker):
         return cohere.Client(os.environ.get("COHERE_API_KEY") or self.api_key)
 
     def _rerank(self, result_set: pa.Table, query: str):
-        result_set = self._handle_empty_results(result_set)
-        if len(result_set) == 0:
-            return result_set
         docs = result_set[self.column].to_pylist()
         response = self._client.rerank(
             query=query,
@@ -102,14 +99,24 @@ class CohereReranker(Reranker):
         )
         return combined_results
 
-    def rerank_vector(self, query: str, vector_results: pa.Table):
-        vector_results = self._rerank(vector_results, query)
+    def rerank_vector(
+        self,
+        query: str,
+        vector_results: pa.Table,
+    ):
+        result_set = self._rerank(vector_results, query)
         if self.score == "relevance":
-            vector_results = vector_results.drop_columns(["_distance"])
-        return vector_results
+            result_set = result_set.drop_columns(["_distance"])
 
-    def rerank_fts(self, query: str, fts_results: pa.Table):
-        fts_results = self._rerank(fts_results, query)
+        return result_set
+
+    def rerank_fts(
+        self,
+        query: str,
+        fts_results: pa.Table,
+    ):
+        result_set = self._rerank(fts_results, query)
         if self.score == "relevance":
-            fts_results = fts_results.drop_columns(["_score"])
-        return fts_results
+            result_set = result_set.drop_columns(["_score"])
+
+        return result_set
@@ -63,9 +63,6 @@ class CrossEncoderReranker(Reranker):
         return cross_encoder
 
     def _rerank(self, result_set: pa.Table, query: str):
-        result_set = self._handle_empty_results(result_set)
-        if len(result_set) == 0:
-            return result_set
         passages = result_set[self.column].to_pylist()
         cross_inp = [[query, passage] for passage in passages]
         cross_scores = self.model.predict(cross_inp)
@@ -96,7 +93,11 @@ class CrossEncoderReranker(Reranker):
 
         return combined_results
 
-    def rerank_vector(self, query: str, vector_results: pa.Table):
+    def rerank_vector(
+        self,
+        query: str,
+        vector_results: pa.Table,
+    ):
         vector_results = self._rerank(vector_results, query)
         if self.score == "relevance":
             vector_results = vector_results.drop_columns(["_distance"])
@@ -104,7 +105,11 @@ class CrossEncoderReranker(Reranker):
         vector_results = vector_results.sort_by([("_relevance_score", "descending")])
         return vector_results
 
-    def rerank_fts(self, query: str, fts_results: pa.Table):
+    def rerank_fts(
+        self,
+        query: str,
+        fts_results: pa.Table,
+    ):
         fts_results = self._rerank(fts_results, query)
         if self.score == "relevance":
             fts_results = fts_results.drop_columns(["_score"])
@@ -62,9 +62,6 @@ class JinaReranker(Reranker):
         return self._session
 
     def _rerank(self, result_set: pa.Table, query: str):
-        result_set = self._handle_empty_results(result_set)
-        if len(result_set) == 0:
-            return result_set
         docs = result_set[self.column].to_pylist()
         response = self._client.post(  # type: ignore
             API_URL,
@@ -107,14 +104,24 @@ class JinaReranker(Reranker):
         )
         return combined_results
 
-    def rerank_vector(self, query: str, vector_results: pa.Table):
-        vector_results = self._rerank(vector_results, query)
+    def rerank_vector(
+        self,
+        query: str,
+        vector_results: pa.Table,
+    ):
+        result_set = self._rerank(vector_results, query)
         if self.score == "relevance":
-            vector_results = vector_results.drop_columns(["_distance"])
-        return vector_results
+            result_set = result_set.drop_columns(["_distance"])
 
-    def rerank_fts(self, query: str, fts_results: pa.Table):
-        fts_results = self._rerank(fts_results, query)
+        return result_set
+
+    def rerank_fts(
+        self,
+        query: str,
+        fts_results: pa.Table,
+    ):
+        result_set = self._rerank(fts_results, query)
         if self.score == "relevance":
-            fts_results = fts_results.drop_columns(["_score"])
-        return fts_results
+            result_set = result_set.drop_columns(["_score"])
+
+        return result_set
@@ -44,9 +44,6 @@ class OpenaiReranker(Reranker):
         self.api_key = api_key
 
     def _rerank(self, result_set: pa.Table, query: str):
-        result_set = self._handle_empty_results(result_set)
-        if len(result_set) == 0:
-            return result_set
         docs = result_set[self.column].to_pylist()
         response = self._client.chat.completions.create(
             model=self.model_name,
@@ -107,14 +104,18 @@ class OpenaiReranker(Reranker):
         vector_results = self._rerank(vector_results, query)
         if self.score == "relevance":
             vector_results = vector_results.drop_columns(["_distance"])
+
         vector_results = vector_results.sort_by([("_relevance_score", "descending")])
+
         return vector_results
 
     def rerank_fts(self, query: str, fts_results: pa.Table):
         fts_results = self._rerank(fts_results, query)
         if self.score == "relevance":
             fts_results = fts_results.drop_columns(["_score"])
+
         fts_results = fts_results.sort_by([("_relevance_score", "descending")])
+
         return fts_results
 
     @cached_property
@@ -63,9 +63,6 @@ class VoyageAIReranker(Reranker):
         )
 
     def _rerank(self, result_set: pa.Table, query: str):
-        result_set = self._handle_empty_results(result_set)
-        if len(result_set) == 0:
-            return result_set
         docs = result_set[self.column].to_pylist()
         response = self._client.rerank(
             query=query,
@@ -104,14 +101,24 @@ class VoyageAIReranker(Reranker):
         )
         return combined_results
 
-    def rerank_vector(self, query: str, vector_results: pa.Table):
-        vector_results = self._rerank(vector_results, query)
+    def rerank_vector(
+        self,
+        query: str,
+        vector_results: pa.Table,
+    ):
+        result_set = self._rerank(vector_results, query)
         if self.score == "relevance":
-            vector_results = vector_results.drop_columns(["_distance"])
-        return vector_results
+            result_set = result_set.drop_columns(["_distance"])
 
-    def rerank_fts(self, query: str, fts_results: pa.Table):
-        fts_results = self._rerank(fts_results, query)
+        return result_set
+
+    def rerank_fts(
+        self,
+        query: str,
+        fts_results: pa.Table,
+    ):
+        result_set = self._rerank(fts_results, query)
         if self.score == "relevance":
-            fts_results = fts_results.drop_columns(["_score"])
-        return fts_results
+            result_set = result_set.drop_columns(["_score"])
+
+        return result_set
File diff suppressed because it is too large
@@ -253,14 +253,9 @@ def infer_vector_column_name(
     query: Optional[Any],  # inferred later in query builder
     vector_column_name: Optional[str],
 ):
-    if vector_column_name is not None:
-        return vector_column_name
-
-    if query_type == "fts":
-        # FTS queries do not require a vector column
-        return None
-
-    if query is not None or query_type == "hybrid":
+    if (vector_column_name is None and query is not None and query_type != "fts") or (
+        vector_column_name is None and query_type == "hybrid"
+    ):
         try:
             vector_column_name = inf_vector_column_query(schema)
         except Exception as e:
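The two versions above look different but encode the same decision about when column inference is attempted; a self-contained sketch that checks the early-return form against the single-condition form over a small input grid:

    def should_infer_v19(vector_column_name, query_type, query):
        # 0.19 branch structure (early returns), abstracted to a boolean
        if vector_column_name is not None:
            return False  # explicit name, nothing to infer
        if query_type == "fts":
            return False  # FTS queries need no vector column
        return query is not None or query_type == "hybrid"

    def should_infer_v18(vector_column_name, query_type, query):
        # 0.18 single-condition form from the right-hand side of the diff
        return (vector_column_name is None and query is not None and query_type != "fts") or (
            vector_column_name is None and query_type == "hybrid"
        )

    # Exhaustive check over a small grid: the refactor preserves the decision
    for name in (None, "vec"):
        for qt in ("fts", "hybrid", "vector"):
            for q in (None, [0.0]):
                assert should_infer_v19(name, qt, q) == should_infer_v18(name, qt, q), (name, qt, q)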
@@ -562,7 +562,7 @@ async def test_table_async():
     async_db = await lancedb.connect_async(uri, read_consistency_interval=timedelta(0))
     async_tbl = await async_db.open_table("test_table_async")
     # --8<-- [end:table_async_strong_consistency]
-    # --8<-- [start:table_async_eventual_consistency]
+    # --8<-- [start:table_async_ventual_consistency]
     uri = "data/sample-lancedb"
     async_db = await lancedb.connect_async(
         uri, read_consistency_interval=timedelta(seconds=5)
@@ -18,19 +18,15 @@ def test_upsert(mem_db):
         {"id": 1, "name": "Bobby"},
         {"id": 2, "name": "Charlie"},
     ]
-    res = (
+    (
         table.merge_insert("id")
         .when_matched_update_all()
         .when_not_matched_insert_all()
         .execute(new_users)
     )
     table.count_rows()  # 3
-    res  # {'num_inserted_rows': 1, 'num_updated_rows': 1, 'num_deleted_rows': 0}
     # --8<-- [end:upsert_basic]
     assert table.count_rows() == 3
-    assert res.num_inserted_rows == 1
-    assert res.num_deleted_rows == 0
-    assert res.num_updated_rows == 1


 @pytest.mark.asyncio
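A minimal sketch of the upsert pattern these doc tests exercise, including the MergeResult handle that the left-hand side of this compare asserts on (connection path illustrative):

    import lancedb

    db = lancedb.connect("data/upsert-demo")
    table = db.create_table("users", [{"id": 1, "name": "Alice"}, {"id": 2, "name": "Bob"}])

    res = (
        table.merge_insert("id")
        .when_matched_update_all()
        .when_not_matched_insert_all()
        .execute([{"id": 2, "name": "Bobby"}, {"id": 3, "name": "Charlie"}])
    )
    assert table.count_rows() == 3
    # On the MergeResult-returning side of this compare:
    # res.version == 2, res.num_updated_rows == 1,
    # res.num_inserted_rows == 1, res.num_deleted_rows == 0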
@@ -48,22 +44,15 @@ async def test_upsert_async(mem_db_async):
         {"id": 1, "name": "Bobby"},
         {"id": 2, "name": "Charlie"},
     ]
-    res = await (
+    await (
         table.merge_insert("id")
         .when_matched_update_all()
         .when_not_matched_insert_all()
         .execute(new_users)
     )
     await table.count_rows()  # 3
-    res
-    # MergeResult(version=2, num_updated_rows=1,
-    # num_inserted_rows=1, num_deleted_rows=0)
     # --8<-- [end:upsert_basic_async]
     assert await table.count_rows() == 3
-    assert res.version == 2
-    assert res.num_inserted_rows == 1
-    assert res.num_deleted_rows == 0
-    assert res.num_updated_rows == 1


 def test_insert_if_not_exists(mem_db):
@@ -80,19 +69,10 @@ def test_insert_if_not_exists(mem_db):
         {"domain": "google.com", "name": "Google"},
         {"domain": "facebook.com", "name": "Facebook"},
     ]
-    res = (
-        table.merge_insert("domain").when_not_matched_insert_all().execute(new_domains)
-    )
+    (table.merge_insert("domain").when_not_matched_insert_all().execute(new_domains))
     table.count_rows()  # 3
-    res
-    # MergeResult(version=2, num_updated_rows=0,
-    # num_inserted_rows=1, num_deleted_rows=0)
     # --8<-- [end:insert_if_not_exists]
     assert table.count_rows() == 3
-    assert res.version == 2
-    assert res.num_inserted_rows == 1
-    assert res.num_deleted_rows == 0
-    assert res.num_updated_rows == 0


 @pytest.mark.asyncio
@@ -110,19 +90,12 @@ async def test_insert_if_not_exists_async(mem_db_async):
         {"domain": "google.com", "name": "Google"},
         {"domain": "facebook.com", "name": "Facebook"},
     ]
-    res = await (
+    await (
         table.merge_insert("domain").when_not_matched_insert_all().execute(new_domains)
     )
     await table.count_rows()  # 3
-    res
-    # MergeResult(version=2, num_updated_rows=0,
-    # num_inserted_rows=1, num_deleted_rows=0)
-    # --8<-- [end:insert_if_not_exists]
+    # --8<-- [end:insert_if_not_exists_async]
     assert await table.count_rows() == 3
-    assert res.version == 2
-    assert res.num_inserted_rows == 1
-    assert res.num_deleted_rows == 0
-    assert res.num_updated_rows == 0


 def test_replace_range(mem_db):
@@ -140,7 +113,7 @@ def test_replace_range(mem_db):
     new_chunks = [
         {"doc_id": 1, "chunk_id": 0, "text": "Baz"},
     ]
-    res = (
+    (
         table.merge_insert(["doc_id", "chunk_id"])
         .when_matched_update_all()
         .when_not_matched_insert_all()
@@ -148,15 +121,8 @@ def test_replace_range(mem_db):
         .execute(new_chunks)
     )
     table.count_rows("doc_id = 1")  # 1
-    res
-    # MergeResult(version=2, num_updated_rows=1,
-    # num_inserted_rows=0, num_deleted_rows=1)
-    # --8<-- [end:insert_if_not_exists]
+    # --8<-- [end:replace_range]
     assert table.count_rows("doc_id = 1") == 1
-    assert res.version == 2
-    assert res.num_inserted_rows == 0
-    assert res.num_deleted_rows == 1
-    assert res.num_updated_rows == 1


 @pytest.mark.asyncio
@@ -175,7 +141,7 @@ async def test_replace_range_async(mem_db_async):
     new_chunks = [
         {"doc_id": 1, "chunk_id": 0, "text": "Baz"},
     ]
-    res = await (
+    await (
         table.merge_insert(["doc_id", "chunk_id"])
         .when_matched_update_all()
         .when_not_matched_insert_all()
@@ -183,12 +149,5 @@ async def test_replace_range_async(mem_db_async):
         .execute(new_chunks)
     )
     await table.count_rows("doc_id = 1")  # 1
-    res
-    # MergeResult(version=2, num_updated_rows=1,
-    # num_inserted_rows=0, num_deleted_rows=1)
-    # --8<-- [end:insert_if_not_exists]
+    # --8<-- [end:replace_range_async]
    assert await table.count_rows("doc_id = 1") == 1
-    assert res.version == 2
-    assert res.num_inserted_rows == 0
-    assert res.num_deleted_rows == 1
-    assert res.num_updated_rows == 1
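The replace-range variant keys on both doc_id and chunk_id. The builder line elided between the two hunks above (old line 147) is not shown, so the delete step in this sketch is an assumption, not a quote from the diff:

    # Hypothetical reconstruction; when_not_matched_by_source_delete is assumed
    # to be the elided builder line.
    res = (
        table.merge_insert(["doc_id", "chunk_id"])
        .when_matched_update_all()
        .when_not_matched_insert_all()
        .when_not_matched_by_source_delete("doc_id = 1")  # assumed elided line
        .execute(new_chunks)
    )
    # Matches the removed comment: MergeResult(version=2, num_updated_rows=1,
    # num_inserted_rows=0, num_deleted_rows=1)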
@@ -6,9 +6,7 @@ import lancedb

 # --8<-- [end:import-lancedb]
 # --8<-- [start:import-numpy]
-from lancedb.query import BoostQuery, MatchQuery
 import numpy as np
-import pyarrow as pa

 # --8<-- [end:import-numpy]
 # --8<-- [start:import-datetime]
@@ -156,84 +154,6 @@ async def test_vector_search_async():
     # --8<-- [end:search_result_async_as_list]


-def test_fts_fuzzy_query():
-    uri = "data/fuzzy-example"
-    db = lancedb.connect(uri)
-
-    table = db.create_table(
-        "my_table_fts_fuzzy",
-        data=pa.table(
-            {
-                "text": [
-                    "fa",
-                    "fo",  # spellchecker:disable-line
-                    "fob",
-                    "focus",
-                    "foo",
-                    "food",
-                    "foul",
-                ]
-            }
-        ),
-        mode="overwrite",
-    )
-    table.create_fts_index("text", use_tantivy=False, replace=True)
-
-    results = table.search(MatchQuery("foo", "text", fuzziness=1)).to_pandas()
-    assert len(results) == 4
-    assert set(results["text"].to_list()) == {
-        "foo",
-        "fo",  # 1 deletion  # spellchecker:disable-line
-        "fob",  # 1 substitution
-        "food",  # 1 insertion
-    }
-
-
-def test_fts_boost_query():
-    uri = "data/boost-example"
-    db = lancedb.connect(uri)
-
-    table = db.create_table(
-        "my_table_fts_boost",
-        data=pa.table(
-            {
-                "title": [
-                    "The Hidden Gems of Travel",
-                    "Exploring Nature's Wonders",
-                    "Cultural Treasures Unveiled",
-                    "The Nightlife Chronicles",
-                    "Scenic Escapes and Challenges",
-                ],
-                "desc": [
-                    "A vibrant city with occasional traffic jams.",
-                    "Beautiful landscapes but overpriced tourist spots.",
-                    "Rich cultural heritage but humid summers.",
-                    "Bustling nightlife but noisy streets.",
-                    "Scenic views but limited public transport options.",
-                ],
-            }
-        ),
-        mode="overwrite",
-    )
-    table.create_fts_index("desc", use_tantivy=False, replace=True)
-
-    results = table.search(
-        BoostQuery(
-            MatchQuery("beautiful, cultural, nightlife", "desc"),
-            MatchQuery("bad traffic jams, overpriced", "desc"),
-        ),
-    ).to_pandas()
-
-    # we will hit 3 results because the positive query has 3 hits
-    assert len(results) == 3
-    # the one containing "overpriced" will be negatively boosted,
-    # so it will be the last one
-    assert (
-        results["desc"].to_list()[2]
-        == "Beautiful landscapes but overpriced tourist spots."
-    )
-
-
 def test_fts_native():
     # --8<-- [start:basic_fts]
     uri = "data/sample-lancedb"
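For reference, the removed fuzzy test's expectations follow single-edit Levenshtein distance from "foo": "fo" is one deletion, "fob" one substitution, "food" one insertion, while "fa", "focus", and "foul" need two or more edits. A quick stdlib-only check of the same idea (illustrative, not lancedb code):

    def edit_distance(a: str, b: str) -> int:
        # Classic dynamic-programming Levenshtein distance.
        dp = list(range(len(b) + 1))
        for i, ca in enumerate(a, 1):
            prev, dp[0] = dp[0], i
            for j, cb in enumerate(b, 1):
                prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (ca != cb))
        return dp[-1]

    words = ["fa", "fo", "fob", "focus", "foo", "food", "foul"]
    assert {w for w in words if edit_distance("foo", w) <= 1} == {"fo", "fob", "foo", "food"}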
@@ -11,8 +11,7 @@ import pandas as pd
 import pyarrow as pa
 import pytest
 from lancedb.embeddings import get_registry
-from lancedb.pydantic import LanceModel, Vector, MultiVector
-import requests
+from lancedb.pydantic import LanceModel, Vector

 # These are integration tests for embedding functions.
 # They are slow because they require downloading models
@@ -517,125 +516,3 @@ def test_voyageai_embedding_function():

     tbl.add(df)
     assert len(tbl.to_pandas()["vector"][0]) == voyageai.ndims()
-
-
-@pytest.mark.slow
-@pytest.mark.skipif(
-    os.environ.get("VOYAGE_API_KEY") is None, reason="VOYAGE_API_KEY not set"
-)
-def test_voyageai_multimodal_embedding_function():
-    voyageai = (
-        get_registry().get("voyageai").create(name="voyage-multimodal-3", max_retries=0)
-    )
-
-    class Images(LanceModel):
-        label: str
-        image_uri: str = voyageai.SourceField()  # image uri as the source
-        image_bytes: bytes = voyageai.SourceField()  # image bytes as the source
-        vector: Vector(voyageai.ndims()) = voyageai.VectorField()  # vector column
-        vec_from_bytes: Vector(voyageai.ndims()) = (
-            voyageai.VectorField()
-        )  # Another vector column
-
-    db = lancedb.connect("~/lancedb")
-    table = db.create_table("test", schema=Images, mode="overwrite")
-    labels = ["cat", "cat", "dog", "dog", "horse", "horse"]
-    uris = [
-        "http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg",
-        "http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg",
-        "http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg",
-        "http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg",
-        "http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg",
-        "http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg",
-    ]
-    # get each uri as bytes
-    image_bytes = [requests.get(uri).content for uri in uris]
-    table.add(
-        pd.DataFrame({"label": labels, "image_uri": uris, "image_bytes": image_bytes})
-    )
-    assert len(table.to_pandas()["vector"][0]) == voyageai.ndims()
-
-
-@pytest.mark.slow
-@pytest.mark.skipif(
-    os.environ.get("VOYAGE_API_KEY") is None, reason="VOYAGE_API_KEY not set"
-)
-def test_voyageai_multimodal_embedding_text_function():
-    voyageai = (
-        get_registry().get("voyageai").create(name="voyage-multimodal-3", max_retries=0)
-    )
-
-    class TextModel(LanceModel):
-        text: str = voyageai.SourceField()
-        vector: Vector(voyageai.ndims()) = voyageai.VectorField()
-
-    df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
-    db = lancedb.connect("~/lancedb")
-    tbl = db.create_table("test", schema=TextModel, mode="overwrite")
-
-    tbl.add(df)
-    assert len(tbl.to_pandas()["vector"][0]) == voyageai.ndims()
-
-
-@pytest.mark.slow
-@pytest.mark.skipif(
-    importlib.util.find_spec("colpali_engine") is None,
-    reason="colpali_engine not installed",
-)
-def test_colpali(tmp_path):
-    import requests
-    from lancedb.pydantic import LanceModel
-
-    db = lancedb.connect(tmp_path)
-    registry = get_registry()
-    func = registry.get("colpali").create()
-
-    class MediaItems(LanceModel):
-        text: str
-        image_uri: str = func.SourceField()
-        image_bytes: bytes = func.SourceField()
-        image_vectors: MultiVector(func.ndims()) = (
-            func.VectorField()
-        )  # Multivector image embeddings
-
-    table = db.create_table("media", schema=MediaItems)
-
-    texts = [
-        "a cute cat playing with yarn",
-        "a puppy in a flower field",
-        "a red sports car on the highway",
-        "a vintage bicycle leaning against a wall",
-        "a plate of delicious pasta",
-        "fresh fruit salad in a bowl",
-    ]
-
-    uris = [
-        "http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg",
-        "http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg",
-        "http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg",
-        "http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg",
-        "http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg",
-        "http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg",
-    ]
-
-    # Get images as bytes
-    image_bytes = [requests.get(uri).content for uri in uris]
-
-    table.add(
-        pd.DataFrame({"text": texts, "image_uri": uris, "image_bytes": image_bytes})
-    )
-
-    # Test text-to-image search
-    image_results = (
-        table.search("fluffy companion", vector_column_name="image_vectors")
-        .limit(1)
-        .to_pydantic(MediaItems)[0]
-    )
-    assert "cat" in image_results.text.lower() or "puppy" in image_results.text.lower()
-
-    # Verify multivector dimensions
-    first_row = table.to_arrow().to_pylist()[0]
-    assert len(first_row["image_vectors"]) > 1, "Should have multiple image vectors"
-    assert len(first_row["image_vectors"][0]) == func.ndims(), (
-        "Vector dimension mismatch"
-    )
@@ -20,9 +20,7 @@ from unittest import mock
 import lancedb as ldb
 from lancedb.db import DBConnection
 from lancedb.index import FTS
-from lancedb.query import BoostQuery, MatchQuery, MultiMatchQuery, PhraseQuery
 import numpy as np
-import pyarrow as pa
 import pandas as pd
 import pytest
 from utils import exception_output
@@ -180,47 +178,11 @@ def test_search_fts(table, use_tantivy):
     results = table.search("puppy").select(["id", "text"]).to_list()
     assert len(results) == 10

-    if not use_tantivy:
-        # Test with a query
-        results = (
-            table.search(MatchQuery("puppy", "text"))
-            .select(["id", "text"])
-            .limit(5)
-            .to_list()
-        )
-        assert len(results) == 5
-
-        # Test boost query
-        results = (
-            table.search(
-                BoostQuery(
-                    MatchQuery("puppy", "text"),
-                    MatchQuery("runs", "text"),
-                )
-            )
-            .select(["id", "text"])
-            .limit(5)
-            .to_list()
-        )
-        assert len(results) == 5
-
-        # Test multi match query
-        table.create_fts_index("text2", use_tantivy=use_tantivy)
-        results = (
-            table.search(MultiMatchQuery("puppy", ["text", "text2"]))
-            .select(["id", "text"])
-            .limit(5)
-            .to_list()
-        )
-        assert len(results) == 5
-        assert len(results[0]) == 3  # id, text, _score
-

 @pytest.mark.asyncio
 async def test_fts_select_async(async_table):
     tbl = await async_table
     await tbl.create_index("text", config=FTS())
-    await tbl.create_index("text2", config=FTS())
     results = (
         await tbl.query()
         .nearest_to_text("puppy")
@@ -231,54 +193,6 @@ async def test_fts_select_async(async_table):
     assert len(results) == 5
     assert len(results[0]) == 3  # id, text, _score

-    # Test with FullTextQuery
-    results = (
-        await tbl.query()
-        .nearest_to_text(MatchQuery("puppy", "text"))
-        .select(["id", "text"])
-        .limit(5)
-        .to_list()
-    )
-    assert len(results) == 5
-    assert len(results[0]) == 3  # id, text, _score
-
-    # Test with BoostQuery
-    results = (
-        await tbl.query()
-        .nearest_to_text(
-            BoostQuery(
-                MatchQuery("puppy", "text"),
-                MatchQuery("runs", "text"),
-            )
-        )
-        .select(["id", "text"])
-        .limit(5)
-        .to_list()
-    )
-    assert len(results) == 5
-    assert len(results[0]) == 3  # id, text, _score
-
-    # Test with MultiMatchQuery
-    results = (
-        await tbl.query()
-        .nearest_to_text(MultiMatchQuery("puppy", ["text", "text2"]))
-        .select(["id", "text"])
-        .limit(5)
-        .to_list()
-    )
-    assert len(results) == 5
-    assert len(results[0]) == 3  # id, text, _score
-
-    # Test with search() API
-    results = (
-        await (await tbl.search(MatchQuery("puppy", "text")))
-        .select(["id", "text"])
-        .limit(5)
-        .to_list()
-    )
-    assert len(results) == 5
-    assert len(results[0]) == 3  # id, text, _score
-

 def test_search_fts_phrase_query(table):
     table.create_fts_index("text", use_tantivy=False, with_position=False)
@@ -293,13 +207,6 @@ def test_search_fts_phrase_query(table):
     assert len(results) > len(phrase_results)
     assert len(phrase_results) > 0

-    # Test with a query
-    phrase_results = (
-        table.search(PhraseQuery("puppy runs", "text")).limit(100).to_list()
-    )
-    assert len(results) > len(phrase_results)
-    assert len(phrase_results) > 0
-

 @pytest.mark.asyncio
 async def test_search_fts_phrase_query_async(async_table):
@@ -320,16 +227,6 @@ async def test_search_fts_phrase_query_async(async_table):
     assert len(results) > len(phrase_results)
     assert len(phrase_results) > 0

-    # Test with a query
-    phrase_results = (
-        await async_table.query()
-        .nearest_to_text(PhraseQuery("puppy runs", "text"))
-        .limit(100)
-        .to_list()
-    )
-    assert len(results) > len(phrase_results)
-    assert len(phrase_results) > 0
-

 def test_search_fts_specify_column(table):
     table.create_fts_index("text", use_tantivy=False)
@@ -627,32 +524,3 @@ def test_language(mem_db: DBConnection):
     # Stop words -> no results
     results = table.search("la", query_type="fts").limit(5).to_list()
     assert len(results) == 0
-
-
-def test_fts_on_list(mem_db: DBConnection):
-    data = pa.table(
-        {
-            "text": [
-                ["lance database", "the", "search"],
-                ["lance database"],
-                ["lance", "search"],
-                ["database", "search"],
-                ["unrelated", "doc"],
-            ],
-            "vector": [
-                [1.0, 2.0, 3.0],
-                [4.0, 5.0, 6.0],
-                [7.0, 8.0, 9.0],
-                [10.0, 11.0, 12.0],
-                [13.0, 14.0, 15.0],
-            ],
-        }
-    )
-    table = mem_db.create_table("test", data=data)
-    table.create_fts_index("text", use_tantivy=False)
-
-    res = table.search("lance").limit(5).to_list()
-    assert len(res) == 3
-
-    res = table.search(PhraseQuery("lance database", "text")).limit(5).to_list()
-    assert len(res) == 2
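The removed assertions exercised the structured FTS query objects from lancedb.query; a condensed sketch of the same surface, assuming a table with FTS indexes on "text" and "text2":

    from lancedb.query import BoostQuery, MatchQuery, MultiMatchQuery, PhraseQuery

    match = MatchQuery("puppy", "text", fuzziness=1)     # term match, one edit allowed
    phrase = PhraseQuery("puppy runs", "text")           # exact phrase
    multi = MultiMatchQuery("puppy", ["text", "text2"])  # match across columns
    boosted = BoostQuery(
        MatchQuery("puppy", "text"),  # positive query
        MatchQuery("runs", "text"),   # negative query: its hits are demoted
    )
    rows = table.search(boosted).select(["id", "text"]).limit(5).to_list()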
@@ -4,32 +4,13 @@
 import lancedb

 from lancedb.query import LanceHybridQueryBuilder
-from lancedb.rerankers.rrf import RRFReranker
 import pyarrow as pa
 import pyarrow.compute as pc
 import pytest
 import pytest_asyncio

 from lancedb.index import FTS
-from lancedb.table import AsyncTable, Table
-
-
-@pytest.fixture
-def sync_table(tmpdir_factory) -> Table:
-    tmp_path = str(tmpdir_factory.mktemp("data"))
-    db = lancedb.connect(tmp_path)
-    data = pa.table(
-        {
-            "text": pa.array(["a", "b", "cat", "dog"]),
-            "vector": pa.array(
-                [[0.1, 0.1], [2, 2], [-0.1, -0.1], [0.5, -0.5]],
-                type=pa.list_(pa.float32(), list_size=2),
-            ),
-        }
-    )
-    table = db.create_table("test", data)
-    table.create_fts_index("text", with_position=False, use_tantivy=False)
-    return table
+from lancedb.table import AsyncTable


 @pytest_asyncio.fixture
@@ -121,42 +102,6 @@ async def test_async_hybrid_query_default_limit(table: AsyncTable):
     assert texts.count("a") == 1


-def test_hybrid_query_distance_range(sync_table: Table):
-    reranker = RRFReranker(return_score="all")
-    result = (
-        sync_table.search(query_type="hybrid")
-        .vector([0.0, 0.4])
-        .text("cat and dog")
-        .distance_range(lower_bound=0.2, upper_bound=0.5)
-        .rerank(reranker)
-        .limit(2)
-        .to_arrow()
-    )
-    assert len(result) == 2
-    print(result)
-    for dist in result["_distance"]:
-        if dist.is_valid:
-            assert 0.2 <= dist.as_py() <= 0.5
-
-
-@pytest.mark.asyncio
-async def test_hybrid_query_distance_range_async(table: AsyncTable):
-    reranker = RRFReranker(return_score="all")
-    result = await (
-        table.query()
-        .nearest_to([0.0, 0.4])
-        .nearest_to_text("cat and dog")
-        .distance_range(lower_bound=0.2, upper_bound=0.5)
-        .rerank(reranker)
-        .limit(2)
-        .to_arrow()
-    )
-    assert len(result) == 2
-    for dist in result["_distance"]:
-        if dist.is_valid:
-            assert 0.2 <= dist.as_py() <= 0.5
-
-
 @pytest.mark.asyncio
 async def test_explain_plan(table: AsyncTable):
     plan = await (
@@ -169,16 +114,6 @@ async def test_explain_plan(table: AsyncTable):
     assert "LanceScan" in plan


-@pytest.mark.asyncio
-async def test_analyze_plan(table: AsyncTable):
-    res = await (
-        table.query().nearest_to_text("dog").nearest_to([0.1, 0.1]).analyze_plan()
-    )
-
-    assert "AnalyzeExec" in res
-    assert "metrics=" in res
-
-
 def test_normalize_scores():
     cases = [
         (pa.array([0.1, 0.4]), pa.array([0.0, 1.0])),
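A condensed sketch of the hybrid-query behavior the removed tests covered (sync builder shown; the table and its FTS index are assumed to exist):

    from lancedb.rerankers.rrf import RRFReranker

    reranker = RRFReranker(return_score="all")
    result = (
        table.search(query_type="hybrid")
        .vector([0.0, 0.4])
        .text("cat and dog")
        .distance_range(lower_bound=0.2, upper_bound=0.5)  # bounds the vector leg
        .rerank(reranker)
        .limit(2)
        .to_arrow()
    )
    # Rows that came only from the FTS leg carry a null _distance; every valid
    # _distance falls inside [0.2, 0.5], which is what the removed assertions checked.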
@@ -8,7 +8,7 @@ import pyarrow as pa
 import pytest
 import pytest_asyncio
 from lancedb import AsyncConnection, AsyncTable, connect_async
-from lancedb.index import BTree, IvfFlat, IvfPq, Bitmap, LabelList, HnswPq, HnswSq, FTS
+from lancedb.index import BTree, IvfFlat, IvfPq, Bitmap, LabelList, HnswPq, HnswSq


 @pytest_asyncio.fixture
@@ -31,7 +31,6 @@ async def some_table(db_async):
         {
             "id": list(range(NROWS)),
             "vector": sample_fixed_size_list_array(NROWS, DIM),
-            "fsb": pa.array([bytes([i]) for i in range(NROWS)], pa.binary(1)),
             "tags": [
                 [f"tag{random.randint(0, 8)}" for _ in range(2)] for _ in range(NROWS)
             ],
@@ -86,16 +85,6 @@ async def test_create_scalar_index(some_table: AsyncTable):
     assert len(indices) == 0


-@pytest.mark.asyncio
-async def test_create_fixed_size_binary_index(some_table: AsyncTable):
-    await some_table.create_index("fsb", config=BTree())
-    indices = await some_table.list_indices()
-    assert str(indices) == '[Index(BTree, columns=["fsb"], name="fsb_idx")]'
-    assert len(indices) == 1
-    assert indices[0].index_type == "BTree"
-    assert indices[0].columns == ["fsb"]
-
-
 @pytest.mark.asyncio
 async def test_create_bitmap_index(some_table: AsyncTable):
     await some_table.create_index("id", config=Bitmap())
@@ -119,18 +108,6 @@ async def test_create_label_list_index(some_table: AsyncTable):
     assert str(indices) == '[Index(LabelList, columns=["tags"], name="tags_idx")]'


-@pytest.mark.asyncio
-async def test_full_text_search_index(some_table: AsyncTable):
-    await some_table.create_index("tags", config=FTS(with_position=False))
-    indices = await some_table.list_indices()
-    assert str(indices) == '[Index(FTS, columns=["tags"], name="tags_idx")]'
-
-    await some_table.prewarm_index("tags_idx")
-
-    res = await (await some_table.search("tag0")).to_arrow()
-    assert res.num_rows > 0
-
-
 @pytest.mark.asyncio
 async def test_create_vector_index(some_table: AsyncTable):
     # Can create
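A sketch of the async FTS-index flow the removed test exercised (database URI and table name illustrative):

    import lancedb
    from lancedb.index import FTS

    async def build_and_query_fts(uri: str) -> int:
        db = await lancedb.connect_async(uri)
        tbl = await db.open_table("some_table")
        await tbl.create_index("tags", config=FTS(with_position=False))
        await tbl.prewarm_index("tags_idx")  # warm the index cache before querying
        res = await (await tbl.search("tag0")).to_arrow()
        return res.num_rows  # > 0 once rows containing "tag0" are indexed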
@@ -9,13 +9,7 @@ from typing import List, Optional, Tuple
 import pyarrow as pa
 import pydantic
 import pytest
-from lancedb.pydantic import (
-    PYDANTIC_VERSION,
-    LanceModel,
-    Vector,
-    pydantic_to_schema,
-    MultiVector,
-)
+from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema
 from pydantic import BaseModel
 from pydantic import Field

@@ -360,55 +354,3 @@ def test_optional_nested_model():
             ),
         ]
     )
-
-
-def test_multi_vector():
-    class TestModel(pydantic.BaseModel):
-        vec: MultiVector(8)
-
-    schema = pydantic_to_schema(TestModel)
-    assert schema == pa.schema(
-        [pa.field("vec", pa.list_(pa.list_(pa.float32(), 8)), True)]
-    )
-
-    with pytest.raises(pydantic.ValidationError):
-        TestModel(vec=[[1.0] * 7])
-
-    with pytest.raises(pydantic.ValidationError):
-        TestModel(vec=[[1.0] * 9])
-
-    TestModel(vec=[[1.0] * 8])
-    TestModel(vec=[[1.0] * 8, [2.0] * 8])
-
-    TestModel(vec=[])
-
-
-def test_multi_vector_nullable():
-    class NullableModel(pydantic.BaseModel):
-        vec: MultiVector(16, nullable=False)
-
-    schema = pydantic_to_schema(NullableModel)
-    assert schema == pa.schema(
-        [pa.field("vec", pa.list_(pa.list_(pa.float32(), 16)), False)]
-    )
-
-    class DefaultModel(pydantic.BaseModel):
-        vec: MultiVector(16)
-
-    schema = pydantic_to_schema(DefaultModel)
-    assert schema == pa.schema(
-        [pa.field("vec", pa.list_(pa.list_(pa.float32(), 16)), True)]
-    )
-
-
-def test_multi_vector_in_lance_model():
-    class TestModel(LanceModel):
-        id: int
-        vectors: MultiVector(16) = Field(default=[[0.0] * 16])
-
-    schema = pydantic_to_schema(TestModel)
-    assert schema == TestModel.to_arrow_schema()
-    assert TestModel.field_names() == ["id", "vectors"]
-
-    t = TestModel(id=1)
-    assert t.vectors == [[0.0] * 16]
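A sketch of the MultiVector pydantic annotation these removed tests covered: a column holding any number of fixed-width float32 vectors, with the inner width validated on construction.

    import pyarrow as pa
    from lancedb.pydantic import LanceModel, MultiVector, pydantic_to_schema

    class Doc(LanceModel):
        id: int
        vectors: MultiVector(16)  # each inner vector must be exactly 16 wide

    schema = pydantic_to_schema(Doc)
    # Arrow type is list<list<float32, 16>>, matching the removed assertions.
    assert schema.field("vectors").type == pa.list_(pa.list_(pa.float32(), 16))
    Doc(id=1, vectors=[[0.0] * 16, [1.0] * 16])  # ok; a 15-wide row would raise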
[Some files were not shown because too many files have changed in this diff]