mirror of https://github.com/lancedb/lancedb.git
synced 2025-12-23 05:19:58 +00:00

Compare commits: python-v0. ... python-v0. (69 commits)
| SHA1 |
|---|
| 27d9e5c596 |
| ec8271931f |
| 6c6966600c |
| 2e170c3c7b |
| fd92e651d1 |
| c298482ee1 |
| d59f64b5a3 |
| 30ed8c4c43 |
| 4a2cdbf299 |
| 657843d9e9 |
| 1cd76b8498 |
| a38f784081 |
| 647dee4e94 |
| 0844c2dd64 |
| fd2692295c |
| d4ea50fba1 |
| 0d42297cf8 |
| a6d4125cbf |
| 5c32a99e61 |
| cefaa75b24 |
| bd62c2384f |
| f0bc08c0d7 |
| e52ac79c69 |
| f091f57594 |
| a997fd4108 |
| 1486514ccc |
| a505bc3965 |
| c1738250a3 |
| 1ee63984f5 |
| 2eb2c8862a |
| 4ea8e178d3 |
| e4485a630e |
| fb95f9b3bd |
| 625bab3f21 |
| e59f9382a0 |
| fdee7ba477 |
| c44fa3abc4 |
| fc43aac0ed |
| e67cd0baf9 |
| 26dab93f2a |
| b9bdb8d937 |
| a1d1833a40 |
| a547c523c2 |
| dc8b75feab |
| c1600cdc06 |
| f5dee46970 |
| 346cbf8bf7 |
| 3c7dfe9f28 |
| f52d05d3fa |
| c321cccc12 |
| cba14a5743 |
| 72057b743d |
| 698f329598 |
| 79fa745130 |
| 2ad71bdeca |
| 7c13615096 |
| f882f5b69a |
| a68311a893 |
| 846a5cea33 |
| e3dec647b5 |
| c58104cecc |
| b3b5362632 |
| abe06fee3d |
| 93a82fd371 |
| 0d379e6ffa |
| e1388bdfdd |
| 315a24c2bc |
| 6dd4cf6038 |
| f97e751b3c |
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.18.1"
+current_version = "0.19.0-beta.5"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
    (?P<minor>0|[1-9]\\d*)\\.
6  .github/workflows/java-publish.yml (vendored)

@@ -43,7 +43,7 @@ jobs:
       - uses: Swatinem/rust-cache@v2
       - uses: actions-rust-lang/setup-rust-toolchain@v1
         with:
-          toolchain: "1.79.0"
+          toolchain: "1.81.0"
           cache-workspaces: "./java/core/lancedb-jni"
       # Disable full debug symbol generation to speed up CI build and keep memory down
       # "1" means line tables only, which is useful for panic tracebacks.
@@ -97,7 +97,7 @@ jobs:
      - name: Dry run
        if: github.event_name == 'pull_request'
        run: |
-          mvn --batch-mode -DskipTests package
+          mvn --batch-mode -DskipTests -Drust.release.build=true package
      - name: Set github
        run: |
          git config --global user.email "LanceDB Github Runner"
@@ -108,7 +108,7 @@ jobs:
          echo "use-agent" >> ~/.gnupg/gpg.conf
          echo "pinentry-mode loopback" >> ~/.gnupg/gpg.conf
          export GPG_TTY=$(tty)
-          mvn --batch-mode -DskipTests -DpushChanges=false -Dgpg.passphrase=${{ secrets.GPG_PASSPHRASE }} deploy -P deploy-to-ossrh
+          mvn --batch-mode -DskipTests -Drust.release.build=true -DpushChanges=false -Dgpg.passphrase=${{ secrets.GPG_PASSPHRASE }} deploy -P deploy-to-ossrh
        env:
          SONATYPE_USER: ${{ secrets.SONATYPE_USER }}
          SONATYPE_TOKEN: ${{ secrets.SONATYPE_TOKEN }}
45  .github/workflows/npm-publish.yml (vendored)

@@ -18,6 +18,7 @@ on:
     # This should trigger a dry run (we skip the final publish step)
     paths:
       - .github/workflows/npm-publish.yml
+      - Cargo.toml # Change in dependency frequently breaks builds

 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
@@ -130,29 +131,24 @@ jobs:
            set -e &&
            apt-get update &&
            apt-get install -y protobuf-compiler pkg-config
-
-        # TODO: re-enable x64 musl builds. I could not figure out why, but it
-        # consistently made GHA runners non-responsive at the end of build. Example:
-        # https://github.com/lancedb/lancedb/actions/runs/13980431071/job/39144319470?pr=2250
-
-        # - target: x86_64-unknown-linux-musl
-        #   # This one seems to need some extra memory
-        #   host: ubuntu-2404-8x-x64
-        #   # https://github.com/napi-rs/napi-rs/blob/main/alpine.Dockerfile
-        #   docker: ghcr.io/napi-rs/napi-rs/nodejs-rust:lts-alpine
-        #   features: ","
-        #   pre_build: |-
-        #     set -e &&
-        #     apk add protobuf-dev curl &&
-        #     ln -s /usr/lib/gcc/x86_64-alpine-linux-musl/14.2.0/crtbeginS.o /usr/lib/crtbeginS.o &&
-        #     ln -s /usr/lib/libgcc_s.so /usr/lib/libgcc.so
+        - target: x86_64-unknown-linux-musl
+          # This one seems to need some extra memory
+          host: ubuntu-2404-8x-x64
+          # https://github.com/napi-rs/napi-rs/blob/main/alpine.Dockerfile
+          docker: ghcr.io/napi-rs/napi-rs/nodejs-rust:lts-alpine
+          features: fp16kernels
+          pre_build: |-
+            set -e &&
+            apk add protobuf-dev curl &&
+            ln -s /usr/lib/gcc/x86_64-alpine-linux-musl/14.2.0/crtbeginS.o /usr/lib/crtbeginS.o &&
+            ln -s /usr/lib/libgcc_s.so /usr/lib/libgcc.so &&
+            CC=gcc &&
+            CXX=g++
        - target: aarch64-unknown-linux-gnu
          host: ubuntu-2404-8x-x64
          # https://github.com/napi-rs/napi-rs/blob/main/debian-aarch64.Dockerfile
          docker: ghcr.io/napi-rs/napi-rs/nodejs-rust:lts-debian-aarch64
-          # TODO: enable fp16kernels after https://github.com/lancedb/lance/pull/3559
-          features: ","
+          features: "fp16kernels"
          pre_build: |-
            set -e &&
            apt-get update &&
@@ -170,8 +166,8 @@ jobs:
            set -e &&
            apk add protobuf-dev &&
            rustup target add aarch64-unknown-linux-musl &&
-            export CC="/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc" &&
-            export CXX="/aarch64-linux-musl-cross/bin/aarch64-linux-musl-g++"
+            export CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc &&
+            export CXX_aarch64_unknown_linux_musl=aarch64-linux-musl-g++
    name: build - ${{ matrix.settings.target }}
    runs-on: ${{ matrix.settings.host }}
    defaults:
@@ -331,6 +327,7 @@ jobs:
          node-version: 20
          cache: npm
          cache-dependency-path: nodejs/package-lock.json
+          registry-url: "https://registry.npmjs.org"
      - name: Install dependencies
        run: npm ci
      - uses: actions/download-artifact@v4
@@ -534,6 +531,12 @@ jobs:
          for filename in *.tgz; do
            npm publish $PUBLISH_ARGS $filename
          done
+      - name: Deprecate
+        env:
+          NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
+        # We need to deprecate the old package to avoid confusion.
+        # Each time we publish a new version, it gets undeprecated.
+        run: npm deprecate vectordb "Use @lancedb/lancedb instead."
      - name: Notify Slack Action
        uses: ravsamhq/notify-slack-action@2.3.0
        if: ${{ always() }}
9  .github/workflows/pypi-publish.yml (vendored)

@@ -4,6 +4,11 @@ on:
   push:
     tags:
       - 'python-v*'
+  pull_request:
+    # This should trigger a dry run (we skip the final publish step)
+    paths:
+      - .github/workflows/pypi-publish.yml
+      - Cargo.toml # Change in dependency frequently breaks builds

 jobs:
   linux:
@@ -46,6 +51,7 @@ jobs:
          arm-build: ${{ matrix.config.platform == 'aarch64' }}
          manylinux: ${{ matrix.config.manylinux }}
      - uses: ./.github/workflows/upload_wheel
+        if: startsWith(github.ref, 'refs/tags/python-v')
        with:
          pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
          fury_token: ${{ secrets.FURY_TOKEN }}
@@ -75,6 +81,7 @@ jobs:
          python-minor-version: 8
          args: "--release --strip --target ${{ matrix.config.target }} --features fp16kernels"
      - uses: ./.github/workflows/upload_wheel
+        if: startsWith(github.ref, 'refs/tags/python-v')
        with:
          pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
          fury_token: ${{ secrets.FURY_TOKEN }}
@@ -96,10 +103,12 @@ jobs:
          args: "--release --strip"
          vcpkg_token: ${{ secrets.VCPKG_GITHUB_PACKAGES }}
      - uses: ./.github/workflows/upload_wheel
+        if: startsWith(github.ref, 'refs/tags/python-v')
        with:
          pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
          fury_token: ${{ secrets.FURY_TOKEN }}
  gh-release:
+    if: startsWith(github.ref, 'refs/tags/python-v')
    runs-on: ubuntu-latest
    permissions:
      contents: write
634  Cargo.lock (generated)

File diff suppressed because it is too large
30  Cargo.toml

@@ -21,16 +21,16 @@ categories = ["database-implementations"]
 rust-version = "1.78.0"

 [workspace.dependencies]
-lance = { "version" = "=0.25.0", "features" = [
+lance = { "version" = "=0.25.3", "features" = [
   "dynamodb",
-], tag = "v0.25.0-beta.5", git = "https://github.com/lancedb/lance.git" }
-lance-io = { version = "=0.25.0", tag = "v0.25.0-beta.5", git = "https://github.com/lancedb/lance.git" }
-lance-index = { version = "=0.25.0", tag = "v0.25.0-beta.5", git = "https://github.com/lancedb/lance.git" }
-lance-linalg = { version = "=0.25.0", tag = "v0.25.0-beta.5", git = "https://github.com/lancedb/lance.git" }
-lance-table = { version = "=0.25.0", tag = "v0.25.0-beta.5", git = "https://github.com/lancedb/lance.git" }
-lance-testing = { version = "=0.25.0", tag = "v0.25.0-beta.5", git = "https://github.com/lancedb/lance.git" }
-lance-datafusion = { version = "=0.25.0", tag = "v0.25.0-beta.5", git = "https://github.com/lancedb/lance.git" }
-lance-encoding = { version = "=0.25.0", tag = "v0.25.0-beta.5", git = "https://github.com/lancedb/lance.git" }
+], tag = "v0.25.3-beta.4", git = "https://github.com/lancedb/lance" }
+lance-io = { version = "=0.25.3", tag = "v0.25.3-beta.4", git = "https://github.com/lancedb/lance" }
+lance-index = { version = "=0.25.3", tag = "v0.25.3-beta.4", git = "https://github.com/lancedb/lance" }
+lance-linalg = { version = "=0.25.3", tag = "v0.25.3-beta.4", git = "https://github.com/lancedb/lance" }
+lance-table = { version = "=0.25.3", tag = "v0.25.3-beta.4", git = "https://github.com/lancedb/lance" }
+lance-testing = { version = "=0.25.3", tag = "v0.25.3-beta.4", git = "https://github.com/lancedb/lance" }
+lance-datafusion = { version = "=0.25.3", tag = "v0.25.3-beta.4", git = "https://github.com/lancedb/lance" }
+lance-encoding = { version = "=0.25.3", tag = "v0.25.3-beta.4", git = "https://github.com/lancedb/lance" }
 # Note that this one does not include pyarrow
 arrow = { version = "54.1", optional = false }
 arrow-array = "54.1"
@@ -41,12 +41,12 @@ arrow-schema = "54.1"
 arrow-arith = "54.1"
 arrow-cast = "54.1"
 async-trait = "0"
-datafusion = { version = "45.0", default-features = false }
-datafusion-catalog = "45.0"
-datafusion-common = { version = "45.0", default-features = false }
-datafusion-execution = "45.0"
-datafusion-expr = "45.0"
-datafusion-physical-plan = "45.0"
+datafusion = { version = "46.0", default-features = false }
+datafusion-catalog = "46.0"
+datafusion-common = { version = "46.0", default-features = false }
+datafusion-execution = "46.0"
+datafusion-expr = "46.0"
+datafusion-physical-plan = "46.0"
 env_logger = "0.11"
 half = { "version" = "=2.4.1", default-features = false, features = [
   "num-traits",
12  README.md

@@ -1,9 +1,17 @@
+<a href="https://cloud.lancedb.com" target="_blank">
+  <img src="https://github.com/user-attachments/assets/92dad0a2-2a37-4ce1-b783-0d1b4f30a00c" alt="LanceDB Cloud Public Beta" width="100%" style="max-width: 100%;">
+</a>
+
 <div align="center">
 <p align="center">

-<img width="275" alt="LanceDB Logo" src="https://github.com/lancedb/lancedb/assets/5846846/37d7c7ad-c2fd-4f56-9f16-fffb0d17c73a">
+<picture>
+  <source media="(prefers-color-scheme: dark)" srcset="https://github.com/user-attachments/assets/ac270358-333e-4bea-a132-acefaa94040e">
+  <source media="(prefers-color-scheme: light)" srcset="https://github.com/user-attachments/assets/b864d814-0d29-4784-8fd9-807297c758c0">
+  <img alt="LanceDB Logo" src="https://github.com/user-attachments/assets/b864d814-0d29-4784-8fd9-807297c758c0" width=300>
+</picture>

-**Developer-friendly, database for multimodal AI**
+**Search More, Manage Less**

 <a href='https://github.com/lancedb/vectordb-recipes/tree/main' target="_blank"><img alt='LanceDB' src='https://img.shields.io/badge/VectorDB_Recipes-100000?style=for-the-badge&logo=LanceDB&logoColor=white&labelColor=645cfb&color=645cfb'/></a>
 <a href='https://lancedb.github.io/lancedb/' target="_blank"><img alt='lancdb' src='https://img.shields.io/badge/DOCS-100000?style=for-the-badge&logo=lancdb&logoColor=white&labelColor=645cfb&color=645cfb'/></a>
@@ -2,7 +2,7 @@

 LanceDB Cloud is a SaaS (software-as-a-service) solution that runs serverless in the cloud, clearly separating storage from compute. It's designed to be highly scalable without breaking the bank. LanceDB Cloud is currently in private beta with general availability coming soon, but you can apply for early access with the private beta release by signing up below.

-[Try out LanceDB Cloud](https://noteforms.com/forms/lancedb-mailing-list-cloud-kty1o5?notionforms=1&utm_source=notionforms){ .md-button .md-button--primary }
+[Try out LanceDB Cloud (Public Beta)](https://cloud.lancedb.com){ .md-button .md-button--primary }

 ## Architecture
@@ -942,28 +942,6 @@ rewriting the column, which can be a heavy operation.
 ```
 **API Reference:** [lancedb.Table.alterColumns](../js/classes/Table.md/#altercolumns)

-You can even cast the a vector column to a different dimension:
-
-=== "Python"
-
-    === "Sync API"
-
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
-        --8<-- "python/python/tests/docs/test_basic.py:alter_columns_vector"
-        ```
-    === "Async API"
-
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
-        --8<-- "python/python/tests/docs/test_basic.py:alter_columns_async_vector"
-        ```
-=== "Typescript"
-
-    ```typescript
-    --8<-- "nodejs/examples/basic.test.ts:alter_columns_vector"
-    ```
-
 ### Dropping columns

 You can drop columns from the table with the `drop_columns` method. This will
@@ -1023,9 +1001,11 @@ In LanceDB OSS, users can set the `read_consistency_interval` parameter on connect

 There are three possible settings for `read_consistency_interval`:

-1. **Unset (default)**: The database does not check for updates to tables made by other processes. This provides the best query performance, but means that clients may not see the most up-to-date data. This setting is suitable for applications where the data does not change during the lifetime of the table reference.
-2. **Zero seconds (Strong consistency)**: The database checks for updates on every read. This provides the strongest consistency guarantees, ensuring that all clients see the latest committed data. However, it has the most overhead. This setting is suitable when consistency matters more than having high QPS.
-3. **Custom interval (Eventual consistency)**: The database checks for updates at a custom interval, such as every 5 seconds. This provides eventual consistency, allowing for some lag between write and read operations. Performance wise, this is a middle ground between strong consistency and no consistency check. This setting is suitable for applications where immediate consistency is not critical, but clients should see updated data eventually.
+1. **Unset**: The database does not check for updates to tables made by other processes. This setting is suitable for applications where the data does not change during the lifetime of the table reference.
+2. **Zero seconds (Strong consistency)**: The database checks for updates on every read. This provides the strongest consistency guarantees, ensuring that all clients see the latest committed data. However, it has the most overhead. This setting is suitable when consistency matters more than having high QPS. For best performance, combine this setting with the storage option `new_table_enable_v2_manifest_paths` set to `true`.
+3. **Custom interval (Eventual consistency, the default)**: The database checks for updates at a custom interval. By default, this is every 5 seconds. This provides eventual consistency, allowing for some lag between write and read operations. Performance wise, this is a middle ground between strong consistency and no consistency check. This setting is suitable for applications where immediate consistency is not critical, but clients should see updated data eventually.
+
+You can always force a synchronization by calling `checkout_latest()` / `checkoutLatest()` on a table.

 !!! tip "Consistency in LanceDB Cloud"
@@ -1063,7 +1043,21 @@ There are three possible settings for `read_consistency_interval`:
         --8<-- "python/python/tests/docs/test_guide_tables.py:table_async_eventual_consistency"
         ```

-    By default, a `Table` will never check for updates from other writers. To manually check for updates you can use `checkout_latest`:
+    For no consistency, use `None`:
+
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:table_no_consistency"
+        ```
+
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:table_async_no_consistency"
+        ```
+
+    To manually check for updates you can use `checkout_latest`:

     === "Sync API"
@@ -1081,15 +1075,25 @@ There are three possible settings for `read_consistency_interval`:
     To set strong consistency, use `0`:

     ```ts
-    const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 0 });
-    const tbl = await db.openTable("my_table");
+    --8<-- "nodejs/examples/basic.test.ts:table_strong_consistency"
     ```

     For eventual consistency, specify the update interval as seconds:

     ```ts
-    const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 5 });
-    const tbl = await db.openTable("my_table");
+    --8<-- "nodejs/examples/basic.test.ts:table_eventual_consistency"
     ```

+    For no consistency, use `null`:
+
+    ```ts
+    --8<-- "nodejs/examples/basic.test.ts:table_no_consistency"
+    ```
+
     To manually check for updates you can use `checkoutLatest`:

     ```ts
     --8<-- "nodejs/examples/basic.test.ts:table_checkout_latest"
     ```

 <!-- Node doesn't yet support the version time travel: https://github.com/lancedb/lancedb/issues/1007
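The `--8<--` anchors above pull code out of the repository's test files at docs build time. For readers of the raw diff, a minimal standalone sketch of the three settings, assuming the `@lancedb/lancedb` package and an existing table named `my_table`:

```ts
import * as lancedb from "@lancedb/lancedb";

// Strong consistency: check for updates from other writers on every read.
const strong = await lancedb.connect("./.lancedb", { readConsistencyInterval: 0 });

// Eventual consistency: re-check for updates at most every 5 seconds.
const eventual = await lancedb.connect("./.lancedb", { readConsistencyInterval: 5 });

// No consistency checks: the table reference stays pinned until synced manually.
const pinned = await lancedb.connect("./.lancedb", { readConsistencyInterval: null });
const tbl = await pinned.openTable("my_table");
await tbl.checkoutLatest(); // manually pick up the latest committed version
```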
@@ -23,7 +23,7 @@ LanceDB **OSS** is an **open-source**, batteries-included embedded vector databa

 LanceDB **Cloud** is a SaaS (software-as-a-service) solution that runs serverless in the cloud, making the storage clearly separated from compute. It's designed to be cost-effective and highly scalable without breaking the bank. LanceDB Cloud is currently in private beta with general availability coming soon, but you can apply for early access with the private beta release by signing up below.

-[Try out LanceDB Cloud](https://noteforms.com/forms/lancedb-mailing-list-cloud-kty1o5?notionforms=1&utm_source=notionforms){ .md-button .md-button--primary }
+[Try out LanceDB Cloud (Public Beta) Now](https://cloud.lancedb.com){ .md-button .md-button--primary }

 ## Why use LanceDB?
75  docs/src/js/classes/BoostQuery.md (new file)

@@ -0,0 +1,75 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / BoostQuery

# Class: BoostQuery

Represents a full-text query interface.
This interface defines the structure and behavior for full-text queries,
including methods to retrieve the query type and convert the query to a dictionary format.

## Implements

- [`FullTextQuery`](../interfaces/FullTextQuery.md)

## Constructors

### new BoostQuery()

```ts
new BoostQuery(
   positive,
   negative,
   negativeBoost): BoostQuery
```

Creates an instance of BoostQuery.

#### Parameters

* **positive**: [`FullTextQuery`](../interfaces/FullTextQuery.md)
    The positive query that boosts the relevance score.

* **negative**: [`FullTextQuery`](../interfaces/FullTextQuery.md)
    The negative query that reduces the relevance score.

* **negativeBoost**: `number`
    The factor by which the negative query reduces the score.

#### Returns

[`BoostQuery`](BoostQuery.md)

## Methods

### queryType()

```ts
queryType(): FullTextQueryType
```

#### Returns

[`FullTextQueryType`](../enumerations/FullTextQueryType.md)

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)

***

### toDict()

```ts
toDict(): Record<string, unknown>
```

#### Returns

`Record`<`string`, `unknown`>

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`toDict`](../interfaces/FullTextQuery.md#todict)
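The generated page above documents signatures only. A hedged usage sketch, assuming a table named `my_table` with a full-text index on a `text` column:

```ts
import * as lancedb from "@lancedb/lancedb";
import { BoostQuery, MatchQuery } from "@lancedb/lancedb";

const db = await lancedb.connect("./.lancedb");
const tbl = await db.openTable("my_table"); // assumed to have an FTS index on "text"

// Boost rows matching "lance" while down-weighting rows that also match "legacy".
const q = new BoostQuery(
  new MatchQuery("lance", "text"),
  new MatchQuery("legacy", "text"),
  0.5, // negativeBoost: scores of negative matches are scaled by this factor
);
const hits = await tbl.query().fullTextSearch(q).limit(10).toArray();
```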
83  docs/src/js/classes/MatchQuery.md (new file)

@@ -0,0 +1,83 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / MatchQuery

# Class: MatchQuery

Represents a full-text query interface.
This interface defines the structure and behavior for full-text queries,
including methods to retrieve the query type and convert the query to a dictionary format.

## Implements

- [`FullTextQuery`](../interfaces/FullTextQuery.md)

## Constructors

### new MatchQuery()

```ts
new MatchQuery(
   query,
   column,
   boost,
   fuzziness,
   maxExpansions): MatchQuery
```

Creates an instance of MatchQuery.

#### Parameters

* **query**: `string`
    The text query to search for.

* **column**: `string`
    The name of the column to search within.

* **boost**: `number` = `1.0`
    (Optional) The boost factor to influence the relevance score of this query. Default is `1.0`.

* **fuzziness**: `number` = `0`
    (Optional) The allowed edit distance for fuzzy matching. Default is `0`.

* **maxExpansions**: `number` = `50`
    (Optional) The maximum number of terms to consider for fuzzy matching. Default is `50`.

#### Returns

[`MatchQuery`](MatchQuery.md)

## Methods

### queryType()

```ts
queryType(): FullTextQueryType
```

#### Returns

[`FullTextQueryType`](../enumerations/FullTextQueryType.md)

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)

***

### toDict()

```ts
toDict(): Record<string, unknown>
```

#### Returns

`Record`<`string`, `unknown`>

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`toDict`](../interfaces/FullTextQuery.md#todict)
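A hedged sketch of the fuzzy-matching parameters, again assuming a hypothetical `my_table` with a full-text index on `text`:

```ts
import * as lancedb from "@lancedb/lancedb";
import { MatchQuery } from "@lancedb/lancedb";

const db = await lancedb.connect("./.lancedb");
const tbl = await db.openTable("my_table");

// Fuzzy match: tolerate one edit ("lancdb" still matches "lancedb"),
// expanding to at most 50 candidate terms (the documented default).
const q = new MatchQuery("lancdb", "text", 1.0, 1, 50);
const hits = await tbl.query().fullTextSearch(q).limit(5).toArray();
```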
77  docs/src/js/classes/MultiMatchQuery.md (new file)

@@ -0,0 +1,77 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / MultiMatchQuery

# Class: MultiMatchQuery

Represents a full-text query interface.
This interface defines the structure and behavior for full-text queries,
including methods to retrieve the query type and convert the query to a dictionary format.

## Implements

- [`FullTextQuery`](../interfaces/FullTextQuery.md)

## Constructors

### new MultiMatchQuery()

```ts
new MultiMatchQuery(
   query,
   columns,
   boosts): MultiMatchQuery
```

Creates an instance of MultiMatchQuery.

#### Parameters

* **query**: `string`
    The text query to search for across multiple columns.

* **columns**: `string`[]
    An array of column names to search within.

* **boosts**: `number`[] = `...`
    (Optional) An array of boost factors corresponding to each column. Default is an array of 1.0 for each column.
    The `boosts` array should have the same length as `columns`. If not provided, all columns will have a default boost of 1.0.
    If the length of `boosts` is less than `columns`, it will be padded with 1.0s.

#### Returns

[`MultiMatchQuery`](MultiMatchQuery.md)

## Methods

### queryType()

```ts
queryType(): FullTextQueryType
```

#### Returns

[`FullTextQueryType`](../enumerations/FullTextQueryType.md)

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)

***

### toDict()

```ts
toDict(): Record<string, unknown>
```

#### Returns

`Record`<`string`, `unknown`>

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`toDict`](../interfaces/FullTextQuery.md#todict)
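A hedged sketch of per-column boosting, assuming a hypothetical table with full-text indexes on `title` and `body`:

```ts
import * as lancedb from "@lancedb/lancedb";
import { MultiMatchQuery } from "@lancedb/lancedb";

const db = await lancedb.connect("./.lancedb");
const tbl = await db.openTable("my_table"); // assumed FTS indexes on "title" and "body"

// Search both columns, weighting title matches twice as heavily as body matches.
const q = new MultiMatchQuery("vector database", ["title", "body"], [2.0, 1.0]);
const hits = await tbl.query().fullTextSearch(q).limit(10).toArray();
```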
69  docs/src/js/classes/PhraseQuery.md (new file)

@@ -0,0 +1,69 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / PhraseQuery

# Class: PhraseQuery

Represents a full-text query interface.
This interface defines the structure and behavior for full-text queries,
including methods to retrieve the query type and convert the query to a dictionary format.

## Implements

- [`FullTextQuery`](../interfaces/FullTextQuery.md)

## Constructors

### new PhraseQuery()

```ts
new PhraseQuery(query, column): PhraseQuery
```

Creates an instance of `PhraseQuery`.

#### Parameters

* **query**: `string`
    The phrase to search for in the specified column.

* **column**: `string`
    The name of the column to search within.

#### Returns

[`PhraseQuery`](PhraseQuery.md)

## Methods

### queryType()

```ts
queryType(): FullTextQueryType
```

#### Returns

[`FullTextQueryType`](../enumerations/FullTextQueryType.md)

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`queryType`](../interfaces/FullTextQuery.md#querytype)

***

### toDict()

```ts
toDict(): Record<string, unknown>
```

#### Returns

`Record`<`string`, `unknown`>

#### Implementation of

[`FullTextQuery`](../interfaces/FullTextQuery.md).[`toDict`](../interfaces/FullTextQuery.md#todict)
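A hedged sketch, assuming a hypothetical `my_table` whose `text` column was indexed with position information (phrase queries generally require positions to be stored in the FTS index):

```ts
import * as lancedb from "@lancedb/lancedb";
import { PhraseQuery } from "@lancedb/lancedb";

const db = await lancedb.connect("./.lancedb");
const tbl = await db.openTable("my_table");

// Match the exact phrase rather than the individual terms.
const q = new PhraseQuery("multimodal AI", "text");
const hits = await tbl.query().fullTextSearch(q).limit(10).toArray();
```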
@@ -30,6 +30,53 @@ protected inner: Query | Promise<Query>;

 ## Methods

+### analyzePlan()
+
+```ts
+analyzePlan(): Promise<string>
+```
+
+Executes the query and returns the physical query plan annotated with runtime metrics.
+
+This is useful for debugging and performance analysis, as it shows how the query was executed
+and includes metrics such as elapsed time, rows processed, and I/O statistics.
+
+#### Returns
+
+`Promise`<`string`>
+
+A query execution plan with runtime metrics for each step.
+
+#### Example
+
+```ts
+import * as lancedb from "@lancedb/lancedb"
+
+const db = await lancedb.connect("./.lancedb");
+const table = await db.createTable("my_table", [
+  { vector: [1.1, 0.9], id: "1" },
+]);
+
+const plan = await table.query().nearestTo([0.5, 0.2]).analyzePlan();
+
+Example output (with runtime metrics inlined):
+AnalyzeExec verbose=true, metrics=[]
+ProjectionExec: expr=[id@3 as id, vector@0 as vector, _distance@2 as _distance], metrics=[output_rows=1, elapsed_compute=3.292µs]
+Take: columns="vector, _rowid, _distance, (id)", metrics=[output_rows=1, elapsed_compute=66.001µs, batches_processed=1, bytes_read=8, iops=1, requests=1]
+CoalesceBatchesExec: target_batch_size=1024, metrics=[output_rows=1, elapsed_compute=3.333µs]
+GlobalLimitExec: skip=0, fetch=10, metrics=[output_rows=1, elapsed_compute=167ns]
+FilterExec: _distance@2 IS NOT NULL, metrics=[output_rows=1, elapsed_compute=8.542µs]
+SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], metrics=[output_rows=1, elapsed_compute=63.25µs, row_replacements=1]
+KNNVectorDistance: metric=l2, metrics=[output_rows=1, elapsed_compute=114.333µs, output_batches=1]
+LanceScan: uri=/path/to/data, projection=[vector], row_id=true, row_addr=false, ordered=false, metrics=[output_rows=1, elapsed_compute=103.626µs, bytes_read=549, iops=2, requests=2]
+```
+
+#### Inherited from
+
+[`QueryBase`](QueryBase.md).[`analyzePlan`](QueryBase.md#analyzeplan)
+
+***
+
 ### execute()

 ```ts
@@ -159,7 +206,7 @@ fullTextSearch(query, options?): this

 #### Parameters

-* **query**: `string`
+* **query**: `string` \| [`FullTextQuery`](../interfaces/FullTextQuery.md)

 * **options?**: `Partial`<[`FullTextSearchOptions`](../interfaces/FullTextSearchOptions.md)>

@@ -262,7 +309,7 @@ nearestToText(query, columns?): Query

 #### Parameters

-* **query**: `string`
+* **query**: `string` \| [`FullTextQuery`](../interfaces/FullTextQuery.md)

 * **columns?**: `string`[]
@@ -36,6 +36,49 @@ protected inner: NativeQueryType | Promise<NativeQueryType>;

 ## Methods

+### analyzePlan()
+
+```ts
+analyzePlan(): Promise<string>
+```
+
+Executes the query and returns the physical query plan annotated with runtime metrics.
+
+This is useful for debugging and performance analysis, as it shows how the query was executed
+and includes metrics such as elapsed time, rows processed, and I/O statistics.
+
+#### Returns
+
+`Promise`<`string`>
+
+A query execution plan with runtime metrics for each step.
+
+#### Example
+
+```ts
+import * as lancedb from "@lancedb/lancedb"
+
+const db = await lancedb.connect("./.lancedb");
+const table = await db.createTable("my_table", [
+  { vector: [1.1, 0.9], id: "1" },
+]);
+
+const plan = await table.query().nearestTo([0.5, 0.2]).analyzePlan();
+
+Example output (with runtime metrics inlined):
+AnalyzeExec verbose=true, metrics=[]
+ProjectionExec: expr=[id@3 as id, vector@0 as vector, _distance@2 as _distance], metrics=[output_rows=1, elapsed_compute=3.292µs]
+Take: columns="vector, _rowid, _distance, (id)", metrics=[output_rows=1, elapsed_compute=66.001µs, batches_processed=1, bytes_read=8, iops=1, requests=1]
+CoalesceBatchesExec: target_batch_size=1024, metrics=[output_rows=1, elapsed_compute=3.333µs]
+GlobalLimitExec: skip=0, fetch=10, metrics=[output_rows=1, elapsed_compute=167ns]
+FilterExec: _distance@2 IS NOT NULL, metrics=[output_rows=1, elapsed_compute=8.542µs]
+SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], metrics=[output_rows=1, elapsed_compute=63.25µs, row_replacements=1]
+KNNVectorDistance: metric=l2, metrics=[output_rows=1, elapsed_compute=114.333µs, output_batches=1]
+LanceScan: uri=/path/to/data, projection=[vector], row_id=true, row_addr=false, ordered=false, metrics=[output_rows=1, elapsed_compute=103.626µs, bytes_read=549, iops=2, requests=2]
+```
+
+***
+
 ### execute()

 ```ts
@@ -149,7 +192,7 @@ fullTextSearch(query, options?): this

 #### Parameters

-* **query**: `string`
+* **query**: `string` \| [`FullTextQuery`](../interfaces/FullTextQuery.md)

 * **options?**: `Partial`<[`FullTextSearchOptions`](../interfaces/FullTextSearchOptions.md)>
@@ -48,6 +48,53 @@ addQueryVector(vector): VectorQuery

 ***

+### analyzePlan()
+
+```ts
+analyzePlan(): Promise<string>
+```
+
+Executes the query and returns the physical query plan annotated with runtime metrics.
+
+This is useful for debugging and performance analysis, as it shows how the query was executed
+and includes metrics such as elapsed time, rows processed, and I/O statistics.
+
+#### Returns
+
+`Promise`<`string`>
+
+A query execution plan with runtime metrics for each step.
+
+#### Example
+
+```ts
+import * as lancedb from "@lancedb/lancedb"
+
+const db = await lancedb.connect("./.lancedb");
+const table = await db.createTable("my_table", [
+  { vector: [1.1, 0.9], id: "1" },
+]);
+
+const plan = await table.query().nearestTo([0.5, 0.2]).analyzePlan();
+
+Example output (with runtime metrics inlined):
+AnalyzeExec verbose=true, metrics=[]
+ProjectionExec: expr=[id@3 as id, vector@0 as vector, _distance@2 as _distance], metrics=[output_rows=1, elapsed_compute=3.292µs]
+Take: columns="vector, _rowid, _distance, (id)", metrics=[output_rows=1, elapsed_compute=66.001µs, batches_processed=1, bytes_read=8, iops=1, requests=1]
+CoalesceBatchesExec: target_batch_size=1024, metrics=[output_rows=1, elapsed_compute=3.333µs]
+GlobalLimitExec: skip=0, fetch=10, metrics=[output_rows=1, elapsed_compute=167ns]
+FilterExec: _distance@2 IS NOT NULL, metrics=[output_rows=1, elapsed_compute=8.542µs]
+SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], metrics=[output_rows=1, elapsed_compute=63.25µs, row_replacements=1]
+KNNVectorDistance: metric=l2, metrics=[output_rows=1, elapsed_compute=114.333µs, output_batches=1]
+LanceScan: uri=/path/to/data, projection=[vector], row_id=true, row_addr=false, ordered=false, metrics=[output_rows=1, elapsed_compute=103.626µs, bytes_read=549, iops=2, requests=2]
+```
+
+#### Inherited from
+
+[`QueryBase`](QueryBase.md).[`analyzePlan`](QueryBase.md#analyzeplan)
+
+***
+
 ### bypassVectorIndex()

 ```ts
@@ -300,7 +347,7 @@ fullTextSearch(query, options?): this

 #### Parameters

-* **query**: `string`
+* **query**: `string` \| [`FullTextQuery`](../interfaces/FullTextQuery.md)

 * **options?**: `Partial`<[`FullTextSearchOptions`](../interfaces/FullTextSearchOptions.md)>
46  docs/src/js/enumerations/FullTextQueryType.md (new file)

@@ -0,0 +1,46 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / FullTextQueryType

# Enumeration: FullTextQueryType

Enum representing the types of full-text queries supported.

- `Match`: Performs a full-text search for terms in the query string.
- `MatchPhrase`: Searches for an exact phrase match in the text.
- `Boost`: Boosts the relevance score of specific terms in the query.
- `MultiMatch`: Searches across multiple fields for the query terms.

## Enumeration Members

### Boost

```ts
Boost: "boost";
```

***

### Match

```ts
Match: "match";
```

***

### MatchPhrase

```ts
MatchPhrase: "match_phrase";
```

***

### MultiMatch

```ts
MultiMatch: "multi_match";
```
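These string values are what `queryType()` on the concrete query classes returns, and they key the serialized form produced by `toDict()`. A quick hedged sketch:

```ts
import { FullTextQueryType, MatchQuery, PhraseQuery } from "@lancedb/lancedb";

const match = new MatchQuery("puppy", "text");
console.log(match.queryType() === FullTextQueryType.Match); // true

// toDict() exposes the serialized form handed to the engine.
const phrase = new PhraseQuery("golden retriever", "text");
console.log(phrase.queryType()); // "match_phrase"
console.log(phrase.toDict());
```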
@@ -9,12 +9,20 @@
 - [embedding](namespaces/embedding/README.md)
 - [rerankers](namespaces/rerankers/README.md)

+## Enumerations
+
+- [FullTextQueryType](enumerations/FullTextQueryType.md)
+
 ## Classes

+- [BoostQuery](classes/BoostQuery.md)
 - [Connection](classes/Connection.md)
 - [Index](classes/Index.md)
 - [MakeArrowTableOptions](classes/MakeArrowTableOptions.md)
+- [MatchQuery](classes/MatchQuery.md)
 - [MergeInsertBuilder](classes/MergeInsertBuilder.md)
+- [MultiMatchQuery](classes/MultiMatchQuery.md)
+- [PhraseQuery](classes/PhraseQuery.md)
 - [Query](classes/Query.md)
 - [QueryBase](classes/QueryBase.md)
 - [RecordBatchIterator](classes/RecordBatchIterator.md)
@@ -33,6 +41,7 @@
 - [CreateTableOptions](interfaces/CreateTableOptions.md)
 - [ExecutableQuery](interfaces/ExecutableQuery.md)
 - [FtsOptions](interfaces/FtsOptions.md)
+- [FullTextQuery](interfaces/FullTextQuery.md)
 - [FullTextSearchOptions](interfaces/FullTextSearchOptions.md)
 - [HnswPqOptions](interfaces/HnswPqOptions.md)
 - [HnswSqOptions](interfaces/HnswSqOptions.md)
@@ -44,7 +44,7 @@ for testing purposes.
 ### readConsistencyInterval?

 ```ts
-optional readConsistencyInterval: number;
+optional readConsistencyInterval: null | number;
 ```

 (For LanceDB OSS only): The interval, in seconds, at which to check for
35  docs/src/js/interfaces/FullTextQuery.md (new file)

@@ -0,0 +1,35 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / FullTextQuery

# Interface: FullTextQuery

Represents a full-text query interface.
This interface defines the structure and behavior for full-text queries,
including methods to retrieve the query type and convert the query to a dictionary format.

## Methods

### queryType()

```ts
queryType(): FullTextQueryType
```

#### Returns

[`FullTextQueryType`](../enumerations/FullTextQueryType.md)

***

### toDict()

```ts
toDict(): Record<string, unknown>
```

#### Returns

`Record`<`string`, `unknown`>
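Because the interface only requires `queryType()` and `toDict()`, a custom query can in principle implement it directly. The dictionary shape below is an assumption modeled on `MatchQuery`, not a documented contract; prefer the built-in classes:

```ts
import { FullTextQuery, FullTextQueryType } from "@lancedb/lancedb";

// A hypothetical hand-rolled match query for illustration only.
class MyMatchQuery implements FullTextQuery {
  constructor(private text: string, private column: string) {}

  queryType(): FullTextQueryType {
    return FullTextQueryType.Match;
  }

  toDict(): Record<string, unknown> {
    // Assumed wire format; consult the built-in classes for the real one.
    return { [this.queryType()]: { query: this.text, column: this.column } };
  }
}
```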
@@ -20,3 +20,13 @@ The maximum number of rows to return in a single batch

 Batches may have fewer rows if the underlying data is stored
 in smaller chunks.
+
+***
+
+### timeoutMs?
+
+```ts
+optional timeoutMs: number;
+```
+
+Timeout for query execution in milliseconds
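A hedged usage sketch, assuming `toArray` accepts a `Partial<QueryExecutionOptions>` the way `execute` does:

```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("./.lancedb");
const tbl = await db.openTable("my_table");

// Abort the scan if it takes longer than two seconds.
const rows = await tbl
  .query()
  .where("id > 100")
  .toArray({ timeoutMs: 2000 });
```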
@@ -8,6 +8,11 @@ For trouble shooting, the best place to ask is in our Discord, under the relevant
 language channel. By asking in the language-specific channel, it makes it more
 likely that someone who knows the answer will see your question.

+## Common issues
+
+* Multiprocessing with `fork` is not supported. You should use `spawn` instead.
+* Data returned by queries may not reflect the most recent writes, depending on configuration. LanceDB uses eventual consistency by default. See [consistency](/docs/src/guides/tables.md#consistency) for more information.
+
 ## Enabling logging

 To provide more information, especially for LanceDB Cloud related issues, enable
@@ -31,3 +36,9 @@ print the resolved query plan. You can use the `explain_plan` method to do this:
 * Python Sync: [LanceQueryBuilder.explain_plan][lancedb.query.LanceQueryBuilder.explain_plan]
 * Python Async: [AsyncQueryBase.explain_plan][lancedb.query.AsyncQueryBase.explain_plan]
 * Node @lancedb/lancedb: [LanceQueryBuilder.explainPlan](/lancedb/js/classes/QueryBase/#explainplan)
+
+To understand how a query was actually executed—including metrics like execution time, number of rows processed, I/O stats, and more—use the `analyze_plan` method. This executes the query and returns a physical execution plan annotated with runtime metrics, making it especially helpful for performance tuning and debugging.
+
+* Python Sync: [LanceQueryBuilder.analyze_plan][lancedb.query.LanceQueryBuilder.analyze_plan]
+* Python Async: [AsyncQueryBase.analyze_plan][lancedb.query.AsyncQueryBase.analyze_plan]
+* Node @lancedb/lancedb: [LanceQueryBuilder.analyzePlan](/lancedb/js/classes/QueryBase/#analyzePlan)
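On the Node side, the practical difference between the two methods, as a hedged sketch (`verbose` on `explainPlan` is an assumption): `explainPlan` only plans, while `analyzePlan` executes and reports metrics.

```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("./.lancedb");
const tbl = await db.openTable("my_table");

const q = tbl.query().nearestTo([0.5, 0.2]).limit(10);

// Plan only: cheap, does not run the query.
console.log(await q.explainPlan(true));

// Plan plus runtime metrics: actually executes the query.
console.log(await q.analyzePlan());
```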
3  java/.gitignore (vendored, new file)

@@ -0,0 +1,3 @@
*.iml
.java-version
@@ -8,13 +8,16 @@
   <parent>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-parent</artifactId>
-    <version>0.18.1-final.0</version>
+    <version>0.19.0-beta.5</version>
     <relativePath>../pom.xml</relativePath>
   </parent>

   <artifactId>lancedb-core</artifactId>
   <name>LanceDB Core</name>
   <packaging>jar</packaging>
+  <properties>
+    <rust.release.build>false</rust.release.build>
+  </properties>

   <dependencies>
     <dependency>
@@ -68,7 +71,7 @@
         </goals>
         <configuration>
           <path>lancedb-jni</path>
-          <release>true</release>
+          <release>${rust.release.build}</release>
           <!-- Copy native libraries to target/classes for runtime access -->
           <copyTo>${project.build.directory}/classes/nativelib</copyTo>
           <copyWithPlatformDir>true</copyWithPlatformDir>
@@ -1,16 +1,25 @@
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
-
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package com.lancedb.lancedb;

 import io.questdb.jar.jni.JarJniLoader;

 import java.io.Closeable;
 import java.util.List;
 import java.util.Optional;

-/**
- * Represents LanceDB database.
- */
+/** Represents LanceDB database. */
 public class Connection implements Closeable {
   static {
     JarJniLoader.loadLib(Connection.class, "/nativelib", "lancedb_jni");
@@ -18,14 +27,11 @@ public class Connection implements Closeable {

   private long nativeConnectionHandle;

-  /**
-   * Connect to a LanceDB instance.
-   */
+  /** Connect to a LanceDB instance. */
   public static native Connection connect(String uri);

   /**
-   * Get the names of all tables in the database. The names are sorted in
-   * ascending order.
+   * Get the names of all tables in the database. The names are sorted in ascending order.
    *
    * @return the table names
    */
@@ -34,8 +40,7 @@ public class Connection implements Closeable {
   }

   /**
-   * Get the names of filtered tables in the database. The names are sorted in
-   * ascending order.
+   * Get the names of filtered tables in the database. The names are sorted in ascending order.
    *
    * @param limit The number of results to return.
    * @return the table names
@@ -45,12 +50,11 @@ public class Connection implements Closeable {
   }

   /**
-   * Get the names of filtered tables in the database. The names are sorted in
-   * ascending order.
+   * Get the names of filtered tables in the database. The names are sorted in ascending order.
    *
    * @param startAfter If present, only return names that come lexicographically after the supplied
-   *                   value. This can be combined with limit to implement pagination
-   *                   by setting this to the last table name from the previous page.
+   *     value. This can be combined with limit to implement pagination by setting this to the last
+   *     table name from the previous page.
    * @return the table names
    */
   public List<String> tableNames(String startAfter) {
@@ -58,12 +62,11 @@ public class Connection implements Closeable {
   }

   /**
-   * Get the names of filtered tables in the database. The names are sorted in
-   * ascending order.
+   * Get the names of filtered tables in the database. The names are sorted in ascending order.
    *
    * @param startAfter If present, only return names that come lexicographically after the supplied
-   *                   value. This can be combined with limit to implement pagination
-   *                   by setting this to the last table name from the previous page.
+   *     value. This can be combined with limit to implement pagination by setting this to the last
+   *     table name from the previous page.
    * @param limit The number of results to return.
    * @return the table names
    */
@@ -72,22 +75,19 @@ public class Connection implements Closeable {
   }

   /**
-   * Get the names of filtered tables in the database. The names are sorted in
-   * ascending order.
+   * Get the names of filtered tables in the database. The names are sorted in ascending order.
    *
    * @param startAfter If present, only return names that come lexicographically after the supplied
-   *                   value. This can be combined with limit to implement pagination
-   *                   by setting this to the last table name from the previous page.
+   *     value. This can be combined with limit to implement pagination by setting this to the last
+   *     table name from the previous page.
    * @param limit The number of results to return.
    * @return the table names
    */
-  public native List<String> tableNames(
-      Optional<String> startAfter, Optional<Integer> limit);
+  public native List<String> tableNames(Optional<String> startAfter, Optional<Integer> limit);

   /**
-   * Closes this connection and releases any system resources associated with it. If
-   * the connection is
-   * already closed, then invoking this method has no effect.
+   * Closes this connection and releases any system resources associated with it. If the connection
+   * is already closed, then invoking this method has no effect.
    */
   @Override
   public void close() {
@@ -98,8 +98,7 @@ public class Connection implements Closeable {
   }

   /**
-   * Native method to release the Lance connection resources associated with the
-   * given handle.
+   * Native method to release the Lance connection resources associated with the given handle.
    *
    * @param handle The native handle to the connection resource.
    */
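The pagination contract spelled out in these Javadocs (pass the last name of the previous page as `startAfter`) has a counterpart in the Node client; a hedged sketch using the assumed `tableNames` options of `@lancedb/lancedb`:

```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("./.lancedb");

// Page through table names two at a time, in lexicographic order.
let page = await db.tableNames({ limit: 2 });
while (page.length > 0) {
  console.log(page);
  // Resume after the last name of the previous page.
  page = await db.tableNames({ startAfter: page[page.length - 1], limit: 2 });
}
```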
@@ -1,27 +1,35 @@
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package com.lancedb.lancedb;

-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import java.nio.file.Path;
-import java.util.List;
-import java.net.URL;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;

+import java.net.URL;
+import java.nio.file.Path;
+import java.util.List;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 public class ConnectionTest {
   private static final String[] TABLE_NAMES = {
-    "dataset_version",
-    "new_empty_dataset",
-    "test",
-    "write_stream"
+    "dataset_version", "new_empty_dataset", "test", "write_stream"
   };

-  @TempDir
-  static Path tempDir; // Temporary directory for the tests
+  @TempDir static Path tempDir; // Temporary directory for the tests
   private static URL lanceDbURL;

   @BeforeAll
@@ -53,18 +61,21 @@ public class ConnectionTest {
   @Test
   void tableNamesStartAfter() {
     try (Connection conn = Connection.connect(lanceDbURL.toString())) {
-      assertTableNamesStartAfter(conn, TABLE_NAMES[0], 3, TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
+      assertTableNamesStartAfter(
+          conn, TABLE_NAMES[0], 3, TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
       assertTableNamesStartAfter(conn, TABLE_NAMES[1], 2, TABLE_NAMES[2], TABLE_NAMES[3]);
       assertTableNamesStartAfter(conn, TABLE_NAMES[2], 1, TABLE_NAMES[3]);
       assertTableNamesStartAfter(conn, TABLE_NAMES[3], 0);
-      assertTableNamesStartAfter(conn, "a_dataset", 4, TABLE_NAMES[0], TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
+      assertTableNamesStartAfter(
+          conn, "a_dataset", 4, TABLE_NAMES[0], TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
       assertTableNamesStartAfter(conn, "o_dataset", 2, TABLE_NAMES[2], TABLE_NAMES[3]);
       assertTableNamesStartAfter(conn, "v_dataset", 1, TABLE_NAMES[3]);
       assertTableNamesStartAfter(conn, "z_dataset", 0);
     }
   }

-  private void assertTableNamesStartAfter(Connection conn, String startAfter, int expectedSize, String... expectedNames) {
+  private void assertTableNamesStartAfter(
+      Connection conn, String startAfter, int expectedSize, String... expectedNames) {
     List<String> tableNames = conn.tableNames(startAfter);
     assertEquals(expectedSize, tableNames.size());
     for (int i = 0; i < expectedNames.length; i++) {
@@ -74,7 +85,7 @@ public class ConnectionTest {

   @Test
   void tableNamesLimit() {
-    try (Connection conn = Connection.connect(lanceDbURL.toString())) {
+    try (Connection conn = Connection.connect(lanceDbURL.toString())) {
       for (int i = 0; i <= TABLE_NAMES.length; i++) {
         List<String> tableNames = conn.tableNames(i);
         assertEquals(i, tableNames.size());
77  java/pom.xml

@@ -6,7 +6,7 @@

   <groupId>com.lancedb</groupId>
   <artifactId>lancedb-parent</artifactId>
-  <version>0.18.1-final.0</version>
+  <version>0.19.0-beta.5</version>
   <packaging>pom</packaging>

   <name>LanceDB Parent</name>
@@ -29,6 +29,25 @@
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <arrow.version>15.0.0</arrow.version>
+    <spotless.skip>false</spotless.skip>
+    <spotless.version>2.30.0</spotless.version>
+    <spotless.java.googlejavaformat.version>1.7</spotless.java.googlejavaformat.version>
+    <spotless.delimiter>package</spotless.delimiter>
+    <spotless.license.header>
+      /*
+      * Licensed under the Apache License, Version 2.0 (the "License");
+      * you may not use this file except in compliance with the License.
+      * You may obtain a copy of the License at
+      *
+      * http://www.apache.org/licenses/LICENSE-2.0
+      *
+      * Unless required by applicable law or agreed to in writing, software
+      * distributed under the License is distributed on an "AS IS" BASIS,
+      * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      * See the License for the specific language governing permissions and
+      * limitations under the License.
+      */
+    </spotless.license.header>
   </properties>

   <modules>
@@ -127,7 +146,8 @@
         <configuration>
           <configLocation>google_checks.xml</configLocation>
           <consoleOutput>true</consoleOutput>
-          <failsOnError>true</failsOnError>
+          <failsOnError>false</failsOnError>
+          <failOnViolation>false</failOnViolation>
           <violationSeverity>warning</violationSeverity>
           <linkXRef>false</linkXRef>
         </configuration>
@@ -141,6 +161,10 @@
         </execution>
       </executions>
     </plugin>
+    <plugin>
+      <groupId>com.diffplug.spotless</groupId>
+      <artifactId>spotless-maven-plugin</artifactId>
+    </plugin>
   </plugins>
   <pluginManagement>
     <plugins>
@@ -166,7 +190,6 @@
         <artifactId>maven-surefire-plugin</artifactId>
         <version>3.2.5</version>
         <configuration>
-          <argLine>--add-opens=java.base/java.nio=ALL-UNNAMED</argLine>
           <forkNode
             implementation="org.apache.maven.plugin.surefire.extensions.SurefireForkNodeFactory" />
           <useSystemClassLoader>false</useSystemClassLoader>
@@ -180,6 +203,54 @@
         <artifactId>maven-install-plugin</artifactId>
         <version>2.5.2</version>
       </plugin>
+      <plugin>
+        <groupId>com.diffplug.spotless</groupId>
+        <artifactId>spotless-maven-plugin</artifactId>
+        <version>${spotless.version}</version>
+        <configuration>
+          <skip>${spotless.skip}</skip>
+          <upToDateChecking>
+            <enabled>true</enabled>
+          </upToDateChecking>
+          <java>
+            <includes>
+              <include>src/main/java/**/*.java</include>
+              <include>src/test/java/**/*.java</include>
+            </includes>
+            <googleJavaFormat>
+              <version>${spotless.java.googlejavaformat.version}</version>
+              <style>GOOGLE</style>
+            </googleJavaFormat>
+
+            <importOrder>
+              <order>com.lancedb.lance,,javax,java,\#</order>
+            </importOrder>
+
+            <removeUnusedImports />
+          </java>
+          <scala>
+            <includes>
+              <include>src/main/scala/**/*.scala</include>
+              <include>src/main/scala-*/**/*.scala</include>
+              <include>src/test/scala/**/*.scala</include>
+              <include>src/test/scala-*/**/*.scala</include>
+            </includes>
+          </scala>
+          <licenseHeader>
+            <content>${spotless.license.header}</content>
+            <delimiter>${spotless.delimiter}</delimiter>
+          </licenseHeader>
+        </configuration>
+        <executions>
+          <execution>
+            <id>spotless-check</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>apply</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
   </pluginManagement>
 </build>
51
node/package-lock.json
generated
@@ -1,12 +1,12 @@
{
"name": "vectordb",
"version": "0.18.1",
"version": "0.19.0-beta.5",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "vectordb",
"version": "0.18.1",
"version": "0.19.0-beta.5",
"cpu": [
"x64",
"arm64"
@@ -52,11 +52,11 @@
"uuid": "^9.0.0"
},
"optionalDependencies": {
"@lancedb/vectordb-darwin-arm64": "0.18.1",
"@lancedb/vectordb-darwin-x64": "0.18.1",
"@lancedb/vectordb-linux-arm64-gnu": "0.18.1",
"@lancedb/vectordb-linux-x64-gnu": "0.18.1",
"@lancedb/vectordb-win32-x64-msvc": "0.18.1"
"@lancedb/vectordb-darwin-arm64": "0.19.0-beta.5",
"@lancedb/vectordb-darwin-x64": "0.19.0-beta.5",
"@lancedb/vectordb-linux-arm64-gnu": "0.19.0-beta.5",
"@lancedb/vectordb-linux-x64-gnu": "0.19.0-beta.5",
"@lancedb/vectordb-win32-x64-msvc": "0.19.0-beta.5"
},
"peerDependencies": {
"@apache-arrow/ts": "^14.0.2",
@@ -327,9 +327,9 @@
}
},
"node_modules/@lancedb/vectordb-darwin-arm64": {
"version": "0.18.1",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.18.1.tgz",
"integrity": "sha512-LAuaIfANAingnc4yxZ142kidY6KDkTzWDVidfG4847MO/eKk029A8zbhxkMHdSyTe1cNTBr7fYDfc7/LtHrhFQ==",
"version": "0.19.0-beta.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.19.0-beta.5.tgz",
"integrity": "sha512-NuJVGaV4b6XgH3dlkCEquvtGM1cY5sIJE5M/LgJ3HYYvAbco/seBQM5AHTV/7CULoPEY9eQeJZOj9fWP5oQLYQ==",
"cpu": [
"arm64"
],
@@ -340,9 +340,9 @@
]
},
"node_modules/@lancedb/vectordb-darwin-x64": {
"version": "0.18.1",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.18.1.tgz",
"integrity": "sha512-6weTuzYZNv0z5vX7D6TSjDG47anw9NQ6cqxXTiZp2u99qaiuKj1EVxsRrDNIknspqKQ+YKwOLKwRum5VuLnHkQ==",
"version": "0.19.0-beta.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.19.0-beta.5.tgz",
"integrity": "sha512-hbadwvQcUgKJfluUHhN+mx+XeFRwTuh9mD0L3Tf3t3BkDTxyHpEG5WNgOpWrh6e1RU6zW54CoCyQuSEaVqGgGw==",
"cpu": [
"x64"
],
@@ -353,9 +353,9 @@
]
},
"node_modules/@lancedb/vectordb-linux-arm64-gnu": {
"version": "0.18.1",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.18.1.tgz",
"integrity": "sha512-MowuP2eVNx9p0Fw6qi9O20liKxUy3c3YugOVs0eo8h1fpbSlXx2bIA40JgyzIjiX0QICIt9IzLCideIZduPc1A==",
"version": "0.19.0-beta.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.19.0-beta.5.tgz",
"integrity": "sha512-fu/EOYLr3mx76/SP4dEgbq0vSYHfuTf68lVl5/tL6eIb1Purz42l22+jNKLJ/S3Plase2SkXdxyY90K2Y/CvSg==",
"cpu": [
"arm64"
],
@@ -366,9 +366,9 @@
]
},
"node_modules/@lancedb/vectordb-linux-x64-gnu": {
"version": "0.18.1",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.18.1.tgz",
"integrity": "sha512-E9Qtdstr5aTMuRzi4WdKAvQKrNaXuCdiUYph2D4tanawftn3cZVHjbQu63nRphmBKAqzuJ4vcIsYLnFbzM/CtA==",
"version": "0.19.0-beta.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.19.0-beta.5.tgz",
"integrity": "sha512-pzb8fl5M8155sc/mEFnKmuh9rCfQohHBlb+j+5qNMe84AyygQ8Me1H3b1h9fOkUPu2Y168zYfuGkjNv4Bjm9eA==",
"cpu": [
"x64"
],
@@ -379,9 +379,9 @@
]
},
"node_modules/@lancedb/vectordb-win32-x64-msvc": {
"version": "0.18.1",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.18.1.tgz",
"integrity": "sha512-ILXHvq2s28U6I6mv1UK9LRuXGMXKTidztlRGDUeSCLBKQy033xOUnBBIOMQiS1IDsFMcJTGLTvr5nPemKI2NyQ==",
"version": "0.19.0-beta.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.19.0-beta.5.tgz",
"integrity": "sha512-5z6BSfTuZYJdDL2wwRrEQlnfluahzaUH2U7vj3i4ik4zaAwvaYcrjmdYCTLRYhFscUqzxd2pVFHbfRYe+maYzA==",
"cpu": [
"x64"
],
@@ -1184,9 +1184,10 @@
}
},
"node_modules/axios": {
"version": "1.7.7",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.7.7.tgz",
"integrity": "sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q==",
"version": "1.8.4",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.8.4.tgz",
"integrity": "sha512-eBSYY4Y68NNlHbHBMdeDmKNtDgXWhQsJcGqzO3iLUM0GraQFSS9cVgPX5I9b3lbdFKyYoAEGAZF1DwhTaljNAw==",
"license": "MIT",
"dependencies": {
"follow-redirects": "^1.15.6",
"form-data": "^4.0.0",

@@ -1,6 +1,6 @@
{
"name": "vectordb",
"version": "0.18.1",
"version": "0.19.0-beta.5",
"description": " Serverless, low-latency vector database for AI applications",
"private": false,
"main": "dist/index.js",
@@ -89,10 +89,10 @@
}
},
"optionalDependencies": {
"@lancedb/vectordb-darwin-x64": "0.18.1",
"@lancedb/vectordb-darwin-arm64": "0.18.1",
"@lancedb/vectordb-linux-x64-gnu": "0.18.1",
"@lancedb/vectordb-linux-arm64-gnu": "0.18.1",
"@lancedb/vectordb-win32-x64-msvc": "0.18.1"
"@lancedb/vectordb-darwin-x64": "0.19.0-beta.5",
"@lancedb/vectordb-darwin-arm64": "0.19.0-beta.5",
"@lancedb/vectordb-linux-x64-gnu": "0.19.0-beta.5",
"@lancedb/vectordb-linux-arm64-gnu": "0.19.0-beta.5",
"@lancedb/vectordb-win32-x64-msvc": "0.19.0-beta.5"
}
}

@@ -110,7 +110,7 @@ describe('LanceDB Mirrored Store Integration test', function () {

fs.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.equal(files.length, 1, `Found files: ${files.map(f => f.name)}`)
assert.isTrue(files[0].name.endsWith('.lance'))
})

@@ -1,7 +1,7 @@
[package]
name = "lancedb-nodejs"
edition.workspace = true
version = "0.18.1"
version = "0.19.0-beta.5"
license.workspace = true
description.workspace = true
repository.workspace = true

@@ -17,7 +17,7 @@ describe("when connecting", () => {
it("should connect", async () => {
const db = await connect(tmpDir.name);
expect(db.display()).toBe(
`ListingDatabase(uri=${tmpDir.name}, read_consistency_interval=None)`,
`ListingDatabase(uri=${tmpDir.name}, read_consistency_interval=5s)`,
);
});

@@ -58,7 +58,7 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(

it("be displayable", async () => {
expect(table.display()).toMatch(
/NativeTable\(some_table, uri=.*, read_consistency_interval=None\)/,
/NativeTable\(some_table, uri=.*, read_consistency_interval=5s\)/,
);
table.close();
expect(table.display()).toBe("ClosedTable(some_table)");
@@ -633,6 +633,23 @@ describe("When creating an index", () => {
expect(plan2).not.toMatch("LanceScan");
});

it("should be able to run analyze plan", async () => {
await tbl.createIndex("vec");
await tbl.add([
{
id: 300,
vec: Array(32)
.fill(1)
.map(() => Math.random()),
tags: [],
},
]);

const plan = await tbl.query().nearestTo(queryVec).analyzePlan();
expect(plan).toMatch("AnalyzeExec");
expect(plan).toMatch("metrics=");
});

it("should be able to query with row id", async () => {
const results = await tbl
.query()
@@ -850,6 +867,44 @@ describe("When creating an index", () => {
});
});

describe("When querying a table", () => {
let tmpDir: tmp.DirResult;
beforeEach(() => {
tmpDir = tmp.dirSync({ unsafeCleanup: true });
});
afterEach(() => tmpDir.removeCallback());

it("should throw an error when timeout is reached", async () => {
const db = await connect(tmpDir.name);
const data = makeArrowTable([
{ text: "a", vector: [0.1, 0.2] },
{ text: "b", vector: [0.3, 0.4] },
]);
const table = await db.createTable("test", data);
await table.createIndex("text", { config: Index.fts() });

await expect(
table.query().where("text != 'a'").toArray({ timeoutMs: 0 }),
).rejects.toThrow("Query timeout");

await expect(
table.query().nearestTo([0.0, 0.0]).toArrow({ timeoutMs: 0 }),
).rejects.toThrow("Query timeout");

await expect(
table.search("a", "fts").toArray({ timeoutMs: 0 }),
).rejects.toThrow("Query timeout");

await expect(
table
.query()
.nearestToText("a")
.nearestTo([0.0, 0.0])
.toArrow({ timeoutMs: 0 }),
).rejects.toThrow("Query timeout");
});
});

describe("Read consistency interval", () => {
let tmpDir: tmp.DirResult;
beforeEach(() => {
@@ -1249,6 +1304,27 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
expect(results[0].text).toBe(data[0].text);
});

test("full text index on list", async () => {
const db = await connect(tmpDir.name);
const data = [
{ text: ["lance database", "the", "search"], vector: [0.1, 0.2, 0.3] },
{ text: ["lance database"], vector: [0.4, 0.5, 0.6] },
{ text: ["lance", "search"], vector: [0.7, 0.8, 0.9] },
{ text: ["database", "search"], vector: [1.0, 1.1, 1.2] },
{ text: ["unrelated", "doc"], vector: [1.3, 1.4, 1.5] },
];
const table = await db.createTable("test", data);
await table.createIndex("text", {
config: Index.fts(),
});

const results = await table.search("lance").toArray();
expect(results.length).toBe(3);

const results2 = await table.search('"lance database"').toArray();
expect(results2.length).toBe(2);
});

test("full text search without positions", async () => {
const db = await connect(tmpDir.name);
const data = [
@@ -1346,6 +1422,30 @@ describe("when calling explainPlan", () => {
});
});

describe("when calling analyzePlan", () => {
let tmpDir: tmp.DirResult;
let table: Table;
let queryVec: number[];
beforeEach(async () => {
tmpDir = tmp.dirSync({ unsafeCleanup: true });
const con = await connect(tmpDir.name);
table = await con.createTable("vectors", [{ id: 1, vector: [1.1, 0.9] }]);
});

afterEach(() => {
tmpDir.removeCallback();
});

it("retrieves runtime metrics", async () => {
queryVec = Array(2)
.fill(1)
.map(() => Math.random());
const plan = await table.query().nearestTo(queryVec).analyzePlan();
console.log("Query Plan:\n", plan); // <--- Print the plan
expect(plan).toMatch("AnalyzeExec");
});
});

describe("column name options", () => {
let tmpDir: tmp.DirResult;
let table: Table;

@@ -202,5 +202,35 @@ test("basic table examples", async () => {
// --8<-- [end:create_f16_table]
await db.dropTable("f16_tbl");
}
const uri = databaseDir;
await db.createTable("my_table", [{ id: 1 }, { id: 2 }]);
{
// --8<-- [start:table_strong_consistency]
const db = await lancedb.connect({ uri, readConsistencyInterval: 0 });
const tbl = await db.openTable("my_table");
// --8<-- [end:table_strong_consistency]
}
{
// --8<-- [start:table_eventual_consistency]
const db = await lancedb.connect({ uri, readConsistencyInterval: 5 });
const tbl = await db.openTable("my_table");
// --8<-- [end:table_eventual_consistency]
}
{
// --8<-- [start:table_no_consistency]
const db = await lancedb.connect({ uri, readConsistencyInterval: null });
const tbl = await db.openTable("my_table");
// --8<-- [end:table_no_consistency]
}
{
// --8<-- [start:table_checkout_latest]
const tbl = await db.openTable("my_table");

// (Other writes happen to test_table_async from another process)

// Check for updates
tbl.checkoutLatest();
// --8<-- [end:table_checkout_latest]
}
});
});

@@ -47,6 +47,12 @@ export {
QueryExecutionOptions,
FullTextSearchOptions,
RecordBatchIterator,
FullTextQuery,
MatchQuery,
PhraseQuery,
BoostQuery,
MultiMatchQuery,
FullTextQueryType,
} from "./query";

export {

@@ -17,6 +17,7 @@ import {
VectorQuery as NativeVectorQuery,
} from "./native";
import { Reranker } from "./rerankers";

export class RecordBatchIterator implements AsyncIterator<RecordBatch> {
private promisedInner?: Promise<NativeBatchIterator>;
private inner?: NativeBatchIterator;
@@ -62,7 +63,7 @@ class RecordBatchIterable<
// biome-ignore lint/suspicious/noExplicitAny: skip
[Symbol.asyncIterator](): AsyncIterator<RecordBatch<any>, any, undefined> {
return new RecordBatchIterator(
this.inner.execute(this.options?.maxBatchLength),
this.inner.execute(this.options?.maxBatchLength, this.options?.timeoutMs),
);
}
}
@@ -78,6 +79,11 @@ export interface QueryExecutionOptions {
* in smaller chunks.
*/
maxBatchLength?: number;

/**
* Timeout for query execution in milliseconds
*/
timeoutMs?: number;
}
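The new `timeoutMs` member of `QueryExecutionOptions` above is forwarded into the native `execute` call. As a minimal sketch of how a caller might use it, assuming an existing table named "my_table" and an illustrative 500 ms budget (both assumptions, not part of this change):

```ts
import * as lancedb from "@lancedb/lancedb";

// Sketch: bound query execution time; on expiry the promise rejects
// with a "Query timeout" error, as exercised by the tests above.
const db = await lancedb.connect("./.lancedb");
const table = await db.openTable("my_table"); // assumed to exist
try {
  const rows = await table.query().limit(10).toArray({ timeoutMs: 500 });
  console.log(`got ${rows.length} rows in time`);
} catch (err) {
  console.error("query exceeded the 500 ms budget:", err);
}
```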
/**
@@ -152,7 +158,7 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
}

fullTextSearch(
query: string,
query: string | FullTextQuery,
options?: Partial<FullTextSearchOptions>,
): this {
let columns: string[] | null = null;
@@ -164,9 +170,18 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
}
}

this.doCall((inner: NativeQueryType) =>
inner.fullTextSearch(query, columns),
);
this.doCall((inner: NativeQueryType) => {
if (typeof query === "string") {
inner.fullTextSearch({
query: query,
columns: columns,
});
} else {
// If query is a FullTextQuery object, convert it to a dict
const queryObj = query.toDict();
inner.fullTextSearch(queryObj);
}
});
return this;
}

@@ -273,9 +288,11 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
options?: Partial<QueryExecutionOptions>,
): Promise<NativeBatchIterator> {
if (this.inner instanceof Promise) {
return this.inner.then((inner) => inner.execute(options?.maxBatchLength));
return this.inner.then((inner) =>
inner.execute(options?.maxBatchLength, options?.timeoutMs),
);
} else {
return this.inner.execute(options?.maxBatchLength);
return this.inner.execute(options?.maxBatchLength, options?.timeoutMs);
}
}

@@ -348,6 +365,43 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
return this.inner.explainPlan(verbose);
}
}

/**
* Executes the query and returns the physical query plan annotated with runtime metrics.
*
* This is useful for debugging and performance analysis, as it shows how the query was executed
* and includes metrics such as elapsed time, rows processed, and I/O statistics.
*
* @example
* import * as lancedb from "@lancedb/lancedb"
*
* const db = await lancedb.connect("./.lancedb");
* const table = await db.createTable("my_table", [
* { vector: [1.1, 0.9], id: "1" },
* ]);
*
* const plan = await table.query().nearestTo([0.5, 0.2]).analyzePlan();
*
* Example output (with runtime metrics inlined):
* AnalyzeExec verbose=true, metrics=[]
* ProjectionExec: expr=[id@3 as id, vector@0 as vector, _distance@2 as _distance], metrics=[output_rows=1, elapsed_compute=3.292µs]
* Take: columns="vector, _rowid, _distance, (id)", metrics=[output_rows=1, elapsed_compute=66.001µs, batches_processed=1, bytes_read=8, iops=1, requests=1]
* CoalesceBatchesExec: target_batch_size=1024, metrics=[output_rows=1, elapsed_compute=3.333µs]
* GlobalLimitExec: skip=0, fetch=10, metrics=[output_rows=1, elapsed_compute=167ns]
* FilterExec: _distance@2 IS NOT NULL, metrics=[output_rows=1, elapsed_compute=8.542µs]
* SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], metrics=[output_rows=1, elapsed_compute=63.25µs, row_replacements=1]
* KNNVectorDistance: metric=l2, metrics=[output_rows=1, elapsed_compute=114.333µs, output_batches=1]
* LanceScan: uri=/path/to/data, projection=[vector], row_id=true, row_addr=false, ordered=false, metrics=[output_rows=1, elapsed_compute=103.626µs, bytes_read=549, iops=2, requests=2]
*
* @returns A query execution plan with runtime metrics for each step.
*/
async analyzePlan(): Promise<string> {
if (this.inner instanceof Promise) {
return this.inner.then((inner) => inner.analyzePlan());
} else {
return this.inner.analyzePlan();
}
}
}

/**
@@ -681,8 +735,167 @@ export class Query extends QueryBase<NativeQuery> {
}
}

nearestToText(query: string, columns?: string[]): Query {
this.doCall((inner) => inner.fullTextSearch(query, columns));
nearestToText(query: string | FullTextQuery, columns?: string[]): Query {
this.doCall((inner) => {
if (typeof query === "string") {
inner.fullTextSearch({
query: query,
columns: columns,
});
} else {
const queryObj = query.toDict();
inner.fullTextSearch(queryObj);
}
});
return this;
}
}
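Since `nearestToText` now also accepts a `FullTextQuery`, the hybrid search exercised in the timeout test above can be written with either form. A sketch (the `text` column and the query vector are assumptions):

```ts
// String form, as used in the tests above:
const hybridFromString = await table
  .query()
  .nearestToText("lance")
  .nearestTo([0.1, 0.2])
  .toArray();

// Structured form using the new query objects defined below:
const hybridFromQuery = await table
  .query()
  .nearestToText(new MatchQuery("lance", "text"))
  .nearestTo([0.1, 0.2])
  .toArray();
```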
/**
* Enum representing the types of full-text queries supported.
*
* - `Match`: Performs a full-text search for terms in the query string.
* - `MatchPhrase`: Searches for an exact phrase match in the text.
* - `Boost`: Boosts the relevance score of specific terms in the query.
* - `MultiMatch`: Searches across multiple fields for the query terms.
*/
export enum FullTextQueryType {
Match = "match",
MatchPhrase = "match_phrase",
Boost = "boost",
MultiMatch = "multi_match",
}

/**
* Represents a full-text query interface.
* This interface defines the structure and behavior for full-text queries,
* including methods to retrieve the query type and convert the query to a dictionary format.
*/
export interface FullTextQuery {
queryType(): FullTextQueryType;
toDict(): Record<string, unknown>;
}

export class MatchQuery implements FullTextQuery {
/**
* Creates an instance of MatchQuery.
*
* @param query - The text query to search for.
* @param column - The name of the column to search within.
* @param boost - (Optional) The boost factor to influence the relevance score of this query. Default is `1.0`.
* @param fuzziness - (Optional) The allowed edit distance for fuzzy matching. Default is `0`.
* @param maxExpansions - (Optional) The maximum number of terms to consider for fuzzy matching. Default is `50`.
*/
constructor(
private query: string,
private column: string,
private boost: number = 1.0,
private fuzziness: number = 0,
private maxExpansions: number = 50,
) {}

queryType(): FullTextQueryType {
return FullTextQueryType.Match;
}

toDict(): Record<string, unknown> {
return {
[this.queryType()]: {
[this.column]: {
query: this.query,
boost: this.boost,
fuzziness: this.fuzziness,
// biome-ignore lint/style/useNamingConvention: use underscore for consistency with the other APIs
max_expansions: this.maxExpansions,
},
},
};
}
}
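For illustration, a `MatchQuery` can be passed straight to `fullTextSearch`; here a fuzziness of 1 lets a misspelled term still match, with the other parameters left at their defaults (the `text` column is an assumption for this sketch):

```ts
// Tolerate one edit, so "databse" still matches "database".
// Positional arguments follow the constructor: query, column, boost, fuzziness.
const fuzzyHits = await table
  .query()
  .fullTextSearch(new MatchQuery("databse", "text", 1.0, 1))
  .toArray();
```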
export class PhraseQuery implements FullTextQuery {
/**
* Creates an instance of `PhraseQuery`.
*
* @param query - The phrase to search for in the specified column.
* @param column - The name of the column to search within.
*/
constructor(
private query: string,
private column: string,
) {}

queryType(): FullTextQueryType {
return FullTextQueryType.MatchPhrase;
}

toDict(): Record<string, unknown> {
return {
[this.queryType()]: {
[this.column]: this.query,
},
};
}
}
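`PhraseQuery.toDict()` nests the phrase under a `match_phrase` key keyed by column, which is the same shape the Rust `parse_fts_query` helper later in this change decodes. A sketch with an assumed `text` column:

```ts
const phrase = new PhraseQuery("lance database", "text");
// phrase.toDict() evaluates to: { match_phrase: { text: "lance database" } }
const phraseHits = await table.query().fullTextSearch(phrase).toArray();
```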
export class BoostQuery implements FullTextQuery {
/**
* Creates an instance of BoostQuery.
*
* @param positive - The positive query that boosts the relevance score.
* @param negative - The negative query that reduces the relevance score.
* @param negativeBoost - The factor by which the negative query reduces the score.
*/
constructor(
private positive: FullTextQuery,
private negative: FullTextQuery,
private negativeBoost: number,
) {}

queryType(): FullTextQueryType {
return FullTextQueryType.Boost;
}

toDict(): Record<string, unknown> {
return {
[this.queryType()]: {
positive: this.positive.toDict(),
negative: this.negative.toDict(),
// biome-ignore lint/style/useNamingConvention: use underscore for consistency with the other APIs
negative_boost: this.negativeBoost,
},
};
}
}
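A `BoostQuery` composes two sub-queries: rows matching the positive query rank higher, and rows also matching the negative query are down-weighted by the given factor. A sketch (the column name and the 0.5 factor are illustrative):

```ts
const boosted = await table
  .query()
  .fullTextSearch(
    new BoostQuery(
      new MatchQuery("lance", "text"), // positive: pull these up
      new MatchQuery("unrelated", "text"), // negative: push these down
      0.5, // negativeBoost factor
    ),
  )
  .toArray();
```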
export class MultiMatchQuery implements FullTextQuery {
/**
* Creates an instance of MultiMatchQuery.
*
* @param query - The text query to search for across multiple columns.
* @param columns - An array of column names to search within.
* @param boosts - (Optional) An array of boost factors corresponding to each column. Default is an array of 1.0 for each column.
*
* The `boosts` array should have the same length as `columns`. If not provided, all columns will have a default boost of 1.0.
* If the length of `boosts` is less than `columns`, it will be padded with 1.0s.
*/
constructor(
private query: string,
private columns: string[],
private boosts: number[] = columns.map(() => 1.0),
) {}

queryType(): FullTextQueryType {
return FullTextQueryType.MultiMatch;
}

toDict(): Record<string, unknown> {
return {
[this.queryType()]: {
query: this.query,
columns: this.columns,
boost: this.boosts,
},
};
}
}
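`MultiMatchQuery` fans one query string out across several columns, optionally weighting them. A sketch with assumed `title` and `body` columns, weighting title matches twice as heavily:

```ts
const multi = await table
  .query()
  .fullTextSearch(new MultiMatchQuery("lance", ["title", "body"], [2.0, 1.0]))
  .toArray();
```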
@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-darwin-arm64",
"version": "0.18.1",
"version": "0.19.0-beta.5",
"os": ["darwin"],
"cpu": ["arm64"],
"main": "lancedb.darwin-arm64.node",

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-darwin-x64",
"version": "0.18.1",
"version": "0.19.0-beta.5",
"os": ["darwin"],
"cpu": ["x64"],
"main": "lancedb.darwin-x64.node",

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-linux-arm64-gnu",
"version": "0.18.1",
"version": "0.19.0-beta.5",
"os": ["linux"],
"cpu": ["arm64"],
"main": "lancedb.linux-arm64-gnu.node",

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-linux-arm64-musl",
"version": "0.18.1",
"version": "0.19.0-beta.5",
"os": ["linux"],
"cpu": ["arm64"],
"main": "lancedb.linux-arm64-musl.node",

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-linux-x64-gnu",
"version": "0.18.1",
"version": "0.19.0-beta.5",
"os": ["linux"],
"cpu": ["x64"],
"main": "lancedb.linux-x64-gnu.node",

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-linux-x64-musl",
"version": "0.18.1",
"version": "0.19.0-beta.5",
"os": ["linux"],
"cpu": ["x64"],
"main": "lancedb.linux-x64-musl.node",

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-win32-arm64-msvc",
"version": "0.18.1",
"version": "0.19.0-beta.5",
"os": [
"win32"
],

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-win32-x64-msvc",
"version": "0.18.1",
"version": "0.19.0-beta.5",
"os": ["win32"],
"cpu": ["x64"],
"main": "lancedb.win32-x64-msvc.node",

252
nodejs/package-lock.json
generated
@@ -1,12 +1,12 @@
{
"name": "@lancedb/lancedb",
"version": "0.18.1",
"version": "0.19.0-beta.5",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@lancedb/lancedb",
"version": "0.18.1",
"version": "0.19.0-beta.5",
"cpu": [
"x64",
"arm64"
@@ -2304,89 +2304,20 @@
}
},
"node_modules/@babel/code-frame": {
"version": "7.23.5",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz",
"integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==",
"version": "7.26.2",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz",
"integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/highlight": "^7.23.4",
"chalk": "^2.4.2"
"@babel/helper-validator-identifier": "^7.25.9",
"js-tokens": "^4.0.0",
"picocolors": "^1.0.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/code-frame/node_modules/ansi-styles": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
"integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
"dev": true,
"dependencies": {
"color-convert": "^1.9.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/code-frame/node_modules/chalk": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
"integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
"dev": true,
"dependencies": {
"ansi-styles": "^3.2.1",
"escape-string-regexp": "^1.0.5",
"supports-color": "^5.3.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/code-frame/node_modules/color-convert": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
"integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
"dev": true,
"dependencies": {
"color-name": "1.1.3"
}
},
"node_modules/@babel/code-frame/node_modules/color-name": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
"integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
"dev": true
},
"node_modules/@babel/code-frame/node_modules/escape-string-regexp": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
"integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
"dev": true,
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/@babel/code-frame/node_modules/has-flag": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
"integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/code-frame/node_modules/supports-color": {
"version": "5.5.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
"integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
"dev": true,
"dependencies": {
"has-flag": "^3.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/compat-data": {
"version": "7.23.5",
"resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.5.tgz",
@@ -2589,19 +2520,21 @@
}
},
"node_modules/@babel/helper-string-parser": {
"version": "7.23.4",
"resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz",
"integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==",
"version": "7.25.9",
"resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz",
"integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-validator-identifier": {
"version": "7.22.20",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz",
"integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==",
"version": "7.25.9",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz",
"integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6.9.0"
}
@@ -2616,109 +2549,28 @@
}
},
"node_modules/@babel/helpers": {
"version": "7.23.8",
"resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.8.tgz",
"integrity": "sha512-KDqYz4PiOWvDFrdHLPhKtCThtIcKVy6avWD2oG4GEvyQ+XDZwHD4YQd+H2vNMnq2rkdxsDkU82T+Vk8U/WXHRQ==",
"version": "7.27.0",
"resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.0.tgz",
"integrity": "sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/template": "^7.22.15",
"@babel/traverse": "^7.23.7",
"@babel/types": "^7.23.6"
"@babel/template": "^7.27.0",
"@babel/types": "^7.27.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/highlight": {
"version": "7.23.4",
"resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz",
"integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==",
"dev": true,
"dependencies": {
"@babel/helper-validator-identifier": "^7.22.20",
"chalk": "^2.4.2",
"js-tokens": "^4.0.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/highlight/node_modules/ansi-styles": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
"integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
"dev": true,
"dependencies": {
"color-convert": "^1.9.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/highlight/node_modules/chalk": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
"integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
"dev": true,
"dependencies": {
"ansi-styles": "^3.2.1",
"escape-string-regexp": "^1.0.5",
"supports-color": "^5.3.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/highlight/node_modules/color-convert": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
"integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
"dev": true,
"dependencies": {
"color-name": "1.1.3"
}
},
"node_modules/@babel/highlight/node_modules/color-name": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
"integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
"dev": true
},
"node_modules/@babel/highlight/node_modules/escape-string-regexp": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
"integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
"dev": true,
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/@babel/highlight/node_modules/has-flag": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
"integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/highlight/node_modules/supports-color": {
"version": "5.5.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
"integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
"dev": true,
"dependencies": {
"has-flag": "^3.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/parser": {
"version": "7.23.6",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.6.tgz",
"integrity": "sha512-Z2uID7YJ7oNvAI20O9X0bblw7Qqs8Q2hFy0R9tAfnfLkp5MW0UH9eUvnDSnFwKZ0AvgS1ucqR4KzvVHgnke1VQ==",
"version": "7.27.0",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.0.tgz",
"integrity": "sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/types": "^7.27.0"
},
"bin": {
"parser": "bin/babel-parser.js"
},
@@ -2904,14 +2756,15 @@
}
},
"node_modules/@babel/template": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz",
"integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==",
"version": "7.27.0",
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.0.tgz",
"integrity": "sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.22.13",
"@babel/parser": "^7.22.15",
"@babel/types": "^7.22.15"
"@babel/code-frame": "^7.26.2",
"@babel/parser": "^7.27.0",
"@babel/types": "^7.27.0"
},
"engines": {
"node": ">=6.9.0"
@@ -2948,14 +2801,14 @@
}
},
"node_modules/@babel/types": {
"version": "7.23.6",
"resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.6.tgz",
"integrity": "sha512-+uarb83brBzPKN38NX1MkB6vb6+mwvR6amUulqAE7ccQw1pEl+bCia9TbdG1lsnFP7lZySvUn37CHyXQdfTwzg==",
"version": "7.27.0",
"resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.0.tgz",
"integrity": "sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-string-parser": "^7.23.4",
"@babel/helper-validator-identifier": "^7.22.20",
"to-fast-properties": "^2.0.0"
"@babel/helper-string-parser": "^7.25.9",
"@babel/helper-validator-identifier": "^7.25.9"
},
"engines": {
"node": ">=6.9.0"
@@ -5550,10 +5403,11 @@
"devOptional": true
},
"node_modules/axios": {
"version": "1.7.7",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.7.7.tgz",
"integrity": "sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q==",
"version": "1.8.4",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.8.4.tgz",
"integrity": "sha512-eBSYY4Y68NNlHbHBMdeDmKNtDgXWhQsJcGqzO3iLUM0GraQFSS9cVgPX5I9b3lbdFKyYoAEGAZF1DwhTaljNAw==",
"dev": true,
"license": "MIT",
"dependencies": {
"follow-redirects": "^1.15.6",
"form-data": "^4.0.0",
@@ -7869,7 +7723,8 @@
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
"dev": true
"dev": true,
"license": "MIT"
},
"node_modules/js-yaml": {
"version": "3.14.1",
@@ -9360,15 +9215,6 @@
"integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==",
"dev": true
},
"node_modules/to-fast-properties": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
"integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",

@@ -11,7 +11,7 @@
"ann"
],
"private": false,
"version": "0.18.1",
"version": "0.19.0-beta.5",
"main": "dist/index.js",
"exports": {
".": "./dist/index.js",
@@ -29,6 +29,7 @@
"aarch64-apple-darwin",
"x86_64-unknown-linux-gnu",
"aarch64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"aarch64-unknown-linux-musl",
"x86_64-pc-windows-msvc",
"aarch64-pc-windows-msvc"

@@ -48,8 +48,16 @@ impl Connection {
pub async fn new(uri: String, options: ConnectionOptions) -> napi::Result<Self> {
let mut builder = ConnectBuilder::new(&uri);
if let Some(interval) = options.read_consistency_interval {
builder =
builder.read_consistency_interval(std::time::Duration::from_secs_f64(interval));
match interval {
Either::A(seconds) => {
builder = builder.read_consistency_interval(Some(
std::time::Duration::from_secs_f64(seconds),
));
}
Either::B(_) => {
builder = builder.read_consistency_interval(None);
}
}
}
if let Some(storage_options) = options.storage_options {
for (key, value) in storage_options {

@@ -4,6 +4,7 @@
use std::collections::HashMap;

use env_logger::Env;
use napi::{bindgen_prelude::Null, Either};
use napi_derive::*;

mod connection;
@@ -18,7 +19,6 @@ mod table;
mod util;

#[napi(object)]
#[derive(Debug)]
pub struct ConnectionOptions {
/// (For LanceDB OSS only): The interval, in seconds, at which to check for
/// updates to the table from other processes. If None, then consistency is not
@@ -29,7 +29,7 @@ pub struct ConnectionOptions {
/// has passed since the last check, then the table will be checked for updates.
/// Note: this consistency only applies to read operations. Write operations are
/// always consistent.
pub read_consistency_interval: Option<f64>,
pub read_consistency_interval: Option<Either<f64, Null>>,
/// (For LanceDB OSS only): configuration for object storage.
///
/// The available options are described at https://lancedb.github.io/lancedb/guides/storage/
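On the TypeScript side this `Either<f64, Null>` surfaces as `number | null`: a positive number means eventual consistency with that many seconds between checks, `0` forces a check on every read, and an explicit `null` disables checking. A sketch mirroring the documentation snippets earlier in this change (the URI is an assumption):

```ts
const eventual = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 5 });
const strong = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 0 });
const unchecked = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: null });
```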
@@ -3,7 +3,7 @@

use std::sync::Arc;

use lancedb::index::scalar::FullTextSearchQuery;
use lancedb::index::scalar::{FtsQuery, FullTextSearchQuery, MatchQuery, PhraseQuery};
use lancedb::query::ExecutableQuery;
use lancedb::query::Query as LanceDbQuery;
use lancedb::query::QueryBase;
@@ -18,7 +18,7 @@ use crate::error::NapiErrorExt;
use crate::iterator::RecordBatchIterator;
use crate::rerankers::Reranker;
use crate::rerankers::RerankerCallbacks;
use crate::util::parse_distance_type;
use crate::util::{parse_distance_type, parse_fts_query};

#[napi]
pub struct Query {
@@ -38,9 +38,53 @@ impl Query {
}

#[napi]
pub fn full_text_search(&mut self, query: String, columns: Option<Vec<String>>) {
let query = FullTextSearchQuery::new(query).columns(columns);
pub fn full_text_search(&mut self, query: napi::JsUnknown) -> napi::Result<()> {
let query = unsafe { query.cast::<napi::JsObject>() };
let query = if let Some(query_text) = query.get::<_, String>("query").transpose() {
let mut query_text = query_text?;
let columns = query.get::<_, Option<Vec<String>>>("columns")?.flatten();

let is_phrase =
query_text.len() >= 2 && query_text.starts_with('"') && query_text.ends_with('"');
let is_multi_match = columns.as_ref().map(|cols| cols.len() > 1).unwrap_or(false);

if is_phrase {
// Remove the surrounding quotes for phrase queries
query_text = query_text[1..query_text.len() - 1].to_string();
}

let query: FtsQuery = match (is_phrase, is_multi_match) {
(false, _) => MatchQuery::new(query_text).into(),
(true, false) => PhraseQuery::new(query_text).into(),
(true, true) => {
return Err(napi::Error::from_reason(
"Phrase queries cannot be used with multiple columns.",
));
}
};
let mut query = FullTextSearchQuery::new_query(query);
if let Some(cols) = columns {
if !cols.is_empty() {
query = query.with_columns(&cols).map_err(|e| {
napi::Error::from_reason(format!(
"Failed to set full text search columns: {}",
e
))
})?;
}
}
query
} else if let Some(query) = query.get::<_, napi::JsObject>("query")? {
let query = parse_fts_query(&query)?;
FullTextSearchQuery::new_query(query)
} else {
return Err(napi::Error::from_reason(
"Invalid full text search query object".to_string(),
));
};

self.inner = self.inner.clone().full_text_search(query);
Ok(())
}

#[napi]
@@ -87,11 +131,15 @@ impl Query {
pub async fn execute(
&self,
max_batch_length: Option<u32>,
timeout_ms: Option<u32>,
) -> napi::Result<RecordBatchIterator> {
let mut execution_opts = QueryExecutionOptions::default();
if let Some(max_batch_length) = max_batch_length {
execution_opts.max_batch_length = max_batch_length;
}
if let Some(timeout_ms) = timeout_ms {
execution_opts.timeout = Some(std::time::Duration::from_millis(timeout_ms as u64))
}
let inner_stream = self
.inner
.execute_with_options(execution_opts)
@@ -114,6 +162,16 @@ impl Query {
))
})
}

#[napi(catch_unwind)]
pub async fn analyze_plan(&self) -> napi::Result<String> {
self.inner.analyze_plan().await.map_err(|e| {
napi::Error::from_reason(format!(
"Failed to execute analyze plan: {}",
convert_error(&e)
))
})
}
}

#[napi]
@@ -185,9 +243,53 @@ impl VectorQuery {
}

#[napi]
pub fn full_text_search(&mut self, query: String, columns: Option<Vec<String>>) {
let query = FullTextSearchQuery::new(query).columns(columns);
pub fn full_text_search(&mut self, query: napi::JsUnknown) -> napi::Result<()> {
let query = unsafe { query.cast::<napi::JsObject>() };
let query = if let Some(query_text) = query.get::<_, String>("query").transpose() {
let mut query_text = query_text?;
let columns = query.get::<_, Option<Vec<String>>>("columns")?.flatten();

let is_phrase =
query_text.len() >= 2 && query_text.starts_with('"') && query_text.ends_with('"');
let is_multi_match = columns.as_ref().map(|cols| cols.len() > 1).unwrap_or(false);

if is_phrase {
// Remove the surrounding quotes for phrase queries
query_text = query_text[1..query_text.len() - 1].to_string();
}

let query: FtsQuery = match (is_phrase, is_multi_match) {
(false, _) => MatchQuery::new(query_text).into(),
(true, false) => PhraseQuery::new(query_text).into(),
(true, true) => {
return Err(napi::Error::from_reason(
"Phrase queries cannot be used with multiple columns.",
));
}
};
let mut query = FullTextSearchQuery::new_query(query);
if let Some(cols) = columns {
if !cols.is_empty() {
query = query.with_columns(&cols).map_err(|e| {
napi::Error::from_reason(format!(
"Failed to set full text search columns: {}",
e
))
})?;
}
}
query
} else if let Some(query) = query.get::<_, napi::JsObject>("query")? {
let query = parse_fts_query(&query)?;
FullTextSearchQuery::new_query(query)
} else {
return Err(napi::Error::from_reason(
"Invalid full text search query object".to_string(),
));
};

self.inner = self.inner.clone().full_text_search(query);
Ok(())
}

#[napi]
@@ -232,11 +334,15 @@ impl VectorQuery {
pub async fn execute(
&self,
max_batch_length: Option<u32>,
timeout_ms: Option<u32>,
) -> napi::Result<RecordBatchIterator> {
let mut execution_opts = QueryExecutionOptions::default();
if let Some(max_batch_length) = max_batch_length {
execution_opts.max_batch_length = max_batch_length;
}
if let Some(timeout_ms) = timeout_ms {
execution_opts.timeout = Some(std::time::Duration::from_millis(timeout_ms as u64))
}
let inner_stream = self
.inner
.execute_with_options(execution_opts)
@@ -259,4 +365,14 @@ impl VectorQuery {
))
})
}

#[napi(catch_unwind)]
pub async fn analyze_plan(&self) -> napi::Result<String> {
self.inner.analyze_plan().await.map_err(|e| {
napi::Error::from_reason(format!(
"Failed to execute analyze plan: {}",
convert_error(&e)
))
})
}
}

@@ -1,6 +1,7 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use lancedb::index::scalar::{BoostQuery, FtsQuery, MatchQuery, MultiMatchQuery, PhraseQuery};
use lancedb::DistanceType;

pub fn parse_distance_type(distance_type: impl AsRef<str>) -> napi::Result<DistanceType> {
@@ -15,3 +16,144 @@ pub fn parse_distance_type(distance_type: impl AsRef<str>) -> napi::Result<Dista
))),
}
}

pub fn parse_fts_query(query: &napi::JsObject) -> napi::Result<FtsQuery> {
let query_type = query
.get_property_names()?
.get_element::<napi::JsString>(0)?;
let query_type = query_type.into_utf8()?.into_owned()?;
let query_value =
query
.get::<_, napi::JsObject>(&query_type)?
.ok_or(napi::Error::from_reason(format!(
"query value {} not found",
query_type
)))?;

match query_type.as_str() {
"match" => {
let column = query_value
.get_property_names()?
.get_element::<napi::JsString>(0)?
.into_utf8()?
.into_owned()?;
let params =
query_value
.get::<_, napi::JsObject>(&column)?
.ok_or(napi::Error::from_reason(format!(
"column {} not found",
column
)))?;

let query = params
.get::<_, napi::JsString>("query")?
.ok_or(napi::Error::from_reason("query not found"))?
.into_utf8()?
.into_owned()?;
let boost = params
.get::<_, napi::JsNumber>("boost")?
.ok_or(napi::Error::from_reason("boost not found"))?
.get_double()? as f32;
let fuzziness = params
.get::<_, napi::JsNumber>("fuzziness")?
.map(|f| f.get_uint32())
.transpose()?;
let max_expansions = params
.get::<_, napi::JsNumber>("max_expansions")?
.ok_or(napi::Error::from_reason("max_expansions not found"))?
.get_uint32()? as usize;

let query = MatchQuery::new(query)
.with_column(Some(column))
.with_boost(boost)
.with_fuzziness(fuzziness)
.with_max_expansions(max_expansions);
Ok(query.into())
}

"match_phrase" => {
let column = query_value
.get_property_names()?
.get_element::<napi::JsString>(0)?
.into_utf8()?
.into_owned()?;
let query = query_value
.get::<_, napi::JsString>(&column)?
.ok_or(napi::Error::from_reason(format!(
"column {} not found",
column
)))?
.into_utf8()?
.into_owned()?;

let query = PhraseQuery::new(query).with_column(Some(column));
Ok(query.into())
}

"boost" => {
let positive = query_value
.get::<_, napi::JsObject>("positive")?
.ok_or(napi::Error::from_reason("positive not found"))?;

let negative = query_value
.get::<_, napi::JsObject>("negative")?
.ok_or(napi::Error::from_reason("negative not found"))?;
let negative_boost = query_value
.get::<_, napi::JsNumber>("negative_boost")?
.ok_or(napi::Error::from_reason("negative_boost not found"))?
.get_double()? as f32;

let positive = parse_fts_query(&positive)?;
let negative = parse_fts_query(&negative)?;
let query = BoostQuery::new(positive, negative, Some(negative_boost));
Ok(query.into())
}

"multi_match" => {
let query = query_value
.get::<_, napi::JsString>("query")?
.ok_or(napi::Error::from_reason("query not found"))?
.into_utf8()?
.into_owned()?;
let columns_array = query_value
.get::<_, napi::JsTypedArray>("columns")?
.ok_or(napi::Error::from_reason("columns not found"))?;
let columns_num = columns_array.get_array_length()?;
let mut columns = Vec::with_capacity(columns_num as usize);
for i in 0..columns_num {
let column = columns_array
.get_element::<napi::JsString>(i)?
.into_utf8()?
.into_owned()?;
columns.push(column);
}
let boost_array = query_value
.get::<_, napi::JsTypedArray>("boost")?
.ok_or(napi::Error::from_reason("boost not found"))?;
if boost_array.get_array_length()? != columns_num {
return Err(napi::Error::from_reason(format!(
"boost array length ({}) does not match columns length ({})",
boost_array.get_array_length()?,
columns_num
)));
}
let mut boost = Vec::with_capacity(columns_num as usize);
for i in 0..columns_num {
let b = boost_array.get_element::<napi::JsNumber>(i)?.get_double()? as f32;
boost.push(b);
}

let query =
MultiMatchQuery::try_new_with_boosts(query, columns, boost).map_err(|e| {
napi::Error::from_reason(format!("Error creating MultiMatchQuery: {}", e))
})?;

Ok(query.into())
}

_ => Err(napi::Error::from_reason(format!(
"Unsupported query type: {}",
query_type
))),
}
}
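Tying the two sides together: `parse_fts_query` walks a plain object whose single top-level key names the query type, which is the shape the TypeScript `toDict()` implementations emit. The literals below sketch the four accepted shapes (column names are illustrative):

```ts
const matchShape = {
  match: {
    text: { query: "lance", boost: 1.0, fuzziness: 0, max_expansions: 50 },
  },
};
const phraseShape = { match_phrase: { text: "lance database" } };
const boostShape = {
  boost: { positive: matchShape, negative: phraseShape, negative_boost: 0.5 },
};
const multiMatchShape = {
  multi_match: { query: "lance", columns: ["title", "body"], boost: [2.0, 1.0] },
};
```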
@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.21.2-beta.0"
current_version = "0.22.0-beta.6"
parse = """(?x)
(?P<major>0|[1-9]\\d*)\\.
(?P<minor>0|[1-9]\\d*)\\.

@@ -1,6 +1,6 @@
[package]
name = "lancedb-python"
version = "0.21.2-beta.0"
version = "0.22.0-beta.6"
edition.workspace = true
description = "Python bindings for LanceDB"
license.workspace = true

@@ -4,11 +4,12 @@ name = "lancedb"
dynamic = ["version"]
dependencies = [
"deprecation",
"tqdm>=4.27.0",
"numpy",
"overrides>=0.7",
"packaging",
"pyarrow>=14",
"pydantic>=1.10",
"packaging",
"overrides>=0.7",
"tqdm>=4.27.0",
]
description = "lancedb"
authors = [{ name = "LanceDB Devs", email = "dev@lancedb.com" }]
@@ -55,6 +56,7 @@ tests = [
"tantivy",
"pyarrow-stubs",
"pylance>=0.23.2",
"requests",
]
dev = [
"ruff",

@@ -7,6 +7,7 @@ import os
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from datetime import timedelta
|
||||
from typing import Dict, Optional, Union, Any
|
||||
import warnings
|
||||
|
||||
__version__ = importlib.metadata.version("lancedb")
|
||||
|
||||
@@ -25,7 +26,7 @@ def connect(
|
||||
api_key: Optional[str] = None,
|
||||
region: str = "us-east-1",
|
||||
host_override: Optional[str] = None,
|
||||
read_consistency_interval: Optional[timedelta] = None,
|
||||
read_consistency_interval: Optional[timedelta] = timedelta(seconds=5),
|
||||
request_thread_pool: Optional[Union[int, ThreadPoolExecutor]] = None,
|
||||
client_config: Union[ClientConfig, Dict[str, Any], None] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
@@ -48,9 +49,8 @@ def connect(
|
||||
read_consistency_interval: timedelta, default None
|
||||
(For LanceDB OSS only)
|
||||
The interval at which to check for updates to the table from other
|
||||
processes. If None, then consistency is not checked. For performance
|
||||
reasons, this is the default. For strong consistency, set this to
|
||||
zero seconds. Then every read will check for updates from other
|
||||
processes. If None, then consistency is not checked. For strong consistency,
|
||||
set this to zero seconds. Then every read will check for updates from other
|
||||
processes. As a compromise, you can set this to a non-zero timedelta
|
||||
for eventual consistency. If more than that interval has passed since
|
||||
the last check, then the table will be checked for updates. Note: this
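A quick sketch of how the consistency knob reads at the call site (local URI is illustrative; the semantics come from the docstring above):

    import lancedb
    from datetime import timedelta

    # Strong consistency: every read checks for updates from other processes.
    db = lancedb.connect("./.lancedb", read_consistency_interval=timedelta(0))

    # Eventual consistency: re-check at most every 5 seconds, which this diff
    # makes the new default.
    db = lancedb.connect("./.lancedb", read_consistency_interval=timedelta(seconds=5))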
@@ -121,7 +121,7 @@ async def connect_async(
    api_key: Optional[str] = None,
    region: str = "us-east-1",
    host_override: Optional[str] = None,
    read_consistency_interval: Optional[timedelta] = None,
    read_consistency_interval: Optional[timedelta] = timedelta(seconds=5),
    client_config: Optional[Union[ClientConfig, Dict[str, Any]]] = None,
    storage_options: Optional[Dict[str, str]] = None,
) -> AsyncConnection:
@@ -142,9 +142,8 @@ async def connect_async(
    read_consistency_interval: timedelta, default None
        (For LanceDB OSS only)
        The interval at which to check for updates to the table from other
        processes. If None, then consistency is not checked. For performance
        reasons, this is the default. For strong consistency, set this to
        zero seconds. Then every read will check for updates from other
        processes. If None, then consistency is not checked. For strong consistency,
        set this to zero seconds. Then every read will check for updates from other
        processes. As a compromise, you can set this to a non-zero timedelta
        for eventual consistency. If more than that interval has passed since
        the last check, then the table will be checked for updates. Note: this
@@ -213,3 +212,13 @@ __all__ = [
    "RemoteDBConnection",
    "__version__",
]


def __warn_on_fork():
    warnings.warn(
        "lance is not fork-safe. If you are using multiprocessing, use spawn instead.",
    )


if hasattr(os, "register_at_fork"):
    os.register_at_fork(before=__warn_on_fork)
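The fork warning matters on platforms where `multiprocessing` defaults to the "fork" start method. A minimal sketch of the recommended pattern, assuming a hypothetical worker function and URI:

    import multiprocessing as mp

    def worker(uri: str):
        import lancedb
        db = lancedb.connect(uri)  # open the connection inside the child process
        ...

    if __name__ == "__main__":
        ctx = mp.get_context("spawn")  # lance is not fork-safe; use spawn
        p = ctx.Process(target=worker, args=("./.lancedb",))
        p.start()
        p.join()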

@@ -1,3 +1,4 @@
from datetime import timedelta
from typing import Dict, List, Optional, Tuple, Any, Union, Literal

import pyarrow as pa
@@ -48,10 +49,11 @@ class Table:
    async def version(self) -> int: ...
    async def checkout(self, version: int): ...
    async def checkout_latest(self): ...
    async def restore(self): ...
    async def restore(self, version: Optional[int] = None): ...
    async def list_indices(self) -> list[IndexConfig]: ...
    async def delete(self, filter: str): ...
    async def add_columns(self, columns: list[tuple[str, str]]) -> None: ...
    async def add_columns_with_schema(self, schema: pa.Schema) -> None: ...
    async def alter_columns(self, columns: list[dict[str, Any]]) -> None: ...
    async def optimize(
        self,
@@ -93,7 +95,11 @@ class Query:
    def postfilter(self): ...
    def nearest_to(self, query_vec: pa.Array) -> VectorQuery: ...
    def nearest_to_text(self, query: dict) -> FTSQuery: ...
    async def execute(self, max_batch_length: Optional[int]) -> RecordBatchStream: ...
    async def execute(
        self, max_batch_length: Optional[int], timeout: Optional[timedelta]
    ) -> RecordBatchStream: ...
    async def explain_plan(self, verbose: Optional[bool]) -> str: ...
    async def analyze_plan(self) -> str: ...
    def to_query_request(self) -> PyQueryRequest: ...

class FTSQuery:
@@ -107,8 +113,9 @@ class FTSQuery:
    def get_query(self) -> str: ...
    def add_query_vector(self, query_vec: pa.Array) -> None: ...
    def nearest_to(self, query_vec: pa.Array) -> HybridQuery: ...
    async def execute(self, max_batch_length: Optional[int]) -> RecordBatchStream: ...
    async def explain_plan(self) -> str: ...
    async def execute(
        self, max_batch_length: Optional[int], timeout: Optional[timedelta]
    ) -> RecordBatchStream: ...
    def to_query_request(self) -> PyQueryRequest: ...

class VectorQuery:

@@ -6,6 +6,7 @@ from __future__ import annotations

from abc import abstractmethod
from pathlib import Path
from datetime import timedelta
from typing import TYPE_CHECKING, Dict, Iterable, List, Literal, Optional, Union

from lancedb.embeddings.registry import EmbeddingFunctionRegistry
@@ -32,7 +33,6 @@ import deprecation
if TYPE_CHECKING:
    import pyarrow as pa
    from .pydantic import LanceModel
    from datetime import timedelta

    from ._lancedb import Connection as LanceDbConnection
    from .common import DATA, URI
@@ -318,9 +318,8 @@ class LanceDBConnection(DBConnection):
        The root uri of the database.
    read_consistency_interval: timedelta, default None
        The interval at which to check for updates to the table from other
        processes. If None, then consistency is not checked. For performance
        reasons, this is the default. For strong consistency, set this to
        zero seconds. Then every read will check for updates from other
        processes. If None, then consistency is not checked. For strong consistency,
        set this to zero seconds. Then every read will check for updates from other
        processes. As a compromise, you can set this to a non-zero timedelta
        for eventual consistency. If more than that interval has passed since
        the last check, then the table will be checked for updates. Note: this
@@ -352,7 +351,7 @@ class LanceDBConnection(DBConnection):
        self,
        uri: URI,
        *,
        read_consistency_interval: Optional[timedelta] = None,
        read_consistency_interval: Optional[timedelta] = timedelta(seconds=5),
        storage_options: Optional[Dict[str, str]] = None,
    ):
        if not isinstance(uri, Path):

@@ -1,9 +1,12 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors


import base64
import os
from typing import ClassVar, TYPE_CHECKING, List, Union
from typing import ClassVar, TYPE_CHECKING, List, Union, Any

from pathlib import Path
from urllib.parse import urlparse
from io import BytesIO

import numpy as np
import pyarrow as pa
@@ -11,12 +14,100 @@ import pyarrow as pa
from ..util import attempt_import_or_raise
from .base import EmbeddingFunction
from .registry import register
from .utils import api_key_not_found_help, IMAGES
from .utils import api_key_not_found_help, IMAGES, TEXT

if TYPE_CHECKING:
    import PIL


def is_valid_url(text):
    try:
        parsed = urlparse(text)
        return bool(parsed.scheme) and bool(parsed.netloc)
    except Exception:
        return False


def transform_input(input_data: Union[str, bytes, Path]):
    PIL = attempt_import_or_raise("PIL", "pillow")
    if isinstance(input_data, str):
        if is_valid_url(input_data):
            content = {"type": "image_url", "image_url": input_data}
        else:
            content = {"type": "text", "text": input_data}
    elif isinstance(input_data, PIL.Image.Image):
        buffered = BytesIO()
        input_data.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
        content = {
            "type": "image_base64",
            "image_base64": "data:image/jpeg;base64," + img_str,
        }
    elif isinstance(input_data, bytes):
        img = PIL.Image.open(BytesIO(input_data))
        buffered = BytesIO()
        img.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
        content = {
            "type": "image_base64",
            "image_base64": "data:image/jpeg;base64," + img_str,
        }
    elif isinstance(input_data, Path):
        img = PIL.Image.open(input_data)
        buffered = BytesIO()
        img.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
        content = {
            "type": "image_base64",
            "image_base64": "data:image/jpeg;base64," + img_str,
        }
    else:
        raise ValueError("Each input should be either str, bytes, Path or Image.")

    return {"content": [content]}


def sanitize_multimodal_input(inputs: Union[TEXT, IMAGES]) -> List[Any]:
    """
    Sanitize the input to the embedding function.
    """
    PIL = attempt_import_or_raise("PIL", "pillow")
    if isinstance(inputs, (str, bytes, Path, PIL.Image.Image)):
        inputs = [inputs]
    elif isinstance(inputs, pa.Array):
        inputs = inputs.to_pylist()
    elif isinstance(inputs, pa.ChunkedArray):
        inputs = inputs.combine_chunks().to_pylist()
    else:
        raise ValueError(
            f"Input type {type(inputs)} not allowed with multimodal model."
        )

    if not all(isinstance(x, (str, bytes, Path, PIL.Image.Image)) for x in inputs):
        raise ValueError("Each input should be either str, bytes, Path or Image.")

    return [transform_input(i) for i in inputs]


def sanitize_text_input(inputs: TEXT) -> List[str]:
    """
    Sanitize the input to the embedding function.
    """
    if isinstance(inputs, str):
        inputs = [inputs]
    elif isinstance(inputs, pa.Array):
        inputs = inputs.to_pylist()
    elif isinstance(inputs, pa.ChunkedArray):
        inputs = inputs.combine_chunks().to_pylist()
    else:
        raise ValueError(f"Input type {type(inputs)} not allowed with text model.")

    if not all(isinstance(x, str) for x in inputs):
        raise ValueError("Each input should be str.")

    return inputs
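For reference, a short sketch of what these helpers produce for each input kind (the dict shapes are taken directly from `transform_input` above; the inputs are illustrative):

    transform_input("https://example.com/cat.jpg")
    # -> {"content": [{"type": "image_url",
    #                  "image_url": "https://example.com/cat.jpg"}]}

    transform_input("a photo of a cat")
    # -> {"content": [{"type": "text", "text": "a photo of a cat"}]}

    # bytes, Path, and PIL.Image inputs are re-encoded as JPEG and sent as
    # {"type": "image_base64", "image_base64": "data:image/jpeg;base64,..."}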


@register("voyageai")
class VoyageAIEmbeddingFunction(EmbeddingFunction):
    """
@@ -74,6 +165,11 @@ class VoyageAIEmbeddingFunction(EmbeddingFunction):
    ]
    multimodal_embedding_models: list = ["voyage-multimodal-3"]

    def _is_multimodal_model(self, model_name: str):
        return (
            model_name in self.multimodal_embedding_models or "multimodal" in model_name
        )

    def ndims(self):
        if self.name == "voyage-3-lite":
            return 512
@@ -85,55 +181,12 @@ class VoyageAIEmbeddingFunction(EmbeddingFunction):
            "voyage-finance-2",
            "voyage-multilingual-2",
            "voyage-law-2",
            "voyage-multimodal-3",
        ]:
            return 1024
        else:
            raise ValueError(f"Model {self.name} not supported")

    def sanitize_input(self, images: IMAGES) -> Union[List[bytes], np.ndarray]:
        """
        Sanitize the input to the embedding function.
        """
        if isinstance(images, (str, bytes)):
            images = [images]
        elif isinstance(images, pa.Array):
            images = images.to_pylist()
        elif isinstance(images, pa.ChunkedArray):
            images = images.combine_chunks().to_pylist()
        return images

    def generate_text_embeddings(self, text: str, **kwargs) -> np.ndarray:
        """
        Get the embeddings for the given texts

        Parameters
        ----------
        texts: list[str] or np.ndarray (of str)
            The texts to embed
        input_type: Optional[str]

        truncation: Optional[bool]
        """
        client = VoyageAIEmbeddingFunction._get_client()
        if self.name in self.text_embedding_models:
            rs = client.embed(texts=[text], model=self.name, **kwargs)
        elif self.name in self.multimodal_embedding_models:
            rs = client.multimodal_embed(inputs=[[text]], model=self.name, **kwargs)
        else:
            raise ValueError(
                f"Model {self.name} not supported to generate text embeddings"
            )

        return rs.embeddings[0]

    def generate_image_embedding(
        self, image: "PIL.Image.Image", **kwargs
    ) -> np.ndarray:
        rs = VoyageAIEmbeddingFunction._get_client().multimodal_embed(
            inputs=[[image]], model=self.name, **kwargs
        )
        return rs.embeddings[0]

    def compute_query_embeddings(
        self, query: Union[str, "PIL.Image.Image"], *args, **kwargs
    ) -> List[np.ndarray]:
@@ -144,23 +197,52 @@ class VoyageAIEmbeddingFunction(EmbeddingFunction):
        ----------
        query : Union[str, PIL.Image.Image]
            The query to embed. A query can be either text or an image.

        Returns
        -------
        List[np.array]: the list of embeddings
        """
        if isinstance(query, str):
            return [self.generate_text_embeddings(query, input_type="query")]
        client = VoyageAIEmbeddingFunction._get_client()
        if self._is_multimodal_model(self.name):
            result = client.multimodal_embed(
                inputs=[[query]], model=self.name, input_type="query", **kwargs
            )
        else:
            PIL = attempt_import_or_raise("PIL", "pillow")
            if isinstance(query, PIL.Image.Image):
                return [self.generate_image_embedding(query, input_type="query")]
            else:
                raise TypeError("Only text or PIL images supported as query")
            result = client.embed(
                texts=[query], model=self.name, input_type="query", **kwargs
            )

        return [result.embeddings[0]]

    def compute_source_embeddings(
        self, images: IMAGES, *args, **kwargs
        self, inputs: Union[TEXT, IMAGES], *args, **kwargs
    ) -> List[np.array]:
        images = self.sanitize_input(images)
        return [
            self.generate_image_embedding(img, input_type="document") for img in images
        ]
        """
        Compute the embeddings for the inputs

        Parameters
        ----------
        inputs : Union[TEXT, IMAGES]
            The inputs to embed. The input can be either str, bytes, Path (to an image),
            PIL.Image or list of these.

        Returns
        -------
        List[np.array]: the list of embeddings
        """
        client = VoyageAIEmbeddingFunction._get_client()
        if self._is_multimodal_model(self.name):
            inputs = sanitize_multimodal_input(inputs)
            result = client.multimodal_embed(
                inputs=inputs, model=self.name, input_type="document", **kwargs
            )
        else:
            inputs = sanitize_text_input(inputs)
            result = client.embed(
                texts=inputs, model=self.name, input_type="document", **kwargs
            )

        return result.embeddings
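A hedged usage sketch for the reworked multimodal path. The registry lookup pattern is an assumption based on lancedb's embedding registry (imported elsewhere in this diff); the model name comes from `multimodal_embedding_models` above and the inputs are illustrative:

    from lancedb.embeddings import EmbeddingFunctionRegistry

    func = (
        EmbeddingFunctionRegistry.get_instance()
        .get("voyageai")
        .create(name="voyage-multimodal-3")
    )
    # Text, URLs, image bytes, Paths, and PIL images can now be mixed,
    # because compute_source_embeddings routes them through
    # sanitize_multimodal_input -> transform_input:
    embeddings = func.compute_source_embeddings(
        ["a photo of a cat", "https://example.com/cat.jpg"]
    )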

    @staticmethod
    def _get_client():

@@ -4,7 +4,10 @@
from __future__ import annotations

from abc import ABC, abstractmethod
import abc
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
from datetime import timedelta
from typing import (
    TYPE_CHECKING,
    Dict,
@@ -83,6 +86,213 @@ def ensure_vector_query(
    return val


class FullTextQueryType(Enum):
    MATCH = "match"
    MATCH_PHRASE = "match_phrase"
    BOOST = "boost"
    MULTI_MATCH = "multi_match"


class FullTextQuery(abc.ABC, pydantic.BaseModel):
    @abc.abstractmethod
    def query_type(self) -> FullTextQueryType:
        """
        Get the query type of the query.

        Returns
        -------
        str
            The type of the query.
        """

    @abc.abstractmethod
    def to_dict(self) -> dict:
        """
        Convert the query to a dictionary.

        Returns
        -------
        dict
            The query as a dictionary.
        """


class MatchQuery(FullTextQuery):
    query: str
    column: str
    boost: float = 1.0
    fuzziness: int = 0
    max_expansions: int = 50

    def __init__(
        self,
        query: str,
        column: str,
        *,
        boost: float = 1.0,
        fuzziness: int = 0,
        max_expansions: int = 50,
    ):
        """
        Match query for full-text search.

        Parameters
        ----------
        query : str
            The query string to match against.
        column : str
            The name of the column to match against.
        boost : float, default 1.0
            The boost factor for the query.
            The score of each matching document is multiplied by this value.
        fuzziness : int, optional
            The maximum edit distance for each term in the match query.
            Defaults to 0 (exact match).
            If None, fuzziness is applied automatically by the rules:
                - 0 for terms with length <= 2
                - 1 for terms with length <= 5
                - 2 for terms with length > 5
        max_expansions : int, optional
            The maximum number of terms to consider for fuzzy matching.
            Defaults to 50.
        """
        super().__init__(
            query=query,
            column=column,
            boost=boost,
            fuzziness=fuzziness,
            max_expansions=max_expansions,
        )

    def query_type(self) -> FullTextQueryType:
        return FullTextQueryType.MATCH

    def to_dict(self) -> dict:
        return {
            "match": {
                self.column: {
                    "query": self.query,
                    "boost": self.boost,
                    "fuzziness": self.fuzziness,
                    "max_expansions": self.max_expansions,
                }
            }
        }
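A quick sketch of how this reads at the call site (table and column names are illustrative; the class and its `to_dict()` shape come from this diff):

    from lancedb.query import MatchQuery

    q = MatchQuery("puppy", "text", fuzziness=2)
    results = table.search(q, query_type="fts").limit(10).to_list()

    q.to_dict()
    # {"match": {"text": {"query": "puppy", "boost": 1.0,
    #                     "fuzziness": 2, "max_expansions": 50}}}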


class PhraseQuery(FullTextQuery):
    query: str
    column: str

    def __init__(self, query: str, column: str):
        """
        Phrase query for full-text search.

        Parameters
        ----------
        query : str
            The query string to match against.
        column : str
            The name of the column to match against.
        """
        super().__init__(query=query, column=column)

    def query_type(self) -> FullTextQueryType:
        return FullTextQueryType.MATCH_PHRASE

    def to_dict(self) -> dict:
        return {
            "match_phrase": {
                self.column: self.query,
            }
        }


class BoostQuery(FullTextQuery):
    positive: FullTextQuery
    negative: FullTextQuery
    negative_boost: float = 0.5

    def __init__(
        self,
        positive: FullTextQuery,
        negative: FullTextQuery,
        *,
        negative_boost: float = 0.5,
    ):
        """
        Boost query for full-text search.

        Parameters
        ----------
        positive : FullTextQuery
            The positive query object.
        negative : FullTextQuery
            The negative query object.
        negative_boost : float
            The boost factor for the negative query.
        """
        super().__init__(
            positive=positive, negative=negative, negative_boost=negative_boost
        )

    def query_type(self) -> FullTextQueryType:
        return FullTextQueryType.BOOST

    def to_dict(self) -> dict:
        return {
            "boost": {
                "positive": self.positive.to_dict(),
                "negative": self.negative.to_dict(),
                "negative_boost": self.negative_boost,
            }
        }
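A minimal sketch of composing a boost query from two sub-queries (column names illustrative); documents matching the negative query keep their match but are scored down by `negative_boost`:

    from lancedb.query import BoostQuery, MatchQuery

    q = BoostQuery(
        MatchQuery("puppy", "text"),     # rank these documents up
        MatchQuery("training", "text"),  # rank these documents down
        negative_boost=0.3,
    )
    results = table.search(q, query_type="fts").to_list()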


class MultiMatchQuery(FullTextQuery):
    query: str
    columns: list[str]
    boosts: list[float]

    def __init__(
        self,
        query: str,
        columns: list[str],
        *,
        boosts: Optional[list[float]] = None,
    ):
        """
        Multi-match query for full-text search.

        Parameters
        ----------
        query : str
            The query string to match against.

        columns : list[str]
            The list of columns to match against.

        boosts : list[float], optional
            The list of boost factors for each column. If not provided,
            all columns will have the same boost factor.
        """
        if boosts is None:
            boosts = [1.0] * len(columns)
        super().__init__(query=query, columns=columns, boosts=boosts)

    def query_type(self) -> FullTextQueryType:
        return FullTextQueryType.MULTI_MATCH

    def to_dict(self) -> dict:
        return {
            "multi_match": {
                "query": self.query,
                "columns": self.columns,
                "boost": self.boosts,
            }
        }
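This is the Python counterpart of the `multi_match` arm in the napi parser near the top of this section; note the serialized key is `boost` even though the field is `boosts`. A short sketch (column names illustrative):

    from lancedb.query import MultiMatchQuery

    q = MultiMatchQuery("puppy", ["title", "body"], boosts=[2.0, 1.0])
    q.to_dict()
    # {"multi_match": {"query": "puppy", "columns": ["title", "body"],
    #                  "boost": [2.0, 1.0]}}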


class FullTextSearchQuery(pydantic.BaseModel):
    """A LanceDB Full Text Search Query

@@ -92,27 +302,23 @@ class FullTextSearchQuery(pydantic.BaseModel):
        The columns to search

        If None, then the table should select the column automatically.
    query: str
        The query to search for
    limit: Optional[int] = None
        The limit on the number of results to return
    wand_factor: Optional[float] = None
        The wand factor to use for the search
    query: str | FullTextQuery
        If a string, it is treated as a MatchQuery.
        If a FullTextQuery object, it is used directly.
    """

    columns: Optional[List[str]] = None
    query: str
    limit: Optional[int] = None
    wand_factor: Optional[float] = None
    query: Union[str, FullTextQuery]


class Query(pydantic.BaseModel):
    """A LanceDB Query

    Queries are constructed by the `Table.search` and `Table.query` methods. This
    class is a python representation of the query. Normally you will not need to
    interact with this class directly. You can build up a query and execute it using
    collection methods such as `to_batches()`, `to_arrow()`, `to_pandas()`, etc.
    Queries are constructed by the `Table.search` method. This class is a
    python representation of the query. Normally you will not need to interact
    with this class directly. You can build up a query and execute it using
    collection methods such as `to_batches()`, `to_arrow()`, `to_pandas()`,
    etc.

    However, you can use the `to_query()` method to get the underlying query object.
    This can be useful for serializing a query or using it in a different context.
@@ -356,7 +562,7 @@ class LanceQueryBuilder(ABC):
            table, query, vector_column_name, fts_columns=fts_columns
        )

        if isinstance(query, str):
        if isinstance(query, (str, FullTextQuery)):
            # fts
            return LanceFtsQueryBuilder(
                table,
@@ -381,8 +587,10 @@ class LanceQueryBuilder(ABC):
        # If query_type is fts, then query must be a string.
        # otherwise raise TypeError
        if query_type == "fts":
            if not isinstance(query, str):
                raise TypeError(f"'fts' queries must be a string: {type(query)}")
            if not isinstance(query, (str, FullTextQuery)):
                raise TypeError(
                    f"'fts' query must be a string or FullTextQuery: {type(query)}"
                )
            return query, query_type
        elif query_type == "vector":
            query = cls._query_to_vector(table, query, vector_column_name)
@@ -443,7 +651,12 @@ class LanceQueryBuilder(ABC):
        """
        return self.to_pandas()

    def to_pandas(self, flatten: Optional[Union[int, bool]] = None) -> "pd.DataFrame":
    def to_pandas(
        self,
        flatten: Optional[Union[int, bool]] = None,
        *,
        timeout: Optional[timedelta] = None,
    ) -> "pd.DataFrame":
        """
        Execute the query and return the results as a pandas DataFrame.
        In addition to the selected columns, LanceDB also returns a vector
@@ -457,12 +670,15 @@ class LanceQueryBuilder(ABC):
            If flatten is an integer, flatten the nested columns up to the
            specified depth.
            If unspecified, do not flatten the nested columns.
        timeout: Optional[timedelta]
            The maximum time to wait for the query to complete.
            If None, wait indefinitely.
        """
        tbl = flatten_columns(self.to_arrow(), flatten)
        tbl = flatten_columns(self.to_arrow(timeout=timeout), flatten)
        return tbl.to_pandas()
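The timeout threads through every sync result method below (to_arrow, to_batches, to_list, to_pydantic, to_polars). A one-line sketch of the intended use (vector and table are illustrative):

    from datetime import timedelta

    # Raise an error if the query takes longer than 10 seconds.
    df = table.search([0.1, 0.2]).limit(5).to_pandas(timeout=timedelta(seconds=10))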

    @abstractmethod
    def to_arrow(self) -> pa.Table:
    def to_arrow(self, *, timeout: Optional[timedelta] = None) -> pa.Table:
        """
        Execute the query and return the results as an
        [Apache Arrow Table](https://arrow.apache.org/docs/python/generated/pyarrow.Table.html#pyarrow.Table).
@@ -470,34 +686,65 @@ class LanceQueryBuilder(ABC):
        In addition to the selected columns, LanceDB also returns a vector
        and also the "_distance" column which is the distance between the query
        vector and the returned vectors.

        Parameters
        ----------
        timeout: Optional[timedelta]
            The maximum time to wait for the query to complete.
            If None, wait indefinitely.
        """
        raise NotImplementedError

    @abstractmethod
    def to_batches(self, /, batch_size: Optional[int] = None) -> pa.RecordBatchReader:
    def to_batches(
        self,
        /,
        batch_size: Optional[int] = None,
        *,
        timeout: Optional[timedelta] = None,
    ) -> pa.RecordBatchReader:
        """
        Execute the query and return the results as a pyarrow
        [RecordBatchReader](https://arrow.apache.org/docs/python/generated/pyarrow.RecordBatchReader.html)

        Parameters
        ----------
        batch_size: int
            The maximum number of selected records in a RecordBatch object.
        timeout: Optional[timedelta]
            The maximum time to wait for the query to complete.
            If None, wait indefinitely.
        """
        raise NotImplementedError

    def to_list(self) -> List[dict]:
    def to_list(self, *, timeout: Optional[timedelta] = None) -> List[dict]:
        """
        Execute the query and return the results as a list of dictionaries.

        Each list entry is a dictionary with the selected column names as keys,
        or all table columns if `select` is not called. The vector and the "_distance"
        fields are returned whether or not they're explicitly selected.
        """
        return self.to_arrow().to_pylist()

    def to_pydantic(self, model: Type[LanceModel]) -> List[LanceModel]:
        Parameters
        ----------
        timeout: Optional[timedelta]
            The maximum time to wait for the query to complete.
            If None, wait indefinitely.
        """
        return self.to_arrow(timeout=timeout).to_pylist()

    def to_pydantic(
        self, model: Type[LanceModel], *, timeout: Optional[timedelta] = None
    ) -> List[LanceModel]:
        """Return the table as a list of pydantic models.

        Parameters
        ----------
        model: Type[LanceModel]
            The pydantic model to use.
        timeout: Optional[timedelta]
            The maximum time to wait for the query to complete.
            If None, wait indefinitely.

        Returns
        -------
@@ -505,19 +752,25 @@ class LanceQueryBuilder(ABC):
        """
        return [
            model(**{k: v for k, v in row.items() if k in model.field_names()})
            for row in self.to_arrow().to_pylist()
            for row in self.to_arrow(timeout=timeout).to_pylist()
        ]

    def to_polars(self) -> "pl.DataFrame":
    def to_polars(self, *, timeout: Optional[timedelta] = None) -> "pl.DataFrame":
        """
        Execute the query and return the results as a Polars DataFrame.
        In addition to the selected columns, LanceDB also returns a vector
        and also the "_distance" column which is the distance between the query
        vector and the returned vector.

        Parameters
        ----------
        timeout: Optional[timedelta]
            The maximum time to wait for the query to complete.
            If None, wait indefinitely.
        """
        import polars as pl

        return pl.from_arrow(self.to_arrow())
        return pl.from_arrow(self.to_arrow(timeout=timeout))

    def limit(self, limit: Union[int, None]) -> Self:
        """Set the maximum number of results to return.
@@ -656,7 +909,45 @@ class LanceQueryBuilder(ABC):
        -------
        plan : str
        """  # noqa: E501
        return self._table._explain_plan(self.to_query_object())
        return self._table._explain_plan(self.to_query_object(), verbose=verbose)

    def analyze_plan(self) -> str:
        """
        Run the query and return its execution plan with runtime metrics.

        This returns detailed metrics for each step, such as elapsed time,
        rows processed, bytes read, and I/O stats. It is useful for debugging
        and performance tuning.

        Examples
        --------
        >>> import lancedb
        >>> db = lancedb.connect("./.lancedb")
        >>> table = db.create_table("my_table", [{"vector": [99.0, 99]}])
        >>> query = [100, 100]
        >>> plan = table.search(query).analyze_plan()
        >>> print(plan)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        AnalyzeExec verbose=true, metrics=[]
          ProjectionExec: expr=[...], metrics=[...]
            GlobalLimitExec: skip=0, fetch=10, metrics=[...]
              FilterExec: _distance@2 IS NOT NULL,
                metrics=[output_rows=..., elapsed_compute=...]
                SortExec: TopK(fetch=10), expr=[...],
                  preserve_partitioning=[...],
                  metrics=[output_rows=..., elapsed_compute=..., row_replacements=...]
                  KNNVectorDistance: metric=l2,
                    metrics=[output_rows=..., elapsed_compute=..., output_batches=...]
                    LanceScan: uri=..., projection=[vector], row_id=true,
                      row_addr=false, ordered=false,
                      metrics=[output_rows=..., elapsed_compute=...,
                        bytes_read=..., iops=..., requests=...]

        Returns
        -------
        plan : str
            The physical query execution plan with runtime metrics.
        """
        return self._table._analyze_plan(self.to_query_object())

    def vector(self, vector: Union[np.ndarray, list]) -> Self:
        """Set the vector to search for.
@@ -673,13 +964,14 @@ class LanceQueryBuilder(ABC):
        """
        raise NotImplementedError

    def text(self, text: str) -> Self:
    def text(self, text: str | FullTextQuery) -> Self:
        """Set the text to search for.

        Parameters
        ----------
        text: str
            The text to search for.
        text: str | FullTextQuery
            If a string, it is treated as a MatchQuery.
            If a FullTextQuery object, it is used directly.

        Returns
        -------
@@ -893,7 +1185,7 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
        self._refine_factor = refine_factor
        return self

    def to_arrow(self) -> pa.Table:
    def to_arrow(self, *, timeout: Optional[timedelta] = None) -> pa.Table:
        """
        Execute the query and return the results as an
        [Apache Arrow Table](https://arrow.apache.org/docs/python/generated/pyarrow.Table.html#pyarrow.Table).
@@ -901,8 +1193,14 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
        In addition to the selected columns, LanceDB also returns a vector
        and also the "_distance" column which is the distance between the query
        vector and the returned vectors.

        Parameters
        ----------
        timeout: Optional[timedelta]
            The maximum time to wait for the query to complete.
            If None, wait indefinitely.
        """
        return self.to_batches().read_all()
        return self.to_batches(timeout=timeout).read_all()

    def to_query_object(self) -> Query:
        """
@@ -932,7 +1230,13 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
            bypass_vector_index=self._bypass_vector_index,
        )

    def to_batches(self, /, batch_size: Optional[int] = None) -> pa.RecordBatchReader:
    def to_batches(
        self,
        /,
        batch_size: Optional[int] = None,
        *,
        timeout: Optional[timedelta] = None,
    ) -> pa.RecordBatchReader:
        """
        Execute the query and return the result as a RecordBatchReader object.

@@ -940,6 +1244,9 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
        ----------
        batch_size: int
            The maximum number of selected records in a RecordBatch object.
        timeout: timedelta, default None
            The maximum time to wait for the query to complete.
            If None, wait indefinitely.

        Returns
        -------
@@ -949,7 +1256,9 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
        if isinstance(vector[0], np.ndarray):
            vector = [v.tolist() for v in vector]
        query = self.to_query_object()
        result_set = self._table._execute_query(query, batch_size)
        result_set = self._table._execute_query(
            query, batch_size=batch_size, timeout=timeout
        )
        if self._reranker is not None:
            rs_table = result_set.read_all()
            result_set = self._reranker.rerank_vector(self._str_query, rs_table)
@@ -1045,7 +1354,7 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):
    def __init__(
        self,
        table: "Table",
        query: str,
        query: str | FullTextQuery,
        ordering_field_name: Optional[str] = None,
        fts_columns: Optional[Union[str, List[str]]] = None,
    ):
@@ -1088,7 +1397,7 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):
            offset=self._offset,
        )

    def to_arrow(self) -> pa.Table:
    def to_arrow(self, *, timeout: Optional[timedelta] = None) -> pa.Table:
        path, fs, exist = self._table._get_fts_index_path()
        if exist:
            return self.tantivy_to_arrow()
@@ -1100,14 +1409,16 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):
                "Use tantivy-based index instead for now."
            )
        query = self.to_query_object()
        results = self._table._execute_query(query)
        results = self._table._execute_query(query, timeout=timeout)
        results = results.read_all()
        if self._reranker is not None:
            results = self._reranker.rerank_fts(self._query, results)
            check_reranker_result(results)
        return results

    def to_batches(self, /, batch_size: Optional[int] = None):
    def to_batches(
        self, /, batch_size: Optional[int] = None, timeout: Optional[timedelta] = None
    ):
        raise NotImplementedError("to_batches on an FTS query")

    def tantivy_to_arrow(self) -> pa.Table:
@@ -1212,8 +1523,8 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):


class LanceEmptyQueryBuilder(LanceQueryBuilder):
    def to_arrow(self) -> pa.Table:
        return self.to_batches().read_all()
    def to_arrow(self, *, timeout: Optional[timedelta] = None) -> pa.Table:
        return self.to_batches(timeout=timeout).read_all()

    def to_query_object(self) -> Query:
        return Query(
@@ -1224,9 +1535,11 @@ class LanceEmptyQueryBuilder(LanceQueryBuilder):
            offset=self._offset,
        )

    def to_batches(self, /, batch_size: Optional[int] = None) -> pa.RecordBatchReader:
    def to_batches(
        self, /, batch_size: Optional[int] = None, timeout: Optional[timedelta] = None
    ) -> pa.RecordBatchReader:
        query = self.to_query_object()
        return self._table._execute_query(query, batch_size)
        return self._table._execute_query(query, batch_size=batch_size, timeout=timeout)

    def rerank(self, reranker: Reranker) -> LanceEmptyQueryBuilder:
        """Rerank the results using the specified reranker.
@@ -1259,7 +1572,7 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
    def __init__(
        self,
        table: "Table",
        query: Optional[str] = None,
        query: Optional[Union[str, FullTextQuery]] = None,
        vector_column: Optional[str] = None,
        fts_columns: Optional[Union[str, List[str]]] = None,
    ):
@@ -1289,8 +1602,8 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
        text_query = text or query
        if text_query is None:
            raise ValueError("Text query must be provided for hybrid search.")
        if not isinstance(text_query, str):
            raise ValueError("Text query must be a string")
        if not isinstance(text_query, (str, FullTextQuery)):
            raise ValueError("Text query must be a string or FullTextQuery")

        return vector_query, text_query

@@ -1314,7 +1627,7 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
    def to_query_object(self) -> Query:
        raise NotImplementedError("to_query_object not yet supported on a hybrid query")

    def to_arrow(self) -> pa.Table:
    def to_arrow(self, *, timeout: Optional[timedelta] = None) -> pa.Table:
        vector_query, fts_query = self._validate_query(
            self._query, self._vector, self._text
        )
@@ -1357,9 +1670,11 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
            self._reranker = RRFReranker()

        with ThreadPoolExecutor() as executor:
            fts_future = executor.submit(self._fts_query.with_row_id(True).to_arrow)
            fts_future = executor.submit(
                self._fts_query.with_row_id(True).to_arrow, timeout=timeout
            )
            vector_future = executor.submit(
                self._vector_query.with_row_id(True).to_arrow
                self._vector_query.with_row_id(True).to_arrow, timeout=timeout
            )
            fts_results = fts_future.result()
            vector_results = vector_future.result()
@@ -1446,7 +1761,9 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):

        return results

    def to_batches(self):
    def to_batches(
        self, /, batch_size: Optional[int] = None, timeout: Optional[timedelta] = None
    ):
        raise NotImplementedError("to_batches not yet supported on a hybrid query")

    @staticmethod
@@ -1652,7 +1969,7 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
        self._vector = vector
        return self

    def text(self, text: str) -> LanceHybridQueryBuilder:
    def text(self, text: str | FullTextQuery) -> LanceHybridQueryBuilder:
        self._text = text
        return self

@@ -1810,7 +2127,10 @@ class AsyncQueryBase(object):
        return self

    async def to_batches(
        self, *, max_batch_length: Optional[int] = None
        self,
        *,
        max_batch_length: Optional[int] = None,
        timeout: Optional[timedelta] = None,
    ) -> AsyncRecordBatchReader:
        """
        Execute the query and return the results as an Apache Arrow RecordBatchReader.
@@ -1823,34 +2143,56 @@ class AsyncQueryBase(object):
            If not specified, a default batch length is used.
            It is possible for batches to be smaller than the provided length if the
            underlying data is stored in smaller chunks.
        timeout: Optional[timedelta]
            The maximum time to wait for the query to complete.
            If not specified, no timeout is applied. If the query does not
            complete within the specified time, an error will be raised.
        """
        return AsyncRecordBatchReader(await self._inner.execute(max_batch_length))
        return AsyncRecordBatchReader(
            await self._inner.execute(max_batch_length, timeout)
        )

    async def to_arrow(self) -> pa.Table:
    async def to_arrow(self, timeout: Optional[timedelta] = None) -> pa.Table:
        """
        Execute the query and collect the results into an Apache Arrow Table.

        This method will collect all results into memory before returning. If
        you expect a large number of results, you may want to use
        [to_batches][lancedb.query.AsyncQueryBase.to_batches]

        Parameters
        ----------
        timeout: Optional[timedelta]
            The maximum time to wait for the query to complete.
            If not specified, no timeout is applied. If the query does not
            complete within the specified time, an error will be raised.
        """
        batch_iter = await self.to_batches()
        batch_iter = await self.to_batches(timeout=timeout)
        return pa.Table.from_batches(
            await batch_iter.read_all(), schema=batch_iter.schema
        )

    async def to_list(self) -> List[dict]:
    async def to_list(self, timeout: Optional[timedelta] = None) -> List[dict]:
        """
        Execute the query and return the results as a list of dictionaries.

        Each list entry is a dictionary with the selected column names as keys,
        or all table columns if `select` is not called. The vector and the "_distance"
        fields are returned whether or not they're explicitly selected.

        Parameters
        ----------
        timeout: Optional[timedelta]
            The maximum time to wait for the query to complete.
            If not specified, no timeout is applied. If the query does not
            complete within the specified time, an error will be raised.
        """
        return (await self.to_arrow()).to_pylist()
        return (await self.to_arrow(timeout=timeout)).to_pylist()
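The same timeout plumbing on the async side, as a hedged sketch (`connect_async` appears in this diff; the table name, `open_table`, and `query()` builder are assumptions based on the async API used elsewhere in lancedb):

    import asyncio
    import lancedb
    from datetime import timedelta

    async def main():
        db = await lancedb.connect_async("./.lancedb")
        tbl = await db.open_table("my_table")  # hypothetical table
        rows = await (
            tbl.query()
            .nearest_to([0.1, 0.2])
            .to_list(timeout=timedelta(seconds=10))
        )

    asyncio.run(main())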

    async def to_pandas(
        self, flatten: Optional[Union[int, bool]] = None
        self,
        flatten: Optional[Union[int, bool]] = None,
        timeout: Optional[timedelta] = None,
    ) -> "pd.DataFrame":
        """
        Execute the query and collect the results into a pandas DataFrame.
@@ -1879,10 +2221,19 @@ class AsyncQueryBase(object):
            If flatten is an integer, flatten the nested columns up to the
            specified depth.
            If unspecified, do not flatten the nested columns.
        timeout: Optional[timedelta]
            The maximum time to wait for the query to complete.
            If not specified, no timeout is applied. If the query does not
            complete within the specified time, an error will be raised.
        """
        return (flatten_columns(await self.to_arrow(), flatten)).to_pandas()
        return (
            flatten_columns(await self.to_arrow(timeout=timeout), flatten)
        ).to_pandas()

    async def to_polars(self) -> "pl.DataFrame":
    async def to_polars(
        self,
        timeout: Optional[timedelta] = None,
    ) -> "pl.DataFrame":
        """
        Execute the query and collect the results into a Polars DataFrame.

@@ -1891,6 +2242,13 @@ class AsyncQueryBase(object):
        [to_batches][lancedb.query.AsyncQueryBase.to_batches] and convert each batch to
        polars separately.

        Parameters
        ----------
        timeout: Optional[timedelta]
            The maximum time to wait for the query to complete.
            If not specified, no timeout is applied. If the query does not
            complete within the specified time, an error will be raised.

        Examples
        --------

@@ -1906,7 +2264,7 @@ class AsyncQueryBase(object):
        """
        import polars as pl

        return pl.from_arrow(await self.to_arrow())
        return pl.from_arrow(await self.to_arrow(timeout=timeout))

    async def explain_plan(self, verbose: Optional[bool] = False):
        """Return the execution plan for this query.
@@ -1940,6 +2298,15 @@ class AsyncQueryBase(object):
        """  # noqa: E501
        return await self._inner.explain_plan(verbose)

    async def analyze_plan(self):
        """Execute the query and return its execution plan with runtime metrics.

        Returns
        -------
        plan : str
        """
        return await self._inner.analyze_plan()


class AsyncQuery(AsyncQueryBase):
    def __init__(self, inner: LanceQuery):
@@ -2040,7 +2407,7 @@ class AsyncQuery(AsyncQueryBase):
        )

    def nearest_to_text(
        self, query: str, columns: Union[str, List[str], None] = None
        self, query: str | FullTextQuery, columns: Union[str, List[str], None] = None
    ) -> AsyncFTSQuery:
        """
        Find the documents that are most relevant to the given text query.
@@ -2066,9 +2433,13 @@ class AsyncQuery(AsyncQueryBase):
            columns = [columns]
        if columns is None:
            columns = []
        return AsyncFTSQuery(
            self._inner.nearest_to_text({"query": query, "columns": columns})
        )

        if isinstance(query, str):
            return AsyncFTSQuery(
                self._inner.nearest_to_text({"query": query, "columns": columns})
            )
        # FullTextQuery object
        return AsyncFTSQuery(self._inner.nearest_to_text({"query": query.to_dict()}))
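A short sketch of the two dispatch paths above (column name illustrative); a plain string needs an explicit column list, while a `FullTextQuery` carries its own column and is serialized via `to_dict()`:

    from lancedb.query import MatchQuery

    # Plain string query:
    rows = await table.query().nearest_to_text("puppy", columns=["text"]).to_list()

    # Structured query object:
    rows = await table.query().nearest_to_text(MatchQuery("puppy", "text")).to_list()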


class AsyncFTSQuery(AsyncQueryBase):
@@ -2164,9 +2535,12 @@ class AsyncFTSQuery(AsyncQueryBase):
        )

    async def to_batches(
        self, *, max_batch_length: Optional[int] = None
        self,
        *,
        max_batch_length: Optional[int] = None,
        timeout: Optional[timedelta] = None,
    ) -> AsyncRecordBatchReader:
        reader = await super().to_batches()
        reader = await super().to_batches(timeout=timeout)
        results = pa.Table.from_batches(await reader.read_all(), reader.schema)
        if self._reranker:
            results = self._reranker.rerank_fts(self.get_query(), results)
@@ -2351,7 +2725,7 @@ class AsyncVectorQuery(AsyncQueryBase, AsyncVectorQueryBase):
        return self

    def nearest_to_text(
        self, query: str, columns: Union[str, List[str], None] = None
        self, query: str | FullTextQuery, columns: Union[str, List[str], None] = None
    ) -> AsyncHybridQuery:
        """
        Find the documents that are most relevant to the given text query,
@@ -2381,14 +2755,21 @@ class AsyncVectorQuery(AsyncQueryBase, AsyncVectorQueryBase):
            columns = [columns]
        if columns is None:
            columns = []
        return AsyncHybridQuery(
            self._inner.nearest_to_text({"query": query, "columns": columns})
        )

        if isinstance(query, str):
            return AsyncHybridQuery(
                self._inner.nearest_to_text({"query": query, "columns": columns})
            )
        # FullTextQuery object
        return AsyncHybridQuery(self._inner.nearest_to_text({"query": query.to_dict()}))

    async def to_batches(
        self, *, max_batch_length: Optional[int] = None
        self,
        *,
        max_batch_length: Optional[int] = None,
        timeout: Optional[timedelta] = None,
    ) -> AsyncRecordBatchReader:
        reader = await super().to_batches()
        reader = await super().to_batches(timeout=timeout)
        results = pa.Table.from_batches(await reader.read_all(), reader.schema)
        if self._reranker:
            results = self._reranker.rerank_vector(self._query_string, results)
@@ -2444,7 +2825,10 @@ class AsyncHybridQuery(AsyncQueryBase, AsyncVectorQueryBase):
        return self

    async def to_batches(
        self, *, max_batch_length: Optional[int] = None
        self,
        *,
        max_batch_length: Optional[int] = None,
        timeout: Optional[timedelta] = None,
    ) -> AsyncRecordBatchReader:
        fts_query = AsyncFTSQuery(self._inner.to_fts_query())
        vec_query = AsyncVectorQuery(self._inner.to_vector_query())
@@ -2456,8 +2840,8 @@ class AsyncHybridQuery(AsyncQueryBase, AsyncVectorQueryBase):
        vec_query.with_row_id()

        fts_results, vector_results = await asyncio.gather(
            fts_query.to_arrow(),
            vec_query.to_arrow(),
            fts_query.to_arrow(timeout=timeout),
            vec_query.to_arrow(timeout=timeout),
        )

        result = LanceHybridQueryBuilder._combine_hybrid_results(
@@ -2509,7 +2893,7 @@ class AsyncHybridQuery(AsyncQueryBase, AsyncVectorQueryBase):

        Returns
        -------
        plan
        plan : str
        """  # noqa: E501

        results = ["Vector Search Plan:"]
@@ -2518,3 +2902,23 @@ class AsyncHybridQuery(AsyncQueryBase, AsyncVectorQueryBase):
        results.append(await self._inner.to_fts_query().explain_plan(verbose))

        return "\n".join(results)

    async def analyze_plan(self):
        """
        Execute the query and return the physical execution plan with runtime metrics.

        This runs both the vector and FTS (full-text search) queries and returns
        detailed metrics for each step of execution, such as rows processed,
        elapsed time, and I/O stats. It is useful for debugging and
        performance analysis.

        Returns
        -------
        plan : str
        """
        results = ["Vector Search Query:"]
        results.append(await self._inner.to_vector_query().analyze_plan())
        results.append("FTS Search Query:")
        results.append(await self._inner.to_fts_query().analyze_plan())

        return "\n".join(results)

@@ -87,6 +87,9 @@ class RemoteTable(Table):
    def checkout_latest(self):
        return LOOP.run(self._table.checkout_latest())

    def restore(self, version: Optional[int] = None):
        return LOOP.run(self._table.restore(version))

    def list_indices(self) -> Iterable[IndexConfig]:
        """List all the indices on the table"""
        return LOOP.run(self._table.list_indices())
@@ -352,9 +355,15 @@ class RemoteTable(Table):
        )

    def _execute_query(
        self, query: Query, batch_size: Optional[int] = None
        self,
        query: Query,
        *,
        batch_size: Optional[int] = None,
        timeout: Optional[timedelta] = None,
    ) -> pa.RecordBatchReader:
        async_iter = LOOP.run(self._table._execute_query(query, batch_size=batch_size))
        async_iter = LOOP.run(
            self._table._execute_query(query, batch_size=batch_size, timeout=timeout)
        )

        def iter_sync():
            try:
@@ -365,6 +374,12 @@ class RemoteTable(Table):

        return pa.RecordBatchReader.from_batches(async_iter.schema, iter_sync())

    def _explain_plan(self, query: Query, verbose: Optional[bool] = False) -> str:
        return LOOP.run(self._table._explain_plan(query, verbose))

    def _analyze_plan(self, query: Query) -> str:
        return LOOP.run(self._table._analyze_plan(query))

    def merge_insert(self, on: Union[str, Iterable[str]]) -> LanceMergeInsertBuilder:
        """Returns a [`LanceMergeInsertBuilder`][lancedb.merge.LanceMergeInsertBuilder]
        that can be used to create a "merge insert" operation.

@@ -42,7 +42,9 @@ class AnswerdotaiRerankers(Reranker):
        rerankers = attempt_import_or_raise(
            "rerankers"
        )  # import here for faster ops later
        self.reranker = rerankers.Reranker(model_name, model_type, **kwargs)
        self.reranker = rerankers.Reranker(
            model_name=model_name, model_type=model_type, **kwargs
        )

    def _rerank(self, result_set: pa.Table, query: str):
        docs = result_set[self.column].to_pylist()

@@ -52,6 +52,7 @@ from .query import (
    AsyncHybridQuery,
    AsyncQuery,
    AsyncVectorQuery,
    FullTextQuery,
    LanceEmptyQueryBuilder,
    LanceFtsQueryBuilder,
    LanceHybridQueryBuilder,
@@ -919,7 +920,9 @@ class Table(ABC):
    @abstractmethod
    def search(
        self,
        query: Optional[Union[VEC, str, "PIL.Image.Image", Tuple]] = None,
        query: Optional[
            Union[VEC, str, "PIL.Image.Image", Tuple, FullTextQuery]
        ] = None,
        vector_column_name: Optional[str] = None,
        query_type: QueryType = "auto",
        ordering_field_name: Optional[str] = None,
@@ -1004,9 +1007,19 @@ class Table(ABC):

    @abstractmethod
    def _execute_query(
        self, query: Query, batch_size: Optional[int] = None
        self,
        query: Query,
        *,
        batch_size: Optional[int] = None,
        timeout: Optional[timedelta] = None,
    ) -> pa.RecordBatchReader: ...

    @abstractmethod
    def _explain_plan(self, query: Query, verbose: Optional[bool] = False) -> str: ...

    @abstractmethod
    def _analyze_plan(self, query: Query) -> str: ...

    @abstractmethod
    def _do_merge(
        self,
@@ -1262,16 +1275,21 @@ class Table(ABC):
        """

    @abstractmethod
    def add_columns(self, transforms: Dict[str, str]):
    def add_columns(
        self, transforms: Dict[str, str] | pa.Field | List[pa.Field] | pa.Schema
    ):
        """
        Add new columns with defined values.

        Parameters
        ----------
        transforms: Dict[str, str]
        transforms: Dict[str, str], pa.Field, List[pa.Field], pa.Schema
            A map of column name to a SQL expression to use to calculate the
            value of the new column. These expressions will be evaluated for
            each row in the table, and can reference existing columns.
            Alternatively, a pyarrow Field or Schema can be provided to add
            new columns with the specified data types. The new columns will
            be initialized with null values.
        """

    @abstractmethod
@@ -1339,6 +1357,21 @@ class Table(ABC):
        It can also be used to undo a `[Self::checkout]` operation
        """

    @abstractmethod
    def restore(self, version: Optional[int] = None):
        """Restore a version of the table. This is an in-place operation.

        This creates a new version where the data is equivalent to the
        specified previous version. Data is not copied (as of python-v0.2.1).

        Parameters
        ----------
        version : int, default None
            The version to restore. If unspecified then restores the currently
            checked out version. If the currently checked out version is the
            latest version then this is a no-op.
        """
|
||||
|
||||
@abstractmethod
|
||||
def list_versions(self) -> List[Dict[str, Any]]:
|
||||
"""List all versions of the table"""
|
||||
@@ -2013,7 +2046,9 @@ class LanceTable(Table):
    @overload
    def search(
        self,
        query: Optional[Union[VEC, str, "PIL.Image.Image", Tuple]] = None,
        query: Optional[
            Union[VEC, str, "PIL.Image.Image", Tuple, FullTextQuery]
        ] = None,
        vector_column_name: Optional[str] = None,
        query_type: Literal["hybrid"] = "hybrid",
        ordering_field_name: Optional[str] = None,
@@ -2032,7 +2067,9 @@ class LanceTable(Table):

    def search(
        self,
        query: Optional[Union[VEC, str, "PIL.Image.Image", Tuple]] = None,
        query: Optional[
            Union[VEC, str, "PIL.Image.Image", Tuple, FullTextQuery]
        ] = None,
        vector_column_name: Optional[str] = None,
        query_type: QueryType = "auto",
        ordering_field_name: Optional[str] = None,
@@ -2279,9 +2316,15 @@ class LanceTable(Table):
        LOOP.run(self._table.update(values, where=where, updates_sql=values_sql))

    def _execute_query(
        self, query: Query, batch_size: Optional[int] = None
        self,
        query: Query,
        *,
        batch_size: Optional[int] = None,
        timeout: Optional[timedelta] = None,
    ) -> pa.RecordBatchReader:
        async_iter = LOOP.run(self._table._execute_query(query, batch_size))
        async_iter = LOOP.run(
            self._table._execute_query(query, batch_size=batch_size, timeout=timeout)
        )

        def iter_sync():
            try:
@@ -2292,8 +2335,11 @@ class LanceTable(Table):

        return pa.RecordBatchReader.from_batches(async_iter.schema, iter_sync())

    def _explain_plan(self, query: Query) -> str:
        return LOOP.run(self._table._explain_plan(query))
    def _explain_plan(self, query: Query, verbose: Optional[bool] = False) -> str:
        return LOOP.run(self._table._explain_plan(query, verbose))

    def _analyze_plan(self, query: Query) -> str:
        return LOOP.run(self._table._analyze_plan(query))

    def _do_merge(
        self,
@@ -2442,7 +2488,9 @@ class LanceTable(Table):
        """
        return LOOP.run(self._table.index_stats(index_name))

    def add_columns(self, transforms: Dict[str, str]):
    def add_columns(
        self, transforms: Dict[str, str] | pa.field | List[pa.field] | pa.Schema
    ):
        LOOP.run(self._table.add_columns(transforms))

    def alter_columns(self, *alterations: Iterable[Dict[str, str]]):
@@ -3103,7 +3151,9 @@ class AsyncTable:
    @overload
    async def search(
        self,
        query: Optional[Union[VEC, str, "PIL.Image.Image", Tuple]] = None,
        query: Optional[
            Union[VEC, str, "PIL.Image.Image", Tuple, FullTextQuery]
        ] = None,
        vector_column_name: Optional[str] = None,
        query_type: Literal["vector"] = ...,
        ordering_field_name: Optional[str] = None,
@@ -3112,7 +3162,9 @@ class AsyncTable:

    async def search(
        self,
        query: Optional[Union[VEC, str, "PIL.Image.Image", Tuple]] = None,
        query: Optional[
            Union[VEC, str, "PIL.Image.Image", Tuple, FullTextQuery]
        ] = None,
        vector_column_name: Optional[str] = None,
        query_type: QueryType = "auto",
        ordering_field_name: Optional[str] = None,
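Editor's note: the widened search() overloads mean a structured FullTextQuery can now be passed wherever a plain query string was accepted, with query_type inferred as "fts". A rough sketch against a table with an FTS-indexed "text" column (the table and column are assumed, not shown here; the query shapes come from the tests later in this diff):

from lancedb.query import BoostQuery, MatchQuery

# Structured match instead of a raw string.
results = tbl.search(MatchQuery("puppy", "text")).limit(5).to_list()

# Up-rank "puppy" matches, down-rank "runs" matches.
results = tbl.search(
    BoostQuery(
        MatchQuery("puppy", "text"),
        MatchQuery("runs", "text"),
    )
).limit(5).to_list()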
@@ -3222,6 +3274,8 @@ class AsyncTable:
        if is_embedding(query):
            vector_query = query
            query_type = "vector"
        elif isinstance(query, FullTextQuery):
            query_type = "fts"
        elif isinstance(query, str):
            try:
                (
@@ -3342,13 +3396,15 @@ class AsyncTable:
            async_query = async_query.nearest_to_text(
                query.full_text_query.query, query.full_text_query.columns
            )
            if query.full_text_query.limit is not None:
                async_query = async_query.limit(query.full_text_query.limit)

        return async_query

    async def _execute_query(
        self, query: Query, batch_size: Optional[int] = None
        self,
        query: Query,
        *,
        batch_size: Optional[int] = None,
        timeout: Optional[timedelta] = None,
    ) -> pa.RecordBatchReader:
        # The sync table calls into this method, so we need to map the
        # query to the async version of the query and run that here. This is only
@@ -3356,12 +3412,19 @@ class AsyncTable:

        async_query = self._sync_query_to_async(query)

        return await async_query.to_batches(max_batch_length=batch_size)
        return await async_query.to_batches(
            max_batch_length=batch_size, timeout=timeout
        )

    async def _explain_plan(self, query: Query) -> str:
    async def _explain_plan(self, query: Query, verbose: Optional[bool]) -> str:
        # This method is used by the sync table
        async_query = self._sync_query_to_async(query)
        return await async_query.explain_plan()
        return await async_query.explain_plan(verbose)

    async def _analyze_plan(self, query: Query) -> str:
        # This method is used by the sync table
        async_query = self._sync_query_to_async(query)
        return await async_query.analyze_plan()

    async def _do_merge(
        self,
@@ -3501,7 +3564,9 @@ class AsyncTable:

        return await self._inner.update(updates_sql, where)

    async def add_columns(self, transforms: dict[str, str]):
    async def add_columns(
        self, transforms: dict[str, str] | pa.field | List[pa.field] | pa.Schema
    ):
        """
        Add new columns with defined values.

@@ -3511,8 +3576,19 @@ class AsyncTable:
            A map of column name to a SQL expression to use to calculate the
            value of the new column. These expressions will be evaluated for
            each row in the table, and can reference existing columns.
            Alternatively, you can pass a pyarrow field or schema to add
            new columns with NULLs.
        """
        await self._inner.add_columns(list(transforms.items()))
        if isinstance(transforms, pa.Field):
            transforms = [transforms]
        if isinstance(transforms, list) and all(
            {isinstance(f, pa.Field) for f in transforms}
        ):
            transforms = pa.schema(transforms)
        if isinstance(transforms, pa.Schema):
            await self._inner.add_columns_with_schema(transforms)
        else:
            await self._inner.add_columns(list(transforms.items()))
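Editor's note: the normalization above folds a single pa.Field or a list of pa.Field into a pa.Schema, so the spellings below should all be equivalent. A short sketch (column names are hypothetical):

import pyarrow as pa

# SQL expressions, as before.
await tbl.add_columns({"double_id": "id * 2"})

# New: declare columns by type only; values start as NULL.
await tbl.add_columns(pa.field("x", pa.int64()))
await tbl.add_columns([pa.field("x", pa.int64()), pa.field("y", pa.int64())])
await tbl.add_columns(pa.schema([pa.field("x", pa.int64())]))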
    async def alter_columns(self, *alterations: Iterable[dict[str, Any]]):
        """
@@ -3610,7 +3686,7 @@ class AsyncTable:
        """
        await self._inner.checkout_latest()

    async def restore(self):
    async def restore(self, version: Optional[int] = None):
        """
        Restore the table to the currently checked out version

@@ -3623,7 +3699,7 @@ class AsyncTable:
        Once the operation concludes the table will no longer be in a checked
        out state and the read_consistency_interval, if any, will apply.
        """
        await self._inner.restore()
        await self._inner.restore(version)

    async def optimize(
        self,

@@ -315,6 +315,11 @@ def test_table():
    db = lancedb.connect(uri, read_consistency_interval=timedelta(seconds=5))
    tbl = db.open_table("test_table")
    # --8<-- [end:table_eventual_consistency]
    # --8<-- [start:table_no_consistency]
    uri = "data/sample-lancedb"
    db = lancedb.connect(uri, read_consistency_interval=None)
    tbl = db.open_table("test_table")
    # --8<-- [end:table_no_consistency]
    # --8<-- [start:table_checkout_latest]
    tbl = db.open_table("test_table")

@@ -562,13 +567,19 @@ async def test_table_async():
    async_db = await lancedb.connect_async(uri, read_consistency_interval=timedelta(0))
    async_tbl = await async_db.open_table("test_table_async")
    # --8<-- [end:table_async_strong_consistency]
    # --8<-- [start:table_async_ventual_consistency]
    # --8<-- [start:table_async_eventual_consistency]
    uri = "data/sample-lancedb"
    async_db = await lancedb.connect_async(
        uri, read_consistency_interval=timedelta(seconds=5)
    )
    async_tbl = await async_db.open_table("test_table_async")
    # --8<-- [end:table_async_eventual_consistency]
    # --8<-- [start:table_async_no_consistency]
    uri = "data/sample-lancedb"
    async_db = await lancedb.connect_async(uri, read_consistency_interval=None)
    async_tbl = await async_db.open_table("test_table_async")
    # --8<-- [end:table_async_no_consistency]

    # --8<-- [start:table_async_checkout_latest]
    async_tbl = await async_db.open_table("test_table_async")

@@ -3,7 +3,6 @@


import re
from datetime import timedelta
import os

import lancedb
@@ -299,13 +298,11 @@ def test_create_exist_ok(tmp_db: lancedb.DBConnection):
@pytest.mark.asyncio
async def test_connect(tmp_path):
    db = await lancedb.connect_async(tmp_path)
    assert str(db) == f"ListingDatabase(uri={tmp_path}, read_consistency_interval=None)"

    db = await lancedb.connect_async(
        tmp_path, read_consistency_interval=timedelta(seconds=5)
    )
    assert str(db) == f"ListingDatabase(uri={tmp_path}, read_consistency_interval=5s)"

    db = await lancedb.connect_async(tmp_path, read_consistency_interval=None)
    assert str(db) == f"ListingDatabase(uri={tmp_path}, read_consistency_interval=None)"


@pytest.mark.asyncio
async def test_close(mem_db_async: lancedb.AsyncConnection):
@@ -453,7 +450,7 @@ async def test_open_table(tmp_path):
    assert tbl.name == "test"
    assert (
        re.search(
            r"NativeTable\(test, uri=.*test\.lance, read_consistency_interval=None\)",
            r"NativeTable\(test, uri=.*test\.lance, read_consistency_interval=5s\)",
            str(tbl),
        )
        is not None

@@ -12,6 +12,7 @@ import pyarrow as pa
import pytest
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
import requests

# These are integration tests for embedding functions.
# They are slow because they require downloading models
@@ -516,3 +517,61 @@ def test_voyageai_embedding_function():

    tbl.add(df)
    assert len(tbl.to_pandas()["vector"][0]) == voyageai.ndims()


@pytest.mark.slow
@pytest.mark.skipif(
    os.environ.get("VOYAGE_API_KEY") is None, reason="VOYAGE_API_KEY not set"
)
def test_voyageai_multimodal_embedding_function():
    voyageai = (
        get_registry().get("voyageai").create(name="voyage-multimodal-3", max_retries=0)
    )

    class Images(LanceModel):
        label: str
        image_uri: str = voyageai.SourceField()  # image uri as the source
        image_bytes: bytes = voyageai.SourceField()  # image bytes as the source
        vector: Vector(voyageai.ndims()) = voyageai.VectorField()  # vector column
        vec_from_bytes: Vector(voyageai.ndims()) = (
            voyageai.VectorField()
        )  # Another vector column

    db = lancedb.connect("~/lancedb")
    table = db.create_table("test", schema=Images, mode="overwrite")
    labels = ["cat", "cat", "dog", "dog", "horse", "horse"]
    uris = [
        "http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg",
        "http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg",
        "http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg",
        "http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg",
        "http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg",
        "http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg",
    ]
    # get each uri as bytes
    image_bytes = [requests.get(uri).content for uri in uris]
    table.add(
        pd.DataFrame({"label": labels, "image_uri": uris, "image_bytes": image_bytes})
    )
    assert len(table.to_pandas()["vector"][0]) == voyageai.ndims()


@pytest.mark.slow
@pytest.mark.skipif(
    os.environ.get("VOYAGE_API_KEY") is None, reason="VOYAGE_API_KEY not set"
)
def test_voyageai_multimodal_embedding_text_function():
    voyageai = (
        get_registry().get("voyageai").create(name="voyage-multimodal-3", max_retries=0)
    )

    class TextModel(LanceModel):
        text: str = voyageai.SourceField()
        vector: Vector(voyageai.ndims()) = voyageai.VectorField()

    df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
    db = lancedb.connect("~/lancedb")
    tbl = db.create_table("test", schema=TextModel, mode="overwrite")

    tbl.add(df)
    assert len(tbl.to_pandas()["vector"][0]) == voyageai.ndims()

@@ -20,7 +20,9 @@ from unittest import mock
import lancedb as ldb
from lancedb.db import DBConnection
from lancedb.index import FTS
from lancedb.query import BoostQuery, MatchQuery, MultiMatchQuery, PhraseQuery
import numpy as np
import pyarrow as pa
import pandas as pd
import pytest
from utils import exception_output
@@ -178,11 +180,47 @@ def test_search_fts(table, use_tantivy):
    results = table.search("puppy").select(["id", "text"]).to_list()
    assert len(results) == 10

    if not use_tantivy:
        # Test with a query
        results = (
            table.search(MatchQuery("puppy", "text"))
            .select(["id", "text"])
            .limit(5)
            .to_list()
        )
        assert len(results) == 5

        # Test boost query
        results = (
            table.search(
                BoostQuery(
                    MatchQuery("puppy", "text"),
                    MatchQuery("runs", "text"),
                )
            )
            .select(["id", "text"])
            .limit(5)
            .to_list()
        )
        assert len(results) == 5

        # Test multi match query
        table.create_fts_index("text2", use_tantivy=use_tantivy)
        results = (
            table.search(MultiMatchQuery("puppy", ["text", "text2"]))
            .select(["id", "text"])
            .limit(5)
            .to_list()
        )
        assert len(results) == 5
        assert len(results[0]) == 3  # id, text, _score


@pytest.mark.asyncio
async def test_fts_select_async(async_table):
    tbl = await async_table
    await tbl.create_index("text", config=FTS())
    await tbl.create_index("text2", config=FTS())
    results = (
        await tbl.query()
        .nearest_to_text("puppy")
@@ -193,6 +231,54 @@ async def test_fts_select_async(async_table):
    assert len(results) == 5
    assert len(results[0]) == 3  # id, text, _score

    # Test with FullTextQuery
    results = (
        await tbl.query()
        .nearest_to_text(MatchQuery("puppy", "text"))
        .select(["id", "text"])
        .limit(5)
        .to_list()
    )
    assert len(results) == 5
    assert len(results[0]) == 3  # id, text, _score

    # Test with BoostQuery
    results = (
        await tbl.query()
        .nearest_to_text(
            BoostQuery(
                MatchQuery("puppy", "text"),
                MatchQuery("runs", "text"),
            )
        )
        .select(["id", "text"])
        .limit(5)
        .to_list()
    )
    assert len(results) == 5
    assert len(results[0]) == 3  # id, text, _score

    # Test with MultiMatchQuery
    results = (
        await tbl.query()
        .nearest_to_text(MultiMatchQuery("puppy", ["text", "text2"]))
        .select(["id", "text"])
        .limit(5)
        .to_list()
    )
    assert len(results) == 5
    assert len(results[0]) == 3  # id, text, _score

    # Test with search() API
    results = (
        await (await tbl.search(MatchQuery("puppy", "text")))
        .select(["id", "text"])
        .limit(5)
        .to_list()
    )
    assert len(results) == 5
    assert len(results[0]) == 3  # id, text, _score


def test_search_fts_phrase_query(table):
    table.create_fts_index("text", use_tantivy=False, with_position=False)
@@ -207,6 +293,13 @@ def test_search_fts_phrase_query(table):
    assert len(results) > len(phrase_results)
    assert len(phrase_results) > 0

    # Test with a query
    phrase_results = (
        table.search(PhraseQuery("puppy runs", "text")).limit(100).to_list()
    )
    assert len(results) > len(phrase_results)
    assert len(phrase_results) > 0


@pytest.mark.asyncio
async def test_search_fts_phrase_query_async(async_table):
@@ -227,6 +320,16 @@ async def test_search_fts_phrase_query_async(async_table):
    assert len(results) > len(phrase_results)
    assert len(phrase_results) > 0

    # Test with a query
    phrase_results = (
        await async_table.query()
        .nearest_to_text(PhraseQuery("puppy runs", "text"))
        .limit(100)
        .to_list()
    )
    assert len(results) > len(phrase_results)
    assert len(phrase_results) > 0


def test_search_fts_specify_column(table):
    table.create_fts_index("text", use_tantivy=False)
@@ -524,3 +627,32 @@ def test_language(mem_db: DBConnection):
    # Stop words -> no results
    results = table.search("la", query_type="fts").limit(5).to_list()
    assert len(results) == 0


def test_fts_on_list(mem_db: DBConnection):
    data = pa.table(
        {
            "text": [
                ["lance database", "the", "search"],
                ["lance database"],
                ["lance", "search"],
                ["database", "search"],
                ["unrelated", "doc"],
            ],
            "vector": [
                [1.0, 2.0, 3.0],
                [4.0, 5.0, 6.0],
                [7.0, 8.0, 9.0],
                [10.0, 11.0, 12.0],
                [13.0, 14.0, 15.0],
            ],
        }
    )
    table = mem_db.create_table("test", data=data)
    table.create_fts_index("text", use_tantivy=False)

    res = table.search("lance").limit(5).to_list()
    assert len(res) == 3

    res = table.search(PhraseQuery("lance database", "text")).limit(5).to_list()
    assert len(res) == 2

@@ -114,6 +114,16 @@ async def test_explain_plan(table: AsyncTable):
    assert "LanceScan" in plan


@pytest.mark.asyncio
async def test_analyze_plan(table: AsyncTable):
    res = await (
        table.query().nearest_to_text("dog").nearest_to([0.1, 0.1]).analyze_plan()
    )

    assert "AnalyzeExec" in res
    assert "metrics=" in res


def test_normalize_scores():
    cases = [
        (pa.array([0.1, 0.4]), pa.array([0.0, 1.0])),

@@ -31,6 +31,7 @@ async def some_table(db_async):
        {
            "id": list(range(NROWS)),
            "vector": sample_fixed_size_list_array(NROWS, DIM),
            "fsb": pa.array([bytes([i]) for i in range(NROWS)], pa.binary(1)),
            "tags": [
                [f"tag{random.randint(0, 8)}" for _ in range(2)] for _ in range(NROWS)
            ],
@@ -85,6 +86,16 @@ async def test_create_scalar_index(some_table: AsyncTable):
    assert len(indices) == 0


@pytest.mark.asyncio
async def test_create_fixed_size_binary_index(some_table: AsyncTable):
    await some_table.create_index("fsb", config=BTree())
    indices = await some_table.list_indices()
    assert str(indices) == '[Index(BTree, columns=["fsb"], name="fsb_idx")]'
    assert len(indices) == 1
    assert indices[0].index_type == "BTree"
    assert indices[0].columns == ["fsb"]


@pytest.mark.asyncio
async def test_create_bitmap_index(some_table: AsyncTable):
    await some_table.create_index("id", config=Bitmap())

@@ -511,7 +511,8 @@ def test_query_builder_with_different_vector_column():
            columns=["b"],
            vector_column="foo_vector",
        ),
        None,
        batch_size=None,
        timeout=None,
    )


@@ -702,6 +703,20 @@ async def test_fast_search_async(tmp_path):
    assert "LanceScan" not in plan


def test_analyze_plan(table):
    q = LanceVectorQueryBuilder(table, [0, 0], "vector")
    res = q.analyze_plan()
    assert "AnalyzeExec" in res
    assert "metrics=" in res


@pytest.mark.asyncio
async def test_analyze_plan_async(table_async: AsyncTable):
    res = await table_async.query().nearest_to(pa.array([1, 2])).analyze_plan()
    assert "AnalyzeExec" in res
    assert "metrics=" in res


def test_explain_plan(table):
    q = LanceVectorQueryBuilder(table, [0, 0], "vector")
    plan = q.explain_plan(verbose=True)
@@ -1062,3 +1077,67 @@ async def test_query_serialization_async(table_async: AsyncTable):
        full_text_query=FullTextSearchQuery(columns=[], query="foo"),
        with_row_id=False,
    )


def test_query_timeout(tmp_path):
    # Use local directory instead of memory:// to add a bit of latency to
    # operations so a timeout of zero will trigger exceptions.
    db = lancedb.connect(tmp_path)
    data = pa.table(
        {
            "text": ["a", "b"],
            "vector": pa.FixedSizeListArray.from_arrays(
                pc.random(4).cast(pa.float32()), 2
            ),
        }
    )
    table = db.create_table("test", data)
    table.create_fts_index("text", use_tantivy=False)

    with pytest.raises(Exception, match="Query timeout"):
        table.search().where("text = 'a'").to_list(timeout=timedelta(0))

    with pytest.raises(Exception, match="Query timeout"):
        table.search([0.0, 0.0]).to_arrow(timeout=timedelta(0))

    with pytest.raises(Exception, match="Query timeout"):
        table.search("a", query_type="fts").to_pandas(timeout=timedelta(0))

    with pytest.raises(Exception, match="Query timeout"):
        table.search(query_type="hybrid").vector([0.0, 0.0]).text("a").to_arrow(
            timeout=timedelta(0)
        )


@pytest.mark.asyncio
async def test_query_timeout_async(tmp_path):
    db = await lancedb.connect_async(tmp_path)
    data = pa.table(
        {
            "text": ["a", "b"],
            "vector": pa.FixedSizeListArray.from_arrays(
                pc.random(4).cast(pa.float32()), 2
            ),
        }
    )
    table = await db.create_table("test", data)
    await table.create_index("text", config=FTS())

    with pytest.raises(Exception, match="Query timeout"):
        await table.query().where("text != 'a'").to_list(timeout=timedelta(0))

    with pytest.raises(Exception, match="Query timeout"):
        await table.vector_search([0.0, 0.0]).to_arrow(timeout=timedelta(0))

    with pytest.raises(Exception, match="Query timeout"):
        await (await table.search("a", query_type="fts")).to_pandas(
            timeout=timedelta(0)
        )

    with pytest.raises(Exception, match="Query timeout"):
        await (
            table.query()
            .nearest_to_text("a")
            .nearest_to([0.0, 0.0])
            .to_list(timeout=timedelta(0))
        )

@@ -444,6 +444,16 @@ def test_query_sync_fts():
            "prefilter": True,
            "with_row_id": True,
            "version": None,
        } or body == {
            "full_text_query": {
                "query": "puppy",
                "columns": ["description", "name"],
            },
            "k": 42,
            "vector": [],
            "prefilter": True,
            "with_row_id": True,
            "version": None,
        }

        return pa.table({"id": [1, 2, 3]})
@@ -32,7 +32,11 @@ def test_basic(mem_db: DBConnection):
    table = mem_db.create_table("test", data=data)

    assert table.name == "test"
    assert "LanceTable(name='test', version=1, _conn=LanceDBConnection(" in repr(table)
    assert (
        "LanceTable(name='test', version=1, "
        "read_consistency_interval=datetime.timedelta(seconds=5), "
        "_conn=LanceDBConnection("
    ) in repr(table)
    expected_schema = pa.schema(
        {
            "vector": pa.list_(pa.float32(), 2),
@@ -1384,6 +1388,37 @@ async def test_add_columns_async(mem_db_async: AsyncConnection):
    assert data["new_col"].to_pylist() == [2, 3]


@pytest.mark.asyncio
async def test_add_columns_with_schema(mem_db_async: AsyncConnection):
    data = pa.table({"id": [0, 1]})
    table = await mem_db_async.create_table("my_table", data=data)
    await table.add_columns(
        [pa.field("x", pa.int64()), pa.field("vector", pa.list_(pa.float32(), 8))]
    )

    assert await table.schema() == pa.schema(
        [
            pa.field("id", pa.int64()),
            pa.field("x", pa.int64()),
            pa.field("vector", pa.list_(pa.float32(), 8)),
        ]
    )

    table = await mem_db_async.create_table("table2", data=data)
    await table.add_columns(
        pa.schema(
            [pa.field("y", pa.int64()), pa.field("emb", pa.list_(pa.float32(), 8))]
        )
    )
    assert await table.schema() == pa.schema(
        [
            pa.field("id", pa.int64()),
            pa.field("y", pa.int64()),
            pa.field("emb", pa.list_(pa.float32(), 8)),
        ]
    )


def test_alter_columns(mem_db: DBConnection):
    data = pa.table({"id": [0, 1]})
    table = mem_db.create_table("my_table", data=data)

@@ -204,7 +204,9 @@ pub fn connect(
    }
    if let Some(read_consistency_interval) = read_consistency_interval {
        let read_consistency_interval = Duration::from_secs_f64(read_consistency_interval);
        builder = builder.read_consistency_interval(read_consistency_interval);
        builder = builder.read_consistency_interval(Some(read_consistency_interval));
    } else {
        builder = builder.read_consistency_interval(None);
    }
    if let Some(storage_options) = storage_options {
        builder = builder.storage_options(storage_options);
@@ -2,25 +2,26 @@
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use std::sync::Arc;
use std::time::Duration;

use arrow::array::make_array;
use arrow::array::Array;
use arrow::array::ArrayData;
use arrow::pyarrow::FromPyArrow;
use arrow::pyarrow::IntoPyArrow;
use lancedb::index::scalar::FullTextSearchQuery;
use lancedb::index::scalar::{FtsQuery, FullTextSearchQuery, MatchQuery, PhraseQuery};
use lancedb::query::QueryExecutionOptions;
use lancedb::query::QueryFilter;
use lancedb::query::{
    ExecutableQuery, Query as LanceDbQuery, QueryBase, Select, VectorQuery as LanceDbVectorQuery,
};
use lancedb::table::AnyQuery;
use pyo3::exceptions::PyNotImplementedError;
use pyo3::exceptions::PyRuntimeError;
use pyo3::exceptions::{PyNotImplementedError, PyValueError};
use pyo3::prelude::{PyAnyMethods, PyDictMethods};
use pyo3::pymethods;
use pyo3::types::PyDict;
use pyo3::types::PyList;
use pyo3::types::{PyDict, PyString};
use pyo3::Bound;
use pyo3::IntoPyObject;
use pyo3::PyAny;
@@ -31,7 +32,7 @@ use pyo3_async_runtimes::tokio::future_into_py;

use crate::arrow::RecordBatchStream;
use crate::error::PythonErrorExt;
use crate::util::parse_distance_type;
use crate::util::{parse_distance_type, parse_fts_query};

// Python representation of full text search parameters
#[derive(Clone)]
@@ -46,8 +47,8 @@ pub struct PyFullTextSearchQuery {
impl From<FullTextSearchQuery> for PyFullTextSearchQuery {
    fn from(query: FullTextSearchQuery) -> Self {
        PyFullTextSearchQuery {
            columns: query.columns,
            query: query.query,
            columns: query.columns().into_iter().collect(),
            query: query.query.query().to_owned(),
            limit: query.limit,
            wand_factor: query.wand_factor,
        }
@@ -236,29 +237,69 @@ impl Query {
    }

    pub fn nearest_to_text(&mut self, query: Bound<'_, PyDict>) -> PyResult<FTSQuery> {
        let query_text = query
        let fts_query = query
            .get_item("query")?
            .ok_or(PyErr::new::<PyRuntimeError, _>(
                "Query text is required for nearest_to_text",
            ))?
            .extract::<String>()?;
        let columns = query
            .get_item("columns")?
            .map(|columns| columns.extract::<Vec<String>>())
            .transpose()?;
            ))?;

        let fts_query = FullTextSearchQuery::new(query_text).columns(columns);
        let query = if let Ok(query_text) = fts_query.downcast::<PyString>() {
            let mut query_text = query_text.to_string();
            let columns = query
                .get_item("columns")?
                .map(|columns| columns.extract::<Vec<String>>())
                .transpose()?;

            let is_phrase =
                query_text.len() >= 2 && query_text.starts_with('"') && query_text.ends_with('"');
            let is_multi_match = columns.as_ref().map(|cols| cols.len() > 1).unwrap_or(false);

            if is_phrase {
                // Remove the surrounding quotes for phrase queries
                query_text = query_text[1..query_text.len() - 1].to_string();
            }

            let query: FtsQuery = match (is_phrase, is_multi_match) {
                (false, _) => MatchQuery::new(query_text).into(),
                (true, false) => PhraseQuery::new(query_text).into(),
                (true, true) => {
                    return Err(PyValueError::new_err(
                        "Phrase queries cannot be used with multiple columns.",
                    ));
                }
            };
            let mut query = FullTextSearchQuery::new_query(query);
            if let Some(cols) = columns {
                if !cols.is_empty() {
                    query = query.with_columns(&cols).map_err(|e| {
                        PyValueError::new_err(format!(
                            "Failed to set full text search columns: {}",
                            e
                        ))
                    })?;
                }
            }
            query
        } else if let Ok(query) = fts_query.downcast::<PyDict>() {
            let query = parse_fts_query(query)?;
            FullTextSearchQuery::new_query(query)
        } else {
            return Err(PyValueError::new_err(
                "query must be a string or a Query object",
            ));
        };

        Ok(FTSQuery {
            fts_query,
            inner: self.inner.clone(),
            fts_query: query,
        })
    }

    #[pyo3(signature = (max_batch_length=None))]
    #[pyo3(signature = (max_batch_length=None, timeout=None))]
    pub fn execute(
        self_: PyRef<'_, Self>,
        max_batch_length: Option<u32>,
        timeout: Option<Duration>,
    ) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner.clone();
        future_into_py(self_.py(), async move {
@@ -266,12 +307,15 @@ impl Query {
            if let Some(max_batch_length) = max_batch_length {
                opts.max_batch_length = max_batch_length;
            }
            if let Some(timeout) = timeout {
                opts.timeout = Some(timeout);
            }
            let inner_stream = inner.execute_with_options(opts).await.infer_error()?;
            Ok(RecordBatchStream::new(inner_stream))
        })
    }

    fn explain_plan(self_: PyRef<'_, Self>, verbose: bool) -> PyResult<Bound<'_, PyAny>> {
    pub fn explain_plan(self_: PyRef<'_, Self>, verbose: bool) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner.clone();
        future_into_py(self_.py(), async move {
            inner
@@ -281,6 +325,16 @@ impl Query {
        })
    }

    pub fn analyze_plan(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner.clone();
        future_into_py(self_.py(), async move {
            inner
                .analyze_plan()
                .await
                .map_err(|e| PyRuntimeError::new_err(e.to_string()))
        })
    }

    pub fn to_query_request(&self) -> PyQueryRequest {
        PyQueryRequest::from(AnyQuery::Query(self.inner.clone().into_request()))
    }
@@ -327,10 +381,11 @@ impl FTSQuery {
        self.inner = self.inner.clone().postfilter();
    }

    #[pyo3(signature = (max_batch_length=None))]
    #[pyo3(signature = (max_batch_length=None, timeout=None))]
    pub fn execute(
        self_: PyRef<'_, Self>,
        max_batch_length: Option<u32>,
        timeout: Option<Duration>,
    ) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_
            .inner
@@ -342,6 +397,9 @@ impl FTSQuery {
            if let Some(max_batch_length) = max_batch_length {
                opts.max_batch_length = max_batch_length;
            }
            if let Some(timeout) = timeout {
                opts.timeout = Some(timeout);
            }
            let inner_stream = inner.execute_with_options(opts).await.infer_error()?;
            Ok(RecordBatchStream::new(inner_stream))
        })
@@ -365,8 +423,18 @@ impl FTSQuery {
        })
    }

    pub fn analyze_plan(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner.clone();
        future_into_py(self_.py(), async move {
            inner
                .analyze_plan()
                .await
                .map_err(|e| PyRuntimeError::new_err(e.to_string()))
        })
    }

    pub fn get_query(&self) -> String {
        self.fts_query.query.clone()
        self.fts_query.query.query().to_owned()
    }

    pub fn to_query_request(&self) -> PyQueryRequest {
@@ -454,10 +522,11 @@ impl VectorQuery {
        self.inner = self.inner.clone().bypass_vector_index()
    }

    #[pyo3(signature = (max_batch_length=None))]
    #[pyo3(signature = (max_batch_length=None, timeout=None))]
    pub fn execute(
        self_: PyRef<'_, Self>,
        max_batch_length: Option<u32>,
        timeout: Option<Duration>,
    ) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner.clone();
        future_into_py(self_.py(), async move {
@@ -465,12 +534,15 @@ impl VectorQuery {
            if let Some(max_batch_length) = max_batch_length {
                opts.max_batch_length = max_batch_length;
            }
            if let Some(timeout) = timeout {
                opts.timeout = Some(timeout);
            }
            let inner_stream = inner.execute_with_options(opts).await.infer_error()?;
            Ok(RecordBatchStream::new(inner_stream))
        })
    }

    fn explain_plan(self_: PyRef<'_, Self>, verbose: bool) -> PyResult<Bound<'_, PyAny>> {
    pub fn explain_plan(self_: PyRef<'_, Self>, verbose: bool) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner.clone();
        future_into_py(self_.py(), async move {
            inner
@@ -480,6 +552,16 @@ impl VectorQuery {
        })
    }

    pub fn analyze_plan(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner.clone();
        future_into_py(self_.py(), async move {
            inner
                .analyze_plan()
                .await
                .map_err(|e| PyRuntimeError::new_err(e.to_string()))
        })
    }

    pub fn nearest_to_text(&mut self, query: Bound<'_, PyDict>) -> PyResult<HybridQuery> {
        let base_query = self.inner.clone().into_plain();
        let fts_query = Query::new(base_query).nearest_to_text(query)?;
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
use std::{collections::HashMap, sync::Arc};

use arrow::{
    datatypes::DataType,
    datatypes::{DataType, Schema},
    ffi_stream::ArrowArrayStreamReader,
    pyarrow::{FromPyArrow, ToPyArrow},
    pyarrow::{FromPyArrow, PyArrowType, ToPyArrow},
};
use lancedb::table::{
    AddDataMode, ColumnAlteration, Duration, NewColumnTransform, OptimizeAction, OptimizeOptions,
@@ -16,7 +18,6 @@ use pyo3::{
    Bound, FromPyObject, PyAny, PyRef, PyResult, Python,
};
use pyo3_async_runtimes::tokio::future_into_py;
use std::collections::HashMap;

use crate::{
    error::PythonErrorExt,
@@ -303,12 +304,16 @@ impl Table {
        })
    }

    pub fn restore(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
    #[pyo3(signature = (version=None))]
    pub fn restore(self_: PyRef<'_, Self>, version: Option<u64>) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(
            self_.py(),
            async move { inner.restore().await.infer_error() },
        )

        future_into_py(self_.py(), async move {
            if let Some(version) = version {
                inner.checkout(version).await.infer_error()?;
            }
            inner.restore().await.infer_error()
        })
    }

    pub fn query(&self) -> Query {
@@ -440,6 +445,20 @@ impl Table {
        })
    }

    pub fn add_columns_with_schema(
        self_: PyRef<'_, Self>,
        schema: PyArrowType<Schema>,
    ) -> PyResult<Bound<'_, PyAny>> {
        let arrow_schema = &schema.0;
        let transform = NewColumnTransform::AllNulls(Arc::new(arrow_schema.clone()));

        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            inner.add_columns(transform, None).await.infer_error()?;
            Ok(())
        })
    }

    pub fn alter_columns<'a>(
        self_: PyRef<'a, Self>,
        alterations: Vec<Bound<PyDict>>,

@@ -3,11 +3,15 @@

use std::sync::Mutex;

use lancedb::index::scalar::{BoostQuery, FtsQuery, MatchQuery, MultiMatchQuery, PhraseQuery};
use lancedb::DistanceType;
use pyo3::prelude::{PyAnyMethods, PyDictMethods, PyListMethods};
use pyo3::types::PyDict;
use pyo3::{
    exceptions::{PyRuntimeError, PyValueError},
    pyfunction, PyResult,
};
use pyo3::{Bound, PyAny};

/// A wrapper around a rust builder
///
@@ -59,3 +63,116 @@ pub fn validate_table_name(table_name: &str) -> PyResult<()> {
    lancedb::utils::validate_table_name(table_name)
        .map_err(|e| PyValueError::new_err(e.to_string()))
}

pub fn parse_fts_query(query: &Bound<'_, PyDict>) -> PyResult<FtsQuery> {
    let query_type = query.keys().get_item(0)?.extract::<String>()?;
    let query_value = query
        .get_item(&query_type)?
        .ok_or(PyValueError::new_err(format!(
            "Query type {} not found",
            query_type
        )))?;
    let query_value = query_value.downcast::<PyDict>()?;

    match query_type.as_str() {
        "match" => {
            let column = query_value.keys().get_item(0)?.extract::<String>()?;
            let params = query_value
                .get_item(&column)?
                .ok_or(PyValueError::new_err(format!(
                    "column {} not found",
                    column
                )))?;
            let params = params.downcast::<PyDict>()?;

            let query = params
                .get_item("query")?
                .ok_or(PyValueError::new_err("query not found"))?
                .extract::<String>()?;
            let boost = params
                .get_item("boost")?
                .ok_or(PyValueError::new_err("boost not found"))?
                .extract::<f32>()?;
            let fuzziness = params
                .get_item("fuzziness")?
                .ok_or(PyValueError::new_err("fuzziness not found"))?
                .extract::<Option<u32>>()?;
            let max_expansions = params
                .get_item("max_expansions")?
                .ok_or(PyValueError::new_err("max_expansions not found"))?
                .extract::<usize>()?;

            let query = MatchQuery::new(query)
                .with_column(Some(column))
                .with_boost(boost)
                .with_fuzziness(fuzziness)
                .with_max_expansions(max_expansions);
            Ok(query.into())
        }

        "match_phrase" => {
            let column = query_value.keys().get_item(0)?.extract::<String>()?;
            let query = query_value
                .get_item(&column)?
                .ok_or(PyValueError::new_err(format!(
                    "column {} not found",
                    column
                )))?
                .extract::<String>()?;

            let query = PhraseQuery::new(query).with_column(Some(column));
            Ok(query.into())
        }

        "boost" => {
            let positive: Bound<'_, PyAny> = query_value
                .get_item("positive")?
                .ok_or(PyValueError::new_err("positive not found"))?;
            let positive = positive.downcast::<PyDict>()?;

            let negative = query_value
                .get_item("negative")?
                .ok_or(PyValueError::new_err("negative not found"))?;
            let negative = negative.downcast::<PyDict>()?;

            let negative_boost = query_value
                .get_item("negative_boost")?
                .ok_or(PyValueError::new_err("negative_boost not found"))?
                .extract::<f32>()?;

            let positive_query = parse_fts_query(positive)?;
            let negative_query = parse_fts_query(negative)?;
            let query = BoostQuery::new(positive_query, negative_query, Some(negative_boost));

            Ok(query.into())
        }

        "multi_match" => {
            let query = query_value
                .get_item("query")?
                .ok_or(PyValueError::new_err("query not found"))?
                .extract::<String>()?;

            let columns = query_value
                .get_item("columns")?
                .ok_or(PyValueError::new_err("columns not found"))?
                .extract::<Vec<String>>()?;

            let boost = query_value
                .get_item("boost")?
                .ok_or(PyValueError::new_err("boost not found"))?
                .extract::<Vec<f32>>()?;

            let query =
                MultiMatchQuery::try_new_with_boosts(query, columns, boost).map_err(|e| {
                    PyValueError::new_err(format!("Error creating MultiMatchQuery: {}", e))
                })?;
            Ok(query.into())
        }

        _ => Err(PyValueError::new_err(format!(
            "Unsupported query type: {}",
            query_type
        ))),
    }
}
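Editor's note: working backwards from parse_fts_query, the dict payload the Python side is expected to serialize looks roughly like the following. These are hand-written illustrations inferred from the parser above, not generated output, and the field values are arbitrary:

match_query = {
    "match": {
        "text": {"query": "puppy", "boost": 1.0, "fuzziness": 0, "max_expansions": 50}
    }
}
phrase_query = {"match_phrase": {"text": "puppy runs"}}
boost_query = {
    "boost": {
        "positive": match_query,
        "negative": phrase_query,
        "negative_boost": 0.5,
    }
}
multi_match_query = {
    "multi_match": {"query": "puppy", "columns": ["text", "text2"], "boost": [1.0, 2.0]}
}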
@@ -1,6 +1,6 @@
[package]
name = "lancedb-node"
version = "0.18.1"
version = "0.19.0-beta.5"
description = "Serverless, low-latency vector database for AI applications"
license.workspace = true
edition.workspace = true

@@ -60,7 +60,7 @@ fn database_new(mut cx: FunctionContext) -> JsResult<JsPromise> {
    let mut conn_builder = connect(&path).storage_options(storage_options);

    if let Some(interval) = read_consistency_interval {
        conn_builder = conn_builder.read_consistency_interval(interval);
        conn_builder = conn_builder.read_consistency_interval(Some(interval));
    }
    rt.spawn(async move {
        let database = conn_builder.execute().await;

@@ -1,6 +1,6 @@
[package]
name = "lancedb"
version = "0.18.1"
version = "0.19.0-beta.5"
edition.workspace = true
description = "LanceDB: A serverless, low-latency vector database for AI applications"
license.workspace = true

@@ -12,7 +12,7 @@ use super::{
    Catalog, CatalogOptions, CreateDatabaseMode, CreateDatabaseRequest, DatabaseNamesRequest,
    OpenDatabaseRequest,
};
use crate::connection::ConnectRequest;
use crate::connection::{ConnectRequest, DEFAULT_READ_CONSISTENCY_INTERVAL};
use crate::database::listing::{ListingDatabase, ListingDatabaseOptions};
use crate::database::{Database, DatabaseOptions};
use crate::error::{CreateDirSnafu, Error, Result};
@@ -214,7 +214,7 @@ impl Catalog for ListingCatalog {
            uri: db_uri,
            #[cfg(feature = "remote")]
            client_config: Default::default(),
            read_consistency_interval: None,
            read_consistency_interval: DEFAULT_READ_CONSISTENCY_INTERVAL,
            options: Default::default(),
        };

@@ -241,7 +241,7 @@ impl Catalog for ListingCatalog {
            uri: db_path.to_string(),
            #[cfg(feature = "remote")]
            client_config: Default::default(),
            read_consistency_interval: None,
            read_consistency_interval: DEFAULT_READ_CONSISTENCY_INTERVAL,
            options: Default::default(),
        };

@@ -311,7 +311,7 @@ mod tests {
            #[cfg(feature = "remote")]
            client_config: Default::default(),
            options: Default::default(),
            read_consistency_interval: None,
            read_consistency_interval: DEFAULT_READ_CONSISTENCY_INTERVAL,
        };

        let catalog = ListingCatalog::connect(&request).await.unwrap();
@@ -36,6 +36,9 @@ pub use lance_encoding::version::LanceFileVersion;
#[cfg(feature = "remote")]
use lance_io::object_store::StorageOptions;

pub(crate) const DEFAULT_READ_CONSISTENCY_INTERVAL: Option<std::time::Duration> =
    Some(std::time::Duration::from_secs(5));

/// A builder for configuring a [`Connection::table_names`] operation
pub struct TableNamesBuilder {
    parent: Arc<dyn Database>,
@@ -139,12 +142,6 @@ impl CreateTableBuilder<true> {
        }
    }

    /// Apply the given write options when writing the initial data
    pub fn write_options(mut self, write_options: WriteOptions) -> Self {
        self.request.write_options = write_options;
        self
    }

    /// Execute the create table operation
    pub async fn execute(self) -> Result<Table> {
        let embedding_registry = self.embedding_registry.clone();
@@ -226,6 +223,12 @@ impl<const HAS_DATA: bool> CreateTableBuilder<HAS_DATA> {
        self
    }

    /// Apply the given write options when writing the initial data
    pub fn write_options(mut self, write_options: WriteOptions) -> Self {
        self.request.write_options = write_options;
        self
    }

    /// Set an option for the storage layer.
    ///
    /// Options already set on the connection will be inherited by the table,
@@ -618,14 +621,15 @@ pub struct ConnectRequest {

    /// The interval at which to check for updates from other processes.
    ///
    /// If None, then consistency is not checked. For performance
    /// reasons, this is the default. For strong consistency, set this to
    /// If None, then consistency is not checked. For strong consistency, set this to
    /// zero seconds. Then every read will check for updates from other
    /// processes. As a compromise, you can set this to a non-zero timedelta
    /// for eventual consistency. If more than that interval has passed since
    /// the last check, then the table will be checked for updates. Note: this
    /// consistency only applies to read operations. Write operations are
    /// always consistent.
    ///
    /// The default is 5 seconds.
    pub read_consistency_interval: Option<std::time::Duration>,
}

@@ -643,7 +647,7 @@ impl ConnectBuilder {
                uri: uri.to_string(),
                #[cfg(feature = "remote")]
                client_config: Default::default(),
                read_consistency_interval: None,
                read_consistency_interval: DEFAULT_READ_CONSISTENCY_INTERVAL,
                options: HashMap::new(),
            },
            embedding_registry: None,
@@ -782,8 +786,7 @@ impl ConnectBuilder {
    /// The interval at which to check for updates from other processes. This
    /// only affects LanceDB OSS.
    ///
    /// If left unset, consistency is not checked. For maximum read
    /// performance, this is the default. For strong consistency, set this to
    /// If left unset, consistency is not checked. For strong consistency, set this to
    /// zero seconds. Then every read will check for updates from other processes.
    /// As a compromise, set this to a non-zero duration for eventual consistency.
    /// If more than that duration has passed since the last read, the read will
@@ -792,13 +795,15 @@ impl ConnectBuilder {
    /// This only affects read operations. Write operations are always
    /// consistent.
    ///
    /// The default is 5 seconds.
    ///
    /// LanceDB Cloud uses eventual consistency under the hood, and is not
    /// currently configurable.
    pub fn read_consistency_interval(
        mut self,
        read_consistency_interval: std::time::Duration,
        read_consistency_interval: Option<std::time::Duration>,
    ) -> Self {
        self.request.read_consistency_interval = Some(read_consistency_interval);
        self.request.read_consistency_interval = read_consistency_interval;
        self
    }
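Editor's note: the practical effect of DEFAULT_READ_CONSISTENCY_INTERVAL is that new connections default to eventual consistency with a 5 second window instead of never checking, so callers who relied on the old unchecked behavior must now opt out explicitly. Roughly, from the Python API:

from datetime import timedelta
import lancedb

db = lancedb.connect("data/sample-lancedb")  # now defaults to ~5s eventual consistency
db_strong = lancedb.connect("data/sample-lancedb", read_consistency_interval=timedelta(0))
db_unchecked = lancedb.connect("data/sample-lancedb", read_consistency_interval=None)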
@@ -882,7 +887,7 @@ impl CatalogConnectBuilder {
                uri: uri.to_string(),
                #[cfg(feature = "remote")]
                client_config: Default::default(),
                read_consistency_interval: None,
                read_consistency_interval: DEFAULT_READ_CONSISTENCY_INTERVAL,
                options: HashMap::new(),
            },
        }

@@ -80,5 +80,6 @@ impl FtsIndexBuilder {
    }
}

pub use lance_index::scalar::inverted::query::*;
pub use lance_index::scalar::inverted::TokenizerConfig;
pub use lance_index::scalar::FullTextSearchQuery;

@@ -14,6 +14,9 @@ use object_store::{

use async_trait::async_trait;

#[cfg(test)]
pub mod io_tracking;

#[derive(Debug)]
struct MirroringObjectStore {
    primary: Arc<dyn ObjectStore>,
rust/lancedb/src/io/object_store/io_tracking.rs (new file, 237 lines)
@@ -0,0 +1,237 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use std::{
    fmt::{Display, Formatter},
    sync::{Arc, Mutex},
};

use bytes::Bytes;
use futures::stream::BoxStream;
use lance::io::WrappingObjectStore;
use object_store::{
    path::Path, GetOptions, GetResult, ListResult, MultipartUpload, ObjectMeta, ObjectStore,
    PutMultipartOpts, PutOptions, PutPayload, PutResult, Result as OSResult, UploadPart,
};

#[derive(Debug, Default)]
pub struct IoStats {
    pub read_iops: u64,
    pub read_bytes: u64,
    pub write_iops: u64,
    pub write_bytes: u64,
}

impl Display for IoStats {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:#?}", self)
    }
}

#[derive(Debug, Clone)]
pub struct IoTrackingStore {
    target: Arc<dyn ObjectStore>,
    stats: Arc<Mutex<IoStats>>,
}

impl Display for IoTrackingStore {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:#?}", self)
    }
}

#[derive(Debug, Default, Clone)]
pub struct IoStatsHolder(Arc<Mutex<IoStats>>);

impl IoStatsHolder {
    pub fn incremental_stats(&self) -> IoStats {
        std::mem::take(&mut self.0.lock().expect("failed to lock IoStats"))
    }
}

impl WrappingObjectStore for IoStatsHolder {
    fn wrap(&self, target: Arc<dyn ObjectStore>) -> Arc<dyn ObjectStore> {
        Arc::new(IoTrackingStore {
            target,
            stats: self.0.clone(),
        })
    }
}

impl IoTrackingStore {
    pub fn new_wrapper() -> (Arc<dyn WrappingObjectStore>, Arc<Mutex<IoStats>>) {
        let stats = Arc::new(Mutex::new(IoStats::default()));
        (Arc::new(IoStatsHolder(stats.clone())), stats)
    }

    fn record_read(&self, num_bytes: u64) {
        let mut stats = self.stats.lock().unwrap();
        stats.read_iops += 1;
        stats.read_bytes += num_bytes;
    }

    fn record_write(&self, num_bytes: u64) {
        let mut stats = self.stats.lock().unwrap();
        stats.write_iops += 1;
        stats.write_bytes += num_bytes;
    }
}

#[async_trait::async_trait]
#[deny(clippy::missing_trait_methods)]
impl ObjectStore for IoTrackingStore {
    async fn put(&self, location: &Path, bytes: PutPayload) -> OSResult<PutResult> {
        self.record_write(bytes.content_length() as u64);
        self.target.put(location, bytes).await
    }

    async fn put_opts(
        &self,
        location: &Path,
        bytes: PutPayload,
        opts: PutOptions,
    ) -> OSResult<PutResult> {
        self.record_write(bytes.content_length() as u64);
        self.target.put_opts(location, bytes, opts).await
    }

    async fn put_multipart(&self, location: &Path) -> OSResult<Box<dyn MultipartUpload>> {
        let target = self.target.put_multipart(location).await?;
        Ok(Box::new(IoTrackingMultipartUpload {
            target,
            stats: self.stats.clone(),
        }))
    }

    async fn put_multipart_opts(
        &self,
        location: &Path,
        opts: PutMultipartOpts,
    ) -> OSResult<Box<dyn MultipartUpload>> {
        let target = self.target.put_multipart_opts(location, opts).await?;
        Ok(Box::new(IoTrackingMultipartUpload {
            target,
            stats: self.stats.clone(),
        }))
    }

    async fn get(&self, location: &Path) -> OSResult<GetResult> {
        let result = self.target.get(location).await;
        if let Ok(result) = &result {
            let num_bytes = result.range.end - result.range.start;
            self.record_read(num_bytes as u64);
        }
        result
    }

    async fn get_opts(&self, location: &Path, options: GetOptions) -> OSResult<GetResult> {
        let result = self.target.get_opts(location, options).await;
        if let Ok(result) = &result {
            let num_bytes = result.range.end - result.range.start;
            self.record_read(num_bytes as u64);
        }
        result
    }

    async fn get_range(&self, location: &Path, range: std::ops::Range<usize>) -> OSResult<Bytes> {
        let result = self.target.get_range(location, range).await;
        if let Ok(result) = &result {
            self.record_read(result.len() as u64);
        }
        result
    }

    async fn get_ranges(
        &self,
        location: &Path,
        ranges: &[std::ops::Range<usize>],
    ) -> OSResult<Vec<Bytes>> {
        let result = self.target.get_ranges(location, ranges).await;
        if let Ok(result) = &result {
            self.record_read(result.iter().map(|b| b.len() as u64).sum());
        }
        result
    }

    async fn head(&self, location: &Path) -> OSResult<ObjectMeta> {
        self.record_read(0);
        self.target.head(location).await
    }

    async fn delete(&self, location: &Path) -> OSResult<()> {
        self.record_write(0);
        self.target.delete(location).await
    }

    fn delete_stream<'a>(
        &'a self,
        locations: BoxStream<'a, OSResult<Path>>,
    ) -> BoxStream<'a, OSResult<Path>> {
        self.target.delete_stream(locations)
    }

    fn list(&self, prefix: Option<&Path>) -> BoxStream<'_, OSResult<ObjectMeta>> {
        self.record_read(0);
        self.target.list(prefix)
    }

    fn list_with_offset(
        &self,
        prefix: Option<&Path>,
        offset: &Path,
    ) -> BoxStream<'_, OSResult<ObjectMeta>> {
        self.record_read(0);
        self.target.list_with_offset(prefix, offset)
    }

    async fn list_with_delimiter(&self, prefix: Option<&Path>) -> OSResult<ListResult> {
        self.record_read(0);
        self.target.list_with_delimiter(prefix).await
    }

    async fn copy(&self, from: &Path, to: &Path) -> OSResult<()> {
        self.record_write(0);
        self.target.copy(from, to).await
    }

    async fn rename(&self, from: &Path, to: &Path) -> OSResult<()> {
        self.record_write(0);
        self.target.rename(from, to).await
    }

    async fn rename_if_not_exists(&self, from: &Path, to: &Path) -> OSResult<()> {
        self.record_write(0);
        self.target.rename_if_not_exists(from, to).await
    }

    async fn copy_if_not_exists(&self, from: &Path, to: &Path) -> OSResult<()> {
        self.record_write(0);
        self.target.copy_if_not_exists(from, to).await
    }
}

#[derive(Debug)]
struct IoTrackingMultipartUpload {
    target: Box<dyn MultipartUpload>,
    stats: Arc<Mutex<IoStats>>,
}

#[async_trait::async_trait]
impl MultipartUpload for IoTrackingMultipartUpload {
    async fn abort(&mut self) -> OSResult<()> {
        self.target.abort().await
    }

    async fn complete(&mut self) -> OSResult<PutResult> {
        self.target.complete().await
    }

    fn put_part(&mut self, payload: PutPayload) -> UploadPart {
        {
            let mut stats = self.stats.lock().unwrap();
            stats.write_iops += 1;
            stats.write_bytes += payload.content_length() as u64;
        }
        self.target.put_part(payload)
    }
}
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use std::future::Future;
use std::sync::Arc;
use std::{future::Future, time::Duration};

use arrow::compute::concat_batches;
use arrow_array::{make_array, Array, Float16Array, Float32Array, Float64Array};
@@ -25,6 +25,7 @@ use crate::error::{Error, Result};
use crate::rerankers::rrf::RRFReranker;
use crate::rerankers::{check_reranker_result, NormalizeMethod, Reranker};
use crate::table::BaseTable;
use crate::utils::TimeoutStream;
use crate::DistanceType;
use crate::{arrow::SendableRecordBatchStream, table::AnyQuery};

@@ -525,12 +526,15 @@ pub struct QueryExecutionOptions {
    ///
    /// By default, this is 1024
    pub max_batch_length: u32,
    /// Max duration to wait for the query to execute before timing out.
    pub timeout: Option<Duration>,
}

impl Default for QueryExecutionOptions {
    fn default() -> Self {
        Self {
            max_batch_length: 1024,
            timeout: None,
        }
    }
}
@@ -579,6 +583,15 @@ pub trait ExecutableQuery {
    ) -> impl Future<Output = Result<SendableRecordBatchStream>> + Send;

    fn explain_plan(&self, verbose: bool) -> impl Future<Output = Result<String>> + Send;

    fn analyze_plan(&self) -> impl Future<Output = Result<String>> + Send {
        self.analyze_plan_with_options(QueryExecutionOptions::default())
    }

    fn analyze_plan_with_options(
        &self,
        options: QueryExecutionOptions,
    ) -> impl Future<Output = Result<String>> + Send;
}

/// A query filter that can be applied to a query
@@ -765,6 +778,11 @@ impl ExecutableQuery for Query {
        let query = AnyQuery::Query(self.request.clone());
        self.parent.explain_plan(&query, verbose).await
    }

    async fn analyze_plan_with_options(&self, options: QueryExecutionOptions) -> Result<String> {
        let query = AnyQuery::Query(self.request.clone());
        self.parent.analyze_plan(&query, options).await
    }
}

/// A request for a nearest-neighbors search into a table
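Editor's note: QueryExecutionOptions.timeout surfaces in the Python bindings as a timeout argument on the result-producing calls; when the budget is exceeded the stream aborts with a "Query timeout" error, as the tests earlier in this diff exercise. A small sketch (the 5 second budget is arbitrary):

from datetime import timedelta

batch = tbl.search([0.0, 0.0]).to_arrow(timeout=timedelta(seconds=5))
rows = tbl.search("a", query_type="fts").to_list(timeout=timedelta(seconds=5))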
@@ -993,7 +1011,10 @@ impl VectorQuery {
|
||||
self
|
||||
}
|
||||
|
||||
pub async fn execute_hybrid(&self) -> Result<SendableRecordBatchStream> {
|
||||
pub async fn execute_hybrid(
|
||||
&self,
|
||||
options: QueryExecutionOptions,
|
||||
) -> Result<SendableRecordBatchStream> {
|
||||
// clone query and specify we want to include row IDs, which can be needed for reranking
|
||||
let mut fts_query = Query::new(self.parent.clone());
|
||||
fts_query.request = self.request.base.clone();
|
||||
@@ -1002,7 +1023,10 @@ impl VectorQuery {
|
||||
let mut vector_query = self.clone().with_row_id();
|
||||
|
||||
vector_query.request.base.full_text_search = None;
|
||||
let (fts_results, vec_results) = try_join!(fts_query.execute(), vector_query.execute())?;
|
||||
let (fts_results, vec_results) = try_join!(
|
||||
fts_query.execute_with_options(options.clone()),
|
||||
vector_query.inner_execute_with_options(options)
|
||||
)?;
|
||||
|
||||
let (fts_results, vec_results) = try_join!(
|
||||
fts_results.try_collect::<Vec<_>>(),
|
||||
@@ -1042,7 +1066,7 @@ impl VectorQuery {
|
||||
})?;
|
||||
|
||||
let mut results = reranker
|
||||
.rerank_hybrid(&fts_query.query, vec_results, fts_results)
|
||||
.rerank_hybrid(&fts_query.query.query(), vec_results, fts_results)
|
||||
.await?;
|
||||
|
||||
check_reranker_result(&results)?;
|
||||
@@ -1060,6 +1084,20 @@ impl VectorQuery {
|
||||
RecordBatchStreamAdapter::new(results.schema(), stream::iter([Ok(results)])),
|
||||
))
|
||||
}
|
||||
|
||||
async fn inner_execute_with_options(
|
||||
&self,
|
||||
options: QueryExecutionOptions,
|
||||
) -> Result<SendableRecordBatchStream> {
|
||||
let plan = self.create_plan(options.clone()).await?;
|
||||
let inner = execute_plan(plan, Default::default())?;
|
||||
let inner = if let Some(timeout) = options.timeout {
|
||||
TimeoutStream::new_boxed(inner, timeout)
|
||||
} else {
|
||||
inner
|
||||
};
|
||||
Ok(DatasetRecordBatchStream::new(inner).into())
|
||||
}
|
||||
}
|
||||
|
||||
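
Note: with this signature change, one `QueryExecutionOptions` now covers the whole hybrid path: the FTS sub-query, the vector sub-query, and the reranking pass. A hedged sketch of the caller side, assuming a table with both a vector column and an FTS index (the query vector below is illustrative):

    use std::time::Duration;
    use lance_index::scalar::FullTextSearchQuery;
    use lancedb::query::{ExecutableQuery, QueryBase, QueryExecutionOptions};
    use lancedb::Table;

    async fn hybrid_search(table: &Table) -> lancedb::Result<()> {
        // execute_with_options sees full_text_search + nearest_to together and
        // routes through execute_hybrid, forwarding the options to both sides.
        let _results = table
            .query()
            .full_text_search(FullTextSearchQuery::new("hello world".into()))
            .nearest_to(vec![0.1_f32; 128])?
            .execute_with_options(QueryExecutionOptions {
                timeout: Some(Duration::from_secs(10)),
                ..Default::default()
            })
            .await?;
        Ok(())
    }
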
impl ExecutableQuery for VectorQuery {
@@ -1073,22 +1111,24 @@ impl ExecutableQuery for VectorQuery {
        options: QueryExecutionOptions,
    ) -> Result<SendableRecordBatchStream> {
        if self.request.base.full_text_search.is_some() {
            let hybrid_result = async move { self.execute_hybrid().await }.boxed().await?;
            let hybrid_result = async move { self.execute_hybrid(options).await }
                .boxed()
                .await?;
            return Ok(hybrid_result);
        }

        Ok(SendableRecordBatchStream::from(
            DatasetRecordBatchStream::new(execute_plan(
                self.create_plan(options).await?,
                Default::default(),
            )?),
        ))
        self.inner_execute_with_options(options).await
    }

    async fn explain_plan(&self, verbose: bool) -> Result<String> {
        let query = AnyQuery::VectorQuery(self.request.clone());
        self.parent.explain_plan(&query, verbose).await
    }

    async fn analyze_plan_with_options(&self, options: QueryExecutionOptions) -> Result<String> {
        let query = AnyQuery::VectorQuery(self.request.clone());
        self.parent.analyze_plan(&query, options).await
    }
}

impl HasQuery for VectorQuery {
@@ -1370,6 +1410,31 @@ mod tests {
        }
    }

    #[tokio::test]
    async fn test_analyze_plan() {
        let tmp_dir = tempdir().unwrap();
        let table = make_test_table(&tmp_dir).await;

        let result = table.query().analyze_plan().await.unwrap();
        assert!(result.contains("metrics="));
    }

    #[tokio::test]
    async fn test_analyze_plan_with_options() {
        let tmp_dir = tempdir().unwrap();
        let table = make_test_table(&tmp_dir).await;

        let result = table
            .query()
            .analyze_plan_with_options(QueryExecutionOptions {
                max_batch_length: 10,
                ..Default::default()
            })
            .await
            .unwrap();
        assert!(result.contains("metrics="));
    }

    fn assert_plan_exists(plan: &Arc<dyn ExecutionPlan>, name: &str) -> bool {
        if plan.name() == name {
            return true;

@@ -13,7 +13,7 @@ use reqwest::{
use crate::error::{Error, Result};
use crate::remote::db::RemoteOptions;

const REQUEST_ID_HEADER: &str = "x-request-id";
const REQUEST_ID_HEADER: HeaderName = HeaderName::from_static("x-request-id");

/// Configuration for the LanceDB Cloud HTTP client.
#[derive(Clone, Debug)]
@@ -299,7 +299,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
    ) -> Result<HeaderMap> {
        let mut headers = HeaderMap::new();
        headers.insert(
            "x-api-key",
            HeaderName::from_static("x-api-key"),
            HeaderValue::from_str(api_key).map_err(|_| Error::InvalidInput {
                message: "non-ascii api key provided".to_string(),
            })?,
@@ -307,7 +307,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
        if region == "local" {
            let host = format!("{}.local.api.lancedb.com", db_name);
            headers.insert(
                "Host",
                http::header::HOST,
                HeaderValue::from_str(&host).map_err(|_| Error::InvalidInput {
                    message: format!("non-ascii database name '{}' provided", db_name),
                })?,
@@ -315,7 +315,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
        }
        if has_host_override {
            headers.insert(
                "x-lancedb-database",
                HeaderName::from_static("x-lancedb-database"),
                HeaderValue::from_str(db_name).map_err(|_| Error::InvalidInput {
                    message: format!("non-ascii database name '{}' provided", db_name),
                })?,
@@ -323,7 +323,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
        }
        if db_prefix.is_some() {
            headers.insert(
                "x-lancedb-database-prefix",
                HeaderName::from_static("x-lancedb-database-prefix"),
                HeaderValue::from_str(db_prefix.unwrap()).map_err(|_| Error::InvalidInput {
                    message: format!(
                        "non-ascii database prefix '{}' provided",
@@ -335,7 +335,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {

        if let Some(v) = options.0.get("account_name") {
            headers.insert(
                "x-azure-storage-account-name",
                HeaderName::from_static("x-azure-storage-account-name"),
                HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
                    message: format!("non-ascii storage account name '{}' provided", db_name),
                })?,
@@ -343,7 +343,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
        }
        if let Some(v) = options.0.get("azure_storage_account_name") {
            headers.insert(
                "x-azure-storage-account-name",
                HeaderName::from_static("x-azure-storage-account-name"),
                HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
                    message: format!("non-ascii storage account name '{}' provided", db_name),
                })?,
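
A note on the header changes above: `http::HeaderName::from_static` validates the name once, up front, instead of re-parsing a `&str` on every `HeaderMap::insert`, and it lets the request-id header live in a `const`. A standalone sketch of the pattern (the header name here is illustrative):

    use http::header::{HeaderMap, HeaderName, HeaderValue};

    // Validated at construction; an invalid (non-lowercase, non-token) name is
    // rejected here rather than erroring on every request.
    const REQUEST_ID: HeaderName = HeaderName::from_static("x-request-id");

    fn stamp_request_id(headers: &mut HeaderMap, id: &str) {
        headers.insert(
            REQUEST_ID,
            HeaderValue::from_str(id).expect("ascii request id"),
        );
    }
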
@@ -52,6 +52,10 @@ impl ServerVersion {
    pub fn support_multivector(&self) -> bool {
        self.0 >= semver::Version::new(0, 2, 0)
    }

    pub fn support_structural_fts(&self) -> bool {
        self.0 >= semver::Version::new(0, 3, 0)
    }
}

pub const OPT_REMOTE_PREFIX: &str = "remote_database_";
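
`ServerVersion` gates client features on the minimum server release that understands them; the structured (query-object) FTS wire format used below requires server 0.3.0. A small illustration (assumes the tuple field is constructible as it is within this module):

    let old_server = ServerVersion(semver::Version::new(0, 2, 5));
    let new_server = ServerVersion(semver::Version::new(0, 3, 0));
    assert!(old_server.support_multivector());
    assert!(!old_server.support_structural_fts());
    assert!(new_server.support_structural_fts());
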
@@ -20,7 +20,7 @@ use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
use datafusion_physical_plan::{ExecutionPlan, RecordBatchStream, SendableRecordBatchStream};
use futures::TryStreamExt;
use http::header::CONTENT_TYPE;
use http::StatusCode;
use http::{HeaderName, StatusCode};
use lance::arrow::json::{JsonDataType, JsonSchema};
use lance::dataset::scanner::DatasetRecordBatchStream;
use lance::dataset::{ColumnAlteration, NewColumnTransform, Version};
@@ -44,6 +44,8 @@ use super::client::{HttpSend, RestfulLanceDbClient, Sender};
use super::db::ServerVersion;
use super::ARROW_STREAM_CONTENT_TYPE;

const REQUEST_TIMEOUT_HEADER: HeaderName = HeaderName::from_static("x-request-timeout-ms");

#[derive(Debug)]
pub struct RemoteTable<S: HttpSend = Sender> {
    #[allow(dead_code)]
@@ -155,7 +157,11 @@ impl<S: HttpSend> RemoteTable<S> {
        Ok(Box::pin(RecordBatchStreamAdapter::new(schema, stream)))
    }

    fn apply_query_params(body: &mut serde_json::Value, params: &QueryRequest) -> Result<()> {
    fn apply_query_params(
        &self,
        body: &mut serde_json::Value,
        params: &QueryRequest,
    ) -> Result<()> {
        body["prefilter"] = params.prefilter.into();
        if let Some(offset) = params.offset {
            body["offset"] = serde_json::Value::Number(serde_json::Number::from(offset));
@@ -209,10 +215,17 @@ impl<S: HttpSend> RemoteTable<S> {
                    message: "Wand factor is not yet supported in LanceDB Cloud".into(),
                });
            }
            body["full_text_query"] = serde_json::json!({
                "columns": full_text_search.columns,
                "query": full_text_search.query,
            })

            if self.server_version.support_structural_fts() {
                body["full_text_query"] = serde_json::json!({
                    "query": full_text_search.query.clone(),
                });
            } else {
                body["full_text_query"] = serde_json::json!({
                    "columns": full_text_search.columns().into_iter().collect::<Vec<_>>(),
                    "query": full_text_search.query.query(),
                })
            }
        }

        Ok(())
@@ -223,7 +236,7 @@ impl<S: HttpSend> RemoteTable<S> {
        mut body: serde_json::Value,
        query: &VectorQueryRequest,
    ) -> Result<Vec<serde_json::Value>> {
        Self::apply_query_params(&mut body, &query.base)?;
        self.apply_query_params(&mut body, &query.base)?;

        // Apply general parameters, before we dispatch based on number of query vectors.
        body["distance_type"] = serde_json::json!(query.distance_type.unwrap_or_default());
@@ -321,28 +334,25 @@ impl<S: HttpSend> RemoteTable<S> {
    async fn execute_query(
        &self,
        query: &AnyQuery,
        _options: QueryExecutionOptions,
        options: &QueryExecutionOptions,
    ) -> Result<Vec<Pin<Box<dyn RecordBatchStream + Send>>>> {
        let request = self.client.post(&format!("/v1/table/{}/query/", self.name));
        let mut request = self.client.post(&format!("/v1/table/{}/query/", self.name));

        let version = self.current_version().await;
        let mut body = serde_json::json!({ "version": version });
        if let Some(timeout) = options.timeout {
            // Client side timeout
            request = request.timeout(timeout);
            // Also send to server, so it can abort the query if it takes too long.
            // (If it doesn't fit into u64, it's not worth sending anyways.)
            if let Ok(timeout_ms) = u64::try_from(timeout.as_millis()) {
                request = request.header(REQUEST_TIMEOUT_HEADER, timeout_ms);
            }
        }

        let requests = match query {
            AnyQuery::Query(query) => {
                Self::apply_query_params(&mut body, query)?;
                // Empty vector can be passed if no vector search is performed.
                body["vector"] = serde_json::Value::Array(Vec::new());
                vec![request.json(&body)]
            }
            AnyQuery::VectorQuery(query) => {
                let bodies = self.apply_vector_query_params(body, query)?;
                bodies
                    .into_iter()
                    .map(|body| request.try_clone().unwrap().json(&body))
                    .collect()
            }
        };
        let query_bodies = self.prepare_query_bodies(query).await?;
        let requests: Vec<reqwest::RequestBuilder> = query_bodies
            .into_iter()
            .map(|body| request.try_clone().unwrap().json(&body))
            .collect();

        let futures = requests.into_iter().map(|req| async move {
            let (request_id, response) = self.client.send(req, true).await?;
@@ -351,6 +361,22 @@ impl<S: HttpSend> RemoteTable<S> {
        let streams = futures::future::try_join_all(futures).await?;
        Ok(streams)
    }

    async fn prepare_query_bodies(&self, query: &AnyQuery) -> Result<Vec<serde_json::Value>> {
        let version = self.current_version().await;
        let base_body = serde_json::json!({ "version": version });

        match query {
            AnyQuery::Query(query) => {
                let mut body = base_body.clone();
                self.apply_query_params(&mut body, query)?;
                // Empty vector can be passed if no vector search is performed.
                body["vector"] = serde_json::Value::Array(Vec::new());
                Ok(vec![body])
            }
            AnyQuery::VectorQuery(query) => self.apply_vector_query_params(base_body, query),
        }
    }
}
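
The timeout is now enforced on both ends for remote tables: reqwest aborts the request client-side, and the same budget is forwarded to the server as whole milliseconds in `x-request-timeout-ms`. A tiny sketch of the conversion guard used above:

    use std::time::Duration;

    // as_millis() returns u128; durations too large for u64 simply skip the
    // header rather than failing the query.
    let timeout = Duration::from_secs(2);
    assert_eq!(u64::try_from(timeout.as_millis()).ok(), Some(2000));
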
#[derive(Deserialize)]
@@ -422,10 +448,17 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
        Ok(())
    }
    async fn restore(&self) -> Result<()> {
        self.check_mutable().await?;
        Err(Error::NotSupported {
            message: "restore is not supported on LanceDB cloud.".into(),
        })
        let mut request = self
            .client
            .post(&format!("/v1/table/{}/restore/", self.name));
        let version = self.current_version().await;
        let body = serde_json::json!({ "version": version });
        request = request.json(&body);

        let (request_id, response) = self.client.send(request, true).await?;
        self.check_table_response(&request_id, response).await?;
        self.checkout_latest().await?;
        Ok(())
    }
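
Remote tables now implement `restore` instead of returning `NotSupported`: the client POSTs the currently checked-out version to `/v1/table/{name}/restore/`, then moves the handle back to tracking the latest version. A hedged usage sketch (assumes `table` is a remote `lancedb::Table` and that version 5 exists):

    async fn rollback(table: &lancedb::Table) -> lancedb::Result<()> {
        table.checkout(5).await?; // time-travel to version 5
        table.restore().await?; // server re-publishes it as the new latest
        Ok(())
    }
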
    async fn list_versions(&self) -> Result<Vec<Version>> {
@@ -522,7 +555,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
        query: &AnyQuery,
        options: QueryExecutionOptions,
    ) -> Result<Arc<dyn ExecutionPlan>> {
        let streams = self.execute_query(query, options).await?;
        let streams = self.execute_query(query, &options).await?;
        if streams.len() == 1 {
            let stream = streams.into_iter().next().unwrap();
            Ok(Arc::new(OneShotExec::new(stream)))
@@ -538,9 +571,9 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
    async fn query(
        &self,
        query: &AnyQuery,
        _options: QueryExecutionOptions,
        options: QueryExecutionOptions,
    ) -> Result<DatasetRecordBatchStream> {
        let streams = self.execute_query(query, _options).await?;
        let streams = self.execute_query(query, &options).await?;

        if streams.len() == 1 {
            Ok(DatasetRecordBatchStream::new(
@@ -559,6 +592,94 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
            )?))
        }
    }

    async fn explain_plan(&self, query: &AnyQuery, verbose: bool) -> Result<String> {
        let base_request = self
            .client
            .post(&format!("/v1/table/{}/explain_plan/", self.name));

        let query_bodies = self.prepare_query_bodies(query).await?;
        let requests: Vec<reqwest::RequestBuilder> = query_bodies
            .into_iter()
            .map(|query_body| {
                let explain_request = serde_json::json!({
                    "verbose": verbose,
                    "query": query_body
                });

                base_request.try_clone().unwrap().json(&explain_request)
            })
            .collect::<Vec<_>>();

        let futures = requests.into_iter().map(|req| async move {
            let (request_id, response) = self.client.send(req, true).await?;
            let response = self.check_table_response(&request_id, response).await?;
            let body = response.text().await.err_to_http(request_id.clone())?;

            serde_json::from_str(&body).map_err(|e| Error::Http {
                source: format!("Failed to parse explain plan: {}", e).into(),
                request_id,
                status_code: None,
            })
        });

        let plan_texts = futures::future::try_join_all(futures).await?;
        let final_plan = if plan_texts.len() > 1 {
            plan_texts
                .into_iter()
                .enumerate()
                .map(|(i, plan)| format!("--- Plan #{} ---\n{}", i + 1, plan))
                .collect::<Vec<_>>()
                .join("\n\n")
        } else {
            plan_texts.into_iter().next().unwrap_or_default()
        };

        Ok(final_plan)
    }

    async fn analyze_plan(
        &self,
        query: &AnyQuery,
        _options: QueryExecutionOptions,
    ) -> Result<String> {
        let request = self
            .client
            .post(&format!("/v1/table/{}/analyze_plan/", self.name));

        let query_bodies = self.prepare_query_bodies(query).await?;
        let requests: Vec<reqwest::RequestBuilder> = query_bodies
            .into_iter()
            .map(|body| request.try_clone().unwrap().json(&body))
            .collect();

        let futures = requests.into_iter().map(|req| async move {
            let (request_id, response) = self.client.send(req, true).await?;
            let response = self.check_table_response(&request_id, response).await?;
            let body = response.text().await.err_to_http(request_id.clone())?;

            serde_json::from_str(&body).map_err(|e| Error::Http {
                source: format!("Failed to execute analyze plan: {}", e).into(),
                request_id,
                status_code: None,
            })
        });

        let analyze_result_texts = futures::future::try_join_all(futures).await?;
        let final_analyze = if analyze_result_texts.len() > 1 {
            analyze_result_texts
                .into_iter()
                .enumerate()
                .map(|(i, plan)| format!("--- Query #{} ---\n{}", i + 1, plan))
                .collect::<Vec<_>>()
                .join("\n\n")
        } else {
            analyze_result_texts.into_iter().next().unwrap_or_default()
        };

        Ok(final_analyze)
    }

    async fn update(&self, update: UpdateBuilder) -> Result<u64> {
        self.check_mutable().await?;
        let request = self
@@ -581,6 +702,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {

        Ok(0) // TODO: support returning number of modified rows once supported in SaaS.
    }

    async fn delete(&self, predicate: &str) -> Result<()> {
        self.check_mutable().await?;
        let body = serde_json::json!({ "predicate": predicate });
@@ -938,6 +1060,7 @@ mod tests {
    use arrow_schema::{DataType, Field, Schema};
    use chrono::{DateTime, Utc};
    use futures::{future::BoxFuture, StreamExt, TryFutureExt};
    use lance_index::scalar::inverted::query::MatchQuery;
    use lance_index::scalar::FullTextSearchQuery;
    use reqwest::Body;
    use rstest::rstest;
@@ -1584,7 +1707,18 @@ mod tests {
                "prefilter": true,
                "version": null
            });
            assert_eq!(body, expected_body);
            let expected_body_2 = serde_json::json!({
                "full_text_query": {
                    "columns": ["b","a"],
                    "query": "hello world",
                },
                "k": 10,
                "vector": [],
                "with_row_id": true,
                "prefilter": true,
                "version": null
            });
            assert!(body == expected_body || body == expected_body_2);

            let data = RecordBatch::try_new(
                Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
@@ -1603,7 +1737,8 @@ mod tests {
            .query()
            .full_text_search(
                FullTextSearchQuery::new("hello world".into())
                    .columns(Some(vec!["a".into(), "b".into()])),
                    .with_columns(&["a".into(), "b".into()])
                    .unwrap(),
            )
            .with_row_id()
            .limit(10)
@@ -1612,6 +1747,66 @@ mod tests {
            .unwrap();
    }

    #[tokio::test]
    async fn test_query_structured_fts() {
        let table =
            Table::new_with_handler_version("my_table", semver::Version::new(0, 3, 0), |request| {
                assert_eq!(request.method(), "POST");
                assert_eq!(request.url().path(), "/v1/table/my_table/query/");
                assert_eq!(
                    request.headers().get("Content-Type").unwrap(),
                    JSON_CONTENT_TYPE
                );

                let body = request.body().unwrap().as_bytes().unwrap();
                let body: serde_json::Value = serde_json::from_slice(body).unwrap();
                let expected_body = serde_json::json!({
                    "full_text_query": {
                        "query": {
                            "match": {
                                "terms": "hello world",
                                "column": "a",
                                "boost": 1.0,
                                "fuzziness": 0,
                                "max_expansions": 50,
                            },
                        }
                    },
                    "k": 10,
                    "vector": [],
                    "with_row_id": true,
                    "prefilter": true,
                    "version": null
                });
                assert_eq!(body, expected_body);

                let data = RecordBatch::try_new(
                    Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
                    vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
                )
                .unwrap();
                let response_body = write_ipc_file(&data);
                http::Response::builder()
                    .status(200)
                    .header(CONTENT_TYPE, ARROW_FILE_CONTENT_TYPE)
                    .body(response_body)
                    .unwrap()
            });

        let _ = table
            .query()
            .full_text_search(FullTextSearchQuery::new_query(
                MatchQuery::new("hello world".to_owned())
                    .with_column(Some("a".to_owned()))
                    .into(),
            ))
            .with_row_id()
            .limit(10)
            .execute()
            .await
            .unwrap();
    }

    #[rstest]
    #[case(DEFAULT_SERVER_VERSION.clone())]
    #[case(semver::Version::new(0, 2, 0))]

@@ -33,7 +33,7 @@ use lance::dataset::{
use lance::dataset::{MergeInsertBuilder as LanceMergeInsertBuilder, WhenNotMatchedBySource};
use lance::index::vector::utils::infer_vector_dim;
use lance::io::WrappingObjectStore;
use lance_datafusion::exec::execute_plan;
use lance_datafusion::exec::{analyze_plan as lance_analyze_plan, execute_plan};
use lance_datafusion::utils::StreamingWriteSource;
use lance_index::vector::hnsw::builder::HnswBuildParams;
use lance_index::vector::ivf::IvfBuildParams;
@@ -68,7 +68,7 @@ use crate::query::{
use crate::utils::{
    default_vector_column, supported_bitmap_data_type, supported_btree_data_type,
    supported_fts_data_type, supported_label_list_data_type, supported_vector_data_type,
    PatchReadParam, PatchWriteParam,
    PatchReadParam, PatchWriteParam, TimeoutStream,
};

use self::dataset::DatasetConsistencyWrapper;
@@ -433,6 +433,12 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {

        Ok(format!("{}", display.indent(verbose)))
    }
    async fn analyze_plan(
        &self,
        query: &AnyQuery,
        options: QueryExecutionOptions,
    ) -> Result<String>;

    /// Add new records to the table.
    async fn add(
        &self,
@@ -1769,11 +1775,14 @@ impl NativeTable {
        query: &AnyQuery,
        options: QueryExecutionOptions,
    ) -> Result<DatasetRecordBatchStream> {
        let plan = self.create_plan(query, options).await?;
        Ok(DatasetRecordBatchStream::new(execute_plan(
            plan,
            Default::default(),
        )?))
        let plan = self.create_plan(query, options.clone()).await?;
        let inner = execute_plan(plan, Default::default())?;
        let inner = if let Some(timeout) = options.timeout {
            TimeoutStream::new_boxed(inner, timeout)
        } else {
            inner
        };
        Ok(DatasetRecordBatchStream::new(inner))
    }

    /// Check whether the table uses V2 manifest paths.
@@ -2192,6 +2201,15 @@ impl BaseTable for NativeTable {
        self.generic_query(query, options).await
    }

    async fn analyze_plan(
        &self,
        query: &AnyQuery,
        options: QueryExecutionOptions,
    ) -> Result<String> {
        let plan = self.create_plan(query, options).await?;
        Ok(lance_analyze_plan(plan, Default::default()).await?)
    }

    async fn merge_insert(
        &self,
        params: MergeInsertBuilder,
@@ -2611,7 +2629,7 @@ mod tests {
        let dataset_path = tmp_dir.path().join("test.lance");
        let uri = dataset_path.to_str().unwrap();
        let conn = connect(uri)
            .read_consistency_interval(Duration::from_secs(0))
            .read_consistency_interval(Some(Duration::from_secs(0)))
            .execute()
            .await
            .unwrap();
@@ -2694,7 +2712,7 @@ mod tests {
        let dataset_path = tmp_dir.path().join("test.lance");
        let uri = dataset_path.to_str().unwrap();
        let conn = connect(uri)
            .read_consistency_interval(Duration::from_secs(0))
            .read_consistency_interval(Some(Duration::from_secs(0)))
            .execute()
            .await
            .unwrap();
@@ -2891,7 +2909,7 @@ mod tests {
        let dataset_path = tmp_dir.path().join("test.lance");
        let uri = dataset_path.to_str().unwrap();
        let conn = connect(uri)
            .read_consistency_interval(Duration::from_secs(0))
            .read_consistency_interval(Some(Duration::from_secs(0)))
            .execute()
            .await
            .unwrap();
@@ -3462,7 +3480,8 @@ mod tests {

        let mut conn2 = ConnectBuilder::new(uri);
        if let Some(interval) = interval {
            conn2 = conn2.read_consistency_interval(std::time::Duration::from_millis(interval));
            conn2 = conn2
                .read_consistency_interval(Some(std::time::Duration::from_millis(interval)));
        }
        let conn2 = conn2.execute().await.unwrap();
        let table2 = conn2.open_table("my_table").execute().await.unwrap();
@@ -3498,7 +3517,7 @@ mod tests {
        let uri = tmp_dir.path().to_str().unwrap();

        let conn = ConnectBuilder::new(uri)
            .read_consistency_interval(Duration::from_secs(0))
            .read_consistency_interval(Some(Duration::from_secs(0)))
            .execute()
            .await
            .unwrap();
@@ -3519,7 +3538,7 @@ mod tests {
        let uri = tmp_dir.path().to_str().unwrap();

        let conn = ConnectBuilder::new(uri)
            .read_consistency_interval(Duration::from_secs(0))
            .read_consistency_interval(Some(Duration::from_secs(0)))
            .execute()
            .await
            .unwrap();
@@ -3594,7 +3613,7 @@ mod tests {
        let uri = tmp_dir.path().to_str().unwrap();

        let conn = ConnectBuilder::new(uri)
            .read_consistency_interval(Duration::from_secs(0))
            .read_consistency_interval(Some(Duration::from_secs(0)))
            .execute()
            .await
            .unwrap();
@@ -3656,7 +3675,7 @@ mod tests {
        let uri = tmp_dir.path().to_str().unwrap();

        let conn = ConnectBuilder::new(uri)
            .read_consistency_interval(Duration::from_secs(0))
            .read_consistency_interval(Some(Duration::from_secs(0)))
            .execute()
            .await
            .unwrap();

@@ -7,6 +7,7 @@ use std::{
    time::{self, Duration, Instant},
};

use futures::FutureExt;
use lance::Dataset;
use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};

@@ -22,13 +23,16 @@ pub struct DatasetConsistencyWrapper(Arc<RwLock<DatasetRef>>);
///
/// The dataset is lazily loaded, and starts off as None. On the first access,
/// the dataset is loaded.
#[derive(Debug, Clone)]
#[derive(Debug)]
enum DatasetRef {
    /// In this mode, the dataset is always the latest version.
    Latest {
        dataset: Dataset,
        read_consistency_interval: Option<Duration>,
        last_consistency_check: Option<time::Instant>,
        /// A background task loading the next version of the dataset. This happens
        /// in the background so as not to block the current thread.
        refresh_task: Option<tokio::task::JoinHandle<Result<Dataset>>>,
    },
    /// In this mode, the dataset is a specific version. It cannot be mutated.
    TimeTravel { dataset: Dataset, version: u64 },
@@ -41,9 +45,18 @@ impl DatasetRef {
            Self::Latest {
                dataset,
                last_consistency_check,
                refresh_task,
                ..
            } => {
                dataset.checkout_latest().await?;
                // Replace the refresh task
                if let Some(refresh_task) = refresh_task {
                    refresh_task.abort();
                }
                let mut new_dataset = dataset.clone();
                refresh_task.replace(tokio::spawn(async move {
                    new_dataset.checkout_latest().await?;
                    Ok(new_dataset)
                }));
                last_consistency_check.replace(Instant::now());
            }
            Self::TimeTravel { dataset, version } => {
@@ -57,26 +70,24 @@ impl DatasetRef {
        matches!(self, Self::Latest { .. })
    }

    async fn need_reload(&self) -> Result<bool> {
        Ok(match self {
            Self::Latest { dataset, .. } => {
                dataset.latest_version_id().await? != dataset.version().version
            }
            Self::TimeTravel { dataset, version } => dataset.version().version != *version,
        })
    fn strong_consistency(&self) -> bool {
        matches!(
            self,
            Self::Latest { read_consistency_interval: Some(interval), .. }
                if interval.as_nanos() == 0
        )
    }

    async fn as_latest(&mut self, read_consistency_interval: Option<Duration>) -> Result<()> {
        match self {
            Self::Latest { .. } => Ok(()),
            Self::TimeTravel { dataset, .. } => {
                dataset
                    .checkout_version(dataset.latest_version_id().await?)
                    .await?;
                dataset.checkout_latest().await?;
                *self = Self::Latest {
                    dataset: dataset.clone(),
                    read_consistency_interval,
                    last_consistency_check: Some(Instant::now()),
                    refresh_task: None,
                };
                Ok(())
            }
@@ -114,13 +125,74 @@ impl DatasetRef {
        match self {
            Self::Latest {
                dataset: ref mut ds,
                refresh_task,
                last_consistency_check,
                ..
            } => {
                *ds = dataset;
                if let Some(refresh_task) = refresh_task {
                    refresh_task.abort();
                }
                *refresh_task = None;
                *last_consistency_check = Some(Instant::now());
            }
            _ => unreachable!("Dataset should be in latest mode at this point"),
        }
    }

    /// Wait for the background refresh task to complete.
    async fn await_refresh(&mut self) -> Result<()> {
        if let Self::Latest {
            refresh_task: Some(refresh_task),
            read_consistency_interval,
            ..
        } = self
        {
            let dataset = refresh_task.await.expect("Refresh task panicked")?;
            *self = Self::Latest {
                dataset,
                read_consistency_interval: *read_consistency_interval,
                last_consistency_check: Some(Instant::now()),
                refresh_task: None,
            };
        }
        Ok(())
    }

    /// Check if background refresh task is done, and if so, update the dataset.
    fn check_refresh(&mut self) -> Result<()> {
        if let Self::Latest {
            refresh_task: Some(refresh_task),
            read_consistency_interval,
            ..
        } = self
        {
            if refresh_task.is_finished() {
                let dataset = refresh_task
                    .now_or_never()
                    .unwrap()
                    .expect("Refresh task panicked")?;
                *self = Self::Latest {
                    dataset,
                    read_consistency_interval: *read_consistency_interval,
                    last_consistency_check: Some(Instant::now()),
                    refresh_task: None,
                };
            }
        }
        Ok(())
    }

    fn refresh_is_ready(&self) -> bool {
        matches!(
            self,
            Self::Latest {
                refresh_task: Some(refresh_task),
                ..
            }
            if refresh_task.is_finished()
        )
    }
}
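
The design choice here: with a nonzero interval the wrapper is eventually consistent, so reads keep serving the current snapshot while `checkout_latest` runs in a background task (`check_refresh` picks it up with `now_or_never`), and only a zero interval (`strong_consistency`) blocks on `await_refresh`. A sketch of the two configurations (illustrative, in-memory database):

    use std::time::Duration;

    async fn open_both() -> lancedb::Result<()> {
        // Eventual consistency: reads may lag the latest version by up to 5s
        // while the refresh task runs in the background.
        let _eventual = lancedb::connect("memory://")
            .read_consistency_interval(Some(Duration::from_secs(5)))
            .execute()
            .await?;
        // Strong consistency: every read first awaits the refresh task.
        let _strong = lancedb::connect("memory://")
            .read_consistency_interval(Some(Duration::ZERO))
            .execute()
            .await?;
        Ok(())
    }
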
impl DatasetConsistencyWrapper {
@@ -130,6 +202,7 @@ impl DatasetConsistencyWrapper {
            dataset,
            read_consistency_interval,
            last_consistency_check: Some(Instant::now()),
            refresh_task: None,
        })))
    }

@@ -188,18 +261,9 @@ impl DatasetConsistencyWrapper {
    }

    pub async fn reload(&self) -> Result<()> {
        if !self.0.read().await.need_reload().await? {
            return Ok(());
        }

        let mut write_guard = self.0.write().await;
        // on lock escalation -- check if someone else has already reloaded
        if !write_guard.need_reload().await? {
            return Ok(());
        }

        // actually need reloading
        write_guard.reload().await
        write_guard.reload().await?;
        write_guard.await_refresh().await
    }

    /// Returns the version, if in time travel mode, or None otherwise
@@ -245,9 +309,26 @@ impl DatasetConsistencyWrapper {
    /// Ensures that the dataset is loaded and up-to-date with consistency and
    /// version parameters.
    async fn ensure_up_to_date(&self) -> Result<()> {
        // We may have previously created a background task to fetch the new
        // version of the dataset. If that task is done, we should update the
        // dataset.
        {
            let read_guard = self.0.read().await;
            if read_guard.refresh_is_ready() {
                drop(read_guard);
                self.0.write().await.check_refresh()?;
            }
        }

        if !self.is_up_to_date().await? {
            self.reload().await?;
        }

        // If we are in strong consistency mode, we should await the refresh task.
        if self.0.read().await.strong_consistency() {
            self.0.write().await.await_refresh().await?;
        }

        Ok(())
    }
}
@@ -290,3 +371,48 @@ impl DerefMut for DatasetWriteGuard<'_> {
        }
    }
}

#[cfg(test)]
mod tests {
    use arrow_schema::{DataType, Field, Schema};
    use lance::{dataset::WriteParams, io::ObjectStoreParams};

    use super::*;

    use crate::{connect, io::object_store::io_tracking::IoStatsHolder, table::WriteOptions};

    #[tokio::test]
    async fn test_iops_open_strong_consistency() {
        let db = connect("memory://")
            .read_consistency_interval(Some(Duration::ZERO))
            .execute()
            .await
            .expect("Failed to connect to database");
        let io_stats = IoStatsHolder::default();

        let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));

        let table = db
            .create_empty_table("test", schema)
            .write_options(WriteOptions {
                lance_write_params: Some(WriteParams {
                    store_params: Some(ObjectStoreParams {
                        object_store_wrapper: Some(Arc::new(io_stats.clone())),
                        ..Default::default()
                    }),
                    ..Default::default()
                }),
            })
            .execute()
            .await
            .unwrap();

        io_stats.incremental_stats();

        // We should only need 1 read IOP to check the schema: looking for the
        // latest version.
        table.schema().await.unwrap();
        let stats = io_stats.incremental_stats();
        assert_eq!(stats.read_iops, 1);
    }
}

@@ -3,14 +3,20 @@

use std::sync::Arc;

use arrow_schema::{DataType, Schema};
use arrow_array::RecordBatch;
use arrow_schema::{DataType, Schema, SchemaRef};
use datafusion_common::{DataFusionError, Result as DataFusionResult};
use datafusion_execution::RecordBatchStream;
use futures::{FutureExt, Stream};
use lance::arrow::json::JsonDataType;
use lance::dataset::{ReadParams, WriteParams};
use lance::index::vector::utils::infer_vector_dim;
use lance::io::{ObjectStoreParams, WrappingObjectStore};
use lazy_static::lazy_static;
use std::pin::Pin;

use crate::error::{Error, Result};
use datafusion_physical_plan::SendableRecordBatchStream;

lazy_static! {
    static ref TABLE_NAME_REGEX: regex::Regex = regex::Regex::new(r"^[a-zA-Z0-9_\-\.]+$").unwrap();
@@ -135,6 +141,7 @@ pub fn supported_btree_data_type(dtype: &DataType) -> bool {
            | DataType::Date32
            | DataType::Date64
            | DataType::Timestamp(_, _)
            | DataType::FixedSizeBinary(_)
    )
}

@@ -151,7 +158,17 @@ pub fn supported_label_list_data_type(dtype: &DataType) -> bool {
}

pub fn supported_fts_data_type(dtype: &DataType) -> bool {
    matches!(dtype, DataType::Utf8 | DataType::LargeUtf8)
    supported_fts_data_type_impl(dtype, false)
}

fn supported_fts_data_type_impl(dtype: &DataType, in_list: bool) -> bool {
    match (dtype, in_list) {
        (DataType::Utf8 | DataType::LargeUtf8, _) => true,
        (DataType::List(field) | DataType::LargeList(field), false) => {
            supported_fts_data_type_impl(field.data_type(), true)
        }
        _ => false,
    }
}
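
FTS columns may now also be one level of string list: `Utf8`/`LargeUtf8` directly, or a `List`/`LargeList` whose items are strings, but not a list of lists. A quick illustration using arrow-schema types:

    use std::sync::Arc;
    use arrow_schema::{DataType, Field};

    assert!(supported_fts_data_type(&DataType::Utf8));
    let list_of_str =
        DataType::List(Arc::new(Field::new("item", DataType::LargeUtf8, true)));
    assert!(supported_fts_data_type(&list_of_str));
    // Only one level of nesting is accepted.
    let nested = DataType::List(Arc::new(Field::new("item", list_of_str.clone(), true)));
    assert!(!supported_fts_data_type(&nested));
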
pub fn supported_vector_data_type(dtype: &DataType) -> bool {
@@ -177,11 +194,97 @@ pub fn string_to_datatype(s: &str) -> Option<DataType> {
    (&json_type).try_into().ok()
}

enum TimeoutState {
    NotStarted {
        timeout: std::time::Duration,
    },
    Started {
        deadline: Pin<Box<tokio::time::Sleep>>,
        timeout: std::time::Duration,
    },
    Completed,
}

/// A `Stream` wrapper that implements a timeout.
///
/// The timeout starts when the first `poll_next` is called. As soon as the timeout
/// duration has passed, the stream will return an `Err` indicating a timeout error
/// for the next poll.
pub struct TimeoutStream {
    inner: SendableRecordBatchStream,
    state: TimeoutState,
}

impl TimeoutStream {
    pub fn new(inner: SendableRecordBatchStream, timeout: std::time::Duration) -> Self {
        Self {
            inner,
            state: TimeoutState::NotStarted { timeout },
        }
    }

    pub fn new_boxed(
        inner: SendableRecordBatchStream,
        timeout: std::time::Duration,
    ) -> SendableRecordBatchStream {
        Box::pin(Self::new(inner, timeout))
    }

    fn timeout_error(timeout: &std::time::Duration) -> DataFusionError {
        DataFusionError::Execution(format!("Query timeout after {} ms", timeout.as_millis()))
    }
}

impl RecordBatchStream for TimeoutStream {
    fn schema(&self) -> SchemaRef {
        self.inner.schema()
    }
}

impl Stream for TimeoutStream {
    type Item = DataFusionResult<RecordBatch>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        match &mut self.state {
            TimeoutState::NotStarted { timeout } => {
                if timeout.is_zero() {
                    return std::task::Poll::Ready(Some(Err(Self::timeout_error(timeout))));
                }
                let deadline = Box::pin(tokio::time::sleep(*timeout));
                self.state = TimeoutState::Started {
                    deadline,
                    timeout: *timeout,
                };
                self.poll_next(cx)
            }
            TimeoutState::Started { deadline, timeout } => match deadline.poll_unpin(cx) {
                std::task::Poll::Ready(_) => {
                    let err = Self::timeout_error(timeout);
                    self.state = TimeoutState::Completed;
                    std::task::Poll::Ready(Some(Err(err)))
                }
                std::task::Poll::Pending => {
                    let inner = Pin::new(&mut self.inner);
                    inner.poll_next(cx)
                }
            },
            TimeoutState::Completed => std::task::Poll::Ready(None),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use arrow_array::Int32Array;
    use arrow_schema::Field;
    use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
    use futures::{stream, StreamExt};
    use tokio::time::sleep;

    use arrow_schema::{DataType, Field};
    use super::*;

    #[test]
    fn test_guess_default_column() {
@@ -248,4 +351,85 @@ mod tests {
        let expected = DataType::Int32;
        assert_eq!(string_to_datatype(string), Some(expected));
    }

    fn sample_batch() -> RecordBatch {
        let schema = Arc::new(Schema::new(vec![Field::new(
            "col1",
            DataType::Int32,
            false,
        )]));
        RecordBatch::try_new(
            schema.clone(),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
        )
        .unwrap()
    }

    #[tokio::test]
    async fn test_timeout_stream() {
        let batch = sample_batch();
        let schema = batch.schema();
        let mock_stream = stream::iter(vec![Ok(batch.clone()), Ok(batch.clone())]);

        let sendable_stream: SendableRecordBatchStream =
            Box::pin(RecordBatchStreamAdapter::new(schema.clone(), mock_stream));
        let timeout_duration = std::time::Duration::from_millis(10);
        let mut timeout_stream = TimeoutStream::new(sendable_stream, timeout_duration);

        // Poll the stream to get the first batch
        let first_result = timeout_stream.next().await;
        assert!(first_result.is_some());
        assert!(first_result.unwrap().is_ok());

        // Sleep for the timeout duration
        sleep(timeout_duration).await;

        // Poll the stream again and ensure it returns a timeout error
        let second_result = timeout_stream.next().await.unwrap();
        assert!(second_result.is_err());
        assert!(second_result
            .unwrap_err()
            .to_string()
            .contains("Query timeout"));
    }

    #[tokio::test]
    async fn test_timeout_stream_zero_duration() {
        let batch = sample_batch();
        let schema = batch.schema();
        let mock_stream = stream::iter(vec![Ok(batch.clone()), Ok(batch.clone())]);

        let sendable_stream: SendableRecordBatchStream =
            Box::pin(RecordBatchStreamAdapter::new(schema.clone(), mock_stream));

        // Setup similar to test_timeout_stream
        let timeout_duration = std::time::Duration::from_secs(0);
        let mut timeout_stream = TimeoutStream::new(sendable_stream, timeout_duration);

        // First poll should immediately return a timeout error
        let result = timeout_stream.next().await.unwrap();
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("Query timeout"));
    }

    #[tokio::test]
    async fn test_timeout_stream_completes_normally() {
        let batch = sample_batch();
        let schema = batch.schema();
        let mock_stream = stream::iter(vec![Ok(batch.clone()), Ok(batch.clone())]);

        let sendable_stream: SendableRecordBatchStream =
            Box::pin(RecordBatchStreamAdapter::new(schema.clone(), mock_stream));

        // Setup a stream with 2 batches
        // Use a longer timeout that won't trigger
        let timeout_duration = std::time::Duration::from_secs(1);
        let mut timeout_stream = TimeoutStream::new(sendable_stream, timeout_duration);

        // Both polls should return data normally
        assert!(timeout_stream.next().await.unwrap().is_ok());
        assert!(timeout_stream.next().await.unwrap().is_ok());
        // Stream should be empty now
        assert!(timeout_stream.next().await.is_none());
    }
}