Mirror of https://github.com/lancedb/lancedb.git, synced 2025-12-24 05:49:57 +00:00

Compare commits: python-v0.… → v0.22.2
90 Commits
| SHA1 |
|---|
| 01091cc056 |
| b7afed9beb |
| 5cbbaa2e4a |
| 1b6bd2498e |
| 285da9db1d |
| ad8306c96b |
| 3594538509 |
| 917aabd077 |
| 5ec12c9971 |
| d0ce489b21 |
| d7e02c8181 |
| 70958f6366 |
| 1ac745eb18 |
| 1357fe8aa1 |
| 0d78929893 |
| 9e2a68541e |
| 1aa0fd16e7 |
| fec2a05629 |
| 79a1cd60ee |
| 88807a59a4 |
| e0e7e01ea8 |
| a416ebc11d |
| f941054baf |
| 1a81c46505 |
| 82b25a71e9 |
| 13c613d45f |
| e07389a36c |
| e7e9e80b1d |
| 247fb58400 |
| 504bdc471c |
| d617cdef4a |
| 356d7046fd |
| 48e5caabda |
| d6cc68f671 |
| 55eacfa685 |
| 222e3264ab |
| 13505026cb |
| b0800b4b71 |
| 1befebf614 |
| 1ab60fae7f |
| e921c90c1b |
| 05a4ea646a |
| ebbeeff4e0 |
| 407ca53f92 |
| ff71d7e552 |
| 2261eb95a0 |
| 5b397e410b |
| b5a39bffec |
| 5e1e9add07 |
| 97e9938dfe |
| 1d4b92e01e |
| 4c9fc3044b |
| 0ebc8d45a8 |
| f7d78c3420 |
| 6ea6884260 |
| b1d791a299 |
| 8da74dcb37 |
| 3c7419b392 |
| e612686fdb |
| e77d57a5b6 |
| 9391ad1450 |
| 79960b254e |
| d19c64e29b |
| 06d5612443 |
| 45f96f4151 |
| f744b785f8 |
| 2e3f745820 |
| 683aaed716 |
| 48f7b20daa |
| 4dd399ca29 |
| e6f1da31dc |
| a9ea785b15 |
| cc38453391 |
| 47747287b6 |
| 0847e666a0 |
| 981f8427e6 |
| f6846004ca |
| faf8973624 |
| fabe37274f |
| 6839ac3509 |
| b88422e515 |
| 8d60685ede |
| 04285a4a4e |
| d4a41b5663 |
| adc3daa462 |
| acbfa6c012 |
| d602e9f98c |
| ad09234d59 |
| 0c34ffb252 |
| d9f333d828 |
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.21.2"
+current_version = "0.22.2"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
45 .github/actions/create-failure-issue/action.yml (vendored, new file)
@@ -0,0 +1,45 @@
+name: Create Failure Issue
+description: Creates a GitHub issue if any jobs in the workflow failed
+
+inputs:
+  job-results:
+    description: 'JSON string of job results from needs context'
+    required: true
+  workflow-name:
+    description: 'Name of the workflow'
+    required: true
+
+runs:
+  using: composite
+  steps:
+    - name: Check for failures and create issue
+      shell: bash
+      env:
+        JOB_RESULTS: ${{ inputs.job-results }}
+        WORKFLOW_NAME: ${{ inputs.workflow-name }}
+        RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
+        GH_TOKEN: ${{ github.token }}
+      run: |
+        # Check if any job failed
+        if echo "$JOB_RESULTS" | jq -e 'to_entries | any(.value.result == "failure")' > /dev/null; then
+          echo "Detected job failures, creating issue..."
+
+          # Extract failed job names
+          FAILED_JOBS=$(echo "$JOB_RESULTS" | jq -r 'to_entries | map(select(.value.result == "failure")) | map(.key) | join(", ")')
+
+          # Create issue with workflow name, failed jobs, and run URL
+          gh issue create \
+            --title "$WORKFLOW_NAME Failed ($FAILED_JOBS)" \
+            --body "The workflow **$WORKFLOW_NAME** failed during execution.
+
+          **Failed jobs:** $FAILED_JOBS
+
+          **Run URL:** $RUN_URL
+
+          Please investigate the failed jobs and address any issues." \
+            --label "ci"
+
+          echo "Issue created successfully"
+        else
+          echo "No job failures detected, skipping issue creation"
+        fi
14 .github/workflows/cargo-publish.yml (vendored)
@@ -38,3 +38,17 @@ jobs:
       - name: Publish the package
         run: |
           cargo publish -p lancedb --all-features --token ${{ steps.auth.outputs.token }}
+  report-failure:
+    name: Report Workflow Failure
+    runs-on: ubuntu-latest
+    needs: [build]
+    if: always() && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
+    permissions:
+      contents: read
+      issues: write
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/create-failure-issue
+        with:
+          job-results: ${{ toJSON(needs) }}
+          workflow-name: ${{ github.workflow }}
3 .github/workflows/docs.yml (vendored)
@@ -56,8 +56,9 @@ jobs:
         with:
           node-version: 20
           cache: 'npm'
+          cache-dependency-path: docs/package-lock.json
       - name: Install node dependencies
-        working-directory: node
+        working-directory: nodejs
        run: |
          sudo apt update
          sudo apt install -y protobuf-compiler libssl-dev
3 .github/workflows/docs_test.yml (vendored)
@@ -24,7 +24,8 @@ env:
 jobs:
   test-python:
     name: Test doc python code
-    runs-on: ubuntu-24.04
+    runs-on: warp-ubuntu-2204-x64-8x
+    timeout-minutes: 60
     steps:
       - name: Checkout
         uses: actions/checkout@v4
15 .github/workflows/java-publish.yml (vendored)
@@ -43,7 +43,6 @@ jobs:
       - uses: Swatinem/rust-cache@v2
       - uses: actions-rust-lang/setup-rust-toolchain@v1
         with:
-          toolchain: "1.81.0"
          cache-workspaces: "./java/core/lancedb-jni"
       # Disable full debug symbol generation to speed up CI build and keep memory down
       # "1" means line tables only, which is useful for panic tracebacks.
@@ -112,3 +111,17 @@ jobs:
         env:
           SONATYPE_USER: ${{ secrets.SONATYPE_USER }}
           SONATYPE_TOKEN: ${{ secrets.SONATYPE_TOKEN }}
+  report-failure:
+    name: Report Workflow Failure
+    runs-on: ubuntu-latest
+    needs: [linux-arm64, linux-x86, macos-arm64]
+    if: always() && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
+    permissions:
+      contents: read
+      issues: write
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/create-failure-issue
+        with:
+          job-results: ${{ toJSON(needs) }}
+          workflow-name: ${{ github.workflow }}
3 .github/workflows/nodejs.yml (vendored)
@@ -6,6 +6,7 @@ on:
       - main
   pull_request:
     paths:
+      - Cargo.toml
       - nodejs/**
       - .github/workflows/nodejs.yml
       - docker-compose.yml
@@ -116,7 +117,7 @@ jobs:
           set -e
           npm ci
           npm run docs
-          if ! git diff --exit-code -- . ':(exclude)Cargo.lock'; then
+          if ! git diff --exit-code -- ../ ':(exclude)Cargo.lock'; then
             echo "Docs need to be updated"
             echo "Run 'npm run docs', fix any warnings, and commit the changes."
             exit 1
14 .github/workflows/npm-publish.yml (vendored)
@@ -365,3 +365,17 @@ jobs:
             ARGS="$ARGS --tag preview"
           fi
           npm publish $ARGS
+  report-failure:
+    name: Report Workflow Failure
+    runs-on: ubuntu-latest
+    needs: [build-lancedb, test-lancedb, publish]
+    if: always() && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
+    permissions:
+      contents: read
+      issues: write
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/create-failure-issue
+        with:
+          job-results: ${{ toJSON(needs) }}
+          workflow-name: ${{ github.workflow }}
18 .github/workflows/pypi-publish.yml (vendored)
@@ -56,7 +56,7 @@ jobs:
       pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
       fury_token: ${{ secrets.FURY_TOKEN }}
   mac:
-    timeout-minutes: 60
+    timeout-minutes: 90
     runs-on: ${{ matrix.config.runner }}
     strategy:
       matrix:
@@ -64,7 +64,7 @@ jobs:
         - target: x86_64-apple-darwin
          runner: macos-13
         - target: aarch64-apple-darwin
-          runner: macos-14
+          runner: warp-macos-14-arm64-6x
     env:
       MACOSX_DEPLOYMENT_TARGET: 10.15
     steps:
@@ -173,3 +173,17 @@ jobs:
         generate_release_notes: false
         name: Python LanceDB v${{ steps.extract_version.outputs.version }}
         body: ${{ steps.python_release_notes.outputs.changelog }}
+  report-failure:
+    name: Report Workflow Failure
+    runs-on: ubuntu-latest
+    needs: [linux, mac, windows]
+    permissions:
+      contents: read
+      issues: write
+    if: always() && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/create-failure-issue
+        with:
+          job-results: ${{ toJSON(needs) }}
+          workflow-name: ${{ github.workflow }}
1 .github/workflows/python.yml (vendored)
@@ -6,6 +6,7 @@ on:
       - main
   pull_request:
     paths:
+      - Cargo.toml
       - python/**
       - .github/workflows/python.yml
 
13 .github/workflows/rust.yml (vendored)
@@ -96,6 +96,7 @@ jobs:
       # Need up-to-date compilers for kernels
       CC: clang-18
       CXX: clang++-18
+      GH_TOKEN: ${{ secrets.SOPHON_READ_TOKEN }}
     steps:
       - uses: actions/checkout@v4
         with:
@@ -117,15 +118,17 @@ jobs:
           sudo chmod 600 /swapfile
           sudo mkswap /swapfile
           sudo swapon /swapfile
-      - name: Start S3 integration test environment
-        working-directory: .
-        run: docker compose up --detach --wait
       - name: Build
         run: cargo build --all-features --tests --locked --examples
-      - name: Run tests
-        run: cargo test --all-features --locked
+      - name: Run feature tests
+        run: make -C ./lancedb feature-tests
       - name: Run examples
         run: cargo run --example simple --locked
+      - name: Run remote tests
+        # Running this requires access to secrets, so skip if this is
+        # a PR from a fork.
+        if: github.event_name != 'pull_request' || !github.event.pull_request.head.repo.fork
+        run: make -C ./lancedb remote-tests
+
   macos:
     timeout-minutes: 30
26 .github/workflows/trigger-vectordb-recipes.yml (vendored, deleted)
@@ -1,26 +0,0 @@
-name: Trigger vectordb-recipers workflow
-on:
-  push:
-    branches: [ main ]
-  pull_request:
-    paths:
-      - .github/workflows/trigger-vectordb-recipes.yml
-  workflow_dispatch:
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Trigger vectordb-recipes workflow
-        uses: actions/github-script@v6
-        with:
-          github-token: ${{ secrets.VECTORDB_RECIPES_ACTION_TOKEN }}
-          script: |
-            const result = await github.rest.actions.createWorkflowDispatch({
-              owner: 'lancedb',
-              repo: 'vectordb-recipes',
-              workflow_id: 'examples-test.yml',
-              ref: 'main'
-            });
-            console.log(result);
66 CLAUDE.md
@@ -13,10 +13,68 @@ Project layout:
 
 Common commands:
 
-* Check for compiler errors: `cargo check --features remote --tests --examples`
-* Run tests: `cargo test --features remote --tests`
-* Run specific test: `cargo test --features remote -p <package_name> --test <test_name>`
-* Lint: `cargo clippy --features remote --tests --examples`
+* Check for compiler errors: `cargo check --quiet --features remote --tests --examples`
+* Run tests: `cargo test --quiet --features remote --tests`
+* Run specific test: `cargo test --quiet --features remote -p <package_name> --test <test_name>`
+* Lint: `cargo clippy --quiet --features remote --tests --examples`
 * Format: `cargo fmt --all`
 
 Before committing changes, run formatting.
 
+## Coding tips
+
+* When writing Rust doctests for things that require a connection or table reference,
+  write them as a function instead of a fully executable test. This allows type checking
+  to run but avoids needing a full test environment. For example:
+
+  ```rust
+  /// ```
+  /// use lance_index::scalar::FullTextSearchQuery;
+  /// use lancedb::query::{QueryBase, ExecutableQuery};
+  ///
+  /// # use lancedb::Table;
+  /// # async fn query(table: &Table) -> Result<(), Box<dyn std::error::Error>> {
+  /// let results = table.query()
+  ///     .full_text_search(FullTextSearchQuery::new("hello world".into()))
+  ///     .execute()
+  ///     .await?;
+  /// # Ok(())
+  /// # }
+  /// ```
+  ```
+
+## Example plan: adding a new method on Table
+
+Adding a new method involves first adding it to the Rust core, then exposing it
+in the Python and TypeScript bindings. There are both local and remote tables.
+Remote tables are implemented via an HTTP API and require the `remote` cargo
+feature flag to be enabled. Python has both sync and async methods.
+
+Rust core changes:
+
+1. Add method on `Table` struct in `rust/lancedb/src/table.rs` (calls `BaseTable` trait).
+2. Add method to `BaseTable` trait in `rust/lancedb/src/table.rs`.
+3. Implement new trait method on `NativeTable` in `rust/lancedb/src/table.rs`.
+   * Test with unit test in `rust/lancedb/src/table.rs`.
+4. Implement new trait method on `RemoteTable` in `rust/lancedb/src/remote/table.rs`.
+   * Test with unit test in `rust/lancedb/src/remote/table.rs` against mocked endpoint.
+
+Python bindings changes:
+
+1. Add PyO3 method binding in `python/src/table.rs`. Run `make develop` to compile bindings.
+2. Add types for PyO3 method in `python/python/lancedb/_lancedb.pyi`.
+3. Add method to `AsyncTable` class in `python/python/lancedb/table.py`.
+4. Add abstract method to `Table` abstract base class in `python/python/lancedb/table.py`.
+5. Add concrete sync method to `LanceTable` class in `python/python/lancedb/table.py`.
+   * Should use `LOOP.run()` to call the corresponding `AsyncTable` method.
+6. Add concrete sync method to `RemoteTable` class in `python/python/lancedb/remote/table.py`.
+7. Add unit test in `python/tests/test_table.py`.
+
+TypeScript bindings changes:
+
+1. Add napi-rs method binding on `Table` in `nodejs/src/table.rs`.
+2. Run `npm run build` to generate TypeScript definitions.
+3. Add typescript method on abstract class `Table` in `nodejs/src/table.ts`.
+4. Add concrete method on `LocalTable` class in `nodejs/src/native_table.ts`.
+   * Note: despite the name, this class is also used for remote tables.
+5. Add test in `nodejs/__test__/table.test.ts`.
+6. Run `npm run docs` to generate TypeScript documentation.
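A minimal sketch of TypeScript steps 3 and 4 from the CLAUDE.md plan above, using a hypothetical `myNewMethod`; the method name, the `NativeTable` interface, and the constructor shape are illustrative assumptions, not the actual generated bindings:

```ts
// Assumed shape of the napi-rs handle generated from nodejs/src/table.rs
// (the real one is produced by `npm run build` in step 2).
interface NativeTable {
  myNewMethod(): Promise<number>;
}

// Step 3: declare the method on the abstract class (nodejs/src/table.ts).
abstract class Table {
  /** Hypothetical new method, added here purely for illustration. */
  abstract myNewMethod(): Promise<number>;
}

// Step 4: the concrete class (nodejs/src/native_table.ts) delegates to the
// native handle. Despite the name, LocalTable is also used for remote tables.
class LocalTable extends Table {
  constructor(private readonly inner: NativeTable) {
    super();
  }

  async myNewMethod(): Promise<number> {
    return this.inner.myNewMethod();
  }
}
```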
2500 Cargo.lock (generated): file diff suppressed because it is too large
59 Cargo.toml
@@ -15,30 +15,30 @@ categories = ["database-implementations"]
 rust-version = "1.78.0"
 
 [workspace.dependencies]
-lance = { "version" = "=0.33.0", "features" = ["dynamodb"] }
-lance-io = "=0.33.0"
-lance-index = "=0.33.0"
-lance-linalg = "=0.33.0"
-lance-table = "=0.33.0"
-lance-testing = "=0.33.0"
-lance-datafusion = "=0.33.0"
-lance-encoding = "=0.33.0"
+lance = { "version" = "=0.38.2", default-features = false, "features" = ["dynamodb"] }
+lance-io = { "version" = "=0.38.2", default-features = false }
+lance-index = "=0.38.2"
+lance-linalg = "=0.38.2"
+lance-table = "=0.38.2"
+lance-testing = "=0.38.2"
+lance-datafusion = "=0.38.2"
+lance-encoding = "=0.38.2"
+lance-namespace = "0.0.18"
 # Note that this one does not include pyarrow
-arrow = { version = "55.1", optional = false }
-arrow-array = "55.1"
-arrow-data = "55.1"
-arrow-ipc = "55.1"
-arrow-ord = "55.1"
-arrow-schema = "55.1"
-arrow-arith = "55.1"
-arrow-cast = "55.1"
+arrow = { version = "56.2", optional = false }
+arrow-array = "56.2"
+arrow-data = "56.2"
+arrow-ipc = "56.2"
+arrow-ord = "56.2"
+arrow-schema = "56.2"
+arrow-cast = "56.2"
 async-trait = "0"
-datafusion = { version = "48.0", default-features = false }
-datafusion-catalog = "48.0"
-datafusion-common = { version = "48.0", default-features = false }
-datafusion-execution = "48.0"
-datafusion-expr = "48.0"
-datafusion-physical-plan = "48.0"
+datafusion = { version = "50.1", default-features = false }
+datafusion-catalog = "50.1"
+datafusion-common = { version = "50.1", default-features = false }
+datafusion-execution = "50.1"
+datafusion-expr = "50.1"
+datafusion-physical-plan = "50.1"
 env_logger = "0.11"
 half = { "version" = "2.6.0", default-features = false, features = [
     "num-traits",
@@ -51,7 +51,6 @@ pin-project = "1.0.7"
 snafu = "0.8"
 url = "2"
 num-traits = "0.2"
-rand = "0.9"
 regex = "1.10"
 lazy_static = "1"
 semver = "1.0.25"
@@ -59,7 +58,17 @@ crunchy = "0.2.4"
 # Temporary pins to work around downstream issues
 # https://github.com/apache/arrow-rs/commit/2fddf85afcd20110ce783ed5b4cdeb82293da30b
 chrono = "=0.4.41"
-# https://github.com/RustCrypto/formats/issues/1684
-base64ct = "=1.6.0"
 # Workaround for: https://github.com/Lokathor/bytemuck/issues/306
 bytemuck_derive = ">=1.8.1, <1.9.0"
+
+# This is only needed when we reference preview releases of lance
+# [patch.crates-io]
+# # Force to use the same lance version as the rest of the project to avoid duplicate dependencies
+# lance = { "version" = "=0.38.0", "tag" = "v0.38.0", "git" = "https://github.com/lancedb/lance.git" }
+# lance-io = { "version" = "=0.38.0", "tag" = "v0.38.0", "git" = "https://github.com/lancedb/lance.git" }
+# lance-index = { "version" = "=0.38.0", "tag" = "v0.38.0", "git" = "https://github.com/lancedb/lance.git" }
+# lance-linalg = { "version" = "=0.38.0", "tag" = "v0.38.0", "git" = "https://github.com/lancedb/lance.git" }
+# lance-table = { "version" = "=0.38.0", "tag" = "v0.38.0", "git" = "https://github.com/lancedb/lance.git" }
+# lance-testing = { "version" = "=0.38.0", "tag" = "v0.38.0", "git" = "https://github.com/lancedb/lance.git" }
+# lance-datafusion = { "version" = "=0.38.0", "tag" = "v0.38.0", "git" = "https://github.com/lancedb/lance.git" }
+# lance-encoding = { "version" = "=0.38.0", "tag" = "v0.38.0", "git" = "https://github.com/lancedb/lance.git" }
4 ci/create_lancedb_test_connection.sh (new executable file)
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
+export RUST_LOG=info
+exec ./lancedb server --port 0 --sql-port 0 --data-dir "${1}"
18 ci/run_with_docker_compose.sh (new executable file)
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+#
+# A script for running the given command together with a docker compose environment.
+#
+
+# Bring down the docker setup once the command is done running.
+tear_down() {
+  docker compose -p fixture down
+}
+trap tear_down EXIT
+
+set +xe
+
+# Clean up any existing docker setup and bring up a new one.
+docker compose -p fixture up --detach --wait || exit 1
+
+"${@}"
68 ci/run_with_test_connection.sh (new executable file)
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+#
+# A script for running the given command together with the lancedb cli.
+#
+
+die() {
+  echo $?
+  exit 1
+}
+
+check_command_exists() {
+  command="${1}"
+  which ${command} &> /dev/null || \
+    die "Unable to locate command: ${command}. Did you install it?"
+}
+
+if [[ ! -e ./lancedb ]]; then
+  if [[ -v SOPHON_READ_TOKEN ]]; then
+    INPUT="lancedb-linux-x64"
+    gh release \
+      --repo lancedb/lancedb \
+      download ci-support-binaries \
+      --pattern "${INPUT}" \
+      || die "failed to fetch cli."
+    check_command_exists openssl
+    openssl enc -aes-256-cbc \
+      -d -pbkdf2 \
+      -pass "env:SOPHON_READ_TOKEN" \
+      -in "${INPUT}" \
+      -out ./lancedb-linux-x64.tar.gz \
+      || die "openssl failed"
+    TARGET="${INPUT}.tar.gz"
+  else
+    ARCH="x64"
+    if [[ $OSTYPE == 'darwin'* ]]; then
+      UNAME=$(uname -m)
+      if [[ $UNAME == 'arm64' ]]; then
+        ARCH='arm64'
+      fi
+      OSTYPE="macos"
+    elif [[ $OSTYPE == 'linux'* ]]; then
+      if [[ $UNAME == 'aarch64' ]]; then
+        ARCH='arm64'
+      fi
+      OSTYPE="linux"
+    else
+      die "unknown OSTYPE: $OSTYPE"
+    fi
+
+    check_command_exists gh
+    TARGET="lancedb-${OSTYPE}-${ARCH}.tar.gz"
+    gh release \
+      --repo lancedb/sophon \
+      download lancedb-cli-v0.0.3 \
+      --pattern "${TARGET}" \
+      || die "failed to fetch cli."
+  fi
+
+  check_command_exists tar
+  tar xvf "${TARGET}" || die "tar failed."
+  [[ -e ./lancedb ]] || die "failed to extract lancedb."
+fi
+
+SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
+export CREATE_LANCEDB_TEST_CONNECTION_SCRIPT="${SCRIPT_DIR}/create_lancedb_test_connection.sh"
+
+"${@}"
@@ -1,4 +1,5 @@
 import argparse
+import re
 import sys
 import json
 
@@ -18,8 +19,12 @@ def run_command(command: str) -> str:
 
 def get_latest_stable_version() -> str:
     version_line = run_command("cargo info lance | grep '^version:'")
-    version = version_line.split(" ")[1].strip()
-    return version
+    # Example output: "version: 0.35.0 (latest 0.37.0)"
+    match = re.search(r'\(latest ([0-9.]+)\)', version_line)
+    if match:
+        return match.group(1)
+    # Fallback: use the first version after 'version:'
+    return version_line.split("version:")[1].split()[0].strip()
 
 
 def get_latest_preview_version() -> str:
@@ -54,6 +59,52 @@ def extract_features(line: str) -> list:
     return []
 
 
+def extract_default_features(line: str) -> bool:
+    """
+    Checks if default-features = false is present in a line in Cargo.toml.
+    Example: 'lance = { "version" = "=0.29.0", default-features = false, "features" = ["dynamodb"] }'
+    Returns: True if default-features = false is present, False otherwise
+    """
+    import re
+
+    match = re.search(r'default-features\s*=\s*false', line)
+    return match is not None
+
+
+def dict_to_toml_line(package_name: str, config: dict) -> str:
+    """
+    Converts a configuration dictionary to a TOML dependency line.
+    Dictionary insertion order is preserved (Python 3.7+), so the caller
+    controls the order of fields in the output.
+
+    Args:
+        package_name: The name of the package (e.g., "lance", "lance-io")
+        config: Dictionary with keys like "version", "path", "git", "tag", "features", "default-features".
+            The order of keys in this dict determines the order in the output.
+
+    Returns:
+        A properly formatted TOML line with a trailing newline
+    """
+    # If only version is specified, use simple format
+    if len(config) == 1 and "version" in config:
+        return f'{package_name} = "{config["version"]}"\n'
+
+    # Otherwise, use inline table format
+    parts = []
+    for key, value in config.items():
+        if key == "default-features" and not value:
+            parts.append("default-features = false")
+        elif key == "features":
+            parts.append(f'"features" = {json.dumps(value)}')
+        elif isinstance(value, str):
+            parts.append(f'"{key}" = "{value}"')
+        else:
+            # This shouldn't happen with our current usage
+            parts.append(f'"{key}" = {json.dumps(value)}')
+
+    return f'{package_name} = {{ {", ".join(parts)} }}\n'
+
+
 def update_cargo_toml(line_updater):
     """
     Updates the Cargo.toml file by applying the line_updater function to each line.
@@ -66,21 +117,28 @@ def update_cargo_toml(line_updater):
     lance_line = ""
     is_parsing_lance_line = False
     for line in lines:
-        if line.startswith("lance"):
-            # Update the line using the provided function
-            if line.strip().endswith("}"):
+        if line.startswith("lance") and not line.startswith("lance-namespace"):
+            # Check if this is a single-line or multi-line entry
+            # Single-line entries either:
+            # 1. End with } (complete inline table)
+            # 2. End with " (simple version string)
+            # Multi-line entries start with { but don't end with }
+            if line.strip().endswith("}") or line.strip().endswith('"'):
+                # Single-line entry - process immediately
                 new_lines.append(line_updater(line))
-            else:
+            elif "{" in line and not line.strip().endswith("}"):
+                # Multi-line entry - start accumulating
                 lance_line = line
                 is_parsing_lance_line = True
+            else:
+                # Single-line entry without quotes or braces (shouldn't happen but handle it)
+                new_lines.append(line_updater(line))
         elif is_parsing_lance_line:
             lance_line += line
             if line.strip().endswith("}"):
                 new_lines.append(line_updater(lance_line))
                 lance_line = ""
                 is_parsing_lance_line = False
-            else:
-                print("doesn't end with }:", line)
         else:
             # Keep the line unchanged
             new_lines.append(line)
@@ -92,18 +150,25 @@
 def set_stable_version(version: str):
     """
     Sets lines to
-    lance = { "version" = "=0.29.0", "features" = ["dynamodb"] }
-    lance-io = "=0.29.0"
+    lance = { "version" = "=0.29.0", default-features = false, "features" = ["dynamodb"] }
+    lance-io = { "version" = "=0.29.0", default-features = false }
     ...
     """
 
     def line_updater(line: str) -> str:
         package_name = line.split("=", maxsplit=1)[0].strip()
+
+        # Build config in desired order: version, default-features, features
+        config = {"version": f"={version}"}
+
+        if extract_default_features(line):
+            config["default-features"] = False
+
         features = extract_features(line)
         if features:
-            return f'{package_name} = {{ "version" = "={version}", "features" = {json.dumps(features)} }}\n'
-        else:
-            return f'{package_name} = "={version}"\n'
+            config["features"] = features
+
+        return dict_to_toml_line(package_name, config)
 
     update_cargo_toml(line_updater)
 
@@ -111,19 +176,29 @@
 def set_preview_version(version: str):
     """
    Sets lines to
-    lance = { "version" = "=0.29.0", "features" = ["dynamodb"], tag = "v0.29.0-beta.2", git="https://github.com/lancedb/lance.git" }
-    lance-io = { version = "=0.29.0", tag = "v0.29.0-beta.2", git="https://github.com/lancedb/lance.git" }
+    lance = { "version" = "=0.29.0", default-features = false, "features" = ["dynamodb"], "tag" = "v0.29.0-beta.2", "git" = "https://github.com/lancedb/lance.git" }
+    lance-io = { "version" = "=0.29.0", default-features = false, "tag" = "v0.29.0-beta.2", "git" = "https://github.com/lancedb/lance.git" }
     ...
     """
 
     def line_updater(line: str) -> str:
         package_name = line.split("=", maxsplit=1)[0].strip()
-        features = extract_features(line)
         base_version = version.split("-")[0]  # Get the base version without beta suffix
+
+        # Build config in desired order: version, default-features, features, tag, git
+        config = {"version": f"={base_version}"}
+
+        if extract_default_features(line):
+            config["default-features"] = False
+
+        features = extract_features(line)
         if features:
-            return f'{package_name} = {{ "version" = "={base_version}", "features" = {json.dumps(features)}, "tag" = "v{version}", "git" = "https://github.com/lancedb/lance.git" }}\n'
-        else:
-            return f'{package_name} = {{ "version" = "={base_version}", "tag" = "v{version}", "git" = "https://github.com/lancedb/lance.git" }}\n'
+            config["features"] = features
+
+        config["tag"] = f"v{version}"
+        config["git"] = "https://github.com/lancedb/lance.git"
+
+        return dict_to_toml_line(package_name, config)
 
     update_cargo_toml(line_updater)
 
@@ -131,18 +206,25 @@
 def set_local_version():
     """
     Sets lines to
-    lance = { path = "../lance/rust/lance", features = ["dynamodb"] }
-    lance-io = { path = "../lance/rust/lance-io" }
+    lance = { "path" = "../lance/rust/lance", default-features = false, "features" = ["dynamodb"] }
+    lance-io = { "path" = "../lance/rust/lance-io", default-features = false }
     ...
     """
 
    def line_updater(line: str) -> str:
         package_name = line.split("=", maxsplit=1)[0].strip()
+
+        # Build config in desired order: path, default-features, features
+        config = {"path": f"../lance/rust/{package_name}"}
+
+        if extract_default_features(line):
+            config["default-features"] = False
+
        features = extract_features(line)
         if features:
-            return f'{package_name} = {{ "path" = "../lance/rust/{package_name}", "features" = {json.dumps(features)} }}\n'
-        else:
-            return f'{package_name} = {{ "path" = "../lance/rust/{package_name}" }}\n'
+            config["features"] = features
+
+        return dict_to_toml_line(package_name, config)
 
     update_cargo_toml(line_updater)
@@ -70,6 +70,23 @@ plugins:
   - mkdocs-jupyter
   - render_swagger:
       allow_arbitrary_locations: true
+  - redirects:
+      redirect_maps:
+        # Redirect the home page and other top-level markdown files. This enables maximum SEO benefit;
+        # other sub-pages are handled by the injected js in overrides/partials/header.html
+        'index.md': 'https://lancedb.com/docs/'
+        'guides/tables.md': 'https://lancedb.com/docs/tables/'
+        'ann_indexes.md': 'https://lancedb.com/docs/indexing/'
+        'basic.md': 'https://lancedb.com/docs/quickstart/'
+        'faq.md': 'https://lancedb.com/docs/faq/'
+        'embeddings/understanding_embeddings.md': 'https://lancedb.com/docs/embedding/'
+        'integrations.md': 'https://lancedb.com/docs/integrations/'
+        'examples.md': 'https://lancedb.com/docs/tutorials/'
+        'concepts/vector_search.md': 'https://lancedb.com/docs/search/vector-search/'
+        'troubleshooting.md': 'https://lancedb.com/docs/troubleshooting/'
+        'guides/storage.md': 'https://lancedb.com/docs/storage/integrations'
+
+
 
 markdown_extensions:
   - admonition
@@ -19,7 +19,13 @@
 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 IN THE SOFTWARE.
 -->
+<div id="deprecation-banner" style="background-color: #f8d7da; color: #721c24; padding: 1em; text-align: center;">
+  <p style="margin: 0; font-size: 1.1em;">
+    <strong>This documentation site is deprecated.</strong>
+    Please visit our new documentation site at <a href="https://lancedb.com/docs" style="color: #721c24; text-decoration: underline;">
+    lancedb.com/docs</a> for the latest information.
+  </p>
+</div>
 {% set class = "md-header" %}
 {% if "navigation.tabs.sticky" in features %}
 {% set class = class ~ " md-header--shadow md-header--lifted" %}
@@ -150,9 +156,9 @@
 
 <div style="margin-left: 10px; margin-right: 5px;">
 <a href="https://discord.com/invite/zMM32dvNtd" target="_blank" rel="noopener noreferrer">
 <svg fill="#FFFFFF" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 50 50" width="25px" height="25px"><path d="M 41.625 10.769531 C 37.644531 7.566406 31.347656 7.023438 31.078125 7.003906 C 30.660156 6.96875 30.261719 7.203125 30.089844 7.589844 C 30.074219 7.613281 29.9375 7.929688 29.785156 8.421875 C 32.417969 8.867188 35.652344 9.761719 38.578125 11.578125 C 39.046875 11.867188 39.191406 12.484375 38.902344 12.953125 C 38.710938 13.261719 38.386719 13.429688 38.050781 13.429688 C 37.871094 13.429688 37.6875 13.378906 37.523438 13.277344 C 32.492188 10.15625 26.210938 10 25 10 C 23.789063 10 17.503906 10.15625 12.476563 13.277344 C 12.007813 13.570313 11.390625 13.425781 11.101563 12.957031 C 10.808594 12.484375 10.953125 11.871094 11.421875 11.578125 C 14.347656 9.765625 17.582031 8.867188 20.214844 8.425781 C 20.0625 7.929688 19.925781 7.617188 19.914063 7.589844 C 19.738281 7.203125 19.34375 6.960938 18.921875 7.003906 C 18.652344 7.023438 12.355469 7.566406 8.320313 10.8125 C 6.214844 12.761719 2 24.152344 2 34 C 2 34.175781 2.046875 34.34375 2.132813 34.496094 C 5.039063 39.605469 12.972656 40.941406 14.78125 41 C 14.789063 41 14.800781 41 14.8125 41 C 15.132813 41 15.433594 40.847656 15.621094 40.589844 L 17.449219 38.074219 C 12.515625 36.800781 9.996094 34.636719 9.851563 34.507813 C 9.4375 34.144531 9.398438 33.511719 9.765625 33.097656 C 10.128906 32.683594 10.761719 32.644531 11.175781 33.007813 C 11.234375 33.0625 15.875 37 25 37 C 34.140625 37 38.78125 33.046875 38.828125 33.007813 C 39.242188 32.648438 39.871094 32.683594 40.238281 33.101563 C 40.601563 33.515625 40.5625 34.144531 40.148438 34.507813 C 40.003906 34.636719 37.484375 36.800781 32.550781 38.074219 L 34.378906 40.589844 C 34.566406 40.847656 34.867188 41 35.1875 41 C 35.199219 41 35.210938 41 35.21875 41 C 37.027344 40.941406 44.960938 39.605469 47.867188 34.496094 C 47.953125 34.34375 48 34.175781 48 34 C 48 24.152344 43.785156 12.761719 41.625 10.769531 Z M 18.5 30 C 16.566406 30 15 28.210938 15 26 C 15 23.789063 16.566406 22 18.5 22 C 20.433594 22 22 23.789063 22 26 C 22 28.210938 20.433594 30 18.5 30 Z M 31.5 30 C 29.566406 30 28 28.210938 28 26 C 28 23.789063 29.566406 22 31.5 22 C 33.433594 22 35 23.789063 35 26 C 35 28.210938 33.433594 30 31.5 30 Z"/></svg>
 </a>
 </div>
 <div style="margin-left: 5px; margin-right: 5px;">
 <a href="https://twitter.com/lancedb" target="_blank" rel="noopener noreferrer">
 <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0,0,256,256" width="25px" height="25px" fill-rule="nonzero"><g fill-opacity="0" fill="#ffffff" fill-rule="nonzero" stroke="none" stroke-width="1" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="10" stroke-dasharray="" stroke-dashoffset="0" font-family="none" font-weight="none" font-size="none" text-anchor="none" style="mix-blend-mode: normal"><path d="M0,256v-256h256v256z" id="bgRectangle"></path></g><g fill="#ffffff" fill-rule="nonzero" stroke="none" stroke-width="1" stroke-linecap="butt" stroke-linejoin="miter" stroke-miterlimit="10" stroke-dasharray="" stroke-dashoffset="0" font-family="none" font-weight="none" font-size="none" text-anchor="none" style="mix-blend-mode: normal"><g transform="scale(4,4)"><path d="M57,17.114c-1.32,1.973 -2.991,3.707 -4.916,5.097c0.018,0.423 0.028,0.847 0.028,1.274c0,13.013 -9.902,28.018 -28.016,28.018c-5.562,0 -12.81,-1.948 -15.095,-4.423c0.772,0.092 1.556,0.138 2.35,0.138c4.615,0 8.861,-1.575 12.23,-4.216c-4.309,-0.079 -7.946,-2.928 -9.199,-6.84c1.96,0.308 4.447,-0.17 4.447,-0.17c0,0 -7.7,-1.322 -7.899,-9.779c2.226,1.291 4.46,1.231 4.46,1.231c0,0 -4.441,-2.734 -4.379,-8.195c0.037,-3.221 1.331,-4.953 1.331,-4.953c8.414,10.361 20.298,10.29 20.298,10.29c0,0 -0.255,-1.471 -0.255,-2.243c0,-5.437 4.408,-9.847 9.847,-9.847c2.832,0 5.391,1.196 7.187,3.111c2.245,-0.443 4.353,-1.263 6.255,-2.391c-0.859,3.44 -4.329,5.448 -4.329,5.448c0,0 2.969,-0.329 5.655,-1.55z"></path></g></g></svg>
@@ -173,4 +179,77 @@
 {% include "partials/tabs.html" %}
 {% endif %}
 {% endif %}
 </header>
+
+<script>
+  (function() {
+    function checkPathAndRedirect() {
+      var banner = document.getElementById('deprecation-banner');
+
+      if (document.querySelector('meta[http-equiv="refresh"]')) {
+        return; // The redirects plugin is already handling this page.
+      }
+
+      var currentPath = window.location.pathname;
+
+      var cleanPath = currentPath.endsWith('/') && currentPath.length > 1
+        ? currentPath.slice(0, -1)
+        : currentPath;
+
+      // These are the ONLY paths that should remain on the old site
+      var apiPaths = [
+        '/lancedb/python',
+        '/lancedb/javascript',
+        '/lancedb/js',
+        '/lancedb/api_reference'
+      ];
+
+      var isApiPage = apiPaths.some(function(apiPath) {
+        return cleanPath.startsWith(apiPath);
+      });
+
+      if (isApiPage) {
+        if (banner) {
+          banner.style.display = 'none';
+        }
+      } else {
+        if (banner) {
+          banner.style.display = 'block';
+        }
+
+        // Add noindex meta tag to prevent indexing of old docs for seo
+        var noindexMeta = document.createElement('meta');
+        noindexMeta.setAttribute('name', 'robots');
+        noindexMeta.setAttribute('content', 'noindex, follow');
+        document.head.appendChild(noindexMeta);
+
+        // Add canonical link to point to the new docs to reward new site for seo
+        var canonicalLink = document.createElement('link');
+        canonicalLink.setAttribute('rel', 'canonical');
+        canonicalLink.setAttribute('href', 'https://lancedb.com/docs');
+        document.head.appendChild(canonicalLink);
+
+        window.location.replace('https://lancedb.com/docs');
+      }
+    }
+
+    // Run the check only if doc is ready. This makes sure we catch the initial load
+    // and redirect.
+    if (document.readyState === 'loading') {
+      document.addEventListener('DOMContentLoaded', checkPathAndRedirect);
+    } else {
+      checkPathAndRedirect();
+    }
+
+    // Use an interval to handle subsequent navigation clicks.
+    var lastPath = window.location.pathname;
+    setInterval(function() {
+      if (window.location.pathname !== lastPath) {
+        lastPath = window.location.pathname;
+        checkPathAndRedirect();
+      }
+    }, 2000); // keeping it at 2 seconds to make it easy for the user to understand
+    // what's happening
+
+  })();
+</script>
@@ -5,3 +5,4 @@ mkdocstrings[python]==0.25.2
 griffe
 mkdocs-render-swagger-plugin
 pydantic
+mkdocs-redirects
@@ -25,6 +25,51 @@ the underlying connection has been closed.
 
 ## Methods
 
+### cloneTable()
+
+```ts
+abstract cloneTable(
+   targetTableName,
+   sourceUri,
+   options?): Promise<Table>
+```
+
+Clone a table from a source table.
+
+A shallow clone creates a new table that shares the underlying data files
+with the source table but has its own independent manifest. This allows
+both the source and cloned tables to evolve independently while initially
+sharing the same data, deletion, and index files.
+
+#### Parameters
+
+* **targetTableName**: `string`
+    The name of the target table to create.
+
+* **sourceUri**: `string`
+    The URI of the source table to clone from.
+
+* **options?**
+    Clone options.
+
+* **options.isShallow?**: `boolean`
+    Whether to perform a shallow clone (defaults to true).
+
+* **options.sourceTag?**: `string`
+    The tag of the source table to clone.
+
+* **options.sourceVersion?**: `number`
+    The version of the source table to clone.
+
+* **options.targetNamespace?**: `string`[]
+    The namespace for the target table (defaults to root namespace).
+
+#### Returns
+
+`Promise`<[`Table`](Table.md)>
+
+***
+
 ### close()
 
 ```ts
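A usage sketch for the new `cloneTable()` API documented above, assuming the `@lancedb/lancedb` package; the database path and the source table URI are illustrative, and the option names follow the signature in the diff:

```ts
import * as lancedb from "@lancedb/lancedb";

async function cloneExample(): Promise<void> {
  const db = await lancedb.connect("data/sample-lancedb");

  // Shallow clone: shares data, deletion, and index files with the source
  // table while getting its own independent manifest.
  const clone = await db.cloneTable(
    "my_table_clone",
    "data/sample-lancedb/my_table.lance", // assumed source table URI
    { isShallow: true, sourceVersion: 3 },
  );

  console.log(await clone.countRows());
}
```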
@@ -45,6 +90,8 @@ Any attempt to use the connection after it is closed will result in an error.
 
 ### createEmptyTable()
 
+#### createEmptyTable(name, schema, options)
+
 ```ts
 abstract createEmptyTable(
    name,
@@ -54,7 +101,7 @@ abstract createEmptyTable(
 
 Creates a new empty Table
 
-#### Parameters
+##### Parameters
 
 * **name**: `string`
     The name of the table.
@@ -63,8 +110,39 @@ Creates a new empty Table
     The schema of the table
 
 * **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
+    Additional options (backwards compatibility)
 
-#### Returns
+##### Returns
+
+`Promise`<[`Table`](Table.md)>
+
+#### createEmptyTable(name, schema, namespace, options)
+
+```ts
+abstract createEmptyTable(
+   name,
+   schema,
+   namespace?,
+   options?): Promise<Table>
+```
+
+Creates a new empty Table
+
+##### Parameters
+
+* **name**: `string`
+    The name of the table.
+
+* **schema**: [`SchemaLike`](../type-aliases/SchemaLike.md)
+    The schema of the table
+
+* **namespace?**: `string`[]
+    The namespace to create the table in (defaults to root namespace)
+
+* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
+    Additional options
+
+##### Returns
 
 `Promise`<[`Table`](Table.md)>
 
@@ -72,10 +150,10 @@ Creates a new empty Table
 
 ### createTable()
 
-#### createTable(options)
+#### createTable(options, namespace)
 
 ```ts
-abstract createTable(options): Promise<Table>
+abstract createTable(options, namespace?): Promise<Table>
 ```
 
 Creates a new Table and initialize it with new data.
@@ -85,6 +163,9 @@ Creates a new Table and initialize it with new data.
 * **options**: `object` & `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
     The options object.
 
+* **namespace?**: `string`[]
+    The namespace to create the table in (defaults to root namespace)
+
 ##### Returns
 
 `Promise`<[`Table`](Table.md)>
@@ -110,6 +191,38 @@ Creates a new Table and initialize it with new data.
     to be inserted into the table
 
 * **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
+    Additional options (backwards compatibility)
+
+##### Returns
+
+`Promise`<[`Table`](Table.md)>
+
+#### createTable(name, data, namespace, options)
+
+```ts
+abstract createTable(
+   name,
+   data,
+   namespace?,
+   options?): Promise<Table>
+```
+
+Creates a new Table and initialize it with new data.
+
+##### Parameters
+
+* **name**: `string`
+    The name of the table.
+
+* **data**: [`TableLike`](../type-aliases/TableLike.md) \| `Record`<`string`, `unknown`>[]
+    Non-empty Array of Records to be inserted into the table
+
+* **namespace?**: `string`[]
+    The namespace to create the table in (defaults to root namespace)
+
+* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
+    Additional options
+
 ##### Returns
 
@@ -134,11 +247,16 @@ Return a brief description of the connection
|
|||||||
### dropAllTables()
|
### dropAllTables()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
abstract dropAllTables(): Promise<void>
|
abstract dropAllTables(namespace?): Promise<void>
|
||||||
```
|
```
|
||||||
|
|
||||||
Drop all tables in the database.
|
Drop all tables in the database.
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **namespace?**: `string`[]
|
||||||
|
The namespace to drop tables from (defaults to root namespace).
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`Promise`<`void`>
|
`Promise`<`void`>
|
||||||
@@ -148,7 +266,7 @@ Drop all tables in the database.
|
|||||||
### dropTable()
|
### dropTable()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
abstract dropTable(name): Promise<void>
|
abstract dropTable(name, namespace?): Promise<void>
|
||||||
```
|
```
|
||||||
|
|
||||||
Drop an existing table.
|
Drop an existing table.
|
||||||
@@ -158,6 +276,9 @@ Drop an existing table.
|
|||||||
* **name**: `string`
|
* **name**: `string`
|
||||||
The name of the table to drop.
|
The name of the table to drop.
|
||||||
|
|
||||||
|
* **namespace?**: `string`[]
|
||||||
|
The namespace of the table (defaults to root namespace).
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`Promise`<`void`>
|
`Promise`<`void`>
|
||||||
@@ -181,7 +302,10 @@ Return true if the connection has not been closed
|
|||||||
### openTable()
|
### openTable()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
abstract openTable(name, options?): Promise<Table>
|
abstract openTable(
|
||||||
|
name,
|
||||||
|
namespace?,
|
||||||
|
options?): Promise<Table>
|
||||||
```
|
```
|
||||||
|
|
||||||
Open a table in the database.
|
Open a table in the database.
|
||||||
@@ -191,7 +315,11 @@ Open a table in the database.
|
|||||||
* **name**: `string`
|
* **name**: `string`
|
||||||
The name of the table
|
The name of the table
|
||||||
|
|
||||||
|
* **namespace?**: `string`[]
|
||||||
|
The namespace of the table (defaults to root namespace)
|
||||||
|
|
||||||
* **options?**: `Partial`<[`OpenTableOptions`](../interfaces/OpenTableOptions.md)>
|
* **options?**: `Partial`<[`OpenTableOptions`](../interfaces/OpenTableOptions.md)>
|
||||||
|
Additional options
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -201,6 +329,8 @@ Open a table in the database.
|
|||||||
|
|
||||||
### tableNames()
|
### tableNames()
|
||||||
|
|
||||||
|
#### tableNames(options)
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
abstract tableNames(options?): Promise<string[]>
|
abstract tableNames(options?): Promise<string[]>
|
||||||
```
|
```
|
||||||
@@ -209,12 +339,35 @@ List all the table names in this database.
|
|||||||
|
|
||||||
Tables will be returned in lexicographical order.
|
Tables will be returned in lexicographical order.
|
||||||
|
|
||||||
#### Parameters
|
##### Parameters
|
||||||
|
|
||||||
|
* **options?**: `Partial`<[`TableNamesOptions`](../interfaces/TableNamesOptions.md)>
|
||||||
|
options to control the
|
||||||
|
paging / start point (backwards compatibility)
|
||||||
|
|
||||||
|
##### Returns
|
||||||
|
|
||||||
|
`Promise`<`string`[]>
|
||||||
|
|
||||||
|
#### tableNames(namespace, options)
|
||||||
|
|
||||||
|
```ts
|
||||||
|
abstract tableNames(namespace?, options?): Promise<string[]>
|
||||||
|
```
|
||||||
|
|
||||||
|
List all the table names in this database.
|
||||||
|
|
||||||
|
Tables will be returned in lexicographical order.
|
||||||
|
|
||||||
|
##### Parameters
|
||||||
|
|
||||||
|
* **namespace?**: `string`[]
|
||||||
|
The namespace to list tables from (defaults to root namespace)
|
||||||
|
|
||||||
* **options?**: `Partial`<[`TableNamesOptions`](../interfaces/TableNamesOptions.md)>
|
* **options?**: `Partial`<[`TableNamesOptions`](../interfaces/TableNamesOptions.md)>
|
||||||
options to control the
|
options to control the
|
||||||
paging / start point
|
paging / start point
|
||||||
|
|
||||||
#### Returns
|
##### Returns
|
||||||
|
|
||||||
`Promise`<`string`[]>
|
`Promise`<`string`[]>
|
||||||
|
|||||||
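The optional `namespace` array threads through all of these connection methods. As a minimal sketch based only on the signatures above (the `["prod"]` namespace value and table contents are illustrative):

```ts
import * as lancedb from "@lancedb/lancedb";

async function namespaceDemo() {
  const db = await lancedb.connect("data/sample-lancedb");

  // Create a table inside a hypothetical "prod" namespace; omitting the
  // namespace argument targets the root namespace, as documented above.
  const table = await db.createTable(
    "words",
    [{ id: 1, text: "hello" }],
    ["prod"], // namespace
  );
  console.log(await table.countRows()); // 1

  // List, open, and drop are scoped the same way.
  console.log(await db.tableNames(["prod"]));
  const reopened = await db.openTable("words", ["prod"]);
  console.log(await reopened.countRows());
  await db.dropTable("words", ["prod"]);
}
```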
docs/src/js/classes/HeaderProvider.md (new file, 85 lines)
@@ -0,0 +1,85 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / HeaderProvider

# Class: `abstract` HeaderProvider

Abstract base class for providing custom headers for each request.

Users can implement this interface to provide dynamic headers for various purposes
such as authentication (OAuth tokens, API keys), request tracking (correlation IDs),
custom metadata, or any other header-based requirements. The provider is called
before each request to ensure fresh header values are always used.

## Examples

Simple JWT token provider:

```typescript
class JWTProvider extends HeaderProvider {
  constructor(private token: string) {
    super();
  }

  getHeaders(): Record<string, string> {
    return { authorization: `Bearer ${this.token}` };
  }
}
```

Provider with request tracking:

```typescript
class RequestTrackingProvider extends HeaderProvider {
  constructor(private sessionId: string) {
    super();
  }

  getHeaders(): Record<string, string> {
    return {
      "X-Session-Id": this.sessionId,
      "X-Request-Id": `req-${Date.now()}`
    };
  }
}
```

## Extended by

- [`StaticHeaderProvider`](StaticHeaderProvider.md)
- [`OAuthHeaderProvider`](OAuthHeaderProvider.md)

## Constructors

### new HeaderProvider()

```ts
new HeaderProvider(): HeaderProvider
```

#### Returns

[`HeaderProvider`](HeaderProvider.md)

## Methods

### getHeaders()

```ts
abstract getHeaders(): Record<string, string>
```

Get the latest headers to be added to requests.

This method is called before each request to the remote LanceDB server.
Implementations should return headers that will be merged with existing headers.

#### Returns

`Record`<`string`, `string`>

Dictionary of header names to values to add to the request.

#### Throws

If unable to fetch headers, the exception will be propagated and the request will fail.
@@ -52,6 +52,30 @@ the merge result
 
 ***
 
+### useIndex()
+
+```ts
+useIndex(useIndex): MergeInsertBuilder
+```
+
+Controls whether to use indexes for the merge operation.
+
+When set to `true` (the default), the operation will use an index if available
+on the join key for improved performance. When set to `false`, it forces a full
+table scan even if an index exists. This can be useful for benchmarking or when
+the query optimizer chooses a suboptimal path.
+
+#### Parameters
+
+* **useIndex**: `boolean`
+Whether to use indices for the merge operation. Defaults to `true`.
+
+#### Returns
+
+[`MergeInsertBuilder`](MergeInsertBuilder.md)
+
+***
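A minimal sketch of how this flag slots into a merge-insert call, assuming a table with `"id"` as the join key and a `newData` array of rows to merge (both illustrative, not from the docs above):

```ts
const result = await table
  .mergeInsert("id")
  .useIndex(false) // force a full table scan even if "id" is indexed
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute(newData);
```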
 
 ### whenMatchedUpdateAll()
 
 ```ts
docs/src/js/classes/NativeJsHeaderProvider.md (new file, 29 lines)
@@ -0,0 +1,29 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / NativeJsHeaderProvider

# Class: NativeJsHeaderProvider

JavaScript HeaderProvider implementation that wraps a JavaScript callback.
This is the only native header provider - all header provider implementations
should provide a JavaScript function that returns headers.

## Constructors

### new NativeJsHeaderProvider()

```ts
new NativeJsHeaderProvider(getHeadersCallback): NativeJsHeaderProvider
```

Create a new JsHeaderProvider from a JavaScript callback

#### Parameters

* **getHeadersCallback**

#### Returns

[`NativeJsHeaderProvider`](NativeJsHeaderProvider.md)
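In practice you rarely construct this class yourself: passing a plain function to `connect` is enough, and it is wrapped into the native provider for you. A minimal sketch, reusing the mock-server URI and fake API key that the tests later in this diff use:

```ts
import { connect } from "@lancedb/lancedb";

// Any () => Record<string, string>, sync or async, works as a provider.
const getHeaders = async () => ({
  "X-Request-Id": `req-${Date.now()}`,
});

const conn = await connect(
  "db://dev",
  { apiKey: "fake", hostOverride: "http://localhost:8000" },
  undefined, // session
  getHeaders, // headerProvider
);
```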
docs/src/js/classes/OAuthHeaderProvider.md (new file, 108 lines)
@@ -0,0 +1,108 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / OAuthHeaderProvider

# Class: OAuthHeaderProvider

Example implementation: OAuth token provider with automatic refresh.

This is an example implementation showing how to manage OAuth tokens
with automatic refresh when they expire.

## Example

```typescript
async function fetchToken(): Promise<TokenResponse> {
  const response = await fetch("https://oauth.example.com/token", {
    method: "POST",
    body: JSON.stringify({
      grant_type: "client_credentials",
      client_id: "your-client-id",
      client_secret: "your-client-secret"
    }),
    headers: { "Content-Type": "application/json" }
  });
  const data = await response.json();
  return {
    accessToken: data.access_token,
    expiresIn: data.expires_in
  };
}

const provider = new OAuthHeaderProvider(fetchToken);
const headers = provider.getHeaders();
// Returns: {"authorization": "Bearer <your-token>"}
```

## Extends

- [`HeaderProvider`](HeaderProvider.md)

## Constructors

### new OAuthHeaderProvider()

```ts
new OAuthHeaderProvider(tokenFetcher, refreshBufferSeconds): OAuthHeaderProvider
```

Initialize the OAuth provider.

#### Parameters

* **tokenFetcher**
Function to fetch new tokens. Should return object with 'accessToken' and optionally 'expiresIn'.

* **refreshBufferSeconds**: `number` = `300`
Seconds before expiry to refresh token. Default 300 (5 minutes).

#### Returns

[`OAuthHeaderProvider`](OAuthHeaderProvider.md)

#### Overrides

[`HeaderProvider`](HeaderProvider.md).[`constructor`](HeaderProvider.md#constructors)

## Methods

### getHeaders()

```ts
getHeaders(): Record<string, string>
```

Get OAuth headers, refreshing token if needed.
Note: This is synchronous for now as the Rust implementation expects sync.
In a real implementation, this would need to handle async properly.

#### Returns

`Record`<`string`, `string`>

Headers with Bearer token authorization.

#### Throws

If unable to fetch or refresh token.

#### Overrides

[`HeaderProvider`](HeaderProvider.md).[`getHeaders`](HeaderProvider.md#getheaders)

***

### refreshToken()

```ts
refreshToken(): Promise<void>
```

Manually refresh the token.
Call this before using getHeaders() to ensure token is available.

#### Returns

`Promise`<`void`>
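Because `getHeaders()` is synchronous, the first token has to be fetched explicitly; the remote-connection tests later in this diff follow the same pattern. A minimal sketch reusing the `fetchToken` helper from the example above (URI and key are placeholders):

```ts
const provider = new OAuthHeaderProvider(fetchToken);

// Fetch the initial token before the first request.
await provider.refreshToken();

const conn = await connect(
  "db://dev",
  { apiKey: "fake", hostOverride: "http://localhost:8000" },
  undefined, // session
  provider, // headerProvider
);
```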
docs/src/js/classes/StaticHeaderProvider.md (new file, 70 lines)
@@ -0,0 +1,70 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / StaticHeaderProvider

# Class: StaticHeaderProvider

Example implementation: A simple header provider that returns static headers.

This is an example implementation showing how to create a HeaderProvider
for cases where headers don't change during the session.

## Example

```typescript
const provider = new StaticHeaderProvider({
  authorization: "Bearer my-token",
  "X-Custom-Header": "custom-value"
});
const headers = provider.getHeaders();
// Returns: {authorization: 'Bearer my-token', 'X-Custom-Header': 'custom-value'}
```

## Extends

- [`HeaderProvider`](HeaderProvider.md)

## Constructors

### new StaticHeaderProvider()

```ts
new StaticHeaderProvider(headers): StaticHeaderProvider
```

Initialize with static headers.

#### Parameters

* **headers**: `Record`<`string`, `string`>
Headers to return for every request.

#### Returns

[`StaticHeaderProvider`](StaticHeaderProvider.md)

#### Overrides

[`HeaderProvider`](HeaderProvider.md).[`constructor`](HeaderProvider.md#constructors)

## Methods

### getHeaders()

```ts
getHeaders(): Record<string, string>
```

Return the static headers.

#### Returns

`Record`<`string`, `string`>

Copy of the static headers.

#### Overrides

[`HeaderProvider`](HeaderProvider.md).[`getHeaders`](HeaderProvider.md#getheaders)
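Provider headers are merged with any static `extraHeaders` from the client config; the remote-connection tests below exercise exactly this combination. A minimal sketch (placeholder URI and key):

```ts
const provider = new StaticHeaderProvider({
  "X-From-Provider": "provider-value",
});

const conn = await connect(
  "db://dev",
  {
    apiKey: "fake",
    hostOverride: "http://localhost:8000",
    clientConfig: {
      extraHeaders: { "X-Extra-Header": "extra-value" },
    },
  },
  undefined, // session
  provider, // headerProvider: both header sources reach every request
);
```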
@@ -6,13 +6,14 @@
 
 # Function: connect()
 
-## connect(uri, options, session)
+## connect(uri, options, session, headerProvider)
 
 ```ts
 function connect(
    uri,
    options?,
-   session?): Promise<Connection>
+   session?,
+   headerProvider?): Promise<Connection>
 ```
 
 Connect to a LanceDB instance at the given URI.
@@ -34,6 +35,8 @@ Accepted formats:
 
 * **session?**: [`Session`](../classes/Session.md)
 
+* **headerProvider?**: [`HeaderProvider`](../classes/HeaderProvider.md) \| () => `Record`<`string`, `string`> \| () => `Promise`<`Record`<`string`, `string`>>
+
 ### Returns
 
 `Promise`<[`Connection`](../classes/Connection.md)>
@@ -55,6 +58,18 @@ const conn = await connect(
 });
 ```
 
+Using with a header provider for per-request authentication:
+
+```ts
+const provider = new StaticHeaderProvider({
+  "X-API-Key": "my-key"
+});
+const conn = await connect(
+  "db://host:port",
+  options,
+  undefined, // session
+  provider, // headerProvider
+);
+```
+
 ## connect(options)
 
 ```ts
@@ -13,7 +13,7 @@ function makeArrowTable(
    metadata?): ArrowTable
 ```
 
-An enhanced version of the makeTable function from Apache Arrow
+An enhanced version of the apache-arrow makeTable function
 that supports nested fields and embeddings columns.
 
 (typically you do not need to call this function. It will be called automatically
@@ -20,16 +20,20 @@
 - [BooleanQuery](classes/BooleanQuery.md)
 - [BoostQuery](classes/BoostQuery.md)
 - [Connection](classes/Connection.md)
+- [HeaderProvider](classes/HeaderProvider.md)
 - [Index](classes/Index.md)
 - [MakeArrowTableOptions](classes/MakeArrowTableOptions.md)
 - [MatchQuery](classes/MatchQuery.md)
 - [MergeInsertBuilder](classes/MergeInsertBuilder.md)
 - [MultiMatchQuery](classes/MultiMatchQuery.md)
+- [NativeJsHeaderProvider](classes/NativeJsHeaderProvider.md)
+- [OAuthHeaderProvider](classes/OAuthHeaderProvider.md)
 - [PhraseQuery](classes/PhraseQuery.md)
 - [Query](classes/Query.md)
 - [QueryBase](classes/QueryBase.md)
 - [RecordBatchIterator](classes/RecordBatchIterator.md)
 - [Session](classes/Session.md)
+- [StaticHeaderProvider](classes/StaticHeaderProvider.md)
 - [Table](classes/Table.md)
 - [TagContents](classes/TagContents.md)
 - [Tags](classes/Tags.md)
@@ -74,6 +78,8 @@
 - [TableNamesOptions](interfaces/TableNamesOptions.md)
 - [TableStatistics](interfaces/TableStatistics.md)
 - [TimeoutConfig](interfaces/TimeoutConfig.md)
+- [TlsConfig](interfaces/TlsConfig.md)
+- [TokenResponse](interfaces/TokenResponse.md)
 - [UpdateOptions](interfaces/UpdateOptions.md)
 - [UpdateResult](interfaces/UpdateResult.md)
 - [Version](interfaces/Version.md)
@@ -16,6 +16,14 @@ optional extraHeaders: Record<string, string>;
 
 ***
 
+### idDelimiter?
+
+```ts
+optional idDelimiter: string;
+```
+
+***
+
 ### retryConfig?
 
 ```ts
@@ -32,6 +40,14 @@ optional timeoutConfig: TimeoutConfig;
 
 ***
 
+### tlsConfig?
+
+```ts
+optional tlsConfig: TlsConfig;
+```
+
+***
+
 ### userAgent?
 
 ```ts
@@ -26,6 +26,18 @@ will be used to determine the most useful kind of index to create.
 
 ***
 
+### name?
+
+```ts
+optional name: string;
+```
+
+Optional custom name for the index.
+
+If not provided, a default name will be generated based on the column name.
+
+***
+
 ### replace?
 
 ```ts
@@ -42,8 +54,27 @@ The default is true
 
 ***
 
+### train?
+
+```ts
+optional train: boolean;
+```
+
+Whether to train the index with existing data.
+
+If true (default), the index will be trained with existing data in the table.
+If false, the index will be created empty and populated as new data is added.
+
+Note: This option is only supported for scalar indices. Vector indices always train.
+
+***
+
 ### waitTimeoutSeconds?
 
 ```ts
 optional waitTimeoutSeconds: number;
 ```
 
+Timeout in seconds to wait for index creation to complete.
+
+If not specified, the method will return immediately after starting the index creation.
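A minimal sketch of how these options might combine in a `createIndex` call; the `"id"` column, the B-tree config, and the timeout value are illustrative assumptions, not taken from the interface above:

```ts
import { Index } from "@lancedb/lancedb";

// Create an untrained scalar index and wait up to a minute for it.
await table.createIndex("id", {
  config: Index.btree(),
  name: "id_btree", // custom index name
  train: false, // scalar indices only; vector indices always train
  waitTimeoutSeconds: 60,
});
```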
docs/src/js/interfaces/TlsConfig.md (new file, 49 lines)
@@ -0,0 +1,49 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / TlsConfig

# Interface: TlsConfig

TLS/mTLS configuration for the remote HTTP client.

## Properties

### assertHostname?

```ts
optional assertHostname: boolean;
```

Whether to verify the hostname in the server's certificate.

***

### certFile?

```ts
optional certFile: string;
```

Path to the client certificate file (PEM format) for mTLS authentication.

***

### keyFile?

```ts
optional keyFile: string;
```

Path to the client private key file (PEM format) for mTLS authentication.

***

### sslCaCert?

```ts
optional sslCaCert: string;
```

Path to the CA certificate file (PEM format) for server verification.
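The remote-connection tests later in this diff pass this interface through `ClientConfig`; a minimal sketch with placeholder paths:

```ts
import { type ConnectionOptions, connect } from "@lancedb/lancedb";

const options: ConnectionOptions = {
  apiKey: "fake",
  clientConfig: {
    tlsConfig: {
      certFile: "/path/to/cert.pem", // client cert for mTLS
      keyFile: "/path/to/key.pem", // client private key
      sslCaCert: "/path/to/ca.pem", // CA bundle for server verification
      assertHostname: false,
    },
  },
};

const conn = await connect("db://dev", options);
```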
docs/src/js/interfaces/TokenResponse.md (new file, 25 lines)
@@ -0,0 +1,25 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / TokenResponse

# Interface: TokenResponse

Token response from OAuth provider.

## Properties

### accessToken

```ts
accessToken: string;
```

***

### expiresIn?

```ts
optional expiresIn: number;
```
@@ -15,7 +15,7 @@ publish = false
 crate-type = ["cdylib"]
 
 [dependencies]
-lancedb = { path = "../../../rust/lancedb" }
+lancedb = { path = "../../../rust/lancedb", default-features = false }
 lance = { workspace = true }
 arrow = { workspace = true, features = ["ffi"] }
 arrow-schema.workspace = true
@@ -25,3 +25,6 @@ snafu.workspace = true
 lazy_static.workspace = true
 serde = { version = "^1" }
 serde_json = { version = "1" }
+
+[features]
+default = ["lancedb/default"]
@@ -16,6 +16,7 @@ pub trait JNIEnvExt {
     fn get_integers(&mut self, obj: &JObject) -> Result<Vec<i32>>;
 
     /// Get strings from Java List<String> object.
+    #[allow(dead_code)]
     fn get_strings(&mut self, obj: &JObject) -> Result<Vec<String>>;
 
     /// Get strings from Java String[] object.
@@ -6,6 +6,7 @@ use jni::JNIEnv;
 
 use crate::Result;
 
+#[allow(dead_code)]
 pub trait FromJObject<T> {
     fn extract(&self) -> Result<T>;
 }
@@ -39,6 +40,7 @@ impl FromJObject<f64> for JObject<'_> {
     }
 }
 
+#[allow(dead_code)]
 pub trait FromJString {
     fn extract(&self, env: &mut JNIEnv) -> Result<String>;
 }
@@ -66,6 +68,7 @@ pub trait JMapExt {
     fn get_f64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f64>>;
 }
 
+#[allow(dead_code)]
 fn get_map_value<T>(env: &mut JNIEnv, map: &JMap, key: &str) -> Result<Option<T>>
 where
     for<'a> JObject<'a>: FromJObject<T>,
@@ -8,7 +8,7 @@
   <parent>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-parent</artifactId>
-    <version>0.21.2-final.0</version>
+    <version>0.22.2-final.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -8,7 +8,7 @@
   <parent>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-parent</artifactId>
-    <version>0.21.2-final.0</version>
+    <version>0.22.2-final.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -6,7 +6,7 @@
 
   <groupId>com.lancedb</groupId>
   <artifactId>lancedb-parent</artifactId>
-  <version>0.21.2-final.0</version>
+  <version>0.22.2-final.0</version>
   <packaging>pom</packaging>
   <name>${project.artifactId}</name>
   <description>LanceDB Java SDK Parent POM</description>
@@ -1,7 +1,7 @@
 [package]
 name = "lancedb-nodejs"
 edition.workspace = true
-version = "0.21.2"
+version = "0.22.2"
 license.workspace = true
 description.workspace = true
 repository.workspace = true
@@ -18,7 +18,7 @@ arrow-array.workspace = true
 arrow-schema.workspace = true
 env_logger.workspace = true
 futures.workspace = true
-lancedb = { path = "../rust/lancedb" }
+lancedb = { path = "../rust/lancedb", default-features = false }
 napi = { version = "2.16.8", default-features = false, features = [
   "napi9",
   "async"
@@ -36,6 +36,6 @@ aws-lc-rs = "=1.13.0"
 napi-build = "2.1"
 
 [features]
-default = ["remote"]
+default = ["remote", "lancedb/default"]
 fp16kernels = ["lancedb/fp16kernels"]
 remote = ["lancedb/remote"]
@@ -1,17 +1,5 @@
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
-import {
-  Bool,
-  Field,
-  Int32,
-  List,
-  Schema,
-  Struct,
-  Uint8,
-  Utf8,
-} from "apache-arrow";
-
 import * as arrow15 from "apache-arrow-15";
 import * as arrow16 from "apache-arrow-16";
 import * as arrow17 from "apache-arrow-17";
@@ -25,11 +13,9 @@ import {
   fromTableToBuffer,
   makeArrowTable,
   makeEmptyTable,
-  tableFromIPC,
 } from "../lancedb/arrow";
 import {
   EmbeddingFunction,
-  FieldOptions,
   FunctionOptions,
 } from "../lancedb/embedding/embedding_function";
 import { EmbeddingFunctionConfig } from "../lancedb/embedding/registry";
@@ -1008,5 +994,64 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
       expect(result).toEqual(null);
     });
   });
+
+    describe("boolean null handling", function () {
+      it("should handle null values in nullable boolean fields", () => {
+        const { makeArrowTable } = require("../lancedb/arrow");
+        const schema = new Schema([new Field("test", new arrow.Bool(), true)]);
+
+        // Test with all null values
+        const data = [{ test: null }];
+        const table = makeArrowTable(data, { schema });
+
+        expect(table.numRows).toBe(1);
+        expect(table.schema.names).toEqual(["test"]);
+        expect(table.getChild("test")!.get(0)).toBeNull();
+      });
+
+      it("should handle mixed null and non-null boolean values", () => {
+        const { makeArrowTable } = require("../lancedb/arrow");
+        const schema = new Schema([new Field("test", new Bool(), true)]);
+
+        // Test with mixed values
+        const data = [{ test: true }, { test: null }, { test: false }];
+        const table = makeArrowTable(data, { schema });
+
+        expect(table.numRows).toBe(3);
+        expect(table.getChild("test")!.get(0)).toBe(true);
+        expect(table.getChild("test")!.get(1)).toBeNull();
+        expect(table.getChild("test")!.get(2)).toBe(false);
+      });
+    });
+
+    // Test for the undefined values bug fix
+    describe("undefined values handling", () => {
+      it("should handle mixed undefined and actual values", () => {
+        const schema = new Schema([
+          new Field("text", new Utf8(), true), // nullable
+          new Field("number", new Int32(), true), // nullable
+          new Field("bool", new Bool(), true), // nullable
+        ]);
+
+        const data = [
+          { text: undefined, number: 42, bool: true },
+          { text: "hello", number: undefined, bool: false },
+          { text: "world", number: 123, bool: undefined },
+        ];
+        const table = makeArrowTable(data, { schema });
+
+        const result = table.toArray();
+        expect(result).toHaveLength(3);
+        expect(result[0].text).toBe(null);
+        expect(result[0].number).toBe(42);
+        expect(result[0].bool).toBe(true);
+        expect(result[1].text).toBe("hello");
+        expect(result[1].number).toBe(null);
+        expect(result[1].bool).toBe(false);
+        expect(result[2].text).toBe("world");
+        expect(result[2].number).toBe(123);
+        expect(result[2].bool).toBe(null);
+      });
+    });
   },
 );
@@ -203,3 +203,106 @@ describe("given a connection", () => {
     });
   });
 });
+
+describe("clone table functionality", () => {
+  let tmpDir: tmp.DirResult;
+  let db: Connection;
+  beforeEach(async () => {
+    tmpDir = tmp.dirSync({ unsafeCleanup: true });
+    db = await connect(tmpDir.name);
+  });
+  afterEach(() => tmpDir.removeCallback());
+
+  it("should clone a table with latest version (default behavior)", async () => {
+    // Create source table with some data
+    const data = [
+      { id: 1, text: "hello", vector: [1.0, 2.0] },
+      { id: 2, text: "world", vector: [3.0, 4.0] },
+    ];
+    const sourceTable = await db.createTable("source", data);
+
+    // Add more data to create a new version
+    const moreData = [{ id: 3, text: "test", vector: [5.0, 6.0] }];
+    await sourceTable.add(moreData);
+
+    // Clone the table (should get latest version with 3 rows)
+    const sourceUri = `${tmpDir.name}/source.lance`;
+    const clonedTable = await db.cloneTable("cloned", sourceUri);
+
+    // Verify cloned table has all 3 rows
+    expect(await clonedTable.countRows()).toBe(3);
+    expect((await db.tableNames()).includes("cloned")).toBe(true);
+  });
+
+  it("should clone a table from a specific version", async () => {
+    // Create source table with initial data
+    const data = [
+      { id: 1, text: "hello", vector: [1.0, 2.0] },
+      { id: 2, text: "world", vector: [3.0, 4.0] },
+    ];
+    const sourceTable = await db.createTable("source", data);
+
+    // Get the initial version
+    const initialVersion = await sourceTable.version();
+
+    // Add more data to create a new version
+    const moreData = [{ id: 3, text: "test", vector: [5.0, 6.0] }];
+    await sourceTable.add(moreData);
+
+    // Verify source now has 3 rows
+    expect(await sourceTable.countRows()).toBe(3);
+
+    // Clone from the initial version (should have only 2 rows)
+    const sourceUri = `${tmpDir.name}/source.lance`;
+    const clonedTable = await db.cloneTable("cloned", sourceUri, {
+      sourceVersion: initialVersion,
+    });
+
+    // Verify cloned table has only the initial 2 rows
+    expect(await clonedTable.countRows()).toBe(2);
+  });
+
+  it("should clone a table from a tagged version", async () => {
+    // Create source table with initial data
+    const data = [
+      { id: 1, text: "hello", vector: [1.0, 2.0] },
+      { id: 2, text: "world", vector: [3.0, 4.0] },
+    ];
+    const sourceTable = await db.createTable("source", data);
+
+    // Create a tag for the current version
+    const tags = await sourceTable.tags();
+    await tags.create("v1.0", await sourceTable.version());
+
+    // Add more data after the tag
+    const moreData = [{ id: 3, text: "test", vector: [5.0, 6.0] }];
+    await sourceTable.add(moreData);
+
+    // Verify source now has 3 rows
+    expect(await sourceTable.countRows()).toBe(3);
+
+    // Clone from the tagged version (should have only 2 rows)
+    const sourceUri = `${tmpDir.name}/source.lance`;
+    const clonedTable = await db.cloneTable("cloned", sourceUri, {
+      sourceTag: "v1.0",
+    });
+
+    // Verify cloned table has only the tagged version's 2 rows
+    expect(await clonedTable.countRows()).toBe(2);
+  });
+
+  it("should fail when attempting deep clone", async () => {
+    // Create source table with some data
+    const data = [
+      { id: 1, text: "hello", vector: [1.0, 2.0] },
+      { id: 2, text: "world", vector: [3.0, 4.0] },
+    ];
+    await db.createTable("source", data);
+
+    // Try to create a deep clone (should fail)
+    const sourceUri = `${tmpDir.name}/source.lance`;
+    await expect(
+      db.cloneTable("cloned", sourceUri, { isShallow: false }),
+    ).rejects.toThrow("Deep clone is not yet implemented");
+  });
+});
@@ -256,6 +256,60 @@ describe("embedding functions", () => {
     expect(actual).toHaveProperty("text");
   });
 
+  it("should handle undefined vector field with embedding function correctly", async () => {
+    @register("undefined_test")
+    class MockEmbeddingFunction extends EmbeddingFunction<string> {
+      ndims() {
+        return 3;
+      }
+      embeddingDataType(): Float {
+        return new Float32();
+      }
+      async computeQueryEmbeddings(_data: string) {
+        return [1, 2, 3];
+      }
+      async computeSourceEmbeddings(data: string[]) {
+        return Array.from({ length: data.length }).fill([
+          1, 2, 3,
+        ]) as number[][];
+      }
+    }
+    const func = getRegistry()
+      .get<MockEmbeddingFunction>("undefined_test")!
+      .create();
+    const schema = new Schema([
+      new Field("text", new Utf8(), true),
+      new Field(
+        "vector",
+        new FixedSizeList(3, new Field("item", new Float32(), true)),
+        true,
+      ),
+    ]);
+
+    const db = await connect(tmpDir.name);
+    const table = await db.createEmptyTable("test_undefined", schema, {
+      embeddingFunction: {
+        function: func,
+        sourceColumn: "text",
+        vectorColumn: "vector",
+      },
+    });
+
+    // Test that undefined, null, and omitted vector fields all work
+    await table.add([{ text: "test1", vector: undefined }]);
+    await table.add([{ text: "test2", vector: null }]);
+    await table.add([{ text: "test3" }]);
+
+    const rows = await table.query().toArray();
+    expect(rows.length).toBe(3);
+
+    // All rows should have vectors computed by the embedding function
+    for (const row of rows) {
+      expect(row.vector).toBeDefined();
+      expect(JSON.parse(JSON.stringify(row.vector))).toEqual([1, 2, 3]);
+    }
+  });
+
 test.each([new Float16(), new Float32(), new Float64()])(
   "should be able to provide manual embeddings with multiple float datatype",
   async (floatType) => {
@@ -3,7 +3,49 @@
|
|||||||
|
|
||||||
import * as http from "http";
|
import * as http from "http";
|
||||||
import { RequestListener } from "http";
|
import { RequestListener } from "http";
|
||||||
import { Connection, ConnectionOptions, connect } from "../lancedb";
|
import {
|
||||||
|
ClientConfig,
|
||||||
|
Connection,
|
||||||
|
ConnectionOptions,
|
||||||
|
TlsConfig,
|
||||||
|
connect,
|
||||||
|
} from "../lancedb";
|
||||||
|
import {
|
||||||
|
HeaderProvider,
|
||||||
|
OAuthHeaderProvider,
|
||||||
|
StaticHeaderProvider,
|
||||||
|
} from "../lancedb/header";
|
||||||
|
|
||||||
|
// Test-only header providers
|
||||||
|
class CustomProvider extends HeaderProvider {
|
||||||
|
getHeaders(): Record<string, string> {
|
||||||
|
return { "X-Custom": "custom-value" };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
class ErrorProvider extends HeaderProvider {
|
||||||
|
private errorMessage: string;
|
||||||
|
public callCount: number = 0;
|
||||||
|
|
||||||
|
constructor(errorMessage: string = "Test error") {
|
||||||
|
super();
|
||||||
|
this.errorMessage = errorMessage;
|
||||||
|
}
|
||||||
|
|
||||||
|
getHeaders(): Record<string, string> {
|
||||||
|
this.callCount++;
|
||||||
|
throw new Error(this.errorMessage);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
class ConcurrentProvider extends HeaderProvider {
|
||||||
|
private counter: number = 0;
|
||||||
|
|
||||||
|
getHeaders(): Record<string, string> {
|
||||||
|
this.counter++;
|
||||||
|
return { "X-Request-Id": String(this.counter) };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
async function withMockDatabase(
|
async function withMockDatabase(
|
||||||
listener: RequestListener,
|
listener: RequestListener,
|
||||||
@@ -148,4 +190,431 @@ describe("remote connection", () => {
|
|||||||
},
|
},
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
describe("TlsConfig", () => {
|
||||||
|
it("should create TlsConfig with all fields", () => {
|
||||||
|
const tlsConfig: TlsConfig = {
|
||||||
|
certFile: "/path/to/cert.pem",
|
||||||
|
keyFile: "/path/to/key.pem",
|
||||||
|
sslCaCert: "/path/to/ca.pem",
|
||||||
|
assertHostname: false,
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(tlsConfig.certFile).toBe("/path/to/cert.pem");
|
||||||
|
expect(tlsConfig.keyFile).toBe("/path/to/key.pem");
|
||||||
|
expect(tlsConfig.sslCaCert).toBe("/path/to/ca.pem");
|
||||||
|
expect(tlsConfig.assertHostname).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should create TlsConfig with partial fields", () => {
|
||||||
|
const tlsConfig: TlsConfig = {
|
||||||
|
certFile: "/path/to/cert.pem",
|
||||||
|
keyFile: "/path/to/key.pem",
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(tlsConfig.certFile).toBe("/path/to/cert.pem");
|
||||||
|
expect(tlsConfig.keyFile).toBe("/path/to/key.pem");
|
||||||
|
expect(tlsConfig.sslCaCert).toBeUndefined();
|
||||||
|
expect(tlsConfig.assertHostname).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should create ClientConfig with TlsConfig", () => {
|
||||||
|
const tlsConfig: TlsConfig = {
|
||||||
|
certFile: "/path/to/cert.pem",
|
||||||
|
keyFile: "/path/to/key.pem",
|
||||||
|
sslCaCert: "/path/to/ca.pem",
|
||||||
|
assertHostname: true,
|
||||||
|
};
|
||||||
|
|
||||||
|
const clientConfig: ClientConfig = {
|
||||||
|
userAgent: "test-agent",
|
||||||
|
tlsConfig: tlsConfig,
|
||||||
|
};
|
||||||
|
|
||||||
|
expect(clientConfig.userAgent).toBe("test-agent");
|
||||||
|
expect(clientConfig.tlsConfig).toBeDefined();
|
||||||
|
expect(clientConfig.tlsConfig?.certFile).toBe("/path/to/cert.pem");
|
||||||
|
expect(clientConfig.tlsConfig?.keyFile).toBe("/path/to/key.pem");
|
||||||
|
expect(clientConfig.tlsConfig?.sslCaCert).toBe("/path/to/ca.pem");
|
||||||
|
expect(clientConfig.tlsConfig?.assertHostname).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should handle empty TlsConfig", () => {
|
||||||
|
const tlsConfig: TlsConfig = {};
|
||||||
|
|
||||||
|
expect(tlsConfig.certFile).toBeUndefined();
|
||||||
|
expect(tlsConfig.keyFile).toBeUndefined();
|
||||||
|
expect(tlsConfig.sslCaCert).toBeUndefined();
|
||||||
|
expect(tlsConfig.assertHostname).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should accept TlsConfig in connection options", () => {
|
||||||
|
const tlsConfig: TlsConfig = {
|
||||||
|
certFile: "/path/to/cert.pem",
|
||||||
|
keyFile: "/path/to/key.pem",
|
||||||
|
sslCaCert: "/path/to/ca.pem",
|
||||||
|
assertHostname: false,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Just verify that the ClientConfig accepts the TlsConfig
|
||||||
|
const clientConfig: ClientConfig = {
|
||||||
|
tlsConfig: tlsConfig,
|
||||||
|
};
|
||||||
|
|
||||||
|
const connectionOptions: ConnectionOptions = {
|
||||||
|
apiKey: "fake",
|
||||||
|
clientConfig: clientConfig,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Verify the configuration structure is correct
|
||||||
|
expect(connectionOptions.clientConfig).toBeDefined();
|
||||||
|
expect(connectionOptions.clientConfig?.tlsConfig).toBeDefined();
|
||||||
|
expect(connectionOptions.clientConfig?.tlsConfig?.certFile).toBe(
|
||||||
|
"/path/to/cert.pem",
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("header providers", () => {
|
||||||
|
it("should work with StaticHeaderProvider", async () => {
|
||||||
|
const provider = new StaticHeaderProvider({
|
||||||
|
authorization: "Bearer test-token",
|
||||||
|
"X-Custom": "value",
|
||||||
|
});
|
||||||
|
|
||||||
|
const headers = provider.getHeaders();
|
||||||
|
expect(headers).toEqual({
|
||||||
|
authorization: "Bearer test-token",
|
||||||
|
"X-Custom": "value",
|
||||||
|
});
|
||||||
|
|
||||||
|
// Test that it returns a copy
|
||||||
|
headers["X-Modified"] = "modified";
|
||||||
|
const headers2 = provider.getHeaders();
|
||||||
|
expect(headers2).not.toHaveProperty("X-Modified");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should pass headers from StaticHeaderProvider to requests", async () => {
|
||||||
|
const provider = new StaticHeaderProvider({
|
||||||
|
"X-Custom-Auth": "secret-token",
|
||||||
|
"X-Request-Source": "test-suite",
|
||||||
|
});
|
||||||
|
|
||||||
|
await withMockDatabase(
|
||||||
|
(req, res) => {
|
||||||
|
expect(req.headers["x-custom-auth"]).toEqual("secret-token");
|
||||||
|
expect(req.headers["x-request-source"]).toEqual("test-suite");
|
||||||
|
|
||||||
|
const body = JSON.stringify({ tables: [] });
|
||||||
|
res.writeHead(200, { "Content-Type": "application/json" }).end(body);
|
||||||
|
},
|
||||||
|
async () => {
|
||||||
|
// Use actual header provider mechanism instead of extraHeaders
|
||||||
|
const conn = await connect(
|
||||||
|
"db://dev",
|
||||||
|
{
|
||||||
|
apiKey: "fake",
|
||||||
|
hostOverride: "http://localhost:8000",
|
||||||
|
},
|
||||||
|
undefined, // session
|
||||||
|
provider, // headerProvider
|
||||||
|
);
|
||||||
|
|
||||||
|
const tableNames = await conn.tableNames();
|
||||||
|
expect(tableNames).toEqual([]);
|
||||||
|
},
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should work with CustomProvider", () => {
|
||||||
|
const provider = new CustomProvider();
|
||||||
|
const headers = provider.getHeaders();
|
||||||
|
expect(headers).toEqual({ "X-Custom": "custom-value" });
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should handle ErrorProvider errors", () => {
|
||||||
|
const provider = new ErrorProvider("Authentication failed");
|
||||||
|
|
||||||
|
expect(() => provider.getHeaders()).toThrow("Authentication failed");
|
||||||
|
expect(provider.callCount).toBe(1);
|
||||||
|
|
||||||
|
// Test that error is thrown each time
|
||||||
|
expect(() => provider.getHeaders()).toThrow("Authentication failed");
|
||||||
|
expect(provider.callCount).toBe(2);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should work with ConcurrentProvider", () => {
|
||||||
|
const provider = new ConcurrentProvider();
|
||||||
|
|
||||||
|
const headers1 = provider.getHeaders();
|
||||||
|
const headers2 = provider.getHeaders();
|
||||||
|
const headers3 = provider.getHeaders();
|
||||||
|
|
||||||
|
expect(headers1).toEqual({ "X-Request-Id": "1" });
|
||||||
|
expect(headers2).toEqual({ "X-Request-Id": "2" });
|
||||||
|
expect(headers3).toEqual({ "X-Request-Id": "3" });
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("OAuthHeaderProvider", () => {
|
||||||
|
it("should initialize correctly", () => {
|
||||||
|
const fetcher = () => ({
|
||||||
|
accessToken: "token123",
|
||||||
|
expiresIn: 3600,
|
||||||
|
});
|
||||||
|
|
||||||
|
const provider = new OAuthHeaderProvider(fetcher);
|
||||||
|
expect(provider).toBeInstanceOf(HeaderProvider);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should fetch token on first use", async () => {
|
||||||
|
let callCount = 0;
|
||||||
|
const fetcher = () => {
|
||||||
|
callCount++;
|
||||||
|
return {
|
||||||
|
accessToken: "token123",
|
||||||
|
expiresIn: 3600,
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
const provider = new OAuthHeaderProvider(fetcher);
|
||||||
|
|
||||||
|
// Need to manually refresh first due to sync limitation
|
||||||
|
await provider.refreshToken();
|
||||||
|
|
||||||
|
const headers = provider.getHeaders();
|
||||||
|
expect(headers).toEqual({ authorization: "Bearer token123" });
|
||||||
|
expect(callCount).toBe(1);
|
||||||
|
|
||||||
|
// Second call should not fetch again
|
||||||
|
const headers2 = provider.getHeaders();
|
||||||
|
expect(headers2).toEqual({ authorization: "Bearer token123" });
|
||||||
|
expect(callCount).toBe(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should handle tokens without expiry", async () => {
|
||||||
|
const fetcher = () => ({
|
||||||
|
accessToken: "permanent_token",
|
||||||
|
});
|
||||||
|
|
||||||
|
const provider = new OAuthHeaderProvider(fetcher);
|
||||||
|
await provider.refreshToken();
|
||||||
|
|
||||||
|
const headers = provider.getHeaders();
|
||||||
|
expect(headers).toEqual({ authorization: "Bearer permanent_token" });
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should throw error when access_token is missing", async () => {
|
||||||
|
const fetcher = () =>
|
||||||
|
({
|
||||||
|
expiresIn: 3600,
|
||||||
|
}) as { accessToken?: string; expiresIn?: number };
|
||||||
|
|
||||||
|
const provider = new OAuthHeaderProvider(
|
||||||
|
fetcher as () => {
|
||||||
|
accessToken: string;
|
||||||
|
expiresIn?: number;
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
await expect(provider.refreshToken()).rejects.toThrow(
|
||||||
|
"Token fetcher did not return 'accessToken'",
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should handle async token fetchers", async () => {
|
||||||
|
const fetcher = async () => {
|
||||||
|
// Simulate async operation
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, 10));
|
||||||
|
return {
|
||||||
|
accessToken: "async_token",
|
||||||
|
expiresIn: 3600,
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
const provider = new OAuthHeaderProvider(fetcher);
|
||||||
|
await provider.refreshToken();
|
||||||
|
|
||||||
|
const headers = provider.getHeaders();
|
||||||
|
expect(headers).toEqual({ authorization: "Bearer async_token" });
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should merge header provider headers with extra headers", async () => {
|
||||||
|
const provider = new StaticHeaderProvider({
|
||||||
|
"X-From-Provider": "provider-value",
|
||||||
|
});
|
||||||
|
|
||||||
|
await withMockDatabase(
|
||||||
|
(req, res) => {
|
||||||
|
expect(req.headers["x-from-provider"]).toEqual("provider-value");
|
||||||
|
expect(req.headers["x-extra-header"]).toEqual("extra-value");
|
||||||
|
|
||||||
|
const body = JSON.stringify({ tables: [] });
|
||||||
|
res.writeHead(200, { "Content-Type": "application/json" }).end(body);
|
||||||
|
},
|
||||||
|
async () => {
|
||||||
|
// Use header provider with additional extraHeaders
|
||||||
|
const conn = await connect(
|
||||||
|
"db://dev",
|
||||||
|
{
|
||||||
|
apiKey: "fake",
|
||||||
|
hostOverride: "http://localhost:8000",
|
||||||
|
clientConfig: {
|
||||||
|
extraHeaders: {
|
||||||
|
"X-Extra-Header": "extra-value",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
undefined, // session
|
||||||
|
provider, // headerProvider
|
            );

            const tableNames = await conn.tableNames();
            expect(tableNames).toEqual([]);
          },
        );
      });
    });

    describe("header provider integration", () => {
      it("should work with TypeScript StaticHeaderProvider", async () => {
        let requestCount = 0;

        await withMockDatabase(
          (req, res) => {
            requestCount++;

            // Check headers are present on each request
            expect(req.headers["authorization"]).toEqual("Bearer test-token-123");
            expect(req.headers["x-custom"]).toEqual("custom-value");

            // Return different responses based on the endpoint
            if (req.url === "/v1/table/test_table/describe/") {
              const body = JSON.stringify({
                name: "test_table",
                schema: { fields: [] },
              });
              res
                .writeHead(200, { "Content-Type": "application/json" })
                .end(body);
            } else {
              const body = JSON.stringify({ tables: ["test_table"] });
              res
                .writeHead(200, { "Content-Type": "application/json" })
                .end(body);
            }
          },
          async () => {
            // Create provider with static headers
            const provider = new StaticHeaderProvider({
              authorization: "Bearer test-token-123",
              "X-Custom": "custom-value",
            });

            // Connect with the provider
            const conn = await connect(
              "db://dev",
              {
                apiKey: "fake",
                hostOverride: "http://localhost:8000",
              },
              undefined, // session
              provider, // headerProvider
            );

            // Make multiple requests to verify headers are sent each time
            const tables1 = await conn.tableNames();
            expect(tables1).toEqual(["test_table"]);

            const tables2 = await conn.tableNames();
            expect(tables2).toEqual(["test_table"]);

            // Verify headers were sent with each request
            expect(requestCount).toBeGreaterThanOrEqual(2);
          },
        );
      });

      it("should work with JavaScript function provider", async () => {
        let requestId = 0;

        await withMockDatabase(
          (req, res) => {
            // Check dynamic header is present
            expect(req.headers["x-request-id"]).toBeDefined();
            expect(req.headers["x-request-id"]).toMatch(/^req-\d+$/);

            const body = JSON.stringify({ tables: [] });
            res.writeHead(200, { "Content-Type": "application/json" }).end(body);
          },
          async () => {
            // Create a JavaScript function that returns dynamic headers
            const getHeaders = async () => {
              requestId++;
              return {
                "X-Request-Id": `req-${requestId}`,
                "X-Timestamp": new Date().toISOString(),
              };
            };

            // Connect with the function directly
            const conn = await connect(
              "db://dev",
              {
                apiKey: "fake",
                hostOverride: "http://localhost:8000",
              },
              undefined, // session
              getHeaders, // headerProvider
            );

            // Make requests - each should have different headers
            const tables = await conn.tableNames();
            expect(tables).toEqual([]);
          },
        );
      });

      it("should support OAuth-like token refresh pattern", async () => {
        let tokenVersion = 0;

        await withMockDatabase(
          (req, res) => {
            // Verify authorization header
            const authHeader = req.headers["authorization"];
            expect(authHeader).toBeDefined();
            expect(authHeader).toMatch(/^Bearer token-v\d+$/);

            const body = JSON.stringify({ tables: [] });
            res.writeHead(200, { "Content-Type": "application/json" }).end(body);
          },
          async () => {
            // Simulate OAuth token fetcher
            const fetchToken = async () => {
              tokenVersion++;
              return {
                authorization: `Bearer token-v${tokenVersion}`,
              };
            };

            // Connect with the function directly
            const conn = await connect(
              "db://dev",
              {
                apiKey: "fake",
                hostOverride: "http://localhost:8000",
              },
              undefined, // session
              fetchToken, // headerProvider
            );

            // Each request will fetch a new token
            await conn.tableNames();

            // Token should be different on next request
            await conn.tableNames();
          },
        );
      });
    });
  });
});
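Aside: the tests above call a withMockDatabase helper that is defined earlier in this test file and does not appear in this excerpt. A minimal sketch of what such a helper might look like (an assumption, not the repository's actual implementation), using Node's built-in http module and the port the tests point hostOverride at:

import * as http from "node:http";

// Hypothetical sketch: start an HTTP server around the test body and
// always tear it down afterwards, even if the body throws.
async function withMockDatabase(
  handler: http.RequestListener,
  body: () => Promise<void>,
  port = 8000,
): Promise<void> {
  const server = http.createServer(handler);
  await new Promise<void>((resolve) => server.listen(port, resolve));
  try {
    await body();
  } finally {
    await new Promise<void>((resolve) => server.close(() => resolve()));
  }
}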
nodejs/__test__/sanitize.test.ts (new file, 184 lines)
@@ -0,0 +1,184 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

import * as arrow from "../lancedb/arrow";
import { sanitizeField, sanitizeType } from "../lancedb/sanitize";

describe("sanitize", function () {
  describe("sanitizeType function", function () {
    it("should handle type objects", function () {
      const type = new arrow.Int32();
      const result = sanitizeType(type);

      expect(result.typeId).toBe(arrow.Type.Int);
      expect((result as arrow.Int).bitWidth).toBe(32);
      expect((result as arrow.Int).isSigned).toBe(true);

      const floatType = {
        typeId: 3, // Type.Float = 3
        precision: 2,
        toString: () => "Float",
        isFloat: true,
        isFixedWidth: true,
      };

      const floatResult = sanitizeType(floatType);
      expect(floatResult).toBeInstanceOf(arrow.DataType);
      expect(floatResult.typeId).toBe(arrow.Type.Float);

      const floatResult2 = sanitizeType({ ...floatType, typeId: () => 3 });
      expect(floatResult2).toBeInstanceOf(arrow.DataType);
      expect(floatResult2.typeId).toBe(arrow.Type.Float);
    });

    const allTypeNameTestCases = [
      ["null", new arrow.Null()],
      ["binary", new arrow.Binary()],
      ["utf8", new arrow.Utf8()],
      ["bool", new arrow.Bool()],
      ["int8", new arrow.Int8()],
      ["int16", new arrow.Int16()],
      ["int32", new arrow.Int32()],
      ["int64", new arrow.Int64()],
      ["uint8", new arrow.Uint8()],
      ["uint16", new arrow.Uint16()],
      ["uint32", new arrow.Uint32()],
      ["uint64", new arrow.Uint64()],
      ["float16", new arrow.Float16()],
      ["float32", new arrow.Float32()],
      ["float64", new arrow.Float64()],
      ["datemillisecond", new arrow.DateMillisecond()],
      ["dateday", new arrow.DateDay()],
      ["timenanosecond", new arrow.TimeNanosecond()],
      ["timemicrosecond", new arrow.TimeMicrosecond()],
      ["timemillisecond", new arrow.TimeMillisecond()],
      ["timesecond", new arrow.TimeSecond()],
      ["intervaldaytime", new arrow.IntervalDayTime()],
      ["intervalyearmonth", new arrow.IntervalYearMonth()],
      ["durationnanosecond", new arrow.DurationNanosecond()],
      ["durationmicrosecond", new arrow.DurationMicrosecond()],
      ["durationmillisecond", new arrow.DurationMillisecond()],
      ["durationsecond", new arrow.DurationSecond()],
    ] as const;

    it.each(allTypeNameTestCases)(
      'should map type name "%s" to %s',
      function (name, expected) {
        const result = sanitizeType(name);
        expect(result).toBeInstanceOf(expected.constructor);
      },
    );

    const caseVariationTestCases = [
      ["NULL", new arrow.Null()],
      ["Utf8", new arrow.Utf8()],
      ["FLOAT32", new arrow.Float32()],
      ["DaTedAy", new arrow.DateDay()],
    ] as const;

    it.each(caseVariationTestCases)(
      'should be case insensitive for type name "%s" mapped to %s',
      function (name, expected) {
        const result = sanitizeType(name);
        expect(result).toBeInstanceOf(expected.constructor);
      },
    );

    it("should throw error for unrecognized type name", function () {
      expect(() => sanitizeType("invalid_type")).toThrow(
        "Unrecognized type name in schema: invalid_type",
      );
    });
  });

  describe("sanitizeField function", function () {
    it("should handle field with string type name", function () {
      const field = sanitizeField({
        name: "string_field",
        type: "utf8",
        nullable: true,
        metadata: new Map([["key", "value"]]),
      });

      expect(field).toBeInstanceOf(arrow.Field);
      expect(field.name).toBe("string_field");
      expect(field.type).toBeInstanceOf(arrow.Utf8);
      expect(field.nullable).toBe(true);
      expect(field.metadata?.get("key")).toBe("value");
    });

    it("should handle field with type object", function () {
      const floatType = {
        typeId: 3, // Float
        precision: 32,
      };

      const field = sanitizeField({
        name: "float_field",
        type: floatType,
        nullable: false,
      });

      expect(field).toBeInstanceOf(arrow.Field);
      expect(field.name).toBe("float_field");
      expect(field.type).toBeInstanceOf(arrow.DataType);
      expect(field.type.typeId).toBe(arrow.Type.Float);
      expect((field.type as arrow.Float64).precision).toBe(32);
      expect(field.nullable).toBe(false);
    });

    it("should handle field with direct Type instance", function () {
      const field = sanitizeField({
        name: "bool_field",
        type: new arrow.Bool(),
        nullable: true,
      });

      expect(field).toBeInstanceOf(arrow.Field);
      expect(field.name).toBe("bool_field");
      expect(field.type).toBeInstanceOf(arrow.Bool);
      expect(field.nullable).toBe(true);
    });

    it("should throw error for invalid field object", function () {
      expect(() =>
        sanitizeField({
          type: "int32",
          nullable: true,
        }),
      ).toThrow(
        "The field passed in is missing a `type`/`name`/`nullable` property",
      );

      // Invalid type
      expect(() =>
        sanitizeField({
          name: "invalid",
          type: { invalid: true },
          nullable: true,
        }),
      ).toThrow("Expected a Type to have a typeId property");

      // Invalid nullable
      expect(() =>
        sanitizeField({
          name: "invalid_nullable",
          type: "int32",
          nullable: "not a boolean",
        }),
      ).toThrow("The field passed in had a non-boolean `nullable` property");
    });

    it("should report error for invalid type name", function () {
      expect(() =>
        sanitizeField({
          name: "invalid_field",
          type: "invalid_type",
          nullable: true,
        }),
      ).toThrow(
        "Unable to sanitize type for field: invalid_field due to error: Error: Unrecognized type name in schema: invalid_type",
      );
    });
  });
});
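As these tests show, sanitizeType and sanitizeField accept real Arrow DataType instances, bare objects carrying a typeId, or case-insensitive type names. A small sketch of how that might be used with a loosely specified field (hypothetical application code; the module path follows the internal import used in the tests above):

import { sanitizeField } from "../lancedb/sanitize"; // internal module, per the tests

// Turn a loosely-typed, user-supplied field description into a real arrow.Field.
const field = sanitizeField({
  name: "score",
  type: "FLOAT32", // type names are matched case-insensitively
  nullable: true,
});
console.log(field.type.toString()); // a Float32 Arrow type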
@@ -10,7 +10,13 @@ import * as arrow16 from "apache-arrow-16";
 import * as arrow17 from "apache-arrow-17";
 import * as arrow18 from "apache-arrow-18";

-import { MatchQuery, PhraseQuery, Table, connect } from "../lancedb";
+import {
+  Connection,
+  MatchQuery,
+  PhraseQuery,
+  Table,
+  connect,
+} from "../lancedb";
 import {
   Table as ArrowTable,
   Field,
@@ -21,6 +27,8 @@ import {
   Int64,
   List,
   Schema,
+  SchemaLike,
+  Type,
   Uint8,
   Utf8,
   makeArrowTable,
@@ -39,7 +47,6 @@ import {
   Operator,
   instanceOfFullTextQuery,
 } from "../lancedb/query";
-import exp = require("constants");

 describe.each([arrow15, arrow16, arrow17, arrow18])(
   "Given a table",
@@ -212,8 +219,7 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
       },
     );

-    // TODO: https://github.com/lancedb/lancedb/issues/1832
-    it.skip("should be able to omit nullable fields", async () => {
+    it("should be able to omit nullable fields", async () => {
       const db = await connect(tmpDir.name);
       const schema = new arrow.Schema([
         new arrow.Field(
@@ -237,23 +243,36 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
       await table.add([data3]);

       let res = await table.query().limit(10).toArray();
-      const resVector = res.map((r) => r.get("vector").toArray());
+      const resVector = res.map((r) =>
+        r.vector ? Array.from(r.vector) : null,
+      );
       expect(resVector).toEqual([null, data2.vector, data3.vector]);
-      const resItem = res.map((r) => r.get("item").toArray());
+      const resItem = res.map((r) => r.item);
       expect(resItem).toEqual(["foo", null, "bar"]);
-      const resPrice = res.map((r) => r.get("price").toArray());
+      const resPrice = res.map((r) => r.price);
       expect(resPrice).toEqual([10.0, 2.0, 3.0]);

       const data4 = { item: "foo" };
       // We can't omit a column if it's not nullable
-      await expect(table.add([data4])).rejects.toThrow("Invalid user input");
+      await expect(table.add([data4])).rejects.toThrow(
+        "Append with different schema",
+      );

       // But we can alter columns to make them nullable
       await table.alterColumns([{ path: "price", nullable: true }]);
       await table.add([data4]);

-      res = (await table.query().limit(10).toArray()).map((r) => r.toJSON());
-      expect(res).toEqual([data1, data2, data3, data4]);
+      res = (await table.query().limit(10).toArray()).map((r) => ({
+        ...r.toJSON(),
+        vector: r.vector ? Array.from(r.vector) : null,
+      }));
+      // Rust fills missing nullable fields with null
+      expect(res).toEqual([
+        { ...data1, vector: null },
+        { ...data2, item: null },
+        data3,
+        { ...data4, price: null, vector: null },
+      ]);
     });

     it("should be able to insert nullable data for non-nullable fields", async () => {
@@ -331,6 +350,43 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
       const table = await db.createTable("my_table", data);
       expect(await table.countRows()).toEqual(2);
     });
+
+    it("should allow undefined and omitted nullable vector fields", async () => {
+      // Test for the bug: can't pass undefined or omit vector column
+      const db = await connect("memory://");
+      const schema = new arrow.Schema([
+        new arrow.Field("id", new arrow.Int32(), true),
+        new arrow.Field(
+          "vector",
+          new arrow.FixedSizeList(
+            32,
+            new arrow.Field("item", new arrow.Float32(), true),
+          ),
+          true, // nullable = true
+        ),
+      ]);
+      const table = await db.createEmptyTable("test_table", schema);
+
+      // Should not throw error for undefined value
+      await table.add([{ id: 0, vector: undefined }]);
+
+      // Should not throw error for omitted field
+      await table.add([{ id: 1 }]);
+
+      // Should still work for null
+      await table.add([{ id: 2, vector: null }]);
+
+      // Should still work for actual vector
+      const testVector = new Array(32).fill(0.5);
+      await table.add([{ id: 3, vector: testVector }]);
+      expect(await table.countRows()).toEqual(4);
+
+      const res = await table.query().limit(10).toArray();
+      const resVector = res.map((r) =>
+        r.vector ? Array.from(r.vector) : null,
+      );
+      expect(resVector).toEqual([null, null, null, testVector]);
+    });
   },
 );

@@ -488,6 +544,32 @@ describe("merge insert", () => {
         .execute(newData, { timeoutMs: 0 }),
     ).rejects.toThrow("merge insert timed out");
   });
+
+  test("useIndex", async () => {
+    const newData = [
+      { a: 2, b: "x" },
+      { a: 4, b: "z" },
+    ];
+
+    // Test with useIndex(true) - should work fine
+    const result1 = await table
+      .mergeInsert("a")
+      .whenNotMatchedInsertAll()
+      .useIndex(true)
+      .execute(newData);
+
+    expect(result1.numInsertedRows).toBe(1); // Only a=4 should be inserted
+
+    // Test with useIndex(false) - should also work fine
+    const newData2 = [{ a: 5, b: "w" }];
+    const result2 = await table
+      .mergeInsert("a")
+      .whenNotMatchedInsertAll()
+      .useIndex(false)
+      .execute(newData2);
+
+    expect(result2.numInsertedRows).toBe(1); // a=5 should be inserted
+  });
 });

 describe("When creating an index", () => {
@@ -857,6 +939,40 @@ describe("When creating an index", () => {
     expect(stats).toBeUndefined();
   });
+
+  test("should support name and train parameters", async () => {
+    // Test with custom name
+    await tbl.createIndex("vec", {
+      config: Index.ivfPq({ numPartitions: 4 }),
+      name: "my_custom_vector_index",
+    });
+
+    const indices = await tbl.listIndices();
+    expect(indices).toHaveLength(1);
+    expect(indices[0].name).toBe("my_custom_vector_index");
+
+    // Test scalar index with train=false
+    await tbl.createIndex("id", {
+      config: Index.btree(),
+      name: "btree_empty",
+      train: false,
+    });
+
+    const allIndices = await tbl.listIndices();
+    expect(allIndices).toHaveLength(2);
+    expect(allIndices.some((idx) => idx.name === "btree_empty")).toBe(true);
+
+    // Test with both name and train=true (use tags column)
+    await tbl.createIndex("tags", {
+      config: Index.labelList(),
+      name: "tags_trained",
+      train: true,
+    });
+
+    const finalIndices = await tbl.listIndices();
+    expect(finalIndices).toHaveLength(3);
+    expect(finalIndices.some((idx) => idx.name === "tags_trained")).toBe(true);
+  });
+
   test("create ivf_flat with binary vectors", async () => {
     const db = await connect(tmpDir.name);
     const binarySchema = new Schema([
@@ -1395,7 +1511,9 @@ describe("when optimizing a dataset", () => {

   it("delete unverified", async () => {
     const version = await table.version();
-    const versionFile = `${tmpDir.name}/${table.name}.lance/_versions/${version - 1}.manifest`;
+    const versionFile = `${tmpDir.name}/${table.name}.lance/_versions/${
+      version - 1
+    }.manifest`;
     fs.rmSync(versionFile);

     let stats = await table.optimize({ deleteUnverified: false });
@@ -1909,3 +2027,52 @@ describe("column name options", () => {
     expect(results2.length).toBe(10);
   });
 });
+
+describe("when creating an empty table", () => {
+  let con: Connection;
+  beforeEach(async () => {
+    const tmpDir = tmp.dirSync({ unsafeCleanup: true });
+    con = await connect(tmpDir.name);
+  });
+  afterEach(() => {
+    con.close();
+  });
+
+  it("can create an empty table from an arrow Schema", async () => {
+    const schema = new Schema([
+      new Field("id", new Int64()),
+      new Field("vector", new Float64()),
+    ]);
+    const table = await con.createEmptyTable("test", schema);
+    const actualSchema = await table.schema();
+    expect(actualSchema.fields[0].type.typeId).toBe(Type.Int);
+    expect((actualSchema.fields[0].type as Int64).bitWidth).toBe(64);
+    expect(actualSchema.fields[1].type.typeId).toBe(Type.Float);
+    expect((actualSchema.fields[1].type as Float64).precision).toBe(2);
+  });
+
+  it("can create an empty table from schema that specifies field types by name", async () => {
+    const schemaLike = {
+      fields: [
+        {
+          name: "id",
+          type: "int64",
+          nullable: true,
+        },
+        {
+          name: "vector",
+          type: "float64",
+          nullable: true,
+        },
+      ],
+      metadata: new Map(),
+      names: ["id", "vector"],
+    } satisfies SchemaLike;
+    const table = await con.createEmptyTable("test", schemaLike);
+    const actualSchema = await table.schema();
+    expect(actualSchema.fields[0].type.typeId).toBe(Type.Int);
+    expect((actualSchema.fields[0].type as Int64).bitWidth).toBe(64);
+    expect(actualSchema.fields[1].type.typeId).toBe(Type.Float);
+    expect((actualSchema.fields[1].type as Float64).precision).toBe(2);
+  });
+});
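The reworked expectations above pin down the new behavior: rows that omit a nullable column now come back with null for that column, while appending a row that omits a non-nullable column fails with "Append with different schema". A compact sketch of that flow (illustrative table and column names, assuming the published @lancedb/lancedb package):

import { connect } from "@lancedb/lancedb";

async function demoNullableOmission(dir: string) {
  const db = await connect(dir);
  const table = await db.openTable("my_table");

  // Make "price" nullable, then insert a row that omits it entirely.
  await table.alterColumns([{ path: "price", nullable: true }]);
  await table.add([{ item: "foo" }]);

  // Missing nullable fields are filled with null when read back.
  const rows = await table.query().limit(10).toArray();
  console.log(rows[rows.length - 1].price); // null
}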
@@ -48,6 +48,7 @@
         "noUnreachableSuper": "error",
         "noUnsafeFinally": "error",
         "noUnsafeOptionalChaining": "error",
+        "noUnusedImports": "error",
         "noUnusedLabels": "error",
         "noUnusedVariables": "warn",
         "useIsNan": "error",
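Note on the rule added above: with "noUnusedImports" set to "error", Biome fails the check whenever an import is never referenced. That is exactly the kind of dead import removed from the test file earlier in this diff. A tiny illustration (hypothetical file, not from the repo):

// With "noUnusedImports": "error", `biome check` reports this import,
// because `exp` is never used anywhere in the module.
import exp = require("constants");

export function add(a: number, b: number): number {
  return a + b;
}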
@@ -41,7 +41,6 @@ import {
   vectorFromArray as badVectorFromArray,
   makeBuilder,
   makeData,
-  makeTable,
 } from "apache-arrow";
 import { Buffers } from "apache-arrow/data";
 import { type EmbeddingFunction } from "./embedding/embedding_function";
@@ -74,7 +73,7 @@ export type FieldLike =
   | {
       type: string;
       name: string;
-      nullable?: boolean;
+      nullable: boolean;
       metadata?: Map<string, string>;
     };

@@ -279,7 +278,7 @@ export class MakeArrowTableOptions {
 }

 /**
- * An enhanced version of the {@link makeTable} function from Apache Arrow
+ * An enhanced version of the apache-arrow makeTable function from Apache Arrow
  * that supports nested fields and embeddings columns.
  *
  * (typically you do not need to call this function. It will be called automatically
@@ -512,7 +511,11 @@ function* rowPathsAndValues(
     if (isObject(value)) {
       yield* rowPathsAndValues(value, [...basePath, key]);
     } else {
-      yield [[...basePath, key], value];
+      // Skip undefined values - they should be treated the same as missing fields
+      // for embedding function purposes
+      if (value !== undefined) {
+        yield [[...basePath, key], value];
+      }
     }
   }
 }
@@ -701,7 +704,7 @@ function transposeData(
       }
       return current;
     });
-    return makeVector(values, field.type);
+    return makeVector(values, field.type, undefined, field.nullable);
   }
 }

@@ -748,9 +751,30 @@ function makeVector(
   values: unknown[],
   type?: DataType,
   stringAsDictionary?: boolean,
+  nullable?: boolean,
   // biome-ignore lint/suspicious/noExplicitAny: skip
 ): Vector<any> {
   if (type !== undefined) {
+    // Convert undefined values to null for nullable fields
+    if (nullable) {
+      values = values.map((v) => (v === undefined ? null : v));
+    }
+
+    // workaround for: https://github.com/apache/arrow-js/issues/68
+    if (DataType.isBool(type)) {
+      const hasNonNullValue = values.some((v) => v !== null && v !== undefined);
+      if (!hasNonNullValue) {
+        const nullBitmap = new Uint8Array(Math.ceil(values.length / 8));
+        const data = makeData({
+          type: type,
+          length: values.length,
+          nullCount: values.length,
+          nullBitmap,
+        });
+        return arrowMakeVector(data);
+      }
+    }
+
     // No need for inference, let Arrow create it
     if (type instanceof Int) {
       if (DataType.isInt(type) && type.bitWidth === 64) {
@@ -875,7 +899,12 @@ async function applyEmbeddingsFromMetadata(
   for (const field of schema.fields) {
     if (!(field.name in columns)) {
       const nullValues = new Array(table.numRows).fill(null);
-      columns[field.name] = makeVector(nullValues, field.type);
+      columns[field.name] = makeVector(
+        nullValues,
+        field.type,
+        undefined,
+        field.nullable,
+      );
     }
   }

@@ -939,7 +968,12 @@ async function applyEmbeddings<T>(
   } else if (schema != null) {
     const destField = schema.fields.find((f) => f.name === destColumn);
     if (destField != null) {
-      newColumns[destColumn] = makeVector([], destField.type);
+      newColumns[destColumn] = makeVector(
+        [],
+        destField.type,
+        undefined,
+        destField.nullable,
+      );
     } else {
       throw new Error(
         `Attempt to apply embeddings to an empty table failed because schema was missing embedding column '${destColumn}'`,
@@ -1251,19 +1285,36 @@ function validateSchemaEmbeddings(
     if (isFixedSizeList(field.type)) {
       field = sanitizeField(field);
       if (data.length !== 0 && data?.[0]?.[field.name] === undefined) {
+        // Check if there's an embedding function registered for this field
+        let hasEmbeddingFunction = false;
+
+        // Check schema metadata for embedding functions
         if (schema.metadata.has("embedding_functions")) {
           const embeddings = JSON.parse(
             schema.metadata.get("embedding_functions")!,
           );
-          if (
-            // biome-ignore lint/suspicious/noExplicitAny: we don't know the type of `f`
-            embeddings.find((f: any) => f["vectorColumn"] === field.name) ===
-              undefined
-          ) {
+          // biome-ignore lint/suspicious/noExplicitAny: we don't know the type of `f`
+          if (embeddings.find((f: any) => f["vectorColumn"] === field.name)) {
+            hasEmbeddingFunction = true;
+          }
+        }
+
+        // Check passed embedding function parameter
+        if (embeddings && embeddings.vectorColumn === field.name) {
+          hasEmbeddingFunction = true;
+        }
+
+        // If the field is nullable AND there's no embedding function, allow undefined/omitted values
+        if (field.nullable && !hasEmbeddingFunction) {
+          fields.push(field);
+        } else {
+          // Either not nullable OR has embedding function - require explicit values
+          if (hasEmbeddingFunction) {
+            // Don't add to missingEmbeddingFields since this is expected to be filled by embedding function
+            fields.push(field);
+          } else {
             missingEmbeddingFields.push(field);
           }
-        } else {
-          missingEmbeddingFields.push(field);
         }
       } else {
         fields.push(field);
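The all-null Bool branch added to makeVector above works around https://github.com/apache/arrow-js/issues/68: building a Bool vector from values that are all null misbehaves in arrow-js, so the code constructs the underlying Data with an explicit all-zero null bitmap instead of going through the builder. A standalone sketch of the same trick (assuming apache-arrow is installed):

import { Bool, makeData, makeVector } from "apache-arrow";

// Build an all-null Bool vector without going through the builder.
const length = 5;
const nullBitmap = new Uint8Array(Math.ceil(length / 8)); // all bits 0 => all null
const data = makeData({
  type: new Bool(),
  length,
  nullCount: length,
  nullBitmap,
});
const vector = makeVector(data);
console.log(vector.nullCount); // 5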
@@ -3,7 +3,6 @@

 import {
   Data,
-  Schema,
   SchemaLike,
   TableLike,
   fromTableToStreamBuffer,
@@ -159,17 +158,33 @@ export abstract class Connection {
    *
    * Tables will be returned in lexicographical order.
    * @param {Partial<TableNamesOptions>} options - options to control the
-   *     paging / start point
+   *     paging / start point (backwards compatibility)
    *
    */
   abstract tableNames(options?: Partial<TableNamesOptions>): Promise<string[]>;
+  /**
+   * List all the table names in this database.
+   *
+   * Tables will be returned in lexicographical order.
+   * @param {string[]} namespace - The namespace to list tables from (defaults to root namespace)
+   * @param {Partial<TableNamesOptions>} options - options to control the
+   *     paging / start point
+   *
+   */
+  abstract tableNames(
+    namespace?: string[],
+    options?: Partial<TableNamesOptions>,
+  ): Promise<string[]>;

   /**
    * Open a table in the database.
    * @param {string} name - The name of the table
+   * @param {string[]} namespace - The namespace of the table (defaults to root namespace)
+   * @param {Partial<OpenTableOptions>} options - Additional options
    */
   abstract openTable(
     name: string,
+    namespace?: string[],
     options?: Partial<OpenTableOptions>,
   ): Promise<Table>;
@@ -178,6 +193,7 @@ export abstract class Connection {
    * @param {object} options - The options object.
    * @param {string} options.name - The name of the table.
    * @param {Data} options.data - Non-empty Array of Records to be inserted into the table
+   * @param {string[]} namespace - The namespace to create the table in (defaults to root namespace)
    *
    */
   abstract createTable(
@@ -185,40 +201,99 @@ export abstract class Connection {
       name: string;
       data: Data;
     } & Partial<CreateTableOptions>,
+    namespace?: string[],
   ): Promise<Table>;
   /**
    * Creates a new Table and initialize it with new data.
    * @param {string} name - The name of the table.
    * @param {Record<string, unknown>[] | TableLike} data - Non-empty Array of Records
    *     to be inserted into the table
+   * @param {Partial<CreateTableOptions>} options - Additional options (backwards compatibility)
    */
   abstract createTable(
     name: string,
     data: Record<string, unknown>[] | TableLike,
     options?: Partial<CreateTableOptions>,
   ): Promise<Table>;
+  /**
+   * Creates a new Table and initialize it with new data.
+   * @param {string} name - The name of the table.
+   * @param {Record<string, unknown>[] | TableLike} data - Non-empty Array of Records
+   *     to be inserted into the table
+   * @param {string[]} namespace - The namespace to create the table in (defaults to root namespace)
+   * @param {Partial<CreateTableOptions>} options - Additional options
+   */
+  abstract createTable(
+    name: string,
+    data: Record<string, unknown>[] | TableLike,
+    namespace?: string[],
+    options?: Partial<CreateTableOptions>,
+  ): Promise<Table>;

   /**
    * Creates a new empty Table
    * @param {string} name - The name of the table.
    * @param {Schema} schema - The schema of the table
+   * @param {Partial<CreateTableOptions>} options - Additional options (backwards compatibility)
    */
   abstract createEmptyTable(
     name: string,
     schema: import("./arrow").SchemaLike,
     options?: Partial<CreateTableOptions>,
   ): Promise<Table>;
+  /**
+   * Creates a new empty Table
+   * @param {string} name - The name of the table.
+   * @param {Schema} schema - The schema of the table
+   * @param {string[]} namespace - The namespace to create the table in (defaults to root namespace)
+   * @param {Partial<CreateTableOptions>} options - Additional options
+   */
+  abstract createEmptyTable(
+    name: string,
+    schema: import("./arrow").SchemaLike,
+    namespace?: string[],
+    options?: Partial<CreateTableOptions>,
+  ): Promise<Table>;

   /**
    * Drop an existing table.
    * @param {string} name The name of the table to drop.
+   * @param {string[]} namespace The namespace of the table (defaults to root namespace).
    */
-  abstract dropTable(name: string): Promise<void>;
+  abstract dropTable(name: string, namespace?: string[]): Promise<void>;

   /**
    * Drop all tables in the database.
+   * @param {string[]} namespace The namespace to drop tables from (defaults to root namespace).
    */
-  abstract dropAllTables(): Promise<void>;
+  abstract dropAllTables(namespace?: string[]): Promise<void>;
+
+  /**
+   * Clone a table from a source table.
+   *
+   * A shallow clone creates a new table that shares the underlying data files
+   * with the source table but has its own independent manifest. This allows
+   * both the source and cloned tables to evolve independently while initially
+   * sharing the same data, deletion, and index files.
+   *
+   * @param {string} targetTableName - The name of the target table to create.
+   * @param {string} sourceUri - The URI of the source table to clone from.
+   * @param {object} options - Clone options.
+   * @param {string[]} options.targetNamespace - The namespace for the target table (defaults to root namespace).
+   * @param {number} options.sourceVersion - The version of the source table to clone.
+   * @param {string} options.sourceTag - The tag of the source table to clone.
+   * @param {boolean} options.isShallow - Whether to perform a shallow clone (defaults to true).
+   */
+  abstract cloneTable(
+    targetTableName: string,
+    sourceUri: string,
+    options?: {
+      targetNamespace?: string[];
+      sourceVersion?: number;
+      sourceTag?: string;
+      isShallow?: boolean;
+    },
+  ): Promise<Table>;
 }

 /** @hideconstructor */
@@ -243,16 +318,39 @@ export class LocalConnection extends Connection {
     return this.inner.display();
   }

-  async tableNames(options?: Partial<TableNamesOptions>): Promise<string[]> {
-    return this.inner.tableNames(options?.startAfter, options?.limit);
+  async tableNames(
+    namespaceOrOptions?: string[] | Partial<TableNamesOptions>,
+    options?: Partial<TableNamesOptions>,
+  ): Promise<string[]> {
+    // Detect if first argument is namespace array or options object
+    let namespace: string[] | undefined;
+    let tableNamesOptions: Partial<TableNamesOptions> | undefined;
+
+    if (Array.isArray(namespaceOrOptions)) {
+      // First argument is namespace array
+      namespace = namespaceOrOptions;
+      tableNamesOptions = options;
+    } else {
+      // First argument is options object (backwards compatibility)
+      namespace = undefined;
+      tableNamesOptions = namespaceOrOptions;
+    }
+
+    return this.inner.tableNames(
+      namespace ?? [],
+      tableNamesOptions?.startAfter,
+      tableNamesOptions?.limit,
+    );
   }

   async openTable(
     name: string,
+    namespace?: string[],
     options?: Partial<OpenTableOptions>,
   ): Promise<Table> {
     const innerTable = await this.inner.openTable(
       name,
+      namespace ?? [],
       cleanseStorageOptions(options?.storageOptions),
       options?.indexCacheSize,
     );
@@ -260,6 +358,28 @@ export class LocalConnection extends Connection {
     return new LocalTable(innerTable);
   }

+  async cloneTable(
+    targetTableName: string,
+    sourceUri: string,
+    options?: {
+      targetNamespace?: string[];
+      sourceVersion?: number;
+      sourceTag?: string;
+      isShallow?: boolean;
+    },
+  ): Promise<Table> {
+    const innerTable = await this.inner.cloneTable(
+      targetTableName,
+      sourceUri,
+      options?.targetNamespace ?? [],
+      options?.sourceVersion ?? null,
+      options?.sourceTag ?? null,
+      options?.isShallow ?? true,
+    );
+
+    return new LocalTable(innerTable);
+  }
+
   private getStorageOptions(
     options?: Partial<CreateTableOptions>,
   ): Record<string, string> | undefined {
@@ -286,14 +406,44 @@ export class LocalConnection extends Connection {
     nameOrOptions:
       | string
       | ({ name: string; data: Data } & Partial<CreateTableOptions>),
-    data?: Record<string, unknown>[] | TableLike,
+    dataOrNamespace?: Record<string, unknown>[] | TableLike | string[],
+    namespaceOrOptions?: string[] | Partial<CreateTableOptions>,
     options?: Partial<CreateTableOptions>,
   ): Promise<Table> {
     if (typeof nameOrOptions !== "string" && "name" in nameOrOptions) {
-      const { name, data, ...options } = nameOrOptions;
-      return this.createTable(name, data, options);
+      // First overload: createTable(options, namespace?)
+      const { name, data, ...createOptions } = nameOrOptions;
+      const namespace = dataOrNamespace as string[] | undefined;
+      return this._createTableImpl(name, data, namespace, createOptions);
     }
+
+    // Second overload: createTable(name, data, namespace?, options?)
+    const name = nameOrOptions;
+    const data = dataOrNamespace as Record<string, unknown>[] | TableLike;
+
+    // Detect if third argument is namespace array or options object
+    let namespace: string[] | undefined;
+    let createOptions: Partial<CreateTableOptions> | undefined;
+
+    if (Array.isArray(namespaceOrOptions)) {
+      // Third argument is namespace array
+      namespace = namespaceOrOptions;
+      createOptions = options;
+    } else {
+      // Third argument is options object (backwards compatibility)
+      namespace = undefined;
+      createOptions = namespaceOrOptions;
+    }
+
+    return this._createTableImpl(name, data, namespace, createOptions);
+  }
+
+  private async _createTableImpl(
+    name: string,
+    data: Data,
+    namespace?: string[],
+    options?: Partial<CreateTableOptions>,
+  ): Promise<Table> {
     if (data === undefined) {
       throw new Error("data is required");
     }
@@ -302,9 +452,10 @@ export class LocalConnection extends Connection {
     const storageOptions = this.getStorageOptions(options);

     const innerTable = await this.inner.createTable(
-      nameOrOptions,
+      name,
       buf,
       mode,
+      namespace ?? [],
       storageOptions,
     );

@@ -314,39 +465,55 @@ export class LocalConnection extends Connection {
   async createEmptyTable(
     name: string,
     schema: import("./arrow").SchemaLike,
+    namespaceOrOptions?: string[] | Partial<CreateTableOptions>,
     options?: Partial<CreateTableOptions>,
   ): Promise<Table> {
-    let mode: string = options?.mode ?? "create";
-    const existOk = options?.existOk ?? false;
+    // Detect if third argument is namespace array or options object
+    let namespace: string[] | undefined;
+    let createOptions: Partial<CreateTableOptions> | undefined;
+
+    if (Array.isArray(namespaceOrOptions)) {
+      // Third argument is namespace array
+      namespace = namespaceOrOptions;
+      createOptions = options;
+    } else {
+      // Third argument is options object (backwards compatibility)
+      namespace = undefined;
+      createOptions = namespaceOrOptions;
+    }
+
+    let mode: string = createOptions?.mode ?? "create";
+    const existOk = createOptions?.existOk ?? false;
+
     if (mode === "create" && existOk) {
       mode = "exist_ok";
     }
     let metadata: Map<string, string> | undefined = undefined;
-    if (options?.embeddingFunction !== undefined) {
-      const embeddingFunction = options.embeddingFunction;
+    if (createOptions?.embeddingFunction !== undefined) {
+      const embeddingFunction = createOptions.embeddingFunction;
       const registry = getRegistry();
       metadata = registry.getTableMetadata([embeddingFunction]);
     }

-    const storageOptions = this.getStorageOptions(options);
+    const storageOptions = this.getStorageOptions(createOptions);
     const table = makeEmptyTable(schema, metadata);
     const buf = await fromTableToBuffer(table);
     const innerTable = await this.inner.createEmptyTable(
       name,
       buf,
       mode,
+      namespace ?? [],
       storageOptions,
     );
     return new LocalTable(innerTable);
   }

-  async dropTable(name: string): Promise<void> {
-    return this.inner.dropTable(name);
+  async dropTable(name: string, namespace?: string[]): Promise<void> {
+    return this.inner.dropTable(name, namespace ?? []);
   }

-  async dropAllTables(): Promise<void> {
-    return this.inner.dropAllTables();
+  async dropAllTables(namespace?: string[]): Promise<void> {
+    return this.inner.dropAllTables(namespace ?? []);
   }
 }
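Taken together, the Connection changes above thread an optional namespace (a string[] path, defaulting to the root namespace) through tableNames, openTable, createTable, createEmptyTable, dropTable, and dropAllTables, and add a cloneTable entry point for shallow clones. A short usage sketch of the new surface (illustrative names and paths, assuming the published @lancedb/lancedb package):

import { connect } from "@lancedb/lancedb";

async function demoNamespaces(dir: string) {
  const db = await connect(dir);

  // The extra positional argument selects the namespace; omitting it, or
  // passing an options object instead (backwards compatibility), targets
  // the root namespace.
  await db.createTable("events", [{ id: 1 }], ["analytics"]);

  console.log(await db.tableNames(["analytics"])); // ["events"]
  await db.dropTable("events", ["analytics"]);
}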
nodejs/lancedb/header.ts (new file, 253 lines)
@@ -0,0 +1,253 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

/**
 * Header providers for LanceDB remote connections.
 *
 * This module provides a flexible header management framework for LanceDB remote
 * connections, allowing users to implement custom header strategies for
 * authentication, request tracking, custom metadata, or any other header-based
 * requirements.
 *
 * @module header
 */

/**
 * Abstract base class for providing custom headers for each request.
 *
 * Users can implement this interface to provide dynamic headers for various purposes
 * such as authentication (OAuth tokens, API keys), request tracking (correlation IDs),
 * custom metadata, or any other header-based requirements. The provider is called
 * before each request to ensure fresh header values are always used.
 *
 * @example
 * Simple JWT token provider:
 * ```typescript
 * class JWTProvider extends HeaderProvider {
 *   constructor(private token: string) {
 *     super();
 *   }
 *
 *   getHeaders(): Record<string, string> {
 *     return { authorization: `Bearer ${this.token}` };
 *   }
 * }
 * ```
 *
 * @example
 * Provider with request tracking:
 * ```typescript
 * class RequestTrackingProvider extends HeaderProvider {
 *   constructor(private sessionId: string) {
 *     super();
 *   }
 *
 *   getHeaders(): Record<string, string> {
 *     return {
 *       "X-Session-Id": this.sessionId,
 *       "X-Request-Id": `req-${Date.now()}`
 *     };
 *   }
 * }
 * ```
 */
export abstract class HeaderProvider {
  /**
   * Get the latest headers to be added to requests.
   *
   * This method is called before each request to the remote LanceDB server.
   * Implementations should return headers that will be merged with existing headers.
   *
   * @returns Dictionary of header names to values to add to the request.
   * @throws If unable to fetch headers, the exception will be propagated and the request will fail.
   */
  abstract getHeaders(): Record<string, string>;
}

/**
 * Example implementation: A simple header provider that returns static headers.
 *
 * This is an example implementation showing how to create a HeaderProvider
 * for cases where headers don't change during the session.
 *
 * @example
 * ```typescript
 * const provider = new StaticHeaderProvider({
 *   authorization: "Bearer my-token",
 *   "X-Custom-Header": "custom-value"
 * });
 * const headers = provider.getHeaders();
 * // Returns: {authorization: 'Bearer my-token', 'X-Custom-Header': 'custom-value'}
 * ```
 */
export class StaticHeaderProvider extends HeaderProvider {
  private _headers: Record<string, string>;

  /**
   * Initialize with static headers.
   * @param headers - Headers to return for every request.
   */
  constructor(headers: Record<string, string>) {
    super();
    this._headers = { ...headers };
  }

  /**
   * Return the static headers.
   * @returns Copy of the static headers.
   */
  getHeaders(): Record<string, string> {
    return { ...this._headers };
  }
}

/**
 * Token response from OAuth provider.
 * @public
 */
export interface TokenResponse {
  accessToken: string;
  expiresIn?: number;
}

/**
 * Example implementation: OAuth token provider with automatic refresh.
 *
 * This is an example implementation showing how to manage OAuth tokens
 * with automatic refresh when they expire.
 *
 * @example
 * ```typescript
 * async function fetchToken(): Promise<TokenResponse> {
 *   const response = await fetch("https://oauth.example.com/token", {
 *     method: "POST",
 *     body: JSON.stringify({
 *       grant_type: "client_credentials",
 *       client_id: "your-client-id",
 *       client_secret: "your-client-secret"
 *     }),
 *     headers: { "Content-Type": "application/json" }
 *   });
 *   const data = await response.json();
 *   return {
 *     accessToken: data.access_token,
 *     expiresIn: data.expires_in
 *   };
 * }
 *
 * const provider = new OAuthHeaderProvider(fetchToken);
 * const headers = provider.getHeaders();
 * // Returns: {"authorization": "Bearer <your-token>"}
 * ```
 */
export class OAuthHeaderProvider extends HeaderProvider {
  private _tokenFetcher: () => Promise<TokenResponse> | TokenResponse;
  private _refreshBufferSeconds: number;
  private _currentToken: string | null = null;
  private _tokenExpiresAt: number | null = null;
  private _refreshPromise: Promise<void> | null = null;

  /**
   * Initialize the OAuth provider.
   * @param tokenFetcher - Function to fetch new tokens. Should return object with 'accessToken' and optionally 'expiresIn'.
   * @param refreshBufferSeconds - Seconds before expiry to refresh token. Default 300 (5 minutes).
   */
  constructor(
    tokenFetcher: () => Promise<TokenResponse> | TokenResponse,
    refreshBufferSeconds: number = 300,
  ) {
    super();
    this._tokenFetcher = tokenFetcher;
    this._refreshBufferSeconds = refreshBufferSeconds;
  }

  /**
   * Check if token needs refresh.
   */
  private _needsRefresh(): boolean {
    if (this._currentToken === null) {
      return true;
    }

    if (this._tokenExpiresAt === null) {
      // No expiration info, assume token is valid
      return false;
    }

    // Refresh if we're within the buffer time of expiration
    const now = Date.now() / 1000;
    return now >= this._tokenExpiresAt - this._refreshBufferSeconds;
  }

  /**
   * Refresh the token if it's expired or close to expiring.
   */
  private async _refreshTokenIfNeeded(): Promise<void> {
    if (!this._needsRefresh()) {
      return;
    }

    // If refresh is already in progress, wait for it
    if (this._refreshPromise) {
      await this._refreshPromise;
      return;
    }

    // Start refresh
    this._refreshPromise = (async () => {
      try {
        const tokenData = await this._tokenFetcher();

        this._currentToken = tokenData.accessToken;
        if (!this._currentToken) {
          throw new Error("Token fetcher did not return 'accessToken'");
        }

        // Set expiration if provided
        if (tokenData.expiresIn) {
          this._tokenExpiresAt = Date.now() / 1000 + tokenData.expiresIn;
        } else {
          // Token doesn't expire or expiration unknown
          this._tokenExpiresAt = null;
        }
      } finally {
        this._refreshPromise = null;
      }
    })();

    await this._refreshPromise;
  }

  /**
   * Get OAuth headers, refreshing token if needed.
   * Note: This is synchronous for now as the Rust implementation expects sync.
   * In a real implementation, this would need to handle async properly.
   * @returns Headers with Bearer token authorization.
   * @throws If unable to fetch or refresh token.
   */
  getHeaders(): Record<string, string> {
    // For simplicity in this example, we assume the token is already fetched
    // In a real implementation, this would need to handle the async nature properly
    if (!this._currentToken && !this._refreshPromise) {
      // Synchronously trigger refresh - this is a limitation of the current implementation
      throw new Error(
        "Token not initialized. Call refreshToken() first or use async initialization.",
      );
    }

    if (!this._currentToken) {
      throw new Error("Failed to obtain OAuth token");
    }

    return { authorization: `Bearer ${this._currentToken}` };
  }

  /**
   * Manually refresh the token.
   * Call this before using getHeaders() to ensure token is available.
   */
  async refreshToken(): Promise<void> {
    this._currentToken = null; // Force refresh
    await this._refreshTokenIfNeeded();
  }
}
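A short sketch of driving the OAuthHeaderProvider above: since getHeaders() is synchronous, the token must be primed with refreshToken() first. The token endpoint and response field names are placeholders; the imports assume the exports added to the package index below.

import { OAuthHeaderProvider, TokenResponse } from "@lancedb/lancedb";

async function fetchToken(): Promise<TokenResponse> {
  const response = await fetch("https://oauth.example.com/token", { method: "POST" });
  const data = await response.json();
  return { accessToken: data.access_token, expiresIn: data.expires_in };
}

async function main() {
  const provider = new OAuthHeaderProvider(fetchToken);
  await provider.refreshToken(); // prime the token; getHeaders() is sync
  console.log(provider.getHeaders()); // { authorization: "Bearer ..." }
}
main();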
@@ -10,9 +10,15 @@ import {
 import {
   ConnectionOptions,
   Connection as LanceDbConnection,
+  JsHeaderProvider as NativeJsHeaderProvider,
   Session,
 } from "./native.js";

+import { HeaderProvider } from "./header";
+
+// Re-export native header provider for use with connectWithHeaderProvider
+export { JsHeaderProvider as NativeJsHeaderProvider } from "./native.js";
+
 export {
   AddColumnsSql,
   ConnectionOptions,
@@ -21,6 +27,7 @@ export {
   ClientConfig,
   TimeoutConfig,
   RetryConfig,
+  TlsConfig,
   OptimizeStats,
   CompactionStats,
   RemovalStats,
@@ -93,6 +100,13 @@ export {
   ColumnAlteration,
 } from "./table";
+
+export {
+  HeaderProvider,
+  StaticHeaderProvider,
+  OAuthHeaderProvider,
+  TokenResponse,
+} from "./header";

 export { MergeInsertBuilder, WriteExecutionOptions } from "./merge";

 export * as embedding from "./embedding";
@@ -131,11 +145,27 @@ export { IntoSql, packBits } from "./util";
  *   {storageOptions: {timeout: "60s"}
  * });
  * ```
+ * @example
+ * Using with a header provider for per-request authentication:
+ * ```ts
+ * const provider = new StaticHeaderProvider({
+ *   "X-API-Key": "my-key"
+ * });
+ * const conn = await connectWithHeaderProvider(
+ *   "db://host:port",
+ *   options,
+ *   provider
+ * );
+ * ```
  */
 export async function connect(
   uri: string,
   options?: Partial<ConnectionOptions>,
   session?: Session,
+  headerProvider?:
+    | HeaderProvider
+    | (() => Record<string, string>)
+    | (() => Promise<Record<string, string>>),
 ): Promise<Connection>;
 /**
  * Connect to a LanceDB instance at the given URI.
@@ -169,18 +199,58 @@ export async function connect(
 ): Promise<Connection>;
 export async function connect(
   uriOrOptions: string | (Partial<ConnectionOptions> & { uri: string }),
-  options?: Partial<ConnectionOptions>,
+  optionsOrSession?: Partial<ConnectionOptions> | Session,
+  sessionOrHeaderProvider?:
+    | Session
+    | HeaderProvider
+    | (() => Record<string, string>)
+    | (() => Promise<Record<string, string>>),
+  headerProvider?:
+    | HeaderProvider
+    | (() => Record<string, string>)
+    | (() => Promise<Record<string, string>>),
 ): Promise<Connection> {
   let uri: string | undefined;
   let finalOptions: Partial<ConnectionOptions> = {};
+  let finalHeaderProvider:
+    | HeaderProvider
+    | (() => Record<string, string>)
+    | (() => Promise<Record<string, string>>)
+    | undefined;
+
   if (typeof uriOrOptions !== "string") {
+    // First overload: connect(options)
     const { uri: uri_, ...opts } = uriOrOptions;
     uri = uri_;
     finalOptions = opts;
   } else {
+    // Second overload: connect(uri, options?, session?, headerProvider?)
     uri = uriOrOptions;
-    finalOptions = options || {};
+
+    // Handle optionsOrSession parameter
+    if (optionsOrSession && "inner" in optionsOrSession) {
+      // Second param is session, so no options provided
+      finalOptions = {};
+    } else {
+      // Second param is options
+      finalOptions = (optionsOrSession as Partial<ConnectionOptions>) || {};
+    }
+
+    // Handle sessionOrHeaderProvider parameter
+    if (
+      sessionOrHeaderProvider &&
+      (typeof sessionOrHeaderProvider === "function" ||
+        "getHeaders" in sessionOrHeaderProvider)
+    ) {
+      // Third param is header provider
+      finalHeaderProvider = sessionOrHeaderProvider as
+        | HeaderProvider
+        | (() => Record<string, string>)
+        | (() => Promise<Record<string, string>>);
+    } else {
+      // Third param is session, header provider is fourth param
+      finalHeaderProvider = headerProvider;
+    }
   }

   if (!uri) {
@@ -191,6 +261,26 @@ export async function connect(
   (<ConnectionOptions>finalOptions).storageOptions = cleanseStorageOptions(
     (<ConnectionOptions>finalOptions).storageOptions,
   );
-  const nativeConn = await LanceDbConnection.new(uri, finalOptions);
+
+  // Create native header provider if one was provided
+  let nativeProvider: NativeJsHeaderProvider | undefined;
+  if (finalHeaderProvider) {
+    if (typeof finalHeaderProvider === "function") {
+      nativeProvider = new NativeJsHeaderProvider(finalHeaderProvider);
+    } else if (
+      finalHeaderProvider &&
+      typeof finalHeaderProvider.getHeaders === "function"
+    ) {
+      nativeProvider = new NativeJsHeaderProvider(async () =>
+        finalHeaderProvider.getHeaders(),
+      );
+    }
+  }
+
+  const nativeConn = await LanceDbConnection.new(
+    uri,
+    finalOptions,
+    nativeProvider,
+  );
   return new LocalConnection(nativeConn);
 }
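A hedged sketch of the widened `connect` overload above, passing a bare function as the provider; the option values and the token helper are placeholders:

```ts
import { connect } from "@lancedb/lancedb";

// Placeholder token source for the sketch.
async function getFreshToken(): Promise<string> {
  return "token-placeholder";
}

// A function-style provider is wrapped in a NativeJsHeaderProvider internally
// and invoked by the native client to build headers for each request.
const conn = await connect(
  "db://host:port",
  { apiKey: "sk-placeholder" }, // assumed remote connection options
  undefined, // no session
  async () => ({ authorization: `Bearer ${await getFreshToken()}` }),
);
```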
@@ -700,5 +700,27 @@ export interface IndexOptions {
   */
  replace?: boolean;
+
+ /**
+  * Timeout in seconds to wait for index creation to complete.
+  *
+  * If not specified, the method will return immediately after starting the index creation.
+  */
  waitTimeoutSeconds?: number;
+
+ /**
+  * Optional custom name for the index.
+  *
+  * If not provided, a default name will be generated based on the column name.
+  */
+ name?: string;
+
+ /**
+  * Whether to train the index with existing data.
+  *
+  * If true (default), the index will be trained with existing data in the table.
+  * If false, the index will be created empty and populated as new data is added.
+  *
+  * Note: This option is only supported for scalar indices. Vector indices always train.
+  */
+ train?: boolean;
 }
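A hedged sketch of the new `IndexOptions` fields in a `createIndex` call; the database path, table, column name, and the `Index.btree()` builder are placeholder assumptions drawn from the package's existing index API:

```ts
import { connect, Index } from "@lancedb/lancedb";

const db = await connect("data/sample-lancedb"); // placeholder path
const tbl = await db.openTable("users");         // placeholder table

// Scalar index with a custom name, created untrained and awaited for 60s.
await tbl.createIndex("userId", {
  config: Index.btree(),  // assumed scalar index builder
  name: "user_id_idx",    // new: custom index name
  train: false,           // new: create empty; scalar indices only
  waitTimeoutSeconds: 60, // block until index creation completes
  replace: true,
});
```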
@@ -70,6 +70,23 @@ export class MergeInsertBuilder {
       this.#schema,
     );
   }
+
+  /**
+   * Controls whether to use indexes for the merge operation.
+   *
+   * When set to `true` (the default), the operation will use an index if available
+   * on the join key for improved performance. When set to `false`, it forces a full
+   * table scan even if an index exists. This can be useful for benchmarking or when
+   * the query optimizer chooses a suboptimal path.
+   *
+   * @param useIndex - Whether to use indices for the merge operation. Defaults to `true`.
+   */
+  useIndex(useIndex: boolean): MergeInsertBuilder {
+    return new MergeInsertBuilder(
+      this.#native.useIndex(useIndex),
+      this.#schema,
+    );
+  }
   /**
    * Executes the merge insert operation
    *
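The new `useIndex` knob slots into the existing merge-insert chain; a hedged sketch, where the table handle, join key, and data are placeholders:

```ts
// Given an open table handle `tbl` and new rows to upsert:
const newRows = [{ id: 1, name: "alice" }]; // placeholder data

// Force a full-table scan for this merge, e.g. to compare against the
// indexed path, by opting out of index use on the join key.
const result = await tbl
  .mergeInsert("id")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .useIndex(false) // default is true
  .execute(newRows);
```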
@@ -326,6 +326,9 @@ export function sanitizeDictionary(typeLike: object) {

 // biome-ignore lint/suspicious/noExplicitAny: skip
 export function sanitizeType(typeLike: unknown): DataType<any> {
+  if (typeof typeLike === "string") {
+    return dataTypeFromName(typeLike);
+  }
   if (typeof typeLike !== "object" || typeLike === null) {
     throw Error("Expected a Type but object was null/undefined");
   }
@@ -447,7 +450,7 @@ export function sanitizeType(typeLike: unknown): DataType<any> {
     case Type.DurationSecond:
       return new DurationSecond();
     default:
-      throw new Error("Unrecoginized type id in schema: " + typeId);
+      throw new Error("Unrecognized type id in schema: " + typeId);
   }
 }

@@ -467,7 +470,15 @@ export function sanitizeField(fieldLike: unknown): Field {
       "The field passed in is missing a `type`/`name`/`nullable` property",
     );
   }
-  const type = sanitizeType(fieldLike.type);
+  let type: DataType;
+  try {
+    type = sanitizeType(fieldLike.type);
+  } catch (error: unknown) {
+    throw Error(
+      `Unable to sanitize type for field: ${fieldLike.name} due to error: ${error}`,
+      { cause: error },
+    );
+  }
   const name = fieldLike.name;
   if (!(typeof name === "string")) {
     throw Error("The field passed in had a non-string `name` property");
@@ -581,3 +592,46 @@ function sanitizeData(
     },
   );
 }
+
+const constructorsByTypeName = {
+  null: () => new Null(),
+  binary: () => new Binary(),
+  utf8: () => new Utf8(),
+  bool: () => new Bool(),
+  int8: () => new Int8(),
+  int16: () => new Int16(),
+  int32: () => new Int32(),
+  int64: () => new Int64(),
+  uint8: () => new Uint8(),
+  uint16: () => new Uint16(),
+  uint32: () => new Uint32(),
+  uint64: () => new Uint64(),
+  float16: () => new Float16(),
+  float32: () => new Float32(),
+  float64: () => new Float64(),
+  datemillisecond: () => new DateMillisecond(),
+  dateday: () => new DateDay(),
+  timenanosecond: () => new TimeNanosecond(),
+  timemicrosecond: () => new TimeMicrosecond(),
+  timemillisecond: () => new TimeMillisecond(),
+  timesecond: () => new TimeSecond(),
+  intervaldaytime: () => new IntervalDayTime(),
+  intervalyearmonth: () => new IntervalYearMonth(),
+  durationnanosecond: () => new DurationNanosecond(),
+  durationmicrosecond: () => new DurationMicrosecond(),
+  durationmillisecond: () => new DurationMillisecond(),
+  durationsecond: () => new DurationSecond(),
+} as const;
+
+type MappableTypeName = keyof typeof constructorsByTypeName;
+
+export function dataTypeFromName(typeName: string): DataType {
+  const normalizedTypeName = typeName.toLowerCase() as MappableTypeName;
+  const _constructor = constructorsByTypeName[normalizedTypeName];
+
+  if (!_constructor) {
+    throw new Error("Unrecognized type name in schema: " + typeName);
+  }
+
+  return _constructor();
+}
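With `sanitizeType` now accepting strings (routed through `dataTypeFromName` above), schema-like objects can spell types by name. A sketch, assuming only the `type`/`name`/`nullable` properties that `sanitizeField` checks for, and an internal import path that may differ:

```ts
import { sanitizeField } from "./sanitize"; // assumed internal module path

// "Int32" and "int32" both resolve via constructorsByTypeName, since the
// lookup lower-cases the name before matching.
const field = sanitizeField({
  name: "age",
  type: "int32", // previously this had to be a DataType instance
  nullable: true,
});
```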
@@ -662,6 +662,8 @@ export class LocalTable extends Table {
       column,
       options?.replace,
       options?.waitTimeoutSeconds,
+      options?.name,
+      options?.train,
     );
   }
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-arm64",
-  "version": "0.21.2",
+  "version": "0.22.2",
   "os": ["darwin"],
   "cpu": ["arm64"],
   "main": "lancedb.darwin-arm64.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-x64",
-  "version": "0.21.2",
+  "version": "0.22.2",
   "os": ["darwin"],
   "cpu": ["x64"],
   "main": "lancedb.darwin-x64.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-gnu",
-  "version": "0.21.2",
+  "version": "0.22.2",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-gnu.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-musl",
-  "version": "0.21.2",
+  "version": "0.22.2",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-musl.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-gnu",
-  "version": "0.21.2",
+  "version": "0.22.2",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-gnu.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-musl",
-  "version": "0.21.2",
+  "version": "0.22.2",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-musl.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-arm64-msvc",
-  "version": "0.21.2",
+  "version": "0.22.2",
   "os": [
     "win32"
   ],
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-x64-msvc",
-  "version": "0.21.2",
+  "version": "0.22.2",
   "os": ["win32"],
   "cpu": ["x64"],
   "main": "lancedb.win32-x64-msvc.node",
228  nodejs/package-lock.json  (generated)
@@ -1,12 +1,12 @@
 {
   "name": "@lancedb/lancedb",
-  "version": "0.21.2",
+  "version": "0.22.2-beta.2",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@lancedb/lancedb",
-      "version": "0.21.2",
+      "version": "0.22.2-beta.2",
       "cpu": [
         "x64",
         "arm64"
@@ -5549,10 +5549,11 @@
       "dev": true
     },
     "node_modules/brace-expansion": {
-      "version": "1.1.11",
-      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
-      "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+      "version": "1.1.12",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
+      "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
       "dev": true,
+      "license": "MIT",
       "dependencies": {
         "balanced-match": "^1.0.0",
         "concat-map": "0.0.1"
@@ -5629,6 +5630,20 @@
       "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
       "dev": true
     },
+    "node_modules/call-bind-apply-helpers": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+      "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+      "devOptional": true,
+      "license": "MIT",
+      "dependencies": {
+        "es-errors": "^1.3.0",
+        "function-bind": "^1.1.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
     "node_modules/camelcase": {
       "version": "5.3.1",
       "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
@@ -6032,6 +6047,21 @@
         "node": ">=6.0.0"
       }
     },
+    "node_modules/dunder-proto": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+      "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+      "devOptional": true,
+      "license": "MIT",
+      "dependencies": {
+        "call-bind-apply-helpers": "^1.0.1",
+        "es-errors": "^1.3.0",
+        "gopd": "^1.2.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
     "node_modules/eastasianwidth": {
       "version": "0.2.0",
       "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
@@ -6071,6 +6101,55 @@
         "is-arrayish": "^0.2.1"
       }
     },
+    "node_modules/es-define-property": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+      "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+      "devOptional": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/es-errors": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+      "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+      "devOptional": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/es-object-atoms": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+      "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+      "devOptional": true,
+      "license": "MIT",
+      "dependencies": {
+        "es-errors": "^1.3.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/es-set-tostringtag": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+      "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
+      "devOptional": true,
+      "license": "MIT",
+      "dependencies": {
+        "es-errors": "^1.3.0",
+        "get-intrinsic": "^1.2.6",
+        "has-tostringtag": "^1.0.2",
+        "hasown": "^2.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
     "node_modules/escalade": {
       "version": "3.1.1",
       "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
@@ -6510,13 +6589,16 @@
       }
     },
     "node_modules/form-data": {
-      "version": "4.0.0",
-      "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
-      "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
+      "version": "4.0.4",
+      "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
+      "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
       "devOptional": true,
+      "license": "MIT",
       "dependencies": {
         "asynckit": "^0.4.0",
         "combined-stream": "^1.0.8",
+        "es-set-tostringtag": "^2.1.0",
+        "hasown": "^2.0.2",
         "mime-types": "^2.1.12"
       },
       "engines": {
@@ -6575,7 +6657,7 @@
       "version": "1.1.2",
       "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
       "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
-      "dev": true,
+      "devOptional": true,
       "funding": {
         "url": "https://github.com/sponsors/ljharb"
       }
@@ -6598,6 +6680,31 @@
         "node": "6.* || 8.* || >= 10.*"
       }
     },
+    "node_modules/get-intrinsic": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+      "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+      "devOptional": true,
+      "license": "MIT",
+      "dependencies": {
+        "call-bind-apply-helpers": "^1.0.2",
+        "es-define-property": "^1.0.1",
+        "es-errors": "^1.3.0",
+        "es-object-atoms": "^1.1.1",
+        "function-bind": "^1.1.2",
+        "get-proto": "^1.0.1",
+        "gopd": "^1.2.0",
+        "has-symbols": "^1.1.0",
+        "hasown": "^2.0.2",
+        "math-intrinsics": "^1.1.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
     "node_modules/get-package-type": {
       "version": "0.1.0",
       "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz",
@@ -6607,6 +6714,20 @@
         "node": ">=8.0.0"
       }
     },
+    "node_modules/get-proto": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+      "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+      "devOptional": true,
+      "license": "MIT",
+      "dependencies": {
+        "dunder-proto": "^1.0.1",
+        "es-object-atoms": "^1.0.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
     "node_modules/get-stream": {
       "version": "6.0.1",
       "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
@@ -6698,6 +6819,19 @@
         "url": "https://github.com/sponsors/sindresorhus"
       }
     },
+    "node_modules/gopd": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+      "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+      "devOptional": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
     "node_modules/graceful-fs": {
       "version": "4.2.11",
       "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
@@ -6724,11 +6858,41 @@
         "node": ">=8"
       }
     },
+    "node_modules/has-symbols": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+      "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+      "devOptional": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/has-tostringtag": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+      "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
+      "devOptional": true,
+      "license": "MIT",
+      "dependencies": {
+        "has-symbols": "^1.0.3"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
     "node_modules/hasown": {
-      "version": "2.0.0",
-      "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz",
-      "integrity": "sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==",
-      "dev": true,
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+      "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+      "devOptional": true,
+      "license": "MIT",
       "dependencies": {
         "function-bind": "^1.1.2"
       },
@@ -7943,6 +8107,16 @@
       "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
       "dev": true
     },
+    "node_modules/math-intrinsics": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+      "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+      "devOptional": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
     "node_modules/md5": {
       "version": "2.3.0",
       "resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz",
@@ -8053,9 +8227,10 @@
       }
     },
     "node_modules/minizlib/node_modules/brace-expansion": {
-      "version": "2.0.1",
-      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
-      "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+      "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
+      "license": "MIT",
       "optional": true,
       "dependencies": {
         "balanced-match": "^1.0.0"
@@ -9201,10 +9376,11 @@
       "dev": true
     },
     "node_modules/tmp": {
-      "version": "0.2.3",
-      "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.3.tgz",
-      "integrity": "sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w==",
+      "version": "0.2.5",
+      "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.5.tgz",
+      "integrity": "sha512-voyz6MApa1rQGUxT3E+BK7/ROe8itEx7vD8/HEvt4xwXucvQ5G5oeEiHkmHZJuBO21RpOf+YYm9MOivj709jow==",
       "dev": true,
+      "license": "MIT",
       "engines": {
        "node": ">=14.14"
       }
@@ -9349,10 +9525,11 @@
       }
     },
     "node_modules/typedoc/node_modules/brace-expansion": {
-      "version": "2.0.1",
-      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
-      "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+      "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
       "dev": true,
+      "license": "MIT",
       "dependencies": {
         "balanced-match": "^1.0.0"
       }
@@ -9602,10 +9779,11 @@
       }
     },
     "node_modules/typescript-eslint/node_modules/brace-expansion": {
-      "version": "2.0.1",
-      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
-      "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+      "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
       "dev": true,
+      "license": "MIT",
       "dependencies": {
         "balanced-match": "^1.0.0"
       }
@@ -11,7 +11,7 @@
   "ann"
 ],
 "private": false,
-"version": "0.21.2",
+"version": "0.22.2",
 "main": "dist/index.js",
 "exports": {
   ".": "./dist/index.js",
@@ -2,12 +2,14 @@
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors

 use std::collections::HashMap;
+use std::sync::Arc;

 use lancedb::database::CreateTableMode;
 use napi::bindgen_prelude::*;
 use napi_derive::*;

 use crate::error::NapiErrorExt;
+use crate::header::JsHeaderProvider;
 use crate::table::Table;
 use crate::ConnectionOptions;
 use lancedb::connection::{ConnectBuilder, Connection as LanceDBConnection};
@@ -45,7 +47,11 @@ impl Connection {
 impl Connection {
     /// Create a new Connection instance from the given URI.
     #[napi(factory)]
-    pub async fn new(uri: String, options: ConnectionOptions) -> napi::Result<Self> {
+    pub async fn new(
+        uri: String,
+        options: ConnectionOptions,
+        header_provider: Option<&JsHeaderProvider>,
+    ) -> napi::Result<Self> {
         let mut builder = ConnectBuilder::new(&uri);
         if let Some(interval) = options.read_consistency_interval {
             builder =
@@ -57,8 +63,16 @@ impl Connection {
             }
         }
+
+        // Create client config, optionally with header provider
         let client_config = options.client_config.unwrap_or_default();
-        builder = builder.client_config(client_config.into());
+        let mut rust_config: lancedb::remote::ClientConfig = client_config.into();
+
+        if let Some(provider) = header_provider {
+            rust_config.header_provider =
+                Some(Arc::new(provider.clone()) as Arc<dyn lancedb::remote::HeaderProvider>);
+        }
+
+        builder = builder.client_config(rust_config);
+
         if let Some(api_key) = options.api_key {
             builder = builder.api_key(&api_key);
@@ -100,10 +114,12 @@ impl Connection {
     #[napi(catch_unwind)]
     pub async fn table_names(
         &self,
+        namespace: Vec<String>,
         start_after: Option<String>,
         limit: Option<u32>,
     ) -> napi::Result<Vec<String>> {
         let mut op = self.get_inner()?.table_names();
+        op = op.namespace(namespace);
         if let Some(start_after) = start_after {
             op = op.start_after(start_after);
         }
@@ -125,6 +141,7 @@ impl Connection {
         name: String,
         buf: Buffer,
         mode: String,
+        namespace: Vec<String>,
         storage_options: Option<HashMap<String, String>>,
     ) -> napi::Result<Table> {
         let batches = ipc_file_to_batches(buf.to_vec())
@@ -132,6 +149,8 @@ impl Connection {
         let mode = Self::parse_create_mode_str(&mode)?;
         let mut builder = self.get_inner()?.create_table(&name, batches).mode(mode);
+
+        builder = builder.namespace(namespace);
+
         if let Some(storage_options) = storage_options {
             for (key, value) in storage_options {
                 builder = builder.storage_option(key, value);
@@ -147,6 +166,7 @@ impl Connection {
         name: String,
         schema_buf: Buffer,
         mode: String,
+        namespace: Vec<String>,
         storage_options: Option<HashMap<String, String>>,
     ) -> napi::Result<Table> {
         let schema = ipc_file_to_schema(schema_buf.to_vec()).map_err(|e| {
@@ -157,6 +177,9 @@ impl Connection {
             .get_inner()?
             .create_empty_table(&name, schema)
             .mode(mode);
+
+        builder = builder.namespace(namespace);
+
         if let Some(storage_options) = storage_options {
             for (key, value) in storage_options {
                 builder = builder.storage_option(key, value);
@@ -170,10 +193,14 @@ impl Connection {
     pub async fn open_table(
         &self,
         name: String,
+        namespace: Vec<String>,
         storage_options: Option<HashMap<String, String>>,
         index_cache_size: Option<u32>,
     ) -> napi::Result<Table> {
         let mut builder = self.get_inner()?.open_table(&name);
+
+        builder = builder.namespace(namespace);
+
         if let Some(storage_options) = storage_options {
             for (key, value) in storage_options {
                 builder = builder.storage_option(key, value);
@@ -186,14 +213,50 @@ impl Connection {
         Ok(Table::new(tbl))
     }
+
+    #[napi(catch_unwind)]
+    pub async fn clone_table(
+        &self,
+        target_table_name: String,
+        source_uri: String,
+        target_namespace: Vec<String>,
+        source_version: Option<i64>,
+        source_tag: Option<String>,
+        is_shallow: bool,
+    ) -> napi::Result<Table> {
+        let mut builder = self
+            .get_inner()?
+            .clone_table(&target_table_name, &source_uri);
+
+        builder = builder.target_namespace(target_namespace);
+
+        if let Some(version) = source_version {
+            builder = builder.source_version(version as u64);
+        }
+
+        if let Some(tag) = source_tag {
+            builder = builder.source_tag(tag);
+        }
+
+        builder = builder.is_shallow(is_shallow);
+
+        let tbl = builder.execute().await.default_error()?;
+        Ok(Table::new(tbl))
+    }
+
     /// Drop table with the name. Or raise an error if the table does not exist.
     #[napi(catch_unwind)]
-    pub async fn drop_table(&self, name: String) -> napi::Result<()> {
-        self.get_inner()?.drop_table(&name).await.default_error()
+    pub async fn drop_table(&self, name: String, namespace: Vec<String>) -> napi::Result<()> {
+        self.get_inner()?
+            .drop_table(&name, &namespace)
+            .await
+            .default_error()
     }

     #[napi(catch_unwind)]
-    pub async fn drop_all_tables(&self) -> napi::Result<()> {
-        self.get_inner()?.drop_all_tables().await.default_error()
+    pub async fn drop_all_tables(&self, namespace: Vec<String>) -> napi::Result<()> {
+        self.get_inner()?
+            .drop_all_tables(&namespace)
+            .await
+            .default_error()
     }
 }
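The namespace plumbing above changes the native call shapes; a hedged sketch of what a caller would see through the generated NAPI bindings (the camelCase method names are assumed from napi's snake_case conversion, and the URIs and "prod" namespace are placeholders):

```ts
import { Connection as NativeConnection } from "./native.js";

const conn = await NativeConnection.new("db://host:port", {}, undefined);

// Namespace-scoped listing and drops.
const names = await conn.tableNames(["prod"], undefined, undefined);
await conn.dropTable("old_events", ["prod"]);
await conn.dropAllTables(["prod"]);

// Shallow-clone a table from another dataset URI into the "prod" namespace.
const cloned = await conn.cloneTable(
  "events_copy",
  "s3://bucket/events.lance",
  ["prod"],
  undefined, // sourceVersion
  undefined, // sourceTag
  true,      // isShallow
);
```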
nodejs/src/header.rs  (new file, 71 lines)
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright The LanceDB Authors
+
+use napi::{
+    bindgen_prelude::*,
+    threadsafe_function::{ErrorStrategy, ThreadsafeFunction},
+};
+use napi_derive::napi;
+use std::collections::HashMap;
+use std::sync::Arc;
+
+/// JavaScript HeaderProvider implementation that wraps a JavaScript callback.
+/// This is the only native header provider - all header provider implementations
+/// should provide a JavaScript function that returns headers.
+#[napi]
+pub struct JsHeaderProvider {
+    get_headers_fn: Arc<ThreadsafeFunction<(), ErrorStrategy::CalleeHandled>>,
+}
+
+impl Clone for JsHeaderProvider {
+    fn clone(&self) -> Self {
+        Self {
+            get_headers_fn: self.get_headers_fn.clone(),
+        }
+    }
+}
+
+#[napi]
+impl JsHeaderProvider {
+    /// Create a new JsHeaderProvider from a JavaScript callback
+    #[napi(constructor)]
+    pub fn new(get_headers_callback: JsFunction) -> Result<Self> {
+        let get_headers_fn = get_headers_callback
+            .create_threadsafe_function(0, |ctx| Ok(vec![ctx.value]))
+            .map_err(|e| {
+                Error::new(
+                    Status::GenericFailure,
+                    format!("Failed to create threadsafe function: {}", e),
+                )
+            })?;
+
+        Ok(Self {
+            get_headers_fn: Arc::new(get_headers_fn),
+        })
+    }
+}
+
+#[cfg(feature = "remote")]
+#[async_trait::async_trait]
+impl lancedb::remote::HeaderProvider for JsHeaderProvider {
+    async fn get_headers(&self) -> lancedb::error::Result<HashMap<String, String>> {
+        // Call the JavaScript function asynchronously
+        let promise: Promise<HashMap<String, String>> =
+            self.get_headers_fn.call_async(Ok(())).await.map_err(|e| {
+                lancedb::error::Error::Runtime {
+                    message: format!("Failed to call JavaScript get_headers: {}", e),
+                }
+            })?;
+
+        // Await the promise result
+        promise.await.map_err(|e| lancedb::error::Error::Runtime {
+            message: format!("JavaScript get_headers failed: {}", e),
+        })
+    }
+}
+
+impl std::fmt::Debug for JsHeaderProvider {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "JsHeaderProvider")
+    }
+}
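On the JS side, this native type is what the `NativeJsHeaderProvider` re-export in index.ts exposes; a direct-use sketch (header values are placeholders):

```ts
import { randomUUID } from "node:crypto";
import { NativeJsHeaderProvider } from "@lancedb/lancedb";

// The callback may return a plain object or a Promise of one; the Rust
// side awaits the threadsafe-function result either way.
const provider = new NativeJsHeaderProvider(async () => ({
  "x-request-id": randomUUID(),
  authorization: "Bearer token-placeholder",
}));
```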
@@ -8,6 +8,7 @@ use napi_derive::*;

 mod connection;
 mod error;
+mod header;
 mod index;
 mod iterator;
 pub mod merge;
@@ -43,6 +43,13 @@ impl NativeMergeInsertBuilder {
         self.inner.timeout(Duration::from_millis(timeout as u64));
     }
+
+    #[napi]
+    pub fn use_index(&self, use_index: bool) -> Self {
+        let mut this = self.clone();
+        this.inner.use_index(use_index);
+        this
+    }
+
     #[napi(catch_unwind)]
     pub async fn execute(&self, buf: Buffer) -> napi::Result<MergeResult> {
         let data = ipc_file_to_batches(buf.to_vec())
@@ -480,6 +480,7 @@ impl JsFullTextQuery {
     }

     #[napi(factory)]
+    #[allow(clippy::use_self)] // NAPI doesn't allow Self here but clippy reports it
     pub fn boolean_query(queries: Vec<(String, &JsFullTextQuery)>) -> napi::Result<Self> {
         let mut sub_queries = Vec::with_capacity(queries.len());
         for (occur, q) in queries {
@@ -69,6 +69,20 @@ pub struct RetryConfig {
     pub statuses: Option<Vec<u16>>,
 }
+
+/// TLS/mTLS configuration for the remote HTTP client.
+#[napi(object)]
+#[derive(Debug, Default)]
+pub struct TlsConfig {
+    /// Path to the client certificate file (PEM format) for mTLS authentication.
+    pub cert_file: Option<String>,
+    /// Path to the client private key file (PEM format) for mTLS authentication.
+    pub key_file: Option<String>,
+    /// Path to the CA certificate file (PEM format) for server verification.
+    pub ssl_ca_cert: Option<String>,
+    /// Whether to verify the hostname in the server's certificate.
+    pub assert_hostname: Option<bool>,
+}
+
 #[napi(object)]
 #[derive(Debug, Default)]
 pub struct ClientConfig {
@@ -76,6 +90,8 @@ pub struct ClientConfig {
     pub retry_config: Option<RetryConfig>,
     pub timeout_config: Option<TimeoutConfig>,
     pub extra_headers: Option<HashMap<String, String>>,
+    pub id_delimiter: Option<String>,
+    pub tls_config: Option<TlsConfig>,
 }

 impl From<TimeoutConfig> for lancedb::remote::TimeoutConfig {
@@ -106,6 +122,17 @@ impl From<RetryConfig> for lancedb::remote::RetryConfig {
     }
 }
+
+impl From<TlsConfig> for lancedb::remote::TlsConfig {
+    fn from(config: TlsConfig) -> Self {
+        Self {
+            cert_file: config.cert_file,
+            key_file: config.key_file,
+            ssl_ca_cert: config.ssl_ca_cert,
+            assert_hostname: config.assert_hostname.unwrap_or(true),
+        }
+    }
+}
+
 impl From<ClientConfig> for lancedb::remote::ClientConfig {
     fn from(config: ClientConfig) -> Self {
         Self {
@@ -115,6 +142,9 @@ impl From<ClientConfig> for lancedb::remote::ClientConfig {
             retry_config: config.retry_config.map(Into::into).unwrap_or_default(),
             timeout_config: config.timeout_config.map(Into::into).unwrap_or_default(),
             extra_headers: config.extra_headers.unwrap_or_default(),
+            id_delimiter: config.id_delimiter,
+            tls_config: config.tls_config.map(Into::into),
+            header_provider: None, // the header provider is set separately later
         }
     }
 }
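Since `TlsConfig` is a `#[napi(object)]`, it surfaces to JS with camelCase fields; a hedged connection sketch, where all paths and keys are placeholders and the exact option nesting is assumed from the `ClientConfig` shape above:

```ts
import { connect } from "@lancedb/lancedb";

const conn = await connect("db://host:port", {
  apiKey: "sk-placeholder",
  clientConfig: {
    tlsConfig: {
      certFile: "/etc/certs/client.pem", // mTLS client certificate (PEM)
      keyFile: "/etc/certs/client.key",  // matching private key (PEM)
      sslCaCert: "/etc/certs/ca.pem",    // CA bundle for server verification
      assertHostname: true,              // defaults to true when omitted
    },
  },
});
```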
@@ -94,7 +94,7 @@ impl napi::bindgen_prelude::FromNapiValue for Session {
         env: napi::sys::napi_env,
         napi_val: napi::sys::napi_value,
     ) -> napi::Result<Self> {
-        let object: napi::bindgen_prelude::ClassInstance<Session> =
+        let object: napi::bindgen_prelude::ClassInstance<Self> =
             napi::bindgen_prelude::ClassInstance::from_napi_value(env, napi_val)?;
         let copy = object.clone();
         Ok(copy)
@@ -114,6 +114,8 @@ impl Table {
         column: String,
         replace: Option<bool>,
         wait_timeout_s: Option<i64>,
+        name: Option<String>,
+        train: Option<bool>,
     ) -> napi::Result<()> {
         let lancedb_index = if let Some(index) = index {
             index.consume()?
@@ -128,6 +130,12 @@ impl Table {
             builder =
                 builder.wait_timeout(std::time::Duration::from_secs(timeout.try_into().unwrap()));
         }
+        if let Some(name) = name {
+            builder = builder.name(name);
+        }
+        if let Some(train) = train {
+            builder = builder.train(train);
+        }
         builder.execute().await.default_error()
     }
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.24.3"
+current_version = "0.25.2"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
@@ -1,6 +1,6 @@
 [package]
 name = "lancedb-python"
-version = "0.24.3"
+version = "0.25.2"
 edition.workspace = true
 description = "Python bindings for LanceDB"
 license.workspace = true
@@ -14,11 +14,12 @@ name = "_lancedb"
 crate-type = ["cdylib"]

 [dependencies]
-arrow = { version = "55.1", features = ["pyarrow"] }
+arrow = { version = "56.2", features = ["pyarrow"] }
+async-trait = "0.1"
 lancedb = { path = "../rust/lancedb", default-features = false }
 env_logger.workspace = true
-pyo3 = { version = "0.24", features = ["extension-module", "abi3-py39"] }
-pyo3-async-runtimes = { version = "0.24", features = [
+pyo3 = { version = "0.25", features = ["extension-module", "abi3-py39"] }
+pyo3-async-runtimes = { version = "0.25", features = [
     "attributes",
     "tokio-runtime",
 ] }
@@ -27,12 +28,12 @@ futures.workspace = true
 tokio = { version = "1.40", features = ["sync"] }

 [build-dependencies]
-pyo3-build-config = { version = "0.24", features = [
+pyo3-build-config = { version = "0.25", features = [
     "extension-module",
     "abi3-py39",
 ] }

 [features]
-default = ["remote"]
+default = ["remote", "lancedb/default"]
 fp16kernels = ["lancedb/fp16kernels"]
 remote = ["lancedb/remote"]
@@ -5,11 +5,12 @@ dynamic = ["version"]
 dependencies = [
     "deprecation",
     "numpy",
-    "overrides>=0.7",
+    "overrides>=0.7; python_version<'3.12'",
     "packaging",
     "pyarrow>=16",
     "pydantic>=1.10",
     "tqdm>=4.27.0",
+    "lance-namespace>=0.0.16"
 ]
 description = "lancedb"
 authors = [{ name = "LanceDB Devs", email = "dev@lancedb.com" }]
@@ -19,6 +19,7 @@ from .remote.db import RemoteDBConnection
 from .schema import vector
 from .table import AsyncTable
 from ._lancedb import Session
+from .namespace import connect_namespace, LanceNamespaceDBConnection


 def connect(
@@ -221,6 +222,7 @@ async def connect_async(
 __all__ = [
     "connect",
     "connect_async",
+    "connect_namespace",
     "AsyncConnection",
     "AsyncTable",
     "URI",
@@ -228,6 +230,7 @@ __all__ = [
     "vector",
     "DBConnection",
     "LanceDBConnection",
+    "LanceNamespaceDBConnection",
     "RemoteDBConnection",
     "Session",
     "__version__",
@@ -21,14 +21,28 @@ class Session:

 class Connection(object):
     uri: str
+    async def is_open(self): ...
+    async def close(self): ...
+    async def list_namespaces(
+        self,
+        namespace: List[str],
+        page_token: Optional[str],
+        limit: Optional[int],
+    ) -> List[str]: ...
+    async def create_namespace(self, namespace: List[str]) -> None: ...
+    async def drop_namespace(self, namespace: List[str]) -> None: ...
     async def table_names(
-        self, start_after: Optional[str], limit: Optional[int]
+        self,
+        namespace: List[str],
+        start_after: Optional[str],
+        limit: Optional[int],
     ) -> list[str]: ...
     async def create_table(
         self,
         name: str,
         mode: str,
         data: pa.RecordBatchReader,
+        namespace: List[str] = [],
         storage_options: Optional[Dict[str, str]] = None,
     ) -> Table: ...
     async def create_empty_table(
@@ -36,10 +50,34 @@ class Connection(object):
         name: str,
         mode: str,
         schema: pa.Schema,
+        namespace: List[str] = [],
         storage_options: Optional[Dict[str, str]] = None,
     ) -> Table: ...
-    async def rename_table(self, old_name: str, new_name: str) -> None: ...
-    async def drop_table(self, name: str) -> None: ...
+    async def open_table(
+        self,
+        name: str,
+        namespace: List[str] = [],
+        storage_options: Optional[Dict[str, str]] = None,
+        index_cache_size: Optional[int] = None,
+    ) -> Table: ...
+    async def clone_table(
+        self,
+        target_table_name: str,
+        source_uri: str,
+        target_namespace: List[str] = [],
+        source_version: Optional[int] = None,
+        source_tag: Optional[str] = None,
+        is_shallow: bool = True,
+    ) -> Table: ...
+    async def rename_table(
+        self,
+        cur_name: str,
+        new_name: str,
+        cur_namespace: List[str] = [],
+        new_namespace: List[str] = [],
+    ) -> None: ...
+    async def drop_table(self, name: str, namespace: List[str] = []) -> None: ...
+    async def drop_all_tables(self, namespace: List[str] = []) -> None: ...

 class Table:
     def name(self) -> str: ...
@@ -59,6 +97,10 @@ class Table:
         column: str,
         index: Union[IvfFlat, IvfPq, HnswPq, HnswSq, BTree, Bitmap, LabelList, FTS],
         replace: Optional[bool],
+        wait_timeout: Optional[object],
+        *,
+        name: Optional[str],
+        train: Optional[bool],
     ): ...
     async def list_versions(self) -> List[Dict[str, Any]]: ...
     async def version(self) -> int: ...
@@ -91,6 +133,7 @@ class Tags:
     async def update(self, tag: str, version: int): ...

 class IndexConfig:
+    name: str
     index_type: str
     columns: List[str]
@@ -6,10 +6,18 @@ from __future__ import annotations

 from abc import abstractmethod
 from pathlib import Path
+import sys
 from typing import TYPE_CHECKING, Dict, Iterable, List, Literal, Optional, Union

+if sys.version_info >= (3, 12):
+    from typing import override
+
+    class EnforceOverrides:
+        pass
+else:
+    from overrides import EnforceOverrides, override  # type: ignore
+
 from lancedb.embeddings.registry import EmbeddingFunctionRegistry
-from overrides import EnforceOverrides, override  # type: ignore

 from lancedb.common import data_to_reader, sanitize_uri, validate_schema
 from lancedb.background_loop import LOOP

@@ -43,14 +51,70 @@ if TYPE_CHECKING:
 class DBConnection(EnforceOverrides):
     """An active LanceDB connection interface."""

+    def list_namespaces(
+        self,
+        namespace: List[str] = [],
+        page_token: Optional[str] = None,
+        limit: int = 10,
+    ) -> Iterable[str]:
+        """List immediate child namespace names in the given namespace.
+
+        Parameters
+        ----------
+        namespace: List[str], default []
+            The parent namespace to list namespaces in.
+            Empty list represents root namespace.
+        page_token: str, optional
+            The token to use for pagination. If not present, start from the beginning.
+        limit: int, default 10
+            The size of the page to return.
+
+        Returns
+        -------
+        Iterable of str
+            List of immediate child namespace names
+        """
+        return []
+
+    def create_namespace(self, namespace: List[str]) -> None:
+        """Create a new namespace.
+
+        Parameters
+        ----------
+        namespace: List[str]
+            The namespace identifier to create.
+        """
+        raise NotImplementedError(
+            "Namespace operations are not supported for this connection type"
+        )
+
+    def drop_namespace(self, namespace: List[str]) -> None:
+        """Drop a namespace.
+
+        Parameters
+        ----------
+        namespace: List[str]
+            The namespace identifier to drop.
+        """
+        raise NotImplementedError(
+            "Namespace operations are not supported for this connection type"
+        )
+
     @abstractmethod
     def table_names(
-        self, page_token: Optional[str] = None, limit: int = 10
+        self,
+        page_token: Optional[str] = None,
+        limit: int = 10,
+        *,
+        namespace: List[str] = [],
     ) -> Iterable[str]:
         """List all tables in this database, in sorted order

         Parameters
         ----------
+        namespace: List[str], default []
+            The namespace to list tables in.
+            Empty list represents root namespace.
         page_token: str, optional
             The token to use for pagination. If not present, start from the beginning.
             Typically, this token is the last table name from the previous page.

@@ -77,6 +141,7 @@ class DBConnection(EnforceOverrides):
         fill_value: float = 0.0,
         embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
         *,
+        namespace: List[str] = [],
         storage_options: Optional[Dict[str, str]] = None,
         data_storage_version: Optional[str] = None,
         enable_v2_manifest_paths: Optional[bool] = None,

@@ -87,6 +152,9 @@ class DBConnection(EnforceOverrides):
         ----------
         name: str
             The name of the table.
+        namespace: List[str], default []
+            The namespace to create the table in.
+            Empty list represents root namespace.
         data: The data to initialize the table, *optional*
             User must provide at least one of `data` or `schema`.
             Acceptable types are:

@@ -238,6 +306,7 @@ class DBConnection(EnforceOverrides):
         self,
         name: str,
         *,
+        namespace: List[str] = [],
         storage_options: Optional[Dict[str, str]] = None,
         index_cache_size: Optional[int] = None,
     ) -> Table:

@@ -247,6 +316,9 @@ class DBConnection(EnforceOverrides):
         ----------
         name: str
             The name of the table.
+        namespace: List[str], optional
+            The namespace to open the table from.
+            None or empty list represents root namespace.
         index_cache_size: int, default 256
             **Deprecated**: Use session-level cache configuration instead.
             Create a Session with custom cache sizes and pass it to lancedb.connect().

@@ -272,17 +344,26 @@ class DBConnection(EnforceOverrides):
         """
         raise NotImplementedError

-    def drop_table(self, name: str):
+    def drop_table(self, name: str, namespace: List[str] = []):
         """Drop a table from the database.

         Parameters
         ----------
         name: str
             The name of the table.
+        namespace: List[str], default []
+            The namespace to drop the table from.
+            Empty list represents root namespace.
         """
         raise NotImplementedError

-    def rename_table(self, cur_name: str, new_name: str):
+    def rename_table(
+        self,
+        cur_name: str,
+        new_name: str,
+        cur_namespace: List[str] = [],
+        new_namespace: List[str] = [],
+    ):
         """Rename a table in the database.

         Parameters

@@ -291,6 +372,12 @@ class DBConnection(EnforceOverrides):
             The current name of the table.
         new_name: str
             The new name of the table.
+        cur_namespace: List[str], optional
+            The namespace of the current table.
+            None or empty list represents root namespace.
+        new_namespace: List[str], optional
+            The namespace to move the table to.
+            If not specified, defaults to the same as cur_namespace.
         """
         raise NotImplementedError

@@ -301,9 +388,15 @@ class DBConnection(EnforceOverrides):
         """
         raise NotImplementedError

-    def drop_all_tables(self):
+    def drop_all_tables(self, namespace: List[str] = []):
         """
         Drop all tables from the database
+
+        Parameters
+        ----------
+        namespace: List[str], optional
+            The namespace to drop all tables from.
+            None or empty list represents root namespace.
         """
         raise NotImplementedError

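Taken together, these base-class additions define the namespace contract every connection type now shares: list_namespaces degrades gracefully to an empty result, while create_namespace and drop_namespace raise for backends without namespace support. A hedged usage sketch against this API (database path and names are illustrative; whether the calls succeed depends on the concrete connection):

    import lancedb

    db = lancedb.connect("/tmp/mydb")

    # Namespaces are addressed as path segments; [] means the root namespace.
    db.create_namespace(["prod"])
    print(list(db.list_namespaces()))              # e.g. ['prod']

    # Table listing is namespace-scoped via a keyword-only argument.
    print(list(db.table_names(namespace=["prod"])))
    db.drop_namespace(["prod"])
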
@@ -404,18 +497,87 @@ class LanceDBConnection(DBConnection):
             conn = AsyncConnection(await lancedb_connect(self.uri))
             return await conn.table_names(start_after=start_after, limit=limit)

+    @override
+    def list_namespaces(
+        self,
+        namespace: List[str] = [],
+        page_token: Optional[str] = None,
+        limit: int = 10,
+    ) -> Iterable[str]:
+        """List immediate child namespace names in the given namespace.
+
+        Parameters
+        ----------
+        namespace: List[str], optional
+            The parent namespace to list namespaces in.
+            None or empty list represents root namespace.
+        page_token: str, optional
+            The token to use for pagination. If not present, start from the beginning.
+        limit: int, default 10
+            The size of the page to return.
+
+        Returns
+        -------
+        Iterable of str
+            List of immediate child namespace names
+        """
+        return LOOP.run(
+            self._conn.list_namespaces(
+                namespace=namespace, page_token=page_token, limit=limit
+            )
+        )
+
+    @override
+    def create_namespace(self, namespace: List[str]) -> None:
+        """Create a new namespace.
+
+        Parameters
+        ----------
+        namespace: List[str]
+            The namespace identifier to create.
+        """
+        LOOP.run(self._conn.create_namespace(namespace=namespace))
+
+    @override
+    def drop_namespace(self, namespace: List[str]) -> None:
+        """Drop a namespace.
+
+        Parameters
+        ----------
+        namespace: List[str]
+            The namespace identifier to drop.
+        """
+        return LOOP.run(self._conn.drop_namespace(namespace=namespace))
+
     @override
     def table_names(
-        self, page_token: Optional[str] = None, limit: int = 10
+        self,
+        page_token: Optional[str] = None,
+        limit: int = 10,
+        *,
+        namespace: List[str] = [],
     ) -> Iterable[str]:
         """Get the names of all tables in the database. The names are sorted.

+        Parameters
+        ----------
+        namespace: List[str], optional
+            The namespace to list tables in.
+        page_token: str, optional
+            The token to use for pagination.
+        limit: int, default 10
+            The maximum number of tables to return.
+
         Returns
         -------
         Iterator of str.
             A list of table names.
         """
-        return LOOP.run(self._conn.table_names(start_after=page_token, limit=limit))
+        return LOOP.run(
+            self._conn.table_names(
+                namespace=namespace, start_after=page_token, limit=limit
+            )
+        )

     def __len__(self) -> int:
         return len(self.table_names())

@@ -435,12 +597,18 @@ class LanceDBConnection(DBConnection):
         fill_value: float = 0.0,
         embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
         *,
+        namespace: List[str] = [],
         storage_options: Optional[Dict[str, str]] = None,
         data_storage_version: Optional[str] = None,
         enable_v2_manifest_paths: Optional[bool] = None,
     ) -> LanceTable:
         """Create a table in the database.

+        Parameters
+        ----------
+        namespace: List[str], optional
+            The namespace to create the table in.
+
         See
         ---
         DBConnection.create_table

@@ -459,6 +627,7 @@ class LanceDBConnection(DBConnection):
             on_bad_vectors=on_bad_vectors,
             fill_value=fill_value,
             embedding_functions=embedding_functions,
+            namespace=namespace,
             storage_options=storage_options,
         )
         return tbl

@@ -468,6 +637,7 @@ class LanceDBConnection(DBConnection):
         self,
         name: str,
         *,
+        namespace: List[str] = [],
         storage_options: Optional[Dict[str, str]] = None,
         index_cache_size: Optional[int] = None,
     ) -> LanceTable:

@@ -477,6 +647,8 @@ class LanceDBConnection(DBConnection):
         ----------
         name: str
             The name of the table.
+        namespace: List[str], optional
+            The namespace to open the table from.

         Returns
         -------

@@ -496,26 +668,122 @@ class LanceDBConnection(DBConnection):
         return LanceTable.open(
             self,
             name,
+            namespace=namespace,
             storage_options=storage_options,
             index_cache_size=index_cache_size,
         )

+    def clone_table(
+        self,
+        target_table_name: str,
+        source_uri: str,
+        *,
+        target_namespace: List[str] = [],
+        source_version: Optional[int] = None,
+        source_tag: Optional[str] = None,
+        is_shallow: bool = True,
+    ) -> LanceTable:
+        """Clone a table from a source table.
+
+        A shallow clone creates a new table that shares the underlying data files
+        with the source table but has its own independent manifest. This allows
+        both the source and cloned tables to evolve independently while initially
+        sharing the same data, deletion, and index files.
+
+        Parameters
+        ----------
+        target_table_name: str
+            The name of the target table to create.
+        source_uri: str
+            The URI of the source table to clone from.
+        target_namespace: List[str], optional
+            The namespace for the target table.
+            None or empty list represents root namespace.
+        source_version: int, optional
+            The version of the source table to clone.
+        source_tag: str, optional
+            The tag of the source table to clone.
+        is_shallow: bool, default True
+            Whether to perform a shallow clone (True) or deep clone (False).
+            Currently only shallow clone is supported.
+
+        Returns
+        -------
+        A LanceTable object representing the cloned table.
+        """
+        LOOP.run(
+            self._conn.clone_table(
+                target_table_name,
+                source_uri,
+                target_namespace=target_namespace,
+                source_version=source_version,
+                source_tag=source_tag,
+                is_shallow=is_shallow,
+            )
+        )
+        return LanceTable.open(
+            self,
+            target_table_name,
+            namespace=target_namespace,
+        )
+
     @override
-    def drop_table(self, name: str, ignore_missing: bool = False):
+    def drop_table(
+        self,
+        name: str,
+        namespace: List[str] = [],
+        ignore_missing: bool = False,
+    ):
         """Drop a table from the database.

         Parameters
         ----------
         name: str
             The name of the table.
+        namespace: List[str], optional
+            The namespace to drop the table from.
         ignore_missing: bool, default False
             If True, ignore if the table does not exist.
         """
-        LOOP.run(self._conn.drop_table(name, ignore_missing=ignore_missing))
+        LOOP.run(
+            self._conn.drop_table(
+                name, namespace=namespace, ignore_missing=ignore_missing
+            )
+        )

     @override
-    def drop_all_tables(self):
-        LOOP.run(self._conn.drop_all_tables())
+    def drop_all_tables(self, namespace: List[str] = []):
+        LOOP.run(self._conn.drop_all_tables(namespace=namespace))
+
+    @override
+    def rename_table(
+        self,
+        cur_name: str,
+        new_name: str,
+        cur_namespace: List[str] = [],
+        new_namespace: List[str] = [],
+    ):
+        """Rename a table in the database.
+
+        Parameters
+        ----------
+        cur_name: str
+            The current name of the table.
+        new_name: str
+            The new name of the table.
+        cur_namespace: List[str], optional
+            The namespace of the current table.
+        new_namespace: List[str], optional
+            The namespace to move the table to.
+        """
+        LOOP.run(
+            self._conn.rename_table(
+                cur_name,
+                new_name,
+                cur_namespace=cur_namespace,
+                new_namespace=new_namespace,
+            )
+        )

     @deprecation.deprecated(
         deprecated_in="0.15.1",

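The new synchronous clone_table drives the async clone on the background loop and then opens the resulting table. A sketch of the call shape as added above (paths, names, and the pinned version are illustrative):

    import lancedb

    db = lancedb.connect("/tmp/mydb")

    # Shallow clone: the new manifest references the source's existing
    # data, deletion, and index files instead of copying them.
    cloned = db.clone_table(
        "events_backup",                 # target table name
        "/tmp/mydb/events.lance",        # source table URI
        source_version=3,                # optional: clone a specific version
        is_shallow=True,                 # deep clones are not supported yet
    )
    print(cloned.name)
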
@@ -588,13 +856,67 @@ class AsyncConnection(object):
     def uri(self) -> str:
         return self._inner.uri

+    async def list_namespaces(
+        self,
+        namespace: List[str] = [],
+        page_token: Optional[str] = None,
+        limit: int = 10,
+    ) -> Iterable[str]:
+        """List immediate child namespace names in the given namespace.
+
+        Parameters
+        ----------
+        namespace: List[str], optional
+            The parent namespace to list namespaces in.
+            None or empty list represents root namespace.
+        page_token: str, optional
+            The token to use for pagination. If not present, start from the beginning.
+        limit: int, default 10
+            The size of the page to return.
+
+        Returns
+        -------
+        Iterable of str
+            List of immediate child namespace names (not full paths)
+        """
+        return await self._inner.list_namespaces(
+            namespace=namespace, page_token=page_token, limit=limit
+        )
+
+    async def create_namespace(self, namespace: List[str]) -> None:
+        """Create a new namespace.
+
+        Parameters
+        ----------
+        namespace: List[str]
+            The namespace identifier to create.
+        """
+        await self._inner.create_namespace(namespace)
+
+    async def drop_namespace(self, namespace: List[str]) -> None:
+        """Drop a namespace.
+
+        Parameters
+        ----------
+        namespace: List[str]
+            The namespace identifier to drop.
+        """
+        await self._inner.drop_namespace(namespace)
+
     async def table_names(
-        self, *, start_after: Optional[str] = None, limit: Optional[int] = None
+        self,
+        *,
+        namespace: List[str] = [],
+        start_after: Optional[str] = None,
+        limit: Optional[int] = None,
     ) -> Iterable[str]:
         """List all tables in this database, in sorted order

         Parameters
         ----------
+        namespace: List[str], optional
+            The namespace to list tables in.
+            None or empty list represents root namespace.
         start_after: str, optional
             If present, only return names that come lexicographically after the supplied
             value.

@@ -608,7 +930,9 @@ class AsyncConnection(object):
         -------
         Iterable of str
         """
-        return await self._inner.table_names(start_after=start_after, limit=limit)
+        return await self._inner.table_names(
+            namespace=namespace, start_after=start_after, limit=limit
+        )

     async def create_table(
         self,

@@ -621,6 +945,7 @@ class AsyncConnection(object):
         fill_value: Optional[float] = None,
         storage_options: Optional[Dict[str, str]] = None,
         *,
+        namespace: List[str] = [],
         embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
     ) -> AsyncTable:
         """Create an [AsyncTable][lancedb.table.AsyncTable] in the database.

@@ -629,6 +954,9 @@ class AsyncConnection(object):
         ----------
         name: str
             The name of the table.
+        namespace: List[str], default []
+            The namespace to create the table in.
+            Empty list represents root namespace.
         data: The data to initialize the table, *optional*
             User must provide at least one of `data` or `schema`.
             Acceptable types are:

@@ -807,6 +1135,7 @@ class AsyncConnection(object):
                 name,
                 mode,
                 schema,
+                namespace=namespace,
                 storage_options=storage_options,
             )
         else:

@@ -815,6 +1144,7 @@ class AsyncConnection(object):
                 name,
                 mode,
                 data,
+                namespace=namespace,
                 storage_options=storage_options,
             )

@@ -823,6 +1153,8 @@ class AsyncConnection(object):
     async def open_table(
         self,
         name: str,
+        *,
+        namespace: List[str] = [],
         storage_options: Optional[Dict[str, str]] = None,
         index_cache_size: Optional[int] = None,
     ) -> AsyncTable:

@@ -832,6 +1164,9 @@ class AsyncConnection(object):
         ----------
         name: str
             The name of the table.
+        namespace: List[str], optional
+            The namespace to open the table from.
+            None or empty list represents root namespace.
         storage_options: dict, optional
             Additional options for the storage backend. Options already set on the
             connection will be inherited by the table, but can be overridden here.

@@ -855,42 +1190,125 @@ class AsyncConnection(object):
         -------
         A LanceTable object representing the table.
         """
-        table = await self._inner.open_table(name, storage_options, index_cache_size)
+        table = await self._inner.open_table(
+            name,
+            namespace=namespace,
+            storage_options=storage_options,
+            index_cache_size=index_cache_size,
+        )
         return AsyncTable(table)

-    async def rename_table(self, old_name: str, new_name: str):
+    async def clone_table(
+        self,
+        target_table_name: str,
+        source_uri: str,
+        *,
+        target_namespace: List[str] = [],
+        source_version: Optional[int] = None,
+        source_tag: Optional[str] = None,
+        is_shallow: bool = True,
+    ) -> AsyncTable:
+        """Clone a table from a source table.
+
+        A shallow clone creates a new table that shares the underlying data files
+        with the source table but has its own independent manifest. This allows
+        both the source and cloned tables to evolve independently while initially
+        sharing the same data, deletion, and index files.
+
+        Parameters
+        ----------
+        target_table_name: str
+            The name of the target table to create.
+        source_uri: str
+            The URI of the source table to clone from.
+        target_namespace: List[str], optional
+            The namespace for the target table.
+            None or empty list represents root namespace.
+        source_version: int, optional
+            The version of the source table to clone.
+        source_tag: str, optional
+            The tag of the source table to clone.
+        is_shallow: bool, default True
+            Whether to perform a shallow clone (True) or deep clone (False).
+            Currently only shallow clone is supported.
+
+        Returns
+        -------
+        An AsyncTable object representing the cloned table.
+        """
+        table = await self._inner.clone_table(
+            target_table_name,
+            source_uri,
+            target_namespace=target_namespace,
+            source_version=source_version,
+            source_tag=source_tag,
+            is_shallow=is_shallow,
+        )
+        return AsyncTable(table)
+
+    async def rename_table(
+        self,
+        cur_name: str,
+        new_name: str,
+        cur_namespace: List[str] = [],
+        new_namespace: List[str] = [],
+    ):
         """Rename a table in the database.

         Parameters
         ----------
-        old_name: str
+        cur_name: str
             The current name of the table.
         new_name: str
             The new name of the table.
+        cur_namespace: List[str], optional
+            The namespace of the current table.
+            None or empty list represents root namespace.
+        new_namespace: List[str], optional
+            The namespace to move the table to.
+            If not specified, defaults to the same as cur_namespace.
         """
-        await self._inner.rename_table(old_name, new_name)
+        await self._inner.rename_table(
+            cur_name, new_name, cur_namespace=cur_namespace, new_namespace=new_namespace
+        )

-    async def drop_table(self, name: str, *, ignore_missing: bool = False):
+    async def drop_table(
+        self,
+        name: str,
+        *,
+        namespace: List[str] = [],
+        ignore_missing: bool = False,
+    ):
         """Drop a table from the database.

         Parameters
         ----------
         name: str
             The name of the table.
+        namespace: List[str], default []
+            The namespace to drop the table from.
+            Empty list represents root namespace.
         ignore_missing: bool, default False
             If True, ignore if the table does not exist.
         """
         try:
-            await self._inner.drop_table(name)
+            await self._inner.drop_table(name, namespace=namespace)
         except ValueError as e:
             if not ignore_missing:
                 raise e
             if f"Table '{name}' was not found" not in str(e):
                 raise e

-    async def drop_all_tables(self):
-        """Drop all tables from the database."""
-        await self._inner.drop_all_tables()
+    async def drop_all_tables(self, namespace: List[str] = []):
+        """Drop all tables from the database.
+
+        Parameters
+        ----------
+        namespace: List[str], optional
+            The namespace to drop all tables from.
+            None or empty list represents root namespace.
+        """
+        await self._inner.drop_all_tables(namespace=namespace)

     @deprecation.deprecated(
         deprecated_in="0.15.1",

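The async connection mirrors the synchronous surface one-for-one. A sketch with asyncio, exercising the methods added above (paths and names illustrative):

    import asyncio
    import lancedb

    async def main():
        db = await lancedb.connect_async("/tmp/mydb")
        await db.create_namespace(["staging"])
        names = await db.table_names(namespace=["staging"])
        print(list(names))
        await db.drop_table("scratch", namespace=["staging"], ignore_missing=True)
        await db.drop_namespace(["staging"])

    asyncio.run(main())
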
@@ -122,7 +122,7 @@ class EmbeddingFunctionRegistry:
             obj["vector_column"]: EmbeddingFunctionConfig(
                 vector_column=obj["vector_column"],
                 source_column=obj["source_column"],
-                function=self.get(obj["name"])(**obj["model"]),
+                function=self.get(obj["name"]).create(**obj["model"]),
             )
             for obj in raw_list
         }

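This one-line fix matters when embedding configs are rehydrated from table metadata: embedding function classes are meant to be instantiated through their create() factory rather than called directly. For comparison, the normal user-facing path goes through the same factory (the model choice below is illustrative):

    from lancedb.embeddings import EmbeddingFunctionRegistry

    registry = EmbeddingFunctionRegistry.get_instance()
    # .create() is the same factory the registry now uses when rehydrating
    # persisted table metadata.
    func = registry.get("sentence-transformers").create(
        name="BAAI/bge-small-en-v1.5"  # illustrative model choice
    )
    print(func.ndims())
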
@@ -251,6 +251,13 @@ class HnswPq:
         results. In most cases, there is no benefit to setting this higher than 500.
         This value should be set to a value that is not less than `ef` in the
         search phase.
+
+    target_partition_size, default is 1,048,576
+
+        The target size of each partition.
+
+        This value controls the tradeoff between search performance and
+        accuracy: higher values give faster search but less accurate results.
     """

     distance_type: Literal["l2", "cosine", "dot"] = "l2"

@@ -261,6 +268,7 @@ class HnswPq:
     sample_rate: int = 256
     m: int = 20
     ef_construction: int = 300
+    target_partition_size: Optional[int] = None


 @dataclass

@@ -351,6 +359,12 @@ class HnswSq:
         This value should be set to a value that is not less than `ef` in the search
         phase.
+
+    target_partition_size, default is 1,048,576
+
+        The target size of each partition.
+
+        This value controls the tradeoff between search performance and
+        accuracy: higher values give faster search but less accurate results.
     """

     distance_type: Literal["l2", "cosine", "dot"] = "l2"

@@ -359,6 +373,7 @@ class HnswSq:
     sample_rate: int = 256
     m: int = 20
     ef_construction: int = 300
+    target_partition_size: Optional[int] = None


 @dataclass

@@ -444,12 +459,20 @@ class IvfFlat:
         cases the default should be sufficient.

         The default value is 256.
+
+    target_partition_size, default is 8192
+
+        The target size of each partition.
+
+        This value controls the tradeoff between search performance and
+        accuracy: higher values give faster search but less accurate results.
     """

     distance_type: Literal["l2", "cosine", "dot", "hamming"] = "l2"
     num_partitions: Optional[int] = None
     max_iterations: int = 50
     sample_rate: int = 256
+    target_partition_size: Optional[int] = None


 @dataclass

@@ -564,6 +587,13 @@ class IvfPq:
         cases the default should be sufficient.

         The default value is 256.
+
+    target_partition_size, default is 8192
+
+        The target size of each partition.
+
+        This value controls the tradeoff between search performance and
+        accuracy: higher values give faster search but less accurate results.
     """

     distance_type: Literal["l2", "cosine", "dot"] = "l2"

@@ -572,6 +602,7 @@ class IvfPq:
     num_bits: int = 8
     max_iterations: int = 50
     sample_rate: int = 256
+    target_partition_size: Optional[int] = None


 __all__ = [

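All four configs expose the new knob identically; left at None, the engine falls back to the defaults quoted in the docstrings above. A hedged sketch of passing it at index-build time through the async API (table and column names illustrative):

    import asyncio
    import lancedb
    from lancedb.index import IvfPq

    async def main():
        db = await lancedb.connect_async("/tmp/mydb")
        tbl = await db.open_table("events")
        # Larger target partitions mean fewer, coarser partitions to probe:
        # faster search, less accurate results.
        await tbl.create_index(
            "vector",
            config=IvfPq(distance_type="cosine", target_partition_size=16384),
        )

    asyncio.run(main())
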
@@ -33,6 +33,7 @@ class LanceMergeInsertBuilder(object):
         self._when_not_matched_by_source_delete = False
         self._when_not_matched_by_source_condition = None
         self._timeout = None
+        self._use_index = True

     def when_matched_update_all(
         self, *, where: Optional[str] = None

@@ -78,6 +79,23 @@ class LanceMergeInsertBuilder(object):
         self._when_not_matched_by_source_condition = condition
         return self

+    def use_index(self, use_index: bool) -> LanceMergeInsertBuilder:
+        """
+        Controls whether to use indexes for the merge operation.
+
+        When set to `True` (the default), the operation will use an index if
+        available on the join key for improved performance. When set to `False`,
+        it forces a full table scan even if an index exists. This can be useful
+        for benchmarking or when the query optimizer chooses a suboptimal path.
+
+        Parameters
+        ----------
+        use_index: bool
+            Whether to use indexes for the merge operation. Defaults to `True`.
+        """
+        self._use_index = use_index
+        return self
+
     def execute(
         self,
         new_data: DATA,

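use_index slots into the existing builder chain ahead of execute(). A sketch (table, key, and rows illustrative):

    import lancedb

    db = lancedb.connect("/tmp/mydb")
    tbl = db.open_table("users")

    new_rows = [{"id": 1, "name": "alice"}, {"id": 2, "name": "bob"}]

    # Force a full-table scan for the join instead of the scalar index on
    # "id", e.g. to benchmark the two execution paths against each other.
    (
        tbl.merge_insert("id")
        .when_matched_update_all()
        .when_not_matched_insert_all()
        .use_index(False)
        .execute(new_rows)
    )
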
python/python/lancedb/namespace.py (new file, 406 lines)
@@ -0,0 +1,406 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright The LanceDB Authors
+
+"""
+LanceDB Namespace integration module.
+
+This module provides integration with lance_namespace for managing tables
+through a namespace abstraction.
+"""
+
+from __future__ import annotations
+
+from typing import Dict, Iterable, List, Optional, Union
+import os
+import sys
+
+if sys.version_info >= (3, 12):
+    from typing import override
+else:
+    from overrides import override
+
+from lancedb.db import DBConnection
+from lancedb.table import LanceTable, Table
+from lancedb.util import validate_table_name
+from lancedb.common import validate_schema
+from lancedb.table import sanitize_create_table
+
+from lance_namespace import LanceNamespace, connect as namespace_connect
+from lance_namespace_urllib3_client.models import (
+    ListTablesRequest,
+    DescribeTableRequest,
+    CreateTableRequest,
+    DropTableRequest,
+    ListNamespacesRequest,
+    CreateNamespaceRequest,
+    DropNamespaceRequest,
+    JsonArrowSchema,
+    JsonArrowField,
+    JsonArrowDataType,
+)
+
+import pyarrow as pa
+from datetime import timedelta
+from lancedb.pydantic import LanceModel
+from lancedb.common import DATA
+from lancedb.embeddings import EmbeddingFunctionConfig
+from ._lancedb import Session
+
+
+def _convert_pyarrow_type_to_json(arrow_type: pa.DataType) -> JsonArrowDataType:
+    """Convert PyArrow DataType to JsonArrowDataType."""
+    if pa.types.is_null(arrow_type):
+        type_name = "null"
+    elif pa.types.is_boolean(arrow_type):
+        type_name = "bool"
+    elif pa.types.is_int8(arrow_type):
+        type_name = "int8"
+    elif pa.types.is_uint8(arrow_type):
+        type_name = "uint8"
+    elif pa.types.is_int16(arrow_type):
+        type_name = "int16"
+    elif pa.types.is_uint16(arrow_type):
+        type_name = "uint16"
+    elif pa.types.is_int32(arrow_type):
+        type_name = "int32"
+    elif pa.types.is_uint32(arrow_type):
+        type_name = "uint32"
+    elif pa.types.is_int64(arrow_type):
+        type_name = "int64"
+    elif pa.types.is_uint64(arrow_type):
+        type_name = "uint64"
+    elif pa.types.is_float32(arrow_type):
+        type_name = "float32"
+    elif pa.types.is_float64(arrow_type):
+        type_name = "float64"
+    elif pa.types.is_string(arrow_type):
+        type_name = "utf8"
+    elif pa.types.is_binary(arrow_type):
+        type_name = "binary"
+    elif pa.types.is_list(arrow_type):
+        # For list types, we need more complex handling
+        type_name = "list"
+    elif pa.types.is_fixed_size_list(arrow_type):
+        type_name = "fixed_size_list"
+    else:
+        # Default to string representation for unsupported types
+        type_name = str(arrow_type)
+
+    return JsonArrowDataType(type=type_name)
+
+
+def _convert_pyarrow_schema_to_json(schema: pa.Schema) -> JsonArrowSchema:
+    """Convert PyArrow Schema to JsonArrowSchema."""
+    fields = []
+    for field in schema:
+        json_field = JsonArrowField(
+            name=field.name,
+            type=_convert_pyarrow_type_to_json(field.type),
+            nullable=field.nullable,
+            metadata=field.metadata,
+        )
+        fields.append(json_field)
+
+    return JsonArrowSchema(fields=fields, metadata=schema.metadata)
+
+
+class LanceNamespaceDBConnection(DBConnection):
+    """
+    A LanceDB connection that uses a namespace for table management.
+
+    This connection delegates table URI resolution to a lance_namespace instance,
+    while using the standard LanceTable for actual table operations.
+    """
+
+    def __init__(
+        self,
+        namespace: LanceNamespace,
+        *,
+        read_consistency_interval: Optional[timedelta] = None,
+        storage_options: Optional[Dict[str, str]] = None,
+        session: Optional[Session] = None,
+    ):
+        """
+        Initialize a namespace-based LanceDB connection.
+
+        Parameters
+        ----------
+        namespace : LanceNamespace
+            The namespace instance to use for table management
+        read_consistency_interval : Optional[timedelta]
+            The interval at which to check for updates to the table from other
+            processes. If None, then consistency is not checked.
+        storage_options : Optional[Dict[str, str]]
+            Additional options for the storage backend
+        session : Optional[Session]
+            A session to use for this connection
+        """
+        self._ns = namespace
+        self.read_consistency_interval = read_consistency_interval
+        self.storage_options = storage_options or {}
+        self.session = session
+
+    @override
+    def table_names(
+        self,
+        page_token: Optional[str] = None,
+        limit: int = 10,
+        *,
+        namespace: List[str] = [],
+    ) -> Iterable[str]:
+        request = ListTablesRequest(id=namespace, page_token=page_token, limit=limit)
+        response = self._ns.list_tables(request)
+        return response.tables if response.tables else []
+
+    @override
+    def create_table(
+        self,
+        name: str,
+        data: Optional[DATA] = None,
+        schema: Optional[Union[pa.Schema, LanceModel]] = None,
+        mode: str = "create",
+        exist_ok: bool = False,
+        on_bad_vectors: str = "error",
+        fill_value: float = 0.0,
+        embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
+        *,
+        namespace: List[str] = [],
+        storage_options: Optional[Dict[str, str]] = None,
+        data_storage_version: Optional[str] = None,
+        enable_v2_manifest_paths: Optional[bool] = None,
+    ) -> Table:
+        if mode.lower() not in ["create", "overwrite"]:
+            raise ValueError("mode must be either 'create' or 'overwrite'")
+        validate_table_name(name)
+
+        # TODO: support passing data
+        if data is not None:
+            raise ValueError(
+                "create_table currently only supports creating empty tables (data=None)"
+            )
+
+        # Prepare schema
+        metadata = None
+        if embedding_functions is not None:
+            from lancedb.embeddings.registry import EmbeddingFunctionRegistry
+
+            registry = EmbeddingFunctionRegistry.get_instance()
+            metadata = registry.get_table_metadata(embedding_functions)
+
+        data, schema = sanitize_create_table(
+            data, schema, metadata, on_bad_vectors, fill_value
+        )
+        validate_schema(schema)
+
+        # Convert PyArrow schema to JsonArrowSchema
+        json_schema = _convert_pyarrow_schema_to_json(schema)
+
+        # Create table request with namespace
+        table_id = namespace + [name]
+        request = CreateTableRequest(id=table_id, var_schema=json_schema)
+
+        # Create empty Arrow IPC stream bytes
+        import pyarrow.ipc as ipc
+        import io
+
+        empty_table = pa.Table.from_arrays(
+            [pa.array([], type=field.type) for field in schema], schema=schema
+        )
+        buffer = io.BytesIO()
+        with ipc.new_stream(buffer, schema) as writer:
+            writer.write_table(empty_table)
+        request_data = buffer.getvalue()
+
+        self._ns.create_table(request, request_data)
+        return self.open_table(
+            name, namespace=namespace, storage_options=storage_options
+        )
+
+    @override
+    def open_table(
+        self,
+        name: str,
+        *,
+        namespace: List[str] = [],
+        storage_options: Optional[Dict[str, str]] = None,
+        index_cache_size: Optional[int] = None,
+    ) -> Table:
+        table_id = namespace + [name]
+        request = DescribeTableRequest(id=table_id)
+        response = self._ns.describe_table(request)
+
+        merged_storage_options = dict()
+        if storage_options:
+            merged_storage_options.update(storage_options)
+        if response.storage_options:
+            merged_storage_options.update(response.storage_options)
+
+        return self._lance_table_from_uri(
+            response.location,
+            storage_options=merged_storage_options,
+            index_cache_size=index_cache_size,
+        )
+
+    @override
+    def drop_table(self, name: str, namespace: List[str] = []):
+        # Use namespace drop_table directly
+        table_id = namespace + [name]
+        request = DropTableRequest(id=table_id)
+        self._ns.drop_table(request)
+
+    @override
+    def rename_table(
+        self,
+        cur_name: str,
+        new_name: str,
+        cur_namespace: List[str] = [],
+        new_namespace: List[str] = [],
+    ):
+        raise NotImplementedError(
+            "rename_table is not supported for namespace connections"
+        )
+
+    @override
+    def drop_database(self):
+        raise NotImplementedError(
+            "drop_database is deprecated, use drop_all_tables instead"
+        )
+
+    @override
+    def drop_all_tables(self, namespace: List[str] = []):
+        for table_name in self.table_names(namespace=namespace):
+            self.drop_table(table_name, namespace=namespace)
+
+    @override
+    def list_namespaces(
+        self,
+        namespace: List[str] = [],
+        page_token: Optional[str] = None,
+        limit: int = 10,
+    ) -> Iterable[str]:
+        """
+        List child namespaces under the given namespace.
+
+        Parameters
+        ----------
+        namespace : Optional[List[str]]
+            The parent namespace to list children from.
+            If None, lists root-level namespaces.
+        page_token : Optional[str]
+            Pagination token for listing results.
+        limit : int
+            Maximum number of namespaces to return.
+
+        Returns
+        -------
+        Iterable[str]
+            Names of child namespaces.
+        """
+        request = ListNamespacesRequest(
+            id=namespace, page_token=page_token, limit=limit
+        )
+        response = self._ns.list_namespaces(request)
+        return response.namespaces if response.namespaces else []
+
+    @override
+    def create_namespace(self, namespace: List[str]) -> None:
+        """
+        Create a new namespace.
+
+        Parameters
+        ----------
+        namespace : List[str]
+            The namespace path to create.
+        """
+        request = CreateNamespaceRequest(id=namespace)
+        self._ns.create_namespace(request)
+
+    @override
+    def drop_namespace(self, namespace: List[str]) -> None:
+        """
+        Drop a namespace.
+
+        Parameters
+        ----------
+        namespace : List[str]
+            The namespace path to drop.
+        """
+        request = DropNamespaceRequest(id=namespace)
+        self._ns.drop_namespace(request)
+
+    def _lance_table_from_uri(
+        self,
+        table_uri: str,
+        *,
+        storage_options: Optional[Dict[str, str]] = None,
+        index_cache_size: Optional[int] = None,
+    ) -> LanceTable:
+        # Extract the base path and table name from the URI
+        if table_uri.endswith(".lance"):
+            base_path = os.path.dirname(table_uri)
+            table_name = os.path.basename(table_uri)[:-6]  # Remove .lance
+        else:
+            raise ValueError(f"Invalid table URI: {table_uri}")
+
+        from lancedb.db import LanceDBConnection
+
+        temp_conn = LanceDBConnection(
+            base_path,
+            read_consistency_interval=self.read_consistency_interval,
+            storage_options={**self.storage_options, **(storage_options or {})},
+            session=self.session,
+        )
+
+        # Open the table using the temporary connection
+        return LanceTable.open(
+            temp_conn,
+            table_name,
+            storage_options=storage_options,
+            index_cache_size=index_cache_size,
+        )
+
+
+def connect_namespace(
+    impl: str,
+    properties: Dict[str, str],
+    *,
+    read_consistency_interval: Optional[timedelta] = None,
+    storage_options: Optional[Dict[str, str]] = None,
+    session: Optional[Session] = None,
+) -> LanceNamespaceDBConnection:
+    """
+    Connect to a LanceDB database through a namespace.
+
+    Parameters
+    ----------
+    impl : str
+        The namespace implementation to use. For example:
+        - "dir" for DirectoryNamespace
+        - "rest" for REST-based namespace
+        - Full module path for custom implementations
+    properties : Dict[str, str]
+        Configuration properties for the namespace implementation.
+        Different namespace implementations have different config properties.
+        For example, use DirectoryNamespace with {"root": "/path/to/directory"}
+    read_consistency_interval : Optional[timedelta]
+        The interval at which to check for updates to the table from other
+        processes. If None, then consistency is not checked.
+    storage_options : Optional[Dict[str, str]]
+        Additional options for the storage backend
+    session : Optional[Session]
+        A session to use for this connection
+
+    Returns
+    -------
+    LanceNamespaceDBConnection
+        A namespace-based connection to LanceDB
+    """
+    namespace = namespace_connect(impl, properties)
+
+    # Return the namespace-based connection
+    return LanceNamespaceDBConnection(
+        namespace,
+        read_consistency_interval=read_consistency_interval,
+        storage_options=storage_options,
+        session=session,
+    )

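End to end, the module lets a namespace implementation resolve table locations while ordinary LanceTable objects do the work. A sketch against the directory implementation (the "dir" impl and its "root" property come from lance-namespace; the schema is illustrative):

    import pyarrow as pa
    import lancedb

    db = lancedb.connect_namespace("dir", {"root": "/tmp/ns_root"})

    schema = pa.schema(
        [
            pa.field("id", pa.int64()),
            pa.field("vector", pa.list_(pa.float32(), 2)),
        ]
    )

    # Only empty tables can be created through this path for now;
    # create_table raises if data is not None.
    tbl = db.create_table("items", schema=schema)
    tbl.add([{"id": 1, "vector": [0.1, 0.2]}])
    print(list(db.table_names()))
    db.drop_table("items")
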
@@ -943,20 +943,22 @@ class LanceQueryBuilder(ABC):
         >>> query = [100, 100]
         >>> plan = table.search(query).analyze_plan()
         >>> print(plan)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-        AnalyzeExec verbose=true, metrics=[]
-          TracedExec, metrics=[]
-            ProjectionExec: expr=[...], metrics=[...]
-              GlobalLimitExec: skip=0, fetch=10, metrics=[...]
+        AnalyzeExec verbose=true, metrics=[], cumulative_cpu=...
+          TracedExec, metrics=[], cumulative_cpu=...
+            ProjectionExec: expr=[...], metrics=[...], cumulative_cpu=...
+              GlobalLimitExec: skip=0, fetch=10, metrics=[...], cumulative_cpu=...
                 FilterExec: _distance@2 IS NOT NULL,
-                metrics=[output_rows=..., elapsed_compute=...]
+                metrics=[output_rows=..., elapsed_compute=...], cumulative_cpu=...
                   SortExec: TopK(fetch=10), expr=[...],
                   preserve_partitioning=[...],
-                  metrics=[output_rows=..., elapsed_compute=..., row_replacements=...]
+                  metrics=[output_rows=..., elapsed_compute=..., row_replacements=...],
+                  cumulative_cpu=...
                     KNNVectorDistance: metric=l2,
-                    metrics=[output_rows=..., elapsed_compute=..., output_batches=...]
+                    metrics=[output_rows=..., elapsed_compute=..., output_batches=...],
+                    cumulative_cpu=...
                       LanceRead: uri=..., projection=[vector], ...
                       metrics=[output_rows=..., elapsed_compute=...,
-                      bytes_read=..., iops=..., requests=...]
+                      bytes_read=..., iops=..., requests=...], cumulative_cpu=...

         Returns
         -------

@@ -8,7 +8,15 @@ from typing import List, Optional

 from lancedb import __version__

-__all__ = ["TimeoutConfig", "RetryConfig", "ClientConfig"]
+from .header import HeaderProvider
+
+__all__ = [
+    "TimeoutConfig",
+    "RetryConfig",
+    "TlsConfig",
+    "ClientConfig",
+    "HeaderProvider",
+]


 @dataclass
@@ -112,15 +120,43 @@ class RetryConfig:
     statuses: Optional[List[int]] = None


+@dataclass
+class TlsConfig:
+    """TLS/mTLS configuration for the remote HTTP client.
+
+    Attributes
+    ----------
+    cert_file: Optional[str]
+        Path to the client certificate file (PEM format) for mTLS authentication.
+    key_file: Optional[str]
+        Path to the client private key file (PEM format) for mTLS authentication.
+    ssl_ca_cert: Optional[str]
+        Path to the CA certificate file (PEM format) for server verification.
+    assert_hostname: bool
+        Whether to verify the hostname in the server's certificate. Default is True.
+        Set to False to disable hostname verification (use with caution).
+    """
+
+    cert_file: Optional[str] = None
+    key_file: Optional[str] = None
+    ssl_ca_cert: Optional[str] = None
+    assert_hostname: bool = True
+
+
 @dataclass
 class ClientConfig:
     user_agent: str = f"LanceDB-Python-Client/{__version__}"
     retry_config: RetryConfig = field(default_factory=RetryConfig)
     timeout_config: Optional[TimeoutConfig] = field(default_factory=TimeoutConfig)
     extra_headers: Optional[dict] = None
+    id_delimiter: Optional[str] = None
+    tls_config: Optional[TlsConfig] = None
+    header_provider: Optional["HeaderProvider"] = None

     def __post_init__(self):
         if isinstance(self.retry_config, dict):
             self.retry_config = RetryConfig(**self.retry_config)
         if isinstance(self.timeout_config, dict):
             self.timeout_config = TimeoutConfig(**self.timeout_config)
+        if isinstance(self.tls_config, dict):
+            self.tls_config = TlsConfig(**self.tls_config)
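A short usage sketch of the new TLS fields on `ClientConfig`. The field names and the dict coercion come straight from the hunk above; the certificate paths are placeholders.

```python
# Minimal sketch: ClientConfig with mTLS, using the fields added above.
# Dicts are accepted and coerced to TlsConfig by __post_init__.
from lancedb.remote import ClientConfig

config = ClientConfig(
    tls_config={
        "cert_file": "/etc/certs/client.pem",  # placeholder path
        "key_file": "/etc/certs/client.key",   # placeholder path
        "ssl_ca_cert": "/etc/certs/ca.pem",    # placeholder path
    },
)
assert config.tls_config.assert_hostname  # defaults to True
```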
@@ -5,15 +5,20 @@
 from datetime import timedelta
 import logging
 from concurrent.futures import ThreadPoolExecutor
+import sys
 from typing import Any, Dict, Iterable, List, Optional, Union
 from urllib.parse import urlparse
 import warnings

+if sys.version_info >= (3, 12):
+    from typing import override
+else:
+    from overrides import override
+
 # Remove this import to fix circular dependency
 # from lancedb import connect_async
 from lancedb.remote import ClientConfig
 import pyarrow as pa
-from overrides import override

 from ..common import DATA
 from ..db import DBConnection, LOOP
@@ -96,14 +101,73 @@ class RemoteDBConnection(DBConnection):
     def __repr__(self) -> str:
         return f"RemoteConnect(name={self.db_name})"

+    @override
+    def list_namespaces(
+        self,
+        namespace: List[str] = [],
+        page_token: Optional[str] = None,
+        limit: int = 10,
+    ) -> Iterable[str]:
+        """List immediate child namespace names in the given namespace.
+
+        Parameters
+        ----------
+        namespace: List[str], optional
+            The parent namespace to list namespaces in.
+            None or empty list represents root namespace.
+        page_token: str, optional
+            The token to use for pagination. If not present, start from the beginning.
+        limit: int, default 10
+            The size of the page to return.
+
+        Returns
+        -------
+        Iterable of str
+            List of immediate child namespace names
+        """
+        return LOOP.run(
+            self._conn.list_namespaces(
+                namespace=namespace, page_token=page_token, limit=limit
+            )
+        )
+
+    @override
+    def create_namespace(self, namespace: List[str]) -> None:
+        """Create a new namespace.
+
+        Parameters
+        ----------
+        namespace: List[str]
+            The namespace identifier to create.
+        """
+        LOOP.run(self._conn.create_namespace(namespace=namespace))
+
+    @override
+    def drop_namespace(self, namespace: List[str]) -> None:
+        """Drop a namespace.
+
+        Parameters
+        ----------
+        namespace: List[str]
+            The namespace identifier to drop.
+        """
+        return LOOP.run(self._conn.drop_namespace(namespace=namespace))
+
     @override
     def table_names(
-        self, page_token: Optional[str] = None, limit: int = 10
+        self,
+        page_token: Optional[str] = None,
+        limit: int = 10,
+        *,
+        namespace: List[str] = [],
     ) -> Iterable[str]:
         """List the names of all tables in the database.

         Parameters
         ----------
+        namespace: List[str], default []
+            The namespace to list tables in.
+            Empty list represents root namespace.
         page_token: str
             The last token to start the new page.
         limit: int, default 10
@@ -113,13 +177,18 @@ class RemoteDBConnection(DBConnection):
         -------
         An iterator of table names.
         """
-        return LOOP.run(self._conn.table_names(start_after=page_token, limit=limit))
+        return LOOP.run(
+            self._conn.table_names(
+                namespace=namespace, start_after=page_token, limit=limit
+            )
+        )

     @override
     def open_table(
         self,
         name: str,
         *,
+        namespace: List[str] = [],
         storage_options: Optional[Dict[str, str]] = None,
         index_cache_size: Optional[int] = None,
     ) -> Table:
@@ -129,6 +198,9 @@ class RemoteDBConnection(DBConnection):
         ----------
         name: str
             The name of the table.
+        namespace: List[str], optional
+            The namespace to open the table from.
+            None or empty list represents root namespace.

         Returns
         -------
@@ -142,7 +214,54 @@ class RemoteDBConnection(DBConnection):
             " (there is no local cache to configure)"
         )

-        table = LOOP.run(self._conn.open_table(name))
+        table = LOOP.run(self._conn.open_table(name, namespace=namespace))
+        return RemoteTable(table, self.db_name)
+
+    def clone_table(
+        self,
+        target_table_name: str,
+        source_uri: str,
+        *,
+        target_namespace: List[str] = [],
+        source_version: Optional[int] = None,
+        source_tag: Optional[str] = None,
+        is_shallow: bool = True,
+    ) -> Table:
+        """Clone a table from a source table.
+
+        Parameters
+        ----------
+        target_table_name: str
+            The name of the target table to create.
+        source_uri: str
+            The URI of the source table to clone from.
+        target_namespace: List[str], optional
+            The namespace for the target table.
+            None or empty list represents root namespace.
+        source_version: int, optional
+            The version of the source table to clone.
+        source_tag: str, optional
+            The tag of the source table to clone.
+        is_shallow: bool, default True
+            Whether to perform a shallow clone (True) or deep clone (False).
+            Currently only shallow clone is supported.
+
+        Returns
+        -------
+        A RemoteTable object representing the cloned table.
+        """
+        from .table import RemoteTable
+
+        table = LOOP.run(
+            self._conn.clone_table(
+                target_table_name,
+                source_uri,
+                target_namespace=target_namespace,
+                source_version=source_version,
+                source_tag=source_tag,
+                is_shallow=is_shallow,
+            )
+        )
         return RemoteTable(table, self.db_name)

     @override
@@ -155,6 +274,8 @@ class RemoteDBConnection(DBConnection):
         fill_value: float = 0.0,
         mode: Optional[str] = None,
         embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
+        *,
+        namespace: List[str] = [],
     ) -> Table:
         """Create a [Table][lancedb.table.Table] in the database.

@@ -162,6 +283,9 @@ class RemoteDBConnection(DBConnection):
         ----------
         name: str
             The name of the table.
+        namespace: List[str], optional
+            The namespace to create the table in.
+            None or empty list represents root namespace.
         data: The data to initialize the table, *optional*
             User must provide at least one of `data` or `schema`.
             Acceptable types are:
@@ -262,6 +386,7 @@ class RemoteDBConnection(DBConnection):
             self._conn.create_table(
                 name,
                 data,
+                namespace=namespace,
                 mode=mode,
                 schema=schema,
                 on_bad_vectors=on_bad_vectors,
@@ -271,18 +396,27 @@ class RemoteDBConnection(DBConnection):
         return RemoteTable(table, self.db_name)

     @override
-    def drop_table(self, name: str):
+    def drop_table(self, name: str, namespace: List[str] = []):
         """Drop a table from the database.

         Parameters
         ----------
         name: str
             The name of the table.
+        namespace: List[str], optional
+            The namespace to drop the table from.
+            None or empty list represents root namespace.
         """
-        LOOP.run(self._conn.drop_table(name))
+        LOOP.run(self._conn.drop_table(name, namespace=namespace))

     @override
-    def rename_table(self, cur_name: str, new_name: str):
+    def rename_table(
+        self,
+        cur_name: str,
+        new_name: str,
+        cur_namespace: List[str] = [],
+        new_namespace: List[str] = [],
+    ):
         """Rename a table in the database.

         Parameters
@@ -292,7 +426,14 @@ class RemoteDBConnection(DBConnection):
         new_name: str
             The new name of the table.
         """
-        LOOP.run(self._conn.rename_table(cur_name, new_name))
+        LOOP.run(
+            self._conn.rename_table(
+                cur_name,
+                new_name,
+                cur_namespace=cur_namespace,
+                new_namespace=new_namespace,
+            )
+        )

     async def close(self):
         """Close the connection to the database."""
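Taken together, the namespace additions above give the remote connection a directory-like layer over tables. A hedged sketch of how these methods compose; the connection URI and API key are placeholders, and only the method names and parameters come from the hunks:

```python
# Sketch of the namespace-aware remote API added above.
import lancedb

db = lancedb.connect("db://my-project", api_key="sk-...")  # placeholders
db.create_namespace(["prod"])
print(list(db.list_namespaces()))                 # e.g. ["prod"]
db.create_table("events", data=[{"x": 1}], namespace=["prod"])
print(list(db.table_names(namespace=["prod"])))   # ["events"]
db.drop_table("events", namespace=["prod"])
db.drop_namespace(["prod"])
```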
python/python/lancedb/remote/header.py (new file, 180 lines)
@@ -0,0 +1,180 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright The LanceDB Authors
+
+"""Header providers for LanceDB remote connections.
+
+This module provides a flexible header management framework for LanceDB remote
+connections, allowing users to implement custom header strategies for
+authentication, request tracking, custom metadata, or any other header-based
+requirements.
+
+The module includes the HeaderProvider abstract base class and example implementations
+(StaticHeaderProvider and OAuthProvider) that demonstrate common patterns.
+
+The HeaderProvider interface is designed to be called before each request to the remote
+server, enabling dynamic header scenarios where values may need to be
+refreshed, rotated, or computed on-demand.
+"""
+
+from abc import ABC, abstractmethod
+from typing import Dict, Optional, Callable, Any
+import time
+import threading
+
+
+class HeaderProvider(ABC):
+    """Abstract base class for providing custom headers for each request.
+
+    Users can implement this interface to provide dynamic headers for various purposes
+    such as authentication (OAuth tokens, API keys), request tracking (correlation IDs),
+    custom metadata, or any other header-based requirements. The provider is called
+    before each request to ensure fresh header values are always used.
+
+    Error Handling
+    --------------
+    If get_headers() raises an exception, the request will fail. Implementations
+    should handle recoverable errors internally (e.g., retry token refresh) and
+    only raise exceptions for unrecoverable errors.
+    """
+
+    @abstractmethod
+    def get_headers(self) -> Dict[str, str]:
+        """Get the latest headers to be added to requests.
+
+        This method is called before each request to the remote LanceDB server.
+        Implementations should return headers that will be merged with existing headers.
+
+        Returns
+        -------
+        Dict[str, str]
+            Dictionary of header names to values to add to the request.
+
+        Raises
+        ------
+        Exception
+            If unable to fetch headers, the exception will be propagated
+            and the request will fail.
+        """
+        pass
+
+
+class StaticHeaderProvider(HeaderProvider):
+    """Example implementation: A simple header provider that returns static headers.
+
+    This is an example implementation showing how to create a HeaderProvider
+    for cases where headers don't change during the session. Users can use this
+    as a reference for implementing their own providers.
+
+    Parameters
+    ----------
+    headers : Dict[str, str]
+        Static headers to return for every request.
+    """
+
+    def __init__(self, headers: Dict[str, str]):
+        """Initialize with static headers.
+
+        Parameters
+        ----------
+        headers : Dict[str, str]
+            Headers to return for every request.
+        """
+        self._headers = headers.copy()
+
+    def get_headers(self) -> Dict[str, str]:
+        """Return the static headers.
+
+        Returns
+        -------
+        Dict[str, str]
+            Copy of the static headers.
+        """
+        return self._headers.copy()
+
+
+class OAuthProvider(HeaderProvider):
+    """Example implementation: OAuth token provider with automatic refresh.
+
+    This is an example implementation showing how to manage OAuth tokens
+    with automatic refresh when they expire. Users can use this as a reference
+    for implementing their own OAuth or token-based authentication providers.
+
+    Parameters
+    ----------
+    token_fetcher : Callable[[], Dict[str, Any]]
+        Function that fetches a new token. Should return a dict with
+        'access_token' and optionally 'expires_in' (seconds until expiration).
+    refresh_buffer_seconds : int, optional
+        Number of seconds before expiration to trigger refresh. Default is 300
+        (5 minutes).
+    """
+
+    def __init__(
+        self, token_fetcher: Callable[[], Any], refresh_buffer_seconds: int = 300
+    ):
+        """Initialize the OAuth provider.
+
+        Parameters
+        ----------
+        token_fetcher : Callable[[], Any]
+            Function to fetch new tokens. Should return dict with
+            'access_token' and optionally 'expires_in'.
+        refresh_buffer_seconds : int, optional
+            Seconds before expiry to refresh token. Default 300.
+        """
+        self._token_fetcher = token_fetcher
+        self._refresh_buffer = refresh_buffer_seconds
+        self._current_token: Optional[str] = None
+        self._token_expires_at: Optional[float] = None
+        self._refresh_lock = threading.Lock()
+
+    def _refresh_token_if_needed(self) -> None:
+        """Refresh the token if it's expired or close to expiring."""
+        with self._refresh_lock:
+            # Check again inside the lock in case another thread refreshed
+            if self._needs_refresh():
+                token_data = self._token_fetcher()
+
+                self._current_token = token_data.get("access_token")
+                if not self._current_token:
+                    raise ValueError("Token fetcher did not return 'access_token'")
+
+                # Set expiration if provided
+                expires_in = token_data.get("expires_in")
+                if expires_in:
+                    self._token_expires_at = time.time() + expires_in
+                else:
+                    # Token doesn't expire or expiration unknown
+                    self._token_expires_at = None
+
+    def _needs_refresh(self) -> bool:
+        """Check if token needs refresh."""
+        if self._current_token is None:
+            return True
+
+        if self._token_expires_at is None:
+            # No expiration info, assume token is valid
+            return False
+
+        # Refresh if we're within the buffer time of expiration
+        return time.time() >= (self._token_expires_at - self._refresh_buffer)
+
+    def get_headers(self) -> Dict[str, str]:
+        """Get OAuth headers, refreshing token if needed.
+
+        Returns
+        -------
+        Dict[str, str]
+            Headers with Bearer token authorization.
+
+        Raises
+        ------
+        Exception
+            If unable to fetch or refresh token.
+        """
+        self._refresh_token_if_needed()
+
+        if not self._current_token:
+            raise RuntimeError("Failed to obtain OAuth token")
+
+        return {"Authorization": f"Bearer {self._current_token}"}
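To show how a provider plugs into the client, here is a hedged sketch wiring `OAuthProvider` into the `header_provider` field added to `ClientConfig` earlier. The token endpoint, request payload, and connection URI are placeholders, and passing `client_config` to `lancedb.connect` is assumed from the existing remote API:

```python
# Sketch: attaching an OAuthProvider to a remote connection.
import lancedb
import requests
from lancedb.remote import ClientConfig
from lancedb.remote.header import OAuthProvider

def fetch_token() -> dict:
    # Placeholder token endpoint; must return access_token / expires_in.
    resp = requests.post(
        "https://auth.example.com/token",
        data={"grant_type": "client_credentials"},  # placeholder payload
    )
    return resp.json()

provider = OAuthProvider(fetch_token, refresh_buffer_seconds=60)
db = lancedb.connect(
    "db://my-project",  # placeholder
    client_config=ClientConfig(header_provider=provider),
)
```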
@@ -114,7 +114,8 @@ class RemoteTable(Table):
         index_type: Literal["BTREE", "BITMAP", "LABEL_LIST", "scalar"] = "scalar",
         *,
         replace: bool = False,
-        wait_timeout: timedelta = None,
+        wait_timeout: Optional[timedelta] = None,
+        name: Optional[str] = None,
     ):
         """Creates a scalar index
         Parameters
@@ -139,7 +140,11 @@ class RemoteTable(Table):

         LOOP.run(
             self._table.create_index(
-                column, config=config, replace=replace, wait_timeout=wait_timeout
+                column,
+                config=config,
+                replace=replace,
+                wait_timeout=wait_timeout,
+                name=name,
             )
         )

@@ -148,7 +153,7 @@ class RemoteTable(Table):
         column: str,
         *,
         replace: bool = False,
-        wait_timeout: timedelta = None,
+        wait_timeout: Optional[timedelta] = None,
         with_position: bool = False,
         # tokenizer configs:
         base_tokenizer: str = "simple",
@@ -161,6 +166,7 @@ class RemoteTable(Table):
         ngram_min_length: int = 3,
         ngram_max_length: int = 3,
         prefix_only: bool = False,
+        name: Optional[str] = None,
     ):
         config = FTS(
             with_position=with_position,
@@ -177,7 +183,11 @@ class RemoteTable(Table):
         )
         LOOP.run(
             self._table.create_index(
-                column, config=config, replace=replace, wait_timeout=wait_timeout
+                column,
+                config=config,
+                replace=replace,
+                wait_timeout=wait_timeout,
+                name=name,
             )
         )

@@ -194,6 +204,8 @@ class RemoteTable(Table):
         wait_timeout: Optional[timedelta] = None,
         *,
         num_bits: int = 8,
+        name: Optional[str] = None,
+        train: bool = True,
     ):
         """Create an index on the table.
         Currently, the only parameters that matter are
@@ -270,7 +282,11 @@ class RemoteTable(Table):

         LOOP.run(
             self._table.create_index(
-                vector_column_name, config=config, wait_timeout=wait_timeout
+                vector_column_name,
+                config=config,
+                wait_timeout=wait_timeout,
+                name=name,
+                train=train,
             )
         )

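The `name` and `train` parameters threaded through above let callers label indices and control training. A hedged usage sketch against a remote table; `tbl` is a placeholder handle, and argument names other than `name`/`train` follow the standard sync Table API rather than this hunk:

```python
# Sketch: naming indices on a RemoteTable using the parameters added above.
# `tbl` is assumed to be a table opened from a remote connection.
tbl.create_scalar_index("user_id", index_type="BTREE", name="user_id_btree")
tbl.create_fts_index("body", name="body_fts")
tbl.create_index(
    vector_column_name="vector",
    name="vector_ivf_pq",
    train=True,  # train the index with existing data (the default)
)
```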
@@ -9,6 +9,7 @@ from .linear_combination import LinearCombinationReranker
 from .openai import OpenaiReranker
 from .jinaai import JinaReranker
 from .rrf import RRFReranker
+from .mrr import MRRReranker
 from .answerdotai import AnswerdotaiRerankers
 from .voyageai import VoyageAIReranker

@@ -23,4 +24,5 @@ __all__ = [
     "RRFReranker",
     "AnswerdotaiRerankers",
     "VoyageAIReranker",
+    "MRRReranker",
 ]
python/python/lancedb/rerankers/mrr.py (new file, 169 lines)
@@ -0,0 +1,169 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright The LanceDB Authors
+
+
+from typing import Union, List, TYPE_CHECKING
+import pyarrow as pa
+import numpy as np
+
+from collections import defaultdict
+from .base import Reranker
+
+if TYPE_CHECKING:
+    from ..table import LanceVectorQueryBuilder
+
+
+class MRRReranker(Reranker):
+    """
+    Reranks the results using Mean Reciprocal Rank (MRR) algorithm based
+    on the scores of vector and FTS search.
+    Algorithm reference - https://en.wikipedia.org/wiki/Mean_reciprocal_rank
+
+    MRR calculates the average of reciprocal ranks across different search results.
+    For each document, it computes the reciprocal of its rank in each system,
+    then takes the mean of these reciprocal ranks as the final score.
+
+    Parameters
+    ----------
+    weight_vector : float, default 0.5
+        Weight for vector search results (0.0 to 1.0)
+    weight_fts : float, default 0.5
+        Weight for FTS search results (0.0 to 1.0)
+        Note: weight_vector + weight_fts should equal 1.0
+    return_score : str, default "relevance"
+        Options are "relevance" or "all"
+        The type of score to return. If "relevance", will return only the relevance
+        score. If "all", will return all scores from the vector and FTS search along
+        with the relevance score.
+    """
+
+    def __init__(
+        self,
+        weight_vector: float = 0.5,
+        weight_fts: float = 0.5,
+        return_score="relevance",
+    ):
+        if not (0.0 <= weight_vector <= 1.0):
+            raise ValueError("weight_vector must be between 0.0 and 1.0")
+        if not (0.0 <= weight_fts <= 1.0):
+            raise ValueError("weight_fts must be between 0.0 and 1.0")
+        if abs(weight_vector + weight_fts - 1.0) > 1e-6:
+            raise ValueError("weight_vector + weight_fts must equal 1.0")
+
+        super().__init__(return_score)
+        self.weight_vector = weight_vector
+        self.weight_fts = weight_fts
+
+    def rerank_hybrid(
+        self,
+        query: str,  # noqa: F821
+        vector_results: pa.Table,
+        fts_results: pa.Table,
+    ):
+        vector_ids = vector_results["_rowid"].to_pylist() if vector_results else []
+        fts_ids = fts_results["_rowid"].to_pylist() if fts_results else []
+
+        # Maps result_id to list of (type, reciprocal_rank)
+        mrr_score_map = defaultdict(list)
+
+        if vector_ids:
+            for rank, result_id in enumerate(vector_ids, 1):
+                reciprocal_rank = 1.0 / rank
+                mrr_score_map[result_id].append(("vector", reciprocal_rank))
+
+        if fts_ids:
+            for rank, result_id in enumerate(fts_ids, 1):
+                reciprocal_rank = 1.0 / rank
+                mrr_score_map[result_id].append(("fts", reciprocal_rank))
+
+        final_mrr_scores = {}
+        for result_id, scores in mrr_score_map.items():
+            vector_rr = 0.0
+            fts_rr = 0.0
+
+            for score_type, reciprocal_rank in scores:
+                if score_type == "vector":
+                    vector_rr = reciprocal_rank
+                elif score_type == "fts":
+                    fts_rr = reciprocal_rank
+
+            # If a document doesn't appear, its reciprocal rank is 0
+            weighted_mrr = self.weight_vector * vector_rr + self.weight_fts * fts_rr
+            final_mrr_scores[result_id] = weighted_mrr
+
+        combined_results = self.merge_results(vector_results, fts_results)
+        combined_row_ids = combined_results["_rowid"].to_pylist()
+        relevance_scores = [final_mrr_scores[row_id] for row_id in combined_row_ids]
+        combined_results = combined_results.append_column(
+            "_relevance_score", pa.array(relevance_scores, type=pa.float32())
+        )
+        combined_results = combined_results.sort_by(
+            [("_relevance_score", "descending")]
+        )
+
+        if self.score == "relevance":
+            combined_results = self._keep_relevance_score(combined_results)
+
+        return combined_results
+
+    def rerank_multivector(
+        self,
+        vector_results: Union[List[pa.Table], List["LanceVectorQueryBuilder"]],
+        query: str = None,
+        deduplicate: bool = True,  # noqa: F821
+    ):
+        """
+        Reranks the results from multiple vector searches using MRR algorithm.
+        Each vector search result is treated as a separate ranking system,
+        and MRR calculates the mean of reciprocal ranks across all systems.
+        This cannot reuse rerank_hybrid because MRR semantics require treating
+        each vector result as a separate ranking system.
+        """
+        if not all(isinstance(v, type(vector_results[0])) for v in vector_results):
+            raise ValueError(
+                "All elements in vector_results should be of the same type"
+            )
+
+        # avoid circular import
+        if type(vector_results[0]).__name__ == "LanceVectorQueryBuilder":
+            vector_results = [result.to_arrow() for result in vector_results]
+        elif not isinstance(vector_results[0], pa.Table):
+            raise ValueError(
+                "vector_results should be a list of pa.Table or LanceVectorQueryBuilder"
+            )
+
+        if not all("_rowid" in result.column_names for result in vector_results):
+            raise ValueError(
+                "'_rowid' is required for deduplication. \
+                add _rowid to search results like this: \
+                `search().with_row_id(True)`"
+            )
+
+        mrr_score_map = defaultdict(list)
+
+        for result_table in vector_results:
+            result_ids = result_table["_rowid"].to_pylist()
+            for rank, result_id in enumerate(result_ids, 1):
+                reciprocal_rank = 1.0 / rank
+                mrr_score_map[result_id].append(reciprocal_rank)
+
+        final_mrr_scores = {}
+        for result_id, reciprocal_ranks in mrr_score_map.items():
+            mean_rr = np.mean(reciprocal_ranks)
+            final_mrr_scores[result_id] = mean_rr
+
+        combined = pa.concat_tables(vector_results, **self._concat_tables_args)
+        combined = self._deduplicate(combined)
+
+        combined_row_ids = combined["_rowid"].to_pylist()
+
+        relevance_scores = [final_mrr_scores[row_id] for row_id in combined_row_ids]
+        combined = combined.append_column(
+            "_relevance_score", pa.array(relevance_scores, type=pa.float32())
+        )
+        combined = combined.sort_by([("_relevance_score", "descending")])
+
+        if self.score == "relevance":
+            combined = self._keep_relevance_score(combined)
+
+        return combined
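A hedged sketch of using the new `MRRReranker` in a hybrid query. The table contents, query string, and FTS setup are placeholders, and the `query_type="hybrid"` / `.rerank()` pattern follows the existing reranker API rather than this diff:

```python
# Sketch: hybrid search reranked by weighted MRR.
# Assumes `tbl` has a vector column plus an FTS index on its text column.
from lancedb.rerankers import MRRReranker

reranker = MRRReranker(weight_vector=0.7, weight_fts=0.3)
results = (
    tbl.search("geothermal energy", query_type="hybrid")
    .rerank(reranker=reranker)
    .limit(10)
    .to_pandas()
)
# A document ranked 1st by vector search and 3rd by FTS scores
# 0.7 * (1/1) + 0.3 * (1/3) = 0.8 under the weighted-MRR scheme above.
```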
@@ -689,6 +689,9 @@ class Table(ABC):
         sample_rate: int = 256,
         m: int = 20,
         ef_construction: int = 300,
+        name: Optional[str] = None,
+        train: bool = True,
+        target_partition_size: Optional[int] = None,
     ):
         """Create an index on the table.

@@ -721,6 +724,11 @@ class Table(ABC):
             Only 4 and 8 are supported.
         wait_timeout: timedelta, optional
             The timeout to wait if indexing is asynchronous.
+        name: str, optional
+            The name of the index. If not provided, a default name will be generated.
+        train: bool, default True
+            Whether to train the index with existing data. Vector indices always train
+            with existing data.
         """
         raise NotImplementedError

@@ -776,6 +784,7 @@ class Table(ABC):
         replace: bool = True,
         index_type: ScalarIndexType = "BTREE",
         wait_timeout: Optional[timedelta] = None,
+        name: Optional[str] = None,
     ):
         """Create a scalar index on a column.

@@ -790,6 +799,8 @@ class Table(ABC):
             The type of index to create.
         wait_timeout: timedelta, optional
             The timeout to wait if indexing is asynchronous.
+        name: str, optional
+            The name of the index. If not provided, a default name will be generated.
         Examples
         --------

@@ -852,6 +863,7 @@ class Table(ABC):
         ngram_max_length: int = 3,
         prefix_only: bool = False,
         wait_timeout: Optional[timedelta] = None,
+        name: Optional[str] = None,
     ):
         """Create a full-text search index on the table.

@@ -916,6 +928,8 @@ class Table(ABC):
             Whether to only index the prefix of the token for ngram tokenizer.
         wait_timeout: timedelta, optional
             The timeout to wait if indexing is asynchronous.
+        name: str, optional
+            The name of the index. If not provided, a default name will be generated.
         """
         raise NotImplementedError

@@ -1106,7 +1120,9 @@ class Table(ABC):
         raise NotImplementedError

     @abstractmethod
-    def take_offsets(self, offsets: list[int]) -> LanceTakeQueryBuilder:
+    def take_offsets(
+        self, offsets: list[int], *, with_row_id: bool = False
+    ) -> LanceTakeQueryBuilder:
         """
         Take a list of offsets from the table.

@@ -1132,8 +1148,60 @@ class Table(ABC):
         A record batch containing the rows at the given offsets.
         """

+    def __getitems__(self, offsets: list[int]) -> pa.RecordBatch:
+        """
+        Take a list of offsets from the table and return as a record batch.
+
+        This method uses the `take_offsets` method to take the rows. However, it
+        aligns the offsets to the passed in offsets. This means the return type
+        is a record batch (and so users should take care not to pass in too many
+        offsets)
+
+        Note: this method is primarily intended to fulfill the Dataset contract
+        for pytorch.
+
+        Parameters
+        ----------
+        offsets: list[int]
+            The offsets to take.
+
+        Returns
+        -------
+        pa.RecordBatch
+            A record batch containing the rows at the given offsets.
+        """
+        # We don't know the order of the results at all. So we calculate a permutation
+        # for ordering the given offsets. Then we load the data with the _rowoffset
+        # column. Then we sort by _rowoffset and apply the inverse of the permutation
+        # that we calculated.
+        #
+        # Note: this is potentially a lot of memory copy if we're operating on large
+        # batches :(
+        num_offsets = len(offsets)
+        indices = list(range(num_offsets))
+        permutation = sorted(indices, key=lambda idx: offsets[idx])
+        permutation_inv = [0] * num_offsets
+        for i in range(num_offsets):
+            permutation_inv[permutation[i]] = i
+
+        columns = self.schema.names
+        columns.append("_rowoffset")
+        tbl = (
+            self.take_offsets(offsets)
+            .select(columns)
+            .to_arrow()
+            .sort_by("_rowoffset")
+            .take(permutation_inv)
+            .combine_chunks()
+            .drop_columns(["_rowoffset"])
+        )
+
+        return tbl
+
     @abstractmethod
-    def take_row_ids(self, row_ids: list[int]) -> LanceTakeQueryBuilder:
+    def take_row_ids(
+        self, row_ids: list[int], *, with_row_id: bool = False
+    ) -> LanceTakeQueryBuilder:
         """
         Take a list of row ids from the table.

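Since `__getitems__` exists to satisfy the PyTorch batch-fetch protocol, here is a hedged sketch of what that enables. The table handle is a placeholder, and everything beyond `__len__`/`__getitems__` delegation (the wrapper class, the collate choice) is an assumption, not part of this diff:

```python
# Sketch: serving a LanceDB table to a PyTorch DataLoader via __getitems__.
from torch.utils.data import Dataset, DataLoader

class TableDataset(Dataset):
    def __init__(self, tbl):
        self.tbl = tbl  # an open LanceDB table (placeholder)

    def __len__(self):
        return self.tbl.count_rows()

    def __getitem__(self, idx):
        # Single-row fallback; batched loads go through __getitems__.
        return self.tbl.__getitems__([idx])

    def __getitems__(self, offsets):
        # Delegates to the batch fetch added above; returns a pa.RecordBatch.
        return self.tbl.__getitems__(offsets)

loader = DataLoader(
    TableDataset(tbl),
    batch_size=64,
    collate_fn=lambda batch: batch,  # keep the Arrow batch as-is
)
```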
@@ -1402,10 +1470,7 @@ class Table(ABC):
             be deleted unless they are at least 7 days old. If delete_unverified is True
             then these files will be deleted regardless of their age.
         retrain: bool, default False
-            If True, retrain the vector indices, this would refine the IVF clustering
-            and quantization, which may improve the search accuracy. It's faster than
-            re-creating the index from scratch, so it's recommended to try this first,
-            when the data distribution has changed significantly.
+            This parameter is no longer used and is deprecated.

         Experimental API
         ----------------
@@ -1639,13 +1704,16 @@ class LanceTable(Table):
         connection: "LanceDBConnection",
         name: str,
         *,
+        namespace: List[str] = [],
         storage_options: Optional[Dict[str, str]] = None,
         index_cache_size: Optional[int] = None,
     ):
         self._conn = connection
+        self._namespace = namespace
         self._table = LOOP.run(
             connection._conn.open_table(
                 name,
+                namespace=namespace,
                 storage_options=storage_options,
                 index_cache_size=index_cache_size,
             )
@@ -1656,8 +1724,8 @@ class LanceTable(Table):
         return self._table.name

     @classmethod
-    def open(cls, db, name, **kwargs):
-        tbl = cls(db, name, **kwargs)
+    def open(cls, db, name, *, namespace: List[str] = [], **kwargs):
+        tbl = cls(db, name, namespace=namespace, **kwargs)

         # check the dataset exists
         try:
@@ -1929,6 +1997,10 @@ class LanceTable(Table):
         sample_rate: int = 256,
         m: int = 20,
         ef_construction: int = 300,
+        *,
+        name: Optional[str] = None,
+        train: bool = True,
+        target_partition_size: Optional[int] = None,
     ):
         """Create an index on the table."""
         if accelerator is not None:
@@ -1945,6 +2017,7 @@ class LanceTable(Table):
                 num_bits=num_bits,
                 m=m,
                 ef_construction=ef_construction,
+                target_partition_size=target_partition_size,
             )
             self.checkout_latest()
             return
@@ -1954,6 +2027,7 @@ class LanceTable(Table):
                 num_partitions=num_partitions,
                 max_iterations=max_iterations,
                 sample_rate=sample_rate,
+                target_partition_size=target_partition_size,
             )
         elif index_type == "IVF_PQ":
             config = IvfPq(
@@ -1963,6 +2037,7 @@ class LanceTable(Table):
                 num_bits=num_bits,
                 max_iterations=max_iterations,
                 sample_rate=sample_rate,
+                target_partition_size=target_partition_size,
             )
         elif index_type == "IVF_HNSW_PQ":
             config = HnswPq(
@@ -1974,6 +2049,7 @@ class LanceTable(Table):
                 sample_rate=sample_rate,
                 m=m,
                 ef_construction=ef_construction,
+                target_partition_size=target_partition_size,
             )
         elif index_type == "IVF_HNSW_SQ":
             config = HnswSq(
@@ -1983,6 +2059,7 @@ class LanceTable(Table):
                 sample_rate=sample_rate,
                 m=m,
                 ef_construction=ef_construction,
+                target_partition_size=target_partition_size,
             )
         else:
             raise ValueError(f"Unknown index type {index_type}")
@@ -1992,6 +2069,8 @@ class LanceTable(Table):
                 vector_column_name,
                 replace=replace,
                 config=config,
+                name=name,
+                train=train,
             )
         )

@@ -2036,6 +2115,7 @@ class LanceTable(Table):
         *,
         replace: bool = True,
         index_type: ScalarIndexType = "BTREE",
+        name: Optional[str] = None,
     ):
         if index_type == "BTREE":
             config = BTree()
@@ -2046,7 +2126,7 @@ class LanceTable(Table):
         else:
             raise ValueError(f"Unknown index type {index_type}")
         return LOOP.run(
-            self._table.create_index(column, replace=replace, config=config)
+            self._table.create_index(column, replace=replace, config=config, name=name)
         )

     def create_fts_index(
@@ -2070,6 +2150,7 @@ class LanceTable(Table):
         ngram_min_length: int = 3,
         ngram_max_length: int = 3,
         prefix_only: bool = False,
+        name: Optional[str] = None,
     ):
         if not use_tantivy:
             if not isinstance(field_names, str):
@@ -2107,6 +2188,7 @@ class LanceTable(Table):
                 field_names,
                 replace=replace,
                 config=config,
+                name=name,
             )
         )
         return
@@ -2473,6 +2555,7 @@ class LanceTable(Table):
         fill_value: float = 0.0,
         embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
         *,
+        namespace: List[str] = [],
         storage_options: Optional[Dict[str, str | bool]] = None,
         data_storage_version: Optional[str] = None,
         enable_v2_manifest_paths: Optional[bool] = None,
@@ -2532,6 +2615,7 @@ class LanceTable(Table):
         """
         self = cls.__new__(cls)
         self._conn = db
+        self._namespace = namespace

         if data_storage_version is not None:
             warnings.warn(
@@ -2564,6 +2648,7 @@ class LanceTable(Table):
                 on_bad_vectors=on_bad_vectors,
                 fill_value=fill_value,
                 embedding_functions=embedding_functions,
+                namespace=namespace,
                 storage_options=storage_options,
             )
         )
@@ -2747,10 +2832,7 @@ class LanceTable(Table):
             be deleted unless they are at least 7 days old. If delete_unverified is True
             then these files will be deleted regardless of their age.
         retrain: bool, default False
-            If True, retrain the vector indices, this would refine the IVF clustering
-            and quantization, which may improve the search accuracy. It's faster than
-            re-creating the index from scratch, so it's recommended to try this first,
-            when the data distribution has changed significantly.
+            This parameter is no longer used and is deprecated.

         Experimental API
         ----------------
@@ -3251,6 +3333,8 @@ class AsyncTable:
             Union[IvfFlat, IvfPq, HnswPq, HnswSq, BTree, Bitmap, LabelList, FTS]
         ] = None,
         wait_timeout: Optional[timedelta] = None,
+        name: Optional[str] = None,
+        train: bool = True,
     ):
         """Create an index to speed up queries

@@ -3277,6 +3361,11 @@ class AsyncTable:
             creating an index object.
         wait_timeout: timedelta, optional
             The timeout to wait if indexing is asynchronous.
+        name: str, optional
+            The name of the index. If not provided, a default name will be generated.
+        train: bool, default True
+            Whether to train the index with existing data. Vector indices always train
+            with existing data.
         """
         if config is not None:
             if not isinstance(
@@ -3288,7 +3377,12 @@ class AsyncTable:
             )
         try:
             await self._inner.create_index(
-                column, index=config, replace=replace, wait_timeout=wait_timeout
+                column,
+                index=config,
+                replace=replace,
+                wait_timeout=wait_timeout,
+                name=name,
+                train=train,
             )
         except ValueError as e:
             if "not support the requested language" in str(e):
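A hedged sketch of the async path with the new `name`/`train` arguments. The table handle and column are placeholders, and the `IvfPq` import and parameters follow the index classes referenced in the hunk rather than anything shown here:

```python
# Sketch: naming an async vector index with the parameters added above.
# `async_tbl` is assumed to come from `await lancedb.connect_async(...)`.
from lancedb.index import IvfPq

await async_tbl.create_index(
    "vector",
    config=IvfPq(num_partitions=128, num_sub_vectors=16),  # assumed params
    name="vector_ivf_pq_v1",
    train=True,  # vector indices always train with existing data
)
```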
@@ -3826,6 +3920,7 @@ class AsyncTable:
                 when_not_matched_by_source_delete=merge._when_not_matched_by_source_delete,
                 when_not_matched_by_source_condition=merge._when_not_matched_by_source_condition,
                 timeout=merge._timeout,
+                use_index=merge._use_index,
             ),
         )

@@ -4198,10 +4293,7 @@ class AsyncTable:
             be deleted unless they are at least 7 days old. If delete_unverified is True
             then these files will be deleted regardless of their age.
         retrain: bool, default False
-            If True, retrain the vector indices, this would refine the IVF clustering
-            and quantization, which may improve the search accuracy. It's faster than
-            re-creating the index from scratch, so it's recommended to try this first,
-            when the data distribution has changed significantly.
+            This parameter is no longer used and is deprecated.

         Experimental API
         ----------------
@@ -4224,10 +4316,19 @@ class AsyncTable:
         cleanup_since_ms: Optional[int] = None
         if cleanup_older_than is not None:
             cleanup_since_ms = round(cleanup_older_than.total_seconds() * 1000)
+
+        if retrain:
+            import warnings
+
+            warnings.warn(
+                "The 'retrain' parameter is deprecated and will be removed in a "
+                "future version.",
+                DeprecationWarning,
+            )
+
         return await self._inner.optimize(
             cleanup_since_ms=cleanup_since_ms,
             delete_unverified=delete_unverified,
-            retrain=retrain,
         )

     async def list_indices(self) -> Iterable[IndexConfig]:
@@ -175,6 +175,18 @@ def test_table_names(tmp_db: lancedb.DBConnection):
     tmp_db.create_table("test3", data=data)
     assert tmp_db.table_names() == ["test1", "test2", "test3"]

+    # Test that positional arguments for page_token and limit still work
+    result = list(tmp_db.table_names("test1", 1))  # page_token="test1", limit=1
+    assert result == ["test2"], f"Expected ['test2'], got {result}"
+
+    # Test mixed positional and keyword arguments
+    result = list(tmp_db.table_names("test2", limit=2))
+    assert result == ["test3"], f"Expected ['test3'], got {result}"
+
+    # Test that namespace parameter can be passed as keyword
+    result = list(tmp_db.table_names(namespace=[]))
+    assert len(result) == 3
+

 @pytest.mark.asyncio
 async def test_table_names_async(tmp_path):
@@ -728,3 +740,210 @@ def test_bypass_vector_index_sync(tmp_db: lancedb.DBConnection):
         table.search(sample_key).bypass_vector_index().explain_plan(verbose=True)
     )
     assert "KNN" in plan_without_index
+
+
+def test_local_namespace_operations(tmp_path):
+    """Test that local mode namespace operations behave as expected."""
+    # Create a local database connection
+    db = lancedb.connect(tmp_path)
+
+    # Test list_namespaces returns empty list for root namespace
+    namespaces = list(db.list_namespaces())
+    assert namespaces == []
+
+    # Test list_namespaces with non-empty namespace raises NotImplementedError
+    with pytest.raises(
+        NotImplementedError,
+        match="Namespace operations are not supported for listing database",
+    ):
+        list(db.list_namespaces(namespace=["test"]))
+
+
+def test_local_create_namespace_not_supported(tmp_path):
+    """Test that create_namespace is not supported in local mode."""
+    db = lancedb.connect(tmp_path)
+
+    with pytest.raises(
+        NotImplementedError,
+        match="Namespace operations are not supported for listing database",
+    ):
+        db.create_namespace(["test_namespace"])
+
+
+def test_local_drop_namespace_not_supported(tmp_path):
+    """Test that drop_namespace is not supported in local mode."""
+    db = lancedb.connect(tmp_path)
+
+    with pytest.raises(
+        NotImplementedError,
+        match="Namespace operations are not supported for listing database",
+    ):
+        db.drop_namespace(["test_namespace"])
+
+
+def test_local_table_operations_with_namespace_raise_error(tmp_path):
+    """
+    Test that table operations with a namespace parameter
+    raise NotImplementedError in local mode.
+    """
+    db = lancedb.connect(tmp_path)
+
+    # Create some test data
+    data = [{"vector": [1.0, 2.0], "item": "test"}]
+    schema = pa.schema(
+        [pa.field("vector", pa.list_(pa.float32(), 2)), pa.field("item", pa.string())]
+    )
+
+    # Test create_table with namespace - should raise NotImplementedError
+    with pytest.raises(
+        NotImplementedError,
+        match="Namespace parameter is not supported for listing database",
+    ):
+        db.create_table(
+            "test_table_with_ns", data=data, schema=schema, namespace=["test_ns"]
+        )
+
+    # Create table normally for other tests
+    db.create_table("test_table", data=data, schema=schema)
+    assert "test_table" in db.table_names()
+
+    # Test open_table with namespace - should raise NotImplementedError
+    with pytest.raises(
+        NotImplementedError,
+        match="Namespace parameter is not supported for listing database",
+    ):
+        db.open_table("test_table", namespace=["test_ns"])
+
+    # Test table_names with namespace - should raise NotImplementedError
+    with pytest.raises(
+        NotImplementedError,
+        match="Namespace parameter is not supported for listing database",
+    ):
+        list(db.table_names(namespace=["test_ns"]))
+
+    # Test drop_table with namespace - should raise NotImplementedError
+    with pytest.raises(
+        NotImplementedError,
+        match="Namespace parameter is not supported for listing database",
+    ):
+        db.drop_table("test_table", namespace=["test_ns"])
+
+    # Test table_names without namespace - should work normally
+    tables_root = list(db.table_names())
+    assert "test_table" in tables_root
+
+
+def test_clone_table_latest_version(tmp_path):
+    """Test cloning a table with the latest version (default behavior)"""
+    import os
+
+    db = lancedb.connect(tmp_path)
+
+    # Create source table with some data
+    data = [
+        {"id": 1, "text": "hello", "vector": [1.0, 2.0]},
+        {"id": 2, "text": "world", "vector": [3.0, 4.0]},
+    ]
+    source_table = db.create_table("source", data=data)
+
+    # Add more data to create a new version
+    more_data = [{"id": 3, "text": "test", "vector": [5.0, 6.0]}]
+    source_table.add(more_data)
+
+    # Clone the table (should get latest version with 3 rows)
+    source_uri = os.path.join(tmp_path, "source.lance")
+    cloned_table = db.clone_table("cloned", source_uri)
+
+    # Verify cloned table has all 3 rows
+    assert cloned_table.count_rows() == 3
+    assert "cloned" in db.table_names()
+
+    # Verify data matches
+    cloned_data = cloned_table.to_pandas()
+    assert len(cloned_data) == 3
+    assert set(cloned_data["id"].tolist()) == {1, 2, 3}
+
+
+def test_clone_table_specific_version(tmp_path):
+    """Test cloning a table from a specific version"""
+    import os
+
+    db = lancedb.connect(tmp_path)
+
+    # Create source table with initial data
+    data = [
+        {"id": 1, "text": "hello", "vector": [1.0, 2.0]},
+        {"id": 2, "text": "world", "vector": [3.0, 4.0]},
+    ]
+    source_table = db.create_table("source", data=data)
+
+    # Get the initial version
+    initial_version = source_table.version
+
+    # Add more data to create a new version
+    more_data = [{"id": 3, "text": "test", "vector": [5.0, 6.0]}]
+    source_table.add(more_data)
+
+    # Verify source now has 3 rows
+    assert source_table.count_rows() == 3
+
+    # Clone from the initial version (should have only 2 rows)
+    source_uri = os.path.join(tmp_path, "source.lance")
+    cloned_table = db.clone_table("cloned", source_uri, source_version=initial_version)
+
+    # Verify cloned table has only the initial 2 rows
+    assert cloned_table.count_rows() == 2
+    cloned_data = cloned_table.to_pandas()
+    assert set(cloned_data["id"].tolist()) == {1, 2}
+
+
+def test_clone_table_with_tag(tmp_path):
+    """Test cloning a table from a tagged version"""
+    import os
+
+    db = lancedb.connect(tmp_path)
+
+    # Create source table with initial data
+    data = [
+        {"id": 1, "text": "hello", "vector": [1.0, 2.0]},
+        {"id": 2, "text": "world", "vector": [3.0, 4.0]},
+    ]
+    source_table = db.create_table("source", data=data)
+
+    # Create a tag for the current version
+    source_table.tags.create("v1.0", source_table.version)
+
+    # Add more data after the tag
+    more_data = [{"id": 3, "text": "test", "vector": [5.0, 6.0]}]
+    source_table.add(more_data)
+
+    # Verify source now has 3 rows
+    assert source_table.count_rows() == 3
+
+    # Clone from the tagged version (should have only 2 rows)
+    source_uri = os.path.join(tmp_path, "source.lance")
+    cloned_table = db.clone_table("cloned", source_uri, source_tag="v1.0")
+
+    # Verify cloned table has only the tagged version's 2 rows
+    assert cloned_table.count_rows() == 2
+    cloned_data = cloned_table.to_pandas()
|
||||||
|
assert set(cloned_data["id"].tolist()) == {1, 2}
|
||||||
|
|
||||||
|
|
||||||
|
def test_clone_table_deep_clone_fails(tmp_path):
|
||||||
|
"""Test that deep clone raises an unsupported error"""
|
||||||
|
import os
|
||||||
|
|
||||||
|
db = lancedb.connect(tmp_path)
|
||||||
|
|
||||||
|
# Create source table with some data
|
||||||
|
data = [
|
||||||
|
{"id": 1, "text": "hello", "vector": [1.0, 2.0]},
|
||||||
|
{"id": 2, "text": "world", "vector": [3.0, 4.0]},
|
||||||
|
]
|
||||||
|
db.create_table("source", data=data)
|
||||||
|
|
||||||
|
# Try to create a deep clone (should fail)
|
||||||
|
source_uri = os.path.join(tmp_path, "source.lance")
|
||||||
|
with pytest.raises(Exception, match="Deep clone is not yet implemented"):
|
||||||
|
db.clone_table("cloned", source_uri, is_shallow=False)
|
||||||
|
|||||||
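Taken together, these tests pin down the shallow-clone API surface: clone_table copies the latest version by default, or a version pinned with source_version / source_tag, and deep clones are rejected. A minimal sketch of that workflow, using only the calls exercised above; the path and table names are illustrative, not from the diff:

import os
import lancedb

db = lancedb.connect("/tmp/clone_demo")  # illustrative local path
src = db.create_table("events", data=[{"id": 1, "vector": [0.0, 1.0]}])
src.tags.create("baseline", src.version)    # tag the 1-row version
src.add([{"id": 2, "vector": [2.0, 3.0]}])  # latest version now has 2 rows

source_uri = os.path.join("/tmp/clone_demo", "events.lance")
latest = db.clone_table("events_latest", source_uri)                          # 2 rows
pinned = db.clone_table("events_at_tag", source_uri, source_tag="baseline")   # 1 row
assert latest.count_rows() == 2 and pinned.count_rows() == 1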
@@ -114,6 +114,63 @@ def test_embedding_function_variables():
     assert func.safe_model_dump()["secret_key"] == "$var:secret"
+
+
+def test_parse_functions_with_variables():
+    @register("variable-parsing-test")
+    class VariableParsingFunction(TextEmbeddingFunction):
+        api_key: str
+        base_url: Optional[str] = None
+
+        @staticmethod
+        def sensitive_keys():
+            return ["api_key"]
+
+        def ndims(self):
+            return 10
+
+        def generate_embeddings(self, texts):
+            # Mock implementation that just returns random embeddings
+            # In real usage, this would use the api_key to call an API
+            return [np.random.rand(self.ndims()).tolist() for _ in texts]
+
+    registry = EmbeddingFunctionRegistry.get_instance()
+
+    registry.set_var("test_api_key", "sk-test-key-12345")
+    registry.set_var("test_base_url", "https://api.example.com")
+
+    conf = EmbeddingFunctionConfig(
+        source_column="text",
+        vector_column="vector",
+        function=registry.get("variable-parsing-test").create(
+            api_key="$var:test_api_key", base_url="$var:test_base_url"
+        ),
+    )
+
+    metadata = registry.get_table_metadata([conf])
+
+    # Create a mock arrow table with the metadata
+    schema = pa.schema(
+        [pa.field("text", pa.string()), pa.field("vector", pa.list_(pa.float32(), 10))]
+    )
+    table = pa.table({"text": [], "vector": []}, schema=schema)
+    table = table.replace_schema_metadata(metadata)
+
+    ds = lance.write_dataset(table, "memory://")
+
+    configs = registry.parse_functions(ds.schema.metadata)
+
+    assert "vector" in configs
+    parsed_func = configs["vector"].function
+
+    assert parsed_func.api_key == "sk-test-key-12345"
+    assert parsed_func.base_url == "https://api.example.com"
+
+    embeddings = parsed_func.generate_embeddings(["test text"])
+    assert len(embeddings) == 1
+    assert len(embeddings[0]) == 10
+
+    assert parsed_func.safe_model_dump()["api_key"] == "$var:test_api_key"
+
+
 def test_embedding_with_bad_results(tmp_path):
     @register("null-embedding")
     class NullEmbeddingFunction(TextEmbeddingFunction):
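The hunk above establishes a round-trip contract for secrets: values registered with set_var are substituted at runtime, while only the `$var:` placeholder is ever serialized into table metadata. A condensed sketch of the usage pattern; "my-embedding" stands for any registered TextEmbeddingFunction subclass with an api_key field, like the one defined in the test:

from lancedb.embeddings import EmbeddingFunctionRegistry

registry = EmbeddingFunctionRegistry.get_instance()
registry.set_var("my_api_key", "sk-real-key")  # illustrative secret, set once per process

func = registry.get("my-embedding").create(api_key="$var:my_api_key")
# Only the placeholder lands in persisted metadata:
assert func.safe_model_dump()["api_key"] == "$var:my_api_key"
# parse_functions() on the stored schema metadata later resolves the
# placeholder back to "sk-real-key", as the test above verifies.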
@@ -157,7 +157,16 @@ def test_create_inverted_index(tmp_path, table):
 def test_create_inverted_index(table, use_tantivy, with_position):
     if use_tantivy and not with_position:
         pytest.skip("we don't support building a tantivy index without position")
-    table.create_fts_index("text", use_tantivy=use_tantivy, with_position=with_position)
+    table.create_fts_index(
+        "text",
+        use_tantivy=use_tantivy,
+        with_position=with_position,
+        name="custom_fts_index",
+    )
+    if not use_tantivy:
+        indices = table.list_indices()
+        fts_indices = [i for i in indices if i.index_type == "FTS"]
+        assert any(i.name == "custom_fts_index" for i in fts_indices)


 def test_populate_index(tmp_path, table):
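The point of the new `name=` argument: FTS indices no longer have to take the auto-generated name, and with the native engine (use_tantivy=False) the custom name is discoverable via list_indices(). A one-line sketch, assuming `tbl` is an existing table with a "text" column; "body_fts" is an illustrative name:

tbl.create_fts_index("text", use_tantivy=False, name="body_fts")
assert any(i.name == "body_fts" and i.index_type == "FTS" for i in tbl.list_indices())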
237 python/python/tests/test_header_provider.py Normal file
@@ -0,0 +1,237 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

import concurrent.futures
import pytest
import time
import threading
from typing import Dict

from lancedb.remote import ClientConfig, HeaderProvider
from lancedb.remote.header import StaticHeaderProvider, OAuthProvider


class TestStaticHeaderProvider:
    def test_init(self):
        """Test StaticHeaderProvider initialization."""
        headers = {"X-API-Key": "test-key", "X-Custom": "value"}
        provider = StaticHeaderProvider(headers)
        assert provider._headers == headers

    def test_get_headers(self):
        """Test get_headers returns correct headers."""
        headers = {"X-API-Key": "test-key", "X-Custom": "value"}
        provider = StaticHeaderProvider(headers)

        result = provider.get_headers()
        assert result == headers

        # Ensure it returns a copy
        result["X-Modified"] = "modified"
        result2 = provider.get_headers()
        assert "X-Modified" not in result2


class TestOAuthProvider:
    def test_init(self):
        """Test OAuthProvider initialization."""

        def fetcher():
            return {"access_token": "token123", "expires_in": 3600}

        provider = OAuthProvider(fetcher)
        assert provider._token_fetcher is fetcher
        assert provider._refresh_buffer == 300
        assert provider._current_token is None
        assert provider._token_expires_at is None

    def test_get_headers_first_time(self):
        """Test get_headers fetches token on first call."""

        def fetcher():
            return {"access_token": "token123", "expires_in": 3600}

        provider = OAuthProvider(fetcher)
        headers = provider.get_headers()

        assert headers == {"Authorization": "Bearer token123"}
        assert provider._current_token == "token123"
        assert provider._token_expires_at is not None

    def test_token_refresh(self):
        """Test token refresh when expired."""
        call_count = 0
        tokens = ["token1", "token2"]

        def fetcher():
            nonlocal call_count
            token = tokens[call_count]
            call_count += 1
            return {"access_token": token, "expires_in": 1}  # Expires in 1 second

        provider = OAuthProvider(fetcher, refresh_buffer_seconds=0)

        # First call
        headers1 = provider.get_headers()
        assert headers1 == {"Authorization": "Bearer token1"}

        # Wait for token to expire
        time.sleep(1.1)

        # Second call should refresh
        headers2 = provider.get_headers()
        assert headers2 == {"Authorization": "Bearer token2"}
        assert call_count == 2

    def test_no_expiry_info(self):
        """Test handling tokens without expiry information."""

        def fetcher():
            return {"access_token": "permanent_token"}

        provider = OAuthProvider(fetcher)
        headers = provider.get_headers()

        assert headers == {"Authorization": "Bearer permanent_token"}
        assert provider._token_expires_at is None

        # Should not refresh on second call
        headers2 = provider.get_headers()
        assert headers2 == {"Authorization": "Bearer permanent_token"}

    def test_missing_access_token(self):
        """Test error handling when access_token is missing."""

        def fetcher():
            return {"expires_in": 3600}  # Missing access_token

        provider = OAuthProvider(fetcher)

        with pytest.raises(
            ValueError, match="Token fetcher did not return 'access_token'"
        ):
            provider.get_headers()

    def test_sync_method(self):
        """Test synchronous get_headers method."""

        def fetcher():
            return {"access_token": "sync_token", "expires_in": 3600}

        provider = OAuthProvider(fetcher)
        headers = provider.get_headers()

        assert headers == {"Authorization": "Bearer sync_token"}


class TestClientConfigIntegration:
    def test_client_config_with_header_provider(self):
        """Test ClientConfig can accept a HeaderProvider."""
        provider = StaticHeaderProvider({"X-Test": "value"})
        config = ClientConfig(header_provider=provider)

        assert config.header_provider is provider

    def test_client_config_without_header_provider(self):
        """Test ClientConfig works without HeaderProvider."""
        config = ClientConfig()
        assert config.header_provider is None


class CustomProvider(HeaderProvider):
    """Custom provider for testing abstract class."""

    def get_headers(self) -> Dict[str, str]:
        return {"X-Custom": "custom-value"}


class TestCustomHeaderProvider:
    def test_custom_provider(self):
        """Test custom HeaderProvider implementation."""
        provider = CustomProvider()
        headers = provider.get_headers()
        assert headers == {"X-Custom": "custom-value"}


class ErrorProvider(HeaderProvider):
    """Provider that raises errors for testing error handling."""

    def __init__(self, error_message: str = "Test error"):
        self.error_message = error_message
        self.call_count = 0

    def get_headers(self) -> Dict[str, str]:
        self.call_count += 1
        raise RuntimeError(self.error_message)


class TestErrorHandling:
    def test_provider_error_propagation(self):
        """Test that errors from header provider are properly propagated."""
        provider = ErrorProvider("Authentication failed")

        with pytest.raises(RuntimeError, match="Authentication failed"):
            provider.get_headers()

        assert provider.call_count == 1

    def test_provider_error(self):
        """Test that errors are propagated."""
        provider = ErrorProvider("Sync error")

        with pytest.raises(RuntimeError, match="Sync error"):
            provider.get_headers()


class ConcurrentProvider(HeaderProvider):
    """Provider for testing thread safety."""

    def __init__(self):
        self.counter = 0
        self.lock = threading.Lock()

    def get_headers(self) -> Dict[str, str]:
        with self.lock:
            self.counter += 1
            # Simulate some work
            time.sleep(0.01)
            return {"X-Request-Id": str(self.counter)}


class TestConcurrency:
    def test_concurrent_header_fetches(self):
        """Test that header provider can handle concurrent requests."""
        provider = ConcurrentProvider()

        # Create multiple concurrent requests
        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
            futures = [executor.submit(provider.get_headers) for _ in range(10)]
            results = [f.result() for f in futures]

        # Each request should get a unique counter value
        request_ids = [int(r["X-Request-Id"]) for r in results]
        assert len(set(request_ids)) == 10
        assert min(request_ids) == 1
        assert max(request_ids) == 10

    def test_oauth_concurrent_refresh(self):
        """Test that OAuth provider handles concurrent refresh requests safely."""
        call_count = 0

        def slow_token_fetch():
            nonlocal call_count
            call_count += 1
            time.sleep(0.1)  # Simulate slow token fetch
            return {"access_token": f"token-{call_count}", "expires_in": 3600}

        provider = OAuthProvider(slow_token_fetch)

        # Force multiple concurrent refreshes
        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(provider.get_headers) for _ in range(5)]
            results = [f.result() for f in futures]

        # All requests should get the same token (only one refresh should happen)
        tokens = [r["Authorization"] for r in results]
        assert all(t == "Bearer token-1" for t in tokens)
        assert call_count == 1  # Only one token fetch despite concurrent requests
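This new module gives remote connections pluggable per-request headers: StaticHeaderProvider for fixed headers, OAuthProvider for bearer tokens with expiry-aware, thread-safe refresh, or any HeaderProvider subclass. A minimal sketch of the intended wiring; the token fetcher body is hypothetical, and the connection call itself is outside this diff:

from lancedb.remote import ClientConfig
from lancedb.remote.header import OAuthProvider

def fetch_token():
    # Hypothetical call to your identity provider.
    return {"access_token": "token-abc", "expires_in": 3600}

config = ClientConfig(header_provider=OAuthProvider(fetch_token))
# Pass `client_config=config` when opening a LanceDB Cloud connection; each
# request then carries a current "Authorization: Bearer ..." header.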
@@ -35,6 +35,8 @@ async def some_table(db_async):
             "tags": [
                 [f"tag{random.randint(0, 8)}" for _ in range(2)] for _ in range(NROWS)
             ],
+            "is_active": [random.choice([True, False]) for _ in range(NROWS)],
+            "data": [random.randbytes(random.randint(0, 128)) for _ in range(NROWS)],
         }
     )
     return await db_async.create_table(
@@ -99,10 +101,17 @@ async def test_create_fixed_size_binary_index(some_table: AsyncTable):
 @pytest.mark.asyncio
 async def test_create_bitmap_index(some_table: AsyncTable):
     await some_table.create_index("id", config=Bitmap())
+    await some_table.create_index("is_active", config=Bitmap())
+    await some_table.create_index("data", config=Bitmap())
     indices = await some_table.list_indices()
-    assert str(indices) == '[Index(Bitmap, columns=["id"], name="id_idx")]'
-    indices = await some_table.list_indices()
-    assert len(indices) == 1
+    assert len(indices) == 3
+    assert indices[0].index_type == "Bitmap"
+    assert indices[0].columns == ["id"]
+    assert indices[1].index_type == "Bitmap"
+    assert indices[1].columns == ["is_active"]
+    assert indices[2].index_type == "Bitmap"
+    assert indices[2].columns == ["data"]

     index_name = indices[0].name
     stats = await some_table.index_stats(index_name)
     assert stats.index_type == "BITMAP"
@@ -111,6 +120,11 @@ async def test_create_bitmap_index(some_table: AsyncTable):
     assert stats.num_unindexed_rows == 0
     assert stats.num_indices == 1
+
+    assert (
+        "ScalarIndexQuery"
+        in await some_table.query().where("is_active = TRUE").explain_plan()
+    )


 @pytest.mark.asyncio
 async def test_create_label_list_index(some_table: AsyncTable):
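Besides widening Bitmap coverage to boolean and binary columns, the hunk adds a plan-level check: once the index exists, a filter on the column should be answered by a ScalarIndexQuery node rather than a full scan. A small async sketch of that check, assuming `tbl` is an AsyncTable with a boolean "is_active" column and that Bitmap is imported from lancedb.index as elsewhere in this suite:

from lancedb.index import Bitmap

await tbl.create_index("is_active", config=Bitmap())
plan = await tbl.query().where("is_active = TRUE").explain_plan()
assert "ScalarIndexQuery" in plan  # the filter is served by the bitmap index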
707 python/python/tests/test_namespace.py Normal file
@@ -0,0 +1,707 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

"""Tests for LanceDB namespace integration."""

import tempfile
import shutil
from typing import Dict, Optional
import pytest
import pyarrow as pa
import lancedb
from lance_namespace.namespace import NATIVE_IMPLS, LanceNamespace
from lance_namespace_urllib3_client.models import (
    ListTablesRequest,
    ListTablesResponse,
    DescribeTableRequest,
    DescribeTableResponse,
    RegisterTableRequest,
    RegisterTableResponse,
    DeregisterTableRequest,
    DeregisterTableResponse,
    CreateTableRequest,
    CreateTableResponse,
    DropTableRequest,
    DropTableResponse,
    ListNamespacesRequest,
    ListNamespacesResponse,
    CreateNamespaceRequest,
    CreateNamespaceResponse,
    DropNamespaceRequest,
    DropNamespaceResponse,
)


class TempNamespace(LanceNamespace):
    """A simple dictionary-backed namespace for testing."""

    # Class-level storage to persist table registry across instances
    _global_registry: Dict[str, Dict[str, str]] = {}
    # Class-level storage for namespaces (supporting 1-level namespace)
    _global_namespaces: Dict[str, set] = {}

    def __init__(self, **properties):
        """Initialize the test namespace.

        Args:
            root: The root directory for tables (optional)
            **properties: Additional configuration properties
        """
        self.config = TempNamespaceConfig(properties)
        # Use the root as a key to maintain separate registries per root
        root = self.config.root
        if root not in self._global_registry:
            self._global_registry[root] = {}
        if root not in self._global_namespaces:
            self._global_namespaces[root] = set()
        self.tables = self._global_registry[root]  # Reference to shared registry
        self.namespaces = self._global_namespaces[root]  # Reference to shared namespaces

    def list_tables(self, request: ListTablesRequest) -> ListTablesResponse:
        """List all tables in the namespace."""
        if not request.id:
            # List all tables in root namespace
            tables = [name for name in self.tables.keys() if "." not in name]
        else:
            # List tables in specific namespace (1-level only)
            if len(request.id) == 1:
                namespace_name = request.id[0]
                prefix = f"{namespace_name}."
                tables = [
                    name[len(prefix) :]
                    for name in self.tables.keys()
                    if name.startswith(prefix)
                ]
            else:
                # Multi-level namespaces not supported
                raise ValueError("Only 1-level namespaces are supported")
        return ListTablesResponse(tables=tables)

    def describe_table(self, request: DescribeTableRequest) -> DescribeTableResponse:
        """Describe a table by returning its location."""
        if not request.id:
            raise ValueError("Invalid table ID")

        if len(request.id) == 1:
            # Root namespace table
            table_name = request.id[0]
        elif len(request.id) == 2:
            # Namespaced table (1-level namespace)
            namespace_name, table_name = request.id
            table_name = f"{namespace_name}.{table_name}"
        else:
            raise ValueError("Only 1-level namespaces are supported")

        if table_name not in self.tables:
            raise RuntimeError(f"Table does not exist: {table_name}")

        table_uri = self.tables[table_name]
        return DescribeTableResponse(location=table_uri)

    def create_table(
        self, request: CreateTableRequest, request_data: bytes
    ) -> CreateTableResponse:
        """Create a table in the namespace."""
        if not request.id:
            raise ValueError("Invalid table ID")

        if len(request.id) == 1:
            # Root namespace table
            table_name = request.id[0]
            table_uri = f"{self.config.root}/{table_name}.lance"
        elif len(request.id) == 2:
            # Namespaced table (1-level namespace)
            namespace_name, base_table_name = request.id
            # Add namespace to our namespace set
            self.namespaces.add(namespace_name)
            table_name = f"{namespace_name}.{base_table_name}"
            table_uri = f"{self.config.root}/{namespace_name}/{base_table_name}.lance"
        else:
            raise ValueError("Only 1-level namespaces are supported")

        # Check if table already exists
        if table_name in self.tables:
            if request.mode == "overwrite":
                # Drop existing table for overwrite mode
                del self.tables[table_name]
            else:
                raise RuntimeError(f"Table already exists: {table_name}")

        # Parse the Arrow IPC stream to get the schema and create the actual table
        import pyarrow.ipc as ipc
        import io
        import lance
        import os

        # Create directory if needed for namespaced tables
        os.makedirs(os.path.dirname(table_uri), exist_ok=True)

        # Read the IPC stream
        reader = ipc.open_stream(io.BytesIO(request_data))
        table = reader.read_all()

        # Create the actual Lance table
        lance.write_dataset(table, table_uri)

        # Store the table mapping
        self.tables[table_name] = table_uri

        return CreateTableResponse(location=table_uri)

    def drop_table(self, request: DropTableRequest) -> DropTableResponse:
        """Drop a table from the namespace."""
        if not request.id:
            raise ValueError("Invalid table ID")

        if len(request.id) == 1:
            # Root namespace table
            table_name = request.id[0]
        elif len(request.id) == 2:
            # Namespaced table (1-level namespace)
            namespace_name, base_table_name = request.id
            table_name = f"{namespace_name}.{base_table_name}"
        else:
            raise ValueError("Only 1-level namespaces are supported")

        if table_name not in self.tables:
            raise RuntimeError(f"Table does not exist: {table_name}")

        # Get the table URI
        table_uri = self.tables[table_name]

        # Delete the actual table files
        import shutil
        import os

        if os.path.exists(table_uri):
            shutil.rmtree(table_uri, ignore_errors=True)

        # Remove from registry
        del self.tables[table_name]

        return DropTableResponse()

    def register_table(self, request: RegisterTableRequest) -> RegisterTableResponse:
        """Register a table with the namespace."""
        if not request.id or len(request.id) != 1:
            raise ValueError("Invalid table ID")

        if not request.location:
            raise ValueError("Table location is required")

        table_name = request.id[0]
        self.tables[table_name] = request.location

        return RegisterTableResponse()

    def deregister_table(
        self, request: DeregisterTableRequest
    ) -> DeregisterTableResponse:
        """Deregister a table from the namespace."""
        if not request.id or len(request.id) != 1:
            raise ValueError("Invalid table ID")

        table_name = request.id[0]
        if table_name not in self.tables:
            raise RuntimeError(f"Table does not exist: {table_name}")

        del self.tables[table_name]
        return DeregisterTableResponse()

    def list_namespaces(self, request: ListNamespacesRequest) -> ListNamespacesResponse:
        """List child namespaces."""
        if not request.id:
            # List root-level namespaces
            namespaces = list(self.namespaces)
        elif len(request.id) == 1:
            # For 1-level namespace, there are no child namespaces
            namespaces = []
        else:
            raise ValueError("Only 1-level namespaces are supported")

        return ListNamespacesResponse(namespaces=namespaces)

    def create_namespace(
        self, request: CreateNamespaceRequest
    ) -> CreateNamespaceResponse:
        """Create a namespace."""
        if not request.id:
            raise ValueError("Invalid namespace ID")

        if len(request.id) == 1:
            # Create 1-level namespace
            namespace_name = request.id[0]
            self.namespaces.add(namespace_name)

            # Create directory for the namespace
            import os

            namespace_dir = f"{self.config.root}/{namespace_name}"
            os.makedirs(namespace_dir, exist_ok=True)
        else:
            raise ValueError("Only 1-level namespaces are supported")

        return CreateNamespaceResponse()

    def drop_namespace(self, request: DropNamespaceRequest) -> DropNamespaceResponse:
        """Drop a namespace."""
        if not request.id:
            raise ValueError("Invalid namespace ID")

        if len(request.id) == 1:
            # Drop 1-level namespace
            namespace_name = request.id[0]

            if namespace_name not in self.namespaces:
                raise RuntimeError(f"Namespace does not exist: {namespace_name}")

            # Check if namespace has any tables
            prefix = f"{namespace_name}."
            tables_in_namespace = [
                name for name in self.tables.keys() if name.startswith(prefix)
            ]
            if tables_in_namespace:
                raise RuntimeError(
                    f"Cannot drop namespace '{namespace_name}': contains tables"
                )

            # Remove namespace
            self.namespaces.remove(namespace_name)

            # Remove directory
            import shutil
            import os

            namespace_dir = f"{self.config.root}/{namespace_name}"
            if os.path.exists(namespace_dir):
                shutil.rmtree(namespace_dir, ignore_errors=True)
        else:
            raise ValueError("Only 1-level namespaces are supported")

        return DropNamespaceResponse()


class TempNamespaceConfig:
    """Configuration for TempNamespace."""

    ROOT = "root"

    def __init__(self, properties: Optional[Dict[str, str]] = None):
        """Initialize configuration from properties.

        Args:
            properties: Dictionary of configuration properties
        """
        if properties is None:
            properties = {}

        self._root = properties.get(self.ROOT, "/tmp")

    @property
    def root(self) -> str:
        """Get the namespace root directory."""
        return self._root


NATIVE_IMPLS["temp"] = f"{TempNamespace.__module__}.TempNamespace"


class TestNamespaceConnection:
    """Test namespace-based LanceDB connection."""

    def setup_method(self):
        """Set up test fixtures."""
        self.temp_dir = tempfile.mkdtemp()
        # Clear the TempNamespace registry for this test
        if self.temp_dir in TempNamespace._global_registry:
            TempNamespace._global_registry[self.temp_dir].clear()
        if self.temp_dir in TempNamespace._global_namespaces:
            TempNamespace._global_namespaces[self.temp_dir].clear()

    def teardown_method(self):
        """Clean up test fixtures."""
        # Clear the TempNamespace registry
        if self.temp_dir in TempNamespace._global_registry:
            del TempNamespace._global_registry[self.temp_dir]
        if self.temp_dir in TempNamespace._global_namespaces:
            del TempNamespace._global_namespaces[self.temp_dir]
        shutil.rmtree(self.temp_dir, ignore_errors=True)

    def test_connect_namespace_test(self):
        """Test connecting to LanceDB through TempNamespace."""
        # Connect using TempNamespace
        db = lancedb.connect_namespace("temp", {"root": self.temp_dir})

        # Should be a LanceNamespaceDBConnection
        assert isinstance(db, lancedb.LanceNamespaceDBConnection)

        # Initially no tables
        assert len(list(db.table_names())) == 0

    def test_create_table_through_namespace(self):
        """Test creating a table through namespace."""
        db = lancedb.connect_namespace("temp", {"root": self.temp_dir})

        # Define schema for empty table
        schema = pa.schema(
            [
                pa.field("id", pa.int64()),
                pa.field("vector", pa.list_(pa.float32(), 2)),
                pa.field("text", pa.string()),
            ]
        )

        # Create empty table
        table = db.create_table("test_table", schema=schema)
        assert table is not None
        assert table.name == "test_table"

        # Table should appear in namespace
        table_names = list(db.table_names())
        assert "test_table" in table_names
        assert len(table_names) == 1

        # Verify empty table
        result = table.to_pandas()
        assert len(result) == 0
        assert list(result.columns) == ["id", "vector", "text"]

    def test_open_table_through_namespace(self):
        """Test opening an existing table through namespace."""
        db = lancedb.connect_namespace("temp", {"root": self.temp_dir})

        # Create a table with schema
        schema = pa.schema(
            [
                pa.field("id", pa.int64()),
                pa.field("vector", pa.list_(pa.float32(), 2)),
            ]
        )
        db.create_table("test_table", schema=schema)

        # Open the table
        table = db.open_table("test_table")
        assert table is not None
        assert table.name == "test_table"

        # Verify empty table with correct schema
        result = table.to_pandas()
        assert len(result) == 0
        assert list(result.columns) == ["id", "vector"]

    def test_drop_table_through_namespace(self):
        """Test dropping a table through namespace."""
        db = lancedb.connect_namespace("temp", {"root": self.temp_dir})

        # Create tables
        schema = pa.schema(
            [
                pa.field("id", pa.int64()),
                pa.field("vector", pa.list_(pa.float32(), 2)),
            ]
        )
        db.create_table("table1", schema=schema)
        db.create_table("table2", schema=schema)

        # Verify both tables exist
        table_names = list(db.table_names())
        assert "table1" in table_names
        assert "table2" in table_names
        assert len(table_names) == 2

        # Drop one table
        db.drop_table("table1")

        # Verify only table2 remains
        table_names = list(db.table_names())
        assert "table1" not in table_names
        assert "table2" in table_names
        assert len(table_names) == 1

        # Test that drop_table works without explicit namespace parameter
        db.drop_table("table2")
        assert len(list(db.table_names())) == 0

        # Should not be able to open dropped table
        with pytest.raises(RuntimeError):
            db.open_table("table1")

    def test_create_table_with_schema(self):
        """Test creating a table with explicit schema through namespace."""
        db = lancedb.connect_namespace("temp", {"root": self.temp_dir})

        # Define schema
        schema = pa.schema(
            [
                pa.field("id", pa.int64()),
                pa.field("vector", pa.list_(pa.float32(), 3)),
                pa.field("text", pa.string()),
            ]
        )

        # Create table with schema
        table = db.create_table("test_table", schema=schema)
        assert table is not None

        # Verify schema
        table_schema = table.schema
        assert len(table_schema) == 3
        assert table_schema.field("id").type == pa.int64()
        assert table_schema.field("text").type == pa.string()

    def test_rename_table_not_supported(self):
        """Test that rename_table raises NotImplementedError."""
        db = lancedb.connect_namespace("temp", {"root": self.temp_dir})

        # Create a table
        schema = pa.schema(
            [
                pa.field("id", pa.int64()),
                pa.field("vector", pa.list_(pa.float32(), 2)),
            ]
        )
        db.create_table("old_name", schema=schema)

        # Rename should raise NotImplementedError
        with pytest.raises(NotImplementedError, match="rename_table is not supported"):
            db.rename_table("old_name", "new_name")

    def test_drop_all_tables(self):
        """Test dropping all tables through namespace."""
        db = lancedb.connect_namespace("temp", {"root": self.temp_dir})

        # Create multiple tables
        schema = pa.schema(
            [
                pa.field("id", pa.int64()),
                pa.field("vector", pa.list_(pa.float32(), 2)),
            ]
        )
        for i in range(3):
            db.create_table(f"table{i}", schema=schema)

        # Verify tables exist
        assert len(list(db.table_names())) == 3

        # Drop all tables
        db.drop_all_tables()

        # Verify all tables are gone
        assert len(list(db.table_names())) == 0

        # Test that table_names works with keyword-only namespace parameter
        db.create_table("test_table", schema=schema)
        result = list(db.table_names(namespace=[]))
        assert "test_table" in result

    def test_table_operations(self):
        """Test various table operations through namespace."""
        db = lancedb.connect_namespace("temp", {"root": self.temp_dir})

        # Create a table with schema
        schema = pa.schema(
            [
                pa.field("id", pa.int64()),
                pa.field("vector", pa.list_(pa.float32(), 2)),
                pa.field("text", pa.string()),
            ]
        )
        table = db.create_table("test_table", schema=schema)

        # Verify empty table was created
        result = table.to_pandas()
        assert len(result) == 0
        assert list(result.columns) == ["id", "vector", "text"]

        # Test add data to the table
        new_data = [
            {"id": 1, "vector": [1.0, 2.0], "text": "item_1"},
            {"id": 2, "vector": [2.0, 3.0], "text": "item_2"},
        ]
        table.add(new_data)
        result = table.to_pandas()
        assert len(result) == 2

        # Test delete
        table.delete("id = 1")
        result = table.to_pandas()
        assert len(result) == 1
        assert result["id"].values[0] == 2

        # Test update
        table.update(where="id = 2", values={"text": "updated"})
        result = table.to_pandas()
        assert result["text"].values[0] == "updated"

    def test_storage_options(self):
        """Test passing storage options through namespace connection."""
        # Connect with storage options
        storage_opts = {"test_option": "test_value"}
        db = lancedb.connect_namespace(
            "temp", {"root": self.temp_dir}, storage_options=storage_opts
        )

        # Storage options should be preserved
        assert db.storage_options == storage_opts

        # Create table with additional storage options
        table_opts = {"table_option": "table_value"}
        schema = pa.schema(
            [
                pa.field("id", pa.int64()),
                pa.field("vector", pa.list_(pa.float32(), 2)),
            ]
        )
        db.create_table("test_table", schema=schema, storage_options=table_opts)

    def test_namespace_operations(self):
        """Test namespace management operations."""
        db = lancedb.connect_namespace("temp", {"root": self.temp_dir})

        # Initially no namespaces
        assert len(list(db.list_namespaces())) == 0

        # Create a namespace
        db.create_namespace(["test_namespace"])

        # Verify namespace exists
        namespaces = list(db.list_namespaces())
        assert "test_namespace" in namespaces
        assert len(namespaces) == 1

        # Create table in namespace
        schema = pa.schema(
            [
                pa.field("id", pa.int64()),
                pa.field("vector", pa.list_(pa.float32(), 2)),
            ]
        )
        table = db.create_table(
            "test_table", schema=schema, namespace=["test_namespace"]
        )
        assert table is not None

        # Verify table exists in namespace
        tables_in_namespace = list(db.table_names(namespace=["test_namespace"]))
        assert "test_table" in tables_in_namespace
        assert len(tables_in_namespace) == 1

        # Open table from namespace
        table = db.open_table("test_table", namespace=["test_namespace"])
        assert table is not None
        assert table.name == "test_table"

        # Drop table from namespace
        db.drop_table("test_table", namespace=["test_namespace"])

        # Verify table no longer exists in namespace
        tables_in_namespace = list(db.table_names(namespace=["test_namespace"]))
        assert len(tables_in_namespace) == 0

        # Drop namespace
        db.drop_namespace(["test_namespace"])

        # Verify namespace no longer exists
        namespaces = list(db.list_namespaces())
        assert len(namespaces) == 0

    def test_namespace_with_tables_cannot_be_dropped(self):
        """Test that namespaces containing tables cannot be dropped."""
        db = lancedb.connect_namespace("temp", {"root": self.temp_dir})

        # Create namespace and table
        db.create_namespace(["test_namespace"])
        schema = pa.schema(
            [
                pa.field("id", pa.int64()),
                pa.field("vector", pa.list_(pa.float32(), 2)),
            ]
        )
        db.create_table("test_table", schema=schema, namespace=["test_namespace"])

        # Try to drop namespace with tables - should fail
        with pytest.raises(RuntimeError, match="contains tables"):
            db.drop_namespace(["test_namespace"])

        # Drop table first
        db.drop_table("test_table", namespace=["test_namespace"])

        # Now dropping namespace should work
        db.drop_namespace(["test_namespace"])

    def test_same_table_name_different_namespaces(self):
        """Test that the same table name can coexist in different namespaces."""
        db = lancedb.connect_namespace("temp", {"root": self.temp_dir})

        # Create two namespaces
        db.create_namespace(["namespace_a"])
        db.create_namespace(["namespace_b"])

        # Define schema
        schema = pa.schema(
            [
                pa.field("id", pa.int64()),
                pa.field("vector", pa.list_(pa.float32(), 2)),
                pa.field("text", pa.string()),
            ]
        )

        # Create table with same name in both namespaces
        table_a = db.create_table(
            "same_name_table", schema=schema, namespace=["namespace_a"]
        )
        table_b = db.create_table(
            "same_name_table", schema=schema, namespace=["namespace_b"]
        )

        # Add different data to each table
        data_a = [
            {"id": 1, "vector": [1.0, 2.0], "text": "data_from_namespace_a"},
            {"id": 2, "vector": [3.0, 4.0], "text": "also_from_namespace_a"},
        ]
        table_a.add(data_a)

        data_b = [
            {"id": 10, "vector": [10.0, 20.0], "text": "data_from_namespace_b"},
            {"id": 20, "vector": [30.0, 40.0], "text": "also_from_namespace_b"},
            {"id": 30, "vector": [50.0, 60.0], "text": "more_from_namespace_b"},
        ]
        table_b.add(data_b)

        # Verify data in namespace_a table
        opened_table_a = db.open_table("same_name_table", namespace=["namespace_a"])
        result_a = opened_table_a.to_pandas().sort_values("id").reset_index(drop=True)
        assert len(result_a) == 2
        assert result_a["id"].tolist() == [1, 2]
        assert result_a["text"].tolist() == [
            "data_from_namespace_a",
            "also_from_namespace_a",
        ]
        assert [v.tolist() for v in result_a["vector"]] == [[1.0, 2.0], [3.0, 4.0]]

        # Verify data in namespace_b table
        opened_table_b = db.open_table("same_name_table", namespace=["namespace_b"])
        result_b = opened_table_b.to_pandas().sort_values("id").reset_index(drop=True)
        assert len(result_b) == 3
        assert result_b["id"].tolist() == [10, 20, 30]
        assert result_b["text"].tolist() == [
            "data_from_namespace_b",
            "also_from_namespace_b",
            "more_from_namespace_b",
        ]
        assert [v.tolist() for v in result_b["vector"]] == [
            [10.0, 20.0],
            [30.0, 40.0],
            [50.0, 60.0],
        ]

        # Verify root namespace doesn't have this table
        root_tables = list(db.table_names())
        assert "same_name_table" not in root_tables

        # Clean up
        db.drop_table("same_name_table", namespace=["namespace_a"])
        db.drop_table("same_name_table", namespace=["namespace_b"])
        db.drop_namespace(["namespace_a"])
        db.drop_namespace(["namespace_b"])
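End to end, the namespace flow these tests exercise looks like the sketch below; "temp" is the toy implementation registered above, and the root path is illustrative:

import pyarrow as pa
import lancedb

schema = pa.schema(
    [pa.field("id", pa.int64()), pa.field("vector", pa.list_(pa.float32(), 2))]
)

db = lancedb.connect_namespace("temp", {"root": "/tmp/ns_demo"})
db.create_namespace(["prod"])                                    # one-level namespace
tbl = db.create_table("docs", schema=schema, namespace=["prod"])
assert "docs" in db.table_names(namespace=["prod"])
db.drop_table("docs", namespace=["prod"])
db.drop_namespace(["prod"])  # succeeds only once the namespace is empty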
@@ -5,6 +5,7 @@ from typing import List, Union
 import unittest.mock as mock
 from datetime import timedelta
 from pathlib import Path
+import random

 import lancedb
 from lancedb.db import AsyncConnection
@@ -1355,6 +1356,27 @@ def test_take_queries(tmp_path):
     ]


+def test_getitems(tmp_path):
+    db = lancedb.connect(tmp_path)
+    data = pa.table(
+        {
+            "idx": range(100),
+        }
+    )
+    # Make two fragments
+    table = db.create_table("test", data)
+    table.add(pa.table({"idx": range(100, 200)}))
+
+    assert table.__getitems__([5, 2, 117]) == pa.table(
+        {
+            "idx": [5, 2, 117],
+        }
+    )
+
+    offsets = random.sample(range(200), 10)
+    assert table.__getitems__(offsets) == pa.table({"idx": offsets})
+
+
 @pytest.mark.asyncio
 async def test_query_timeout_async(tmp_path):
     db = await lancedb.connect_async(tmp_path)
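`__getitems__` is batched random access by row offset: it returns the requested rows as one Arrow table, in the order given and across fragment boundaries, which is the hook that batched data loaders (e.g. PyTorch-style fetchers) probe for. A tiny sketch restating the contract, with an illustrative path:

import pyarrow as pa
import lancedb

db = lancedb.connect("/tmp/getitems_demo")
tbl = db.create_table("t", pa.table({"idx": range(10)}))
assert tbl.__getitems__([7, 0, 3]) == pa.table({"idx": [7, 0, 3]})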