Mirror of https://github.com/lancedb/lancedb.git
Synced 2026-03-28 11:30:39 +00:00

Compare commits: python-v0. ... jack/clipp (43 commits)
| SHA1 |
|---|
| f94349ef59 |
| c9c08ac8b9 |
| e253f5d9b6 |
| 05b4fb0990 |
| 613b9c1099 |
| d5948576b9 |
| 0d3fc7860a |
| 531cec075c |
| 0e486511fa |
| 367262662d |
| 11efaf46ae |
| 1ea22ee5ef |
| 8cef8806e9 |
| a3cd7fce69 |
| 48ddc833dd |
| 2802764092 |
| 37bbb0dba1 |
| 155ec16161 |
| 636b8b5bbd |
| 715b81c86b |
| 7e1616376e |
| d5ac5b949a |
| 7be6f45e0b |
| d9e2d51f51 |
| e081708cce |
| 2d60ea6938 |
| dcb1443143 |
| c0230f91d2 |
| 5d629c9ecb |
| 14973ac9d1 |
| 70cbee6293 |
| 02783bf440 |
| 4323ca0147 |
| bd3dd6a8e5 |
| 3c1162612e |
| 53c7c560c9 |
| de4f77800d |
| b6ab721cf7 |
| 027d53500b |
| 9098f47e73 |
| 826a3e5ee9 |
| 9fac56252e |
| c55ca20c1b |
```diff
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.26.0"
+current_version = "0.27.0-beta.2"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
```
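The hunk only shows the first lines of the bumpversion parse regex, so the pre-release handling is not visible here. A quick shell check (illustrative only; the pre-release group below is an assumption, not the project's exact pattern) confirms the new version string still fits a semver-with-pre-release shape:

```bash
# Illustrative check only: the full parse regex is truncated in the hunk above,
# so the pre-release group here is assumed rather than copied from the config.
echo "0.27.0-beta.2" | grep -Eq \
  '^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-[0-9A-Za-z.]+)?$' \
  && echo "version parses" || echo "version does NOT parse"
```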
.github/workflows/codex-fix-ci.yml (vendored, new file, +173 lines)

```yaml
name: Codex Fix CI

on:
  workflow_dispatch:
    inputs:
      workflow_run_url:
        description: "Failing CI workflow run URL (e.g., https://github.com/lancedb/lancedb/actions/runs/12345678)"
        required: true
        type: string
      branch:
        description: "Branch to fix (e.g., main, release/v2.0, or feature-branch)"
        required: true
        type: string
      guidelines:
        description: "Additional guidelines for the fix (optional)"
        required: false
        type: string

permissions:
  contents: write
  pull-requests: write
  actions: read

jobs:
  fix-ci:
    runs-on: warp-ubuntu-latest-x64-4x
    timeout-minutes: 60
    env:
      CC: clang
      CXX: clang++
    steps:
      - name: Show inputs
        run: |
          echo "workflow_run_url = ${{ inputs.workflow_run_url }}"
          echo "branch = ${{ inputs.branch }}"
          echo "guidelines = ${{ inputs.guidelines }}"

      - name: Checkout Repo
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.branch }}
          fetch-depth: 0
          persist-credentials: true

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20

      - name: Install Codex CLI
        run: npm install -g @openai/codex

      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable
        with:
          toolchain: stable
          components: clippy, rustfmt

      - uses: Swatinem/rust-cache@v2

      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y protobuf-compiler libssl-dev

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Python dependencies
        run: |
          pip install maturin ruff pytest pyarrow pandas polars

      - name: Set up Java
        uses: actions/setup-java@v4
        with:
          distribution: temurin
          java-version: '11'
          cache: maven

      - name: Install Node.js dependencies for TypeScript bindings
        run: |
          cd nodejs
          npm ci

      - name: Configure git user
        run: |
          git config user.name "lancedb automation"
          git config user.email "robot@lancedb.com"

      - name: Run Codex to fix CI failure
        env:
          WORKFLOW_RUN_URL: ${{ inputs.workflow_run_url }}
          BRANCH: ${{ inputs.branch }}
          GUIDELINES: ${{ inputs.guidelines }}
          GITHUB_TOKEN: ${{ secrets.ROBOT_TOKEN }}
          GH_TOKEN: ${{ secrets.ROBOT_TOKEN }}
          OPENAI_API_KEY: ${{ secrets.CODEX_TOKEN }}
        run: |
          set -euo pipefail

          cat <<EOF >/tmp/codex-prompt.txt
          You are running inside the lancedb repository on a GitHub Actions runner. Your task is to fix a CI failure.

          Input parameters:
          - Failing workflow run URL: ${WORKFLOW_RUN_URL}
          - Branch to fix: ${BRANCH}
          - Additional guidelines: ${GUIDELINES:-"None provided"}

          Follow these steps exactly:

          1. Extract the run ID from the workflow URL. The URL format is https://github.com/lancedb/lancedb/actions/runs/<run_id>.

          2. Use "gh run view <run_id> --json jobs,conclusion,name" to get information about the failed run.

          3. Identify which jobs failed. For each failed job, use "gh run view <run_id> --job <job_id> --log-failed" to get the failure logs.

          4. Analyze the failure logs to understand what went wrong. Common failures include:
             - Compilation errors
             - Test failures
             - Clippy warnings treated as errors
             - Formatting issues
             - Dependency issues

          5. Based on the analysis, fix the issues in the codebase:
             - For compilation errors: Fix the code that doesn't compile
             - For test failures: Fix the failing tests or the code they test
             - For clippy warnings: Apply the suggested fixes
             - For formatting issues: Run "cargo fmt --all"
             - For other issues: Apply appropriate fixes

          6. After making fixes, verify them locally:
             - Run "cargo fmt --all" to ensure formatting is correct
             - Run "cargo clippy --workspace --tests --all-features -- -D warnings" to check for issues
             - Run ONLY the specific failing tests to confirm they pass now:
               - For Rust test failures: Run the specific test with "cargo test -p <crate> <test_name>"
               - For Python test failures: Build with "cd python && maturin develop" then run "pytest <specific_test_file>::<test_name>"
               - For Java test failures: Run "cd java && mvn test -Dtest=<TestClass>#<testMethod>"
               - For TypeScript test failures: Run "cd nodejs && npm run build && npm test -- --testNamePattern='<test_name>'"
             - Do NOT run the full test suite - only run the tests that were failing

          7. If the additional guidelines are provided, follow them as well.

          8. Inspect "git status --short" and "git diff" to review your changes.

          9. Create a fix branch: "git checkout -b codex/fix-ci-<run_id>".

          10. Stage all changes with "git add -A" and commit with message "fix: resolve CI failures from run <run_id>".

          11. Push the branch: "git push origin codex/fix-ci-<run_id>". If the remote branch exists, delete it first with "gh api -X DELETE repos/lancedb/lancedb/git/refs/heads/codex/fix-ci-<run_id>" then push. Do NOT use "git push --force" or "git push -f".

          12. Create a pull request targeting "${BRANCH}":
              - Title: "ci: <short summary describing the fix>" (e.g., "ci: fix clippy warnings in lancedb" or "ci: resolve test flakiness in vector search")
              - First, write the PR body to /tmp/pr-body.md using a heredoc (cat <<'PREOF' > /tmp/pr-body.md). The body should include:
                - Link to the failing workflow run
                - Summary of what failed
                - Description of the fixes applied
              - Then run "gh pr create --base ${BRANCH} --body-file /tmp/pr-body.md".

          13. Display the new PR URL, "git status --short", and a summary of what was fixed.

          Constraints:
          - Use bash commands for all operations.
          - Do not merge the PR.
          - Do not modify GitHub workflow files unless they are the cause of the failure.
          - If any command fails, diagnose and attempt to fix the issue instead of aborting immediately.
          - If you cannot fix the issue automatically, create the PR anyway with a clear explanation of what you tried and what remains to be fixed.
          - env "GH_TOKEN" is available, use "gh" tools for GitHub-related operations.
          EOF

          printenv OPENAI_API_KEY | codex login --with-api-key
          codex --config shell_environment_policy.ignore_default_excludes=true exec --dangerously-bypass-approvals-and-sandbox "$(cat /tmp/codex-prompt.txt)"
```
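Because the job is gated on `workflow_dispatch`, it can be kicked off from the CLI as well as from the Actions UI. A minimal sketch, assuming `gh` is authenticated against lancedb/lancedb; the run URL, branch, and guidelines below are placeholders:

```bash
# Hypothetical inputs, for illustration only.
gh workflow run codex-fix-ci.yml \
  --repo lancedb/lancedb \
  -f workflow_run_url="https://github.com/lancedb/lancedb/actions/runs/12345678" \
  -f branch="main" \
  -f guidelines="Prefer minimal diffs; do not touch generated files."
```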
.github/workflows/nodejs.yml (vendored, 15 lines changed)

```diff
@@ -8,6 +8,7 @@ on:
     paths:
       - Cargo.toml
       - nodejs/**
+      - rust/**
       - docs/src/js/**
       - .github/workflows/nodejs.yml
       - docker-compose.yml
@@ -77,8 +78,11 @@ jobs:
           fetch-depth: 0
           lfs: true
       - uses: actions/setup-node@v3
+        name: Setup Node.js 20 for build
         with:
-          node-version: ${{ matrix.node-version }}
+          # @napi-rs/cli v3 requires Node >= 20.12 (via @inquirer/prompts@8).
+          # Build always on Node 20; tests run on the matrix version below.
+          node-version: 20
           cache: 'npm'
           cache-dependency-path: nodejs/package-lock.json
       - uses: Swatinem/rust-cache@v2
@@ -86,12 +90,16 @@ jobs:
         run: |
           sudo apt update
           sudo apt install -y protobuf-compiler libssl-dev
-          npm install -g @napi-rs/cli
       - name: Build
         run: |
          npm ci --include=optional
          npm run build:debug -- --profile ci
-          npm run tsc
+      - uses: actions/setup-node@v3
+        name: Setup Node.js ${{ matrix.node-version }} for test
+        with:
+          node-version: ${{ matrix.node-version }}
+      - name: Compile TypeScript
+        run: npm run tsc
       - name: Setup localstack
         working-directory: .
         run: docker compose up --detach --wait
@@ -144,7 +152,6 @@ jobs:
       - name: Install dependencies
         run: |
           brew install protobuf
-          npm install -g @napi-rs/cli
       - name: Build
         run: |
           npm ci --include=optional
```
.github/workflows/npm-publish.yml (vendored, 41 lines changed)

```diff
@@ -128,16 +128,13 @@ jobs:
           - target: x86_64-unknown-linux-musl
             # This one seems to need some extra memory
             host: ubuntu-2404-8x-x64
-            # https://github.com/napi-rs/napi-rs/blob/main/alpine.Dockerfile
-            docker: ghcr.io/napi-rs/napi-rs/nodejs-rust:lts-alpine
             features: fp16kernels
             pre_build: |-
               set -e &&
-              apk add protobuf-dev curl &&
-              ln -s /usr/lib/gcc/x86_64-alpine-linux-musl/14.2.0/crtbeginS.o /usr/lib/crtbeginS.o &&
-              ln -s /usr/lib/libgcc_s.so /usr/lib/libgcc.so &&
-              CC=gcc &&
-              CXX=g++
+              sudo apt-get update &&
+              sudo apt-get install -y protobuf-compiler pkg-config &&
+              rustup target add x86_64-unknown-linux-musl &&
+              export EXTRA_ARGS="-x"
           - target: aarch64-unknown-linux-gnu
             host: ubuntu-2404-8x-x64
             # https://github.com/napi-rs/napi-rs/blob/main/debian-aarch64.Dockerfile
@@ -153,15 +150,13 @@ jobs:
               rustup target add aarch64-unknown-linux-gnu
           - target: aarch64-unknown-linux-musl
             host: ubuntu-2404-8x-x64
-            # https://github.com/napi-rs/napi-rs/blob/main/alpine.Dockerfile
-            docker: ghcr.io/napi-rs/napi-rs/nodejs-rust:lts-alpine
             features: ","
             pre_build: |-
               set -e &&
-              apk add protobuf-dev &&
+              sudo apt-get update &&
+              sudo apt-get install -y protobuf-compiler &&
               rustup target add aarch64-unknown-linux-musl &&
-              export CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc &&
-              export CXX_aarch64_unknown_linux_musl=aarch64-linux-musl-g++
+              export EXTRA_ARGS="-x"
     name: build - ${{ matrix.settings.target }}
     runs-on: ${{ matrix.settings.host }}
     defaults:
@@ -192,12 +187,18 @@ jobs:
             .cargo-cache
             target/
           key: nodejs-${{ matrix.settings.target }}-cargo-${{ matrix.settings.host }}
-      - name: Setup toolchain
-        run: ${{ matrix.settings.setup }}
-        if: ${{ matrix.settings.setup }}
-        shell: bash
       - name: Install dependencies
         run: npm ci
+      - name: Install Zig
+        uses: mlugg/setup-zig@v2
+        if: ${{ contains(matrix.settings.target, 'musl') }}
+        with:
+          version: 0.14.1
+      - name: Install cargo-zigbuild
+        uses: taiki-e/install-action@v2
+        if: ${{ contains(matrix.settings.target, 'musl') }}
+        with:
+          tool: cargo-zigbuild
       - name: Build in docker
        uses: addnab/docker-run-action@v3
        if: ${{ matrix.settings.docker }}
@@ -210,24 +211,24 @@ jobs:
          run: |
            set -e
            ${{ matrix.settings.pre_build }}
-           npx napi build --platform --release --no-const-enum \
+           npx napi build --platform --release \
              --features ${{ matrix.settings.features }} \
              --target ${{ matrix.settings.target }} \
              --dts ../lancedb/native.d.ts \
              --js ../lancedb/native.js \
              --strip \
-             dist/
+             --output-dir dist/
       - name: Build
         run: |
           ${{ matrix.settings.pre_build }}
-          npx napi build --platform --release --no-const-enum \
+          npx napi build --platform --release \
            --features ${{ matrix.settings.features }} \
            --target ${{ matrix.settings.target }} \
            --dts ../lancedb/native.d.ts \
            --js ../lancedb/native.js \
            --strip \
            $EXTRA_ARGS \
-           dist/
+           --output-dir dist/
         if: ${{ !matrix.settings.docker }}
         shell: bash
       - name: Upload artifact
```
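The musl targets now build on plain Ubuntu hosts instead of Alpine containers, with `EXTRA_ARGS="-x"` passed to `napi build`; that flag (together with the new Zig and cargo-zigbuild install steps gated on musl targets) points at a zig-based cross-compile. A rough local sketch of that toolchain, assuming `zig` is on PATH and hedging that `napi build -x` ultimately delegates to something like cargo-zigbuild:

```bash
# Sketch of the zig-based musl cross-compile the workflow now relies on;
# the exact flags napi passes internally may differ.
rustup target add x86_64-unknown-linux-musl
cargo install cargo-zigbuild        # CI uses taiki-e/install-action instead
cargo zigbuild --release --target x86_64-unknown-linux-musl
```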
.github/workflows/python.yml (vendored, 1 line changed)

```diff
@@ -8,6 +8,7 @@ on:
     paths:
       - Cargo.toml
       - python/**
+      - rust/**
       - .github/workflows/python.yml
 
 concurrency:
```
.github/workflows/rust.yml (vendored, 2 lines changed)

```diff
@@ -183,7 +183,7 @@ jobs:
     runs-on: ubuntu-24.04
     strategy:
       matrix:
-        msrv: ["1.88.0"] # This should match up with rust-version in Cargo.toml
+        msrv: ["1.91.0"] # This should match up with rust-version in Cargo.toml
     env:
       # Need up-to-date compilers for kernels
       CC: clang-18
```
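The MSRV bump to 1.91.0 mirrors the `rust-version` change in Cargo.toml further down. A quick local sanity check, assuming rustup is installed:

```bash
# Verify the workspace still compiles on the new minimum supported Rust version.
rustup toolchain install 1.91.0
cargo +1.91.0 check --workspace --all-features
```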
Cargo.lock (generated, 500 lines changed)

```diff
@@ -128,15 +128,6 @@ version = "1.0.100"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61"
 
-[[package]]
-name = "approx"
-version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6"
-dependencies = [
- "num-traits",
-]
-
 [[package]]
 name = "arbitrary"
 version = "1.4.2"
@@ -1389,9 +1380,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
 
 [[package]]
 name = "bytes"
-version = "1.10.1"
+version = "1.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
+checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33"
 
 [[package]]
 name = "bytes-utils"
@@ -1571,7 +1562,7 @@ checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"
 dependencies = [
  "glob",
  "libc",
- "libloading",
+ "libloading 0.8.9",
 ]
 
 [[package]]
@@ -1658,9 +1649,9 @@ checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6"
 
 [[package]]
 name = "convert_case"
-version = "0.6.0"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca"
+checksum = "affbf0190ed2caf063e3def54ff444b449371d55c58e513a95ab98eca50adb49"
 dependencies = [
  "unicode-segmentation",
 ]
@@ -1783,6 +1774,16 @@ dependencies = [
  "crossbeam-utils",
 ]
 
+[[package]]
+name = "crossbeam-skiplist"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df29de440c58ca2cc6e587ec3d22347551a32435fbde9d2bff64e78a9ffa151b"
+dependencies = [
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
 [[package]]
 name = "crossbeam-utils"
 version = "0.8.21"
@@ -1883,14 +1884,20 @@ dependencies = [
 
 [[package]]
 name = "ctor"
-version = "0.2.9"
+version = "0.6.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501"
+checksum = "424e0138278faeb2b401f174ad17e715c829512d74f3d1e81eb43365c2e0590e"
 dependencies = [
- "quote",
- "syn 2.0.114",
+ "ctor-proc-macro",
+ "dtor",
 ]
 
+[[package]]
+name = "ctor-proc-macro"
+version = "0.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "52560adf09603e58c9a7ee1fe1dcb95a16927b17c127f0ac02d6e768a0e25bc1"
+
 [[package]]
 name = "darling"
 version = "0.20.11"
@@ -2727,6 +2734,21 @@ version = "2.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "117240f60069e65410b3ae1bb213295bd828f707b5bec6596a1afc8793ce0cbc"
 
+[[package]]
+name = "dtor"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "404d02eeb088a82cfd873006cb713fe411306c7d182c344905e101fb1167d301"
+dependencies = [
+ "dtor-proc-macro",
+]
+
+[[package]]
+name = "dtor-proc-macro"
+version = "0.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f678cf4a922c215c63e0de95eb1ff08a958a81d47e485cf9da1e27bf6305cfa5"
+
 [[package]]
 name = "dunce"
 version = "1.0.5"
@@ -2765,16 +2787,6 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "05dbec7076f432bb132db738df90d87a4f5789e99f59e7b1219a6b8ef61eaa68"
 
-[[package]]
-name = "earcutr"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "79127ed59a85d7687c409e9978547cffb7dc79675355ed22da6b66fd5f6ead01"
-dependencies = [
- "itertools 0.11.0",
- "num-traits",
-]
-
 [[package]]
 name = "ecdsa"
 version = "0.14.8"
@@ -3021,12 +3033,6 @@ dependencies = [
  "miniz_oxide",
 ]
 
-[[package]]
-name = "float_next_after"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
-
 [[package]]
 name = "fnv"
 version = "1.0.7"
@@ -3072,9 +3078,8 @@ checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"
 
 [[package]]
 name = "fsst"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f03a771ab914e207dd26bd2f12666839555ec8ecc7e1770e1ed6f9900d899a4"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow-array",
  "rand 0.9.2",
@@ -3451,128 +3456,6 @@ dependencies = [
  "version_check",
 ]
 
-[[package]]
-name = "geo"
-version = "0.31.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2fc1a1678e54befc9b4bcab6cd43b8e7f834ae8ea121118b0fd8c42747675b4a"
-dependencies = [
- "earcutr",
- "float_next_after",
- "geo-types",
- "geographiclib-rs",
- "i_overlay",
- "log",
- "num-traits",
- "robust",
- "rstar",
- "spade",
-]
-
-[[package]]
-name = "geo-traits"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e7c353d12a704ccfab1ba8bfb1a7fe6cb18b665bf89d37f4f7890edcd260206"
-dependencies = [
- "geo-types",
-]
-
-[[package]]
-name = "geo-types"
-version = "0.7.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75a4dcd69d35b2c87a7c83bce9af69fd65c9d68d3833a0ded568983928f3fc99"
-dependencies = [
- "approx",
- "num-traits",
- "rayon",
- "rstar",
- "serde",
-]
-
-[[package]]
-name = "geoarrow-array"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc1cc4106ac0a0a512c398961ce95d8150475c84a84e17c4511c3643fa120a17"
-dependencies = [
- "arrow-array",
- "arrow-buffer",
- "arrow-schema",
- "geo-traits",
- "geoarrow-schema",
- "num-traits",
- "wkb",
- "wkt",
-]
-
-[[package]]
-name = "geoarrow-expr-geo"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa84300361ce57fb875bcaa6e32b95b0aff5c6b1af692b936bdd58ff343f4394"
-dependencies = [
- "arrow-array",
- "arrow-buffer",
- "geo",
- "geo-traits",
- "geoarrow-array",
- "geoarrow-schema",
-]
-
-[[package]]
-name = "geoarrow-schema"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e97be4e9f523f92bd6a0e0458323f4b783d073d011664decd8dbf05651704f34"
-dependencies = [
- "arrow-schema",
- "geo-traits",
- "serde",
- "serde_json",
- "thiserror 1.0.69",
-]
-
-[[package]]
-name = "geodatafusion"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "773cfa1fb0d7f7661b76b3fde00f3ffd8e0ff7b3635096f0ff6294fe5ca62a2b"
-dependencies = [
- "arrow-arith",
- "arrow-array",
- "arrow-schema",
- "datafusion",
- "geo",
- "geo-traits",
- "geoarrow-array",
- "geoarrow-expr-geo",
- "geoarrow-schema",
- "geohash",
- "thiserror 1.0.69",
- "wkt",
-]
-
-[[package]]
-name = "geographiclib-rs"
-version = "0.2.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f611040a2bb37eaa29a78a128d1e92a378a03e0b6e66ae27398d42b1ba9a7841"
-dependencies = [
- "libm",
-]
-
-[[package]]
-name = "geohash"
-version = "0.13.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fb94b1a65401d6cbf22958a9040aa364812c26674f841bee538b12c135db1e6"
-dependencies = [
- "geo-types",
- "libm",
-]
-
 [[package]]
 name = "getrandom"
 version = "0.2.16"
@@ -3682,15 +3565,6 @@ dependencies = [
  "zerocopy",
 ]
 
-[[package]]
-name = "hash32"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606"
-dependencies = [
- "byteorder",
-]
-
 [[package]]
 name = "hashbrown"
 version = "0.12.3"
@@ -3725,16 +3599,6 @@ version = "0.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d"
 
-[[package]]
-name = "heapless"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad"
-dependencies = [
- "hash32",
- "stable_deref_trait",
-]
-
 [[package]]
 name = "heck"
 version = "0.4.1"
@@ -3996,49 +3860,6 @@ dependencies = [
  "serde",
 ]
 
-[[package]]
-name = "i_float"
-version = "1.15.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "010025c2c532c8d82e42d0b8bb5184afa449fa6f06c709ea9adcb16c49ae405b"
-dependencies = [
- "libm",
-]
-
-[[package]]
-name = "i_key_sort"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9190f86706ca38ac8add223b2aed8b1330002b5cdbbce28fb58b10914d38fc27"
-
-[[package]]
-name = "i_overlay"
-version = "4.0.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fcccbd4e4274e0f80697f5fbc6540fdac533cce02f2081b328e68629cce24f9"
-dependencies = [
- "i_float",
- "i_key_sort",
- "i_shape",
- "i_tree",
- "rayon",
-]
-
-[[package]]
-name = "i_shape"
-version = "1.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ea154b742f7d43dae2897fcd5ead86bc7b5eefcedd305a7ebf9f69d44d61082"
-dependencies = [
- "i_float",
-]
-
-[[package]]
-name = "i_tree"
-version = "0.16.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35e6d558e6d4c7b82bc51d9c771e7a927862a161a7d87bf2b0541450e0e20915"
-
 [[package]]
 name = "iana-time-zone"
 version = "0.1.64"
@@ -4405,9 +4226,8 @@ dependencies = [
 
 [[package]]
 name = "lance"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "47b685aca3f97ee02997c83ded16f59c747ccb69e74c8abbbae4aa3d22cf1301"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow",
  "arrow-arith",
@@ -4426,6 +4246,7 @@ dependencies = [
  "byteorder",
  "bytes",
  "chrono",
+ "crossbeam-skiplist",
  "dashmap",
  "datafusion",
  "datafusion-expr",
@@ -4443,7 +4264,6 @@ dependencies = [
  "lance-datafusion",
  "lance-encoding",
  "lance-file",
- "lance-geo",
  "lance-index",
  "lance-io",
  "lance-linalg",
@@ -4465,6 +4285,7 @@ dependencies = [
  "tantivy",
  "tokio",
  "tokio-stream",
+ "tokio-util",
  "tracing",
  "url",
  "uuid",
@@ -4472,9 +4293,8 @@ dependencies = [
 
 [[package]]
 name = "lance-arrow"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "daf00c7537df524cc518a089f0d156a036d95ca3f5bc2bc1f0a9f9293e9b62ef"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow-array",
  "arrow-buffer",
@@ -4493,9 +4313,8 @@ dependencies = [
 
 [[package]]
 name = "lance-bitpacking"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46752e4ac8fc5590a445e780b63a8800adc7a770bd74770a8dc66963778e4e77"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrayref",
  "paste",
@@ -4504,9 +4323,8 @@ dependencies = [
 
 [[package]]
 name = "lance-core"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d13d87d07305c6d4b4dc7780fb1107babf782a0e5b1dc7872e17ae1f8fd11ca"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow-array",
  "arrow-buffer",
@@ -4543,9 +4361,8 @@ dependencies = [
 
 [[package]]
 name = "lance-datafusion"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6451b5af876eaef8bec4b38a39dadac9d44621e1ecf85d0cdf6097a5d0aa8721"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow",
  "arrow-array",
@@ -4564,10 +4381,10 @@ dependencies = [
  "lance-arrow",
  "lance-core",
  "lance-datagen",
- "lance-geo",
  "log",
  "pin-project",
  "prost",
+ "prost-build",
  "snafu",
  "tokio",
  "tracing",
@@ -4575,9 +4392,8 @@ dependencies = [
 
 [[package]]
 name = "lance-datagen"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e1736708dd7867dfbab8fcc930b21c96717c6c00be73b7d9a240336a4ed80375"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow",
  "arrow-array",
@@ -4595,9 +4411,8 @@ dependencies = [
 
 [[package]]
 name = "lance-encoding"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d6b6ca4ff94833240d5ba4a94a742cba786d1949b3c3fa7e11d6f0050443432a"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow-arith",
  "arrow-array",
@@ -4634,9 +4449,8 @@ dependencies = [
 
 [[package]]
 name = "lance-file"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "55fbe959bffe185543aed3cbeb14484f1aa2e55886034fdb1ea3d8cc9b70aad8"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow-arith",
  "arrow-array",
@@ -4666,27 +4480,10 @@ dependencies = [
  "tracing",
 ]
 
-[[package]]
-name = "lance-geo"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a52b0adabc953d457f336a784a3b37353a180e6a79905f544949746e0d4c6483"
-dependencies = [
- "datafusion",
- "geo-traits",
- "geo-types",
- "geoarrow-array",
- "geoarrow-schema",
- "geodatafusion",
- "lance-core",
- "serde",
-]
-
 [[package]]
 name = "lance-index"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b67654bf86fd942dd2cf08294ee7e91053427cd148225f49c9ff398ff9a40fd"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow",
  "arrow-arith",
@@ -4710,9 +4507,6 @@ dependencies = [
  "dirs",
  "fst",
  "futures",
- "geo-types",
- "geoarrow-array",
- "geoarrow-schema",
  "half",
  "itertools 0.13.0",
  "jsonb",
@@ -4722,7 +4516,6 @@ dependencies = [
  "lance-datagen",
  "lance-encoding",
  "lance-file",
- "lance-geo",
  "lance-io",
  "lance-linalg",
  "lance-table",
@@ -4753,9 +4546,8 @@ dependencies = [
 
 [[package]]
 name = "lance-io"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8eb0ccc1c414e31687d83992d546af0a0237c8d2f4bf2ae3d347d539fd0fc141"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow",
  "arrow-arith",
@@ -4786,8 +4578,8 @@ dependencies = [
  "prost",
  "rand 0.9.2",
  "serde",
- "shellexpand",
  "snafu",
+ "tempfile",
  "tokio",
  "tracing",
  "url",
@@ -4795,9 +4587,8 @@ dependencies = [
 
 [[package]]
 name = "lance-linalg"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "083404cf12dcdb1a7df98fb58f9daf626b6e43a2f794b37b6b89b4012a0e1f78"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow-array",
  "arrow-buffer",
@@ -4813,9 +4604,8 @@ dependencies = [
 
 [[package]]
 name = "lance-namespace"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c12778d2aabf9c2bfd16e2509ebe120e562a288d8ae630ec6b6b204868df41b2"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow",
  "async-trait",
@@ -4827,9 +4617,8 @@ dependencies = [
 
 [[package]]
 name = "lance-namespace-impls"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8863aababdd13a6d2c8d6179dc6981f4f8f49d8b66a00c5dd75115aec4cadc99"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow",
  "arrow-ipc",
@@ -4844,6 +4633,7 @@ dependencies = [
  "lance-index",
  "lance-io",
  "lance-namespace",
+ "lance-table",
  "log",
  "object_store",
  "rand 0.9.2",
@@ -4859,9 +4649,9 @@ dependencies = [
 
 [[package]]
 name = "lance-namespace-reqwest-client"
-version = "0.4.5"
+version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2acdba67f84190067532fce07b51a435dd390d7cdc1129a05003e5cb3274cf0"
+checksum = "3ad4c947349acd6e37e984eba0254588bd894e6128434338b9e6904e56fb4633"
 dependencies = [
  "reqwest",
  "serde",
@@ -4872,9 +4662,8 @@ dependencies = [
 
 [[package]]
 name = "lance-table"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0fcc83f197ce2000c4abe4f5e0873490ab1f41788fa76571c4209b87d4daf50"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow",
  "arrow-array",
@@ -4913,9 +4702,8 @@ dependencies = [
 
 [[package]]
 name = "lance-testing"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fb1f7c7e06f91360e141ecee1cf2110f858c231705f69f2cd2fda9e30c1e9f4"
+version = "3.1.0-beta.2"
+source = "git+https://github.com/lance-format/lance.git?tag=v3.1.0-beta.2#ae3b1f413cc49d783f51abe62c8261c106c9b6cd"
 dependencies = [
  "arrow-array",
  "arrow-schema",
@@ -4926,7 +4714,7 @@ dependencies = [
 
 [[package]]
 name = "lancedb"
-version = "0.26.0"
+version = "0.27.0-beta.2"
 dependencies = [
  "ahash",
  "anyhow",
@@ -4956,8 +4744,10 @@ dependencies = [
  "datafusion-common",
  "datafusion-execution",
  "datafusion-expr",
+ "datafusion-functions",
  "datafusion-physical-expr",
  "datafusion-physical-plan",
+ "datafusion-sql",
  "futures",
  "half",
  "hf-hub",
@@ -5006,7 +4796,7 @@ dependencies = [
 
 [[package]]
 name = "lancedb-nodejs"
-version = "0.26.0"
+version = "0.27.0-beta.2"
 dependencies = [
  "arrow-array",
  "arrow-ipc",
@@ -5026,7 +4816,7 @@ dependencies = [
 
 [[package]]
 name = "lancedb-python"
-version = "0.29.0"
+version = "0.30.0-beta.2"
 dependencies = [
  "arrow",
  "async-trait",
@@ -5138,6 +4928,16 @@ dependencies = [
  "windows-link 0.2.1",
 ]
 
+[[package]]
+name = "libloading"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "754ca22de805bb5744484a5b151a9e1a8e837d5dc232c2d7d8c2e3492edc8b60"
+dependencies = [
+ "cfg-if",
+ "windows-link 0.2.1",
+]
+
 [[package]]
 name = "libm"
 version = "0.2.15"
@@ -5485,32 +5285,34 @@ checksum = "2195bf6aa996a481483b29d62a7663eed3fe39600c460e323f8ff41e90bdd89b"
 
 [[package]]
 name = "napi"
-version = "2.16.17"
+version = "3.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "55740c4ae1d8696773c78fdafd5d0e5fe9bc9f1b071c7ba493ba5c413a9184f3"
+checksum = "e6944d0bf100571cd6e1a98a316cdca262deb6fccf8d93f5ae1502ca3fc88bd3"
 dependencies = [
  "bitflags 2.9.4",
  "ctor",
- "napi-derive",
+ "futures",
+ "napi-build",
  "napi-sys",
- "once_cell",
+ "nohash-hasher",
+ "rustc-hash 2.1.1",
  "tokio",
 ]
 
 [[package]]
 name = "napi-build"
-version = "2.2.3"
+version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcae8ad5609d14afb3a3b91dee88c757016261b151e9dcecabf1b2a31a6cab14"
+checksum = "d376940fd5b723c6893cd1ee3f33abbfd86acb1cd1ec079f3ab04a2a3bc4d3b1"
 
 [[package]]
 name = "napi-derive"
-version = "2.16.13"
+version = "3.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cbe2585d8ac223f7d34f13701434b9d5f4eb9c332cccce8dee57ea18ab8ab0c"
+checksum = "2c914b5e420182bfb73504e0607592cdb8e2e21437d450883077669fb72a114d"
 dependencies = [
- "cfg-if",
  "convert_case",
+ "ctor",
  "napi-derive-backend",
  "proc-macro2",
  "quote",
@@ -5519,26 +5321,24 @@ dependencies = [
 
 [[package]]
 name = "napi-derive-backend"
-version = "1.0.75"
+version = "5.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1639aaa9eeb76e91c6ae66da8ce3e89e921cd3885e99ec85f4abacae72fc91bf"
+checksum = "f0864cf6a82e2cfb69067374b64c9253d7e910e5b34db833ed7495dda56ccb18"
 dependencies = [
  "convert_case",
- "once_cell",
  "proc-macro2",
  "quote",
- "regex",
  "semver",
  "syn 2.0.114",
 ]
 
 [[package]]
 name = "napi-sys"
-version = "2.4.0"
+version = "3.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "427802e8ec3a734331fec1035594a210ce1ff4dc5bc1950530920ab717964ea3"
+checksum = "8eb602b84d7c1edae45e50bbf1374696548f36ae179dfa667f577e384bb90c2b"
 dependencies = [
- "libloading",
+ "libloading 0.9.0",
 ]
 
 [[package]]
@@ -5556,6 +5356,12 @@ dependencies = [
  "rawpointer",
 ]
 
+[[package]]
+name = "nohash-hasher"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451"
+
 [[package]]
 name = "nom"
 version = "7.1.3"
@@ -5628,11 +5434,10 @@ dependencies = [
 
 [[package]]
 name = "num-bigint-dig"
-version = "0.8.4"
+version = "0.8.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151"
+checksum = "e661dda6640fad38e827a6d4a310ff4763082116fe217f279885c97f511bb0b7"
 dependencies = [
- "byteorder",
  "lazy_static",
  "libm",
  "num-integer",
@@ -7274,20 +7079,14 @@ dependencies = [
 
 [[package]]
 name = "roaring"
-version = "0.10.12"
+version = "0.11.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19e8d2cfa184d94d0726d650a9f4a1be7f9b76ac9fdb954219878dc00c1c1e7b"
+checksum = "8ba9ce64a8f45d7fc86358410bb1a82e8c987504c0d4900e9141d69a9f26c885"
 dependencies = [
  "bytemuck",
  "byteorder",
 ]
 
-[[package]]
-name = "robust"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e27ee8bb91ca0adcf0ecb116293afa12d393f9c2b9b9cd54d33e8078fe19839"
-
 [[package]]
 name = "rsa"
 version = "0.9.8"
@@ -7309,17 +7108,6 @@ dependencies = [
  "zeroize",
 ]
 
-[[package]]
-name = "rstar"
-version = "0.12.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "421400d13ccfd26dfa5858199c30a5d76f9c54e0dba7575273025b43c5175dbb"
-dependencies = [
- "heapless",
- "num-traits",
- "smallvec",
-]
-
 [[package]]
 name = "rstest"
 version = "0.23.0"
@@ -7874,15 +7662,6 @@ dependencies = [
  "lazy_static",
 ]
 
-[[package]]
-name = "shellexpand"
-version = "3.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b1fdf65dd6331831494dd616b30351c38e96e45921a27745cf98490458b90bb"
-dependencies = [
- "dirs",
-]
-
 [[package]]
 name = "shlex"
 version = "1.3.0"
@@ -8032,18 +7811,6 @@ dependencies = [
  "winapi",
 ]
 
-[[package]]
-name = "spade"
-version = "2.15.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb313e1c8afee5b5647e00ee0fe6855e3d529eb863a0fdae1d60006c4d1e9990"
-dependencies = [
- "hashbrown 0.15.5",
- "num-traits",
- "robust",
- "smallvec",
-]
-
 [[package]]
 name = "spin"
 version = "0.9.8"
@@ -8938,7 +8705,7 @@ checksum = "90b70b37e9074642bc5f60bb23247fd072a84314ca9e71cdf8527593406a0dd3"
 dependencies = [
  "gemm 0.18.2",
  "half",
- "libloading",
+ "libloading 0.8.9",
  "memmap2 0.9.8",
  "num",
  "num-traits",
@@ -9638,31 +9405,6 @@ version = "0.46.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59"
 
-[[package]]
-name = "wkb"
-version = "0.9.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a120b336c7ad17749026d50427c23d838ecb50cd64aaea6254b5030152f890a9"
-dependencies = [
- "byteorder",
- "geo-traits",
- "num_enum",
- "thiserror 1.0.69",
-]
-
-[[package]]
-name = "wkt"
-version = "0.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "efb2b923ccc882312e559ffaa832a055ba9d1ac0cc8e86b3e25453247e4b81d7"
-dependencies = [
- "geo-traits",
- "geo-types",
- "log",
- "num-traits",
- "thiserror 1.0.69",
-]
-
 [[package]]
 name = "writeable"
 version = "0.6.1"
```
30
Cargo.toml
30
Cargo.toml
@@ -12,23 +12,23 @@ repository = "https://github.com/lancedb/lancedb"
|
|||||||
description = "Serverless, low-latency vector database for AI applications"
|
description = "Serverless, low-latency vector database for AI applications"
|
||||||
keywords = ["lancedb", "lance", "database", "vector", "search"]
|
keywords = ["lancedb", "lance", "database", "vector", "search"]
|
||||||
categories = ["database-implementations"]
|
categories = ["database-implementations"]
|
||||||
rust-version = "1.88.0"
|
rust-version = "1.91.0"
|
||||||
|
|
||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
lance = { "version" = "=2.0.0", default-features = false }
|
lance = { "version" = "=3.1.0-beta.2", default-features = false, "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
lance-core = "=2.0.0"
|
lance-core = { "version" = "=3.1.0-beta.2", "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
lance-datagen = "=2.0.0"
|
lance-datagen = { "version" = "=3.1.0-beta.2", "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
lance-file = "=2.0.0"
|
lance-file = { "version" = "=3.1.0-beta.2", "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
lance-io = { "version" = "=2.0.0", default-features = false }
|
lance-io = { "version" = "=3.1.0-beta.2", default-features = false, "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
lance-index = "=2.0.0"
|
lance-index = { "version" = "=3.1.0-beta.2", "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
lance-linalg = "=2.0.0"
|
lance-linalg = { "version" = "=3.1.0-beta.2", "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
lance-namespace = "=2.0.0"
|
lance-namespace = { "version" = "=3.1.0-beta.2", "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
lance-namespace-impls = { "version" = "=2.0.0", default-features = false }
|
lance-namespace-impls = { "version" = "=3.1.0-beta.2", default-features = false, "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
lance-table = "=2.0.0"
|
lance-table = { "version" = "=3.1.0-beta.2", "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
lance-testing = "=2.0.0"
|
lance-testing = { "version" = "=3.1.0-beta.2", "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
lance-datafusion = "=2.0.0"
|
lance-datafusion = { "version" = "=3.1.0-beta.2", "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
lance-encoding = "=2.0.0"
|
lance-encoding = { "version" = "=3.1.0-beta.2", "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
lance-arrow = "=2.0.0"
|
lance-arrow = { "version" = "=3.1.0-beta.2", "tag" = "v3.1.0-beta.2", "git" = "https://github.com/lance-format/lance.git" }
|
||||||
ahash = "0.8"
|
ahash = "0.8"
|
||||||
# Note that this one does not include pyarrow
|
# Note that this one does not include pyarrow
|
||||||
arrow = { version = "57.2", optional = false }
|
arrow = { version = "57.2", optional = false }
|
||||||
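Since these pins live under `[workspace.dependencies]`, member crates such as `rust/lancedb` inherit the git source without repeating it. A minimal sketch of the consuming side; the exact dependency list is illustrative:

```toml
# A workspace member's Cargo.toml: `workspace = true` inherits the
# version, git URL, and tag declared once at the workspace root.
[dependencies]
lance = { workspace = true }
lance-index = { workspace = true }
# Keeping every lance-* crate on the same tag matters: mixing sources
# would pull in two copies of shared types that no longer unify.
```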
Makefile (new file, 9 lines)
@@ -0,0 +1,9 @@
+.PHONY: licenses
+
+licenses:
+	cargo about generate about.hbs -o RUST_THIRD_PARTY_LICENSES.html -c about.toml
+	cd python && cargo about generate ../about.hbs -o RUST_THIRD_PARTY_LICENSES.html -c ../about.toml
+	cd python && uv sync --all-extras && uv tool run pip-licenses --python .venv/bin/python --format=markdown --with-urls --output-file=PYTHON_THIRD_PARTY_LICENSES.md
+	cd nodejs && cargo about generate ../about.hbs -o RUST_THIRD_PARTY_LICENSES.html -c ../about.toml
+	cd nodejs && npx license-checker --markdown --out NODEJS_THIRD_PARTY_LICENSES.md
+	cd java && ./mvnw license:aggregate-add-third-party -q
RUST_THIRD_PARTY_LICENSES.html (new file, 15276 lines)
File diff suppressed because it is too large
about.hbs (new file, 70 lines)
@@ -0,0 +1,70 @@
+<html>
+
+<head>
+    <style>
+        @media (prefers-color-scheme: dark) {
+            body {
+                background: #333;
+                color: white;
+            }
+            a {
+                color: skyblue;
+            }
+        }
+        .container {
+            font-family: sans-serif;
+            max-width: 800px;
+            margin: 0 auto;
+        }
+        .intro {
+            text-align: center;
+        }
+        .licenses-list {
+            list-style-type: none;
+            margin: 0;
+            padding: 0;
+        }
+        .license-used-by {
+            margin-top: -10px;
+        }
+        .license-text {
+            max-height: 200px;
+            overflow-y: scroll;
+            white-space: pre-wrap;
+        }
+    </style>
+</head>
+
+<body>
+    <main class="container">
+        <div class="intro">
+            <h1>Third Party Licenses</h1>
+            <p>This page lists the licenses of the projects used in cargo-about.</p>
+        </div>
+
+        <h2>Overview of licenses:</h2>
+        <ul class="licenses-overview">
+            {{#each overview}}
+            <li><a href="#{{id}}">{{name}}</a> ({{count}})</li>
+            {{/each}}
+        </ul>
+
+        <h2>All license text:</h2>
+        <ul class="licenses-list">
+            {{#each licenses}}
+            <li class="license">
+                <h3 id="{{id}}">{{name}}</h3>
+                <h4>Used by:</h4>
+                <ul class="license-used-by">
+                    {{#each used_by}}
+                    <li><a href="{{#if crate.repository}} {{crate.repository}} {{else}} https://crates.io/crates/{{crate.name}} {{/if}}">{{crate.name}} {{crate.version}}</a></li>
+                    {{/each}}
+                </ul>
+                <pre class="license-text">{{text}}</pre>
+            </li>
+            {{/each}}
+        </ul>
+    </main>
+</body>
+
+</html>
about.toml (new file, 18 lines)
@@ -0,0 +1,18 @@
+accepted = [
+    "0BSD",
+    "Apache-2.0",
+    "Apache-2.0 WITH LLVM-exception",
+    "BSD-2-Clause",
+    "BSD-3-Clause",
+    "BSL-1.0",
+    "bzip2-1.0.6",
+    "CC0-1.0",
+    "CDDL-1.0",
+    "CDLA-Permissive-2.0",
+    "ISC",
+    "MIT",
+    "MPL-2.0",
+    "OpenSSL",
+    "Unicode-3.0",
+    "Zlib",
+]
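cargo-about refuses to generate the report when a crate in the graph carries a license expression not covered by `accepted`, so the usual fix when a new dependency lands is to extend this list. A hedged sketch of such an extension; the added SPDX id is hypothetical, and the `ignore-dev-dependencies` switch is assumed from cargo-about's documented config options:

```toml
# Hypothetical about.toml tweak: accept one more SPDX identifier and
# skip dev-dependencies when gathering license data.
accepted = [
    "Apache-2.0",
    "MIT",
    "Unicode-DFS-2016",   # hypothetical new entry
]
ignore-dev-dependencies = true
```

The `licenses` target in the new Makefile wires this config into `cargo about generate` for the root, python, and nodejs crate roots.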
@@ -14,7 +14,7 @@ Add the following dependency to your `pom.xml`:
 <dependency>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-core</artifactId>
-    <version>0.26.0</version>
+    <version>0.27.0-beta.2</version>
 </dependency>
 ```
 
java/JAVA_THIRD_PARTY_LICENSES.md (new file, 71 lines)
@@ -0,0 +1,71 @@
+
+List of third-party dependencies grouped by their license type.
+
+Apache 2.0:
+
+* error-prone annotations (com.google.errorprone:error_prone_annotations:2.28.0 - https://errorprone.info/error_prone_annotations)
+
+Apache License 2.0:
+
+* JsonNullable Jackson module (org.openapitools:jackson-databind-nullable:0.2.6 - https://github.com/OpenAPITools/jackson-databind-nullable)
+
+Apache License V2.0:
+
+* FlatBuffers Java API (com.google.flatbuffers:flatbuffers-java:23.5.26 - https://github.com/google/flatbuffers)
+
+Apache License, Version 2.0:
+
+* Apache Commons Codec (commons-codec:commons-codec:1.15 - https://commons.apache.org/proper/commons-codec/)
+* Apache HttpClient (org.apache.httpcomponents.client5:httpclient5:5.2.1 - https://hc.apache.org/httpcomponents-client-5.0.x/5.2.1/httpclient5/)
+* Apache HttpComponents Core HTTP/1.1 (org.apache.httpcomponents.core5:httpcore5:5.2 - https://hc.apache.org/httpcomponents-core-5.2.x/5.2/httpcore5/)
+* Apache HttpComponents Core HTTP/2 (org.apache.httpcomponents.core5:httpcore5-h2:5.2 - https://hc.apache.org/httpcomponents-core-5.2.x/5.2/httpcore5-h2/)
+* Arrow Format (org.apache.arrow:arrow-format:15.0.0 - https://arrow.apache.org/arrow-format/)
+* Arrow Java C Data Interface (org.apache.arrow:arrow-c-data:15.0.0 - https://arrow.apache.org/arrow-c-data/)
+* Arrow Java Dataset (org.apache.arrow:arrow-dataset:15.0.0 - https://arrow.apache.org/arrow-dataset/)
+* Arrow Memory - Core (org.apache.arrow:arrow-memory-core:15.0.0 - https://arrow.apache.org/arrow-memory/arrow-memory-core/)
+* Arrow Memory - Netty (org.apache.arrow:arrow-memory-netty:15.0.0 - https://arrow.apache.org/arrow-memory/arrow-memory-netty/)
+* Arrow Vectors (org.apache.arrow:arrow-vector:15.0.0 - https://arrow.apache.org/arrow-vector/)
+* Guava: Google Core Libraries for Java (com.google.guava:guava:33.3.1-jre - https://github.com/google/guava)
+* J2ObjC Annotations (com.google.j2objc:j2objc-annotations:3.0.0 - https://github.com/google/j2objc/)
+* Netty/Buffer (io.netty:netty-buffer:4.1.104.Final - https://netty.io/netty-buffer/)
+* Netty/Common (io.netty:netty-common:4.1.104.Final - https://netty.io/netty-common/)
+
+Apache-2.0:
+
+* Apache Commons Lang (org.apache.commons:commons-lang3:3.18.0 - https://commons.apache.org/proper/commons-lang/)
+* lance-namespace-apache-client (org.lance:lance-namespace-apache-client:0.4.5 - https://github.com/openapitools/openapi-generator)
+* lance-namespace-core (org.lance:lance-namespace-core:0.4.5 - https://lance.org/format/namespace/lance-namespace-core/)
+
+EDL 1.0:
+
+* Jakarta Activation API jar (jakarta.activation:jakarta.activation-api:1.2.2 - https://github.com/eclipse-ee4j/jaf/jakarta.activation-api)
+
+Eclipse Distribution License - v 1.0:
+
+* Eclipse Collections API (org.eclipse.collections:eclipse-collections-api:11.1.0 - https://github.com/eclipse/eclipse-collections/eclipse-collections-api)
+* Eclipse Collections Main Library (org.eclipse.collections:eclipse-collections:11.1.0 - https://github.com/eclipse/eclipse-collections/eclipse-collections)
+* Jakarta XML Binding API (jakarta.xml.bind:jakarta.xml.bind-api:2.3.3 - https://github.com/eclipse-ee4j/jaxb-api/jakarta.xml.bind-api)
+
+Eclipse Public License - v 1.0:
+
+* Eclipse Collections API (org.eclipse.collections:eclipse-collections-api:11.1.0 - https://github.com/eclipse/eclipse-collections/eclipse-collections-api)
+* Eclipse Collections Main Library (org.eclipse.collections:eclipse-collections:11.1.0 - https://github.com/eclipse/eclipse-collections/eclipse-collections)
+
+The Apache Software License, Version 2.0:
+
+* FindBugs-jsr305 (com.google.code.findbugs:jsr305:3.0.2 - http://findbugs.sourceforge.net/)
+* Guava InternalFutureFailureAccess and InternalFutures (com.google.guava:failureaccess:1.0.2 - https://github.com/google/guava/failureaccess)
+* Guava ListenableFuture only (com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava - https://github.com/google/guava/listenablefuture)
+* Jackson datatype: JSR310 (com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.16.0 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jsr310)
+* Jackson module: Old JAXB Annotations (javax.xml.bind) (com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.17.1 - https://github.com/FasterXML/jackson-modules-base)
+* Jackson-annotations (com.fasterxml.jackson.core:jackson-annotations:2.16.0 - https://github.com/FasterXML/jackson)
+* Jackson-core (com.fasterxml.jackson.core:jackson-core:2.16.0 - https://github.com/FasterXML/jackson-core)
+* jackson-databind (com.fasterxml.jackson.core:jackson-databind:2.15.2 - https://github.com/FasterXML/jackson)
+* Jackson-JAXRS: base (com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.17.1 - https://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-base)
+* Jackson-JAXRS: JSON (com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.17.1 - https://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-json-provider)
+* JAR JNI Loader (org.questdb:jar-jni:1.1.1 - https://github.com/questdb/rust-maven-plugin)
+* Lance Core (org.lance:lance-core:2.0.0 - https://lance.org/)
+
+The MIT License:
+
+* Checker Qual (org.checkerframework:checker-qual:3.43.0 - https://checkerframework.org/)
java/lancedb-core/JAVA_THIRD_PARTY_LICENSES.md (new file, 71 lines)
@@ -0,0 +1,71 @@
+
+List of third-party dependencies grouped by their license type.
+
+Apache 2.0:
+
+* error-prone annotations (com.google.errorprone:error_prone_annotations:2.28.0 - https://errorprone.info/error_prone_annotations)
+
+Apache License 2.0:
+
+* JsonNullable Jackson module (org.openapitools:jackson-databind-nullable:0.2.6 - https://github.com/OpenAPITools/jackson-databind-nullable)
+
+Apache License V2.0:
+
+* FlatBuffers Java API (com.google.flatbuffers:flatbuffers-java:23.5.26 - https://github.com/google/flatbuffers)
+
+Apache License, Version 2.0:
+
+* Apache Commons Codec (commons-codec:commons-codec:1.15 - https://commons.apache.org/proper/commons-codec/)
+* Apache HttpClient (org.apache.httpcomponents.client5:httpclient5:5.2.1 - https://hc.apache.org/httpcomponents-client-5.0.x/5.2.1/httpclient5/)
+* Apache HttpComponents Core HTTP/1.1 (org.apache.httpcomponents.core5:httpcore5:5.2 - https://hc.apache.org/httpcomponents-core-5.2.x/5.2/httpcore5/)
+* Apache HttpComponents Core HTTP/2 (org.apache.httpcomponents.core5:httpcore5-h2:5.2 - https://hc.apache.org/httpcomponents-core-5.2.x/5.2/httpcore5-h2/)
+* Arrow Format (org.apache.arrow:arrow-format:15.0.0 - https://arrow.apache.org/arrow-format/)
+* Arrow Java C Data Interface (org.apache.arrow:arrow-c-data:15.0.0 - https://arrow.apache.org/arrow-c-data/)
+* Arrow Java Dataset (org.apache.arrow:arrow-dataset:15.0.0 - https://arrow.apache.org/arrow-dataset/)
+* Arrow Memory - Core (org.apache.arrow:arrow-memory-core:15.0.0 - https://arrow.apache.org/arrow-memory/arrow-memory-core/)
+* Arrow Memory - Netty (org.apache.arrow:arrow-memory-netty:15.0.0 - https://arrow.apache.org/arrow-memory/arrow-memory-netty/)
+* Arrow Vectors (org.apache.arrow:arrow-vector:15.0.0 - https://arrow.apache.org/arrow-vector/)
+* Guava: Google Core Libraries for Java (com.google.guava:guava:33.3.1-jre - https://github.com/google/guava)
+* J2ObjC Annotations (com.google.j2objc:j2objc-annotations:3.0.0 - https://github.com/google/j2objc/)
+* Netty/Buffer (io.netty:netty-buffer:4.1.104.Final - https://netty.io/netty-buffer/)
+* Netty/Common (io.netty:netty-common:4.1.104.Final - https://netty.io/netty-common/)
+
+Apache-2.0:
+
+* Apache Commons Lang (org.apache.commons:commons-lang3:3.18.0 - https://commons.apache.org/proper/commons-lang/)
+* lance-namespace-apache-client (org.lance:lance-namespace-apache-client:0.4.5 - https://github.com/openapitools/openapi-generator)
+* lance-namespace-core (org.lance:lance-namespace-core:0.4.5 - https://lance.org/format/namespace/lance-namespace-core/)
+
+EDL 1.0:
+
+* Jakarta Activation API jar (jakarta.activation:jakarta.activation-api:1.2.2 - https://github.com/eclipse-ee4j/jaf/jakarta.activation-api)
+
+Eclipse Distribution License - v 1.0:
+
+* Eclipse Collections API (org.eclipse.collections:eclipse-collections-api:11.1.0 - https://github.com/eclipse/eclipse-collections/eclipse-collections-api)
+* Eclipse Collections Main Library (org.eclipse.collections:eclipse-collections:11.1.0 - https://github.com/eclipse/eclipse-collections/eclipse-collections)
+* Jakarta XML Binding API (jakarta.xml.bind:jakarta.xml.bind-api:2.3.3 - https://github.com/eclipse-ee4j/jaxb-api/jakarta.xml.bind-api)
+
+Eclipse Public License - v 1.0:
+
+* Eclipse Collections API (org.eclipse.collections:eclipse-collections-api:11.1.0 - https://github.com/eclipse/eclipse-collections/eclipse-collections-api)
+* Eclipse Collections Main Library (org.eclipse.collections:eclipse-collections:11.1.0 - https://github.com/eclipse/eclipse-collections/eclipse-collections)
+
+The Apache Software License, Version 2.0:
+
+* FindBugs-jsr305 (com.google.code.findbugs:jsr305:3.0.2 - http://findbugs.sourceforge.net/)
+* Guava InternalFutureFailureAccess and InternalFutures (com.google.guava:failureaccess:1.0.2 - https://github.com/google/guava/failureaccess)
+* Guava ListenableFuture only (com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava - https://github.com/google/guava/listenablefuture)
+* Jackson datatype: JSR310 (com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.16.0 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jsr310)
+* Jackson module: Old JAXB Annotations (javax.xml.bind) (com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.17.1 - https://github.com/FasterXML/jackson-modules-base)
+* Jackson-annotations (com.fasterxml.jackson.core:jackson-annotations:2.16.0 - https://github.com/FasterXML/jackson)
+* Jackson-core (com.fasterxml.jackson.core:jackson-core:2.16.0 - https://github.com/FasterXML/jackson-core)
+* jackson-databind (com.fasterxml.jackson.core:jackson-databind:2.15.2 - https://github.com/FasterXML/jackson)
+* Jackson-JAXRS: base (com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.17.1 - https://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-base)
+* Jackson-JAXRS: JSON (com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.17.1 - https://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-json-provider)
+* JAR JNI Loader (org.questdb:jar-jni:1.1.1 - https://github.com/questdb/rust-maven-plugin)
+* Lance Core (org.lance:lance-core:2.0.0 - https://lance.org/)
+
+The MIT License:
+
+* Checker Qual (org.checkerframework:checker-qual:3.43.0 - https://checkerframework.org/)
java/lancedb-core/pom.xml
@@ -8,7 +8,7 @@
 <parent>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-parent</artifactId>
-    <version>0.26.0-final.0</version>
+    <version>0.27.0-beta.2</version>
     <relativePath>../pom.xml</relativePath>
 </parent>
 
java/pom.xml (17 lines changed)
@@ -6,7 +6,7 @@
 
 <groupId>com.lancedb</groupId>
 <artifactId>lancedb-parent</artifactId>
-<version>0.26.0-final.0</version>
+<version>0.27.0-beta.2</version>
 <packaging>pom</packaging>
 <name>${project.artifactId}</name>
 <description>LanceDB Java SDK Parent POM</description>
@@ -28,7 +28,7 @@
 <properties>
   <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
   <arrow.version>15.0.0</arrow.version>
-  <lance-core.version>2.0.0</lance-core.version>
+  <lance-core.version>3.1.0-beta.2</lance-core.version>
   <spotless.skip>false</spotless.skip>
   <spotless.version>2.30.0</spotless.version>
   <spotless.java.googlejavaformat.version>1.7</spotless.java.googlejavaformat.version>
@@ -160,6 +160,19 @@
       <groupId>com.diffplug.spotless</groupId>
       <artifactId>spotless-maven-plugin</artifactId>
     </plugin>
+    <plugin>
+      <groupId>org.codehaus.mojo</groupId>
+      <artifactId>license-maven-plugin</artifactId>
+      <version>2.4.0</version>
+      <configuration>
+        <outputDirectory>${project.basedir}</outputDirectory>
+        <thirdPartyFilename>JAVA_THIRD_PARTY_LICENSES.md</thirdPartyFilename>
+        <fileTemplate>/org/codehaus/mojo/license/third-party-file-groupByLicense.ftl</fileTemplate>
+        <includedScopes>compile,runtime</includedScopes>
+        <excludedScopes>test,provided</excludedScopes>
+        <sortArtifactByName>true</sortArtifactByName>
+      </configuration>
+    </plugin>
   </plugins>
   <pluginManagement>
     <plugins>
nodejs/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "lancedb-nodejs"
 edition.workspace = true
-version = "0.26.0"
+version = "0.27.0-beta.2"
 license.workspace = true
 description.workspace = true
 repository.workspace = true
@@ -19,11 +19,11 @@ arrow-schema.workspace = true
 env_logger.workspace = true
 futures.workspace = true
 lancedb = { path = "../rust/lancedb", default-features = false }
-napi = { version = "2.16.8", default-features = false, features = [
+napi = { version = "3.8.3", default-features = false, features = [
   "napi9",
   "async"
 ] }
-napi-derive = "2.16.4"
+napi-derive = "3.5.2"
 # Prevent dynamic linking of lzma, which comes from datafusion
 lzma-sys = { version = "*", features = ["static"] }
 log.workspace = true
@@ -33,7 +33,7 @@ aws-lc-sys = "=0.28.0"
 aws-lc-rs = "=1.13.0"
 
 [build-dependencies]
-napi-build = "2.1"
+napi-build = "2.3.1"
 
 [features]
 default = ["remote", "lancedb/aws", "lancedb/gcs", "lancedb/azure", "lancedb/dynamodb", "lancedb/oss", "lancedb/huggingface"]
nodejs/NODEJS_THIRD_PARTY_LICENSES.md (new file, 668 lines)
@@ -0,0 +1,668 @@
+[@75lb/deep-merge@1.1.2](https://github.com/75lb/deep-merge) - MIT
+[@aashutoshrathi/word-wrap@1.2.6](https://github.com/aashutoshrathi/word-wrap) - MIT
+[@ampproject/remapping@2.2.1](https://github.com/ampproject/remapping) - Apache-2.0
+[@aws-crypto/crc32@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
+[@aws-crypto/crc32c@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
+[@aws-crypto/ie11-detection@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
+[@aws-crypto/sha1-browser@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
+[@aws-crypto/sha256-browser@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
+[@aws-crypto/sha256-browser@5.2.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
+[@aws-crypto/sha256-js@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
+[@aws-crypto/sha256-js@5.2.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
+[@aws-crypto/supports-web-crypto@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
+[@aws-crypto/supports-web-crypto@5.2.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
+[@aws-crypto/util@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
+[@aws-crypto/util@5.2.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
+[@aws-sdk/client-dynamodb@3.602.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/client-kms@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/client-s3@3.550.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/client-sso-oidc@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/client-sso-oidc@3.600.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/client-sso@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/client-sso@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/client-sts@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/client-sts@3.600.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/core@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/core@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-env@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-env@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-http@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-http@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-ini@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-ini@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-node@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-node@3.600.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-process@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-process@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-sso@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-sso@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-web-identity@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/credential-provider-web-identity@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/endpoint-cache@3.572.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-bucket-endpoint@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-endpoint-discovery@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-expect-continue@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-flexible-checksums@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-host-header@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-host-header@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-location-constraint@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-logger@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-logger@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-recursion-detection@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-recursion-detection@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-sdk-s3@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-signing@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-ssec@3.537.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-user-agent@3.540.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/middleware-user-agent@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/region-config-resolver@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/region-config-resolver@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/signature-v4-multi-region@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/token-providers@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/token-providers@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/types@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/types@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/util-arn-parser@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/util-endpoints@3.540.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/util-endpoints@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/util-locate-window@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/util-user-agent-browser@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/util-user-agent-browser@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/util-user-agent-node@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/util-user-agent-node@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/util-utf8-browser@3.259.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@aws-sdk/xml-builder@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
+[@babel/code-frame@7.26.2](https://github.com/babel/babel) - MIT
+[@babel/compat-data@7.23.5](https://github.com/babel/babel) - MIT
+[@babel/core@7.23.7](https://github.com/babel/babel) - MIT
+[@babel/generator@7.23.6](https://github.com/babel/babel) - MIT
+[@babel/helper-compilation-targets@7.23.6](https://github.com/babel/babel) - MIT
+[@babel/helper-environment-visitor@7.22.20](https://github.com/babel/babel) - MIT
+[@babel/helper-function-name@7.23.0](https://github.com/babel/babel) - MIT
+[@babel/helper-hoist-variables@7.22.5](https://github.com/babel/babel) - MIT
+[@babel/helper-module-imports@7.22.15](https://github.com/babel/babel) - MIT
+[@babel/helper-module-transforms@7.23.3](https://github.com/babel/babel) - MIT
+[@babel/helper-plugin-utils@7.22.5](https://github.com/babel/babel) - MIT
+[@babel/helper-simple-access@7.22.5](https://github.com/babel/babel) - MIT
+[@babel/helper-split-export-declaration@7.22.6](https://github.com/babel/babel) - MIT
+[@babel/helper-string-parser@7.25.9](https://github.com/babel/babel) - MIT
+[@babel/helper-validator-identifier@7.25.9](https://github.com/babel/babel) - MIT
+[@babel/helper-validator-option@7.23.5](https://github.com/babel/babel) - MIT
+[@babel/helpers@7.27.0](https://github.com/babel/babel) - MIT
+[@babel/parser@7.27.0](https://github.com/babel/babel) - MIT
+[@babel/plugin-syntax-async-generators@7.8.4](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-async-generators) - MIT
+[@babel/plugin-syntax-bigint@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-bigint) - MIT
+[@babel/plugin-syntax-class-properties@7.12.13](https://github.com/babel/babel) - MIT
+[@babel/plugin-syntax-import-meta@7.10.4](https://github.com/babel/babel) - MIT
+[@babel/plugin-syntax-json-strings@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-json-strings) - MIT
+[@babel/plugin-syntax-jsx@7.23.3](https://github.com/babel/babel) - MIT
+[@babel/plugin-syntax-logical-assignment-operators@7.10.4](https://github.com/babel/babel) - MIT
+[@babel/plugin-syntax-nullish-coalescing-operator@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-nullish-coalescing-operator) - MIT
+[@babel/plugin-syntax-numeric-separator@7.10.4](https://github.com/babel/babel) - MIT
+[@babel/plugin-syntax-object-rest-spread@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-object-rest-spread) - MIT
+[@babel/plugin-syntax-optional-catch-binding@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-optional-catch-binding) - MIT
+[@babel/plugin-syntax-optional-chaining@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-optional-chaining) - MIT
+[@babel/plugin-syntax-top-level-await@7.14.5](https://github.com/babel/babel) - MIT
+[@babel/plugin-syntax-typescript@7.23.3](https://github.com/babel/babel) - MIT
+[@babel/template@7.27.0](https://github.com/babel/babel) - MIT
+[@babel/traverse@7.23.7](https://github.com/babel/babel) - MIT
+[@babel/types@7.27.0](https://github.com/babel/babel) - MIT
+[@bcoe/v8-coverage@0.2.3](https://github.com/demurgos/v8-coverage) - MIT
+[@biomejs/biome@1.8.3](https://github.com/biomejs/biome) - MIT OR Apache-2.0
+[@biomejs/cli-darwin-arm64@1.8.3](https://github.com/biomejs/biome) - MIT OR Apache-2.0
+[@eslint-community/eslint-utils@4.4.0](https://github.com/eslint-community/eslint-utils) - MIT
+[@eslint-community/regexpp@4.10.0](https://github.com/eslint-community/regexpp) - MIT
+[@eslint/eslintrc@2.1.4](https://github.com/eslint/eslintrc) - MIT
+[@eslint/js@8.57.0](https://github.com/eslint/eslint) - MIT
+[@huggingface/jinja@0.3.2](https://github.com/huggingface/huggingface.js) - MIT
+[@huggingface/transformers@3.0.2](https://github.com/huggingface/transformers.js) - Apache-2.0
+[@humanwhocodes/config-array@0.11.14](https://github.com/humanwhocodes/config-array) - Apache-2.0
+[@humanwhocodes/module-importer@1.0.1](https://github.com/humanwhocodes/module-importer) - Apache-2.0
+[@humanwhocodes/object-schema@2.0.2](https://github.com/humanwhocodes/object-schema) - BSD-3-Clause
+[@img/sharp-darwin-arm64@0.33.5](https://github.com/lovell/sharp) - Apache-2.0
+[@img/sharp-libvips-darwin-arm64@1.0.4](https://github.com/lovell/sharp-libvips) - LGPL-3.0-or-later
+[@isaacs/cliui@8.0.2](https://github.com/yargs/cliui) - ISC
+[@isaacs/fs-minipass@4.0.1](https://github.com/npm/fs-minipass) - ISC
+[@istanbuljs/load-nyc-config@1.1.0](https://github.com/istanbuljs/load-nyc-config) - ISC
+[@istanbuljs/schema@0.1.3](https://github.com/istanbuljs/schema) - MIT
+[@jest/console@29.7.0](https://github.com/jestjs/jest) - MIT
+[@jest/core@29.7.0](https://github.com/jestjs/jest) - MIT
+[@jest/environment@29.7.0](https://github.com/jestjs/jest) - MIT
+[@jest/expect-utils@29.7.0](https://github.com/jestjs/jest) - MIT
+[@jest/expect@29.7.0](https://github.com/jestjs/jest) - MIT
+[@jest/fake-timers@29.7.0](https://github.com/jestjs/jest) - MIT
+[@jest/globals@29.7.0](https://github.com/jestjs/jest) - MIT
+[@jest/reporters@29.7.0](https://github.com/jestjs/jest) - MIT
+[@jest/schemas@29.6.3](https://github.com/jestjs/jest) - MIT
+[@jest/source-map@29.6.3](https://github.com/jestjs/jest) - MIT
+[@jest/test-result@29.7.0](https://github.com/jestjs/jest) - MIT
+[@jest/test-sequencer@29.7.0](https://github.com/jestjs/jest) - MIT
+[@jest/transform@29.7.0](https://github.com/jestjs/jest) - MIT
+[@jest/types@29.6.3](https://github.com/jestjs/jest) - MIT
+[@jridgewell/gen-mapping@0.3.3](https://github.com/jridgewell/gen-mapping) - MIT
+[@jridgewell/resolve-uri@3.1.1](https://github.com/jridgewell/resolve-uri) - MIT
+[@jridgewell/set-array@1.1.2](https://github.com/jridgewell/set-array) - MIT
+[@jridgewell/sourcemap-codec@1.4.15](https://github.com/jridgewell/sourcemap-codec) - MIT
+[@jridgewell/trace-mapping@0.3.22](https://github.com/jridgewell/trace-mapping) - MIT
+[@lancedb/lancedb@0.26.2](https://github.com/lancedb/lancedb) - Apache-2.0
+[@napi-rs/cli@2.18.3](https://github.com/napi-rs/napi-rs) - MIT
+[@nodelib/fs.scandir@2.1.5](https://github.com/nodelib/nodelib/tree/master/packages/fs/fs.scandir) - MIT
+[@nodelib/fs.stat@2.0.5](https://github.com/nodelib/nodelib/tree/master/packages/fs/fs.stat) - MIT
+[@nodelib/fs.walk@1.2.8](https://github.com/nodelib/nodelib/tree/master/packages/fs/fs.walk) - MIT
+[@pkgjs/parseargs@0.11.0](https://github.com/pkgjs/parseargs) - MIT
+[@protobufjs/aspromise@1.1.2](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
+[@protobufjs/base64@1.1.2](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
+[@protobufjs/codegen@2.0.4](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
+[@protobufjs/eventemitter@1.1.0](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
+[@protobufjs/fetch@1.1.0](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
+[@protobufjs/float@1.0.2](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
+[@protobufjs/inquire@1.1.0](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
+[@protobufjs/path@1.1.2](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
+[@protobufjs/pool@1.1.0](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
+[@protobufjs/utf8@1.1.0](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
+[@shikijs/core@1.10.3](https://github.com/shikijs/shiki) - MIT
+[@sinclair/typebox@0.27.8](https://github.com/sinclairzx81/typebox) - MIT
+[@sinonjs/commons@3.0.1](https://github.com/sinonjs/commons) - BSD-3-Clause
+[@sinonjs/fake-timers@10.3.0](https://github.com/sinonjs/fake-timers) - BSD-3-Clause
+[@smithy/abort-controller@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/abort-controller@3.1.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/chunked-blob-reader-native@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/chunked-blob-reader@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/config-resolver@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/config-resolver@3.0.3](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/core@1.4.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/core@2.2.3](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/credential-provider-imds@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/credential-provider-imds@3.1.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/eventstream-codec@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/eventstream-serde-browser@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/eventstream-serde-config-resolver@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/eventstream-serde-node@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/eventstream-serde-universal@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/fetch-http-handler@2.5.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/fetch-http-handler@3.1.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/hash-blob-browser@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/hash-node@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/hash-node@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/hash-stream-node@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/invalid-dependency@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/invalid-dependency@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/is-array-buffer@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/is-array-buffer@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/md5-js@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/middleware-content-length@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/middleware-content-length@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/middleware-endpoint@2.5.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/middleware-endpoint@3.0.3](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/middleware-retry@2.3.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/middleware-retry@3.0.6](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/middleware-serde@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/middleware-serde@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/middleware-stack@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/middleware-stack@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/node-config-provider@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/node-config-provider@3.1.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/node-http-handler@2.5.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/node-http-handler@3.1.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/property-provider@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/property-provider@3.1.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/protocol-http@3.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/protocol-http@4.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/querystring-builder@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/querystring-builder@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/querystring-parser@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/querystring-parser@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/service-error-classification@2.1.5](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/service-error-classification@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/shared-ini-file-loader@2.4.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/shared-ini-file-loader@3.1.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/signature-v4@2.2.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/signature-v4@3.1.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/smithy-client@2.5.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/smithy-client@3.1.4](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/types@2.12.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/types@3.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/url-parser@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/url-parser@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-base64@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-base64@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-body-length-browser@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-body-length-browser@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-body-length-node@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-body-length-node@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-buffer-from@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-buffer-from@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-config-provider@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-config-provider@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-defaults-mode-browser@2.2.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-defaults-mode-browser@3.0.6](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-defaults-mode-node@2.3.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-defaults-mode-node@3.0.6](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-endpoints@1.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-endpoints@2.0.3](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-hex-encoding@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-hex-encoding@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-middleware@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-middleware@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-retry@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-retry@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-stream@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-stream@3.0.4](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-uri-escape@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-uri-escape@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-utf8@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-utf8@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-waiter@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@smithy/util-waiter@3.1.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
+[@swc/helpers@0.5.12](https://github.com/swc-project/swc) - Apache-2.0
+[@types/axios@0.14.0](https://github.com/mzabriskie/axios) - MIT
+[@types/babel__core@7.20.5](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/babel__generator@7.6.8](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/babel__template@7.4.4](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/babel__traverse@7.20.5](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/command-line-args@5.2.3](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/command-line-usage@5.0.2](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/command-line-usage@5.0.4](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/graceful-fs@4.1.9](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/hast@3.0.4](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/istanbul-lib-coverage@2.0.6](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/istanbul-lib-report@3.0.3](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/istanbul-reports@3.0.4](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/jest@29.5.12](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/json-schema@7.0.15](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/node-fetch@2.6.11](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/node@18.19.26](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/node@20.16.10](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/node@20.17.9](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/node@22.7.4](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/semver@7.5.6](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/stack-utils@2.0.3](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/tmp@0.2.6](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/unist@3.0.2](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/yargs-parser@21.0.3](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@types/yargs@17.0.32](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
+[@typescript-eslint/eslint-plugin@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
+[@typescript-eslint/parser@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - BSD-2-Clause
+[@typescript-eslint/scope-manager@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
+[@typescript-eslint/type-utils@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
+[@typescript-eslint/types@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
+[@typescript-eslint/typescript-estree@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - BSD-2-Clause
+[@typescript-eslint/utils@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
+[@typescript-eslint/visitor-keys@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
+[@ungap/structured-clone@1.2.0](https://github.com/ungap/structured-clone) - ISC
+[abort-controller@3.0.0](https://github.com/mysticatea/abort-controller) - MIT
+[acorn-jsx@5.3.2](https://github.com/acornjs/acorn-jsx) - MIT
+[acorn@8.11.3](https://github.com/acornjs/acorn) - MIT
+[agentkeepalive@4.5.0](https://github.com/node-modules/agentkeepalive) - MIT
+[ajv@6.12.6](https://github.com/ajv-validator/ajv) - MIT
+[ansi-escapes@4.3.2](https://github.com/sindresorhus/ansi-escapes) - MIT
+[ansi-regex@5.0.1](https://github.com/chalk/ansi-regex) - MIT
+[ansi-regex@6.1.0](https://github.com/chalk/ansi-regex) - MIT
+[ansi-styles@4.3.0](https://github.com/chalk/ansi-styles) - MIT
+[ansi-styles@5.2.0](https://github.com/chalk/ansi-styles) - MIT
+[ansi-styles@6.2.1](https://github.com/chalk/ansi-styles) - MIT
+[anymatch@3.1.3](https://github.com/micromatch/anymatch) - ISC
+[apache-arrow@15.0.0](https://github.com/apache/arrow) - Apache-2.0
+[apache-arrow@16.0.0](https://github.com/apache/arrow) - Apache-2.0
+[apache-arrow@17.0.0](https://github.com/apache/arrow) - Apache-2.0
+[apache-arrow@18.0.0](https://github.com/apache/arrow) - Apache-2.0
+[argparse@1.0.10](https://github.com/nodeca/argparse) - MIT
+[argparse@2.0.1](https://github.com/nodeca/argparse) - Python-2.0
+[array-back@3.1.0](https://github.com/75lb/array-back) - MIT
+[array-back@6.2.2](https://github.com/75lb/array-back) - MIT
+[array-union@2.1.0](https://github.com/sindresorhus/array-union) - MIT
+[asynckit@0.4.0](https://github.com/alexindigo/asynckit) - MIT
+[axios@1.8.4](https://github.com/axios/axios) - MIT
+[babel-jest@29.7.0](https://github.com/jestjs/jest) - MIT
+[babel-plugin-istanbul@6.1.1](https://github.com/istanbuljs/babel-plugin-istanbul) - BSD-3-Clause
+[babel-plugin-jest-hoist@29.6.3](https://github.com/jestjs/jest) - MIT
+[babel-preset-current-node-syntax@1.0.1](https://github.com/nicolo-ribaudo/babel-preset-current-node-syntax) - MIT
+[babel-preset-jest@29.6.3](https://github.com/jestjs/jest) - MIT
+[balanced-match@1.0.2](https://github.com/juliangruber/balanced-match) - MIT
+[base-64@0.1.0](https://github.com/mathiasbynens/base64) - MIT
+[bowser@2.11.0](https://github.com/lancedikson/bowser) - MIT
+[brace-expansion@1.1.11](https://github.com/juliangruber/brace-expansion) - MIT
+[brace-expansion@2.0.1](https://github.com/juliangruber/brace-expansion) - MIT
+[braces@3.0.3](https://github.com/micromatch/braces) - MIT
+[browserslist@4.22.2](https://github.com/browserslist/browserslist) - MIT
+[bs-logger@0.2.6](https://github.com/huafu/bs-logger) - MIT
+[bser@2.1.1](https://github.com/facebook/watchman) - Apache-2.0
+[buffer-from@1.1.2](https://github.com/LinusU/buffer-from) - MIT
+[callsites@3.1.0](https://github.com/sindresorhus/callsites) - MIT
+[camelcase@5.3.1](https://github.com/sindresorhus/camelcase) - MIT
+[camelcase@6.3.0](https://github.com/sindresorhus/camelcase) - MIT
+[caniuse-lite@1.0.30001579](https://github.com/browserslist/caniuse-lite) - CC-BY-4.0
+[chalk-template@0.4.0](https://github.com/chalk/chalk-template) - MIT
+[chalk@4.1.2](https://github.com/chalk/chalk) - MIT
+[char-regex@1.0.2](https://github.com/Richienb/char-regex) - MIT
+[charenc@0.0.2](https://github.com/pvorb/node-charenc) - BSD-3-Clause
+[chownr@3.0.0](https://github.com/isaacs/chownr) - BlueOak-1.0.0
+[ci-info@3.9.0](https://github.com/watson/ci-info) - MIT
+[cjs-module-lexer@1.2.3](https://github.com/nodejs/cjs-module-lexer) - MIT
+[cliui@8.0.1](https://github.com/yargs/cliui) - ISC
+[co@4.6.0](https://github.com/tj/co) - MIT
+[collect-v8-coverage@1.0.2](https://github.com/SimenB/collect-v8-coverage) - MIT
+[color-convert@2.0.1](https://github.com/Qix-/color-convert) - MIT
+[color-name@1.1.4](https://github.com/colorjs/color-name) - MIT
+[color-string@1.9.1](https://github.com/Qix-/color-string) - MIT
+[color@4.2.3](https://github.com/Qix-/color) - MIT
+[combined-stream@1.0.8](https://github.com/felixge/node-combined-stream) - MIT
+[command-line-args@5.2.1](https://github.com/75lb/command-line-args) - MIT
+[command-line-usage@7.0.1](https://github.com/75lb/command-line-usage) - MIT
+[concat-map@0.0.1](https://github.com/substack/node-concat-map) - MIT
+[convert-source-map@2.0.0](https://github.com/thlorenz/convert-source-map) - MIT
+[create-jest@29.7.0](https://github.com/jestjs/jest) - MIT
+[cross-spawn@7.0.6](https://github.com/moxystudio/node-cross-spawn) - MIT
+[crypt@0.0.2](https://github.com/pvorb/node-crypt) - BSD-3-Clause
+[debug@4.3.4](https://github.com/debug-js/debug) - MIT
+[dedent@1.5.1](https://github.com/dmnd/dedent) - MIT
+[deep-is@0.1.4](https://github.com/thlorenz/deep-is) - MIT
+[deepmerge@4.3.1](https://github.com/TehShrike/deepmerge) - MIT
+[delayed-stream@1.0.0](https://github.com/felixge/node-delayed-stream) - MIT
+[detect-libc@2.0.3](https://github.com/lovell/detect-libc) - Apache-2.0
+[detect-newline@3.1.0](https://github.com/sindresorhus/detect-newline) - MIT
+[diff-sequences@29.6.3](https://github.com/jestjs/jest) - MIT
+[digest-fetch@1.3.0](https://github.com/devfans/digest-fetch) - ISC
+[dir-glob@3.0.1](https://github.com/kevva/dir-glob) - MIT
+[doctrine@3.0.0](https://github.com/eslint/doctrine) - Apache-2.0
+[eastasianwidth@0.2.0](https://github.com/komagata/eastasianwidth) - MIT
+[electron-to-chromium@1.4.642](https://github.com/kilian/electron-to-chromium) - ISC
+[emittery@0.13.1](https://github.com/sindresorhus/emittery) - MIT
+[emoji-regex@8.0.0](https://github.com/mathiasbynens/emoji-regex) - MIT
+[emoji-regex@9.2.2](https://github.com/mathiasbynens/emoji-regex) - MIT
+[entities@4.5.0](https://github.com/fb55/entities) - BSD-2-Clause
+[error-ex@1.3.2](https://github.com/qix-/node-error-ex) - MIT
+[escalade@3.1.1](https://github.com/lukeed/escalade) - MIT
+[escape-string-regexp@2.0.0](https://github.com/sindresorhus/escape-string-regexp) - MIT
+[escape-string-regexp@4.0.0](https://github.com/sindresorhus/escape-string-regexp) - MIT
+[eslint-scope@7.2.2](https://github.com/eslint/eslint-scope) - BSD-2-Clause
+[eslint-visitor-keys@3.4.3](https://github.com/eslint/eslint-visitor-keys) - Apache-2.0
+[eslint@8.57.0](https://github.com/eslint/eslint) - MIT
+[espree@9.6.1](https://github.com/eslint/espree) - BSD-2-Clause
+[esprima@4.0.1](https://github.com/jquery/esprima) - BSD-2-Clause
+[esquery@1.5.0](https://github.com/estools/esquery) - BSD-3-Clause
+[esrecurse@4.3.0](https://github.com/estools/esrecurse) - BSD-2-Clause
+[estraverse@5.3.0](https://github.com/estools/estraverse) - BSD-2-Clause
+[esutils@2.0.3](https://github.com/estools/esutils) - BSD-2-Clause
+[event-target-shim@5.0.1](https://github.com/mysticatea/event-target-shim) - MIT
+[execa@5.1.1](https://github.com/sindresorhus/execa) - MIT
+[exit@0.1.2](https://github.com/cowboy/node-exit) - MIT
+[expect@29.7.0](https://github.com/jestjs/jest) - MIT
+[fast-deep-equal@3.1.3](https://github.com/epoberezkin/fast-deep-equal) - MIT
|
||||||
|
[fast-glob@3.3.2](https://github.com/mrmlnc/fast-glob) - MIT
|
||||||
|
[fast-json-stable-stringify@2.1.0](https://github.com/epoberezkin/fast-json-stable-stringify) - MIT
|
||||||
|
[fast-levenshtein@2.0.6](https://github.com/hiddentao/fast-levenshtein) - MIT
|
||||||
|
[fast-xml-parser@4.2.5](https://github.com/NaturalIntelligence/fast-xml-parser) - MIT
|
||||||
|
[fastq@1.16.0](https://github.com/mcollina/fastq) - ISC
|
||||||
|
[fb-watchman@2.0.2](https://github.com/facebook/watchman) - Apache-2.0
|
||||||
|
[file-entry-cache@6.0.1](https://github.com/royriojas/file-entry-cache) - MIT
|
||||||
|
[fill-range@7.1.1](https://github.com/jonschlinkert/fill-range) - MIT
|
||||||
|
[find-replace@3.0.0](https://github.com/75lb/find-replace) - MIT
|
||||||
|
[find-up@4.1.0](https://github.com/sindresorhus/find-up) - MIT
|
||||||
|
[find-up@5.0.0](https://github.com/sindresorhus/find-up) - MIT
|
||||||
|
[flat-cache@3.2.0](https://github.com/jaredwray/flat-cache) - MIT
|
||||||
|
[flatbuffers@1.12.0](https://github.com/google/flatbuffers) - Apache*
|
||||||
|
[flatbuffers@23.5.26](https://github.com/google/flatbuffers) - Apache*
|
||||||
|
[flatbuffers@24.3.25](https://github.com/google/flatbuffers) - Apache-2.0
|
||||||
|
[flatted@3.2.9](https://github.com/WebReflection/flatted) - ISC
|
||||||
|
[follow-redirects@1.15.6](https://github.com/follow-redirects/follow-redirects) - MIT
|
||||||
|
[foreground-child@3.3.0](https://github.com/tapjs/foreground-child) - ISC
|
||||||
|
[form-data-encoder@1.7.2](https://github.com/octet-stream/form-data-encoder) - MIT
|
||||||
|
[form-data@4.0.0](https://github.com/form-data/form-data) - MIT
|
||||||
|
[formdata-node@4.4.1](https://github.com/octet-stream/form-data) - MIT
|
||||||
|
[fs.realpath@1.0.0](https://github.com/isaacs/fs.realpath) - ISC
|
||||||
|
[fsevents@2.3.3](https://github.com/fsevents/fsevents) - MIT
|
||||||
|
[function-bind@1.1.2](https://github.com/Raynos/function-bind) - MIT
|
||||||
|
[gensync@1.0.0-beta.2](https://github.com/loganfsmyth/gensync) - MIT
|
||||||
|
[get-caller-file@2.0.5](https://github.com/stefanpenner/get-caller-file) - ISC
|
||||||
|
[get-package-type@0.1.0](https://github.com/cfware/get-package-type) - MIT
|
||||||
|
[get-stream@6.0.1](https://github.com/sindresorhus/get-stream) - MIT
|
||||||
|
[glob-parent@5.1.2](https://github.com/gulpjs/glob-parent) - ISC
|
||||||
|
[glob-parent@6.0.2](https://github.com/gulpjs/glob-parent) - ISC
|
||||||
|
[glob@10.4.5](https://github.com/isaacs/node-glob) - ISC
|
||||||
|
[glob@7.2.3](https://github.com/isaacs/node-glob) - ISC
|
||||||
|
[globals@11.12.0](https://github.com/sindresorhus/globals) - MIT
|
||||||
|
[globals@13.24.0](https://github.com/sindresorhus/globals) - MIT
|
||||||
|
[globby@11.1.0](https://github.com/sindresorhus/globby) - MIT
|
||||||
|
[graceful-fs@4.2.11](https://github.com/isaacs/node-graceful-fs) - ISC
|
||||||
|
[graphemer@1.4.0](https://github.com/flmnt/graphemer) - MIT
|
||||||
|
[guid-typescript@1.0.9](https://github.com/NicolasDeveloper/guid-typescript) - ISC
|
||||||
|
[has-flag@4.0.0](https://github.com/sindresorhus/has-flag) - MIT
|
||||||
|
[hasown@2.0.0](https://github.com/inspect-js/hasOwn) - MIT
|
||||||
|
[html-escaper@2.0.2](https://github.com/WebReflection/html-escaper) - MIT
|
||||||
|
[human-signals@2.1.0](https://github.com/ehmicky/human-signals) - Apache-2.0
|
||||||
|
[humanize-ms@1.2.1](https://github.com/node-modules/humanize-ms) - MIT
|
||||||
|
[ignore@5.3.0](https://github.com/kaelzhang/node-ignore) - MIT
|
||||||
|
[import-fresh@3.3.0](https://github.com/sindresorhus/import-fresh) - MIT
|
||||||
|
[import-local@3.1.0](https://github.com/sindresorhus/import-local) - MIT
|
||||||
|
[imurmurhash@0.1.4](https://github.com/jensyt/imurmurhash-js) - MIT
|
||||||
|
[inflight@1.0.6](https://github.com/npm/inflight) - ISC
|
||||||
|
[inherits@2.0.4](https://github.com/isaacs/inherits) - ISC
|
||||||
|
[interpret@1.4.0](https://github.com/gulpjs/interpret) - MIT
|
||||||
|
[is-arrayish@0.2.1](https://github.com/qix-/node-is-arrayish) - MIT
|
||||||
|
[is-arrayish@0.3.2](https://github.com/qix-/node-is-arrayish) - MIT
|
||||||
|
[is-buffer@1.1.6](https://github.com/feross/is-buffer) - MIT
|
||||||
|
[is-core-module@2.13.1](https://github.com/inspect-js/is-core-module) - MIT
|
||||||
|
[is-extglob@2.1.1](https://github.com/jonschlinkert/is-extglob) - MIT
|
||||||
|
[is-fullwidth-code-point@3.0.0](https://github.com/sindresorhus/is-fullwidth-code-point) - MIT
|
||||||
|
[is-generator-fn@2.1.0](https://github.com/sindresorhus/is-generator-fn) - MIT
|
||||||
|
[is-glob@4.0.3](https://github.com/micromatch/is-glob) - MIT
|
||||||
|
[is-number@7.0.0](https://github.com/jonschlinkert/is-number) - MIT
|
||||||
|
[is-path-inside@3.0.3](https://github.com/sindresorhus/is-path-inside) - MIT
|
||||||
|
[is-stream@2.0.1](https://github.com/sindresorhus/is-stream) - MIT
|
||||||
|
[isexe@2.0.0](https://github.com/isaacs/isexe) - ISC
|
||||||
|
[istanbul-lib-coverage@3.2.2](https://github.com/istanbuljs/istanbuljs) - BSD-3-Clause
|
||||||
|
[istanbul-lib-instrument@5.2.1](https://github.com/istanbuljs/istanbuljs) - BSD-3-Clause
|
||||||
|
[istanbul-lib-instrument@6.0.1](https://github.com/istanbuljs/istanbuljs) - BSD-3-Clause
|
||||||
|
[istanbul-lib-report@3.0.1](https://github.com/istanbuljs/istanbuljs) - BSD-3-Clause
|
||||||
|
[istanbul-lib-source-maps@4.0.1](https://github.com/istanbuljs/istanbuljs) - BSD-3-Clause
|
||||||
|
[istanbul-reports@3.1.6](https://github.com/istanbuljs/istanbuljs) - BSD-3-Clause
|
||||||
|
[jackspeak@3.4.3](https://github.com/isaacs/jackspeak) - BlueOak-1.0.0
|
||||||
|
[jest-changed-files@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-circus@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-cli@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-config@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-diff@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-docblock@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-each@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-environment-node@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-get-type@29.6.3](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-haste-map@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-leak-detector@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-matcher-utils@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-message-util@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-mock@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-pnp-resolver@1.2.3](https://github.com/arcanis/jest-pnp-resolver) - MIT
|
||||||
|
[jest-regex-util@29.6.3](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-resolve-dependencies@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-resolve@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-runner@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-runtime@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-snapshot@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-util@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-validate@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-watcher@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest-worker@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[jest@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[js-tokens@4.0.0](https://github.com/lydell/js-tokens) - MIT
|
||||||
|
[js-yaml@3.14.1](https://github.com/nodeca/js-yaml) - MIT
|
||||||
|
[js-yaml@4.1.0](https://github.com/nodeca/js-yaml) - MIT
|
||||||
|
[jsesc@2.5.2](https://github.com/mathiasbynens/jsesc) - MIT
|
||||||
|
[json-bignum@0.0.3](https://github.com/datalanche/json-bignum) - MIT
|
||||||
|
[json-buffer@3.0.1](https://github.com/dominictarr/json-buffer) - MIT
|
||||||
|
[json-parse-even-better-errors@2.3.1](https://github.com/npm/json-parse-even-better-errors) - MIT
|
||||||
|
[json-schema-traverse@0.4.1](https://github.com/epoberezkin/json-schema-traverse) - MIT
|
||||||
|
[json-stable-stringify-without-jsonify@1.0.1](https://github.com/samn/json-stable-stringify) - MIT
|
||||||
|
[json5@2.2.3](https://github.com/json5/json5) - MIT
|
||||||
|
[keyv@4.5.4](https://github.com/jaredwray/keyv) - MIT
|
||||||
|
[kleur@3.0.3](https://github.com/lukeed/kleur) - MIT
|
||||||
|
[leven@3.1.0](https://github.com/sindresorhus/leven) - MIT
|
||||||
|
[levn@0.4.1](https://github.com/gkz/levn) - MIT
|
||||||
|
[lines-and-columns@1.2.4](https://github.com/eventualbuddha/lines-and-columns) - MIT
|
||||||
|
[linkify-it@5.0.0](https://github.com/markdown-it/linkify-it) - MIT
|
||||||
|
[locate-path@5.0.0](https://github.com/sindresorhus/locate-path) - MIT
|
||||||
|
[locate-path@6.0.0](https://github.com/sindresorhus/locate-path) - MIT
|
||||||
|
[lodash.camelcase@4.3.0](https://github.com/lodash/lodash) - MIT
|
||||||
|
[lodash.memoize@4.1.2](https://github.com/lodash/lodash) - MIT
|
||||||
|
[lodash.merge@4.6.2](https://github.com/lodash/lodash) - MIT
|
||||||
|
[lodash@4.17.21](https://github.com/lodash/lodash) - MIT
|
||||||
|
[long@5.2.3](https://github.com/dcodeIO/long.js) - Apache-2.0
|
||||||
|
[lru-cache@10.4.3](https://github.com/isaacs/node-lru-cache) - ISC
|
||||||
|
[lru-cache@5.1.1](https://github.com/isaacs/node-lru-cache) - ISC
|
||||||
|
[lunr@2.3.9](https://github.com/olivernn/lunr.js) - MIT
|
||||||
|
[make-dir@4.0.0](https://github.com/sindresorhus/make-dir) - MIT
|
||||||
|
[make-error@1.3.6](https://github.com/JsCommunity/make-error) - ISC
|
||||||
|
[makeerror@1.0.12](https://github.com/daaku/nodejs-makeerror) - BSD-3-Clause
|
||||||
|
[markdown-it@14.1.0](https://github.com/markdown-it/markdown-it) - MIT
|
||||||
|
[md5@2.3.0](https://github.com/pvorb/node-md5) - BSD-3-Clause
|
||||||
|
[mdurl@2.0.0](https://github.com/markdown-it/mdurl) - MIT
|
||||||
|
[merge-stream@2.0.0](https://github.com/grncdr/merge-stream) - MIT
|
||||||
|
[merge2@1.4.1](https://github.com/teambition/merge2) - MIT
|
||||||
|
[micromatch@4.0.8](https://github.com/micromatch/micromatch) - MIT
|
||||||
|
[mime-db@1.52.0](https://github.com/jshttp/mime-db) - MIT
|
||||||
|
[mime-types@2.1.35](https://github.com/jshttp/mime-types) - MIT
|
||||||
|
[mimic-fn@2.1.0](https://github.com/sindresorhus/mimic-fn) - MIT
|
||||||
|
[minimatch@3.1.2](https://github.com/isaacs/minimatch) - ISC
|
||||||
|
[minimatch@9.0.3](https://github.com/isaacs/minimatch) - ISC
|
||||||
|
[minimatch@9.0.5](https://github.com/isaacs/minimatch) - ISC
|
||||||
|
[minimist@1.2.8](https://github.com/minimistjs/minimist) - MIT
|
||||||
|
[minipass@7.1.2](https://github.com/isaacs/minipass) - ISC
|
||||||
|
[minizlib@3.0.1](https://github.com/isaacs/minizlib) - MIT
|
||||||
|
[mkdirp@3.0.1](https://github.com/isaacs/node-mkdirp) - MIT
|
||||||
|
[mnemonist@0.38.3](https://github.com/yomguithereal/mnemonist) - MIT
|
||||||
|
[ms@2.1.2](https://github.com/zeit/ms) - MIT
|
||||||
|
[ms@2.1.3](https://github.com/vercel/ms) - MIT
|
||||||
|
[natural-compare@1.4.0](https://github.com/litejs/natural-compare-lite) - MIT
|
||||||
|
[node-domexception@1.0.0](https://github.com/jimmywarting/node-domexception) - MIT
|
||||||
|
[node-fetch@2.7.0](https://github.com/bitinn/node-fetch) - MIT
|
||||||
|
[node-int64@0.4.0](https://github.com/broofa/node-int64) - MIT
|
||||||
|
[node-releases@2.0.14](https://github.com/chicoxyzzy/node-releases) - MIT
|
||||||
|
[normalize-path@3.0.0](https://github.com/jonschlinkert/normalize-path) - MIT
|
||||||
|
[npm-run-path@4.0.1](https://github.com/sindresorhus/npm-run-path) - MIT
|
||||||
|
[obliterator@1.6.1](https://github.com/yomguithereal/obliterator) - MIT
|
||||||
|
[once@1.4.0](https://github.com/isaacs/once) - ISC
|
||||||
|
[onetime@5.1.2](https://github.com/sindresorhus/onetime) - MIT
|
||||||
|
[onnxruntime-common@1.19.2](https://github.com/Microsoft/onnxruntime) - MIT
|
||||||
|
[onnxruntime-common@1.20.0-dev.20241016-2b8fc5529b](https://github.com/Microsoft/onnxruntime) - MIT
|
||||||
|
[onnxruntime-node@1.19.2](https://github.com/Microsoft/onnxruntime) - MIT
|
||||||
|
[onnxruntime-web@1.21.0-dev.20241024-d9ca84ef96](https://github.com/Microsoft/onnxruntime) - MIT
|
||||||
|
[openai@4.29.2](https://github.com/openai/openai-node) - Apache-2.0
|
||||||
|
[optionator@0.9.3](https://github.com/gkz/optionator) - MIT
|
||||||
|
[p-limit@2.3.0](https://github.com/sindresorhus/p-limit) - MIT
|
||||||
|
[p-limit@3.1.0](https://github.com/sindresorhus/p-limit) - MIT
|
||||||
|
[p-locate@4.1.0](https://github.com/sindresorhus/p-locate) - MIT
|
||||||
|
[p-locate@5.0.0](https://github.com/sindresorhus/p-locate) - MIT
|
||||||
|
[p-try@2.2.0](https://github.com/sindresorhus/p-try) - MIT
|
||||||
|
[package-json-from-dist@1.0.1](https://github.com/isaacs/package-json-from-dist) - BlueOak-1.0.0
|
||||||
|
[parent-module@1.0.1](https://github.com/sindresorhus/parent-module) - MIT
|
||||||
|
[parse-json@5.2.0](https://github.com/sindresorhus/parse-json) - MIT
|
||||||
|
[path-exists@4.0.0](https://github.com/sindresorhus/path-exists) - MIT
|
||||||
|
[path-is-absolute@1.0.1](https://github.com/sindresorhus/path-is-absolute) - MIT
|
||||||
|
[path-key@3.1.1](https://github.com/sindresorhus/path-key) - MIT
|
||||||
|
[path-parse@1.0.7](https://github.com/jbgutierrez/path-parse) - MIT
|
||||||
|
[path-scurry@1.11.1](https://github.com/isaacs/path-scurry) - BlueOak-1.0.0
|
||||||
|
[path-type@4.0.0](https://github.com/sindresorhus/path-type) - MIT
|
||||||
|
[picocolors@1.0.0](https://github.com/alexeyraspopov/picocolors) - ISC
|
||||||
|
[picomatch@2.3.1](https://github.com/micromatch/picomatch) - MIT
|
||||||
|
[pirates@4.0.6](https://github.com/danez/pirates) - MIT
|
||||||
|
[pkg-dir@4.2.0](https://github.com/sindresorhus/pkg-dir) - MIT
|
||||||
|
[platform@1.3.6](https://github.com/bestiejs/platform.js) - MIT
|
||||||
|
[prelude-ls@1.2.1](https://github.com/gkz/prelude-ls) - MIT
|
||||||
|
[pretty-format@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||||
|
[prompts@2.4.2](https://github.com/terkelg/prompts) - MIT
|
||||||
|
[protobufjs@7.4.0](https://github.com/protobufjs/protobuf.js) - BSD-3-Clause
|
||||||
|
[proxy-from-env@1.1.0](https://github.com/Rob--W/proxy-from-env) - MIT
|
||||||
|
[punycode.js@2.3.1](https://github.com/mathiasbynens/punycode.js) - MIT
|
||||||
|
[punycode@2.3.1](https://github.com/mathiasbynens/punycode.js) - MIT
|
||||||
|
[pure-rand@6.0.4](https://github.com/dubzzz/pure-rand) - MIT
|
||||||
|
[queue-microtask@1.2.3](https://github.com/feross/queue-microtask) - MIT
|
||||||
|
[react-is@18.2.0](https://github.com/facebook/react) - MIT
|
||||||
|
[rechoir@0.6.2](https://github.com/tkellen/node-rechoir) - MIT
|
||||||
|
[reflect-metadata@0.2.2](https://github.com/rbuckton/reflect-metadata) - Apache-2.0
|
||||||
|
[require-directory@2.1.1](https://github.com/troygoode/node-require-directory) - MIT
|
||||||
|
[resolve-cwd@3.0.0](https://github.com/sindresorhus/resolve-cwd) - MIT
|
||||||
|
[resolve-from@4.0.0](https://github.com/sindresorhus/resolve-from) - MIT
|
||||||
|
[resolve-from@5.0.0](https://github.com/sindresorhus/resolve-from) - MIT
|
||||||
|
[resolve.exports@2.0.2](https://github.com/lukeed/resolve.exports) - MIT
|
||||||
|
[resolve@1.22.8](https://github.com/browserify/resolve) - MIT
|
||||||
|
[reusify@1.0.4](https://github.com/mcollina/reusify) - MIT
|
||||||
|
[rimraf@3.0.2](https://github.com/isaacs/rimraf) - ISC
|
||||||
|
[rimraf@5.0.10](https://github.com/isaacs/rimraf) - ISC
|
||||||
|
[run-parallel@1.2.0](https://github.com/feross/run-parallel) - MIT
|
||||||
|
[semver@6.3.1](https://github.com/npm/node-semver) - ISC
|
||||||
|
[semver@7.6.3](https://github.com/npm/node-semver) - ISC
|
||||||
|
[sharp@0.33.5](https://github.com/lovell/sharp) - Apache-2.0
|
||||||
|
[shebang-command@2.0.0](https://github.com/kevva/shebang-command) - MIT
|
||||||
|
[shebang-regex@3.0.0](https://github.com/sindresorhus/shebang-regex) - MIT
|
||||||
|
[shelljs@0.8.5](https://github.com/shelljs/shelljs) - BSD-3-Clause
|
||||||
|
[shiki@1.10.3](https://github.com/shikijs/shiki) - MIT
|
||||||
|
[shx@0.3.4](https://github.com/shelljs/shx) - MIT
|
||||||
|
[signal-exit@3.0.7](https://github.com/tapjs/signal-exit) - ISC
|
||||||
|
[signal-exit@4.1.0](https://github.com/tapjs/signal-exit) - ISC
|
||||||
|
[simple-swizzle@0.2.2](https://github.com/qix-/node-simple-swizzle) - MIT
|
||||||
|
[sisteransi@1.0.5](https://github.com/terkelg/sisteransi) - MIT
|
||||||
|
[slash@3.0.0](https://github.com/sindresorhus/slash) - MIT
|
||||||
|
[source-map-support@0.5.13](https://github.com/evanw/node-source-map-support) - MIT
|
||||||
|
[source-map@0.6.1](https://github.com/mozilla/source-map) - BSD-3-Clause
|
||||||
|
[sprintf-js@1.0.3](https://github.com/alexei/sprintf.js) - BSD-3-Clause
|
||||||
|
[stack-utils@2.0.6](https://github.com/tapjs/stack-utils) - MIT
|
||||||
|
[stream-read-all@3.0.1](https://github.com/75lb/stream-read-all) - MIT
|
||||||
|
[string-length@4.0.2](https://github.com/sindresorhus/string-length) - MIT
|
||||||
|
[string-width@4.2.3](https://github.com/sindresorhus/string-width) - MIT
|
||||||
|
[string-width@5.1.2](https://github.com/sindresorhus/string-width) - MIT
|
||||||
|
[strip-ansi@6.0.1](https://github.com/chalk/strip-ansi) - MIT
|
||||||
|
[strip-ansi@7.1.0](https://github.com/chalk/strip-ansi) - MIT
|
||||||
|
[strip-bom@4.0.0](https://github.com/sindresorhus/strip-bom) - MIT
|
||||||
|
[strip-final-newline@2.0.0](https://github.com/sindresorhus/strip-final-newline) - MIT
|
||||||
|
[strip-json-comments@3.1.1](https://github.com/sindresorhus/strip-json-comments) - MIT
|
||||||
|
[strnum@1.0.5](https://github.com/NaturalIntelligence/strnum) - MIT
|
||||||
|
[supports-color@7.2.0](https://github.com/chalk/supports-color) - MIT
|
||||||
|
[supports-color@8.1.1](https://github.com/chalk/supports-color) - MIT
|
||||||
|
[supports-preserve-symlinks-flag@1.0.0](https://github.com/inspect-js/node-supports-preserve-symlinks-flag) - MIT
|
||||||
|
[table-layout@3.0.2](https://github.com/75lb/table-layout) - MIT
|
||||||
|
[tar@7.4.3](https://github.com/isaacs/node-tar) - ISC
|
||||||
|
[test-exclude@6.0.0](https://github.com/istanbuljs/test-exclude) - ISC
|
||||||
|
[text-table@0.2.0](https://github.com/substack/text-table) - MIT
|
||||||
|
[tmp@0.2.3](https://github.com/raszi/node-tmp) - MIT
|
||||||
|
[tmpl@1.0.5](https://github.com/daaku/nodejs-tmpl) - BSD-3-Clause
|
||||||
|
[to-regex-range@5.0.1](https://github.com/micromatch/to-regex-range) - MIT
|
||||||
|
[tr46@0.0.3](https://github.com/Sebmaster/tr46.js) - MIT
|
||||||
|
[ts-api-utils@1.0.3](https://github.com/JoshuaKGoldberg/ts-api-utils) - MIT
|
||||||
|
[ts-jest@29.1.2](https://github.com/kulshekhar/ts-jest) - MIT
|
||||||
|
[tslib@1.14.1](https://github.com/Microsoft/tslib) - 0BSD
|
||||||
|
[tslib@2.6.2](https://github.com/Microsoft/tslib) - 0BSD
|
||||||
|
[type-check@0.4.0](https://github.com/gkz/type-check) - MIT
|
||||||
|
[type-detect@4.0.8](https://github.com/chaijs/type-detect) - MIT
|
||||||
|
[type-fest@0.20.2](https://github.com/sindresorhus/type-fest) - (MIT OR CC0-1.0)
|
||||||
|
[type-fest@0.21.3](https://github.com/sindresorhus/type-fest) - (MIT OR CC0-1.0)
|
||||||
|
[typedoc-plugin-markdown@4.2.1](https://github.com/typedoc2md/typedoc-plugin-markdown) - MIT
|
||||||
|
[typedoc@0.26.4](https://github.com/TypeStrong/TypeDoc) - Apache-2.0
|
||||||
|
[typescript-eslint@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
|
||||||
|
[typescript@5.5.4](https://github.com/Microsoft/TypeScript) - Apache-2.0
|
||||||
|
[typical@4.0.0](https://github.com/75lb/typical) - MIT
|
||||||
|
[typical@7.1.1](https://github.com/75lb/typical) - MIT
|
||||||
|
[uc.micro@2.1.0](https://github.com/markdown-it/uc.micro) - MIT
|
||||||
|
[undici-types@5.26.5](https://github.com/nodejs/undici) - MIT
|
||||||
|
[undici-types@6.19.8](https://github.com/nodejs/undici) - MIT
|
||||||
|
[update-browserslist-db@1.0.13](https://github.com/browserslist/update-db) - MIT
|
||||||
|
[uri-js@4.4.1](https://github.com/garycourt/uri-js) - BSD-2-Clause
|
||||||
|
[uuid@9.0.1](https://github.com/uuidjs/uuid) - MIT
|
||||||
|
[v8-to-istanbul@9.2.0](https://github.com/istanbuljs/v8-to-istanbul) - ISC
|
||||||
|
[walker@1.0.8](https://github.com/daaku/nodejs-walker) - Apache-2.0
|
||||||
|
[web-streams-polyfill@3.3.3](https://github.com/MattiasBuelens/web-streams-polyfill) - MIT
|
||||||
|
[web-streams-polyfill@4.0.0-beta.3](https://github.com/MattiasBuelens/web-streams-polyfill) - MIT
|
||||||
|
[webidl-conversions@3.0.1](https://github.com/jsdom/webidl-conversions) - BSD-2-Clause
|
||||||
|
[whatwg-url@5.0.0](https://github.com/jsdom/whatwg-url) - MIT
|
||||||
|
[which@2.0.2](https://github.com/isaacs/node-which) - ISC
|
||||||
|
[wordwrapjs@5.1.0](https://github.com/75lb/wordwrapjs) - MIT
|
||||||
|
[wrap-ansi@7.0.0](https://github.com/chalk/wrap-ansi) - MIT
|
||||||
|
[wrap-ansi@8.1.0](https://github.com/chalk/wrap-ansi) - MIT
|
||||||
|
[wrappy@1.0.2](https://github.com/npm/wrappy) - ISC
|
||||||
|
[write-file-atomic@4.0.2](https://github.com/npm/write-file-atomic) - ISC
|
||||||
|
[y18n@5.0.8](https://github.com/yargs/y18n) - ISC
|
||||||
|
[yallist@3.1.1](https://github.com/isaacs/yallist) - ISC
|
||||||
|
[yallist@5.0.0](https://github.com/isaacs/yallist) - BlueOak-1.0.0
|
||||||
|
[yaml@2.4.5](https://github.com/eemeli/yaml) - ISC
|
||||||
|
[yargs-parser@21.1.1](https://github.com/yargs/yargs-parser) - ISC
|
||||||
|
[yargs@17.7.2](https://github.com/yargs/yargs) - MIT
|
||||||
|
[yocto-queue@0.1.0](https://github.com/sindresorhus/yocto-queue) - MIT
|
||||||
14607	nodejs/RUST_THIRD_PARTY_LICENSES.html (new file; diff suppressed because it is too large)
@@ -273,7 +273,9 @@ export async function connect(
   let nativeProvider: NativeJsHeaderProvider | undefined;
   if (finalHeaderProvider) {
     if (typeof finalHeaderProvider === "function") {
-      nativeProvider = new NativeJsHeaderProvider(finalHeaderProvider);
+      nativeProvider = new NativeJsHeaderProvider(async () =>
+        finalHeaderProvider(),
+      );
     } else if (
       finalHeaderProvider &&
       typeof finalHeaderProvider.getHeaders === "function"
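For context, the hunk above re-wraps a bare header-provider function in an async closure before handing it to `NativeJsHeaderProvider`. A minimal usage sketch, assuming the public connect option is named `headerProvider` (the option name is not shown in this hunk; only the internal `finalHeaderProvider` variable is) and that `connect` is imported from `@lancedb/lancedb`:

```typescript
import { connect } from "@lancedb/lancedb";

async function main() {
  // Hypothetical token source, for illustration only.
  const fetchToken = async (): Promise<string> => "my-token";

  const db = await connect("db://my-database", {
    // A plain async function returning a header map takes the
    // `typeof finalHeaderProvider === "function"` branch above...
    headerProvider: async () => ({
      Authorization: `Bearer ${await fetchToken()}`,
    }),
    // ...while an object exposing getHeaders() would take the
    // `else if` branch instead.
  });
  console.log(await db.tableNames());
}

main();
```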
@@ -684,19 +684,17 @@ export class VectorQuery extends StandardQueryBase<NativeVectorQuery> {
   rerank(reranker: Reranker): VectorQuery {
     super.doCall((inner) =>
-      inner.rerank({
-        rerankHybrid: async (_, args) => {
-          const vecResults = await fromBufferToRecordBatch(args.vecResults);
-          const ftsResults = await fromBufferToRecordBatch(args.ftsResults);
-          const result = await reranker.rerankHybrid(
-            args.query,
-            vecResults as RecordBatch,
-            ftsResults as RecordBatch,
-          );
+      inner.rerank(async (args) => {
+        const vecResults = await fromBufferToRecordBatch(args.vecResults);
+        const ftsResults = await fromBufferToRecordBatch(args.ftsResults);
+        const result = await reranker.rerankHybrid(
+          args.query,
+          vecResults as RecordBatch,
+          ftsResults as RecordBatch,
+        );
 
-          const buffer = fromRecordBatchToBuffer(result);
-          return buffer;
-        },
+        const buffer = fromRecordBatchToBuffer(result);
+        return buffer;
       }),
     );
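The reshaped wrapper above forwards a single `args` object (the query plus IPC-encoded vector and FTS batches) straight into one native callback. A reranker passed to `rerank` only needs a `rerankHybrid` method with the signature the wrapper calls; a minimal sketch, where the pick-one strategy is illustrative and not LanceDB's scoring:

```typescript
import type { RecordBatch } from "apache-arrow";

// Shape inferred from the call above:
// rerankHybrid(query, vecResults, ftsResults) -> Promise<RecordBatch>.
const naiveReranker = {
  async rerankHybrid(
    query: string,
    vecResults: RecordBatch,
    ftsResults: RecordBatch,
  ): Promise<RecordBatch> {
    // Trivial strategy: prefer vector hits when any exist, otherwise
    // fall back to full-text hits. A real reranker would merge and rescore.
    return vecResults.numRows > 0 ? vecResults : ftsResults;
  },
};

// Usage (assuming an existing VectorQuery instance named `query`):
// query.rerank(naiveReranker);
```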
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-arm64",
-  "version": "0.26.0",
+  "version": "0.27.0-beta.2",
   "os": ["darwin"],
   "cpu": ["arm64"],
   "main": "lancedb.darwin-arm64.node",
@@ -8,5 +8,9 @@
   "license": "Apache-2.0",
   "engines": {
     "node": ">= 18"
+  },
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
   }
 }
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-gnu",
-  "version": "0.26.0",
+  "version": "0.27.0-beta.2",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-gnu.node",
@@ -9,5 +9,9 @@
   "engines": {
     "node": ">= 18"
   },
-  "libc": ["glibc"]
+  "libc": ["glibc"],
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
+  }
 }
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-musl",
-  "version": "0.26.0",
+  "version": "0.27.0-beta.2",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-musl.node",
@@ -9,5 +9,9 @@
   "engines": {
     "node": ">= 18"
   },
-  "libc": ["musl"]
+  "libc": ["musl"],
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
+  }
 }
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-gnu",
-  "version": "0.26.0",
+  "version": "0.27.0-beta.2",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-gnu.node",
@@ -9,5 +9,9 @@
   "engines": {
     "node": ">= 18"
   },
-  "libc": ["glibc"]
+  "libc": ["glibc"],
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
+  }
 }
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-musl",
-  "version": "0.26.0",
+  "version": "0.27.0-beta.2",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-musl.node",
@@ -9,5 +9,9 @@
   "engines": {
     "node": ">= 18"
   },
-  "libc": ["musl"]
+  "libc": ["musl"],
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
+  }
 }
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-arm64-msvc",
-  "version": "0.26.0",
+  "version": "0.27.0-beta.2",
   "os": [
     "win32"
   ],
@@ -14,5 +14,9 @@
   "license": "Apache-2.0",
   "engines": {
     "node": ">= 18"
+  },
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
   }
 }
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-x64-msvc",
-  "version": "0.26.0",
+  "version": "0.27.0-beta.2",
   "os": ["win32"],
   "cpu": ["x64"],
   "main": "lancedb.win32-x64-msvc.node",
@@ -8,5 +8,9 @@
   "license": "Apache-2.0",
   "engines": {
     "node": ">= 18"
+  },
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
   }
 }
1781	nodejs/package-lock.json (generated; diff suppressed because it is too large)
@@ -11,7 +11,7 @@
     "ann"
   ],
   "private": false,
-  "version": "0.26.0",
+  "version": "0.27.0-beta.2",
   "main": "dist/index.js",
   "exports": {
     ".": "./dist/index.js",
@@ -21,28 +21,29 @@
   },
   "types": "dist/index.d.ts",
   "napi": {
-    "name": "lancedb",
-    "triples": {
-      "defaults": false,
-      "additional": [
-        "aarch64-apple-darwin",
-        "x86_64-unknown-linux-gnu",
-        "aarch64-unknown-linux-gnu",
-        "x86_64-unknown-linux-musl",
-        "aarch64-unknown-linux-musl",
-        "x86_64-pc-windows-msvc",
-        "aarch64-pc-windows-msvc"
-      ]
-    }
+    "binaryName": "lancedb",
+    "targets": [
+      "aarch64-apple-darwin",
+      "x86_64-unknown-linux-gnu",
+      "aarch64-unknown-linux-gnu",
+      "x86_64-unknown-linux-musl",
+      "aarch64-unknown-linux-musl",
+      "x86_64-pc-windows-msvc",
+      "aarch64-pc-windows-msvc"
+    ]
   },
   "license": "Apache-2.0",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
+  },
   "devDependencies": {
     "@aws-sdk/client-dynamodb": "^3.33.0",
     "@aws-sdk/client-kms": "^3.33.0",
     "@aws-sdk/client-s3": "^3.33.0",
     "@biomejs/biome": "^1.7.3",
     "@jest/globals": "^29.7.0",
-    "@napi-rs/cli": "^2.18.3",
+    "@napi-rs/cli": "^3.5.1",
     "@types/axios": "^0.14.0",
     "@types/jest": "^29.1.2",
     "@types/node": "^22.7.4",
@@ -71,9 +72,9 @@
   "os": ["darwin", "linux", "win32"],
   "scripts": {
     "artifacts": "napi artifacts",
-    "build:debug": "napi build --platform --no-const-enum --dts ../lancedb/native.d.ts --js ../lancedb/native.js lancedb",
+    "build:debug": "napi build --platform --dts ../lancedb/native.d.ts --js ../lancedb/native.js --output-dir lancedb",
     "postbuild:debug": "shx mkdir -p dist && shx cp lancedb/*.node dist/",
-    "build:release": "napi build --platform --no-const-enum --release --dts ../lancedb/native.d.ts --js ../lancedb/native.js dist/",
+    "build:release": "napi build --platform --release --dts ../lancedb/native.d.ts --js ../lancedb/native.js --output-dir dist",
     "postbuild:release": "shx mkdir -p dist && shx cp lancedb/*.node dist/",
     "build": "npm run build:debug && npm run tsc",
     "build-release": "npm run build:release && npm run tsc",
@@ -87,7 +88,7 @@
     "prepublishOnly": "napi prepublish -t npm",
     "test": "jest --verbose",
     "integration": "S3_TEST=1 npm run test",
-    "universal": "napi universal",
+    "universal": "napi universalize",
     "version": "napi version"
   },
   "dependencies": {

@@ -13,6 +13,7 @@ use crate::header::JsHeaderProvider;
 use crate::table::Table;
 use crate::ConnectionOptions;
 use lancedb::connection::{ConnectBuilder, Connection as LanceDBConnection};
+
 use lancedb::ipc::{ipc_file_to_batches, ipc_file_to_schema};
 
 #[napi]

@@ -1,20 +1,19 @@
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
-use napi::{
-    bindgen_prelude::*,
-    threadsafe_function::{ErrorStrategy, ThreadsafeFunction},
-};
+use napi::{bindgen_prelude::*, threadsafe_function::ThreadsafeFunction};
 use napi_derive::napi;
 use std::collections::HashMap;
 use std::sync::Arc;
 
+type GetHeadersFn = ThreadsafeFunction<(), Promise<HashMap<String, String>>, (), Status, false>;
+
 /// JavaScript HeaderProvider implementation that wraps a JavaScript callback.
 /// This is the only native header provider - all header provider implementations
 /// should provide a JavaScript function that returns headers.
 #[napi]
 pub struct JsHeaderProvider {
-    get_headers_fn: Arc<ThreadsafeFunction<(), ErrorStrategy::CalleeHandled>>,
+    get_headers_fn: Arc<GetHeadersFn>,
 }
 
 impl Clone for JsHeaderProvider {
@@ -29,9 +28,12 @@ impl Clone for JsHeaderProvider {
 impl JsHeaderProvider {
     /// Create a new JsHeaderProvider from a JavaScript callback
     #[napi(constructor)]
-    pub fn new(get_headers_callback: JsFunction) -> Result<Self> {
+    pub fn new(
+        get_headers_callback: Function<(), Promise<HashMap<String, String>>>,
+    ) -> Result<Self> {
         let get_headers_fn = get_headers_callback
-            .create_threadsafe_function(0, |ctx| Ok(vec![ctx.value]))
+            .build_threadsafe_function()
+            .build()
             .map_err(|e| {
                 Error::new(
                     Status::GenericFailure,
@@ -51,7 +53,7 @@ impl lancedb::remote::HeaderProvider for JsHeaderProvider {
     async fn get_headers(&self) -> lancedb::error::Result<HashMap<String, String>> {
         // Call the JavaScript function asynchronously
         let promise: Promise<HashMap<String, String>> =
-            self.get_headers_fn.call_async(Ok(())).await.map_err(|e| {
+            self.get_headers_fn.call_async(()).await.map_err(|e| {
                 lancedb::error::Error::Runtime {
                     message: format!("Failed to call JavaScript get_headers: {}", e),
                 }

@@ -60,7 +60,7 @@ pub struct OpenTableOptions {
     pub storage_options: Option<HashMap<String, String>>,
 }
 
-#[napi::module_init]
+#[napi_derive::module_init]
 fn init() {
     let env = Env::new()
         .filter_or("LANCEDB_LOG", "warn")

@@ -20,8 +20,8 @@ use napi_derive::napi;
 use crate::error::convert_error;
 use crate::error::NapiErrorExt;
 use crate::iterator::RecordBatchIterator;
+use crate::rerankers::RerankHybridCallbackArgs;
 use crate::rerankers::Reranker;
-use crate::rerankers::RerankerCallbacks;
 use crate::util::{parse_distance_type, schema_to_buffer};
 
 #[napi]
@@ -42,7 +42,7 @@ impl Query {
     }
 
     #[napi]
-    pub fn full_text_search(&mut self, query: napi::JsObject) -> napi::Result<()> {
+    pub fn full_text_search(&mut self, query: Object) -> napi::Result<()> {
         let query = parse_fts_query(query)?;
         self.inner = self.inner.clone().full_text_search(query);
         Ok(())
@@ -235,7 +235,7 @@ impl VectorQuery {
     }
 
     #[napi]
-    pub fn full_text_search(&mut self, query: napi::JsObject) -> napi::Result<()> {
+    pub fn full_text_search(&mut self, query: Object) -> napi::Result<()> {
         let query = parse_fts_query(query)?;
         self.inner = self.inner.clone().full_text_search(query);
         Ok(())
@@ -272,11 +272,13 @@ impl VectorQuery {
     }
 
     #[napi]
-    pub fn rerank(&mut self, callbacks: RerankerCallbacks) {
-        self.inner = self
-            .inner
-            .clone()
-            .rerank(Arc::new(Reranker::new(callbacks)));
+    pub fn rerank(
+        &mut self,
+        rerank_hybrid: Function<RerankHybridCallbackArgs, Promise<Buffer>>,
+    ) -> napi::Result<()> {
+        let reranker = Reranker::new(rerank_hybrid)?;
+        self.inner = self.inner.clone().rerank(Arc::new(reranker));
+        Ok(())
     }
 
     #[napi(catch_unwind)]
@@ -523,12 +525,12 @@ impl JsFullTextQuery {
     }
 }
 
-fn parse_fts_query(query: napi::JsObject) -> napi::Result<FullTextSearchQuery> {
-    if let Ok(Some(query)) = query.get::<_, &JsFullTextQuery>("query") {
+fn parse_fts_query(query: Object) -> napi::Result<FullTextSearchQuery> {
+    if let Ok(Some(query)) = query.get::<&JsFullTextQuery>("query") {
         Ok(FullTextSearchQuery::new_query(query.inner.clone()))
-    } else if let Ok(Some(query_text)) = query.get::<_, String>("query") {
+    } else if let Ok(Some(query_text)) = query.get::<String>("query") {
         let mut query_text = query_text;
-        let columns = query.get::<_, Option<Vec<String>>>("columns")?.flatten();
+        let columns = query.get::<Option<Vec<String>>>("columns")?.flatten();
 
         let is_phrase =
             query_text.len() >= 2 && query_text.starts_with('"') && query_text.ends_with('"');

@@ -3,10 +3,7 @@
 
 use arrow_array::RecordBatch;
 use async_trait::async_trait;
-use napi::{
-    bindgen_prelude::*,
-    threadsafe_function::{ErrorStrategy, ThreadsafeFunction},
-};
+use napi::{bindgen_prelude::*, threadsafe_function::ThreadsafeFunction};
 use napi_derive::napi;
 
 use lancedb::ipc::batches_to_ipc_file;
@@ -15,27 +12,28 @@ use lancedb::{error::Error, ipc::ipc_file_to_batches};
 
 use crate::error::NapiErrorExt;
 
+type RerankHybridFn = ThreadsafeFunction<
+    RerankHybridCallbackArgs,
+    Promise<Buffer>,
+    RerankHybridCallbackArgs,
+    Status,
+    false,
+>;
+
 /// Reranker implementation that "wraps" a NodeJS Reranker implementation.
 /// This contains references to the callbacks that can be used to invoke the
 /// reranking methods on the NodeJS implementation and handles serializing the
 /// record batches to Arrow IPC buffers.
-#[napi]
 pub struct Reranker {
-    /// callback to the Javascript which will call the rerankHybrid method of
-    /// some Reranker implementation
-    rerank_hybrid: ThreadsafeFunction<RerankHybridCallbackArgs, ErrorStrategy::CalleeHandled>,
+    rerank_hybrid: RerankHybridFn,
 }
 
-#[napi]
 impl Reranker {
-    #[napi]
-    pub fn new(callbacks: RerankerCallbacks) -> Self {
-        let rerank_hybrid = callbacks
-            .rerank_hybrid
-            .create_threadsafe_function(0, move |ctx| Ok(vec![ctx.value]))
-            .unwrap();
-
-        Self { rerank_hybrid }
+    pub fn new(
+        rerank_hybrid: Function<RerankHybridCallbackArgs, Promise<Buffer>>,
+    ) -> napi::Result<Self> {
+        let rerank_hybrid = rerank_hybrid.build_threadsafe_function().build()?;
+        Ok(Self { rerank_hybrid })
     }
 }
 
@@ -49,16 +47,16 @@ impl lancedb::rerankers::Reranker for Reranker {
     ) -> lancedb::error::Result<RecordBatch> {
         let callback_args = RerankHybridCallbackArgs {
             query: query.to_string(),
-            vec_results: batches_to_ipc_file(&[vector_results])?,
-            fts_results: batches_to_ipc_file(&[fts_results])?,
+            vec_results: Buffer::from(batches_to_ipc_file(&[vector_results])?.as_ref()),
+            fts_results: Buffer::from(batches_to_ipc_file(&[fts_results])?.as_ref()),
         };
         let promised_buffer: Promise<Buffer> = self
             .rerank_hybrid
-            .call_async(Ok(callback_args))
+            .call_async(callback_args)
             .await
             .map_err(|e| Error::Runtime {
                 message: format!("napi error status={}, reason={}", e.status, e.reason),
             })?;
         let buffer = promised_buffer.await.map_err(|e| Error::Runtime {
             message: format!("napi error status={}, reason={}", e.status, e.reason),
         })?;
@@ -77,16 +75,11 @@ impl std::fmt::Debug for Reranker {
     }
 }
 
-#[napi(object)]
-pub struct RerankerCallbacks {
-    pub rerank_hybrid: JsFunction,
-}
-
 #[napi(object)]
 pub struct RerankHybridCallbackArgs {
     pub query: String,
-    pub vec_results: Vec<u8>,
-    pub fts_results: Vec<u8>,
+    pub vec_results: Buffer,
+    pub fts_results: Buffer,
 }
 
 fn buffer_to_record_batch(buffer: Buffer) -> Result<RecordBatch> {

@@ -96,7 +96,6 @@ impl napi::bindgen_prelude::FromNapiValue for Session {
     ) -> napi::Result<Self> {
         let object: napi::bindgen_prelude::ClassInstance<Self> =
             napi::bindgen_prelude::ClassInstance::from_napi_value(env, napi_val)?;
-        let copy = object.clone();
-        Ok(copy)
+        Ok((*object).clone())
     }
 }

@@ -71,6 +71,17 @@ impl Table {
     pub async fn add(&self, buf: Buffer, mode: String) -> napi::Result<AddResult> {
         let batches = ipc_file_to_batches(buf.to_vec())
             .map_err(|e| napi::Error::from_reason(format!("Failed to read IPC file: {}", e)))?;
+        let batches = batches
+            .into_iter()
+            .map(|batch| {
+                batch.map_err(|e| {
+                    napi::Error::from_reason(format!(
+                        "Failed to read record batch from IPC file: {}",
+                        e
+                    ))
+                })
+            })
+            .collect::<Result<Vec<_>>>()?;
         let mut op = self.inner_ref()?.add(batches);
 
         op = if mode == "append" {

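The table.rs hunk above stops assuming every decoded batch is `Ok`: each `Result<RecordBatch>` from the IPC reader is now mapped into a `napi::Error`, so a corrupt payload surfaces as a rejected promise on the JS side rather than a panic. A hedged sketch of what that looks like from TypeScript (table name and rows are illustrative):

```typescript
// Assuming `db` is an open Connection from @lancedb/lancedb.
const tbl = await db.openTable("my_table");
try {
  await tbl.add([{ id: 1, vector: [0.1, 0.2] }]);
} catch (err) {
  // With the change above, a bad record batch inside the IPC payload
  // arrives here as a normal error instead of crashing the process.
  console.error("add() failed:", err);
}
```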
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.29.1"
+current_version = "0.30.0-beta.2"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.

@@ -1,13 +1,13 @@
 [package]
 name = "lancedb-python"
-version = "0.29.1"
+version = "0.30.0-beta.2"
 edition.workspace = true
 description = "Python bindings for LanceDB"
 license.workspace = true
 repository.workspace = true
 keywords.workspace = true
 categories.workspace = true
-rust-version = "1.88.0"
+rust-version = "1.91.0"
 
 [lib]
 name = "_lancedb"

206	python/PYTHON_THIRD_PARTY_LICENSES.md (new file)
@@ -0,0 +1,206 @@
| Name | Version | License | URL |
|------|---------|---------|-----|
| InstructorEmbedding | 1.0.1 | Apache License 2.0 | https://github.com/HKUNLP/instructor-embedding |
| Jinja2 | 3.1.6 | BSD License | https://github.com/pallets/jinja/ |
| Markdown | 3.10.2 | BSD-3-Clause | https://Python-Markdown.github.io/ |
| MarkupSafe | 3.0.3 | BSD-3-Clause | https://github.com/pallets/markupsafe/ |
| PyJWT | 2.11.0 | MIT | https://github.com/jpadilla/pyjwt |
| PyYAML | 6.0.3 | MIT License | https://pyyaml.org/ |
| Pygments | 2.19.2 | BSD License | https://pygments.org |
| accelerate | 1.12.0 | Apache Software License | https://github.com/huggingface/accelerate |
| adlfs | 2026.2.0 | BSD License | UNKNOWN |
| aiohappyeyeballs | 2.6.1 | Python Software Foundation License | https://github.com/aio-libs/aiohappyeyeballs |
| aiohttp | 3.13.3 | Apache-2.0 AND MIT | https://github.com/aio-libs/aiohttp |
| aiosignal | 1.4.0 | Apache Software License | https://github.com/aio-libs/aiosignal |
| annotated-types | 0.7.0 | MIT License | https://github.com/annotated-types/annotated-types |
| anyio | 4.12.1 | MIT | https://anyio.readthedocs.io/en/stable/versionhistory.html |
| appnope | 0.1.4 | BSD License | http://github.com/minrk/appnope |
| asttokens | 3.0.1 | Apache 2.0 | https://github.com/gristlabs/asttokens |
| attrs | 25.4.0 | MIT | https://www.attrs.org/en/stable/changelog.html |
| awscli | 1.44.35 | Apache Software License | http://aws.amazon.com/cli/ |
| azure-core | 1.38.0 | MIT License | https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/core/azure-core |
| azure-datalake-store | 0.0.53 | MIT License | https://github.com/Azure/azure-data-lake-store-python |
| azure-identity | 1.25.1 | MIT | https://github.com/Azure/azure-sdk-for-python |
| azure-storage-blob | 12.28.0 | MIT License | https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob |
| babel | 2.18.0 | BSD License | https://babel.pocoo.org/ |
| backrefs | 6.1 | MIT | https://github.com/facelessuser/backrefs |
| beautifulsoup4 | 4.14.3 | MIT License | https://www.crummy.com/software/BeautifulSoup/bs4/ |
| bleach | 6.3.0 | Apache Software License | https://github.com/mozilla/bleach |
| boto3 | 1.42.45 | Apache-2.0 | https://github.com/boto/boto3 |
| botocore | 1.42.45 | Apache-2.0 | https://github.com/boto/botocore |
| cachetools | 7.0.0 | MIT | https://github.com/tkem/cachetools/ |
| certifi | 2026.1.4 | Mozilla Public License 2.0 (MPL 2.0) | https://github.com/certifi/python-certifi |
| cffi | 2.0.0 | MIT | https://cffi.readthedocs.io/en/latest/whatsnew.html |
| cfgv | 3.5.0 | MIT | https://github.com/asottile/cfgv |
| charset-normalizer | 3.4.4 | MIT | https://github.com/jawah/charset_normalizer/blob/master/CHANGELOG.md |
| click | 8.3.1 | BSD-3-Clause | https://github.com/pallets/click/ |
| cohere | 5.20.4 | MIT License | https://github.com/cohere-ai/cohere-python |
| colorama | 0.4.6 | BSD License | https://github.com/tartley/colorama |
| colpali_engine | 0.3.13 | MIT License | https://github.com/illuin-tech/colpali |
| comm | 0.2.3 | BSD License | https://github.com/ipython/comm |
| cryptography | 46.0.4 | Apache-2.0 OR BSD-3-Clause | https://github.com/pyca/cryptography |
| datafusion | 51.0.0 | Apache Software License | https://datafusion.apache.org/python |
| debugpy | 1.8.20 | MIT License | https://aka.ms/debugpy |
| decorator | 5.2.1 | BSD License | UNKNOWN |
| defusedxml | 0.7.1 | Python Software Foundation License | https://github.com/tiran/defusedxml |
| deprecation | 2.1.0 | Apache Software License | http://deprecation.readthedocs.io/ |
| distlib | 0.4.0 | Python Software Foundation License | https://github.com/pypa/distlib |
| distro | 1.9.0 | Apache Software License | https://github.com/python-distro/distro |
| docutils | 0.19 | BSD License; GNU General Public License (GPL); Public Domain; Python Software Foundation License | https://docutils.sourceforge.io/ |
| duckdb | 1.4.4 | MIT License | https://github.com/duckdb/duckdb-python |
| executing | 2.2.1 | MIT License | https://github.com/alexmojaki/executing |
| fastavro | 1.12.1 | MIT | https://github.com/fastavro/fastavro |
| fastjsonschema | 2.21.2 | BSD License | https://github.com/horejsek/python-fastjsonschema |
| filelock | 3.20.3 | Unlicense | https://github.com/tox-dev/py-filelock |
| frozenlist | 1.8.0 | Apache-2.0 | https://github.com/aio-libs/frozenlist |
| fsspec | 2026.2.0 | BSD-3-Clause | https://github.com/fsspec/filesystem_spec |
| ftfy | 6.3.1 | Apache-2.0 | https://ftfy.readthedocs.io/en/latest/ |
| ghp-import | 2.1.0 | Apache Software License | https://github.com/c-w/ghp-import |
| google-ai-generativelanguage | 0.6.15 | Apache Software License | https://github.com/googleapis/google-cloud-python/tree/main/packages/google-ai-generativelanguage |
| google-api-core | 2.25.2 | Apache Software License | https://github.com/googleapis/python-api-core |
| google-api-python-client | 2.189.0 | Apache Software License | https://github.com/googleapis/google-api-python-client/ |
| google-auth | 2.48.0 | Apache Software License | https://github.com/googleapis/google-auth-library-python |
| google-auth-httplib2 | 0.3.0 | Apache Software License | https://github.com/GoogleCloudPlatform/google-auth-library-python-httplib2 |
| google-generativeai | 0.8.6 | Apache Software License | https://github.com/google/generative-ai-python |
| googleapis-common-protos | 1.72.0 | Apache Software License | https://github.com/googleapis/google-cloud-python/tree/main/packages/googleapis-common-protos |
| griffe | 2.0.0 | ISC | https://mkdocstrings.github.io/griffe |
| griffecli | 2.0.0 | ISC | UNKNOWN |
| griffelib | 2.0.0 | ISC | UNKNOWN |
| grpcio | 1.78.0 | Apache-2.0 | https://grpc.io |
| grpcio-status | 1.71.2 | Apache Software License | https://grpc.io |
| h11 | 0.16.0 | MIT License | https://github.com/python-hyper/h11 |
| hf-xet | 1.2.0 | Apache-2.0 | https://github.com/huggingface/xet-core |
| httpcore | 1.0.9 | BSD-3-Clause | https://www.encode.io/httpcore/ |
| httplib2 | 0.31.2 | MIT License | https://github.com/httplib2/httplib2 |
| httpx | 0.28.1 | BSD License | https://github.com/encode/httpx |
| huggingface_hub | 0.36.2 | Apache Software License | https://github.com/huggingface/huggingface_hub |
| ibm-cos-sdk | 2.14.3 | Apache Software License | https://github.com/ibm/ibm-cos-sdk-python |
| ibm-cos-sdk-core | 2.14.3 | Apache Software License | https://github.com/ibm/ibm-cos-sdk-python-core |
| ibm-cos-sdk-s3transfer | 2.14.3 | Apache Software License | https://github.com/IBM/ibm-cos-sdk-python-s3transfer |
| ibm_watsonx_ai | 1.5.1 | BSD License | https://ibm.github.io/watsonx-ai-python-sdk/changelog.html |
| identify | 2.6.16 | MIT | https://github.com/pre-commit/identify |
| idna | 3.11 | BSD-3-Clause | https://github.com/kjd/idna |
| iniconfig | 2.3.0 | MIT | https://github.com/pytest-dev/iniconfig |
| ipykernel | 6.31.0 | BSD-3-Clause | https://ipython.org |
| ipython | 9.10.0 | BSD-3-Clause | https://ipython.org |
| ipython_pygments_lexers | 1.1.1 | BSD License | https://github.com/ipython/ipython-pygments-lexers |
| isodate | 0.7.2 | BSD License | https://github.com/gweis/isodate/ |
| jedi | 0.19.2 | MIT License | https://github.com/davidhalter/jedi |
| jiter | 0.13.0 | MIT License | https://github.com/pydantic/jiter/ |
| jmespath | 1.0.1 | MIT License | https://github.com/jmespath/jmespath.py |
| joblib | 1.5.3 | BSD-3-Clause | https://joblib.readthedocs.io |
| jsonschema | 4.26.0 | MIT | https://github.com/python-jsonschema/jsonschema |
| jsonschema-specifications | 2025.9.1 | MIT | https://github.com/python-jsonschema/jsonschema-specifications |
| jupyter_client | 8.8.0 | BSD License | https://jupyter.org |
| jupyter_core | 5.9.1 | BSD-3-Clause | https://jupyter.org |
| jupyterlab_pygments | 0.3.0 | BSD License | https://github.com/jupyterlab/jupyterlab_pygments |
| jupytext | 1.19.1 | MIT License | https://github.com/mwouts/jupytext |
|
||||||
|
| lance-namespace | 0.4.5 | Apache-2.0 | https://github.com/lance-format/lance-namespace |
|
||||||
|
| lance-namespace-urllib3-client | 0.4.5 | Apache-2.0 | https://github.com/lance-format/lance-namespace |
|
||||||
|
| lancedb | 0.29.2 | Apache Software License | https://github.com/lancedb/lancedb |
|
||||||
|
| lomond | 0.3.3 | BSD License | https://github.com/wildfoundry/dataplicity-lomond |
|
||||||
|
| markdown-it-py | 4.0.0 | MIT License | https://github.com/executablebooks/markdown-it-py |
|
||||||
|
| matplotlib-inline | 0.2.1 | UNKNOWN | https://github.com/ipython/matplotlib-inline |
|
||||||
|
| mdit-py-plugins | 0.5.0 | MIT License | https://github.com/executablebooks/mdit-py-plugins |
|
||||||
|
| mdurl | 0.1.2 | MIT License | https://github.com/executablebooks/mdurl |
|
||||||
|
| mergedeep | 1.3.4 | MIT License | https://github.com/clarketm/mergedeep |
|
||||||
|
| mistune | 3.2.0 | BSD License | https://github.com/lepture/mistune |
|
||||||
|
| mkdocs | 1.6.1 | BSD-2-Clause | https://github.com/mkdocs/mkdocs |
|
||||||
|
| mkdocs-autorefs | 1.4.3 | ISC | https://mkdocstrings.github.io/autorefs |
|
||||||
|
| mkdocs-get-deps | 0.2.0 | MIT | https://github.com/mkdocs/get-deps |
|
||||||
|
| mkdocs-jupyter | 0.25.1 | Apache-2.0 | https://github.com/danielfrg/mkdocs-jupyter |
|
||||||
|
| mkdocs-material | 9.7.1 | MIT | https://github.com/squidfunk/mkdocs-material |
|
||||||
|
| mkdocs-material-extensions | 1.3.1 | MIT | https://github.com/facelessuser/mkdocs-material-extensions |
|
||||||
|
| mkdocstrings | 1.0.3 | ISC | https://mkdocstrings.github.io |
|
||||||
|
| mkdocstrings-python | 2.0.2 | ISC | https://mkdocstrings.github.io/python |
|
||||||
|
| mpmath | 1.3.0 | BSD License | http://mpmath.org/ |
|
||||||
|
| msal | 1.34.0 | MIT License | https://github.com/AzureAD/microsoft-authentication-library-for-python |
|
||||||
|
| msal-extensions | 1.3.1 | MIT License | https://github.com/AzureAD/microsoft-authentication-extensions-for-python/releases |
|
||||||
|
| multidict | 6.7.1 | Apache License 2.0 | https://github.com/aio-libs/multidict |
|
||||||
|
| nbclient | 0.10.4 | BSD License | https://jupyter.org |
|
||||||
|
| nbconvert | 7.17.0 | BSD License | https://jupyter.org |
|
||||||
|
| nbformat | 5.10.4 | BSD License | https://jupyter.org |
|
||||||
|
| nest-asyncio | 1.6.0 | BSD License | https://github.com/erdewit/nest_asyncio |
|
||||||
|
| networkx | 3.6.1 | BSD-3-Clause | https://networkx.org/ |
|
||||||
|
| nodeenv | 1.10.0 | BSD License | https://github.com/ekalinin/nodeenv |
|
||||||
|
| numpy | 2.4.2 | BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0 | https://numpy.org |
|
||||||
|
| ollama | 0.6.1 | MIT | https://ollama.com |
|
||||||
|
| open_clip_torch | 3.2.0 | MIT License | https://github.com/mlfoundations/open_clip |
|
||||||
|
| openai | 2.18.0 | Apache Software License | https://github.com/openai/openai-python |
|
||||||
|
| packaging | 26.0 | Apache-2.0 OR BSD-2-Clause | https://github.com/pypa/packaging |
|
||||||
|
| paginate | 0.5.7 | MIT License | https://github.com/Signum/paginate |
|
||||||
|
| pandas | 2.3.3 | BSD License | https://pandas.pydata.org |
|
||||||
|
| pandocfilters | 1.5.1 | BSD License | http://github.com/jgm/pandocfilters |
|
||||||
|
| parso | 0.8.6 | MIT License | https://github.com/davidhalter/parso |
|
||||||
|
| pathspec | 1.0.4 | Mozilla Public License 2.0 (MPL 2.0) | UNKNOWN |
|
||||||
|
| peft | 0.17.1 | Apache Software License | https://github.com/huggingface/peft |
|
||||||
|
| pexpect | 4.9.0 | ISC License (ISCL) | https://pexpect.readthedocs.io/ |
|
||||||
|
| pillow | 12.1.0 | MIT-CMU | https://python-pillow.github.io |
|
||||||
|
| platformdirs | 4.5.1 | MIT | https://github.com/tox-dev/platformdirs |
|
||||||
|
| pluggy | 1.6.0 | MIT License | UNKNOWN |
|
||||||
|
| polars | 1.3.0 | MIT License | https://www.pola.rs/ |
|
||||||
|
| pre_commit | 4.5.1 | MIT | https://github.com/pre-commit/pre-commit |
|
||||||
|
| prompt_toolkit | 3.0.52 | BSD License | https://github.com/prompt-toolkit/python-prompt-toolkit |
|
||||||
|
| propcache | 0.4.1 | Apache Software License | https://github.com/aio-libs/propcache |
|
||||||
|
| proto-plus | 1.27.1 | Apache Software License | https://github.com/googleapis/proto-plus-python |
|
||||||
|
| protobuf | 5.29.6 | 3-Clause BSD License | https://developers.google.com/protocol-buffers/ |
|
||||||
|
| psutil | 7.2.2 | BSD-3-Clause | https://github.com/giampaolo/psutil |
|
||||||
|
| ptyprocess | 0.7.0 | ISC License (ISCL) | https://github.com/pexpect/ptyprocess |
|
||||||
|
| pure_eval | 0.2.3 | MIT License | http://github.com/alexmojaki/pure_eval |
|
||||||
|
| pyarrow | 23.0.0 | Apache-2.0 | https://arrow.apache.org/ |
|
||||||
|
| pyarrow-stubs | 20.0.0.20251215 | BSD-2-Clause | https://github.com/zen-xu/pyarrow-stubs |
|
||||||
|
| pyasn1 | 0.6.2 | BSD-2-Clause | https://github.com/pyasn1/pyasn1 |
|
||||||
|
| pyasn1_modules | 0.4.2 | BSD License | https://github.com/pyasn1/pyasn1-modules |
|
||||||
|
| pycparser | 3.0 | BSD-3-Clause | https://github.com/eliben/pycparser |
|
||||||
|
| pydantic | 2.12.5 | MIT | https://github.com/pydantic/pydantic |
|
||||||
|
| pydantic_core | 2.41.5 | MIT | https://github.com/pydantic/pydantic-core |
|
||||||
|
| pylance | 2.0.0 | Apache Software License | UNKNOWN |
|
||||||
|
| pymdown-extensions | 10.20.1 | MIT | https://github.com/facelessuser/pymdown-extensions |
|
||||||
|
| pyparsing | 3.3.2 | MIT | https://github.com/pyparsing/pyparsing/ |
|
||||||
|
| pyright | 1.1.408 | MIT | https://github.com/RobertCraigie/pyright-python |
|
||||||
|
| pytest | 9.0.2 | MIT | https://docs.pytest.org/en/latest/ |
|
||||||
|
| pytest-asyncio | 1.3.0 | Apache-2.0 | https://github.com/pytest-dev/pytest-asyncio |
|
||||||
|
| pytest-mock | 3.15.1 | MIT License | https://github.com/pytest-dev/pytest-mock/ |
|
||||||
|
| python-dateutil | 2.9.0.post0 | Apache Software License; BSD License | https://github.com/dateutil/dateutil |
|
||||||
|
| pytz | 2025.2 | MIT License | http://pythonhosted.org/pytz |
|
||||||
|
| pyyaml_env_tag | 1.1 | MIT | https://github.com/waylan/pyyaml-env-tag |
|
||||||
|
| pyzmq | 27.1.0 | BSD License | https://pyzmq.readthedocs.org |
|
||||||
|
| referencing | 0.37.0 | MIT | https://github.com/python-jsonschema/referencing |
|
||||||
|
| regex | 2026.1.15 | Apache-2.0 AND CNRI-Python | https://github.com/mrabarnett/mrab-regex |
|
||||||
|
| requests | 2.32.5 | Apache Software License | https://requests.readthedocs.io |
|
||||||
|
| rpds-py | 0.30.0 | MIT | https://github.com/crate-py/rpds |
|
||||||
|
| rsa | 4.7.2 | Apache Software License | https://stuvel.eu/rsa |
|
||||||
|
| ruff | 0.15.0 | MIT License | https://docs.astral.sh/ruff |
|
||||||
|
| s3transfer | 0.16.0 | Apache Software License | https://github.com/boto/s3transfer |
|
||||||
|
| safetensors | 0.7.0 | Apache Software License | https://github.com/huggingface/safetensors |
|
||||||
|
| scikit-learn | 1.8.0 | BSD-3-Clause | https://scikit-learn.org |
|
||||||
|
| scipy | 1.17.0 | BSD License | https://scipy.org/ |
|
||||||
|
| sentence-transformers | 5.2.2 | Apache Software License | https://www.SBERT.net |
|
||||||
|
| sentencepiece | 0.2.1 | UNKNOWN | https://github.com/google/sentencepiece |
|
||||||
|
| six | 1.17.0 | MIT License | https://github.com/benjaminp/six |
|
||||||
|
| sniffio | 1.3.1 | Apache Software License; MIT License | https://github.com/python-trio/sniffio |
|
||||||
|
| soupsieve | 2.8.3 | MIT | https://github.com/facelessuser/soupsieve |
|
||||||
|
| stack-data | 0.6.3 | MIT License | http://github.com/alexmojaki/stack_data |
|
||||||
|
| sympy | 1.14.0 | BSD License | https://sympy.org |
|
||||||
|
| tabulate | 0.9.0 | MIT License | https://github.com/astanin/python-tabulate |
|
||||||
|
| tantivy | 0.25.1 | UNKNOWN | UNKNOWN |
|
||||||
|
| threadpoolctl | 3.6.0 | BSD License | https://github.com/joblib/threadpoolctl |
|
||||||
|
| timm | 1.0.24 | Apache Software License | https://github.com/huggingface/pytorch-image-models |
|
||||||
|
| tinycss2 | 1.4.0 | BSD License | https://www.courtbouillon.org/tinycss2 |
|
||||||
|
| tokenizers | 0.22.2 | Apache Software License | https://github.com/huggingface/tokenizers |
|
||||||
|
| torch | 2.8.0 | BSD License | https://pytorch.org/ |
|
||||||
|
| torchvision | 0.23.0 | BSD | https://github.com/pytorch/vision |
|
||||||
|
| tornado | 6.5.4 | Apache Software License | http://www.tornadoweb.org/ |
|
||||||
|
| tqdm | 4.67.3 | MPL-2.0 AND MIT | https://tqdm.github.io |
|
||||||
|
| traitlets | 5.14.3 | BSD License | https://github.com/ipython/traitlets |
|
||||||
|
| transformers | 4.57.6 | Apache Software License | https://github.com/huggingface/transformers |
|
||||||
|
| types-requests | 2.32.4.20260107 | Apache-2.0 | https://github.com/python/typeshed |
|
||||||
|
| typing-inspection | 0.4.2 | MIT | https://github.com/pydantic/typing-inspection |
|
||||||
|
| typing_extensions | 4.15.0 | PSF-2.0 | https://github.com/python/typing_extensions |
|
||||||
|
| tzdata | 2025.3 | Apache-2.0 | https://github.com/python/tzdata |
|
||||||
|
| uritemplate | 4.2.0 | BSD 3-Clause OR Apache-2.0 | https://uritemplate.readthedocs.org |
|
||||||
|
| urllib3 | 2.6.3 | MIT | https://github.com/urllib3/urllib3/blob/main/CHANGES.rst |
|
||||||
|
| virtualenv | 20.36.1 | MIT | https://github.com/pypa/virtualenv |
|
||||||
|
| watchdog | 6.0.0 | Apache Software License | https://github.com/gorakhargosh/watchdog |
|
||||||
|
| webencodings | 0.5.1 | BSD License | https://github.com/SimonSapin/python-webencodings |
|
||||||
|
| yarl | 1.22.0 | Apache Software License | https://github.com/aio-libs/yarl |
|
||||||
python/RUST_THIRD_PARTY_LICENSES.html (new file, 14687 lines)
File diff suppressed because it is too large
@@ -61,7 +61,7 @@ tests = [
     "pyarrow-stubs",
     "pylance>=1.0.0b14",
     "requests",
-    "datafusion",
+    "datafusion<52",
 ]
 dev = [
     "ruff",
@@ -1,8 +1,10 @@
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
+from functools import singledispatch
 from typing import List, Optional, Tuple, Union
 
+from lancedb.pydantic import LanceModel, model_to_dict
 import pyarrow as pa
 
 from ._lancedb import RecordBatchStream
@@ -80,3 +82,32 @@ def peek_reader(
         yield from reader
 
     return batch, pa.RecordBatchReader.from_batches(batch.schema, all_batches())
+
+
+@singledispatch
+def to_arrow(data) -> pa.Table:
+    """Convert a single data object to a pa.Table."""
+    raise NotImplementedError(f"to_arrow not implemented for type {type(data)}")
+
+
+@to_arrow.register(pa.RecordBatch)
+def _arrow_from_batch(data: pa.RecordBatch) -> pa.Table:
+    return pa.Table.from_batches([data])
+
+
+@to_arrow.register(pa.Table)
+def _arrow_from_table(data: pa.Table) -> pa.Table:
+    return data
+
+
+@to_arrow.register(list)
+def _arrow_from_list(data: list) -> pa.Table:
+    if not data:
+        raise ValueError("Cannot create table from empty list without a schema")
+
+    if isinstance(data[0], LanceModel):
+        schema = data[0].__class__.to_arrow_schema()
+        dicts = [model_to_dict(d) for d in data]
+        return pa.Table.from_pylist(dicts, schema=schema)
+
+    return pa.Table.from_pylist(data)
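A short, hedged sketch of how the new `to_arrow` dispatch behaves once this hunk is applied (the `lancedb.arrow` import path comes from the hunk itself; nothing else is assumed):

```python
import pyarrow as pa

from lancedb.arrow import to_arrow  # added by the hunk above

# A RecordBatch routes through the pa.RecordBatch converter.
batch = pa.RecordBatch.from_arrays([pa.array([1, 2])], names=["id"])
assert isinstance(to_arrow(batch), pa.Table)

# A list of plain dicts goes through pa.Table.from_pylist.
assert to_arrow([{"id": 3}, {"id": 4}]).num_rows == 2

# Unregistered types fall through to the NotImplementedError default.
try:
    to_arrow(object())
except NotImplementedError as exc:
    print(exc)
```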
@@ -2,6 +2,7 @@
 # SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
 
+import warnings
 from typing import List, Union
 
 import numpy as np
@@ -15,6 +16,8 @@ from .utils import weak_lru
 @register("gte-text")
 class GteEmbeddings(TextEmbeddingFunction):
     """
+    Deprecated: GTE embeddings should be used through sentence-transformers.
+
     An embedding function that uses GTE-LARGE MLX format(for Apple silicon devices only)
     as well as the standard cpu/gpu version from: https://huggingface.co/thenlper/gte-large.
 
@@ -61,6 +64,13 @@ class GteEmbeddings(TextEmbeddingFunction):
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
+        warnings.warn(
+            "GTE embeddings as a standalone embedding function are deprecated. "
+            "Use the 'sentence-transformers' embedding function with a GTE model "
+            "instead.",
+            DeprecationWarning,
+            stacklevel=3,
+        )
         self._ndims = None
         if kwargs:
             self.mlx = kwargs.get("mlx", False)
@@ -110,6 +110,9 @@ class OpenAIEmbeddings(TextEmbeddingFunction):
             valid_embeddings = {
                 idx: v.embedding for v, idx in zip(rs.data, valid_indices)
             }
+        except openai.AuthenticationError:
+            logging.error("Authentication failed: Invalid API key provided")
+            raise
         except openai.BadRequestError:
             logging.exception("Bad request: %s", texts)
             return [None] * len(texts)
@@ -6,6 +6,7 @@ import io
 import os
 from typing import TYPE_CHECKING, List, Union
 import urllib.parse as urlparse
+import warnings
 
 import numpy as np
 import pyarrow as pa
@@ -24,6 +25,7 @@ if TYPE_CHECKING:
 
 @register("siglip")
 class SigLipEmbeddings(EmbeddingFunction):
+    # Deprecated: prefer CLIP embeddings via `open-clip`.
     model_name: str = "google/siglip-base-patch16-224"
     device: str = "cpu"
     batch_size: int = 64
@@ -36,6 +38,12 @@ class SigLipEmbeddings(EmbeddingFunction):
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
+        warnings.warn(
+            "SigLip embeddings are deprecated. Use CLIP embeddings via the "
+            "'open-clip' embedding function instead.",
+            DeprecationWarning,
+            stacklevel=3,
+        )
         transformers = attempt_import_or_raise("transformers")
         self._torch = attempt_import_or_raise("torch")
 
@@ -269,6 +269,11 @@ def retry_with_exponential_backoff(
             # and say that it is assumed that if this portion errors out, it's due
             # to rate limit but the user should check the error message to be sure.
             except Exception as e:  # noqa: PERF203
+                # Don't retry on authentication errors (e.g., OpenAI 401)
+                # These are permanent failures that won't be fixed by retrying
+                if _is_non_retryable_error(e):
+                    raise
+
                 num_retries += 1
 
                 if num_retries > max_retries:
@@ -289,6 +294,29 @@ def retry_with_exponential_backoff(
     return wrapper
 
 
+def _is_non_retryable_error(error: Exception) -> bool:
+    """Check if an error should not be retried.
+
+    Args:
+        error: The exception to check
+
+    Returns:
+        True if the error should not be retried, False otherwise
+    """
+    # Check for OpenAI authentication errors
+    error_type = type(error).__name__
+    if error_type == "AuthenticationError":
+        return True
+
+    # Check for other common non-retryable HTTP status codes
+    # 401 Unauthorized, 403 Forbidden
+    if hasattr(error, "status_code"):
+        if error.status_code in (401, 403):
+            return True
+
+    return False
+
+
 def url_retrieve(url: str):
     """
     Parameters
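The type-name check above matters because it detects third-party exceptions without importing the library. A minimal sketch, assuming only the hunk above; the stand-in exception class is hypothetical, not OpenAI's:

```python
from lancedb.embeddings.utils import retry_with_exponential_backoff


class AuthenticationError(Exception):
    """Stand-in for openai.AuthenticationError; matched by type name."""

    status_code = 401  # also matched by the status_code fallback


@retry_with_exponential_backoff
def call_api():
    raise AuthenticationError("Invalid API key")


try:
    call_api()
except AuthenticationError:
    # Raised on the first attempt; no backoff sleeps were performed.
    print("not retried")
```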
@@ -44,7 +44,7 @@ from lance_namespace import (
     ListNamespacesRequest,
     CreateNamespaceRequest,
     DropNamespaceRequest,
-    CreateEmptyTableRequest,
+    DeclareTableRequest,
 )
 from lancedb.table import AsyncTable, LanceTable, Table
 from lancedb.util import validate_table_name
@@ -318,20 +318,20 @@ class LanceNamespaceDBConnection(DBConnection):
 
         if location is None:
             # Table doesn't exist or mode is "create", reserve a new location
-            create_empty_request = CreateEmptyTableRequest(
+            declare_request = DeclareTableRequest(
                 id=table_id,
                 location=None,
                 properties=self.storage_options if self.storage_options else None,
             )
-            create_empty_response = self._ns.create_empty_table(create_empty_request)
+            declare_response = self._ns.declare_table(declare_request)
 
-            if not create_empty_response.location:
+            if not declare_response.location:
                 raise ValueError(
-                    "Table location is missing from create_empty_table response"
+                    "Table location is missing from declare_table response"
                 )
 
-            location = create_empty_response.location
-            namespace_storage_options = create_empty_response.storage_options
+            location = declare_response.location
+            namespace_storage_options = declare_response.storage_options
 
         # Merge storage options: self.storage_options < user options < namespace options
         merged_storage_options = dict(self.storage_options)
@@ -759,20 +759,20 @@ class AsyncLanceNamespaceDBConnection:
 
         if location is None:
             # Table doesn't exist or mode is "create", reserve a new location
-            create_empty_request = CreateEmptyTableRequest(
+            declare_request = DeclareTableRequest(
                 id=table_id,
                 location=None,
                 properties=self.storage_options if self.storage_options else None,
             )
-            create_empty_response = self._ns.create_empty_table(create_empty_request)
+            declare_response = self._ns.declare_table(declare_request)
 
-            if not create_empty_response.location:
+            if not declare_response.location:
                 raise ValueError(
-                    "Table location is missing from create_empty_table response"
+                    "Table location is missing from declare_table response"
                 )
 
-            location = create_empty_response.location
-            namespace_storage_options = create_empty_response.storage_options
+            location = declare_response.location
+            namespace_storage_options = declare_response.storage_options
 
         # Merge storage options: self.storage_options < user options < namespace options
         merged_storage_options = dict(self.storage_options)
@@ -9,7 +9,7 @@ import json
 from ._lancedb import async_permutation_builder, PermutationReader
 from .table import LanceTable
 from .background_loop import LOOP
-from .util import batch_to_tensor
+from .util import batch_to_tensor, batch_to_tensor_rows
 from typing import Any, Callable, Iterator, Literal, Optional, TYPE_CHECKING, Union
 
 if TYPE_CHECKING:
@@ -333,7 +333,11 @@ class Transforms:
         """
 
     @staticmethod
-    def arrow2python(batch: pa.RecordBatch) -> dict[str, list[Any]]:
+    def arrow2python(batch: pa.RecordBatch) -> list[dict[str, Any]]:
+        return batch.to_pylist()
+
+    @staticmethod
+    def arrow2pythoncol(batch: pa.RecordBatch) -> dict[str, list[Any]]:
         return batch.to_pydict()
 
     @staticmethod
@@ -687,7 +691,17 @@ class Permutation:
             return
 
     def with_format(
-        self, format: Literal["numpy", "python", "pandas", "arrow", "torch", "polars"]
+        self,
+        format: Literal[
+            "numpy",
+            "python",
+            "python_col",
+            "pandas",
+            "arrow",
+            "torch",
+            "torch_col",
+            "polars",
+        ],
     ) -> "Permutation":
         """
         Set the format for batches
@@ -696,16 +710,18 @@ class Permutation:
 
         The format can be one of:
         - "numpy" - the batch will be a dict of numpy arrays (one per column)
-        - "python" - the batch will be a dict of lists (one per column)
+        - "python" - the batch will be a list of dicts (one per row)
+        - "python_col" - the batch will be a dict of lists (one entry per column)
         - "pandas" - the batch will be a pandas DataFrame
         - "arrow" - the batch will be a pyarrow RecordBatch
-        - "torch" - the batch will be a two dimensional torch tensor
+        - "torch" - the batch will be a list of tensors, one per row
+        - "torch_col" - the batch will be a 2D torch tensor (first dim indexes columns)
         - "polars" - the batch will be a polars DataFrame
 
         Conversion may or may not involve a data copy. Lance uses Arrow internally
-        and so it is able to zero-copy to the arrow and polars.
+        and so it is able to zero-copy to the arrow and polars formats.
 
-        Conversion to torch will be zero-copy but will only support a subset of data
+        Conversion to torch_col will be zero-copy but will only support a subset of data
         types (numeric types).
 
         Conversion to numpy and/or pandas will typically be zero-copy for numeric
@@ -718,6 +734,8 @@ class Permutation:
         assert format is not None, "format is required"
         if format == "python":
            return self.with_transform(Transforms.arrow2python)
+        if format == "python_col":
+            return self.with_transform(Transforms.arrow2pythoncol)
         elif format == "numpy":
             return self.with_transform(Transforms.arrow2numpy)
         elif format == "pandas":
@@ -725,6 +743,8 @@ class Permutation:
         elif format == "arrow":
             return self.with_transform(Transforms.arrow2arrow)
         elif format == "torch":
+            return self.with_transform(batch_to_tensor_rows)
+        elif format == "torch_col":
             return self.with_transform(batch_to_tensor)
         elif format == "polars":
             return self.with_transform(Transforms.arrow2polars())
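The two Python formats map directly onto pyarrow's own conversions (`to_pylist` for row-major, `to_pydict` for column-major), which a standalone sketch makes concrete:

```python
import pyarrow as pa

batch = pa.RecordBatch.from_arrays(
    [pa.array([1, 2]), pa.array(["a", "b"])], names=["id", "value"]
)

# "python" (Transforms.arrow2python): one dict per row
assert batch.to_pylist() == [{"id": 1, "value": "a"}, {"id": 2, "value": "b"}]

# "python_col" (Transforms.arrow2pythoncol): one list per column
assert batch.to_pydict() == {"id": [1, 2], "value": ["a", "b"]}
```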
@@ -746,15 +766,20 @@ class Permutation:
 
     def __getitem__(self, index: int) -> Any:
         """
-        Return a single row from the permutation
-
-        The output will always be a python dictionary regardless of the format.
-
-        This method is mostly useful for debugging and exploration. For actual
-        processing use [iter](#iter) or a torch data loader to perform batched
-        processing.
+        Returns a single row from the permutation by offset
         """
-        pass
+        return self.__getitems__([index])
+
+    def __getitems__(self, indices: list[int]) -> Any:
+        """
+        Returns rows from the permutation by offset
+        """
+
+        async def do_getitems():
+            return await self.reader.take_offsets(indices, selection=self.selection)
+
+        batch = LOOP.run(do_getitems())
+        return self.transform_fn(batch)
 
     @deprecated(details="Use with_skip instead")
     def skip(self, skip: int) -> "Permutation":
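The plural `__getitems__` deserves a note: recent PyTorch data-loader fetchers look for this method and, when present, fetch a whole batch of indices in one call rather than looping over `__getitem__`. A toy stand-in (not LanceDB code) sketches the shape of the protocol the Permutation now follows:

```python
class TinyDataset:
    """Toy illustration of the __getitem__/__getitems__ split."""

    def __init__(self, rows):
        self.rows = rows

    def __getitem__(self, index):
        # Single-item access delegates to the batched path, as above.
        return self.__getitems__([index])

    def __getitems__(self, indices):
        # One call fetches many offsets at once.
        return [self.rows[i] for i in indices]


ds = TinyDataset([{"id": i} for i in range(10)])
assert ds[3] == [{"id": 3}]
assert ds.__getitems__([1, 4]) == [{"id": 1}, {"id": 4}]
```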
@@ -1782,6 +1782,26 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
         vector_results = LanceHybridQueryBuilder._rank(vector_results, "_distance")
         fts_results = LanceHybridQueryBuilder._rank(fts_results, "_score")
 
+        # If both result sets are empty (e.g. after hard filtering),
+        # return early to avoid errors in reranking or score restoration.
+        if vector_results.num_rows == 0 and fts_results.num_rows == 0:
+            # Build a minimal empty table with the _relevance_score column
+            combined_schema = pa.unify_schemas(
+                [vector_results.schema, fts_results.schema],
+            )
+            empty = pa.table(
+                {
+                    col: pa.array([], type=combined_schema.field(col).type)
+                    for col in combined_schema.names
+                }
+            )
+            empty = empty.append_column(
+                "_relevance_score", pa.array([], type=pa.float32())
+            )
+            if not with_row_ids and "_rowid" in empty.column_names:
+                empty = empty.drop(["_rowid"])
+            return empty
+
         original_distances = None
         original_scores = None
         original_distance_row_ids = None
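The early return builds its empty table from the unified schemas of both branches; the same construction works standalone in pyarrow, with made-up result schemas:

```python
import pyarrow as pa

vector_schema = pa.schema([("text", pa.string()), ("_distance", pa.float32())])
fts_schema = pa.schema([("text", pa.string()), ("_score", pa.float32())])

# Merge the two result schemas, then materialize zero rows per column.
combined = pa.unify_schemas([vector_schema, fts_schema])
empty = pa.table(
    {col: pa.array([], type=combined.field(col).type) for col in combined.names}
)
empty = empty.append_column("_relevance_score", pa.array([], type=pa.float32()))

assert empty.num_rows == 0
assert "_relevance_score" in empty.column_names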
@@ -2118,19 +2138,17 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
         """  # noqa: E501
         self._create_query_builders()
 
-        results = ["Vector Search Plan:"]
-        results.append(
-            self._table._explain_plan(
-                self._vector_query.to_query_object(), verbose=verbose
-            )
-        )
-        results.append("FTS Search Plan:")
-        results.append(
-            self._table._explain_plan(
-                self._fts_query.to_query_object(), verbose=verbose
-            )
-        )
-        return "\n".join(results)
+        reranker_label = str(self._reranker) if self._reranker else "No reranker"
+        vector_plan = self._table._explain_plan(
+            self._vector_query.to_query_object(), verbose=verbose
+        )
+        fts_plan = self._table._explain_plan(
+            self._fts_query.to_query_object(), verbose=verbose
+        )
+        # Indent sub-plans under the reranker
+        indented_vector = "\n".join(" " + line for line in vector_plan.splitlines())
+        indented_fts = "\n".join(" " + line for line in fts_plan.splitlines())
+        return f"{reranker_label}\n {indented_vector}\n {indented_fts}"
 
     def analyze_plan(self):
         """Execute the query and display with runtime metrics.
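The indenting used above is plain string work; a simplified, self-contained sketch with a hypothetical two-line sub-plan:

```python
# Prefix each line of a (made-up) sub-plan so it nests under the reranker label.
plan = "KNNVectorDistance: metric=l2\nLanceRead: projection=[vector]"
indented = "\n".join("  " + line for line in plan.splitlines())
print(f"RRFReranker(K=60)\n{indented}")
# RRFReranker(K=60)
#   KNNVectorDistance: metric=l2
#   LanceRead: projection=[vector]
```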
@@ -3164,23 +3182,20 @@ class AsyncHybridQuery(AsyncStandardQuery, AsyncVectorQueryBase):
     ...     plan = await table.query().nearest_to([1.0, 2.0]).nearest_to_text("hello").explain_plan(True)
     ...     print(plan)
     >>> asyncio.run(doctest_example())  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-    Vector Search Plan:
+    RRFReranker(K=60)
     ProjectionExec: expr=[vector@0 as vector, text@3 as text, _distance@2 as _distance]
     Take: columns="vector, _rowid, _distance, (text)"
     CoalesceBatchesExec: target_batch_size=1024
     GlobalLimitExec: skip=0, fetch=10
     FilterExec: _distance@2 IS NOT NULL
     SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST, _rowid@1 ASC NULLS LAST], preserve_partitioning=[false]
     KNNVectorDistance: metric=l2
     LanceRead: uri=..., projection=[vector], ...
-    <BLANKLINE>
-    FTS Search Plan:
     ProjectionExec: expr=[vector@2 as vector, text@3 as text, _score@1 as _score]
     Take: columns="_rowid, _score, (vector), (text)"
     CoalesceBatchesExec: target_batch_size=1024
     GlobalLimitExec: skip=0, fetch=10
     MatchQuery: column=text, query=hello
-    <BLANKLINE>
 
     Parameters
     ----------
@@ -3192,12 +3207,12 @@ class AsyncHybridQuery(AsyncStandardQuery, AsyncVectorQueryBase):
         plan : str
         """  # noqa: E501
 
-        results = ["Vector Search Plan:"]
-        results.append(await self._inner.to_vector_query().explain_plan(verbose))
-        results.append("FTS Search Plan:")
-        results.append(await self._inner.to_fts_query().explain_plan(verbose))
-        return "\n".join(results)
+        vector_plan = await self._inner.to_vector_query().explain_plan(verbose)
+        fts_plan = await self._inner.to_fts_query().explain_plan(verbose)
+        # Indent sub-plans under the reranker
+        indented_vector = "\n".join(" " + line for line in vector_plan.splitlines())
+        indented_fts = "\n".join(" " + line for line in fts_plan.splitlines())
+        return f"{self._reranker}\n {indented_vector}\n {indented_fts}"
 
     async def analyze_plan(self):
         """
@@ -42,10 +42,18 @@ class AnswerdotaiRerankers(Reranker):
         rerankers = attempt_import_or_raise(
             "rerankers"
         )  # import here for faster ops later
+        self.model_name = model_name
+        self.model_type = model_type
         self.reranker = rerankers.Reranker(
             model_name=model_name, model_type=model_type, **kwargs
         )
 
+    def __str__(self):
+        return (
+            f"AnswerdotaiRerankers(model_type={self.model_type}, "
+            f"model_name={self.model_name})"
+        )
+
     def _rerank(self, result_set: pa.Table, query: str):
         result_set = self._handle_empty_results(result_set)
         if len(result_set) == 0:
@@ -40,6 +40,9 @@ class Reranker(ABC):
         if ARROW_VERSION.major <= 13:
             self._concat_tables_args = {"promote": True}
 
+    def __str__(self):
+        return self.__class__.__name__
+
     def rerank_vector(
         self,
         query: str,
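With this base `__str__` plus the per-class overrides in the hunks that follow, printing a reranker reports its key parameters, which the reworked hybrid `explain_plan` uses as the top-level label. A minimal sketch:

```python
from lancedb.rerankers import RRFReranker

reranker = RRFReranker()
# Default K is 60, matching the doctest output shown above.
print(reranker)  # -> RRFReranker(K=60)
```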
@@ -44,6 +44,9 @@ class CohereReranker(Reranker):
         self.top_n = top_n
         self.api_key = api_key
 
+    def __str__(self):
+        return f"CohereReranker(model_name={self.model_name})"
+
     @cached_property
     def _client(self):
         cohere = attempt_import_or_raise("cohere")
@@ -50,6 +50,9 @@ class CrossEncoderReranker(Reranker):
         if self.device is None:
             self.device = "cuda" if torch.cuda.is_available() else "cpu"
 
+    def __str__(self):
+        return f"CrossEncoderReranker(model_name={self.model_name})"
+
     @cached_property
     def model(self):
         sbert = attempt_import_or_raise("sentence_transformers")
@@ -45,6 +45,9 @@ class JinaReranker(Reranker):
         self.top_n = top_n
         self.api_key = api_key
 
+    def __str__(self):
+        return f"JinaReranker(model_name={self.model_name})"
+
     @cached_property
     def _client(self):
         import requests
@@ -38,6 +38,9 @@ class LinearCombinationReranker(Reranker):
         self.weight = weight
         self.fill = fill
 
+    def __str__(self):
+        return f"LinearCombinationReranker(weight={self.weight}, fill={self.fill})"
+
     def rerank_hybrid(
         self,
         query: str,  # noqa: F821
@@ -54,6 +54,12 @@ class MRRReranker(Reranker):
         self.weight_vector = weight_vector
         self.weight_fts = weight_fts
 
+    def __str__(self):
+        return (
+            f"MRRReranker(weight_vector={self.weight_vector}, "
+            f"weight_fts={self.weight_fts})"
+        )
+
     def rerank_hybrid(
         self,
         query: str,  # noqa: F821
@@ -43,6 +43,9 @@ class OpenaiReranker(Reranker):
         self.column = column
         self.api_key = api_key
 
+    def __str__(self):
+        return f"OpenaiReranker(model_name={self.model_name})"
+
     def _rerank(self, result_set: pa.Table, query: str):
         result_set = self._handle_empty_results(result_set)
         if len(result_set) == 0:
@@ -36,6 +36,9 @@ class RRFReranker(Reranker):
         super().__init__(return_score)
         self.K = K
 
+    def __str__(self):
+        return f"RRFReranker(K={self.K})"
+
     def rerank_hybrid(
         self,
         query: str,  # noqa: F821
@@ -52,6 +52,9 @@ class VoyageAIReranker(Reranker):
         self.api_key = api_key
         self.truncation = truncation
 
+    def __str__(self):
+        return f"VoyageAIReranker(model_name={self.model_name})"
+
     @cached_property
     def _client(self):
         voyageai = attempt_import_or_raise("voyageai")
python/python/lancedb/scannable.py (new file, 214 lines)
@@ -0,0 +1,214 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright The LanceDB Authors
+
+from dataclasses import dataclass
+from functools import singledispatch
+import sys
+from typing import Callable, Iterator, Optional
+from lancedb.arrow import to_arrow
+import pyarrow as pa
+import pyarrow.dataset as ds
+
+from .pydantic import LanceModel
+
+
+@dataclass
+class Scannable:
+    schema: pa.Schema
+    num_rows: Optional[int]
+    # Factory function to create a new reader each time (supports re-scanning)
+    reader: Callable[[], pa.RecordBatchReader]
+    # Whether reader can be called more than once. For example, an iterator can
+    # only be consumed once, while a DataFrame can be converted to a new reader
+    # each time.
+    rescannable: bool = True
+
+
+@singledispatch
+def to_scannable(data) -> Scannable:
+    # Fallback: try iterable protocol
+    if hasattr(data, "__iter__"):
+        return _from_iterable(iter(data))
+    raise NotImplementedError(f"to_scannable not implemented for type {type(data)}")
+
+
+@to_scannable.register(pa.RecordBatchReader)
+def _from_reader(data: pa.RecordBatchReader) -> Scannable:
+    # RecordBatchReader can only be consumed once - not rescannable
+    return Scannable(
+        schema=data.schema, num_rows=None, reader=lambda: data, rescannable=False
+    )
+
+
+@to_scannable.register(pa.RecordBatch)
+def _from_batch(data: pa.RecordBatch) -> Scannable:
+    return Scannable(
+        schema=data.schema,
+        num_rows=data.num_rows,
+        reader=lambda: pa.RecordBatchReader.from_batches(data.schema, [data]),
+    )
+
+
+@to_scannable.register(pa.Table)
+def _from_table(data: pa.Table) -> Scannable:
+    return Scannable(schema=data.schema, num_rows=data.num_rows, reader=data.to_reader)
+
+
+@to_scannable.register(ds.Dataset)
+def _from_dataset(data: ds.Dataset) -> Scannable:
+    return Scannable(
+        schema=data.schema,
+        num_rows=data.count_rows(),
+        reader=lambda: data.scanner().to_reader(),
+    )
+
+
+@to_scannable.register(ds.Scanner)
+def _from_scanner(data: ds.Scanner) -> Scannable:
+    # Scanner can only be consumed once - not rescannable
+    return Scannable(
+        schema=data.projected_schema,
+        num_rows=None,
+        reader=data.to_reader,
+        rescannable=False,
+    )
+
+
+@to_scannable.register(list)
+def _from_list(data: list) -> Scannable:
+    if not data:
+        raise ValueError("Cannot create table from empty list without a schema")
+    table = to_arrow(data)
+    return Scannable(
+        schema=table.schema, num_rows=table.num_rows, reader=table.to_reader
+    )
+
+
+@to_scannable.register(dict)
+def _from_dict(data: dict) -> Scannable:
+    raise ValueError("Cannot add a single dictionary to a table. Use a list.")
+
+
+@to_scannable.register(LanceModel)
+def _from_lance_model(data: LanceModel) -> Scannable:
+    raise ValueError("Cannot add a single LanceModel to a table. Use a list.")
+
+
+def _from_iterable(data: Iterator) -> Scannable:
+    first_item = next(data, None)
+    if first_item is None:
+        raise ValueError("Cannot create table from empty iterator")
+    first = to_arrow(first_item)
+    schema = first.schema
+
+    def iter():
+        yield from first.to_batches()
+        for item in data:
+            batch = to_arrow(item)
+            if batch.schema != schema:
+                try:
+                    batch = batch.cast(schema)
+                except pa.lib.ArrowInvalid:
+                    raise ValueError(
+                        f"Input iterator yielded a batch with schema that "
+                        f"does not match the schema of other batches.\n"
+                        f"Expected:\n{schema}\nGot:\n{batch.schema}"
+                    )
+            yield from batch.to_batches()
+
+    reader = pa.RecordBatchReader.from_batches(schema, iter())
+    return to_scannable(reader)
+
+
+_registered_modules: set[str] = set()
+
+
+def _register_optional_converters():
+    """Register converters for optional dependencies that are already imported."""
+
+    if "pandas" in sys.modules and "pandas" not in _registered_modules:
+        _registered_modules.add("pandas")
+        import pandas as pd
+
+        @to_arrow.register(pd.DataFrame)
+        def _arrow_from_pandas(data: pd.DataFrame) -> pa.Table:
+            table = pa.Table.from_pandas(data, preserve_index=False)
+            return table.replace_schema_metadata(None)
+
+        @to_scannable.register(pd.DataFrame)
+        def _from_pandas(data: pd.DataFrame) -> Scannable:
+            return to_scannable(_arrow_from_pandas(data))
+
+    if "polars" in sys.modules and "polars" not in _registered_modules:
+        _registered_modules.add("polars")
+        import polars as pl
+
+        @to_arrow.register(pl.DataFrame)
+        def _arrow_from_polars(data: pl.DataFrame) -> pa.Table:
+            return data.to_arrow()
+
+        @to_scannable.register(pl.DataFrame)
+        def _from_polars(data: pl.DataFrame) -> Scannable:
+            arrow = data.to_arrow()
+            return Scannable(
+                schema=arrow.schema, num_rows=len(data), reader=arrow.to_reader
+            )
+
+        @to_scannable.register(pl.LazyFrame)
+        def _from_polars_lazy(data: pl.LazyFrame) -> Scannable:
+            arrow = data.collect().to_arrow()
+            return Scannable(
+                schema=arrow.schema, num_rows=arrow.num_rows, reader=arrow.to_reader
+            )
+
+    if "datasets" in sys.modules and "datasets" not in _registered_modules:
+        _registered_modules.add("datasets")
+        from datasets import Dataset as HFDataset
+        from datasets import DatasetDict as HFDatasetDict
+
+        @to_scannable.register(HFDataset)
+        def _from_hf_dataset(data: HFDataset) -> Scannable:
+            table = data.data.table  # Access underlying Arrow table
+            return Scannable(
+                schema=table.schema, num_rows=len(data), reader=table.to_reader
+            )
+
+        @to_scannable.register(HFDatasetDict)
+        def _from_hf_dataset_dict(data: HFDatasetDict) -> Scannable:
+            # HuggingFace DatasetDict: combine all splits with a 'split' column
+            schema = data[list(data.keys())[0]].features.arrow_schema
+            if "split" not in schema.names:
+                schema = schema.append(pa.field("split", pa.string()))
+
+            def gen():
+                for split_name, dataset in data.items():
+                    for batch in dataset.data.to_batches():
+                        split_arr = pa.array(
+                            [split_name] * len(batch), type=pa.string()
+                        )
+                        yield pa.RecordBatch.from_arrays(
+                            list(batch.columns) + [split_arr], schema=schema
+                        )
+
+            total_rows = sum(len(dataset) for dataset in data.values())
+            return Scannable(
+                schema=schema,
+                num_rows=total_rows,
+                reader=lambda: pa.RecordBatchReader.from_batches(schema, gen()),
+            )
+
+    if "lance" in sys.modules and "lance" not in _registered_modules:
+        _registered_modules.add("lance")
+        import lance
+
+        @to_scannable.register(lance.LanceDataset)
+        def _from_lance(data: lance.LanceDataset) -> Scannable:
+            return Scannable(
+                schema=data.schema,
+                num_rows=data.count_rows(),
+                reader=lambda: data.scanner().to_reader(),
+            )
+
+
+# Register on module load
+_register_optional_converters()
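A short, hedged usage sketch for the module above: heterogeneous inputs normalize to a schema plus a reader factory, and one-shot sources are flagged so callers know not to scan twice:

```python
import pyarrow as pa

from lancedb.scannable import to_scannable  # module added above

table = pa.table({"id": [1, 2, 3]})
s = to_scannable(table)
assert s.num_rows == 3 and s.rescannable
# A rescannable source can hand out a fresh reader repeatedly.
assert s.reader().read_all().num_rows == 3
assert s.reader().read_all().num_rows == 3

# A RecordBatchReader is consumable once, so it is not rescannable
# and its row count is unknown up front.
s2 = to_scannable(table.to_reader())
assert s2.num_rows is None and not s2.rescannable
```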
@@ -25,6 +25,8 @@ from typing import (
 )
 from urllib.parse import urlparse
 
+from lancedb.scannable import _register_optional_converters, to_scannable
+
 from . import __version__
 from lancedb.arrow import peek_reader
 from lancedb.background_loop import LOOP
@@ -904,7 +906,9 @@ class Table(ABC):
         ----------
         field_names: str or list of str
             The name(s) of the field to index.
-            can be only str if use_tantivy=True for now.
+            If ``use_tantivy`` is False (default), only a single field name
+            (str) is supported. To index multiple fields, create a separate
+            FTS index for each field.
         replace: bool, default False
             If True, replace the existing index if it exists. Note that this is
             not yet an atomic operation; the index will be temporarily
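A hedged example of the documented pattern (the local path, table, and column names are hypothetical): with `use_tantivy=False`, create one native FTS index per text field:

```python
import lancedb

db = lancedb.connect("/tmp/fts_demo")  # hypothetical local database path
table = db.create_table(
    "docs",
    [{"title": "hello", "body": "world is wide"}],
    mode="overwrite",
)

# One native FTS index per field; passing a list of fields raises here.
table.create_fts_index("title", use_tantivy=False)
table.create_fts_index("body", use_tantivy=False)
```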
@@ -2298,7 +2302,11 @@ class LanceTable(Table):
     ):
         if not use_tantivy:
             if not isinstance(field_names, str):
-                raise ValueError("field_names must be a string when use_tantivy=False")
+                raise ValueError(
+                    "Native FTS indexes can only be created on a single field "
+                    "at a time. To search over multiple text fields, create a "
+                    "separate FTS index for each field."
+                )
 
         if tokenizer_name is None:
             tokenizer_configs = {
@@ -3721,18 +3729,31 @@ class AsyncTable:
                 on_bad_vectors = "error"
             if fill_value is None:
                 fill_value = 0.0
-        data = _sanitize_data(
-            data,
-            schema,
-            metadata=schema.metadata,
-            on_bad_vectors=on_bad_vectors,
-            fill_value=fill_value,
-            allow_subschema=True,
-        )
-        if isinstance(data, pa.Table):
-            data = data.to_reader()
-
-        return await self._inner.add(data, mode or "append")
+        # _sanitize_data is an old code path, but we will use it until the
+        # new code path is ready.
+        if on_bad_vectors != "error" or (
+            schema.metadata is not None and b"embedding_functions" in schema.metadata
+        ):
+            data = _sanitize_data(
+                data,
+                schema,
+                metadata=schema.metadata,
+                on_bad_vectors=on_bad_vectors,
+                fill_value=fill_value,
+                allow_subschema=True,
+            )
+        _register_optional_converters()
+        data = to_scannable(data)
+        try:
+            return await self._inner.add(data, mode or "append")
+        except RuntimeError as e:
+            if "Cast error" in str(e):
+                raise ValueError(e)
+            elif "Vector column contains NaN" in str(e):
+                raise ValueError(e)
+            else:
+                raise
 
     def merge_insert(self, on: Union[str, Iterable[str]]) -> LanceMergeInsertBuilder:
         """
@@ -419,3 +419,22 @@ def batch_to_tensor(batch: pa.RecordBatch):
     """
     torch = attempt_import_or_raise("torch", "torch")
     return torch.stack([torch.from_dlpack(col) for col in batch.columns])
+
+
+def batch_to_tensor_rows(batch: pa.RecordBatch):
+    """
+    Convert a PyArrow RecordBatch to a list of PyTorch Tensor, one per row
+
+    Each column is converted to a tensor (using zero-copy via DLPack)
+    and the columns are then stacked into a single tensor. The 2D tensor
+    is then converted to a list of tensors, one per row
+
+    Fails if torch or numpy is not installed.
+    Fails if a column's data type is not supported by PyTorch.
+    """
+    torch = attempt_import_or_raise("torch", "torch")
+    numpy = attempt_import_or_raise("numpy", "numpy")
+    columns = [col.to_numpy(zero_copy_only=False) for col in batch.columns]
+    stacked = torch.tensor(numpy.column_stack(columns))
+    rows = list(stacked.unbind(dim=0))
+    return rows
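A quick sketch of the new helper, assuming torch and numpy are installed (the `lancedb.util` import path matches the permutation module's own import above); each row of the batch becomes its own 1-D tensor:

```python
import pyarrow as pa

from lancedb.util import batch_to_tensor_rows

batch = pa.RecordBatch.from_arrays(
    [pa.array([1.0, 2.0]), pa.array([3.0, 4.0])], names=["x", "y"]
)

rows = batch_to_tensor_rows(batch)
assert len(rows) == 2                 # one tensor per row
assert tuple(rows[0].shape) == (2,)   # one element per column: (x, y)
```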
@@ -515,3 +515,34 @@ def test_openai_propagates_api_key(monkeypatch):
     query = "greetings"
     actual = table.search(query).limit(1).to_pydantic(Words)[0]
     assert len(actual.text) > 0
+
+
+@patch("time.sleep")
+def test_openai_no_retry_on_401(mock_sleep):
+    """
+    Test that OpenAI embedding function does not retry on 401 authentication
+    errors.
+    """
+    from lancedb.embeddings.utils import retry_with_exponential_backoff
+
+    # Create a mock that raises an AuthenticationError
+    class MockAuthenticationError(Exception):
+        """Mock OpenAI AuthenticationError"""
+
+        pass
+
+    MockAuthenticationError.__name__ = "AuthenticationError"
+
+    mock_func = MagicMock(side_effect=MockAuthenticationError("Invalid API key"))
+
+    # Wrap the function with retry logic
+    wrapped_func = retry_with_exponential_backoff(mock_func, max_retries=3)
+
+    # Should raise without retrying
+    with pytest.raises(MockAuthenticationError):
+        wrapped_func()
+
+    # Verify that the function was only called once (no retries)
+    assert mock_func.call_count == 1
+    # Verify that sleep was never called (no retries)
+    assert mock_sleep.call_count == 0
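For orientation, a sketch of the pattern this test locks in (`embed_once` is hypothetical; `retry_with_exponential_backoff` and its `max_retries` parameter are used exactly as in the test above):

    from lancedb.embeddings.utils import retry_with_exponential_backoff

    def embed_once(texts):
        # hypothetical call that may raise an AuthenticationError
        ...

    embed = retry_with_exponential_backoff(embed_once, max_retries=3)
    # Transient failures back off and retry; an AuthenticationError is
    # re-raised on the first call, with no sleep in between.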
@@ -163,9 +163,7 @@ async def test_explain_plan(table: AsyncTable):
         table.query().nearest_to_text("dog").nearest_to([0.1, 0.1]).explain_plan(True)
     )
 
-    assert "Vector Search Plan" in plan
     assert "KNNVectorDistance" in plan
-    assert "FTS Search Plan" in plan
     assert "LanceRead" in plan
 
 
@@ -664,23 +664,20 @@ def test_iter_basic(some_permutation: Permutation):
     expected_batches = (950 + batch_size - 1) // batch_size  # ceiling division
     assert len(batches) == expected_batches
 
-    # Check that all batches are dicts (default python format)
-    assert all(isinstance(batch, dict) for batch in batches)
+    # Check that all batches are lists of dicts (default python format)
+    assert all(isinstance(batch, list) for batch in batches)
 
     # Check that batches have the correct structure
     for batch in batches:
-        assert "id" in batch
-        assert "value" in batch
-        assert isinstance(batch["id"], list)
-        assert isinstance(batch["value"], list)
+        assert "id" in batch[0]
+        assert "value" in batch[0]
 
     # Check that all batches except the last have the correct size
     for batch in batches[:-1]:
-        assert len(batch["id"]) == batch_size
-        assert len(batch["value"]) == batch_size
+        assert len(batch) == batch_size
 
     # Last batch might be smaller
-    assert len(batches[-1]["id"]) <= batch_size
+    assert len(batches[-1]) <= batch_size
 
 
 def test_iter_skip_last_batch(some_permutation: Permutation):

@@ -699,11 +696,11 @@ def test_iter_skip_last_batch(some_permutation: Permutation):
     if 950 % batch_size != 0:
         assert len(batches_without_skip) == num_full_batches + 1
         # Last batch should be smaller
-        assert len(batches_without_skip[-1]["id"]) == 950 % batch_size
+        assert len(batches_without_skip[-1]) == 950 % batch_size
 
     # All batches with skip_last_batch should be full size
     for batch in batches_with_skip:
-        assert len(batch["id"]) == batch_size
+        assert len(batch) == batch_size
 
 
 def test_iter_different_batch_sizes(some_permutation: Permutation):

@@ -720,12 +717,12 @@ def test_iter_different_batch_sizes(some_permutation: Permutation):
     # Test with batch size equal to total rows
     single_batch = list(some_permutation.iter(950, skip_last_batch=False))
     assert len(single_batch) == 1
-    assert len(single_batch[0]["id"]) == 950
+    assert len(single_batch[0]) == 950
 
     # Test with batch size larger than total rows
    oversized_batch = list(some_permutation.iter(10000, skip_last_batch=False))
     assert len(oversized_batch) == 1
-    assert len(oversized_batch[0]["id"]) == 950
+    assert len(oversized_batch[0]) == 950
 
 
 def test_dunder_iter(some_permutation: Permutation):

@@ -738,15 +735,13 @@ def test_dunder_iter(some_permutation: Permutation):
 
     # All batches should be full size
     for batch in batches:
-        assert len(batch["id"]) == 100
-        assert len(batch["value"]) == 100
+        assert len(batch) == 100
 
     some_permutation = some_permutation.with_batch_size(400)
     batches = list(some_permutation)
     assert len(batches) == 2  # floor(950 / 400) since skip_last_batch=True
     for batch in batches:
-        assert len(batch["id"]) == 400
-        assert len(batch["value"]) == 400
+        assert len(batch) == 400
 
 
 def test_iter_with_different_formats(some_permutation: Permutation):

@@ -761,7 +756,7 @@ def test_iter_with_different_formats(some_permutation: Permutation):
     # Test with python format (default)
     python_perm = some_permutation.with_format("python")
     python_batches = list(python_perm.iter(batch_size, skip_last_batch=False))
-    assert all(isinstance(batch, dict) for batch in python_batches)
+    assert all(isinstance(batch, list) for batch in python_batches)
 
     # Test with pandas format
     pandas_perm = some_permutation.with_format("pandas")

@@ -780,8 +775,8 @@ def test_iter_with_column_selection(some_permutation: Permutation):
 
     # Check that batches only contain the id column
     for batch in batches:
-        assert "id" in batch
-        assert "value" not in batch
+        assert "id" in batch[0]
+        assert "value" not in batch[0]
 
 
 def test_iter_with_column_rename(some_permutation: Permutation):

@@ -791,9 +786,9 @@ def test_iter_with_column_rename(some_permutation: Permutation):
 
     # Check that batches have the renamed column
     for batch in batches:
-        assert "id" in batch
-        assert "data" in batch
-        assert "value" not in batch
+        assert "id" in batch[0]
+        assert "data" in batch[0]
+        assert "value" not in batch[0]
 
 
 def test_iter_with_limit_offset(some_permutation: Permutation):

@@ -812,14 +807,14 @@ def test_iter_with_limit_offset(some_permutation: Permutation):
     assert len(limit_batches) == 5
 
     no_skip = some_permutation.iter(101, skip_last_batch=False)
-    row_100 = next(no_skip)["id"][100]
+    row_100 = next(no_skip)[100]["id"]
 
     # Test with both limit and offset
     limited_perm = some_permutation.with_skip(100).with_take(300)
     limited_batches = list(limited_perm.iter(100, skip_last_batch=False))
     # Should have 3 batches (300 / 100)
     assert len(limited_batches) == 3
-    assert limited_batches[0]["id"][0] == row_100
+    assert limited_batches[0][0]["id"] == row_100
 
 
 def test_iter_empty_permutation(mem_db):

@@ -842,7 +837,7 @@ def test_iter_single_row(mem_db):
     # With skip_last_batch=False, should get one batch
     batches = list(perm.iter(10, skip_last_batch=False))
     assert len(batches) == 1
-    assert len(batches[0]["id"]) == 1
+    assert len(batches[0]) == 1
 
     # With skip_last_batch=True, should skip the single row (since it's < batch_size)
     batches_skip = list(perm.iter(10, skip_last_batch=True))

@@ -860,8 +855,7 @@ def test_identity_permutation(mem_db):
 
     batches = list(permutation.iter(10, skip_last_batch=False))
     assert len(batches) == 1
-    assert len(batches[0]["id"]) == 10
-    assert len(batches[0]["value"]) == 10
+    assert len(batches[0]) == 10
 
     permutation = permutation.remove_columns(["value"])
     assert permutation.num_columns == 1

@@ -904,10 +898,10 @@ def test_transform_fn(mem_db):
     py_result = list(permutation.with_format("python").iter(10, skip_last_batch=False))[
         0
     ]
-    assert len(py_result) == 2
-    assert len(py_result["id"]) == 10
-    assert len(py_result["value"]) == 10
-    assert isinstance(py_result, dict)
+    assert len(py_result) == 10
+    assert "id" in py_result[0]
+    assert "value" in py_result[0]
+    assert isinstance(py_result, list)
 
     try:
         import torch

@@ -915,9 +909,11 @@ def test_transform_fn(mem_db):
         torch_result = list(
             permutation.with_format("torch").iter(10, skip_last_batch=False)
         )[0]
-        assert torch_result.shape == (2, 10)
-        assert torch_result.dtype == torch.int64
-        assert isinstance(torch_result, torch.Tensor)
+        assert isinstance(torch_result, list)
+        assert len(torch_result) == 10
+        assert isinstance(torch_result[0], torch.Tensor)
+        assert torch_result[0].shape == (2,)
+        assert torch_result[0].dtype == torch.int64
     except ImportError:
         # Skip check if torch is not installed
         pass

@@ -945,3 +941,113 @@ def test_custom_transform(mem_db):
     batch = batches[0]
 
     assert batch == pa.record_batch([range(10)], ["id"])
+
+
+def test_getitems_basic(some_permutation: Permutation):
+    """Test __getitems__ returns correct rows by offset."""
+    result = some_permutation.__getitems__([0, 1, 2])
+    assert isinstance(result, list)
+    assert "id" in result[0]
+    assert "value" in result[0]
+    assert len(result) == 3
+
+
+def test_getitems_single_index(some_permutation: Permutation):
+    """Test __getitems__ with a single index."""
+    result = some_permutation.__getitems__([0])
+    assert len(result) == 1
+
+
+def test_getitems_preserves_order(some_permutation: Permutation):
+    """Test __getitems__ returns rows in the requested order."""
+    # Get rows in forward order
+    forward = some_permutation.__getitems__([0, 1, 2, 3, 4])
+    # Get the same rows in reverse order
+    reverse = some_permutation.__getitems__([4, 3, 2, 1, 0])
+
+    assert [r["id"] for r in forward] == list(reversed([r["id"] for r in reverse]))
+    assert [r["value"] for r in forward] == list(
+        reversed([r["value"] for r in reverse])
+    )
+
+
+def test_getitems_non_contiguous(some_permutation: Permutation):
+    """Test __getitems__ with non-contiguous indices."""
+    result = some_permutation.__getitems__([0, 10, 50, 100, 500])
+    assert len(result) == 5
+
+    # Each id/value pair should match what we'd get individually
+    for i, offset in enumerate([0, 10, 50, 100, 500]):
+        single = some_permutation.__getitems__([offset])
+        assert result[i]["id"] == single[0]["id"]
+        assert result[i]["value"] == single[0]["value"]
+
+
+def test_getitems_with_column_selection(some_permutation: Permutation):
+    """Test __getitems__ respects column selection."""
+    id_only = some_permutation.select_columns(["id"])
+    result = id_only.__getitems__([0, 1, 2])
+    assert "id" in result[0]
+    assert "value" not in result[0]
+    assert len(result) == 3
+
+
+def test_getitems_with_column_rename(some_permutation: Permutation):
+    """Test __getitems__ respects column renames."""
+    renamed = some_permutation.rename_column("value", "data")
+    result = renamed.__getitems__([0, 1])
+    assert "data" in result[0]
+    assert "value" not in result[0]
+    assert len(result) == 2
+
+
+def test_getitems_with_format(some_permutation: Permutation):
+    """Test __getitems__ applies the transform function."""
+    arrow_perm = some_permutation.with_format("arrow")
+    result = arrow_perm.__getitems__([0, 1, 2])
+    assert isinstance(result, pa.RecordBatch)
+    assert result.num_rows == 3
+
+
+def test_getitems_with_custom_transform(some_permutation: Permutation):
+    """Test __getitems__ with a custom transform."""
+
+    def transform(batch: pa.RecordBatch) -> list:
+        return batch.column("id").to_pylist()
+
+    custom = some_permutation.with_transform(transform)
+    result = custom.__getitems__([0, 1, 2])
+    assert isinstance(result, list)
+    assert len(result) == 3
+
+
+def test_getitems_identity_permutation(mem_db):
+    """Test __getitems__ on an identity permutation."""
+    tbl = mem_db.create_table(
+        "test_table", pa.table({"id": range(10), "value": range(10)})
+    )
+    perm = Permutation.identity(tbl)
+
+    result = perm.__getitems__([0, 5, 9])
+    assert [r["id"] for r in result] == [0, 5, 9]
+    assert [r["value"] for r in result] == [0, 5, 9]
+
+
+def test_getitems_with_limit_offset(some_permutation: Permutation):
+    """Test __getitems__ on a permutation with skip/take applied."""
+    limited = some_permutation.with_skip(100).with_take(200)
+
+    # Should be able to access offsets within the limited range
+    result = limited.__getitems__([0, 1, 199])
+    assert len(result) == 3
+
+    # The first item of the limited permutation should match offset 100 of original
+    full_result = some_permutation.__getitems__([100])
+    limited_result = limited.__getitems__([0])
+    assert limited_result[0]["id"] == full_result[0]["id"]
+
+
+def test_getitems_invalid_offset(some_permutation: Permutation):
+    """Test __getitems__ with an out-of-range offset raises an error."""
+    with pytest.raises(Exception):
+        some_permutation.__getitems__([999999])
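The `__getitems__` hook exercised above is what makes a Permutation usable as a map-style PyTorch dataset: when the hook is present, the DataLoader's default fetcher passes a whole batch of indices at once, so each shuffled batch becomes a single random-access read. A minimal sketch, mirroring the dataloader test later in this diff (`table` is assumed to exist):

    import torch
    from lancedb.permutation import Permutation

    perm = Permutation.identity(table)
    loader = torch.utils.data.DataLoader(perm, batch_size=10, shuffle=True)
    for batch in loader:
        # each batch was fetched via perm.__getitems__([i0, ..., i9])
        ...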
@@ -531,6 +531,78 @@ def test_empty_result_reranker():
     )
 
 
+def test_empty_hybrid_result_reranker():
+    """Test that hybrid search with empty results after filtering doesn't crash.
+
+    Regression test for https://github.com/lancedb/lancedb/issues/2425
+    """
+    from lancedb.query import LanceHybridQueryBuilder
+
+    # Simulate empty vector and FTS results with the expected schema
+    vector_schema = pa.schema(
+        [
+            ("text", pa.string()),
+            ("vector", pa.list_(pa.float32(), 4)),
+            ("_rowid", pa.uint64()),
+            ("_distance", pa.float32()),
+        ]
+    )
+    fts_schema = pa.schema(
+        [
+            ("text", pa.string()),
+            ("vector", pa.list_(pa.float32(), 4)),
+            ("_rowid", pa.uint64()),
+            ("_score", pa.float32()),
+        ]
+    )
+    empty_vector = pa.table(
+        {
+            "text": pa.array([], type=pa.string()),
+            "vector": pa.array([], type=pa.list_(pa.float32(), 4)),
+            "_rowid": pa.array([], type=pa.uint64()),
+            "_distance": pa.array([], type=pa.float32()),
+        },
+        schema=vector_schema,
+    )
+    empty_fts = pa.table(
+        {
+            "text": pa.array([], type=pa.string()),
+            "vector": pa.array([], type=pa.list_(pa.float32(), 4)),
+            "_rowid": pa.array([], type=pa.uint64()),
+            "_score": pa.array([], type=pa.float32()),
+        },
+        schema=fts_schema,
+    )
+
+    for reranker in [LinearCombinationReranker(), RRFReranker()]:
+        result = LanceHybridQueryBuilder._combine_hybrid_results(
+            fts_results=empty_fts,
+            vector_results=empty_vector,
+            norm="score",
+            fts_query="nonexistent query",
+            reranker=reranker,
+            limit=10,
+            with_row_ids=False,
+        )
+        assert len(result) == 0
+        assert "_relevance_score" in result.column_names
+        assert "_rowid" not in result.column_names
+
+    # Also test with with_row_ids=True
+    result = LanceHybridQueryBuilder._combine_hybrid_results(
+        fts_results=empty_fts,
+        vector_results=empty_vector,
+        norm="score",
+        fts_query="nonexistent query",
+        reranker=LinearCombinationReranker(),
+        limit=10,
+        with_row_ids=True,
+    )
+    assert len(result) == 0
+    assert "_relevance_score" in result.column_names
+    assert "_rowid" in result.column_names
+
+
 @pytest.mark.parametrize("use_tantivy", [True, False])
 def test_cross_encoder_reranker_return_all(tmp_path, use_tantivy):
     pytest.importorskip("sentence_transformers")
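At the user level, the scenario this guards against is a hybrid query whose filter eliminates every candidate. A hedged sketch, assuming `table` has an FTS index and a registered embedding function (the column and filter are illustrative):

    results = (
        table.search("some phrase", query_type="hybrid")
        .where("price < 0")  # matches nothing on purpose
        .limit(10)
        .to_arrow()
    )
    assert len(results) == 0  # empty but well-formed, not a crash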
@@ -810,7 +810,7 @@ def test_create_index_name_and_train_parameters(
     )
 
 
-def test_add_with_nans(mem_db: DBConnection):
+def test_create_with_nans(mem_db: DBConnection):
     # by default we raise an error on bad input vectors
     bad_data = [
         {"vector": [np.nan], "item": "bar", "price": 20.0},
@@ -854,6 +854,57 @@ def test_add_with_nans(mem_db: DBConnection):
     assert np.allclose(v, np.array([0.0, 0.0]))
 
 
+def test_add_with_nans(mem_db: DBConnection):
+    schema = pa.schema(
+        [
+            pa.field("vector", pa.list_(pa.float32(), 2), nullable=True),
+            pa.field("item", pa.string(), nullable=True),
+            pa.field("price", pa.float64(), nullable=False),
+        ],
+    )
+    table = mem_db.create_table("test", schema=schema)
+    # by default we raise an error on bad input vectors
+    bad_data = [
+        {"vector": [np.nan], "item": "bar", "price": 20.0},
+        {"vector": [5], "item": "bar", "price": 20.0},
+        {"vector": [np.nan, np.nan], "item": "bar", "price": 20.0},
+        {"vector": [np.nan, 5.0], "item": "bar", "price": 20.0},
+    ]
+    for row in bad_data:
+        with pytest.raises(ValueError):
+            table.add(
+                data=[row],
+            )
+
+    table.add(
+        [
+            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
+            {"vector": [2.1, 4.1], "item": "foo", "price": 9.0},
+            {"vector": [np.nan], "item": "bar", "price": 20.0},
+            {"vector": [5], "item": "bar", "price": 20.0},
+            {"vector": [np.nan, np.nan], "item": "bar", "price": 20.0},
+        ],
+        on_bad_vectors="drop",
+    )
+    assert len(table) == 2
+    table.delete("true")
+
+    # We can fill bad input with some value
+    table.add(
+        data=[
+            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
+            {"vector": [np.nan], "item": "bar", "price": 20.0},
+            {"vector": [np.nan, np.nan], "item": "bar", "price": 20.0},
+        ],
+        on_bad_vectors="fill",
+        fill_value=0.0,
+    )
+    assert len(table) == 3
+    arrow_tbl = table.search().where("item == 'bar'").to_arrow()
+    v = arrow_tbl["vector"].to_pylist()[0]
+    assert np.allclose(v, np.array([0.0, 0.0]))
+
+
 def test_restore(mem_db: DBConnection):
     table = mem_db.create_table(
         "my_table",
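Taken together, these tests pin down the three `on_bad_vectors` policies for `Table.add`:

    table.add(rows)                                         # "error" (default): raise ValueError
    table.add(rows, on_bad_vectors="drop")                  # drop rows whose vectors are NaN or mis-sized
    table.add(rows, on_bad_vectors="fill", fill_value=0.0)  # replace bad vectors with fill_value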
@@ -4,6 +4,7 @@
 import pyarrow as pa
 import pytest
 from lancedb.util import tbl_to_tensor
+from lancedb.permutation import Permutation
 
 torch = pytest.importorskip("torch")
 
@@ -16,3 +17,26 @@ def test_table_dataloader(mem_db):
     for batch in dataloader:
         assert batch.size(0) == 1
         assert batch.size(1) == 10
+
+
+def test_permutation_dataloader(mem_db):
+    table = mem_db.create_table("test_table", pa.table({"a": range(1000)}))
+
+    permutation = Permutation.identity(table)
+    dataloader = torch.utils.data.DataLoader(permutation, batch_size=10, shuffle=True)
+    for batch in dataloader:
+        assert batch["a"].size(0) == 10
+
+    permutation = permutation.with_format("torch")
+    dataloader = torch.utils.data.DataLoader(permutation, batch_size=10, shuffle=True)
+    for batch in dataloader:
+        assert batch.size(0) == 10
+        assert batch.size(1) == 1
+
+    permutation = permutation.with_format("torch_col")
+    dataloader = torch.utils.data.DataLoader(
+        permutation, collate_fn=lambda x: x, batch_size=10, shuffle=True
+    )
+    for batch in dataloader:
+        assert batch.size(0) == 1
+        assert batch.size(1) == 10
@@ -292,18 +292,14 @@ class TestModel(lancedb.pydantic.LanceModel):
     lambda: pa.table({"a": [1], "b": [2]}),
     lambda: pa.table({"a": [1], "b": [2]}).to_reader(),
     lambda: iter(pa.table({"a": [1], "b": [2]}).to_batches()),
-    lambda: (
-        lance.write_dataset(
-            pa.table({"a": [1], "b": [2]}),
-            "memory://test",
-        )
-    ),
-    lambda: (
-        lance.write_dataset(
-            pa.table({"a": [1], "b": [2]}),
-            "memory://test",
-        ).scanner()
-    ),
+    lambda: lance.write_dataset(
+        pa.table({"a": [1], "b": [2]}),
+        "memory://test",
+    ),
+    lambda: lance.write_dataset(
+        pa.table({"a": [1], "b": [2]}),
+        "memory://test",
+    ).scanner(),
     lambda: pd.DataFrame({"a": [1], "b": [2]}),
     lambda: pl.DataFrame({"a": [1], "b": [2]}),
     lambda: pl.LazyFrame({"a": [1], "b": [2]}),
@@ -121,7 +121,8 @@ impl Connection {
 
         let mode = Self::parse_create_mode_str(mode)?;
 
-        let batches = ArrowArrayStreamReader::from_pyarrow_bound(&data)?;
+        let batches: Box<dyn arrow::array::RecordBatchReader + Send> =
+            Box::new(ArrowArrayStreamReader::from_pyarrow_bound(&data)?);
 
         let mut builder = inner.create_table(name, batches).mode(mode);
 
@@ -6,7 +6,7 @@ use std::sync::{Arc, Mutex};
 use crate::{
     arrow::RecordBatchStream, connection::Connection, error::PythonErrorExt, table::Table,
 };
-use arrow::pyarrow::ToPyArrow;
+use arrow::pyarrow::{PyArrowType, ToPyArrow};
 use lancedb::{
     dataloader::permutation::{
         builder::{PermutationBuilder as LancePermutationBuilder, ShuffleStrategy},

@@ -23,10 +23,25 @@ use pyo3::{
 };
 use pyo3_async_runtimes::tokio::future_into_py;
 
+fn table_from_py<'a>(table: Bound<'a, PyAny>) -> PyResult<Bound<'a, Table>> {
+    if table.hasattr("_inner")? {
+        Ok(table.getattr("_inner")?.downcast_into::<Table>()?)
+    } else if table.hasattr("_table")? {
+        Ok(table
+            .getattr("_table")?
+            .getattr("_inner")?
+            .downcast_into::<Table>()?)
+    } else {
+        Err(PyRuntimeError::new_err(
+            "Provided table does not appear to be a Table or RemoteTable instance",
+        ))
+    }
+}
+
 /// Create a permutation builder for the given table
 #[pyo3::pyfunction]
 pub fn async_permutation_builder(table: Bound<'_, PyAny>) -> PyResult<PyAsyncPermutationBuilder> {
-    let table = table.getattr("_inner")?.downcast_into::<Table>()?;
+    let table = table_from_py(table)?;
     let inner_table = table.borrow().inner_ref()?.clone();
     let inner_builder = LancePermutationBuilder::new(inner_table);
 
@@ -250,10 +265,8 @@ impl PyPermutationReader {
         permutation_table: Option<Bound<'py, PyAny>>,
         split: u64,
     ) -> PyResult<Bound<'py, PyAny>> {
-        let base_table = base_table.getattr("_inner")?.downcast_into::<Table>()?;
-        let permutation_table = permutation_table
-            .map(|p| PyResult::Ok(p.getattr("_inner")?.downcast_into::<Table>()?))
-            .transpose()?;
+        let base_table = table_from_py(base_table)?;
+        let permutation_table = permutation_table.map(table_from_py).transpose()?;
 
         let base_table = base_table.borrow().inner_ref()?.base_table().clone();
         let permutation_table = permutation_table

@@ -328,4 +341,21 @@ impl PyPermutationReader {
             Ok(RecordBatchStream::new(stream))
         })
     }
+
+    #[pyo3(signature = (indices, *, selection=None))]
+    pub fn take_offsets<'py>(
+        slf: PyRef<'py, Self>,
+        indices: Vec<u64>,
+        selection: Option<Bound<'py, PyAny>>,
+    ) -> PyResult<Bound<'py, PyAny>> {
+        let selection = Self::parse_selection(selection)?;
+        let reader = slf.reader.clone();
+        future_into_py(slf.py(), async move {
+            let batch = reader
+                .take_offsets(&indices, selection)
+                .await
+                .infer_error()?;
+            Ok(PyArrowType(batch))
+        })
+    }
 }
@@ -7,6 +7,7 @@ use crate::{
     error::PythonErrorExt,
     index::{extract_index_params, IndexConfig},
     query::{Query, TakeQuery},
+    table::scannable::PyScannable,
 };
 use arrow::{
     datatypes::{DataType, Schema},

@@ -25,6 +26,8 @@ use pyo3::{
 };
 use pyo3_async_runtimes::tokio::future_into_py;
 
+mod scannable;
+
 /// Statistics about a compaction operation.
 #[pyclass(get_all)]
 #[derive(Clone, Debug)]

@@ -293,11 +296,10 @@ impl Table {
 
     pub fn add<'a>(
         self_: PyRef<'a, Self>,
-        data: Bound<'_, PyAny>,
+        data: PyScannable,
         mode: String,
     ) -> PyResult<Bound<'a, PyAny>> {
-        let batches = ArrowArrayStreamReader::from_pyarrow_bound(&data)?;
-        let mut op = self_.inner_ref()?.add(batches);
+        let mut op = self_.inner_ref()?.add(data);
         if mode == "append" {
             op = op.mode(AddDataMode::Append);
         } else if mode == "overwrite" {
python/src/table/scannable.rs (new file, 145 lines)
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright The LanceDB Authors
+
+use std::sync::Arc;
+
+use arrow::{
+    datatypes::{Schema, SchemaRef},
+    ffi_stream::ArrowArrayStreamReader,
+    pyarrow::{FromPyArrow, PyArrowType},
+};
+use futures::StreamExt;
+use lancedb::{
+    arrow::{SendableRecordBatchStream, SimpleRecordBatchStream},
+    data::scannable::Scannable,
+    Error,
+};
+use pyo3::{types::PyAnyMethods, FromPyObject, Py, PyAny, Python};
+
+/// Adapter that implements Scannable for a Python reader factory callable.
+///
+/// This holds a Python callable that returns a RecordBatchReader when called.
+/// For rescannable sources, the callable can be invoked multiple times to
+/// get fresh readers.
+pub struct PyScannable {
+    /// Python callable that returns a RecordBatchReader
+    reader_factory: Py<PyAny>,
+    schema: SchemaRef,
+    num_rows: Option<usize>,
+    rescannable: bool,
+}
+
+impl std::fmt::Debug for PyScannable {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("PyScannable")
+            .field("schema", &self.schema)
+            .field("num_rows", &self.num_rows)
+            .field("rescannable", &self.rescannable)
+            .finish()
+    }
+}
+
+impl Scannable for PyScannable {
+    fn schema(&self) -> SchemaRef {
+        self.schema.clone()
+    }
+
+    fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
+        let reader: Result<ArrowArrayStreamReader, Error> = {
+            Python::attach(|py| {
+                let result =
+                    self.reader_factory
+                        .call0(py)
+                        .map_err(|e| lancedb::Error::Runtime {
+                            message: format!("Python reader factory failed: {}", e),
+                        })?;
+                ArrowArrayStreamReader::from_pyarrow_bound(result.bind(py)).map_err(|e| {
+                    lancedb::Error::Runtime {
+                        message: format!("Failed to create Arrow reader from Python: {}", e),
+                    }
+                })
+            })
+        };
+
+        // Reader is blocking but stream is non-blocking, so we need to spawn a task to pull.
+        let (tx, rx) = tokio::sync::mpsc::channel(1);
+
+        let join_handle = tokio::task::spawn_blocking(move || {
+            let reader = match reader {
+                Ok(reader) => reader,
+                Err(e) => {
+                    let _ = tx.blocking_send(Err(e));
+                    return;
+                }
+            };
+            for batch in reader {
+                match batch {
+                    Ok(batch) => {
+                        if tx.blocking_send(Ok(batch)).is_err() {
+                            // Receiver dropped, stop processing
+                            break;
+                        }
+                    }
+                    Err(source) => {
+                        let _ = tx.blocking_send(Err(Error::Arrow { source }));
+                        break;
+                    }
+                }
+            }
+        });
+
+        let schema = self.schema.clone();
+        let stream = futures::stream::unfold(
+            (rx, Some(join_handle)),
+            |(mut rx, join_handle)| async move {
+                match rx.recv().await {
+                    Some(Ok(batch)) => Some((Ok(batch), (rx, join_handle))),
+                    Some(Err(e)) => Some((Err(e), (rx, join_handle))),
+                    None => {
+                        // Channel closed. Check if the task panicked — a panic
+                        // drops the sender without sending an error, so without
+                        // this check we'd silently return a truncated stream.
+                        if let Some(handle) = join_handle {
+                            if let Err(join_err) = handle.await {
+                                return Some((
+                                    Err(Error::Runtime {
+                                        message: format!("Reader task panicked: {}", join_err),
+                                    }),
+                                    (rx, None),
+                                ));
+                            }
+                        }
+                        None
+                    }
+                }
+            },
+        );
+        Box::pin(SimpleRecordBatchStream::new(stream.fuse(), schema))
+    }
+
+    fn num_rows(&self) -> Option<usize> {
+        self.num_rows
+    }
+
+    fn rescannable(&self) -> bool {
+        self.rescannable
+    }
+}
+
+impl<'py> FromPyObject<'py> for PyScannable {
+    fn extract_bound(ob: &pyo3::Bound<'py, PyAny>) -> pyo3::PyResult<Self> {
+        // Convert from Scannable dataclass.
+        let schema: PyArrowType<Schema> = ob.getattr("schema")?.extract()?;
+        let schema = Arc::new(schema.0);
+        let num_rows: Option<usize> = ob.getattr("num_rows")?.extract()?;
+        let rescannable: bool = ob.getattr("rescannable")?.extract()?;
+        let reader_factory: Py<PyAny> = ob.getattr("reader")?.unbind();
+
+        Ok(Self {
+            schema,
+            reader_factory,
+            num_rows,
+            rescannable,
+        })
+    }
+}
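From the `FromPyObject` impl above, the Python object handed to `PyScannable` is duck-typed: it needs `schema`, `num_rows`, and `rescannable` attributes plus a zero-argument `reader` callable returning a pyarrow RecordBatchReader. A minimal Python-side sketch; the dataclass name matches the "Scannable dataclass" comment in the extractor, but the field layout here is inferred from the Rust code, not taken from the Python diff:

    from dataclasses import dataclass
    from typing import Callable, Optional
    import pyarrow as pa

    @dataclass
    class Scannable:
        schema: pa.Schema
        reader: Callable[[], pa.RecordBatchReader]  # called (possibly repeatedly) for a fresh stream
        num_rows: Optional[int] = None
        rescannable: bool = False

    def to_scannable(tbl: pa.Table) -> Scannable:
        # An in-memory table can hand out fresh readers on demand,
        # so it is safe to mark it rescannable.
        return Scannable(
            schema=tbl.schema,
            reader=lambda: tbl.to_reader(),
            num_rows=tbl.num_rows,
            rescannable=True,
        )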
python/uv.lock (generated, new file, 5349 lines)
File diff suppressed because it is too large.
@@ -1,2 +1,2 @@
 [toolchain]
-channel = "1.90.0"
+channel = "1.91.0"
@@ -1,6 +1,6 @@
 [package]
 name = "lancedb"
-version = "0.26.0"
+version = "0.27.0-beta.2"
 edition.workspace = true
 description = "LanceDB: A serverless, low-latency vector database for AI applications"
 license.workspace = true

@@ -25,7 +25,9 @@ datafusion-catalog.workspace = true
 datafusion-common.workspace = true
 datafusion-execution.workspace = true
 datafusion-expr.workspace = true
+datafusion-functions = "51.0"
 datafusion-physical-expr.workspace = true
+datafusion-sql = "51.0"
 datafusion-physical-plan.workspace = true
 datafusion.workspace = true
 object_store = { workspace = true }
@@ -3,13 +3,12 @@
 
 use std::{iter::once, sync::Arc};
 
-use arrow_array::{Float64Array, Int32Array, RecordBatch, RecordBatchIterator, StringArray};
+use arrow_array::{Float64Array, Int32Array, RecordBatch, StringArray};
 use arrow_schema::{DataType, Field, Schema};
 use aws_config::Region;
 use aws_sdk_bedrockruntime::Client;
 use futures::StreamExt;
 use lancedb::{
-    arrow::IntoArrow,
     connect,
     embeddings::{bedrock::BedrockEmbeddingFunction, EmbeddingDefinition, EmbeddingFunction},
     query::{ExecutableQuery, QueryBase},

@@ -67,7 +66,7 @@ async fn main() -> Result<()> {
     Ok(())
 }
 
-fn make_data() -> impl IntoArrow {
+fn make_data() -> RecordBatch {
     let schema = Schema::new(vec![
         Field::new("id", DataType::Int32, true),
         Field::new("text", DataType::Utf8, false),

@@ -83,10 +82,9 @@ fn make_data() -> impl IntoArrow {
     ]);
     let price = Float64Array::from(vec![10.0, 50.0, 100.0, 30.0]);
     let schema = Arc::new(schema);
-    let rb = RecordBatch::try_new(
+    RecordBatch::try_new(
         schema.clone(),
         vec![Arc::new(id), Arc::new(text), Arc::new(price)],
     )
-    .unwrap();
-    Box::new(RecordBatchIterator::new(vec![Ok(rb)], schema))
+    .unwrap()
 }
@@ -3,12 +3,13 @@
 
 use std::sync::Arc;
 
-use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, RecordBatchReader, StringArray};
+use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, StringArray};
 use arrow_schema::{DataType, Field, Schema};
 
 use futures::TryStreamExt;
 use lance_index::scalar::FullTextSearchQuery;
 use lancedb::connection::Connection;
+
 use lancedb::index::scalar::FtsIndexBuilder;
 use lancedb::index::Index;
 use lancedb::query::{ExecutableQuery, QueryBase};

@@ -29,7 +30,7 @@ async fn main() -> Result<()> {
     Ok(())
 }
 
-fn create_some_records() -> Result<Box<dyn RecordBatchReader + Send>> {
+fn create_some_records() -> Result<Box<dyn arrow_array::RecordBatchReader + Send>> {
     const TOTAL: usize = 1000;
 
     let schema = Arc::new(Schema::new(vec![

@@ -66,7 +67,7 @@ fn create_some_records() -> Result<Box<dyn RecordBatchReader + Send>> {
 }
 
 async fn create_table(db: &Connection) -> Result<Table> {
-    let initial_data: Box<dyn RecordBatchReader + Send> = create_some_records()?;
+    let initial_data = create_some_records()?;
     let tbl = db.create_table("my_table", initial_data).execute().await?;
     Ok(tbl)
 }
@@ -1,14 +1,13 @@
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
-use arrow_array::{RecordBatch, RecordBatchIterator, StringArray};
+use arrow_array::{RecordBatch, StringArray};
 use arrow_schema::{DataType, Field, Schema};
 use futures::TryStreamExt;
 use lance_index::scalar::FullTextSearchQuery;
 use lancedb::index::scalar::FtsIndexBuilder;
 use lancedb::index::Index;
 use lancedb::{
-    arrow::IntoArrow,
     connect,
     embeddings::{
         sentence_transformers::SentenceTransformersEmbeddings, EmbeddingDefinition,

@@ -70,7 +69,7 @@ async fn main() -> Result<()> {
     Ok(())
 }
 
-fn make_data() -> impl IntoArrow {
+fn make_data() -> RecordBatch {
     let schema = Schema::new(vec![Field::new("facts", DataType::Utf8, false)]);
 
     let facts = StringArray::from_iter_values(vec![

@@ -101,8 +100,7 @@ fn make_data() -> impl IntoArrow {
         "The first chatbot was ELIZA, created in the 1960s.",
     ]);
     let schema = Arc::new(schema);
-    let rb = RecordBatch::try_new(schema.clone(), vec![Arc::new(facts)]).unwrap();
-    Box::new(RecordBatchIterator::new(vec![Ok(rb)], schema))
+    RecordBatch::try_new(schema.clone(), vec![Arc::new(facts)]).unwrap()
 }
 
 async fn create_index(table: &Table) -> Result<()> {
@@ -8,13 +8,12 @@
 use std::sync::Arc;
 
 use arrow_array::types::Float32Type;
-use arrow_array::{
-    FixedSizeListArray, Int32Array, RecordBatch, RecordBatchIterator, RecordBatchReader,
-};
+use arrow_array::{FixedSizeListArray, Int32Array, RecordBatch, RecordBatchIterator};
 use arrow_schema::{DataType, Field, Schema};
 
 use futures::TryStreamExt;
 use lancedb::connection::Connection;
+
 use lancedb::index::vector::IvfPqIndexBuilder;
 use lancedb::index::Index;
 use lancedb::query::{ExecutableQuery, QueryBase};

@@ -34,7 +33,7 @@ async fn main() -> Result<()> {
     Ok(())
 }
 
-fn create_some_records() -> Result<Box<dyn RecordBatchReader + Send>> {
+fn create_some_records() -> Result<Box<dyn arrow_array::RecordBatchReader + Send>> {
     const TOTAL: usize = 1000;
     const DIM: usize = 128;
 

@@ -73,9 +72,9 @@ fn create_some_records() -> Result<Box<dyn RecordBatchReader + Send>> {
 }
 
 async fn create_table(db: &Connection) -> Result<Table> {
-    let initial_data: Box<dyn RecordBatchReader + Send> = create_some_records()?;
+    let initial_data = create_some_records()?;
     let tbl = db
-        .create_table("my_table", Box::new(initial_data))
+        .create_table("my_table", initial_data)
         .execute()
         .await
         .unwrap();
@@ -5,11 +5,9 @@
 
 use std::{iter::once, sync::Arc};
 
-use arrow_array::{Float64Array, Int32Array, RecordBatch, RecordBatchIterator, StringArray};
-use arrow_schema::{DataType, Field, Schema};
+use arrow_array::{RecordBatch, StringArray};
 use futures::StreamExt;
 use lancedb::{
-    arrow::IntoArrow,
     connect,
     embeddings::{openai::OpenAIEmbeddingFunction, EmbeddingDefinition, EmbeddingFunction},
     query::{ExecutableQuery, QueryBase},

@@ -64,26 +62,20 @@ async fn main() -> Result<()> {
 }
 // --8<-- [end:openai_embeddings]
 
-fn make_data() -> impl IntoArrow {
-    let schema = Schema::new(vec![
-        Field::new("id", DataType::Int32, true),
-        Field::new("text", DataType::Utf8, false),
-        Field::new("price", DataType::Float64, false),
-    ]);
-
-    let id = Int32Array::from(vec![1, 2, 3, 4]);
-    let text = StringArray::from_iter_values(vec![
-        "Black T-Shirt",
-        "Leather Jacket",
-        "Winter Parka",
-        "Hooded Sweatshirt",
-    ]);
-    let price = Float64Array::from(vec![10.0, 50.0, 100.0, 30.0]);
-    let schema = Arc::new(schema);
-    let rb = RecordBatch::try_new(
-        schema.clone(),
-        vec![Arc::new(id), Arc::new(text), Arc::new(price)],
+fn make_data() -> RecordBatch {
+    arrow_array::record_batch!(
+        ("id", Int32, [1, 2, 3, 4]),
+        (
+            "text",
+            Utf8,
+            [
+                "Black T-Shirt",
+                "Leather Jacket",
+                "Winter Parka",
+                "Hooded Sweatshirt"
+            ]
+        ),
+        ("price", Float64, [10.0, 50.0, 100.0, 30.0])
     )
-    .unwrap();
-    Box::new(RecordBatchIterator::new(vec![Ok(rb)], schema))
+    .unwrap()
 }
@@ -3,11 +3,10 @@
 
 use std::{iter::once, sync::Arc};
 
-use arrow_array::{RecordBatch, RecordBatchIterator, StringArray};
+use arrow_array::{RecordBatch, StringArray};
 use arrow_schema::{DataType, Field, Schema};
 use futures::StreamExt;
 use lancedb::{
-    arrow::IntoArrow,
     connect,
     embeddings::{
         sentence_transformers::SentenceTransformersEmbeddings, EmbeddingDefinition,

@@ -59,7 +58,7 @@ async fn main() -> Result<()> {
     Ok(())
 }
 
-fn make_data() -> impl IntoArrow {
+fn make_data() -> RecordBatch {
     let schema = Schema::new(vec![Field::new("facts", DataType::Utf8, false)]);
 
     let facts = StringArray::from_iter_values(vec![

@@ -90,6 +89,5 @@ fn make_data() -> impl IntoArrow {
         "The first chatbot was ELIZA, created in the 1960s.",
     ]);
     let schema = Arc::new(schema);
-    let rb = RecordBatch::try_new(schema.clone(), vec![Arc::new(facts)]).unwrap();
-    Box::new(RecordBatchIterator::new(vec![Ok(rb)], schema))
+    RecordBatch::try_new(schema.clone(), vec![Arc::new(facts)]).unwrap()
 }
@@ -8,11 +8,9 @@
 use std::sync::Arc;
 
 use arrow_array::types::Float32Type;
-use arrow_array::{FixedSizeListArray, Int32Array, RecordBatch, RecordBatchIterator};
+use arrow_array::{FixedSizeListArray, Int32Array, RecordBatch};
 use arrow_schema::{DataType, Field, Schema};
 use futures::TryStreamExt;
-
-use lancedb::arrow::IntoArrow;
 use lancedb::connection::Connection;
 use lancedb::index::Index;
 use lancedb::query::{ExecutableQuery, QueryBase};

@@ -59,7 +57,7 @@ async fn open_with_existing_tbl() -> Result<()> {
     Ok(())
 }
 
-fn create_some_records() -> Result<impl IntoArrow> {
+fn create_some_records() -> Result<RecordBatch> {
     const TOTAL: usize = 1000;
     const DIM: usize = 128;
 

@@ -76,25 +74,18 @@ fn create_some_records() -> Result<impl IntoArrow> {
     ]));
 
     // Create a RecordBatch stream.
-    let batches = RecordBatchIterator::new(
-        vec![RecordBatch::try_new(
-            schema.clone(),
-            vec![
-                Arc::new(Int32Array::from_iter_values(0..TOTAL as i32)),
-                Arc::new(
-                    FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
-                        (0..TOTAL).map(|_| Some(vec![Some(1.0); DIM])),
-                        DIM as i32,
-                    ),
-                ),
-            ],
-        )
-        .unwrap()]
-        .into_iter()
-        .map(Ok),
+    Ok(RecordBatch::try_new(
         schema.clone(),
-    );
-    Ok(Box::new(batches))
+        vec![
+            Arc::new(Int32Array::from_iter_values(0..TOTAL as i32)),
+            Arc::new(
+                FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
+                    (0..TOTAL).map(|_| Some(vec![Some(1.0); DIM])),
+                    DIM as i32,
+                ),
+            ),
+        ],
+    )?)
 }
 
 async fn create_table(db: &Connection) -> Result<LanceDbTable> {
@@ -155,9 +155,7 @@ impl IntoArrowStream for SendableRecordBatchStream {
 impl IntoArrowStream for datafusion_physical_plan::SendableRecordBatchStream {
     fn into_arrow(self) -> Result<SendableRecordBatchStream> {
         let schema = self.schema();
-        let stream = self.map_err(|df_err| Error::Runtime {
-            message: df_err.to_string(),
-        });
+        let stream = self.map_err(|df_err| df_err.into());
         Ok(Box::pin(SimpleRecordBatchStream::new(stream, schema)))
     }
 }
@@ -6,8 +6,8 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use arrow_array::RecordBatchReader;
|
use arrow_array::RecordBatch;
|
||||||
use arrow_schema::{Field, SchemaRef};
|
use arrow_schema::SchemaRef;
|
||||||
use lance::dataset::ReadParams;
|
use lance::dataset::ReadParams;
|
||||||
use lance_namespace::models::{
|
use lance_namespace::models::{
|
||||||
CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
|
CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
|
||||||
@@ -17,24 +17,20 @@ use lance_namespace::models::{
|
|||||||
#[cfg(feature = "aws")]
|
#[cfg(feature = "aws")]
|
||||||
use object_store::aws::AwsCredential;
|
use object_store::aws::AwsCredential;
|
||||||
|
|
||||||
use crate::arrow::{IntoArrow, IntoArrowStream, SendableRecordBatchStream};
|
use crate::connection::create_table::CreateTableBuilder;
|
||||||
use crate::database::listing::{
|
use crate::data::scannable::Scannable;
|
||||||
ListingDatabase, OPT_NEW_TABLE_STORAGE_VERSION, OPT_NEW_TABLE_V2_MANIFEST_PATHS,
|
use crate::database::listing::ListingDatabase;
|
||||||
};
|
|
||||||
use crate::database::{
|
use crate::database::{
|
||||||
CloneTableRequest, CreateTableData, CreateTableMode, CreateTableRequest, Database,
|
CloneTableRequest, Database, DatabaseOptions, OpenTableRequest, ReadConsistency,
|
||||||
DatabaseOptions, OpenTableRequest, ReadConsistency, TableNamesRequest,
|
TableNamesRequest,
|
||||||
};
|
|
||||||
use crate::embeddings::{
|
|
||||||
EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry, MemoryRegistry, WithEmbeddings,
|
|
||||||
};
|
};
|
||||||
|
use crate::embeddings::{EmbeddingRegistry, MemoryRegistry};
|
||||||
use crate::error::{Error, Result};
|
use crate::error::{Error, Result};
|
||||||
#[cfg(feature = "remote")]
|
#[cfg(feature = "remote")]
|
||||||
use crate::remote::{
|
use crate::remote::{
|
||||||
client::ClientConfig,
|
client::ClientConfig,
|
||||||
db::{OPT_REMOTE_API_KEY, OPT_REMOTE_HOST_OVERRIDE, OPT_REMOTE_REGION},
|
db::{OPT_REMOTE_API_KEY, OPT_REMOTE_HOST_OVERRIDE, OPT_REMOTE_REGION},
|
||||||
};
|
};
|
||||||
use crate::table::{TableDefinition, WriteOptions};
|
|
||||||
use crate::Table;
|
use crate::Table;
|
||||||
use lance::io::ObjectStoreParams;
|
use lance::io::ObjectStoreParams;
|
||||||
pub use lance_encoding::version::LanceFileVersion;
|
pub use lance_encoding::version::LanceFileVersion;
|
||||||
@@ -42,6 +38,8 @@ pub use lance_encoding::version::LanceFileVersion;
|
|||||||
use lance_io::object_store::StorageOptions;
|
use lance_io::object_store::StorageOptions;
|
||||||
use lance_io::object_store::{StorageOptionsAccessor, StorageOptionsProvider};
|
use lance_io::object_store::{StorageOptionsAccessor, StorageOptionsProvider};
|
||||||
|
|
||||||
|
mod create_table;
|
||||||
|
|
||||||
fn merge_storage_options(
|
fn merge_storage_options(
|
||||||
store_params: &mut ObjectStoreParams,
|
store_params: &mut ObjectStoreParams,
|
||||||
pairs: impl IntoIterator<Item = (String, String)>,
|
pairs: impl IntoIterator<Item = (String, String)>,
|
||||||
@@ -116,337 +114,6 @@ impl TableNamesBuilder {
     }
 }
 
-pub struct NoData {}
-
-impl IntoArrow for NoData {
-    fn into_arrow(self) -> Result<Box<dyn arrow_array::RecordBatchReader + Send>> {
-        unreachable!("NoData should never be converted to Arrow")
-    }
-}
-
-// Stores the value given from the initial CreateTableBuilder::new call
-// and defers errors until `execute` is called
-enum CreateTableBuilderInitialData {
-    None,
-    Iterator(Result<Box<dyn RecordBatchReader + Send>>),
-    Stream(Result<SendableRecordBatchStream>),
-}
-
-/// A builder for configuring a [`Connection::create_table`] operation
-pub struct CreateTableBuilder<const HAS_DATA: bool> {
-    parent: Arc<dyn Database>,
-    embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
-    embedding_registry: Arc<dyn EmbeddingRegistry>,
-    request: CreateTableRequest,
-    // This is a bit clumsy but we defer errors until `execute` is called
-    // to maintain backwards compatibility
-    data: CreateTableBuilderInitialData,
-}
-
-// Builder methods that only apply when we have initial data
-impl CreateTableBuilder<true> {
-    fn new<T: IntoArrow>(
-        parent: Arc<dyn Database>,
-        name: String,
-        data: T,
-        embedding_registry: Arc<dyn EmbeddingRegistry>,
-    ) -> Self {
-        let dummy_schema = Arc::new(arrow_schema::Schema::new(Vec::<Field>::default()));
-        Self {
-            parent,
-            request: CreateTableRequest::new(
-                name,
-                CreateTableData::Empty(TableDefinition::new_from_schema(dummy_schema)),
-            ),
-            embeddings: Vec::new(),
-            embedding_registry,
-            data: CreateTableBuilderInitialData::Iterator(data.into_arrow()),
-        }
-    }
-
-    fn new_streaming<T: IntoArrowStream>(
-        parent: Arc<dyn Database>,
-        name: String,
-        data: T,
-        embedding_registry: Arc<dyn EmbeddingRegistry>,
-    ) -> Self {
-        let dummy_schema = Arc::new(arrow_schema::Schema::new(Vec::<Field>::default()));
-        Self {
-            parent,
-            request: CreateTableRequest::new(
-                name,
-                CreateTableData::Empty(TableDefinition::new_from_schema(dummy_schema)),
-            ),
-            embeddings: Vec::new(),
-            embedding_registry,
-            data: CreateTableBuilderInitialData::Stream(data.into_arrow()),
-        }
-    }
-
-    /// Execute the create table operation
-    pub async fn execute(self) -> Result<Table> {
-        let embedding_registry = self.embedding_registry.clone();
-        let parent = self.parent.clone();
-        let request = self.into_request()?;
-        Ok(Table::new_with_embedding_registry(
-            parent.create_table(request).await?,
-            parent,
-            embedding_registry,
-        ))
-    }
-
-    fn into_request(self) -> Result<CreateTableRequest> {
-        if self.embeddings.is_empty() {
-            match self.data {
-                CreateTableBuilderInitialData::Iterator(maybe_iter) => {
-                    let data = maybe_iter?;
-                    Ok(CreateTableRequest {
-                        data: CreateTableData::Data(data),
-                        ..self.request
-                    })
-                }
-                CreateTableBuilderInitialData::None => {
-                    unreachable!("No data provided for CreateTableBuilder<true>")
-                }
-                CreateTableBuilderInitialData::Stream(maybe_stream) => {
-                    let data = maybe_stream?;
-                    Ok(CreateTableRequest {
-                        data: CreateTableData::StreamingData(data),
-                        ..self.request
-                    })
-                }
-            }
-        } else {
-            let CreateTableBuilderInitialData::Iterator(maybe_iter) = self.data else {
-                return Err(Error::NotSupported { message: "Creating a table with embeddings is currently not support when the input is streaming".to_string() });
-            };
-            let data = maybe_iter?;
-            let data = Box::new(WithEmbeddings::new(data, self.embeddings));
-            Ok(CreateTableRequest {
-                data: CreateTableData::Data(data),
-                ..self.request
-            })
-        }
-    }
-}
-
-// Builder methods that only apply when we do not have initial data
-impl CreateTableBuilder<false> {
-    fn new(
-        parent: Arc<dyn Database>,
-        name: String,
-        schema: SchemaRef,
-        embedding_registry: Arc<dyn EmbeddingRegistry>,
-    ) -> Self {
-        let table_definition = TableDefinition::new_from_schema(schema);
-        Self {
-            parent,
-            request: CreateTableRequest::new(name, CreateTableData::Empty(table_definition)),
-            data: CreateTableBuilderInitialData::None,
-            embeddings: Vec::default(),
-            embedding_registry,
-        }
-    }
-
-    /// Execute the create table operation
-    pub async fn execute(self) -> Result<Table> {
-        let parent = self.parent.clone();
-        let embedding_registry = self.embedding_registry.clone();
-        let request = self.into_request()?;
-        Ok(Table::new_with_embedding_registry(
-            parent.create_table(request).await?,
-            parent,
-            embedding_registry,
-        ))
-    }
-
-    fn into_request(self) -> Result<CreateTableRequest> {
-        if self.embeddings.is_empty() {
-            return Ok(self.request);
-        }
-
-        let CreateTableData::Empty(table_def) = self.request.data else {
-            unreachable!("CreateTableBuilder<false> should always have Empty data")
-        };
-
-        let schema = table_def.schema.clone();
-        let empty_batch = arrow_array::RecordBatch::new_empty(schema.clone());
-
-        let reader = Box::new(std::iter::once(Ok(empty_batch)).collect::<Vec<_>>());
-        let reader = arrow_array::RecordBatchIterator::new(reader.into_iter(), schema);
-        let with_embeddings = WithEmbeddings::new(reader, self.embeddings);
-        let table_definition = with_embeddings.table_definition()?;
-
-        Ok(CreateTableRequest {
-            data: CreateTableData::Empty(table_definition),
-            ..self.request
-        })
-    }
-}
-
-impl<const HAS_DATA: bool> CreateTableBuilder<HAS_DATA> {
-    /// Set the mode for creating the table
-    ///
-    /// This controls what happens if a table with the given name already exists
-    pub fn mode(mut self, mode: CreateTableMode) -> Self {
-        self.request.mode = mode;
-        self
-    }
-
-    /// Apply the given write options when writing the initial data
-    pub fn write_options(mut self, write_options: WriteOptions) -> Self {
-        self.request.write_options = write_options;
-        self
-    }
-
-    /// Set an option for the storage layer.
-    ///
-    /// Options already set on the connection will be inherited by the table,
-    /// but can be overridden here.
-    ///
-    /// See available options at <https://lancedb.com/docs/storage/>
-    pub fn storage_option(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
-        let store_params = self
-            .request
-            .write_options
-            .lance_write_params
-            .get_or_insert(Default::default())
-            .store_params
-            .get_or_insert(Default::default());
-        merge_storage_options(store_params, [(key.into(), value.into())]);
-        self
-    }
-
-    /// Set multiple options for the storage layer.
-    ///
-    /// Options already set on the connection will be inherited by the table,
-    /// but can be overridden here.
-    ///
-    /// See available options at <https://lancedb.com/docs/storage/>
-    pub fn storage_options(
-        mut self,
-        pairs: impl IntoIterator<Item = (impl Into<String>, impl Into<String>)>,
-    ) -> Self {
-        let store_params = self
-            .request
-            .write_options
-            .lance_write_params
-            .get_or_insert(Default::default())
-            .store_params
-            .get_or_insert(Default::default());
-        let updates = pairs
-            .into_iter()
-            .map(|(key, value)| (key.into(), value.into()));
-        merge_storage_options(store_params, updates);
-        self
-    }
-
-    /// Add an embedding definition to the table.
-    ///
-    /// The `embedding_name` must match the name of an embedding function that
-    /// was previously registered with the connection's [`EmbeddingRegistry`].
-    pub fn add_embedding(mut self, definition: EmbeddingDefinition) -> Result<Self> {
-        // Early verification of the embedding name
-        let embedding_func = self
-            .embedding_registry
-            .get(&definition.embedding_name)
-            .ok_or_else(|| Error::EmbeddingFunctionNotFound {
-                name: definition.embedding_name.clone(),
-                reason: "No embedding function found in the connection's embedding_registry"
-                    .to_string(),
-            })?;
-
-        self.embeddings.push((definition, embedding_func));
-        Ok(self)
-    }
-
-    /// Set whether to use V2 manifest paths for the table. (default: false)
-    ///
-    /// These paths provide more efficient opening of tables with many
-    /// versions on object stores.
-    ///
-    /// <div class="warning">Turning this on will make the dataset unreadable
-    /// for older versions of LanceDB (prior to 0.10.0).</div>
-    ///
-    /// To migrate an existing dataset, instead use the
-    /// [[NativeTable::migrate_manifest_paths_v2]].
-    ///
-    /// This has no effect in LanceDB Cloud.
-    #[deprecated(since = "0.15.1", note = "Use `database_options` instead")]
-    pub fn enable_v2_manifest_paths(mut self, use_v2_manifest_paths: bool) -> Self {
-        let store_params = self
-            .request
-            .write_options
-            .lance_write_params
-            .get_or_insert_with(Default::default)
-            .store_params
-            .get_or_insert_with(Default::default);
-        let value = if use_v2_manifest_paths {
-            "true".to_string()
-        } else {
-            "false".to_string()
-        };
-        merge_storage_options(
-            store_params,
-            [(OPT_NEW_TABLE_V2_MANIFEST_PATHS.to_string(), value)],
-        );
-        self
-    }
-
-    /// Set the data storage version.
-    ///
-    /// The default is `LanceFileVersion::Stable`.
-    #[deprecated(since = "0.15.1", note = "Use `database_options` instead")]
-    pub fn data_storage_version(mut self, data_storage_version: LanceFileVersion) -> Self {
-        let store_params = self
-            .request
-            .write_options
-            .lance_write_params
-            .get_or_insert_with(Default::default)
-            .store_params
-            .get_or_insert_with(Default::default);
-        merge_storage_options(
-            store_params,
-            [(
-                OPT_NEW_TABLE_STORAGE_VERSION.to_string(),
-                data_storage_version.to_string(),
-            )],
-        );
-        self
-    }
-
-    /// Set the namespace for the table
-    pub fn namespace(mut self, namespace: Vec<String>) -> Self {
-        self.request.namespace = namespace;
-        self
-    }
-
-    /// Set a custom location for the table.
-    ///
-    /// If not set, the database will derive a location from its URI and the table name.
-    /// This is useful when integrating with namespace systems that manage table locations.
-    pub fn location(mut self, location: impl Into<String>) -> Self {
-        self.request.location = Some(location.into());
-        self
-    }
-
-    /// Set a storage options provider for automatic credential refresh.
-    ///
-    /// This allows tables to automatically refresh cloud storage credentials
-    /// when they expire, enabling long-running operations on remote storage.
-    pub fn storage_options_provider(mut self, provider: Arc<dyn StorageOptionsProvider>) -> Self {
-        let store_params = self
-            .request
-            .write_options
-            .lance_write_params
-            .get_or_insert(Default::default())
-            .store_params
-            .get_or_insert(Default::default());
-        set_storage_options_provider(store_params, provider);
-        self
-    }
-}
-
 #[derive(Clone, Debug)]
 pub struct OpenTableBuilder {
     parent: Arc<dyn Database>,
@@ -684,35 +351,17 @@ impl Connection {
     ///
     /// * `name` - The name of the table
     /// * `initial_data` - The initial data to write to the table
-    pub fn create_table<T: IntoArrow>(
+    pub fn create_table<T: Scannable + 'static>(
         &self,
         name: impl Into<String>,
         initial_data: T,
-    ) -> CreateTableBuilder<true> {
-        CreateTableBuilder::<true>::new(
+    ) -> CreateTableBuilder {
+        let initial_data = Box::new(initial_data);
+        CreateTableBuilder::new(
             self.internal.clone(),
+            self.embedding_registry.clone(),
             name.into(),
             initial_data,
-            self.embedding_registry.clone(),
-        )
-    }
-
-    /// Create a new table from a stream of data
-    ///
-    /// # Parameters
-    ///
-    /// * `name` - The name of the table
-    /// * `initial_data` - The initial data to write to the table
-    pub fn create_table_streaming<T: IntoArrowStream>(
-        &self,
-        name: impl Into<String>,
-        initial_data: T,
-    ) -> CreateTableBuilder<true> {
-        CreateTableBuilder::<true>::new_streaming(
-            self.internal.clone(),
-            name.into(),
-            initial_data,
-            self.embedding_registry.clone(),
         )
     }
 
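Because `RecordBatch`, `Vec<RecordBatch>`, boxed readers, and `SendableRecordBatchStream` all implement `Scannable` (see the new `scannable.rs` below), this single `create_table` entry point replaces the old `create_table`/`create_table_streaming` pair. A hedged usage sketch; `db` is an open `Connection`, `stream` is assumed to be a `SendableRecordBatchStream` already in scope, and the table names are illustrative:

use arrow_array::record_batch;

// In-memory input: a RecordBatch is Scannable and rescannable, so the
// pipeline is free to retry a failed write by rescanning it.
let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
db.create_table("ids", batch).execute().await.unwrap();

// Streaming input goes through the same builder; there is no longer a
// separate create_table_streaming method.
db.create_table("ids_streamed", stream).execute().await.unwrap();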
@@ -726,13 +375,9 @@ impl Connection {
         &self,
         name: impl Into<String>,
         schema: SchemaRef,
-    ) -> CreateTableBuilder<false> {
-        CreateTableBuilder::<false>::new(
-            self.internal.clone(),
-            name.into(),
-            schema,
-            self.embedding_registry.clone(),
-        )
+    ) -> CreateTableBuilder {
+        let empty_batch = RecordBatch::new_empty(schema);
+        self.create_table(name, empty_batch)
     }
 
     /// Open an existing table in the database
@@ -1349,20 +994,11 @@ mod test_utils {
 
 #[cfg(test)]
 mod tests {
-    use crate::database::listing::{ListingDatabaseOptions, NewTableConfig};
-    use crate::query::QueryBase;
-    use crate::query::{ExecutableQuery, QueryExecutionOptions};
-    use crate::test_utils::connection::new_test_connection;
-    use arrow::compute::concat_batches;
-    use arrow_array::RecordBatchReader;
     use arrow_schema::{DataType, Field, Schema};
-    use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
-    use futures::{stream, TryStreamExt};
-    use lance_core::error::{ArrowResult, DataFusionResult};
     use lance_testing::datagen::{BatchGenerator, IncrementingInt32};
     use tempfile::tempdir;
 
-    use crate::arrow::SimpleRecordBatchStream;
+    use crate::test_utils::connection::new_test_connection;
 
     use super::*;
 
@@ -1478,139 +1114,6 @@ mod tests {
         assert_eq!(tables, vec!["table1".to_owned()]);
     }
 
-    fn make_data() -> Box<dyn RecordBatchReader + Send + 'static> {
-        let id = Box::new(IncrementingInt32::new().named("id".to_string()));
-        Box::new(BatchGenerator::new().col(id).batches(10, 2000))
-    }
-
-    #[tokio::test]
-    async fn test_create_table_v2() {
-        let tmp_dir = tempdir().unwrap();
-        let uri = tmp_dir.path().to_str().unwrap();
-        let db = connect(uri)
-            .database_options(&ListingDatabaseOptions {
-                new_table_config: NewTableConfig {
-                    data_storage_version: Some(LanceFileVersion::Legacy),
-                    ..Default::default()
-                },
-                ..Default::default()
-            })
-            .execute()
-            .await
-            .unwrap();
-
-        let tbl = db
-            .create_table("v1_test", make_data())
-            .execute()
-            .await
-            .unwrap();
-
-        // In v1 the row group size will trump max_batch_length
-        let batches = tbl
-            .query()
-            .limit(20000)
-            .execute_with_options(QueryExecutionOptions {
-                max_batch_length: 50000,
-                ..Default::default()
-            })
-            .await
-            .unwrap()
-            .try_collect::<Vec<_>>()
-            .await
-            .unwrap();
-        assert_eq!(batches.len(), 20);
-
-        let db = connect(uri)
-            .database_options(&ListingDatabaseOptions {
-                new_table_config: NewTableConfig {
-                    data_storage_version: Some(LanceFileVersion::Stable),
-                    ..Default::default()
-                },
-                ..Default::default()
-            })
-            .execute()
-            .await
-            .unwrap();
-
-        let tbl = db
-            .create_table("v2_test", make_data())
-            .execute()
-            .await
-            .unwrap();
-
-        // In v2 the page size is much bigger than 50k so we should get a single batch
-        let batches = tbl
-            .query()
-            .execute_with_options(QueryExecutionOptions {
-                max_batch_length: 50000,
-                ..Default::default()
-            })
-            .await
-            .unwrap()
-            .try_collect::<Vec<_>>()
-            .await
-            .unwrap();
-
-        assert_eq!(batches.len(), 1);
-    }
-
-    #[tokio::test]
-    async fn test_create_table_streaming() {
-        let tmp_dir = tempdir().unwrap();
-
-        let uri = tmp_dir.path().to_str().unwrap();
-        let db = connect(uri).execute().await.unwrap();
-
-        let batches = make_data().collect::<ArrowResult<Vec<_>>>().unwrap();
-
-        let schema = batches.first().unwrap().schema();
-        let one_batch = concat_batches(&schema, batches.iter()).unwrap();
-
-        let ldb_stream = stream::iter(batches.clone().into_iter().map(Result::Ok));
-        let ldb_stream: SendableRecordBatchStream =
-            Box::pin(SimpleRecordBatchStream::new(ldb_stream, schema.clone()));
-
-        let tbl1 = db
-            .create_table_streaming("one", ldb_stream)
-            .execute()
-            .await
-            .unwrap();
-
-        let df_stream = stream::iter(batches.into_iter().map(DataFusionResult::Ok));
-        let df_stream: datafusion_physical_plan::SendableRecordBatchStream =
-            Box::pin(RecordBatchStreamAdapter::new(schema.clone(), df_stream));
-
-        let tbl2 = db
-            .create_table_streaming("two", df_stream)
-            .execute()
-            .await
-            .unwrap();
-
-        let tbl1_data = tbl1
-            .query()
-            .execute()
-            .await
-            .unwrap()
-            .try_collect::<Vec<_>>()
-            .await
-            .unwrap();
-
-        let tbl1_data = concat_batches(&schema, tbl1_data.iter()).unwrap();
-        assert_eq!(tbl1_data, one_batch);
-
-        let tbl2_data = tbl2
-            .query()
-            .execute()
-            .await
-            .unwrap()
-            .try_collect::<Vec<_>>()
-            .await
-            .unwrap();
-
-        let tbl2_data = concat_batches(&schema, tbl2_data.iter()).unwrap();
-        assert_eq!(tbl2_data, one_batch);
-    }
-
     #[tokio::test]
     async fn drop_table() {
         let tc = new_test_connection().await.unwrap();
@@ -1640,41 +1143,6 @@ mod tests {
         assert_eq!(tables.len(), 0);
     }
 
-    #[tokio::test]
-    async fn test_create_table_already_exists() {
-        let tmp_dir = tempdir().unwrap();
-        let uri = tmp_dir.path().to_str().unwrap();
-        let db = connect(uri).execute().await.unwrap();
-        let schema = Arc::new(Schema::new(vec![Field::new("x", DataType::Int32, false)]));
-        db.create_empty_table("test", schema.clone())
-            .execute()
-            .await
-            .unwrap();
-        // TODO: None of the open table options are "inspectable" right now but once one is we
-        // should assert we are passing these options in correctly
-        db.create_empty_table("test", schema)
-            .mode(CreateTableMode::exist_ok(|mut req| {
-                req.index_cache_size = Some(16);
-                req
-            }))
-            .execute()
-            .await
-            .unwrap();
-        let other_schema = Arc::new(Schema::new(vec![Field::new("y", DataType::Int32, false)]));
-        assert!(db
-            .create_empty_table("test", other_schema.clone())
-            .execute()
-            .await
-            .is_err());
-        let overwritten = db
-            .create_empty_table("test", other_schema.clone())
-            .mode(CreateTableMode::Overwrite)
-            .execute()
-            .await
-            .unwrap();
-        assert_eq!(other_schema, overwritten.schema().await.unwrap());
-    }
-
     #[tokio::test]
     async fn test_clone_table() {
         let tmp_dir = tempdir().unwrap();
@@ -1685,7 +1153,8 @@ mod tests {
         let mut batch_gen = BatchGenerator::new()
             .col(Box::new(IncrementingInt32::new().named("id")))
             .col(Box::new(IncrementingInt32::new().named("value")));
-        let reader = batch_gen.batches(5, 100);
+        let reader: Box<dyn arrow_array::RecordBatchReader + Send> =
+            Box::new(batch_gen.batches(5, 100));
 
         let source_table = db
             .create_table("source_table", reader)
@@ -1720,128 +1189,4 @@ mod tests {
         let cloned_count = cloned_table.count_rows(None).await.unwrap();
         assert_eq!(source_count, cloned_count);
     }
-
-    #[tokio::test]
-    async fn test_create_empty_table_with_embeddings() {
-        use crate::embeddings::{EmbeddingDefinition, EmbeddingFunction};
-        use arrow_array::{
-            Array, FixedSizeListArray, Float32Array, RecordBatch, RecordBatchIterator, StringArray,
-        };
-        use std::borrow::Cow;
-
-        #[derive(Debug, Clone)]
-        struct MockEmbedding {
-            dim: usize,
-        }
-
-        impl EmbeddingFunction for MockEmbedding {
-            fn name(&self) -> &str {
-                "test_embedding"
-            }
-
-            fn source_type(&self) -> Result<Cow<'_, DataType>> {
-                Ok(Cow::Owned(DataType::Utf8))
-            }
-
-            fn dest_type(&self) -> Result<Cow<'_, DataType>> {
-                Ok(Cow::Owned(DataType::new_fixed_size_list(
-                    DataType::Float32,
-                    self.dim as i32,
-                    true,
-                )))
-            }
-
-            fn compute_source_embeddings(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
-                let len = source.len();
-                let values = vec![1.0f32; len * self.dim];
-                let values = Arc::new(Float32Array::from(values));
-                let field = Arc::new(Field::new("item", DataType::Float32, true));
-                Ok(Arc::new(FixedSizeListArray::new(
-                    field,
-                    self.dim as i32,
-                    values,
-                    None,
-                )))
-            }
-
-            fn compute_query_embeddings(&self, _input: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
-                unimplemented!()
-            }
-        }
-
-        let tmp_dir = tempdir().unwrap();
-        let uri = tmp_dir.path().to_str().unwrap();
-        let db = connect(uri).execute().await.unwrap();
-
-        let embed_func = Arc::new(MockEmbedding { dim: 128 });
-        db.embedding_registry()
-            .register("test_embedding", embed_func.clone())
-            .unwrap();
-
-        let schema = Arc::new(Schema::new(vec![Field::new("name", DataType::Utf8, true)]));
-        let ed = EmbeddingDefinition {
-            source_column: "name".to_owned(),
-            dest_column: Some("name_embedding".to_owned()),
-            embedding_name: "test_embedding".to_owned(),
-        };
-
-        let table = db
-            .create_empty_table("test", schema)
-            .mode(CreateTableMode::Overwrite)
-            .add_embedding(ed)
-            .unwrap()
-            .execute()
-            .await
-            .unwrap();
-
-        let table_schema = table.schema().await.unwrap();
-        assert!(table_schema.column_with_name("name").is_some());
-        assert!(table_schema.column_with_name("name_embedding").is_some());
-
-        let embedding_field = table_schema.field_with_name("name_embedding").unwrap();
-        assert_eq!(
-            embedding_field.data_type(),
-            &DataType::new_fixed_size_list(DataType::Float32, 128, true)
-        );
-
-        let input_schema = Arc::new(Schema::new(vec![Field::new("name", DataType::Utf8, true)]));
-        let input_batch = RecordBatch::try_new(
-            input_schema.clone(),
-            vec![Arc::new(StringArray::from(vec![
-                Some("Alice"),
-                Some("Bob"),
-                Some("Charlie"),
-            ]))],
-        )
-        .unwrap();
-
-        let input_reader = Box::new(RecordBatchIterator::new(
-            vec![Ok(input_batch)].into_iter(),
-            input_schema,
-        ));
-
-        table.add(input_reader).execute().await.unwrap();
-
-        let results = table
-            .query()
-            .execute()
-            .await
-            .unwrap()
-            .try_collect::<Vec<_>>()
-            .await
-            .unwrap();
-
-        assert_eq!(results.len(), 1);
-        let batch = &results[0];
-        assert_eq!(batch.num_rows(), 3);
-        assert!(batch.column_by_name("name_embedding").is_some());
-
-        let embedding_col = batch
-            .column_by_name("name_embedding")
-            .unwrap()
-            .as_any()
-            .downcast_ref::<FixedSizeListArray>()
-            .unwrap();
-        assert_eq!(embedding_col.len(), 3);
-    }
 }

rust/lancedb/src/connection/create_table.rs (new file, 612 lines)
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright The LanceDB Authors
+
+use std::sync::Arc;
+
+use lance_io::object_store::StorageOptionsProvider;
+
+use crate::{
+    connection::{merge_storage_options, set_storage_options_provider},
+    data::scannable::{Scannable, WithEmbeddingsScannable},
+    database::{CreateTableMode, CreateTableRequest, Database},
+    embeddings::{EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry},
+    table::WriteOptions,
+    Error, Result, Table,
+};
+
+pub struct CreateTableBuilder {
+    parent: Arc<dyn Database>,
+    embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
+    embedding_registry: Arc<dyn EmbeddingRegistry>,
+    request: CreateTableRequest,
+}
+
+impl CreateTableBuilder {
+    pub(super) fn new(
+        parent: Arc<dyn Database>,
+        embedding_registry: Arc<dyn EmbeddingRegistry>,
+        name: String,
+        data: Box<dyn Scannable>,
+    ) -> Self {
+        Self {
+            parent,
+            embeddings: Vec::new(),
+            embedding_registry,
+            request: CreateTableRequest::new(name, data),
+        }
+    }
+
+    /// Set the mode for creating the table
+    ///
+    /// This controls what happens if a table with the given name already exists
+    pub fn mode(mut self, mode: CreateTableMode) -> Self {
+        self.request.mode = mode;
+        self
+    }
+
+    /// Apply the given write options when writing the initial data
+    pub fn write_options(mut self, write_options: WriteOptions) -> Self {
+        self.request.write_options = write_options;
+        self
+    }
+
+    /// Set an option for the storage layer.
+    ///
+    /// Options already set on the connection will be inherited by the table,
+    /// but can be overridden here.
+    ///
+    /// See available options at <https://lancedb.com/docs/storage/>
+    pub fn storage_option(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
+        let store_params = self
+            .request
+            .write_options
+            .lance_write_params
+            .get_or_insert(Default::default())
+            .store_params
+            .get_or_insert(Default::default());
+        merge_storage_options(store_params, [(key.into(), value.into())]);
+        self
+    }
+
+    /// Set multiple options for the storage layer.
+    ///
+    /// Options already set on the connection will be inherited by the table,
+    /// but can be overridden here.
+    ///
+    /// See available options at <https://lancedb.com/docs/storage/>
+    pub fn storage_options(
+        mut self,
+        pairs: impl IntoIterator<Item = (impl Into<String>, impl Into<String>)>,
+    ) -> Self {
+        let store_params = self
+            .request
+            .write_options
+            .lance_write_params
+            .get_or_insert(Default::default())
+            .store_params
+            .get_or_insert(Default::default());
+        let updates = pairs
+            .into_iter()
+            .map(|(key, value)| (key.into(), value.into()));
+        merge_storage_options(store_params, updates);
+        self
+    }
+
+    /// Add an embedding definition to the table.
+    ///
+    /// The `embedding_name` must match the name of an embedding function that
+    /// was previously registered with the connection's [`EmbeddingRegistry`].
+    pub fn add_embedding(mut self, definition: EmbeddingDefinition) -> Result<Self> {
+        // Early verification of the embedding name
+        let embedding_func = self
+            .embedding_registry
+            .get(&definition.embedding_name)
+            .ok_or_else(|| Error::EmbeddingFunctionNotFound {
+                name: definition.embedding_name.clone(),
+                reason: "No embedding function found in the connection's embedding_registry"
+                    .to_string(),
+            })?;
+
+        self.embeddings.push((definition, embedding_func));
+        Ok(self)
+    }
+
+    /// Set the namespace for the table
+    pub fn namespace(mut self, namespace: Vec<String>) -> Self {
+        self.request.namespace = namespace;
+        self
+    }
+
+    /// Set a custom location for the table.
+    ///
+    /// If not set, the database will derive a location from its URI and the table name.
+    /// This is useful when integrating with namespace systems that manage table locations.
+    pub fn location(mut self, location: impl Into<String>) -> Self {
+        self.request.location = Some(location.into());
+        self
+    }
+
+    /// Set a storage options provider for automatic credential refresh.
+    ///
+    /// This allows tables to automatically refresh cloud storage credentials
+    /// when they expire, enabling long-running operations on remote storage.
+    pub fn storage_options_provider(mut self, provider: Arc<dyn StorageOptionsProvider>) -> Self {
+        let store_params = self
+            .request
+            .write_options
+            .lance_write_params
+            .get_or_insert(Default::default())
+            .store_params
+            .get_or_insert(Default::default());
+        set_storage_options_provider(store_params, provider);
+        self
+    }
+
+    /// Execute the create table operation
+    pub async fn execute(mut self) -> Result<Table> {
+        let embedding_registry = self.embedding_registry.clone();
+        let parent = self.parent.clone();
+
+        // If embeddings were configured via add_embedding(), wrap the data
+        if !self.embeddings.is_empty() {
+            let wrapped_data: Box<dyn Scannable> = Box::new(WithEmbeddingsScannable::try_new(
+                self.request.data,
+                self.embeddings,
+            )?);
+            self.request.data = wrapped_data;
+        }
+
+        Ok(Table::new_with_embedding_registry(
+            parent.create_table(self.request).await?,
+            parent,
+            embedding_registry,
+        ))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use arrow_array::{
+        record_batch, Array, FixedSizeListArray, Float32Array, RecordBatch, RecordBatchIterator,
+    };
+    use arrow_schema::{ArrowError, DataType, Field, Schema};
+    use futures::TryStreamExt;
+    use lance_file::version::LanceFileVersion;
+    use tempfile::tempdir;
+
+    use crate::{
+        arrow::{SendableRecordBatchStream, SimpleRecordBatchStream},
+        connect,
+        database::listing::{ListingDatabaseOptions, NewTableConfig},
+        embeddings::{EmbeddingDefinition, EmbeddingFunction, MemoryRegistry},
+        query::{ExecutableQuery, QueryBase, Select},
+        test_utils::embeddings::MockEmbed,
+    };
+    use std::borrow::Cow;
+
+    use super::*;
+
+    #[tokio::test]
+    async fn create_empty_table() {
+        let db = connect("memory://").execute().await.unwrap();
+        let schema = Arc::new(Schema::new(vec![
+            Field::new("id", DataType::Int64, false),
+            Field::new("value", DataType::Float64, false),
+        ]));
+        db.create_empty_table("name", schema.clone())
+            .execute()
+            .await
+            .unwrap();
+        let table = db.open_table("name").execute().await.unwrap();
+        assert_eq!(table.schema().await.unwrap(), schema);
+        assert_eq!(table.count_rows(None).await.unwrap(), 0);
+    }
+
+    async fn test_create_table_with_data<T>(data: T)
+    where
+        T: Scannable + 'static,
+    {
+        let db = connect("memory://").execute().await.unwrap();
+        let schema = data.schema();
+        db.create_table("data_table", data).execute().await.unwrap();
+        let table = db.open_table("data_table").execute().await.unwrap();
+        assert_eq!(table.count_rows(None).await.unwrap(), 3);
+        assert_eq!(table.schema().await.unwrap(), schema);
+    }
+
+    #[tokio::test]
+    async fn create_table_with_batch() {
+        let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
+        test_create_table_with_data(batch).await;
+    }
+
+    #[tokio::test]
+    async fn test_create_table_with_vec_batch() {
+        let data = vec![
+            record_batch!(("id", Int64, [1, 2])).unwrap(),
+            record_batch!(("id", Int64, [3])).unwrap(),
+        ];
+        test_create_table_with_data(data).await;
+    }
+
+    #[tokio::test]
+    async fn test_create_table_with_record_batch_reader() {
+        let data = vec![
+            record_batch!(("id", Int64, [1, 2])).unwrap(),
+            record_batch!(("id", Int64, [3])).unwrap(),
+        ];
+        let schema = data[0].schema();
+        let reader: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(
+            RecordBatchIterator::new(data.into_iter().map(Ok), schema.clone()),
+        );
+        test_create_table_with_data(reader).await;
+    }
+
+    #[tokio::test]
+    async fn test_create_table_with_stream() {
+        let data = vec![
+            record_batch!(("id", Int64, [1, 2])).unwrap(),
+            record_batch!(("id", Int64, [3])).unwrap(),
+        ];
+        let schema = data[0].schema();
+        let inner = futures::stream::iter(data.into_iter().map(Ok));
+        let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
+            schema,
+            stream: inner,
+        });
+        test_create_table_with_data(stream).await;
+    }
+
+    #[derive(Debug)]
+    struct MyError;
+
+    impl std::fmt::Display for MyError {
+        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+            write!(f, "MyError occurred")
+        }
+    }
+
+    impl std::error::Error for MyError {}
+
+    #[tokio::test]
+    async fn test_create_preserves_reader_error() {
+        let first_batch = record_batch!(("id", Int64, [1, 2])).unwrap();
+        let schema = first_batch.schema();
+        let iterator = vec![
+            Ok(first_batch),
+            Err(ArrowError::ExternalError(Box::new(MyError))),
+        ];
+        let reader: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(
+            RecordBatchIterator::new(iterator.into_iter(), schema.clone()),
+        );
+
+        let db = connect("memory://").execute().await.unwrap();
+        let result = db.create_table("failing_table", reader).execute().await;
+
+        assert!(result.is_err());
+        // TODO: when we upgrade to Lance 2.0.0, this should pass
+        // assert!(matches!(result, Err(Error::External { source})
+        //     if source.downcast_ref::<MyError>().is_some()
+        // ));
+    }
+
+    #[tokio::test]
+    async fn test_create_preserves_stream_error() {
+        let first_batch = record_batch!(("id", Int64, [1, 2])).unwrap();
+        let schema = first_batch.schema();
+        let iterator = vec![
+            Ok(first_batch),
+            Err(Error::External {
+                source: Box::new(MyError),
+            }),
+        ];
+        let stream = futures::stream::iter(iterator);
+        let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
+            schema: schema.clone(),
+            stream,
+        });
+
+        let db = connect("memory://").execute().await.unwrap();
+        let result = db
+            .create_table("failing_stream_table", stream)
+            .execute()
+            .await;
+
+        assert!(result.is_err());
+        // TODO: when we upgrade to Lance 2.0.0, this should pass
+        // assert!(matches!(result, Err(Error::External { source})
+        //     if source.downcast_ref::<MyError>().is_some()
+        // ));
+    }
+
+    #[tokio::test]
+    #[allow(deprecated)]
+    async fn test_create_table_with_storage_options() {
+        let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
+        let db = connect("memory://").execute().await.unwrap();
+
+        let table = db
+            .create_table("options_table", batch)
+            .storage_option("timeout", "30s")
+            .storage_options([("retry_count", "3")])
+            .execute()
+            .await
+            .unwrap();
+
+        let final_options = table.storage_options().await.unwrap();
+        assert_eq!(final_options.get("timeout"), Some(&"30s".to_string()));
+        assert_eq!(final_options.get("retry_count"), Some(&"3".to_string()));
+    }
+
+    #[tokio::test]
+    async fn test_create_table_unregistered_embedding() {
+        let db = connect("memory://").execute().await.unwrap();
+        let batch = record_batch!(("text", Utf8, ["hello", "world"])).unwrap();
+
+        // Try to add an embedding that doesn't exist in the registry
+        let result = db
+            .create_table("embed_table", batch)
+            .add_embedding(EmbeddingDefinition::new(
+                "text",
+                "nonexistent_embedding_function",
+                None::<&str>,
+            ));
+
+        match result {
+            Err(Error::EmbeddingFunctionNotFound { name, .. }) => {
+                assert_eq!(name, "nonexistent_embedding_function");
+            }
+            Err(other) => panic!("Expected EmbeddingFunctionNotFound error, got: {:?}", other),
+            Ok(_) => panic!("Expected error, but got Ok"),
+        }
+    }
+
+    #[tokio::test]
+    async fn test_create_table_already_exists() {
+        let tmp_dir = tempdir().unwrap();
+        let uri = tmp_dir.path().to_str().unwrap();
+        let db = connect(uri).execute().await.unwrap();
+        let schema = Arc::new(Schema::new(vec![Field::new("x", DataType::Int32, false)]));
+        db.create_empty_table("test", schema.clone())
+            .execute()
+            .await
+            .unwrap();
+        db.create_empty_table("test", schema)
+            .mode(CreateTableMode::exist_ok(|mut req| {
+                req.index_cache_size = Some(16);
+                req
+            }))
+            .execute()
+            .await
+            .unwrap();
+        let other_schema = Arc::new(Schema::new(vec![Field::new("y", DataType::Int32, false)]));
+        assert!(db
+            .create_empty_table("test", other_schema.clone())
+            .execute()
+            .await
+            .is_err()); // TODO: assert what this error is
+        let overwritten = db
+            .create_empty_table("test", other_schema.clone())
+            .mode(CreateTableMode::Overwrite)
+            .execute()
+            .await
+            .unwrap();
+        assert_eq!(other_schema, overwritten.schema().await.unwrap());
+    }
+
+    #[tokio::test]
+    #[rstest::rstest]
+    #[case(LanceFileVersion::Legacy)]
+    #[case(LanceFileVersion::Stable)]
+    async fn test_create_table_with_storage_version(
+        #[case] data_storage_version: LanceFileVersion,
+    ) {
+        let db = connect("memory://")
+            .database_options(&ListingDatabaseOptions {
+                new_table_config: NewTableConfig {
+                    data_storage_version: Some(data_storage_version),
+                    ..Default::default()
+                },
+                ..Default::default()
+            })
+            .execute()
+            .await
+            .unwrap();
+
+        let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
+        let table = db
+            .create_table("legacy_table", batch)
+            .execute()
+            .await
+            .unwrap();
+
+        let native_table = table.as_native().unwrap();
+        let storage_format = native_table
+            .manifest()
+            .await
+            .unwrap()
+            .data_storage_format
+            .lance_file_version()
+            .unwrap();
+        // Compare resolved versions since Stable/Next are aliases that resolve at storage time
+        assert_eq!(storage_format.resolve(), data_storage_version.resolve());
+    }
+
+    #[tokio::test]
+    async fn test_create_table_with_embedding() {
+        // Register the mock embedding function
+        let registry = Arc::new(MemoryRegistry::new());
+        let mock_embedding: Arc<dyn EmbeddingFunction> = Arc::new(MockEmbed::new("mock", 4));
+        registry.register("mock", mock_embedding).unwrap();
+
+        // Connect with the custom registry
+        let conn = connect("memory://")
+            .embedding_registry(registry)
+            .execute()
+            .await
+            .unwrap();
+
+        // Create data without the embedding column
+        let batch = record_batch!(("text", Utf8, ["hello", "world", "test"])).unwrap();
+
+        // Create table with add_embedding - embeddings should be computed automatically
+        let table = conn
+            .create_table("embed_test", batch)
+            .add_embedding(EmbeddingDefinition::new(
+                "text",
+                "mock",
+                Some("text_embedding"),
+            ))
+            .unwrap()
+            .execute()
+            .await
+            .unwrap();
+
+        // Verify row count
+        assert_eq!(table.count_rows(None).await.unwrap(), 3);
+
+        // Verify the schema includes the embedding column
+        let result_schema = table.schema().await.unwrap();
+        assert_eq!(result_schema.fields().len(), 2);
+        assert_eq!(result_schema.field(0).name(), "text");
+        assert_eq!(result_schema.field(1).name(), "text_embedding");
+
+        // Verify the embedding column has the correct type
+        assert!(matches!(
+            result_schema.field(1).data_type(),
+            DataType::FixedSizeList(_, 4)
+        ));
+
+        // Query to verify the embeddings were computed
+        let results: Vec<RecordBatch> = table
+            .query()
+            .select(Select::columns(&["text", "text_embedding"]))
+            .execute()
+            .await
+            .unwrap()
+            .try_collect()
+            .await
+            .unwrap();
+
+        let total_rows: usize = results.iter().map(|b| b.num_rows()).sum();
+        assert_eq!(total_rows, 3);
+
+        // Check that all rows have embedding values (not null)
+        for batch in &results {
+            let embedding_col = batch.column(1);
+            assert_eq!(embedding_col.null_count(), 0);
+            assert_eq!(embedding_col.len(), batch.num_rows());
+        }
+
+        // Verify the schema metadata contains the column definitions
+        assert!(
+            result_schema
+                .metadata
+                .contains_key("lancedb::column_definitions"),
+            "Schema metadata should contain column definitions"
+        );
+    }
+
+    #[tokio::test]
+    async fn test_create_empty_table_with_embeddings() {
+        #[derive(Debug, Clone)]
+        struct MockEmbedding {
+            dim: usize,
+        }
+
+        impl EmbeddingFunction for MockEmbedding {
+            fn name(&self) -> &str {
+                "test_embedding"
+            }
+
+            fn source_type(&self) -> Result<Cow<'_, DataType>> {
+                Ok(Cow::Owned(DataType::Utf8))
+            }
+
+            fn dest_type(&self) -> Result<Cow<'_, DataType>> {
+                Ok(Cow::Owned(DataType::new_fixed_size_list(
+                    DataType::Float32,
+                    self.dim as i32,
+                    true,
+                )))
+            }
+
+            fn compute_source_embeddings(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
+                let len = source.len();
+                let values = vec![1.0f32; len * self.dim];
+                let values = Arc::new(Float32Array::from(values));
+                let field = Arc::new(Field::new("item", DataType::Float32, true));
+                Ok(Arc::new(FixedSizeListArray::new(
+                    field,
+                    self.dim as i32,
+                    values,
+                    None,
+                )))
+            }
+
+            fn compute_query_embeddings(&self, _input: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
+                unimplemented!()
+            }
+        }
+
+        let tmp_dir = tempdir().unwrap();
+        let uri = tmp_dir.path().to_str().unwrap();
+        let db = connect(uri).execute().await.unwrap();
+
+        let embed_func = Arc::new(MockEmbedding { dim: 128 });
+        db.embedding_registry()
+            .register("test_embedding", embed_func.clone())
+            .unwrap();
+
+        let schema = Arc::new(Schema::new(vec![Field::new("name", DataType::Utf8, true)]));
+        let ed = EmbeddingDefinition {
+            source_column: "name".to_owned(),
+            dest_column: Some("name_embedding".to_owned()),
+            embedding_name: "test_embedding".to_owned(),
+        };
+
+        let table = db
+            .create_empty_table("test", schema)
+            .mode(CreateTableMode::Overwrite)
+            .add_embedding(ed)
+            .unwrap()
+            .execute()
+            .await
+            .unwrap();
+
+        let table_schema = table.schema().await.unwrap();
+        assert!(table_schema.column_with_name("name").is_some());
+        assert!(table_schema.column_with_name("name_embedding").is_some());
+
+        let embedding_field = table_schema.field_with_name("name_embedding").unwrap();
+        assert_eq!(
+            embedding_field.data_type(),
+            &DataType::new_fixed_size_list(DataType::Float32, 128, true)
+        );
+
+        let input_batch = record_batch!(("name", Utf8, ["Alice", "Bob", "Charlie"])).unwrap();
+        table.add(input_batch).execute().await.unwrap();
+
+        let results = table
+            .query()
+            .execute()
+            .await
+            .unwrap()
+            .try_collect::<Vec<_>>()
+            .await
+            .unwrap();
+
+        assert_eq!(results.len(), 1);
+        let batch = &results[0];
+        assert_eq!(batch.num_rows(), 3);
+        assert!(batch.column_by_name("name_embedding").is_some());
+
+        let embedding_col = batch
+            .column_by_name("name_embedding")
+            .unwrap()
+            .as_any()
+            .downcast_ref::<FixedSizeListArray>()
+            .unwrap();
+        assert_eq!(embedding_col.len(), 3);
+    }
+}
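Taken together with the tests above, a typical builder chain looks like the following sketch (the names are illustrative, and the "mock" embedding function is assumed to already be registered on the connection):

let table = db
    .create_table("documents", batch)
    .mode(CreateTableMode::Overwrite)
    .storage_option("timeout", "30s")
    .add_embedding(EmbeddingDefinition::new("text", "mock", Some("text_vec")))?
    .execute()
    .await?;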

@@ -5,3 +5,4 @@
 
 pub mod inspect;
 pub mod sanitize;
+pub mod scannable;

rust/lancedb/src/data/scannable.rs (new file, 968 lines)
@@ -0,0 +1,968 @@
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
||||||
|
|
||||||
|
//! Data source abstraction for LanceDB.
|
||||||
|
//!
|
||||||
|
//! This module provides a [`Scannable`] trait that allows input data sources to express
|
||||||
|
//! capabilities (row count, rescannability) so the insert pipeline can make
|
||||||
|
//! better decisions about write parallelism and retry strategies.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use crate::arrow::{
|
||||||
|
SendableRecordBatchStream, SendableRecordBatchStreamExt, SimpleRecordBatchStream,
|
||||||
|
};
|
||||||
|
use crate::embeddings::{
|
||||||
|
compute_embeddings_for_batch, compute_output_schema, EmbeddingDefinition, EmbeddingFunction,
|
||||||
|
EmbeddingRegistry,
|
||||||
|
};
|
||||||
|
use crate::table::{ColumnDefinition, ColumnKind, TableDefinition};
|
||||||
|
use crate::{Error, Result};
|
||||||
|
use arrow_array::{ArrayRef, RecordBatch, RecordBatchIterator, RecordBatchReader};
|
||||||
|
use arrow_schema::{ArrowError, SchemaRef};
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use futures::stream::once;
|
||||||
|
use futures::StreamExt;
|
||||||
|
use lance_datafusion::utils::StreamingWriteSource;
|
||||||
|
|
||||||
|
pub trait Scannable: Send {
|
||||||
|
/// Returns the schema of the data.
|
||||||
|
fn schema(&self) -> SchemaRef;
|
||||||
|
|
||||||
|
/// Read data as a stream of record batches.
|
||||||
|
///
|
||||||
|
/// For rescannable sources (in-memory data like RecordBatch, Vec<RecordBatch>),
|
||||||
|
/// this can be called multiple times and returns cloned data each time.
|
||||||
|
///
|
||||||
|
/// For non-rescannable sources (streams, readers), this can only be called once.
|
||||||
|
/// Calling it a second time returns a stream whose first item is an error.
|
||||||
|
fn scan_as_stream(&mut self) -> SendableRecordBatchStream;
|
||||||
|
|
||||||
|
/// Optional hint about the number of rows.
|
||||||
|
///
|
||||||
|
/// When available, this allows the pipeline to estimate total data size
|
||||||
|
/// and choose appropriate partitioning.
|
||||||
|
fn num_rows(&self) -> Option<usize> {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Whether the source can be re-read from the beginning.
|
||||||
|
///
|
||||||
|
/// `true` for in-memory data (Tables, DataFrames) and disk-based sources (Datasets).
|
||||||
|
/// `false` for streaming sources (DuckDB results, network streams).
|
||||||
|
///
|
||||||
|
/// When true, the pipeline can retry failed writes by rescanning.
|
||||||
|
fn rescannable(&self) -> bool {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|

impl std::fmt::Debug for dyn Scannable {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Scannable")
            .field("schema", &self.schema())
            .field("num_rows", &self.num_rows())
            .field("rescannable", &self.rescannable())
            .finish()
    }
}

impl Scannable for RecordBatch {
    fn schema(&self) -> SchemaRef {
        Self::schema(self)
    }

    fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
        let batch = self.clone();
        let schema = batch.schema();
        Box::pin(SimpleRecordBatchStream {
            schema,
            stream: once(async move { Ok(batch) }),
        })
    }

    fn num_rows(&self) -> Option<usize> {
        Some(Self::num_rows(self))
    }

    fn rescannable(&self) -> bool {
        true
    }
}

impl Scannable for Vec<RecordBatch> {
    fn schema(&self) -> SchemaRef {
        if self.is_empty() {
            Arc::new(arrow_schema::Schema::empty())
        } else {
            self[0].schema()
        }
    }

    fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
        if self.is_empty() {
            let schema = Scannable::schema(self);
            return Box::pin(SimpleRecordBatchStream {
                schema,
                stream: once(async {
                    Err(Error::InvalidInput {
                        message: "Cannot scan an empty Vec<RecordBatch>".to_string(),
                    })
                }),
            });
        }
        let schema = Scannable::schema(self);
        let batches = self.clone();
        let stream = futures::stream::iter(batches.into_iter().map(Ok));
        Box::pin(SimpleRecordBatchStream { schema, stream })
    }

    fn num_rows(&self) -> Option<usize> {
        Some(self.iter().map(|b| b.num_rows()).sum())
    }

    fn rescannable(&self) -> bool {
        true
    }
}

impl Scannable for Box<dyn RecordBatchReader + Send> {
    fn schema(&self) -> SchemaRef {
        RecordBatchReader::schema(self.as_ref())
    }

    fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
        let schema = Scannable::schema(self);

        // Swap self with a reader that errors on iteration, so a second call
        // produces a clear error instead of silently returning empty data.
        let err_reader: Box<dyn RecordBatchReader + Send> = Box::new(RecordBatchIterator::new(
            vec![Err(ArrowError::InvalidArgumentError(
                "Reader has already been consumed".into(),
            ))],
            schema.clone(),
        ));
        let reader = std::mem::replace(self, err_reader);

        // Bridge the blocking RecordBatchReader to an async stream via a channel.
        let (tx, rx) = tokio::sync::mpsc::channel::<crate::Result<RecordBatch>>(2);
        tokio::task::spawn_blocking(move || {
            for batch_result in reader {
                let result = batch_result.map_err(Into::into);
                if tx.blocking_send(result).is_err() {
                    break;
                }
            }
        });

        let stream = futures::stream::unfold(rx, |mut rx| async move {
            rx.recv().await.map(|batch| (batch, rx))
        })
        .fuse();

        Box::pin(SimpleRecordBatchStream { schema, stream })
    }
}
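
// Note: the channel above is bounded (capacity 2), so the blocking reader
// thread is back-pressured by the async consumer instead of buffering the
// whole input in memory, and `fuse()` keeps the stream terminated once the
// sending side hangs up.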

impl Scannable for SendableRecordBatchStream {
    fn schema(&self) -> SchemaRef {
        self.as_ref().schema()
    }

    fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
        let schema = Scannable::schema(self);

        // Swap self with an error stream so a second call produces a clear error.
        let error_stream = Box::pin(SimpleRecordBatchStream {
            schema: schema.clone(),
            stream: once(async {
                Err(Error::InvalidInput {
                    message: "Stream has already been consumed".to_string(),
                })
            }),
        });
        std::mem::replace(self, error_stream)
    }
}

#[async_trait]
impl StreamingWriteSource for Box<dyn Scannable> {
    fn arrow_schema(&self) -> SchemaRef {
        self.schema()
    }

    fn into_stream(mut self) -> datafusion_physical_plan::SendableRecordBatchStream {
        self.scan_as_stream().into_df_stream()
    }
}
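
// This adapter is what lets a `Box<dyn Scannable>` be handed straight to
// Lance's streaming write path: the schema is reported up front, and the
// data is only pulled once the writer starts consuming the stream.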

/// A scannable that applies embeddings to the stream.
pub struct WithEmbeddingsScannable {
    inner: Box<dyn Scannable>,
    embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
    output_schema: SchemaRef,
}

impl WithEmbeddingsScannable {
    /// Create a new WithEmbeddingsScannable.
    ///
    /// The embeddings are applied to the inner scannable's data as new columns.
    pub fn try_new(
        inner: Box<dyn Scannable>,
        embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
    ) -> Result<Self> {
        let output_schema = compute_output_schema(&inner.schema(), &embeddings)?;

        // Build column definitions: Physical for base columns, Embedding for new ones
        let base_col_count = inner.schema().fields().len();
        let column_definitions: Vec<ColumnDefinition> = (0..base_col_count)
            .map(|_| ColumnDefinition {
                kind: ColumnKind::Physical,
            })
            .chain(embeddings.iter().map(|(ed, _)| ColumnDefinition {
                kind: ColumnKind::Embedding(ed.clone()),
            }))
            .collect();

        let table_definition = TableDefinition::new(output_schema, column_definitions);
        let output_schema = table_definition.into_rich_schema();

        Self::with_schema(inner, embeddings, output_schema)
    }

    /// Create a WithEmbeddingsScannable with a specific output schema.
    ///
    /// Use this when the table schema is already known (e.g. during add) to
    /// avoid nullability mismatches between the embedding function's declared
    /// type and the table's stored type.
    pub fn with_schema(
        inner: Box<dyn Scannable>,
        embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
        output_schema: SchemaRef,
    ) -> Result<Self> {
        Ok(Self {
            inner,
            embeddings,
            output_schema,
        })
    }
}

impl Scannable for WithEmbeddingsScannable {
    fn schema(&self) -> SchemaRef {
        self.output_schema.clone()
    }

    fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
        let inner_stream = self.inner.scan_as_stream();
        let embeddings = self.embeddings.clone();
        let output_schema = self.output_schema.clone();
        let stream_schema = output_schema.clone();

        let mapped_stream = inner_stream.then(move |batch_result| {
            let embeddings = embeddings.clone();
            let output_schema = output_schema.clone();
            async move {
                let batch = batch_result?;
                let result = tokio::task::spawn_blocking(move || {
                    compute_embeddings_for_batch(batch, &embeddings)
                })
                .await
                .map_err(|e| Error::Runtime {
                    message: format!("Task panicked during embedding computation: {}", e),
                })??;
                // Cast columns to match the declared output schema. The data is
                // identical but field metadata (e.g. nested nullability) may
                // differ between the embedding function output and the table.
                let columns: Vec<ArrayRef> = result
                    .columns()
                    .iter()
                    .enumerate()
                    .map(|(i, col)| {
                        let target_type = output_schema.field(i).data_type();
                        if col.data_type() == target_type {
                            Ok(col.clone())
                        } else {
                            arrow_cast::cast(col, target_type).map_err(Error::from)
                        }
                    })
                    .collect::<Result<_>>()?;
                let result = RecordBatch::try_new(output_schema, columns)?;
                Ok(result)
            }
        });

        Box::pin(SimpleRecordBatchStream {
            schema: stream_schema,
            stream: mapped_stream,
        })
    }

    fn num_rows(&self) -> Option<usize> {
        self.inner.num_rows()
    }

    fn rescannable(&self) -> bool {
        self.inner.rescannable()
    }
}

pub fn scannable_with_embeddings(
    inner: Box<dyn Scannable>,
    table_definition: &TableDefinition,
    registry: Option<&Arc<dyn EmbeddingRegistry>>,
) -> Result<Box<dyn Scannable>> {
    if let Some(registry) = registry {
        let mut embeddings = Vec::with_capacity(table_definition.column_definitions.len());
        for cd in table_definition.column_definitions.iter() {
            if let ColumnKind::Embedding(embedding_def) = &cd.kind {
                match registry.get(&embedding_def.embedding_name) {
                    Some(func) => {
                        embeddings.push((embedding_def.clone(), func));
                    }
                    None => {
                        return Err(Error::EmbeddingFunctionNotFound {
                            name: embedding_def.embedding_name.clone(),
                            reason: format!(
                                "Table was defined with an embedding column `{}` but no embedding function was found with that name within the registry.",
                                embedding_def.embedding_name
                            ),
                        });
                    }
                }
            }
        }

        if !embeddings.is_empty() {
            // Use the table's schema so embedding column types (including nested
            // nullability) match what's stored, avoiding mismatches with the
            // embedding function's declared dest_type.
            return Ok(Box::new(WithEmbeddingsScannable::with_schema(
                inner,
                embeddings,
                table_definition.schema.clone(),
            )?));
        }
    }

    Ok(inner)
}
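
// Sketch of the intended call site (the surrounding variable names are
// illustrative, not part of this module): wrap the user's data before
// writing so embedding columns declared in the table definition are
// materialized on the fly.
//
//     let source: Box<dyn Scannable> = Box::new(user_batches);
//     let source = scannable_with_embeddings(source, &table_definition, registry.as_ref())?;
//     // `source` now yields batches that already contain the embedding columns.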

/// A wrapper that buffers the first RecordBatch from a Scannable so we can
/// inspect it (e.g. to estimate data size) without losing it.
pub(crate) struct PeekedScannable {
    inner: Box<dyn Scannable>,
    peeked: Option<RecordBatch>,
    /// The first item from the stream, if it was an error. Stored so we can
    /// re-emit it from `scan_as_stream` instead of silently dropping it.
    first_error: Option<crate::Error>,
    stream: Option<SendableRecordBatchStream>,
}

impl PeekedScannable {
    pub fn new(inner: Box<dyn Scannable>) -> Self {
        Self {
            inner,
            peeked: None,
            first_error: None,
            stream: None,
        }
    }

    /// Reads and buffers the first batch from the inner scannable.
    /// Returns a clone of it. Subsequent calls return the same batch.
    ///
    /// Returns `None` if the stream is empty or the first item is an error.
    /// Errors are preserved and re-emitted by `scan_as_stream`.
    pub async fn peek(&mut self) -> Option<RecordBatch> {
        if self.peeked.is_some() {
            return self.peeked.clone();
        }
        // Already peeked and got an error or empty stream.
        if self.stream.is_some() || self.first_error.is_some() {
            return None;
        }
        let mut stream = self.inner.scan_as_stream();
        match stream.next().await {
            Some(Ok(batch)) => {
                self.peeked = Some(batch.clone());
                self.stream = Some(stream);
                Some(batch)
            }
            Some(Err(e)) => {
                self.first_error = Some(e);
                self.stream = Some(stream);
                None
            }
            None => {
                self.stream = Some(stream);
                None
            }
        }
    }
}

impl Scannable for PeekedScannable {
    fn schema(&self) -> SchemaRef {
        self.inner.schema()
    }

    fn num_rows(&self) -> Option<usize> {
        self.inner.num_rows()
    }

    fn rescannable(&self) -> bool {
        self.inner.rescannable()
    }

    fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
        let schema = self.inner.schema();

        // If peek() hit an error, prepend it so downstream sees the error.
        let error_item = self.first_error.take().map(Err);

        match (self.peeked.take(), self.stream.take()) {
            (Some(batch), Some(rest)) => {
                let prepend = futures::stream::once(std::future::ready(Ok(batch)));
                Box::pin(SimpleRecordBatchStream {
                    schema,
                    stream: prepend.chain(rest),
                })
            }
            (Some(batch), None) => Box::pin(SimpleRecordBatchStream {
                schema,
                stream: futures::stream::once(std::future::ready(Ok(batch))),
            }),
            (None, Some(rest)) => {
                if let Some(err) = error_item {
                    let stream = futures::stream::once(std::future::ready(err));
                    Box::pin(SimpleRecordBatchStream { schema, stream })
                } else {
                    rest
                }
            }
            (None, None) => {
                // peek() was never called — just delegate
                self.inner.scan_as_stream()
            }
        }
    }
}
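
// Peek-then-write sketch (illustrative only; `source` and `num_cpus` are
// assumed to exist at the call site):
//
//     let mut peeked = PeekedScannable::new(source);
//     if let Some(first) = peeked.peek().await {
//         let partitions = estimate_write_partitions(
//             first.get_array_memory_size(),
//             first.num_rows(),
//             peeked.num_rows(),
//             num_cpus,
//         );
//     }
//     let stream = peeked.scan_as_stream(); // still yields the peeked batch first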

/// Compute the number of write partitions based on data size estimates.
///
/// `sample_bytes` and `sample_rows` come from a representative batch and are
/// used to estimate per-row size. `total_rows_hint` is the total row count
/// when known; otherwise `sample_rows` is used as a lower-bound estimate.
///
/// Targets roughly 1 million rows or 2 GiB per partition, capped at
/// `max_partitions` (typically the number of available CPU cores).
pub(crate) fn estimate_write_partitions(
    sample_bytes: usize,
    sample_rows: usize,
    total_rows_hint: Option<usize>,
    max_partitions: usize,
) -> usize {
    if sample_rows == 0 {
        return 1;
    }
    let bytes_per_row = sample_bytes / sample_rows;
    let total_rows = total_rows_hint.unwrap_or(sample_rows);
    let total_bytes = total_rows * bytes_per_row;
    let by_rows = total_rows.div_ceil(1_000_000);
    let by_bytes = total_bytes.div_ceil(2 * 1024 * 1024 * 1024);
    by_rows.max(by_bytes).max(1).min(max_partitions)
}
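
// Worked example: a 10-row sample occupying 10 KiB (1 KiB/row) with a
// 5_000_000-row hint gives by_rows = ceil(5M / 1M) = 5 and
// by_bytes = ceil(~4.77 GiB / 2 GiB) = 3, so 5 partitions before the
// `max_partitions` cap is applied.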

#[cfg(test)]
mod tests {
    use super::*;
    use arrow_array::record_batch;
    use futures::TryStreamExt;

    #[tokio::test]
    async fn test_record_batch_rescannable() {
        let mut batch = record_batch!(("id", Int64, [0, 1, 2])).unwrap();

        let stream1 = batch.scan_as_stream();
        let batches1: Vec<RecordBatch> = stream1.try_collect().await.unwrap();
        assert_eq!(batches1.len(), 1);
        assert_eq!(batches1[0], batch);

        assert!(batch.rescannable());
        let stream2 = batch.scan_as_stream();
        let batches2: Vec<RecordBatch> = stream2.try_collect().await.unwrap();
        assert_eq!(batches2.len(), 1);
        assert_eq!(batches2[0], batch);
    }

    #[tokio::test]
    async fn test_vec_batch_rescannable() {
        let mut batches = vec![
            record_batch!(("id", Int64, [0, 1])).unwrap(),
            record_batch!(("id", Int64, [2, 3, 4])).unwrap(),
        ];

        let stream1 = batches.scan_as_stream();
        let result1: Vec<RecordBatch> = stream1.try_collect().await.unwrap();
        assert_eq!(result1.len(), 2);
        assert_eq!(result1[0], batches[0]);
        assert_eq!(result1[1], batches[1]);

        assert!(batches.rescannable());
        let stream2 = batches.scan_as_stream();
        let result2: Vec<RecordBatch> = stream2.try_collect().await.unwrap();
        assert_eq!(result2.len(), 2);
        assert_eq!(result2[0], batches[0]);
        assert_eq!(result2[1], batches[1]);
    }

    #[tokio::test]
    async fn test_vec_batch_empty_errors() {
        let mut empty: Vec<RecordBatch> = vec![];
        let mut stream = empty.scan_as_stream();
        let result = stream.next().await;
        assert!(result.is_some());
        assert!(result.unwrap().is_err());
    }

    #[tokio::test]
    async fn test_reader_not_rescannable() {
        let batch = record_batch!(("id", Int64, [0, 1, 2])).unwrap();
        let schema = batch.schema();
        let mut reader: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(
            RecordBatchIterator::new(vec![Ok(batch.clone())], schema.clone()),
        );

        let stream1 = reader.scan_as_stream();
        let result1: Vec<RecordBatch> = stream1.try_collect().await.unwrap();
        assert_eq!(result1.len(), 1);
        assert_eq!(result1[0], batch);

        assert!(!reader.rescannable());
        // Second call returns a stream whose first item is an error
        let mut stream2 = reader.scan_as_stream();
        let result2 = stream2.next().await;
        assert!(result2.is_some());
        assert!(result2.unwrap().is_err());
    }

    #[tokio::test]
    async fn test_stream_not_rescannable() {
        let batch = record_batch!(("id", Int64, [0, 1, 2])).unwrap();
        let schema = batch.schema();
        let inner_stream = futures::stream::iter(vec![Ok(batch.clone())]);
        let mut stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
            schema: schema.clone(),
            stream: inner_stream,
        });

        let stream1 = stream.scan_as_stream();
        let result1: Vec<RecordBatch> = stream1.try_collect().await.unwrap();
        assert_eq!(result1.len(), 1);
        assert_eq!(result1[0], batch);

        assert!(!stream.rescannable());
        // Second call returns a stream whose first item is an error
        let mut stream2 = stream.scan_as_stream();
        let result2 = stream2.next().await;
        assert!(result2.is_some());
        assert!(result2.unwrap().is_err());
    }

    mod peeked_scannable_tests {
        use crate::test_utils::TestCustomError;

        use super::*;

        #[tokio::test]
        async fn test_peek_returns_first_batch() {
            let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
            let mut peeked = PeekedScannable::new(Box::new(batch.clone()));

            let first = peeked.peek().await.unwrap();
            assert_eq!(first, batch);
        }

        #[tokio::test]
        async fn test_peek_is_idempotent() {
            let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
            let mut peeked = PeekedScannable::new(Box::new(batch.clone()));

            let first = peeked.peek().await.unwrap();
            let second = peeked.peek().await.unwrap();
            assert_eq!(first, second);
        }

        #[tokio::test]
        async fn test_scan_after_peek_returns_all_data() {
            let batches = vec![
                record_batch!(("id", Int64, [1, 2])).unwrap(),
                record_batch!(("id", Int64, [3, 4, 5])).unwrap(),
            ];
            let mut peeked = PeekedScannable::new(Box::new(batches.clone()));

            let first = peeked.peek().await.unwrap();
            assert_eq!(first, batches[0]);

            let result: Vec<RecordBatch> = peeked.scan_as_stream().try_collect().await.unwrap();
            assert_eq!(result.len(), 2);
            assert_eq!(result[0], batches[0]);
            assert_eq!(result[1], batches[1]);
        }

        #[tokio::test]
        async fn test_scan_without_peek_passes_through() {
            let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
            let mut peeked = PeekedScannable::new(Box::new(batch.clone()));

            let result: Vec<RecordBatch> = peeked.scan_as_stream().try_collect().await.unwrap();
            assert_eq!(result.len(), 1);
            assert_eq!(result[0], batch);
        }

        #[tokio::test]
        async fn test_delegates_num_rows() {
            let batches = vec![
                record_batch!(("id", Int64, [1, 2])).unwrap(),
                record_batch!(("id", Int64, [3])).unwrap(),
            ];
            let peeked = PeekedScannable::new(Box::new(batches));
            assert_eq!(peeked.num_rows(), Some(3));
        }

        #[tokio::test]
        async fn test_non_rescannable_stream_data_preserved() {
            let batches = vec![
                record_batch!(("id", Int64, [1, 2])).unwrap(),
                record_batch!(("id", Int64, [3])).unwrap(),
            ];
            let schema = batches[0].schema();
            let inner = futures::stream::iter(batches.clone().into_iter().map(Ok));
            let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
                schema,
                stream: inner,
            });

            let mut peeked = PeekedScannable::new(Box::new(stream));
            assert!(!peeked.rescannable());
            assert_eq!(peeked.num_rows(), None);

            let first = peeked.peek().await.unwrap();
            assert_eq!(first, batches[0]);

            // All data is still available via scan_as_stream
            let result: Vec<RecordBatch> = peeked.scan_as_stream().try_collect().await.unwrap();
            assert_eq!(result.len(), 2);
            assert_eq!(result[0], batches[0]);
            assert_eq!(result[1], batches[1]);
        }

        #[tokio::test]
        async fn test_error_in_first_batch_propagates() {
            let schema = Arc::new(arrow_schema::Schema::new(vec![arrow_schema::Field::new(
                "id",
                arrow_schema::DataType::Int64,
                false,
            )]));
            let inner = futures::stream::iter(vec![Err(Error::External {
                source: Box::new(TestCustomError),
            })]);
            let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
                schema,
                stream: inner,
            });

            let mut peeked = PeekedScannable::new(Box::new(stream));

            // peek returns None for errors
            assert!(peeked.peek().await.is_none());

            // But the error should come through when scanning
            let mut stream = peeked.scan_as_stream();
            let first = stream.next().await.unwrap();
            assert!(first.is_err());
            let err = first.unwrap_err();
            assert!(
                matches!(&err, Error::External { source } if source.downcast_ref::<TestCustomError>().is_some()),
                "Expected TestCustomError to be preserved, got: {err}"
            );
        }

        #[tokio::test]
        async fn test_error_in_later_batch_propagates() {
            let good_batch = record_batch!(("id", Int64, [1, 2])).unwrap();
            let schema = good_batch.schema();
            let inner = futures::stream::iter(vec![
                Ok(good_batch.clone()),
                Err(Error::External {
                    source: Box::new(TestCustomError),
                }),
            ]);
            let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
                schema,
                stream: inner,
            });

            let mut peeked = PeekedScannable::new(Box::new(stream));

            // peek succeeds with the first batch
            let first = peeked.peek().await.unwrap();
            assert_eq!(first, good_batch);

            // scan_as_stream should yield the first batch, then the error
            let mut stream = peeked.scan_as_stream();
            let batch1 = stream.next().await.unwrap().unwrap();
            assert_eq!(batch1, good_batch);

            let batch2 = stream.next().await.unwrap();
            assert!(batch2.is_err());
            let err = batch2.unwrap_err();
            assert!(
                matches!(&err, Error::External { source } if source.downcast_ref::<TestCustomError>().is_some()),
                "Expected TestCustomError to be preserved, got: {err}"
            );
        }

        #[tokio::test]
        async fn test_empty_stream_returns_none() {
            let schema = Arc::new(arrow_schema::Schema::new(vec![arrow_schema::Field::new(
                "id",
                arrow_schema::DataType::Int64,
                false,
            )]));
            let inner = futures::stream::empty();
            let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
                schema,
                stream: inner,
            });

            let mut peeked = PeekedScannable::new(Box::new(stream));
            assert!(peeked.peek().await.is_none());

            // Scanning an empty (post-peek) stream should yield nothing
            let result: Vec<RecordBatch> = peeked.scan_as_stream().try_collect().await.unwrap();
            assert!(result.is_empty());
        }
    }

    mod estimate_write_partitions_tests {
        use super::*;

        #[test]
        fn test_small_data_single_partition() {
            // 100 rows * 24 bytes/row = 2400 bytes — well under both thresholds
            assert_eq!(estimate_write_partitions(2400, 100, Some(100), 8), 1);
        }

        #[test]
        fn test_scales_by_row_count() {
            // 2.5M rows at 24 bytes/row — row threshold dominates
            // ceil(2_500_000 / 1_000_000) = 3
            assert_eq!(estimate_write_partitions(72, 3, Some(2_500_000), 8), 3);
        }

        #[test]
        fn test_scales_by_byte_size() {
            // 100k rows at 40KB/row = ~4GB total → ceil(4GB / 2GB) = 2
            let sample_bytes = 40_000 * 10;
            assert_eq!(
                estimate_write_partitions(sample_bytes, 10, Some(100_000), 8),
                2
            );
        }

        #[test]
        fn test_capped_at_max_partitions() {
            // 10M rows would want 10 partitions, but capped at 4
            assert_eq!(estimate_write_partitions(72, 3, Some(10_000_000), 4), 4);
        }

        #[test]
        fn test_zero_sample_rows_returns_one() {
            assert_eq!(estimate_write_partitions(0, 0, Some(1_000_000), 8), 1);
        }

        #[test]
        fn test_no_row_hint_uses_sample_size() {
            // Without a hint, uses sample_rows (3), which is small
            assert_eq!(estimate_write_partitions(72, 3, None, 8), 1);
        }

        #[test]
        fn test_always_at_least_one() {
            assert_eq!(estimate_write_partitions(24, 1, Some(1), 8), 1);
        }
    }

    mod embedding_tests {
        use super::*;
        use crate::embeddings::MemoryRegistry;
        use crate::table::{ColumnDefinition, ColumnKind};
        use crate::test_utils::embeddings::MockEmbed;
        use arrow_array::Array as _;
        use arrow_array::{ArrayRef, StringArray};
        use arrow_schema::{DataType, Field, Schema};

        #[tokio::test]
        async fn test_with_embeddings_scannable() {
            let schema = Arc::new(Schema::new(vec![Field::new("text", DataType::Utf8, false)]));
            let text_array = StringArray::from(vec!["hello", "world", "test"]);
            let batch =
                RecordBatch::try_new(schema.clone(), vec![Arc::new(text_array) as ArrayRef])
                    .unwrap();

            let mock_embedding: Arc<dyn EmbeddingFunction> = Arc::new(MockEmbed::new("mock", 4));
            let embedding_def = EmbeddingDefinition::new("text", "mock", Some("text_embedding"));

            let mut scannable = WithEmbeddingsScannable::try_new(
                Box::new(batch.clone()),
                vec![(embedding_def, mock_embedding)],
            )
            .unwrap();

            // Check that schema has the embedding column
            let output_schema = scannable.schema();
            assert_eq!(output_schema.fields().len(), 2);
            assert_eq!(output_schema.field(0).name(), "text");
            assert_eq!(output_schema.field(1).name(), "text_embedding");

            // Check num_rows and rescannable are preserved
            assert_eq!(scannable.num_rows(), Some(3));
            assert!(scannable.rescannable());

            // Read the data
            let stream = scannable.scan_as_stream();
            let results: Vec<RecordBatch> = stream.try_collect().await.unwrap();
            assert_eq!(results.len(), 1);

            let result_batch = &results[0];
            assert_eq!(result_batch.num_rows(), 3);
            assert_eq!(result_batch.num_columns(), 2);

            // Verify the embedding column is present and has the right shape
            let embedding_col = result_batch.column(1);
            assert_eq!(embedding_col.len(), 3);
        }

        #[tokio::test]
        async fn test_maybe_embedded_scannable_no_embeddings() {
            let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();

            // Create a table definition with no embedding columns
            let table_def = TableDefinition::new_from_schema(batch.schema());

            // Even with a registry, if there are no embedding columns, it's a passthrough
            let registry: Arc<dyn EmbeddingRegistry> = Arc::new(MemoryRegistry::new());
            let mut scannable =
                scannable_with_embeddings(Box::new(batch.clone()), &table_def, Some(&registry))
                    .unwrap();

            // Check that data passes through unchanged
            let stream = scannable.scan_as_stream();
            let results: Vec<RecordBatch> = stream.try_collect().await.unwrap();
            assert_eq!(results.len(), 1);
            assert_eq!(results[0], batch);
        }

        #[tokio::test]
        async fn test_maybe_embedded_scannable_with_embeddings() {
            let schema = Arc::new(Schema::new(vec![Field::new("text", DataType::Utf8, false)]));
            let text_array = StringArray::from(vec!["hello", "world"]);
            let batch =
                RecordBatch::try_new(schema.clone(), vec![Arc::new(text_array) as ArrayRef])
                    .unwrap();

            // Create a table definition with an embedding column
            let embedding_def = EmbeddingDefinition::new("text", "mock", Some("text_embedding"));
            let embedding_schema = Arc::new(Schema::new(vec![
                Field::new("text", DataType::Utf8, false),
                Field::new(
                    "text_embedding",
                    DataType::FixedSizeList(
                        Arc::new(Field::new("item", DataType::Float32, true)),
                        4,
                    ),
                    false,
                ),
            ]));
            let table_def = TableDefinition::new(
                embedding_schema,
                vec![
                    ColumnDefinition {
                        kind: ColumnKind::Physical,
                    },
                    ColumnDefinition {
                        kind: ColumnKind::Embedding(embedding_def.clone()),
                    },
                ],
            );

            // Register the mock embedding function
            let registry: Arc<dyn EmbeddingRegistry> = Arc::new(MemoryRegistry::new());
            let mock_embedding: Arc<dyn EmbeddingFunction> = Arc::new(MockEmbed::new("mock", 4));
            registry.register("mock", mock_embedding).unwrap();

            let mut scannable =
                scannable_with_embeddings(Box::new(batch), &table_def, Some(&registry)).unwrap();

            // Read and verify the data has embeddings
            let stream = scannable.scan_as_stream();
            let results: Vec<RecordBatch> = stream.try_collect().await.unwrap();
            assert_eq!(results.len(), 1);

            let result_batch = &results[0];
            assert_eq!(result_batch.num_columns(), 2);
            assert_eq!(result_batch.schema().field(1).name(), "text_embedding");
        }

        #[tokio::test]
        async fn test_maybe_embedded_scannable_missing_function() {
            let schema = Arc::new(Schema::new(vec![Field::new("text", DataType::Utf8, false)]));
            let text_array = StringArray::from(vec!["hello"]);
            let batch =
                RecordBatch::try_new(schema.clone(), vec![Arc::new(text_array) as ArrayRef])
                    .unwrap();

            // Create a table definition with an embedding column
            let embedding_def =
                EmbeddingDefinition::new("text", "nonexistent", Some("text_embedding"));
            let embedding_schema = Arc::new(Schema::new(vec![
                Field::new("text", DataType::Utf8, false),
                Field::new(
                    "text_embedding",
                    DataType::FixedSizeList(
                        Arc::new(Field::new("item", DataType::Float32, true)),
                        4,
                    ),
                    false,
                ),
            ]));
            let table_def = TableDefinition::new(
                embedding_schema,
                vec![
                    ColumnDefinition {
                        kind: ColumnKind::Physical,
                    },
                    ColumnDefinition {
                        kind: ColumnKind::Embedding(embedding_def),
                    },
                ],
            );

            // Registry has no embedding functions registered
            let registry: Arc<dyn EmbeddingRegistry> = Arc::new(MemoryRegistry::new());

            let result = scannable_with_embeddings(Box::new(batch), &table_def, Some(&registry));

            // Should fail because the embedding function is not found
            assert!(result.is_err());
            let err = result.err().unwrap();
            assert!(
                matches!(err, Error::EmbeddingFunctionNotFound { .. }),
                "Expected EmbeddingFunctionNotFound"
            );
        }
    }
}
@@ -18,12 +18,7 @@ use std::collections::HashMap;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use arrow_array::RecordBatchReader;
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
|
|
||||||
use futures::stream;
|
|
||||||
use lance::dataset::ReadParams;
|
use lance::dataset::ReadParams;
|
||||||
use lance_datafusion::utils::StreamingWriteSource;
|
|
||||||
use lance_namespace::models::{
|
use lance_namespace::models::{
|
||||||
CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
|
CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
|
||||||
DescribeNamespaceResponse, DropNamespaceRequest, DropNamespaceResponse, ListNamespacesRequest,
|
DescribeNamespaceResponse, DropNamespaceRequest, DropNamespaceResponse, ListNamespacesRequest,
|
||||||
@@ -31,9 +26,9 @@ use lance_namespace::models::{
|
|||||||
};
|
};
|
||||||
use lance_namespace::LanceNamespace;
|
use lance_namespace::LanceNamespace;
|
||||||
|
|
||||||
use crate::arrow::{SendableRecordBatchStream, SendableRecordBatchStreamExt};
|
use crate::data::scannable::Scannable;
|
||||||
use crate::error::Result;
|
use crate::error::Result;
|
||||||
use crate::table::{BaseTable, TableDefinition, WriteOptions};
|
use crate::table::{BaseTable, WriteOptions};
|
||||||
|
|
||||||
pub mod listing;
|
pub mod listing;
|
||||||
pub mod namespace;
|
pub mod namespace;
|
||||||
@@ -90,8 +85,10 @@ pub type TableBuilderCallback = Box<dyn FnOnce(OpenTableRequest) -> OpenTableReq
|
|||||||
|
|
||||||
/// Describes what happens when creating a table and a table with
|
/// Describes what happens when creating a table and a table with
|
||||||
/// the same name already exists
|
/// the same name already exists
|
||||||
|
#[derive(Default)]
|
||||||
pub enum CreateTableMode {
|
pub enum CreateTableMode {
|
||||||
/// If the table already exists, an error is returned
|
/// If the table already exists, an error is returned
|
||||||
|
#[default]
|
||||||
Create,
|
Create,
|
||||||
/// If the table already exists, it is opened. Any provided data is
|
/// If the table already exists, it is opened. Any provided data is
|
||||||
/// ignored. The function will be passed an OpenTableBuilder to customize
|
/// ignored. The function will be passed an OpenTableBuilder to customize
|
||||||
@@ -109,57 +106,14 @@ impl CreateTableMode {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for CreateTableMode {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self::Create
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The data to start a table or a schema to create an empty table
|
|
||||||
pub enum CreateTableData {
|
|
||||||
/// Creates a table using an iterator of data, the schema will be obtained from the data
|
|
||||||
Data(Box<dyn RecordBatchReader + Send>),
|
|
||||||
/// Creates a table using a stream of data, the schema will be obtained from the data
|
|
||||||
StreamingData(SendableRecordBatchStream),
|
|
||||||
/// Creates an empty table, the definition / schema must be provided separately
|
|
||||||
Empty(TableDefinition),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl CreateTableData {
|
|
||||||
pub fn schema(&self) -> Arc<arrow_schema::Schema> {
|
|
||||||
match self {
|
|
||||||
Self::Data(reader) => reader.schema(),
|
|
||||||
Self::StreamingData(stream) => stream.schema(),
|
|
||||||
Self::Empty(definition) => definition.schema.clone(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl StreamingWriteSource for CreateTableData {
|
|
||||||
fn arrow_schema(&self) -> Arc<arrow_schema::Schema> {
|
|
||||||
self.schema()
|
|
||||||
}
|
|
||||||
fn into_stream(self) -> datafusion_physical_plan::SendableRecordBatchStream {
|
|
||||||
match self {
|
|
||||||
Self::Data(reader) => reader.into_stream(),
|
|
||||||
Self::StreamingData(stream) => stream.into_df_stream(),
|
|
||||||
Self::Empty(table_definition) => {
|
|
||||||
let schema = table_definition.schema.clone();
|
|
||||||
Box::pin(RecordBatchStreamAdapter::new(schema, stream::empty()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A request to create a table
|
/// A request to create a table
|
||||||
pub struct CreateTableRequest {
|
pub struct CreateTableRequest {
|
||||||
/// The name of the new table
|
/// The name of the new table
|
||||||
pub name: String,
|
pub name: String,
|
||||||
/// The namespace to create the table in. Empty list represents root namespace.
|
/// The namespace to create the table in. Empty list represents root namespace.
|
||||||
pub namespace: Vec<String>,
|
pub namespace: Vec<String>,
|
||||||
/// Initial data to write to the table, can be None to create an empty table
|
/// Initial data to write to the table, can be empty.
|
||||||
pub data: CreateTableData,
|
pub data: Box<dyn Scannable>,
|
||||||
/// The mode to use when creating the table
|
/// The mode to use when creating the table
|
||||||
pub mode: CreateTableMode,
|
pub mode: CreateTableMode,
|
||||||
/// Options to use when writing data (only used if `data` is not None)
|
/// Options to use when writing data (only used if `data` is not None)
|
||||||
@@ -173,7 +127,7 @@ pub struct CreateTableRequest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl CreateTableRequest {
|
impl CreateTableRequest {
|
||||||
pub fn new(name: String, data: CreateTableData) -> Self {
|
pub fn new(name: String, data: Box<dyn Scannable>) -> Self {
|
||||||
Self {
|
Self {
|
||||||
name,
|
name,
|
||||||
namespace: vec![],
|
namespace: vec![],
|
||||||
|
|||||||
@@ -922,7 +922,7 @@ impl Database for ListingDatabase {
|
|||||||
.with_read_params(read_params.clone())
|
.with_read_params(read_params.clone())
|
||||||
.load()
|
.load()
|
||||||
.await
|
.await
|
||||||
.map_err(|e| Error::Lance { source: e })?;
|
.map_err(|e| -> Error { e.into() })?;
|
||||||
|
|
||||||
let version_ref = match (request.source_version, request.source_tag) {
|
let version_ref = match (request.source_version, request.source_tag) {
|
||||||
(Some(v), None) => Ok(Ref::Version(None, Some(v))),
|
(Some(v), None) => Ok(Ref::Version(None, Some(v))),
|
||||||
@@ -937,7 +937,7 @@ impl Database for ListingDatabase {
|
|||||||
source_dataset
|
source_dataset
|
||||||
.shallow_clone(&target_uri, version_ref, Some(storage_params))
|
.shallow_clone(&target_uri, version_ref, Some(storage_params))
|
||||||
.await
|
.await
|
||||||
.map_err(|e| Error::Lance { source: e })?;
|
.map_err(|e| -> Error { e.into() })?;
|
||||||
|
|
||||||
let cloned_table = NativeTable::open_with_params(
|
let cloned_table = NativeTable::open_with_params(
|
||||||
&target_uri,
|
&target_uri,
|
||||||
@@ -1098,8 +1098,10 @@ impl Database for ListingDatabase {
|
|||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::connection::ConnectRequest;
|
use crate::connection::ConnectRequest;
|
||||||
use crate::database::{CreateTableData, CreateTableMode, CreateTableRequest, WriteOptions};
|
use crate::data::scannable::Scannable;
|
||||||
use crate::table::{Table, TableDefinition};
|
use crate::database::{CreateTableMode, CreateTableRequest};
|
||||||
|
use crate::table::WriteOptions;
|
||||||
|
use crate::Table;
|
||||||
use arrow_array::{Int32Array, RecordBatch, StringArray};
|
use arrow_array::{Int32Array, RecordBatch, StringArray};
|
||||||
use arrow_schema::{DataType, Field, Schema};
|
use arrow_schema::{DataType, Field, Schema};
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
@@ -1139,7 +1141,7 @@ mod tests {
|
|||||||
.create_table(CreateTableRequest {
|
.create_table(CreateTableRequest {
|
||||||
name: "source_table".to_string(),
|
name: "source_table".to_string(),
|
||||||
namespace: vec![],
|
namespace: vec![],
|
||||||
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema.clone())),
|
data: Box::new(RecordBatch::new_empty(schema.clone())) as Box<dyn Scannable>,
|
||||||
mode: CreateTableMode::Create,
|
mode: CreateTableMode::Create,
|
||||||
write_options: Default::default(),
|
write_options: Default::default(),
|
||||||
location: None,
|
location: None,
|
||||||
@@ -1196,16 +1198,11 @@ mod tests {
|
|||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let reader = Box::new(arrow_array::RecordBatchIterator::new(
|
|
||||||
vec![Ok(batch)],
|
|
||||||
schema.clone(),
|
|
||||||
));
|
|
||||||
|
|
||||||
let source_table = db
|
let source_table = db
|
||||||
.create_table(CreateTableRequest {
|
.create_table(CreateTableRequest {
|
||||||
name: "source_with_data".to_string(),
|
name: "source_with_data".to_string(),
|
||||||
namespace: vec![],
|
namespace: vec![],
|
||||||
data: CreateTableData::Data(reader),
|
data: Box::new(batch) as Box<dyn Scannable>,
|
||||||
mode: CreateTableMode::Create,
|
mode: CreateTableMode::Create,
|
||||||
write_options: Default::default(),
|
write_options: Default::default(),
|
||||||
location: None,
|
location: None,
|
||||||
@@ -1264,7 +1261,7 @@ mod tests {
|
|||||||
db.create_table(CreateTableRequest {
|
db.create_table(CreateTableRequest {
|
||||||
name: "source".to_string(),
|
name: "source".to_string(),
|
||||||
namespace: vec![],
|
namespace: vec![],
|
||||||
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
|
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
|
||||||
mode: CreateTableMode::Create,
|
mode: CreateTableMode::Create,
|
||||||
write_options: Default::default(),
|
write_options: Default::default(),
|
||||||
location: None,
|
location: None,
|
||||||
@@ -1300,7 +1297,7 @@ mod tests {
|
|||||||
db.create_table(CreateTableRequest {
|
db.create_table(CreateTableRequest {
|
||||||
name: "source".to_string(),
|
name: "source".to_string(),
|
||||||
namespace: vec![],
|
namespace: vec![],
|
||||||
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
|
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
|
||||||
mode: CreateTableMode::Create,
|
mode: CreateTableMode::Create,
|
||||||
write_options: Default::default(),
|
write_options: Default::default(),
|
||||||
location: None,
|
location: None,
|
||||||
@@ -1340,7 +1337,7 @@ mod tests {
|
|||||||
db.create_table(CreateTableRequest {
|
db.create_table(CreateTableRequest {
|
||||||
name: "source".to_string(),
|
name: "source".to_string(),
|
||||||
namespace: vec![],
|
namespace: vec![],
|
||||||
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
|
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
|
||||||
mode: CreateTableMode::Create,
|
mode: CreateTableMode::Create,
|
||||||
write_options: Default::default(),
|
write_options: Default::default(),
|
||||||
location: None,
|
location: None,
|
||||||
@@ -1380,7 +1377,7 @@ mod tests {
|
|||||||
db.create_table(CreateTableRequest {
|
db.create_table(CreateTableRequest {
|
||||||
name: "source".to_string(),
|
name: "source".to_string(),
|
||||||
namespace: vec![],
|
namespace: vec![],
|
||||||
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
|
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
|
||||||
mode: CreateTableMode::Create,
|
mode: CreateTableMode::Create,
|
||||||
write_options: Default::default(),
|
write_options: Default::default(),
|
||||||
location: None,
|
location: None,
|
||||||
@@ -1435,7 +1432,7 @@ mod tests {
|
|||||||
db.create_table(CreateTableRequest {
|
db.create_table(CreateTableRequest {
|
||||||
name: "source".to_string(),
|
name: "source".to_string(),
|
||||||
namespace: vec![],
|
namespace: vec![],
|
||||||
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
|
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
|
||||||
mode: CreateTableMode::Create,
|
mode: CreateTableMode::Create,
|
||||||
write_options: Default::default(),
|
write_options: Default::default(),
|
||||||
location: None,
|
location: None,
|
||||||
@@ -1484,16 +1481,11 @@ mod tests {
|
|||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let reader = Box::new(arrow_array::RecordBatchIterator::new(
|
|
||||||
vec![Ok(batch1)],
|
|
||||||
schema.clone(),
|
|
||||||
));
|
|
||||||
|
|
||||||
let source_table = db
|
let source_table = db
|
||||||
.create_table(CreateTableRequest {
|
.create_table(CreateTableRequest {
|
||||||
name: "versioned_source".to_string(),
|
name: "versioned_source".to_string(),
|
||||||
namespace: vec![],
|
namespace: vec![],
|
||||||
data: CreateTableData::Data(reader),
|
data: Box::new(batch1) as Box<dyn Scannable>,
|
||||||
mode: CreateTableMode::Create,
|
mode: CreateTableMode::Create,
|
||||||
write_options: Default::default(),
|
write_options: Default::default(),
|
||||||
location: None,
|
location: None,
|
||||||
@@ -1517,14 +1509,7 @@ mod tests {
|
|||||||
|
|
||||||
let db = Arc::new(db);
|
let db = Arc::new(db);
|
||||||
let source_table_obj = Table::new(source_table.clone(), db.clone());
|
let source_table_obj = Table::new(source_table.clone(), db.clone());
|
||||||
source_table_obj
|
source_table_obj.add(batch2).execute().await.unwrap();
|
||||||
.add(Box::new(arrow_array::RecordBatchIterator::new(
|
|
||||||
vec![Ok(batch2)],
|
|
||||||
schema.clone(),
|
|
||||||
)))
|
|
||||||
.execute()
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
// Verify source table now has 4 rows
|
// Verify source table now has 4 rows
|
||||||
assert_eq!(source_table.count_rows(None).await.unwrap(), 4);
|
assert_eq!(source_table.count_rows(None).await.unwrap(), 4);
|
||||||
@@ -1570,16 +1555,11 @@ mod tests {
|
|||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let reader = Box::new(arrow_array::RecordBatchIterator::new(
|
|
||||||
vec![Ok(batch1)],
|
|
||||||
schema.clone(),
|
|
||||||
));
|
|
||||||
|
|
||||||
let source_table = db
|
let source_table = db
|
||||||
.create_table(CreateTableRequest {
|
.create_table(CreateTableRequest {
|
||||||
name: "tagged_source".to_string(),
|
name: "tagged_source".to_string(),
|
||||||
namespace: vec![],
|
namespace: vec![],
|
||||||
data: CreateTableData::Data(reader),
|
data: Box::new(batch1),
|
||||||
mode: CreateTableMode::Create,
|
mode: CreateTableMode::Create,
|
||||||
write_options: Default::default(),
|
write_options: Default::default(),
|
||||||
location: None,
|
location: None,
|
||||||
@@ -1607,14 +1587,7 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let source_table_obj = Table::new(source_table.clone(), db.clone());
|
let source_table_obj = Table::new(source_table.clone(), db.clone());
|
||||||
source_table_obj
|
source_table_obj.add(batch2).execute().await.unwrap();
|
||||||
.add(Box::new(arrow_array::RecordBatchIterator::new(
|
|
||||||
vec![Ok(batch2)],
|
|
||||||
schema.clone(),
|
|
||||||
)))
|
|
||||||
.execute()
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
// Source table should have 4 rows
|
// Source table should have 4 rows
|
||||||
assert_eq!(source_table.count_rows(None).await.unwrap(), 4);
|
assert_eq!(source_table.count_rows(None).await.unwrap(), 4);
|
||||||
@@ -1657,16 +1630,11 @@ mod tests {
|
|||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let reader = Box::new(arrow_array::RecordBatchIterator::new(
|
|
||||||
vec![Ok(batch1)],
|
|
||||||
schema.clone(),
|
|
||||||
));
|
|
||||||
|
|
||||||
let source_table = db
|
let source_table = db
|
||||||
.create_table(CreateTableRequest {
|
.create_table(CreateTableRequest {
|
||||||
name: "independent_source".to_string(),
|
name: "independent_source".to_string(),
|
||||||
namespace: vec![],
|
namespace: vec![],
|
||||||
data: CreateTableData::Data(reader),
|
data: Box::new(batch1),
|
||||||
mode: CreateTableMode::Create,
|
mode: CreateTableMode::Create,
|
||||||
write_options: Default::default(),
|
write_options: Default::default(),
|
||||||
location: None,
|
location: None,
|
||||||
@@ -1706,14 +1674,7 @@ mod tests {
|
|||||||
|
|
||||||
let db = Arc::new(db);
|
let db = Arc::new(db);
|
||||||
let cloned_table_obj = Table::new(cloned_table.clone(), db.clone());
|
let cloned_table_obj = Table::new(cloned_table.clone(), db.clone());
|
||||||
cloned_table_obj
|
cloned_table_obj.add(batch_clone).execute().await.unwrap();
|
||||||
.add(Box::new(arrow_array::RecordBatchIterator::new(
|
|
||||||
vec![Ok(batch_clone)],
|
|
||||||
schema.clone(),
|
|
||||||
)))
|
|
||||||
.execute()
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
// Add different data to the source table
|
// Add different data to the source table
|
||||||
let batch_source = RecordBatch::try_new(
|
let batch_source = RecordBatch::try_new(
|
||||||
@@ -1726,14 +1687,7 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let source_table_obj = Table::new(source_table.clone(), db);
|
let source_table_obj = Table::new(source_table.clone(), db);
|
||||||
source_table_obj
|
source_table_obj.add(batch_source).execute().await.unwrap();
|
||||||
.add(Box::new(arrow_array::RecordBatchIterator::new(
|
|
||||||
vec![Ok(batch_source)],
|
|
||||||
schema.clone(),
|
|
||||||
)))
|
|
||||||
.execute()
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
// Verify they have evolved independently
|
// Verify they have evolved independently
|
||||||
assert_eq!(source_table.count_rows(None).await.unwrap(), 4); // 2 + 2
|
assert_eq!(source_table.count_rows(None).await.unwrap(), 4); // 2 + 2
|
||||||
@@ -1751,16 +1705,11 @@ mod tests {
|
|||||||
RecordBatch::try_new(schema.clone(), vec![Arc::new(Int32Array::from(vec![1, 2]))])
|
RecordBatch::try_new(schema.clone(), vec![Arc::new(Int32Array::from(vec![1, 2]))])
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let reader = Box::new(arrow_array::RecordBatchIterator::new(
|
|
||||||
vec![Ok(batch1)],
|
|
||||||
schema.clone(),
|
|
||||||
));
|
|
||||||
|
|
||||||
let source_table = db
|
let source_table = db
|
||||||
.create_table(CreateTableRequest {
|
.create_table(CreateTableRequest {
|
||||||
name: "latest_version_source".to_string(),
|
name: "latest_version_source".to_string(),
|
||||||
namespace: vec![],
|
namespace: vec![],
|
||||||
data: CreateTableData::Data(reader),
|
data: Box::new(batch1),
|
||||||
mode: CreateTableMode::Create,
|
mode: CreateTableMode::Create,
|
||||||
write_options: Default::default(),
|
write_options: Default::default(),
|
||||||
location: None,
|
location: None,
|
||||||
@@ -1779,14 +1728,7 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let source_table_obj = Table::new(source_table.clone(), db.clone());
|
let source_table_obj = Table::new(source_table.clone(), db.clone());
|
||||||
source_table_obj
|
source_table_obj.add(batch).execute().await.unwrap();
|
||||||
.add(Box::new(arrow_array::RecordBatchIterator::new(
|
|
||||||
vec![Ok(batch)],
|
|
||||||
schema.clone(),
|
|
||||||
)))
|
|
||||||
.execute()
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Source should have 8 rows total (2 + 2 + 2 + 2)
|
// Source should have 8 rows total (2 + 2 + 2 + 2)
|
||||||
@@ -1849,16 +1791,11 @@ mod tests {
         )
         .unwrap();
 
-        let reader = Box::new(arrow_array::RecordBatchIterator::new(
-            vec![Ok(batch)],
-            schema.clone(),
-        ));
-
         let table = db
             .create_table(CreateTableRequest {
                 name: "test_stable".to_string(),
                 namespace: vec![],
-                data: CreateTableData::Data(reader),
+                data: Box::new(batch),
                 mode: CreateTableMode::Create,
                 write_options: Default::default(),
                 location: None,
@@ -1887,11 +1824,6 @@ mod tests {
         )
         .unwrap();
 
-        let reader = Box::new(arrow_array::RecordBatchIterator::new(
-            vec![Ok(batch)],
-            schema.clone(),
-        ));
-
         let mut storage_options = HashMap::new();
         storage_options.insert(
             OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
@@ -1914,7 +1846,7 @@ mod tests {
             .create_table(CreateTableRequest {
                 name: "test_stable_table_level".to_string(),
                 namespace: vec![],
-                data: CreateTableData::Data(reader),
+                data: Box::new(batch),
                 mode: CreateTableMode::Create,
                 write_options,
                 location: None,
@@ -1963,11 +1895,6 @@ mod tests {
         )
         .unwrap();
 
-        let reader = Box::new(arrow_array::RecordBatchIterator::new(
-            vec![Ok(batch)],
-            schema.clone(),
-        ));
-
        let mut storage_options = HashMap::new();
        storage_options.insert(
            OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
@@ -1990,7 +1917,7 @@ mod tests {
             .create_table(CreateTableRequest {
                 name: "test_override".to_string(),
                 namespace: vec![],
-                data: CreateTableData::Data(reader),
+                data: Box::new(batch),
                 mode: CreateTableMode::Create,
                 write_options,
                 location: None,
@@ -2108,7 +2035,7 @@ mod tests {
     db.create_table(CreateTableRequest {
         name: "table1".to_string(),
         namespace: vec![],
-        data: CreateTableData::Empty(TableDefinition::new_from_schema(schema.clone())),
+        data: Box::new(RecordBatch::new_empty(schema.clone())) as Box<dyn Scannable>,
         mode: CreateTableMode::Create,
         write_options: Default::default(),
         location: None,
@@ -2120,7 +2047,7 @@ mod tests {
     db.create_table(CreateTableRequest {
         name: "table2".to_string(),
         namespace: vec![],
-        data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
+        data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
         mode: CreateTableMode::Create,
         write_options: Default::default(),
         location: None,
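The hunks above migrate the tests from wrapping data in a `RecordBatchIterator` to handing a `RecordBatch` to `create_table` directly. For reference, a minimal arrow-rs sketch of building such a batch; the function name here is illustrative and not part of this change:

    use std::sync::Arc;

    use arrow_array::{Int32Array, RecordBatch, StringArray};
    use arrow_schema::{DataType, Field, Schema};

    // Build a small two-column batch like the test fixtures above.
    fn example_batch() -> RecordBatch {
        let schema = Arc::new(Schema::new(vec![
            Field::new("id", DataType::Int32, false),
            Field::new("name", DataType::Utf8, false),
        ]));
        RecordBatch::try_new(
            schema,
            vec![
                Arc::new(Int32Array::from(vec![1, 2, 3])),
                Arc::new(StringArray::from(vec!["a", "b", "c"])),
            ],
        )
        .unwrap()
    }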
@@ -7,6 +7,7 @@ use std::collections::HashMap;
 use std::sync::Arc;
 
 use async_trait::async_trait;
+use lance_io::object_store::{ObjectStoreParams, StorageOptionsAccessor};
 use lance_namespace::{
     models::{
         CreateEmptyTableRequest, CreateNamespaceRequest, CreateNamespaceResponse,
@@ -212,45 +213,75 @@ impl Database for LanceNamespaceDatabase {
             ..Default::default()
         };
 
-        let location = match self.namespace.declare_table(declare_request).await {
-            Ok(response) => response.location.ok_or_else(|| Error::Runtime {
-                message: "Table location is missing from declare_table response".to_string(),
-            })?,
-            Err(e) => {
-                // Check if the error is "not supported" and try create_empty_table as fallback
-                let err_str = e.to_string().to_lowercase();
-                if err_str.contains("not supported") || err_str.contains("not implemented") {
-                    warn!(
-                        "declare_table is not supported by the namespace client, \
+        let (location, initial_storage_options) =
+            match self.namespace.declare_table(declare_request).await {
+                Ok(response) => {
+                    let loc = response.location.ok_or_else(|| Error::Runtime {
+                        message: "Table location is missing from declare_table response"
+                            .to_string(),
+                    })?;
+                    // Use storage options from response, fall back to self.storage_options
+                    let opts = response
+                        .storage_options
+                        .or_else(|| Some(self.storage_options.clone()))
+                        .filter(|o| !o.is_empty());
+                    (loc, opts)
+                }
+                Err(e) => {
+                    // Check if the error is "not supported" and try create_empty_table as fallback
+                    let err_str = e.to_string().to_lowercase();
+                    if err_str.contains("not supported") || err_str.contains("not implemented") {
+                        warn!(
+                            "declare_table is not supported by the namespace client, \
                         falling back to deprecated create_empty_table. \
                         create_empty_table is deprecated and will be removed in Lance 3.0.0. \
                         Please upgrade your namespace client to support declare_table."
                         );
                         #[allow(deprecated)]
                         let create_empty_request = CreateEmptyTableRequest {
                             id: Some(table_id.clone()),
                             ..Default::default()
                         };
 
                         #[allow(deprecated)]
                         let create_response = self
                             .namespace
                             .create_empty_table(create_empty_request)
                             .await
                             .map_err(|e| Error::Runtime {
                                 message: format!("Failed to create empty table: {}", e),
+                            })?;
+
+                        let loc = create_response.location.ok_or_else(|| Error::Runtime {
+                            message: "Table location is missing from create_empty_table response"
+                                .to_string(),
                         })?;
-                        create_response.location.ok_or_else(|| Error::Runtime {
-                            message: "Table location is missing from create_empty_table response"
-                                .to_string(),
-                        })?
-                    } else {
-                        return Err(Error::Runtime {
-                            message: format!("Failed to declare table: {}", e),
-                        });
+                        // For deprecated path, use self.storage_options
+                        let opts = if self.storage_options.is_empty() {
+                            None
+                        } else {
+                            Some(self.storage_options.clone())
+                        };
+                        (loc, opts)
+                    } else {
+                        return Err(Error::Runtime {
+                            message: format!("Failed to declare table: {}", e),
+                        });
+                    }
                 }
-            }
+            };
 
+        let write_params = if let Some(storage_opts) = initial_storage_options {
+            let mut params = request.write_options.lance_write_params.unwrap_or_default();
+            let store_params = params
+                .store_params
+                .get_or_insert_with(ObjectStoreParams::default);
+            store_params.storage_options_accessor = Some(Arc::new(
+                StorageOptionsAccessor::with_static_options(storage_opts),
+            ));
+            Some(params)
+        } else {
+            request.write_options.lance_write_params
         };
 
         let native_table = NativeTable::create_from_namespace(
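The fallback above is driven purely by the error text: the preferred `declare_table` call runs first, and only a "not supported" / "not implemented" failure routes to the deprecated path. A self-contained sketch of that probe-and-fallback shape with toy generics (not the lancedb or lance-namespace API):

    // Try the preferred async call; fall back to the deprecated one only
    // when the error text reports the method as unsupported.
    async fn call_with_fallback<T, E, P, D>(preferred: P, deprecated: D) -> Result<T, E>
    where
        E: std::fmt::Display,
        P: std::future::Future<Output = Result<T, E>>,
        D: std::future::Future<Output = Result<T, E>>,
    {
        match preferred.await {
            Ok(v) => Ok(v),
            Err(e) => {
                let msg = e.to_string().to_lowercase();
                if msg.contains("not supported") || msg.contains("not implemented") {
                    deprecated.await
                } else {
                    Err(e)
                }
            }
        }
    }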
@@ -260,7 +291,7 @@ impl Database for LanceNamespaceDatabase {
             request.namespace.clone(),
             request.data,
             None, // write_store_wrapper not used for namespace connections
-            request.write_options.lance_write_params,
+            write_params,
             self.read_consistency_interval,
             self.server_side_query_enabled,
             self.session.clone(),
@@ -354,15 +385,13 @@ mod tests {
     use super::*;
     use crate::connect_namespace;
     use crate::query::ExecutableQuery;
-    use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, StringArray};
+    use arrow_array::{Int32Array, RecordBatch, StringArray};
     use arrow_schema::{DataType, Field, Schema};
     use futures::TryStreamExt;
     use tempfile::tempdir;
 
     /// Helper function to create test data
-    fn create_test_data() -> RecordBatchIterator<
-        std::vec::IntoIter<std::result::Result<RecordBatch, arrow_schema::ArrowError>>,
-    > {
+    fn create_test_data() -> RecordBatch {
         let schema = Arc::new(Schema::new(vec![
             Field::new("id", DataType::Int32, false),
             Field::new("name", DataType::Utf8, false),
@@ -371,12 +400,7 @@ mod tests {
         let id_array = Int32Array::from(vec![1, 2, 3, 4, 5]);
         let name_array = StringArray::from(vec!["Alice", "Bob", "Charlie", "David", "Eve"]);
 
-        let batch = RecordBatch::try_new(
-            schema.clone(),
-            vec![Arc::new(id_array), Arc::new(name_array)],
-        )
-        .unwrap();
-        RecordBatchIterator::new(vec![std::result::Result::Ok(batch)].into_iter(), schema)
+        RecordBatch::try_new(schema, vec![Arc::new(id_array), Arc::new(name_array)]).unwrap()
     }
 
     #[tokio::test]
@@ -618,13 +642,7 @@ mod tests {
 
         // Test: Overwrite the table
         let table2 = conn
-            .create_table(
-                "overwrite_test",
-                RecordBatchIterator::new(
-                    vec![std::result::Result::Ok(test_data2)].into_iter(),
-                    schema,
-                ),
-            )
+            .create_table("overwrite_test", test_data2)
            .namespace(vec!["test_ns".into()])
            .mode(CreateTableMode::Overwrite)
            .execute()
@@ -13,7 +13,7 @@ use lance_datafusion::exec::SessionContextExt;
 use crate::{
     arrow::{SendableRecordBatchStream, SendableRecordBatchStreamExt, SimpleRecordBatchStream},
     connect,
-    database::{CreateTableData, CreateTableRequest, Database},
+    database::{CreateTableRequest, Database},
     dataloader::permutation::{
         shuffle::{Shuffler, ShufflerConfig},
         split::{SplitStrategy, Splitter, SPLIT_ID_COLUMN},
@@ -57,7 +57,7 @@ pub struct PermutationConfig {
 }
 
 /// Strategy for shuffling the data.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
 pub enum ShuffleStrategy {
     /// The data is randomly shuffled
     ///
@@ -78,15 +78,10 @@ pub enum ShuffleStrategy {
     /// The data is not shuffled
     ///
     /// This is useful for debugging and testing.
+    #[default]
     None,
 }
 
-impl Default for ShuffleStrategy {
-    fn default() -> Self {
-        Self::None
-    }
-}
-
 /// Builder for creating a permutation table.
 ///
 /// A permutation table is a table that stores split assignments and a shuffled order of rows. This
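The hunk above replaces a hand-written `impl Default` with the derived form; since Rust 1.62 an enum can derive `Default` by marking one unit variant with `#[default]`. A minimal sketch with a stand-in enum:

    // Stand-in enum showing the derive pattern adopted above.
    #[derive(Debug, Clone, Default)]
    pub enum Strategy {
        Random { seed: Option<u64> },
        #[default]
        None,
    }

    fn main() {
        // The derived Default yields the #[default] variant.
        assert!(matches!(Strategy::default(), Strategy::None));
    }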
@@ -313,10 +308,8 @@ impl PermutationBuilder {
             }
         };
 
-        let create_table_request = CreateTableRequest::new(
-            name.to_string(),
-            CreateTableData::StreamingData(streaming_data),
-        );
+        let create_table_request =
+            CreateTableRequest::new(name.to_string(), Box::new(streaming_data));
 
         let table = database.create_table(create_table_request).await?;
 
@@ -347,7 +340,7 @@ mod tests {
             .col("col_b", lance_datagen::array::step::<Int32Type>())
             .into_ldb_stream(RowCount::from(100), BatchCount::from(10));
         let data_table = db
-            .create_table_streaming("base_tbl", initial_data)
+            .create_table("base_tbl", initial_data)
             .execute()
             .await
             .unwrap();
@@ -387,7 +380,7 @@ mod tests {
             .col("some_value", lance_datagen::array::step::<Int32Type>())
             .into_ldb_stream(RowCount::from(100), BatchCount::from(10));
         let data_table = db
-            .create_table_streaming("mytbl", initial_data)
+            .create_table("mytbl", initial_data)
             .execute()
             .await
             .unwrap();
@@ -39,6 +39,9 @@ pub struct PermutationReader {
     limit: Option<u64>,
     available_rows: u64,
     split: u64,
+    // Cached map of offset to row id for the split
+    #[allow(clippy::type_complexity)]
+    offset_map: Arc<tokio::sync::Mutex<Option<Arc<HashMap<u64, u64>>>>>,
 }
 
 impl std::fmt::Debug for PermutationReader {
@@ -72,6 +75,7 @@ impl PermutationReader {
             limit: None,
             available_rows: 0,
             split,
+            offset_map: Arc::new(tokio::sync::Mutex::new(None)),
         };
         slf.validate().await?;
         // Calculate the number of available rows
@@ -157,6 +161,7 @@ impl PermutationReader {
         let available_rows = self.verify_limit_offset(self.limit, Some(offset)).await?;
         self.offset = Some(offset);
         self.available_rows = available_rows;
+        self.offset_map = Arc::new(tokio::sync::Mutex::new(None));
         Ok(self)
     }
 
@@ -164,6 +169,7 @@ impl PermutationReader {
         let available_rows = self.verify_limit_offset(Some(limit), self.offset).await?;
         self.available_rows = available_rows;
         self.limit = Some(limit);
+        self.offset_map = Arc::new(tokio::sync::Mutex::new(None));
         Ok(self)
     }
 
@@ -180,8 +186,9 @@ impl PermutationReader {
         base_table: &Arc<dyn BaseTable>,
         row_ids: RecordBatch,
         selection: Select,
-        has_row_id: bool,
     ) -> Result<RecordBatch> {
+        let has_row_id = Self::has_row_id(&selection)?;
+
         let num_rows = row_ids.num_rows();
         let row_ids = row_ids
             .column(0)
@@ -282,14 +289,13 @@ impl PermutationReader {
         row_ids: DatasetRecordBatchStream,
         selection: Select,
     ) -> Result<SendableRecordBatchStream> {
-        let has_row_id = Self::has_row_id(&selection)?;
         let mut stream = row_ids
             .map_err(Error::from)
             .try_filter_map(move |batch| {
                 let selection = selection.clone();
                 let base_table = base_table.clone();
                 async move {
-                    Self::load_batch(&base_table, batch, selection, has_row_id)
+                    Self::load_batch(&base_table, batch, selection)
                         .await
                         .map(Some)
                 }
@@ -397,6 +403,84 @@ impl PermutationReader {
         Self::row_ids_to_batches(self.base_table.clone(), row_ids, selection).await
     }
 
+    /// If we are going to use `take` then we load the offset -> row id map once for the split and cache it
+    ///
+    /// This method fetches the map with find-or-create semantics.
+    async fn get_offset_map(
+        &self,
+        permutation_table: &Arc<dyn BaseTable>,
+    ) -> Result<Arc<HashMap<u64, u64>>> {
+        let mut offset_map_ref = self.offset_map.lock().await;
+        if let Some(offset_map) = &*offset_map_ref {
+            return Ok(offset_map.clone());
+        }
+        let mut offset_map = HashMap::new();
+        let mut row_ids_query = Table::from(permutation_table.clone())
+            .query()
+            .select(Select::Columns(vec![SRC_ROW_ID_COL.to_string()]))
+            .only_if(format!("{} = {}", SPLIT_ID_COLUMN, self.split));
+        if let Some(offset) = self.offset {
+            row_ids_query = row_ids_query.offset(offset as usize);
+        }
+        if let Some(limit) = self.limit {
+            row_ids_query = row_ids_query.limit(limit as usize);
+        }
+        let mut row_ids = row_ids_query.execute().await?;
+        let mut idx_offset = 0;
+        while let Some(batch) = row_ids.try_next().await? {
+            let row_ids = batch
+                .column(0)
+                .as_primitive::<UInt64Type>()
+                .values()
+                .to_vec();
+            for (i, row_id) in row_ids.iter().enumerate() {
+                offset_map.insert(i as u64 + idx_offset, *row_id);
+            }
+            idx_offset += batch.num_rows() as u64;
+        }
+        let offset_map = Arc::new(offset_map);
+        *offset_map_ref = Some(offset_map.clone());
+        Ok(offset_map)
+    }
+
+    pub async fn take_offsets(&self, offsets: &[u64], selection: Select) -> Result<RecordBatch> {
+        if let Some(permutation_table) = &self.permutation_table {
+            let offset_map = self.get_offset_map(permutation_table).await?;
+            let row_ids = offsets
+                .iter()
+                .map(|o| offset_map.get(o).copied().expect_ok().map_err(Error::from))
+                .collect::<Result<Vec<_>>>()?;
+            let row_ids = RecordBatch::try_new(
+                Arc::new(arrow_schema::Schema::new(vec![arrow_schema::Field::new(
+                    "row_id",
+                    arrow_schema::DataType::UInt64,
+                    false,
+                )])),
+                vec![Arc::new(UInt64Array::from(row_ids))],
+            )?;
+            Self::load_batch(&self.base_table, row_ids, selection).await
+        } else {
+            let table = Table::from(self.base_table.clone());
+            let batches = table
+                .take_offsets(offsets.to_vec())
+                .select(selection.clone())
+                .execute()
+                .await?
+                .try_collect::<Vec<_>>()
+                .await?;
+            if let Some(first_batch) = batches.first() {
+                let schema = first_batch.schema();
+                let batch = arrow::compute::concat_batches(&schema, &batches)?;
+                Ok(batch)
+            } else {
+                Ok(RecordBatch::try_new(
+                    self.output_schema(selection).await?,
+                    vec![],
+                )?)
+            }
+        }
+    }
+
     pub async fn output_schema(&self, selection: Select) -> Result<SchemaRef> {
         let table = Table::from(self.base_table.clone());
         table.query().select(selection).output_schema().await
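`get_offset_map` above implements find-or-create caching behind a `tokio::sync::Mutex<Option<Arc<...>>>`: the first caller builds the map while holding the lock, and every later caller reuses the same `Arc`. A stripped-down sketch of the pattern, assuming tokio; the build step is a stand-in for the permutation-table scan:

    use std::collections::HashMap;
    use std::sync::Arc;

    use tokio::sync::Mutex;

    struct Cache {
        map: Mutex<Option<Arc<HashMap<u64, u64>>>>,
    }

    impl Cache {
        // Find-or-create: build once under the lock, then hand out clones
        // of the cached Arc on every subsequent call.
        async fn get(&self) -> Arc<HashMap<u64, u64>> {
            let mut guard = self.map.lock().await;
            if let Some(existing) = &*guard {
                return existing.clone();
            }
            let built = Arc::new(expensive_build().await);
            *guard = Some(built.clone());
            built
        }
    }

    // Stand-in for the offset -> row id scan performed above.
    async fn expensive_build() -> HashMap<u64, u64> {
        (0..10u64).map(|i| (i, i * 2)).collect()
    }

    #[tokio::main]
    async fn main() {
        let cache = Cache { map: Mutex::new(None) };
        let first = cache.get().await;
        let second = cache.get().await;
        assert!(Arc::ptr_eq(&first, &second)); // second call hit the cache
    }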
@@ -543,4 +627,326 @@ mod tests {
         check_batch(&mut stream, &row_ids[7..9]).await;
         assert!(stream.try_next().await.unwrap().is_none());
     }
+
+    /// Helper to create a base table and permutation table for take_offsets tests.
+    /// Returns (base_table, row_ids_table, shuffled_row_ids).
+    async fn setup_permutation_tables(num_rows: usize) -> (Table, Table, Vec<u64>) {
+        let base_table = lance_datagen::gen_batch()
+            .col("idx", lance_datagen::array::step::<Int32Type>())
+            .col("other_col", lance_datagen::array::step::<UInt64Type>())
+            .into_mem_table("tbl", RowCount::from(num_rows as u64), BatchCount::from(1))
+            .await;
+
+        let mut row_ids = collect_column::<UInt64Type>(&base_table, "_rowid").await;
+        row_ids.shuffle(&mut rand::rng());
+
+        let split_ids = UInt64Array::from_iter_values(std::iter::repeat_n(0u64, row_ids.len()));
+        let permutation_batch = RecordBatch::try_new(
+            Arc::new(Schema::new(vec![
+                Field::new("row_id", DataType::UInt64, false),
+                Field::new(SPLIT_ID_COLUMN, DataType::UInt64, false),
+            ])),
+            vec![
+                Arc::new(UInt64Array::from(row_ids.clone())),
+                Arc::new(split_ids),
+            ],
+        )
+        .unwrap();
+        let row_ids_table = virtual_table("row_ids", &permutation_batch).await;
+
+        (base_table, row_ids_table, row_ids)
+    }
+
+    #[tokio::test]
+    async fn test_take_offsets_with_permutation_table() {
+        let (base_table, row_ids_table, row_ids) = setup_permutation_tables(10).await;
+
+        let reader = PermutationReader::try_from_tables(
+            base_table.base_table().clone(),
+            row_ids_table.base_table().clone(),
+            0,
+        )
+        .await
+        .unwrap();
+
+        // Take specific offsets and verify the returned rows match the permutation order
+        let offsets = vec![0, 2, 4];
+        let batch = reader.take_offsets(&offsets, Select::All).await.unwrap();
+
+        assert_eq!(batch.num_rows(), 3);
+
+        let idx_values = batch
+            .column(0)
+            .as_primitive::<Int32Type>()
+            .values()
+            .to_vec();
+        let expected: Vec<i32> = offsets
+            .iter()
+            .map(|&o| row_ids[o as usize] as i32)
+            .collect();
+        assert_eq!(idx_values, expected);
+    }
+
+    #[tokio::test]
+    async fn test_take_offsets_preserves_order() {
+        let (base_table, row_ids_table, row_ids) = setup_permutation_tables(10).await;
+
+        let reader = PermutationReader::try_from_tables(
+            base_table.base_table().clone(),
+            row_ids_table.base_table().clone(),
+            0,
+        )
+        .await
+        .unwrap();
+
+        // Take offsets in reverse order and verify returned rows match that order
+        let offsets = vec![5, 3, 1, 0];
+        let batch = reader.take_offsets(&offsets, Select::All).await.unwrap();
+
+        assert_eq!(batch.num_rows(), 4);
+
+        let idx_values = batch
+            .column(0)
+            .as_primitive::<Int32Type>()
+            .values()
+            .to_vec();
+        let expected: Vec<i32> = offsets
+            .iter()
+            .map(|&o| row_ids[o as usize] as i32)
+            .collect();
+        assert_eq!(idx_values, expected);
+    }
+
+    #[tokio::test]
+    async fn test_take_offsets_with_column_selection() {
+        let (base_table, row_ids_table, row_ids) = setup_permutation_tables(10).await;
+
+        let reader = PermutationReader::try_from_tables(
+            base_table.base_table().clone(),
+            row_ids_table.base_table().clone(),
+            0,
+        )
+        .await
+        .unwrap();
+
+        let offsets = vec![1, 3];
+        let batch = reader
+            .take_offsets(&offsets, Select::Columns(vec!["idx".to_string()]))
+            .await
+            .unwrap();
+
+        assert_eq!(batch.num_rows(), 2);
+        assert_eq!(batch.num_columns(), 1);
+        assert_eq!(batch.schema().field(0).name(), "idx");
+
+        let idx_values = batch
+            .column(0)
+            .as_primitive::<Int32Type>()
+            .values()
+            .to_vec();
+        let expected: Vec<i32> = offsets
+            .iter()
+            .map(|&o| row_ids[o as usize] as i32)
+            .collect();
+        assert_eq!(idx_values, expected);
+    }
+
+    #[tokio::test]
+    async fn test_take_offsets_invalid_offset() {
+        let (base_table, row_ids_table, _) = setup_permutation_tables(5).await;
+
+        let reader = PermutationReader::try_from_tables(
+            base_table.base_table().clone(),
+            row_ids_table.base_table().clone(),
+            0,
+        )
+        .await
+        .unwrap();
+
+        // Offset 999 doesn't exist in the offset map
+        let result = reader.take_offsets(&[0, 999], Select::All).await;
+        assert!(result.is_err());
+    }
+
+    #[tokio::test]
+    async fn test_take_offsets_identity_reader() {
+        let base_table = lance_datagen::gen_batch()
+            .col("idx", lance_datagen::array::step::<Int32Type>())
+            .into_mem_table("tbl", RowCount::from(10), BatchCount::from(1))
+            .await;
+
+        let reader = PermutationReader::identity(base_table.base_table().clone()).await;
+
+        // With no permutation table, take_offsets uses the base table directly
+        let offsets = vec![0, 2, 4, 6];
+        let batch = reader.take_offsets(&offsets, Select::All).await.unwrap();
+
+        assert_eq!(batch.num_rows(), 4);
+
+        let idx_values = batch
+            .column(0)
+            .as_primitive::<Int32Type>()
+            .values()
+            .to_vec();
+        assert_eq!(idx_values, vec![0, 2, 4, 6]);
+    }
+
+    #[tokio::test]
+    async fn test_take_offsets_caches_offset_map() {
+        let (base_table, row_ids_table, row_ids) = setup_permutation_tables(10).await;
+
+        let reader = PermutationReader::try_from_tables(
+            base_table.base_table().clone(),
+            row_ids_table.base_table().clone(),
+            0,
+        )
+        .await
+        .unwrap();
+
+        // First call populates the cache
+        let batch1 = reader.take_offsets(&[0, 1], Select::All).await.unwrap();
+
+        // Second call should use the cached offset map and produce consistent results
+        let batch2 = reader.take_offsets(&[0, 1], Select::All).await.unwrap();
+
+        let values1 = batch1
+            .column(0)
+            .as_primitive::<Int32Type>()
+            .values()
+            .to_vec();
+        let values2 = batch2
+            .column(0)
+            .as_primitive::<Int32Type>()
+            .values()
+            .to_vec();
+        assert_eq!(values1, values2);
+
+        let expected: Vec<i32> = vec![row_ids[0] as i32, row_ids[1] as i32];
+        assert_eq!(values1, expected);
+    }
+
+    #[tokio::test]
+    async fn test_take_offsets_single_offset() {
+        let (base_table, row_ids_table, row_ids) = setup_permutation_tables(5).await;
+
+        let reader = PermutationReader::try_from_tables(
+            base_table.base_table().clone(),
+            row_ids_table.base_table().clone(),
+            0,
+        )
+        .await
+        .unwrap();
+
+        let batch = reader.take_offsets(&[2], Select::All).await.unwrap();
+
+        assert_eq!(batch.num_rows(), 1);
+        let idx_values = batch
+            .column(0)
+            .as_primitive::<Int32Type>()
+            .values()
+            .to_vec();
+        assert_eq!(idx_values, vec![row_ids[2] as i32]);
+    }
+
+    #[tokio::test]
+    async fn test_filtered_permutation_full_iteration() {
+        use crate::dataloader::permutation::builder::PermutationBuilder;
+
+        // Create a base table with 10000 rows where idx goes 0..10000.
+        // Filter to even values only, giving 5000 rows in the permutation.
+        let base_table = lance_datagen::gen_batch()
+            .col("idx", lance_datagen::array::step::<Int32Type>())
+            .into_mem_table("tbl", RowCount::from(10000), BatchCount::from(1))
+            .await;
+
+        let permutation_table = PermutationBuilder::new(base_table.clone())
+            .with_filter("idx % 2 = 0".to_string())
+            .build()
+            .await
+            .unwrap();
+
+        assert_eq!(permutation_table.count_rows(None).await.unwrap(), 5000);
+
+        let reader = PermutationReader::try_from_tables(
+            base_table.base_table().clone(),
+            permutation_table.base_table().clone(),
+            0,
+        )
+        .await
+        .unwrap();
+
+        assert_eq!(reader.count_rows(), 5000);
+
+        // Iterate through all batches using a batch size that doesn't evenly divide
+        // the row count (5000 / 128 = 39 full batches + 1 batch of 8 rows).
+        let batch_size = 128;
+        let mut stream = reader
+            .read(
+                Select::All,
+                QueryExecutionOptions {
+                    max_batch_length: batch_size,
+                    ..Default::default()
+                },
+            )
+            .await
+            .unwrap();
+
+        let mut total_rows = 0u64;
+        let mut all_idx_values = Vec::new();
+        while let Some(batch) = stream.try_next().await.unwrap() {
+            assert!(batch.num_rows() <= batch_size as usize);
+            total_rows += batch.num_rows() as u64;
+            let idx_col = batch.column(0).as_primitive::<Int32Type>().values();
+            all_idx_values.extend(idx_col.iter().copied());
+        }
+
+        assert_eq!(total_rows, 5000);
+        assert_eq!(all_idx_values.len(), 5000);
+
+        // Every value should be even (from the filter)
+        assert!(all_idx_values.iter().all(|v| v % 2 == 0));
+
+        // Should have 5000 unique values
+        let unique: std::collections::HashSet<i32> = all_idx_values.iter().copied().collect();
+        assert_eq!(unique.len(), 5000);
+
+        // Use take_offsets to fetch rows from the beginning, middle, and end
+        // of the permutation. The values should match what we saw during iteration.
+
+        // Beginning
+        let batch = reader.take_offsets(&[0, 1, 2], Select::All).await.unwrap();
+        assert_eq!(batch.num_rows(), 3);
+        let idx_values = batch
+            .column(0)
+            .as_primitive::<Int32Type>()
+            .values()
+            .to_vec();
+        assert_eq!(idx_values, &all_idx_values[0..3]);
+
+        // Middle
+        let batch = reader
+            .take_offsets(&[2499, 2500, 2501], Select::All)
+            .await
+            .unwrap();
+        assert_eq!(batch.num_rows(), 3);
+        let idx_values = batch
+            .column(0)
+            .as_primitive::<Int32Type>()
+            .values()
+            .to_vec();
+        assert_eq!(idx_values, &all_idx_values[2499..2502]);
+
+        // End (last 3 rows)
+        let batch = reader
+            .take_offsets(&[4997, 4998, 4999], Select::All)
+            .await
+            .unwrap();
+        assert_eq!(batch.num_rows(), 3);
+        let idx_values = batch
+            .column(0)
+            .as_primitive::<Int32Type>()
+            .values()
+            .to_vec();
+        assert_eq!(idx_values, &all_idx_values[4997..5000]);
+    }
 }
@@ -27,9 +27,10 @@ use crate::{
 pub const SPLIT_ID_COLUMN: &str = "split_id";
 
 /// Strategy for assigning rows to splits
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
 pub enum SplitStrategy {
     /// All rows will have split id 0
+    #[default]
     NoSplit,
     /// Rows will be randomly assigned to splits
     ///
@@ -73,15 +74,6 @@ pub enum SplitStrategy {
     Calculated { calculation: String },
 }
 
-// The default is not to split the data
-//
-// All data will be assigned to a single split.
-impl Default for SplitStrategy {
-    fn default() -> Self {
-        Self::NoSplit
-    }
-}
-
 impl SplitStrategy {
     pub fn validate(&self, num_rows: u64) -> Result<()> {
         match self {
@@ -18,7 +18,7 @@ use std::{
 };
 
 use arrow_array::{Array, RecordBatch, RecordBatchReader};
-use arrow_schema::{DataType, Field, SchemaBuilder};
+use arrow_schema::{DataType, Field, SchemaBuilder, SchemaRef};
 // use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
 
@@ -190,6 +190,112 @@ impl<R: RecordBatchReader> WithEmbeddings<R> {
     }
 }
 
+/// Compute embedding arrays for a batch.
+///
+/// When multiple embedding functions are defined, they are computed in parallel using
+/// scoped threads. For a single embedding function, computation is done inline.
+fn compute_embedding_arrays(
+    batch: &RecordBatch,
+    embeddings: &[(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)],
+) -> Result<Vec<Arc<dyn Array>>> {
+    if embeddings.len() == 1 {
+        let (fld, func) = &embeddings[0];
+        let src_column =
+            batch
+                .column_by_name(&fld.source_column)
+                .ok_or_else(|| Error::InvalidInput {
+                    message: format!("Source column '{}' not found", fld.source_column),
+                })?;
+        return Ok(vec![func.compute_source_embeddings(src_column.clone())?]);
+    }
+
+    // Parallel path: multiple embeddings
+    std::thread::scope(|s| {
+        let handles: Vec<_> = embeddings
+            .iter()
+            .map(|(fld, func)| {
+                let src_column = batch.column_by_name(&fld.source_column).ok_or_else(|| {
+                    Error::InvalidInput {
+                        message: format!("Source column '{}' not found", fld.source_column),
+                    }
+                })?;
+
+                let handle = s.spawn(move || func.compute_source_embeddings(src_column.clone()));
+
+                Ok(handle)
+            })
+            .collect::<Result<_>>()?;
+
+        handles
+            .into_iter()
+            .map(|h| {
+                h.join().map_err(|e| Error::Runtime {
+                    message: format!("Thread panicked during embedding computation: {:?}", e),
+                })?
+            })
+            .collect()
+    })
+}
+
+/// Compute the output schema when embeddings are applied to a base schema.
+///
+/// This returns the schema with embedding columns appended.
+pub fn compute_output_schema(
+    base_schema: &SchemaRef,
+    embeddings: &[(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)],
+) -> Result<SchemaRef> {
+    let mut sb: SchemaBuilder = base_schema.as_ref().into();
+
+    for (ed, func) in embeddings {
+        let src_field = base_schema
+            .field_with_name(&ed.source_column)
+            .map_err(|_| Error::InvalidInput {
+                message: format!("Source column '{}' not found in schema", ed.source_column),
+            })?;
+
+        let field_name = ed
+            .dest_column
+            .clone()
+            .unwrap_or_else(|| format!("{}_embedding", &ed.source_column));
+
+        sb.push(Field::new(
+            field_name,
+            func.dest_type()?.into_owned(),
+            src_field.is_nullable(),
+        ));
+    }
+
+    Ok(Arc::new(sb.finish()))
+}
+
+/// Compute embeddings for a batch and append as new columns.
+///
+/// This function computes embeddings using the provided embedding functions and
+/// appends them as new columns to the batch.
+pub fn compute_embeddings_for_batch(
+    batch: RecordBatch,
+    embeddings: &[(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)],
+) -> Result<RecordBatch> {
+    let embedding_arrays = compute_embedding_arrays(&batch, embeddings)?;
+
+    let mut result = batch;
+    for ((fld, _), embedding) in embeddings.iter().zip(embedding_arrays.iter()) {
+        let dst_field_name = fld
+            .dest_column
+            .clone()
+            .unwrap_or_else(|| format!("{}_embedding", &fld.source_column));
+
+        let dst_field = Field::new(
+            dst_field_name,
+            embedding.data_type().clone(),
+            embedding.nulls().is_some(),
+        );
+
+        result = result.try_with_column(dst_field, embedding.clone())?;
+    }
+    Ok(result)
+}
+
 impl<R: RecordBatchReader> WithEmbeddings<R> {
     fn dest_fields(&self) -> Result<Vec<Field>> {
         let schema = self.inner.schema();
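`compute_embedding_arrays` above fans each embedding function out to a scoped thread and joins the results, turning a worker panic into an error instead of unwinding. A self-contained sketch of that fan-out/join shape, with a plain function standing in for the embedding call:

    // Run one closure per input on a scoped thread; a panic in any worker
    // becomes an Err rather than propagating out of the scope.
    fn parallel_map<T, R, F>(inputs: &[T], f: F) -> Result<Vec<R>, String>
    where
        T: Sync,
        R: Send,
        F: Fn(&T) -> R + Send + Sync + Copy,
    {
        std::thread::scope(|s| {
            let handles: Vec<_> = inputs
                .iter()
                .map(|item| s.spawn(move || f(item)))
                .collect();
            handles
                .into_iter()
                .map(|h| h.join().map_err(|e| format!("worker panicked: {:?}", e)))
                .collect()
        })
    }

    fn main() {
        let out = parallel_map(&[1, 2, 3], |x| x * 10).unwrap();
        assert_eq!(out, vec![10, 20, 30]);
    }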
@@ -240,48 +346,6 @@ impl<R: RecordBatchReader> WithEmbeddings<R> {
             column_definitions,
         })
     }
-
-    fn compute_embeddings_parallel(&self, batch: &RecordBatch) -> Result<Vec<Arc<dyn Array>>> {
-        if self.embeddings.len() == 1 {
-            let (fld, func) = &self.embeddings[0];
-            let src_column =
-                batch
-                    .column_by_name(&fld.source_column)
-                    .ok_or_else(|| Error::InvalidInput {
-                        message: format!("Source column '{}' not found", fld.source_column),
-                    })?;
-            return Ok(vec![func.compute_source_embeddings(src_column.clone())?]);
-        }
-
-        // Parallel path: multiple embeddings
-        std::thread::scope(|s| {
-            let handles: Vec<_> = self
-                .embeddings
-                .iter()
-                .map(|(fld, func)| {
-                    let src_column = batch.column_by_name(&fld.source_column).ok_or_else(|| {
-                        Error::InvalidInput {
-                            message: format!("Source column '{}' not found", fld.source_column),
-                        }
-                    })?;
-
-                    let handle =
-                        s.spawn(move || func.compute_source_embeddings(src_column.clone()));
-
-                    Ok(handle)
-                })
-                .collect::<Result<_>>()?;
-
-            handles
-                .into_iter()
-                .map(|h| {
-                    h.join().map_err(|e| Error::Runtime {
-                        message: format!("Thread panicked during embedding computation: {:?}", e),
-                    })?
-                })
-                .collect()
-        })
-    }
 }
 
 impl<R: RecordBatchReader> Iterator for MaybeEmbedded<R> {
@@ -309,37 +373,13 @@ impl<R: RecordBatchReader> Iterator for WithEmbeddings<R> {
     fn next(&mut self) -> Option<Self::Item> {
         let batch = self.inner.next()?;
         match batch {
-            Ok(batch) => {
-                let embeddings = match self.compute_embeddings_parallel(&batch) {
-                    Ok(emb) => emb,
-                    Err(e) => {
-                        return Some(Err(arrow_schema::ArrowError::ComputeError(format!(
-                            "Error computing embedding: {}",
-                            e
-                        ))))
-                    }
-                };
-
-                let mut batch = batch;
-                for ((fld, _), embedding) in self.embeddings.iter().zip(embeddings.iter()) {
-                    let dst_field_name = fld
-                        .dest_column
-                        .clone()
-                        .unwrap_or_else(|| format!("{}_embedding", &fld.source_column));
-
-                    let dst_field = Field::new(
-                        dst_field_name,
-                        embedding.data_type().clone(),
-                        embedding.nulls().is_some(),
-                    );
-
-                    match batch.try_with_column(dst_field.clone(), embedding.clone()) {
-                        Ok(b) => batch = b,
-                        Err(e) => return Some(Err(e)),
-                    };
-                }
-                Some(Ok(batch))
-            }
+            Ok(batch) => match compute_embeddings_for_batch(batch, &self.embeddings) {
+                Ok(batch_with_embeddings) => Some(Ok(batch_with_embeddings)),
+                Err(e) => Some(Err(arrow_schema::ArrowError::ComputeError(format!(
+                    "Error computing embedding: {}",
+                    e
+                )))),
+            },
             Err(e) => Some(Err(e)),
         }
     }
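The `next` above now delegates the whole Ok arm to `compute_embeddings_for_batch` and passes errors straight through. The shape is a generic fallible-map iterator adapter; a minimal sketch with toy types:

    // Iterator adapter: Ok items go through a fallible transform,
    // Err items pass through untouched.
    struct Transformed<I, F> {
        inner: I,
        f: F,
    }

    impl<I, F, T, E> Iterator for Transformed<I, F>
    where
        I: Iterator<Item = Result<T, E>>,
        F: FnMut(T) -> Result<T, E>,
    {
        type Item = Result<T, E>;

        fn next(&mut self) -> Option<Self::Item> {
            match self.inner.next()? {
                Ok(v) => Some((self.f)(v)),
                Err(e) => Some(Err(e)),
            }
        }
    }

    fn main() {
        let data: Vec<Result<i32, String>> = vec![Ok(1), Ok(2)];
        let doubled: Vec<Result<i32, String>> = Transformed {
            inner: data.into_iter(),
            f: |v: i32| -> Result<i32, String> { Ok(v * 2) },
        }
        .collect();
        assert_eq!(doubled, vec![Ok(2), Ok(4)]);
    }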
@@ -4,9 +4,10 @@
 use std::sync::PoisonError;
 
 use arrow_schema::ArrowError;
+use datafusion_common::DataFusionError;
 use snafu::Snafu;
 
-type BoxError = Box<dyn std::error::Error + Send + Sync>;
+pub(crate) type BoxError = Box<dyn std::error::Error + Send + Sync>;
 
 #[derive(Debug, Snafu)]
 #[snafu(visibility(pub(crate)))]
@@ -80,6 +81,9 @@ pub enum Error {
     Arrow { source: ArrowError },
     #[snafu(display("LanceDBError: not supported: {message}"))]
     NotSupported { message: String },
+    /// External error pass through from user code.
+    #[snafu(transparent)]
+    External { source: BoxError },
     #[snafu(whatever, display("{message}"))]
     Other {
         message: String,
@@ -92,15 +96,72 @@ pub type Result<T> = std::result::Result<T, Error>;
 
 impl From<ArrowError> for Error {
     fn from(source: ArrowError) -> Self {
-        Self::Arrow { source }
+        match source {
+            ArrowError::ExternalError(source) => Self::from_box_error(source),
+            _ => Self::Arrow { source },
+        }
+    }
+}
+
+impl From<DataFusionError> for Error {
+    fn from(source: DataFusionError) -> Self {
+        match source {
+            DataFusionError::ArrowError(source, _) => (*source).into(),
+            DataFusionError::External(source) => Self::from_box_error(source),
+            other => Self::External {
+                source: Box::new(other),
+            },
+        }
     }
 }
 
 impl From<lance::Error> for Error {
     fn from(source: lance::Error) -> Self {
-        // TODO: Once Lance is changed to preserve ObjectStore, DataFusion, and Arrow errors, we can
-        // pass those variants through here as well.
-        Self::Lance { source }
+        // Try to unwrap external errors that were wrapped by lance
+        match source {
+            lance::Error::Wrapped { error, .. } => Self::from_box_error(error),
+            lance::Error::External { source } => Self::from_box_error(source),
+            _ => Self::Lance { source },
+        }
+    }
+}
+
+impl Error {
+    fn from_box_error(mut source: Box<dyn std::error::Error + Send + Sync>) -> Self {
+        source = match source.downcast::<Self>() {
+            Ok(e) => match *e {
+                Self::External { source } => return Self::from_box_error(source),
+                other => return other,
+            },
+            Err(source) => source,
+        };
+
+        source = match source.downcast::<lance::Error>() {
+            Ok(e) => match *e {
+                lance::Error::Wrapped { error, .. } => return Self::from_box_error(error),
+                other => return other.into(),
+            },
+            Err(source) => source,
+        };
+
+        source = match source.downcast::<ArrowError>() {
+            Ok(e) => match *e {
+                ArrowError::ExternalError(source) => return Self::from_box_error(source),
+                other => return other.into(),
+            },
+            Err(source) => source,
+        };
+
+        source = match source.downcast::<DataFusionError>() {
+            Ok(e) => match *e {
+                DataFusionError::ArrowError(source, _) => return (*source).into(),
+                DataFusionError::External(source) => return Self::from_box_error(source),
+                other => return other.into(),
+            },
+            Err(source) => source,
+        };
 
+        Self::External { source }
     }
 }
 
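`from_box_error` above peels wrapper layers by repeatedly attempting `Box::downcast` against each known error type, keeping the original box when a downcast misses. A minimal, self-contained sketch of that downcast-or-keep loop with a toy wrapper type (not the lance/arrow/datafusion types used above):

    use std::error::Error;
    use std::fmt;

    #[derive(Debug)]
    struct Wrapped(Box<dyn Error + Send + Sync>);

    impl fmt::Display for Wrapped {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "wrapped: {}", self.0)
        }
    }

    impl Error for Wrapped {}

    // Peel nested Wrapped layers; a failed downcast returns the original
    // box through Err, so nothing is lost along the way.
    fn unwrap_layers(mut e: Box<dyn Error + Send + Sync>) -> Box<dyn Error + Send + Sync> {
        loop {
            e = match e.downcast::<Wrapped>() {
                Ok(w) => w.0,
                Err(original) => return original,
            };
        }
    }

    fn main() {
        let inner: Box<dyn Error + Send + Sync> = "root cause".into();
        let nested: Box<dyn Error + Send + Sync> = Box::new(Wrapped(Box::new(Wrapped(inner))));
        assert_eq!(unwrap_layers(nested).to_string(), "root cause");
    }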
131  rust/lancedb/src/expr.rs  Normal file
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright The LanceDB Authors
+
+//! Expression builder API for type-safe query construction
+//!
+//! This module provides a fluent API for building expressions that can be used
+//! in filters and projections. It wraps DataFusion's expression system.
+//!
+//! # Examples
+//!
+//! ```rust
+//! use std::ops::Mul;
+//! use lancedb::expr::{col, lit};
+//!
+//! let expr = col("age").gt(lit(18));
+//! let expr = col("age").gt(lit(18)).and(col("status").eq(lit("active")));
+//! let expr = col("price") * lit(1.1);
+//! ```
+
+mod sql;
+
+pub use sql::expr_to_sql_string;
+
+use std::sync::Arc;
+
+use arrow_schema::DataType;
+use datafusion_expr::{expr_fn::cast, Expr, ScalarUDF};
+use datafusion_functions::string::expr_fn as string_expr_fn;
+
+pub use datafusion_expr::{col, lit};
+
+pub use datafusion_expr::Expr as DfExpr;
+
+pub fn lower(expr: Expr) -> Expr {
+    string_expr_fn::lower(expr)
+}
+
+pub fn upper(expr: Expr) -> Expr {
+    string_expr_fn::upper(expr)
+}
+
+pub fn contains(expr: Expr, search: Expr) -> Expr {
+    string_expr_fn::contains(expr, search)
+}
+
+pub fn expr_cast(expr: Expr, data_type: DataType) -> Expr {
+    cast(expr, data_type)
+}
+
+lazy_static::lazy_static! {
+    static ref FUNC_REGISTRY: std::sync::RwLock<std::collections::HashMap<String, Arc<ScalarUDF>>> = {
+        let mut m = std::collections::HashMap::new();
+        m.insert("lower".to_string(), datafusion_functions::string::lower());
+        m.insert("upper".to_string(), datafusion_functions::string::upper());
+        m.insert("contains".to_string(), datafusion_functions::string::contains());
+        m.insert("btrim".to_string(), datafusion_functions::string::btrim());
+        m.insert("ltrim".to_string(), datafusion_functions::string::ltrim());
+        m.insert("rtrim".to_string(), datafusion_functions::string::rtrim());
+        m.insert("concat".to_string(), datafusion_functions::string::concat());
+        m.insert("octet_length".to_string(), datafusion_functions::string::octet_length());
+        std::sync::RwLock::new(m)
+    };
+}
+
+pub fn func(name: impl AsRef<str>, args: Vec<Expr>) -> crate::Result<Expr> {
+    let name = name.as_ref();
+    let registry = FUNC_REGISTRY
+        .read()
+        .map_err(|e| crate::Error::InvalidInput {
+            message: format!("lock poisoned: {}", e),
+        })?;
+    let udf = registry
+        .get(name)
+        .ok_or_else(|| crate::Error::InvalidInput {
+            message: format!("unknown function: {}", name),
+        })?;
+    Ok(Expr::ScalarFunction(
+        datafusion_expr::expr::ScalarFunction::new_udf(udf.clone(), args),
+    ))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_col_lit_comparisons() {
+        let expr = col("age").gt(lit(18));
+        let sql = expr_to_sql_string(&expr).unwrap();
+        assert!(sql.contains("age") && sql.contains("18"));
+
+        let expr = col("name").eq(lit("Alice"));
+        let sql = expr_to_sql_string(&expr).unwrap();
+        assert!(sql.contains("name") && sql.contains("Alice"));
+    }
+
+    #[test]
+    fn test_compound_expression() {
+        let expr = col("age").gt(lit(18)).and(col("status").eq(lit("active")));
+        let sql = expr_to_sql_string(&expr).unwrap();
+        assert!(sql.contains("age") && sql.contains("status"));
+    }
+
+    #[test]
+    fn test_string_functions() {
+        let expr = lower(col("name"));
+        let sql = expr_to_sql_string(&expr).unwrap();
+        assert!(sql.to_lowercase().contains("lower"));
+
+        let expr = contains(col("text"), lit("search"));
+        let sql = expr_to_sql_string(&expr).unwrap();
+        assert!(sql.to_lowercase().contains("contains"));
+    }
+
+    #[test]
+    fn test_func() {
+        let expr = func("lower", vec![col("x")]).unwrap();
+        let sql = expr_to_sql_string(&expr).unwrap();
+        assert!(sql.to_lowercase().contains("lower"));
+
+        let result = func("unknown_func", vec![col("x")]);
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_arithmetic() {
+        let expr = col("price") * lit(1.1);
+        let sql = expr_to_sql_string(&expr).unwrap();
+        assert!(sql.contains("price"));
+    }
+}
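A short usage sketch of the new module, combining the re-exported builders with the unparser round-trip shown above; it assumes the module lands as `lancedb::expr` with the exports in this diff:

    use lancedb::expr::{col, contains, expr_to_sql_string, lit, lower};

    fn main() -> lancedb::Result<()> {
        // Compose a typed filter, then render it to a SQL string that can be
        // passed wherever a filter string is accepted.
        let filter = col("age")
            .gt(lit(18))
            .and(contains(lower(col("status")), lit("active")));
        let sql = expr_to_sql_string(&filter)?;
        println!("{sql}");
        Ok(())
    }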
12  rust/lancedb/src/expr/sql.rs  Normal file
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: Copyright The LanceDB Authors
+
+use datafusion_expr::Expr;
+use datafusion_sql::unparser;
+
+pub fn expr_to_sql_string(expr: &Expr) -> crate::Result<String> {
+    let ast = unparser::expr_to_sql(expr).map_err(|e| crate::Error::InvalidInput {
+        message: format!("failed to serialize expression to SQL: {}", e),
+    })?;
+    Ok(ast.to_string())
+}
Some files were not shown because too many files have changed in this diff.