Compare commits


1 Commit

Author: Wyatt Alt
SHA1: 3c8f9e4c9c
Message: feat: support multi-tenant azure deployments in the remote client
This forwards an x-azure-tenant-id header to the remote server when
supplied in storage_options. This makes it possible to have a server
deployment that spans multiple Azure AD tenants.
Date: 2026-02-13 14:13:31 -08:00
82 changed files with 3355 additions and 6190 deletions
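From client code, the change would be exercised by passing the tenant id through storage_options when connecting. A minimal Python sketch follows; the database URI, API key, and the exact option key `azure_tenant_id` are assumptions inferred from the environment-variable mapping added to the ConnectBuilder further down in this diff, not confirmed API details.

```python
import lancedb

# Hedged sketch: connect to a remote LanceDB deployment and supply the Azure AD
# tenant id via storage_options. The remote client is expected to forward this
# value to the server as an "x-azure-tenant-id" header. The URI, api_key, and
# the "azure_tenant_id" option key are illustrative placeholders.
db = lancedb.connect(
    "db://my-deployment",  # placeholder remote URI
    api_key="sk-...",      # placeholder credential
    storage_options={"azure_tenant_id": "00000000-0000-0000-0000-000000000000"},
)
tbl = db.open_table("my_table")
```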

View File

@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.27.0-beta.0"
current_version = "0.26.2"
parse = """(?x)
(?P<major>0|[1-9]\\d*)\\.
(?P<minor>0|[1-9]\\d*)\\.

View File

@@ -1,173 +0,0 @@
name: Codex Fix CI
on:
workflow_dispatch:
inputs:
workflow_run_url:
description: "Failing CI workflow run URL (e.g., https://github.com/lancedb/lancedb/actions/runs/12345678)"
required: true
type: string
branch:
description: "Branch to fix (e.g., main, release/v2.0, or feature-branch)"
required: true
type: string
guidelines:
description: "Additional guidelines for the fix (optional)"
required: false
type: string
permissions:
contents: write
pull-requests: write
actions: read
jobs:
fix-ci:
runs-on: warp-ubuntu-latest-x64-4x
timeout-minutes: 60
env:
CC: clang
CXX: clang++
steps:
- name: Show inputs
run: |
echo "workflow_run_url = ${{ inputs.workflow_run_url }}"
echo "branch = ${{ inputs.branch }}"
echo "guidelines = ${{ inputs.guidelines }}"
- name: Checkout Repo
uses: actions/checkout@v4
with:
ref: ${{ inputs.branch }}
fetch-depth: 0
persist-credentials: true
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Install Codex CLI
run: npm install -g @openai/codex
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
components: clippy, rustfmt
- uses: Swatinem/rust-cache@v2
- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get install -y protobuf-compiler libssl-dev
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install Python dependencies
run: |
pip install maturin ruff pytest pyarrow pandas polars
- name: Set up Java
uses: actions/setup-java@v4
with:
distribution: temurin
java-version: '11'
cache: maven
- name: Install Node.js dependencies for TypeScript bindings
run: |
cd nodejs
npm ci
- name: Configure git user
run: |
git config user.name "lancedb automation"
git config user.email "robot@lancedb.com"
- name: Run Codex to fix CI failure
env:
WORKFLOW_RUN_URL: ${{ inputs.workflow_run_url }}
BRANCH: ${{ inputs.branch }}
GUIDELINES: ${{ inputs.guidelines }}
GITHUB_TOKEN: ${{ secrets.ROBOT_TOKEN }}
GH_TOKEN: ${{ secrets.ROBOT_TOKEN }}
OPENAI_API_KEY: ${{ secrets.CODEX_TOKEN }}
run: |
set -euo pipefail
cat <<EOF >/tmp/codex-prompt.txt
You are running inside the lancedb repository on a GitHub Actions runner. Your task is to fix a CI failure.
Input parameters:
- Failing workflow run URL: ${WORKFLOW_RUN_URL}
- Branch to fix: ${BRANCH}
- Additional guidelines: ${GUIDELINES:-"None provided"}
Follow these steps exactly:
1. Extract the run ID from the workflow URL. The URL format is https://github.com/lancedb/lancedb/actions/runs/<run_id>.
2. Use "gh run view <run_id> --json jobs,conclusion,name" to get information about the failed run.
3. Identify which jobs failed. For each failed job, use "gh run view <run_id> --job <job_id> --log-failed" to get the failure logs.
4. Analyze the failure logs to understand what went wrong. Common failures include:
- Compilation errors
- Test failures
- Clippy warnings treated as errors
- Formatting issues
- Dependency issues
5. Based on the analysis, fix the issues in the codebase:
- For compilation errors: Fix the code that doesn't compile
- For test failures: Fix the failing tests or the code they test
- For clippy warnings: Apply the suggested fixes
- For formatting issues: Run "cargo fmt --all"
- For other issues: Apply appropriate fixes
6. After making fixes, verify them locally:
- Run "cargo fmt --all" to ensure formatting is correct
- Run "cargo clippy --workspace --tests --all-features -- -D warnings" to check for issues
- Run ONLY the specific failing tests to confirm they pass now:
- For Rust test failures: Run the specific test with "cargo test -p <crate> <test_name>"
- For Python test failures: Build with "cd python && maturin develop" then run "pytest <specific_test_file>::<test_name>"
- For Java test failures: Run "cd java && mvn test -Dtest=<TestClass>#<testMethod>"
- For TypeScript test failures: Run "cd nodejs && npm run build && npm test -- --testNamePattern='<test_name>'"
- Do NOT run the full test suite - only run the tests that were failing
7. If the additional guidelines are provided, follow them as well.
8. Inspect "git status --short" and "git diff" to review your changes.
9. Create a fix branch: "git checkout -b codex/fix-ci-<run_id>".
10. Stage all changes with "git add -A" and commit with message "fix: resolve CI failures from run <run_id>".
11. Push the branch: "git push origin codex/fix-ci-<run_id>". If the remote branch exists, delete it first with "gh api -X DELETE repos/lancedb/lancedb/git/refs/heads/codex/fix-ci-<run_id>" then push. Do NOT use "git push --force" or "git push -f".
12. Create a pull request targeting "${BRANCH}":
- Title: "ci: <short summary describing the fix>" (e.g., "ci: fix clippy warnings in lancedb" or "ci: resolve test flakiness in vector search")
- First, write the PR body to /tmp/pr-body.md using a heredoc (cat <<'PREOF' > /tmp/pr-body.md). The body should include:
- Link to the failing workflow run
- Summary of what failed
- Description of the fixes applied
- Then run "gh pr create --base ${BRANCH} --body-file /tmp/pr-body.md".
13. Display the new PR URL, "git status --short", and a summary of what was fixed.
Constraints:
- Use bash commands for all operations.
- Do not merge the PR.
- Do not modify GitHub workflow files unless they are the cause of the failure.
- If any command fails, diagnose and attempt to fix the issue instead of aborting immediately.
- If you cannot fix the issue automatically, create the PR anyway with a clear explanation of what you tried and what remains to be fixed.
- env "GH_TOKEN" is available, use "gh" tools for GitHub-related operations.
EOF
printenv OPENAI_API_KEY | codex login --with-api-key
codex --config shell_environment_policy.ignore_default_excludes=true exec --dangerously-bypass-approvals-and-sandbox "$(cat /tmp/codex-prompt.txt)"

View File

@@ -8,7 +8,6 @@ on:
paths:
- Cargo.toml
- nodejs/**
- rust/**
- docs/src/js/**
- .github/workflows/nodejs.yml
- docker-compose.yml

View File

@@ -8,7 +8,6 @@ on:
paths:
- Cargo.toml
- python/**
- rust/**
- .github/workflows/python.yml
concurrency:

View File

@@ -183,7 +183,7 @@ jobs:
runs-on: ubuntu-24.04
strategy:
matrix:
msrv: ["1.91.0"] # This should match up with rust-version in Cargo.toml
msrv: ["1.88.0"] # This should match up with rust-version in Cargo.toml
env:
# Need up-to-date compilers for kernels
CC: clang-18

Cargo.lock generated
View File

@@ -1389,9 +1389,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
version = "1.11.1"
version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33"
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
[[package]]
name = "bytes-utils"
@@ -1783,16 +1783,6 @@ dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-skiplist"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df29de440c58ca2cc6e587ec3d22347551a32435fbde9d2bff64e78a9ffa151b"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.21"
@@ -3082,8 +3072,9 @@ checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"
[[package]]
name = "fsst"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f03a771ab914e207dd26bd2f12666839555ec8ecc7e1770e1ed6f9900d899a4"
dependencies = [
"arrow-array",
"rand 0.9.2",
@@ -4414,8 +4405,9 @@ dependencies = [
[[package]]
name = "lance"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47b685aca3f97ee02997c83ded16f59c747ccb69e74c8abbbae4aa3d22cf1301"
dependencies = [
"arrow",
"arrow-arith",
@@ -4434,7 +4426,6 @@ dependencies = [
"byteorder",
"bytes",
"chrono",
"crossbeam-skiplist",
"dashmap",
"datafusion",
"datafusion-expr",
@@ -4474,7 +4465,6 @@ dependencies = [
"tantivy",
"tokio",
"tokio-stream",
"tokio-util",
"tracing",
"url",
"uuid",
@@ -4482,8 +4472,9 @@ dependencies = [
[[package]]
name = "lance-arrow"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "daf00c7537df524cc518a089f0d156a036d95ca3f5bc2bc1f0a9f9293e9b62ef"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -4502,8 +4493,9 @@ dependencies = [
[[package]]
name = "lance-bitpacking"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46752e4ac8fc5590a445e780b63a8800adc7a770bd74770a8dc66963778e4e77"
dependencies = [
"arrayref",
"paste",
@@ -4512,8 +4504,9 @@ dependencies = [
[[package]]
name = "lance-core"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d13d87d07305c6d4b4dc7780fb1107babf782a0e5b1dc7872e17ae1f8fd11ca"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -4550,8 +4543,9 @@ dependencies = [
[[package]]
name = "lance-datafusion"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6451b5af876eaef8bec4b38a39dadac9d44621e1ecf85d0cdf6097a5d0aa8721"
dependencies = [
"arrow",
"arrow-array",
@@ -4574,7 +4568,6 @@ dependencies = [
"log",
"pin-project",
"prost",
"prost-build",
"snafu",
"tokio",
"tracing",
@@ -4582,8 +4575,9 @@ dependencies = [
[[package]]
name = "lance-datagen"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1736708dd7867dfbab8fcc930b21c96717c6c00be73b7d9a240336a4ed80375"
dependencies = [
"arrow",
"arrow-array",
@@ -4601,8 +4595,9 @@ dependencies = [
[[package]]
name = "lance-encoding"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6b6ca4ff94833240d5ba4a94a742cba786d1949b3c3fa7e11d6f0050443432a"
dependencies = [
"arrow-arith",
"arrow-array",
@@ -4639,8 +4634,9 @@ dependencies = [
[[package]]
name = "lance-file"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55fbe959bffe185543aed3cbeb14484f1aa2e55886034fdb1ea3d8cc9b70aad8"
dependencies = [
"arrow-arith",
"arrow-array",
@@ -4672,8 +4668,9 @@ dependencies = [
[[package]]
name = "lance-geo"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a52b0adabc953d457f336a784a3b37353a180e6a79905f544949746e0d4c6483"
dependencies = [
"datafusion",
"geo-traits",
@@ -4687,8 +4684,9 @@ dependencies = [
[[package]]
name = "lance-index"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b67654bf86fd942dd2cf08294ee7e91053427cd148225f49c9ff398ff9a40fd"
dependencies = [
"arrow",
"arrow-arith",
@@ -4755,8 +4753,9 @@ dependencies = [
[[package]]
name = "lance-io"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8eb0ccc1c414e31687d83992d546af0a0237c8d2f4bf2ae3d347d539fd0fc141"
dependencies = [
"arrow",
"arrow-arith",
@@ -4789,7 +4788,6 @@ dependencies = [
"serde",
"shellexpand",
"snafu",
"tempfile",
"tokio",
"tracing",
"url",
@@ -4797,8 +4795,9 @@ dependencies = [
[[package]]
name = "lance-linalg"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "083404cf12dcdb1a7df98fb58f9daf626b6e43a2f794b37b6b89b4012a0e1f78"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -4814,8 +4813,9 @@ dependencies = [
[[package]]
name = "lance-namespace"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c12778d2aabf9c2bfd16e2509ebe120e562a288d8ae630ec6b6b204868df41b2"
dependencies = [
"arrow",
"async-trait",
@@ -4827,8 +4827,9 @@ dependencies = [
[[package]]
name = "lance-namespace-impls"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8863aababdd13a6d2c8d6179dc6981f4f8f49d8b66a00c5dd75115aec4cadc99"
dependencies = [
"arrow",
"arrow-ipc",
@@ -4843,7 +4844,6 @@ dependencies = [
"lance-index",
"lance-io",
"lance-namespace",
"lance-table",
"log",
"object_store",
"rand 0.9.2",
@@ -4859,9 +4859,9 @@ dependencies = [
[[package]]
name = "lance-namespace-reqwest-client"
version = "0.5.2"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ad4c947349acd6e37e984eba0254588bd894e6128434338b9e6904e56fb4633"
checksum = "a2acdba67f84190067532fce07b51a435dd390d7cdc1129a05003e5cb3274cf0"
dependencies = [
"reqwest",
"serde",
@@ -4872,8 +4872,9 @@ dependencies = [
[[package]]
name = "lance-table"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0fcc83f197ce2000c4abe4f5e0873490ab1f41788fa76571c4209b87d4daf50"
dependencies = [
"arrow",
"arrow-array",
@@ -4912,8 +4913,9 @@ dependencies = [
[[package]]
name = "lance-testing"
version = "3.0.0-beta.5"
source = "git+https://github.com/lance-format/lance.git?tag=v3.0.0-beta.5#c69274bd83da9930157d5e2ceeb101af13a916a3"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fb1f7c7e06f91360e141ecee1cf2110f858c231705f69f2cd2fda9e30c1e9f4"
dependencies = [
"arrow-array",
"arrow-schema",
@@ -4924,7 +4926,7 @@ dependencies = [
[[package]]
name = "lancedb"
version = "0.27.0-beta.0"
version = "0.26.2"
dependencies = [
"ahash",
"anyhow",
@@ -5004,7 +5006,7 @@ dependencies = [
[[package]]
name = "lancedb-nodejs"
version = "0.27.0-beta.0"
version = "0.26.2"
dependencies = [
"arrow-array",
"arrow-ipc",
@@ -5024,7 +5026,7 @@ dependencies = [
[[package]]
name = "lancedb-python"
version = "0.30.0-beta.0"
version = "0.29.2"
dependencies = [
"arrow",
"async-trait",
@@ -5626,10 +5628,11 @@ dependencies = [
[[package]]
name = "num-bigint-dig"
version = "0.8.6"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e661dda6640fad38e827a6d4a310ff4763082116fe217f279885c97f511bb0b7"
checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151"
dependencies = [
"byteorder",
"lazy_static",
"libm",
"num-integer",
@@ -7271,9 +7274,9 @@ dependencies = [
[[package]]
name = "roaring"
version = "0.11.3"
version = "0.10.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ba9ce64a8f45d7fc86358410bb1a82e8c987504c0d4900e9141d69a9f26c885"
checksum = "19e8d2cfa184d94d0726d650a9f4a1be7f9b76ac9fdb954219878dc00c1c1e7b"
dependencies = [
"bytemuck",
"byteorder",

View File

@@ -12,23 +12,23 @@ repository = "https://github.com/lancedb/lancedb"
description = "Serverless, low-latency vector database for AI applications"
keywords = ["lancedb", "lance", "database", "vector", "search"]
categories = ["database-implementations"]
rust-version = "1.91.0"
rust-version = "1.88.0"
[workspace.dependencies]
lance = { "version" = "=3.0.0-beta.5", default-features = false, "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance-core = { "version" = "=3.0.0-beta.5", "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance-datagen = { "version" = "=3.0.0-beta.5", "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance-file = { "version" = "=3.0.0-beta.5", "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance-io = { "version" = "=3.0.0-beta.5", default-features = false, "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance-index = { "version" = "=3.0.0-beta.5", "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance-linalg = { "version" = "=3.0.0-beta.5", "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance-namespace = { "version" = "=3.0.0-beta.5", "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance-namespace-impls = { "version" = "=3.0.0-beta.5", default-features = false, "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance-table = { "version" = "=3.0.0-beta.5", "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance-testing = { "version" = "=3.0.0-beta.5", "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance-datafusion = { "version" = "=3.0.0-beta.5", "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance-encoding = { "version" = "=3.0.0-beta.5", "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance-arrow = { "version" = "=3.0.0-beta.5", "tag" = "v3.0.0-beta.5", "git" = "https://github.com/lance-format/lance.git" }
lance = { "version" = "=2.0.0", default-features = false }
lance-core = "=2.0.0"
lance-datagen = "=2.0.0"
lance-file = "=2.0.0"
lance-io = { "version" = "=2.0.0", default-features = false }
lance-index = "=2.0.0"
lance-linalg = "=2.0.0"
lance-namespace = "=2.0.0"
lance-namespace-impls = { "version" = "=2.0.0", default-features = false }
lance-table = "=2.0.0"
lance-testing = "=2.0.0"
lance-datafusion = "=2.0.0"
lance-encoding = "=2.0.0"
lance-arrow = "=2.0.0"
ahash = "0.8"
# Note that this one does not include pyarrow
arrow = { version = "57.2", optional = false }

View File

@@ -14,7 +14,7 @@ Add the following dependency to your `pom.xml`:
<dependency>
<groupId>com.lancedb</groupId>
<artifactId>lancedb-core</artifactId>
<version>0.27.0-beta.0</version>
<version>0.26.2</version>
</dependency>
```

View File

@@ -8,7 +8,7 @@
<parent>
<groupId>com.lancedb</groupId>
<artifactId>lancedb-parent</artifactId>
<version>0.27.0-beta.0</version>
<version>0.26.2-final.0</version>
<relativePath>../pom.xml</relativePath>
</parent>

View File

@@ -6,7 +6,7 @@
<groupId>com.lancedb</groupId>
<artifactId>lancedb-parent</artifactId>
<version>0.27.0-beta.0</version>
<version>0.26.2-final.0</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<description>LanceDB Java SDK Parent POM</description>
@@ -28,7 +28,7 @@
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<arrow.version>15.0.0</arrow.version>
<lance-core.version>3.0.0-beta.5</lance-core.version>
<lance-core.version>2.0.0</lance-core.version>
<spotless.skip>false</spotless.skip>
<spotless.version>2.30.0</spotless.version>
<spotless.java.googlejavaformat.version>1.7</spotless.java.googlejavaformat.version>

View File

@@ -1,7 +1,7 @@
[package]
name = "lancedb-nodejs"
edition.workspace = true
version = "0.27.0-beta.0"
version = "0.26.2"
license.workspace = true
description.workspace = true
repository.workspace = true

View File

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-darwin-arm64",
"version": "0.27.0-beta.0",
"version": "0.26.2",
"os": ["darwin"],
"cpu": ["arm64"],
"main": "lancedb.darwin-arm64.node",

View File

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-linux-arm64-gnu",
"version": "0.27.0-beta.0",
"version": "0.26.2",
"os": ["linux"],
"cpu": ["arm64"],
"main": "lancedb.linux-arm64-gnu.node",

View File

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-linux-arm64-musl",
"version": "0.27.0-beta.0",
"version": "0.26.2",
"os": ["linux"],
"cpu": ["arm64"],
"main": "lancedb.linux-arm64-musl.node",

View File

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-linux-x64-gnu",
"version": "0.27.0-beta.0",
"version": "0.26.2",
"os": ["linux"],
"cpu": ["x64"],
"main": "lancedb.linux-x64-gnu.node",

View File

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-linux-x64-musl",
"version": "0.27.0-beta.0",
"version": "0.26.2",
"os": ["linux"],
"cpu": ["x64"],
"main": "lancedb.linux-x64-musl.node",

View File

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-win32-arm64-msvc",
"version": "0.27.0-beta.0",
"version": "0.26.2",
"os": [
"win32"
],

View File

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-win32-x64-msvc",
"version": "0.27.0-beta.0",
"version": "0.26.2",
"os": ["win32"],
"cpu": ["x64"],
"main": "lancedb.win32-x64-msvc.node",

View File

@@ -1,12 +1,12 @@
{
"name": "@lancedb/lancedb",
"version": "0.27.0-beta.0",
"version": "0.26.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@lancedb/lancedb",
"version": "0.27.0-beta.0",
"version": "0.26.2",
"cpu": [
"x64",
"arm64"

View File

@@ -11,7 +11,7 @@
"ann"
],
"private": false,
"version": "0.27.0-beta.0",
"version": "0.26.2",
"main": "dist/index.js",
"exports": {
".": "./dist/index.js",

View File

@@ -13,7 +13,6 @@ use crate::header::JsHeaderProvider;
use crate::table::Table;
use crate::ConnectionOptions;
use lancedb::connection::{ConnectBuilder, Connection as LanceDBConnection};
use lancedb::ipc::{ipc_file_to_batches, ipc_file_to_schema};
#[napi]

View File

@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.30.0-beta.1"
current_version = "0.29.2"
parse = """(?x)
(?P<major>0|[1-9]\\d*)\\.
(?P<minor>0|[1-9]\\d*)\\.

View File

@@ -1,13 +1,13 @@
[package]
name = "lancedb-python"
version = "0.30.0-beta.1"
version = "0.29.2"
edition.workspace = true
description = "Python bindings for LanceDB"
license.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
rust-version = "1.91.0"
rust-version = "1.88.0"
[lib]
name = "_lancedb"

View File

@@ -2,7 +2,6 @@
# SPDX-FileCopyrightText: Copyright The LanceDB Authors
import warnings
from typing import List, Union
import numpy as np
@@ -16,8 +15,6 @@ from .utils import weak_lru
@register("gte-text")
class GteEmbeddings(TextEmbeddingFunction):
"""
Deprecated: GTE embeddings should be used through sentence-transformers.
An embedding function that uses GTE-LARGE MLX format(for Apple silicon devices only)
as well as the standard cpu/gpu version from: https://huggingface.co/thenlper/gte-large.
@@ -64,13 +61,6 @@ class GteEmbeddings(TextEmbeddingFunction):
def __init__(self, **kwargs):
super().__init__(**kwargs)
warnings.warn(
"GTE embeddings as a standalone embedding function are deprecated. "
"Use the 'sentence-transformers' embedding function with a GTE model "
"instead.",
DeprecationWarning,
stacklevel=3,
)
self._ndims = None
if kwargs:
self.mlx = kwargs.get("mlx", False)

View File

@@ -110,9 +110,6 @@ class OpenAIEmbeddings(TextEmbeddingFunction):
valid_embeddings = {
idx: v.embedding for v, idx in zip(rs.data, valid_indices)
}
except openai.AuthenticationError:
logging.error("Authentication failed: Invalid API key provided")
raise
except openai.BadRequestError:
logging.exception("Bad request: %s", texts)
return [None] * len(texts)

View File

@@ -6,7 +6,6 @@ import io
import os
from typing import TYPE_CHECKING, List, Union
import urllib.parse as urlparse
import warnings
import numpy as np
import pyarrow as pa
@@ -25,7 +24,6 @@ if TYPE_CHECKING:
@register("siglip")
class SigLipEmbeddings(EmbeddingFunction):
# Deprecated: prefer CLIP embeddings via `open-clip`.
model_name: str = "google/siglip-base-patch16-224"
device: str = "cpu"
batch_size: int = 64
@@ -38,12 +36,6 @@ class SigLipEmbeddings(EmbeddingFunction):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(
"SigLip embeddings are deprecated. Use CLIP embeddings via the "
"'open-clip' embedding function instead.",
DeprecationWarning,
stacklevel=3,
)
transformers = attempt_import_or_raise("transformers")
self._torch = attempt_import_or_raise("torch")

View File

@@ -269,11 +269,6 @@ def retry_with_exponential_backoff(
# and say that it is assumed that if this portion errors out, it's due
# to rate limit but the user should check the error message to be sure.
except Exception as e: # noqa: PERF203
# Don't retry on authentication errors (e.g., OpenAI 401)
# These are permanent failures that won't be fixed by retrying
if _is_non_retryable_error(e):
raise
num_retries += 1
if num_retries > max_retries:
@@ -294,29 +289,6 @@ def retry_with_exponential_backoff(
return wrapper
def _is_non_retryable_error(error: Exception) -> bool:
"""Check if an error should not be retried.
Args:
error: The exception to check
Returns:
True if the error should not be retried, False otherwise
"""
# Check for OpenAI authentication errors
error_type = type(error).__name__
if error_type == "AuthenticationError":
return True
# Check for other common non-retryable HTTP status codes
# 401 Unauthorized, 403 Forbidden
if hasattr(error, "status_code"):
if error.status_code in (401, 403):
return True
return False
def url_retrieve(url: str):
"""
Parameters

View File

@@ -44,7 +44,7 @@ from lance_namespace import (
ListNamespacesRequest,
CreateNamespaceRequest,
DropNamespaceRequest,
DeclareTableRequest,
CreateEmptyTableRequest,
)
from lancedb.table import AsyncTable, LanceTable, Table
from lancedb.util import validate_table_name
@@ -318,20 +318,20 @@ class LanceNamespaceDBConnection(DBConnection):
if location is None:
# Table doesn't exist or mode is "create", reserve a new location
declare_request = DeclareTableRequest(
create_empty_request = CreateEmptyTableRequest(
id=table_id,
location=None,
properties=self.storage_options if self.storage_options else None,
)
declare_response = self._ns.declare_table(declare_request)
create_empty_response = self._ns.create_empty_table(create_empty_request)
if not declare_response.location:
if not create_empty_response.location:
raise ValueError(
"Table location is missing from declare_table response"
"Table location is missing from create_empty_table response"
)
location = declare_response.location
namespace_storage_options = declare_response.storage_options
location = create_empty_response.location
namespace_storage_options = create_empty_response.storage_options
# Merge storage options: self.storage_options < user options < namespace options
merged_storage_options = dict(self.storage_options)
@@ -759,20 +759,20 @@ class AsyncLanceNamespaceDBConnection:
if location is None:
# Table doesn't exist or mode is "create", reserve a new location
declare_request = DeclareTableRequest(
create_empty_request = CreateEmptyTableRequest(
id=table_id,
location=None,
properties=self.storage_options if self.storage_options else None,
)
declare_response = self._ns.declare_table(declare_request)
create_empty_response = self._ns.create_empty_table(create_empty_request)
if not declare_response.location:
if not create_empty_response.location:
raise ValueError(
"Table location is missing from declare_table response"
"Table location is missing from create_empty_table response"
)
location = declare_response.location
namespace_storage_options = declare_response.storage_options
location = create_empty_response.location
namespace_storage_options = create_empty_response.storage_options
# Merge storage options: self.storage_options < user options < namespace options
merged_storage_options = dict(self.storage_options)

View File

@@ -1782,26 +1782,6 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
vector_results = LanceHybridQueryBuilder._rank(vector_results, "_distance")
fts_results = LanceHybridQueryBuilder._rank(fts_results, "_score")
# If both result sets are empty (e.g. after hard filtering),
# return early to avoid errors in reranking or score restoration.
if vector_results.num_rows == 0 and fts_results.num_rows == 0:
# Build a minimal empty table with the _relevance_score column
combined_schema = pa.unify_schemas(
[vector_results.schema, fts_results.schema],
)
empty = pa.table(
{
col: pa.array([], type=combined_schema.field(col).type)
for col in combined_schema.names
}
)
empty = empty.append_column(
"_relevance_score", pa.array([], type=pa.float32())
)
if not with_row_ids and "_rowid" in empty.column_names:
empty = empty.drop(["_rowid"])
return empty
original_distances = None
original_scores = None
original_distance_row_ids = None

View File

@@ -515,34 +515,3 @@ def test_openai_propagates_api_key(monkeypatch):
query = "greetings"
actual = table.search(query).limit(1).to_pydantic(Words)[0]
assert len(actual.text) > 0
@patch("time.sleep")
def test_openai_no_retry_on_401(mock_sleep):
"""
Test that OpenAI embedding function does not retry on 401 authentication
errors.
"""
from lancedb.embeddings.utils import retry_with_exponential_backoff
# Create a mock that raises an AuthenticationError
class MockAuthenticationError(Exception):
"""Mock OpenAI AuthenticationError"""
pass
MockAuthenticationError.__name__ = "AuthenticationError"
mock_func = MagicMock(side_effect=MockAuthenticationError("Invalid API key"))
# Wrap the function with retry logic
wrapped_func = retry_with_exponential_backoff(mock_func, max_retries=3)
# Should raise without retrying
with pytest.raises(MockAuthenticationError):
wrapped_func()
# Verify that the function was only called once (no retries)
assert mock_func.call_count == 1
# Verify that sleep was never called (no retries)
assert mock_sleep.call_count == 0

View File

@@ -531,78 +531,6 @@ def test_empty_result_reranker():
)
def test_empty_hybrid_result_reranker():
"""Test that hybrid search with empty results after filtering doesn't crash.
Regression test for https://github.com/lancedb/lancedb/issues/2425
"""
from lancedb.query import LanceHybridQueryBuilder
# Simulate empty vector and FTS results with the expected schema
vector_schema = pa.schema(
[
("text", pa.string()),
("vector", pa.list_(pa.float32(), 4)),
("_rowid", pa.uint64()),
("_distance", pa.float32()),
]
)
fts_schema = pa.schema(
[
("text", pa.string()),
("vector", pa.list_(pa.float32(), 4)),
("_rowid", pa.uint64()),
("_score", pa.float32()),
]
)
empty_vector = pa.table(
{
"text": pa.array([], type=pa.string()),
"vector": pa.array([], type=pa.list_(pa.float32(), 4)),
"_rowid": pa.array([], type=pa.uint64()),
"_distance": pa.array([], type=pa.float32()),
},
schema=vector_schema,
)
empty_fts = pa.table(
{
"text": pa.array([], type=pa.string()),
"vector": pa.array([], type=pa.list_(pa.float32(), 4)),
"_rowid": pa.array([], type=pa.uint64()),
"_score": pa.array([], type=pa.float32()),
},
schema=fts_schema,
)
for reranker in [LinearCombinationReranker(), RRFReranker()]:
result = LanceHybridQueryBuilder._combine_hybrid_results(
fts_results=empty_fts,
vector_results=empty_vector,
norm="score",
fts_query="nonexistent query",
reranker=reranker,
limit=10,
with_row_ids=False,
)
assert len(result) == 0
assert "_relevance_score" in result.column_names
assert "_rowid" not in result.column_names
# Also test with with_row_ids=True
result = LanceHybridQueryBuilder._combine_hybrid_results(
fts_results=empty_fts,
vector_results=empty_vector,
norm="score",
fts_query="nonexistent query",
reranker=LinearCombinationReranker(),
limit=10,
with_row_ids=True,
)
assert len(result) == 0
assert "_relevance_score" in result.column_names
assert "_rowid" in result.column_names
@pytest.mark.parametrize("use_tantivy", [True, False])
def test_cross_encoder_reranker_return_all(tmp_path, use_tantivy):
pytest.importorskip("sentence_transformers")

View File

@@ -292,14 +292,18 @@ class TestModel(lancedb.pydantic.LanceModel):
lambda: pa.table({"a": [1], "b": [2]}),
lambda: pa.table({"a": [1], "b": [2]}).to_reader(),
lambda: iter(pa.table({"a": [1], "b": [2]}).to_batches()),
lambda: lance.write_dataset(
pa.table({"a": [1], "b": [2]}),
"memory://test",
lambda: (
lance.write_dataset(
pa.table({"a": [1], "b": [2]}),
"memory://test",
)
),
lambda: (
lance.write_dataset(
pa.table({"a": [1], "b": [2]}),
"memory://test",
).scanner()
),
lambda: lance.write_dataset(
pa.table({"a": [1], "b": [2]}),
"memory://test",
).scanner(),
lambda: pd.DataFrame({"a": [1], "b": [2]}),
lambda: pl.DataFrame({"a": [1], "b": [2]}),
lambda: pl.LazyFrame({"a": [1], "b": [2]}),

View File

@@ -121,8 +121,7 @@ impl Connection {
let mode = Self::parse_create_mode_str(mode)?;
let batches: Box<dyn arrow::array::RecordBatchReader + Send> =
Box::new(ArrowArrayStreamReader::from_pyarrow_bound(&data)?);
let batches = ArrowArrayStreamReader::from_pyarrow_bound(&data)?;
let mut builder = inner.create_table(name, batches).mode(mode);

View File

@@ -23,25 +23,10 @@ use pyo3::{
};
use pyo3_async_runtimes::tokio::future_into_py;
fn table_from_py<'a>(table: Bound<'a, PyAny>) -> PyResult<Bound<'a, Table>> {
if table.hasattr("_inner")? {
Ok(table.getattr("_inner")?.downcast_into::<Table>()?)
} else if table.hasattr("_table")? {
Ok(table
.getattr("_table")?
.getattr("_inner")?
.downcast_into::<Table>()?)
} else {
Err(PyRuntimeError::new_err(
"Provided table does not appear to be a Table or RemoteTable instance",
))
}
}
/// Create a permutation builder for the given table
#[pyo3::pyfunction]
pub fn async_permutation_builder(table: Bound<'_, PyAny>) -> PyResult<PyAsyncPermutationBuilder> {
let table = table_from_py(table)?;
let table = table.getattr("_inner")?.downcast_into::<Table>()?;
let inner_table = table.borrow().inner_ref()?.clone();
let inner_builder = LancePermutationBuilder::new(inner_table);
@@ -265,8 +250,10 @@ impl PyPermutationReader {
permutation_table: Option<Bound<'py, PyAny>>,
split: u64,
) -> PyResult<Bound<'py, PyAny>> {
let base_table = table_from_py(base_table)?;
let permutation_table = permutation_table.map(table_from_py).transpose()?;
let base_table = base_table.getattr("_inner")?.downcast_into::<Table>()?;
let permutation_table = permutation_table
.map(|p| PyResult::Ok(p.getattr("_inner")?.downcast_into::<Table>()?))
.transpose()?;
let base_table = base_table.borrow().inner_ref()?.base_table().clone();
let permutation_table = permutation_table

View File

@@ -296,8 +296,7 @@ impl Table {
data: Bound<'_, PyAny>,
mode: String,
) -> PyResult<Bound<'a, PyAny>> {
let batches: Box<dyn arrow::array::RecordBatchReader + Send> =
Box::new(ArrowArrayStreamReader::from_pyarrow_bound(&data)?);
let batches = ArrowArrayStreamReader::from_pyarrow_bound(&data)?;
let mut op = self_.inner_ref()?.add(batches);
if mode == "append" {
op = op.mode(AddDataMode::Append);

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "1.91.0"
channel = "1.90.0"

View File

@@ -1,6 +1,6 @@
[package]
name = "lancedb"
version = "0.27.0-beta.0"
version = "0.26.2"
edition.workspace = true
description = "LanceDB: A serverless, low-latency vector database for AI applications"
license.workspace = true

View File

@@ -3,12 +3,13 @@
use std::{iter::once, sync::Arc};
use arrow_array::{Float64Array, Int32Array, RecordBatch, StringArray};
use arrow_array::{Float64Array, Int32Array, RecordBatch, RecordBatchIterator, StringArray};
use arrow_schema::{DataType, Field, Schema};
use aws_config::Region;
use aws_sdk_bedrockruntime::Client;
use futures::StreamExt;
use lancedb::{
arrow::IntoArrow,
connect,
embeddings::{bedrock::BedrockEmbeddingFunction, EmbeddingDefinition, EmbeddingFunction},
query::{ExecutableQuery, QueryBase},
@@ -66,7 +67,7 @@ async fn main() -> Result<()> {
Ok(())
}
fn make_data() -> RecordBatch {
fn make_data() -> impl IntoArrow {
let schema = Schema::new(vec![
Field::new("id", DataType::Int32, true),
Field::new("text", DataType::Utf8, false),
@@ -82,9 +83,10 @@ fn make_data() -> RecordBatch {
]);
let price = Float64Array::from(vec![10.0, 50.0, 100.0, 30.0]);
let schema = Arc::new(schema);
RecordBatch::try_new(
let rb = RecordBatch::try_new(
schema.clone(),
vec![Arc::new(id), Arc::new(text), Arc::new(price)],
)
.unwrap()
.unwrap();
Box::new(RecordBatchIterator::new(vec![Ok(rb)], schema))
}

View File

@@ -3,13 +3,12 @@
use std::sync::Arc;
use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, StringArray};
use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, RecordBatchReader, StringArray};
use arrow_schema::{DataType, Field, Schema};
use futures::TryStreamExt;
use lance_index::scalar::FullTextSearchQuery;
use lancedb::connection::Connection;
use lancedb::index::scalar::FtsIndexBuilder;
use lancedb::index::Index;
use lancedb::query::{ExecutableQuery, QueryBase};
@@ -30,7 +29,7 @@ async fn main() -> Result<()> {
Ok(())
}
fn create_some_records() -> Result<Box<dyn arrow_array::RecordBatchReader + Send>> {
fn create_some_records() -> Result<Box<dyn RecordBatchReader + Send>> {
const TOTAL: usize = 1000;
let schema = Arc::new(Schema::new(vec![
@@ -67,7 +66,7 @@ fn create_some_records() -> Result<Box<dyn arrow_array::RecordBatchReader + Send
}
async fn create_table(db: &Connection) -> Result<Table> {
let initial_data = create_some_records()?;
let initial_data: Box<dyn RecordBatchReader + Send> = create_some_records()?;
let tbl = db.create_table("my_table", initial_data).execute().await?;
Ok(tbl)
}

View File

@@ -1,13 +1,14 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
use arrow_array::{RecordBatch, StringArray};
use arrow_array::{RecordBatch, RecordBatchIterator, StringArray};
use arrow_schema::{DataType, Field, Schema};
use futures::TryStreamExt;
use lance_index::scalar::FullTextSearchQuery;
use lancedb::index::scalar::FtsIndexBuilder;
use lancedb::index::Index;
use lancedb::{
arrow::IntoArrow,
connect,
embeddings::{
sentence_transformers::SentenceTransformersEmbeddings, EmbeddingDefinition,
@@ -69,7 +70,7 @@ async fn main() -> Result<()> {
Ok(())
}
fn make_data() -> RecordBatch {
fn make_data() -> impl IntoArrow {
let schema = Schema::new(vec![Field::new("facts", DataType::Utf8, false)]);
let facts = StringArray::from_iter_values(vec![
@@ -100,7 +101,8 @@ fn make_data() -> RecordBatch {
"The first chatbot was ELIZA, created in the 1960s.",
]);
let schema = Arc::new(schema);
RecordBatch::try_new(schema.clone(), vec![Arc::new(facts)]).unwrap()
let rb = RecordBatch::try_new(schema.clone(), vec![Arc::new(facts)]).unwrap();
Box::new(RecordBatchIterator::new(vec![Ok(rb)], schema))
}
async fn create_index(table: &Table) -> Result<()> {

View File

@@ -8,12 +8,13 @@
use std::sync::Arc;
use arrow_array::types::Float32Type;
use arrow_array::{FixedSizeListArray, Int32Array, RecordBatch, RecordBatchIterator};
use arrow_array::{
FixedSizeListArray, Int32Array, RecordBatch, RecordBatchIterator, RecordBatchReader,
};
use arrow_schema::{DataType, Field, Schema};
use futures::TryStreamExt;
use lancedb::connection::Connection;
use lancedb::index::vector::IvfPqIndexBuilder;
use lancedb::index::Index;
use lancedb::query::{ExecutableQuery, QueryBase};
@@ -33,7 +34,7 @@ async fn main() -> Result<()> {
Ok(())
}
fn create_some_records() -> Result<Box<dyn arrow_array::RecordBatchReader + Send>> {
fn create_some_records() -> Result<Box<dyn RecordBatchReader + Send>> {
const TOTAL: usize = 1000;
const DIM: usize = 128;
@@ -72,9 +73,9 @@ fn create_some_records() -> Result<Box<dyn arrow_array::RecordBatchReader + Send
}
async fn create_table(db: &Connection) -> Result<Table> {
let initial_data = create_some_records()?;
let initial_data: Box<dyn RecordBatchReader + Send> = create_some_records()?;
let tbl = db
.create_table("my_table", initial_data)
.create_table("my_table", Box::new(initial_data))
.execute()
.await
.unwrap();

View File

@@ -5,9 +5,11 @@
use std::{iter::once, sync::Arc};
use arrow_array::{RecordBatch, StringArray};
use arrow_array::{Float64Array, Int32Array, RecordBatch, RecordBatchIterator, StringArray};
use arrow_schema::{DataType, Field, Schema};
use futures::StreamExt;
use lancedb::{
arrow::IntoArrow,
connect,
embeddings::{openai::OpenAIEmbeddingFunction, EmbeddingDefinition, EmbeddingFunction},
query::{ExecutableQuery, QueryBase},
@@ -62,20 +64,26 @@ async fn main() -> Result<()> {
}
// --8<-- [end:openai_embeddings]
fn make_data() -> RecordBatch {
arrow_array::record_batch!(
("id", Int32, [1, 2, 3, 4]),
(
"text",
Utf8,
[
"Black T-Shirt",
"Leather Jacket",
"Winter Parka",
"Hooded Sweatshirt"
]
),
("price", Float64, [10.0, 50.0, 100.0, 30.0])
fn make_data() -> impl IntoArrow {
let schema = Schema::new(vec![
Field::new("id", DataType::Int32, true),
Field::new("text", DataType::Utf8, false),
Field::new("price", DataType::Float64, false),
]);
let id = Int32Array::from(vec![1, 2, 3, 4]);
let text = StringArray::from_iter_values(vec![
"Black T-Shirt",
"Leather Jacket",
"Winter Parka",
"Hooded Sweatshirt",
]);
let price = Float64Array::from(vec![10.0, 50.0, 100.0, 30.0]);
let schema = Arc::new(schema);
let rb = RecordBatch::try_new(
schema.clone(),
vec![Arc::new(id), Arc::new(text), Arc::new(price)],
)
.unwrap()
.unwrap();
Box::new(RecordBatchIterator::new(vec![Ok(rb)], schema))
}

View File

@@ -3,10 +3,11 @@
use std::{iter::once, sync::Arc};
use arrow_array::{RecordBatch, StringArray};
use arrow_array::{RecordBatch, RecordBatchIterator, StringArray};
use arrow_schema::{DataType, Field, Schema};
use futures::StreamExt;
use lancedb::{
arrow::IntoArrow,
connect,
embeddings::{
sentence_transformers::SentenceTransformersEmbeddings, EmbeddingDefinition,
@@ -58,7 +59,7 @@ async fn main() -> Result<()> {
Ok(())
}
fn make_data() -> RecordBatch {
fn make_data() -> impl IntoArrow {
let schema = Schema::new(vec![Field::new("facts", DataType::Utf8, false)]);
let facts = StringArray::from_iter_values(vec![
@@ -89,5 +90,6 @@ fn make_data() -> RecordBatch {
"The first chatbot was ELIZA, created in the 1960s.",
]);
let schema = Arc::new(schema);
RecordBatch::try_new(schema.clone(), vec![Arc::new(facts)]).unwrap()
let rb = RecordBatch::try_new(schema.clone(), vec![Arc::new(facts)]).unwrap();
Box::new(RecordBatchIterator::new(vec![Ok(rb)], schema))
}

View File

@@ -8,9 +8,11 @@
use std::sync::Arc;
use arrow_array::types::Float32Type;
use arrow_array::{FixedSizeListArray, Int32Array, RecordBatch};
use arrow_array::{FixedSizeListArray, Int32Array, RecordBatch, RecordBatchIterator};
use arrow_schema::{DataType, Field, Schema};
use futures::TryStreamExt;
use lancedb::arrow::IntoArrow;
use lancedb::connection::Connection;
use lancedb::index::Index;
use lancedb::query::{ExecutableQuery, QueryBase};
@@ -57,7 +59,7 @@ async fn open_with_existing_tbl() -> Result<()> {
Ok(())
}
fn create_some_records() -> Result<RecordBatch> {
fn create_some_records() -> Result<impl IntoArrow> {
const TOTAL: usize = 1000;
const DIM: usize = 128;
@@ -74,18 +76,25 @@ fn create_some_records() -> Result<RecordBatch> {
]));
// Create a RecordBatch stream.
Ok(RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from_iter_values(0..TOTAL as i32)),
Arc::new(
FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
(0..TOTAL).map(|_| Some(vec![Some(1.0); DIM])),
DIM as i32,
let batches = RecordBatchIterator::new(
vec![RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from_iter_values(0..TOTAL as i32)),
Arc::new(
FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
(0..TOTAL).map(|_| Some(vec![Some(1.0); DIM])),
DIM as i32,
),
),
),
],
)?)
],
)
.unwrap()]
.into_iter()
.map(Ok),
schema.clone(),
);
Ok(Box::new(batches))
}
async fn create_table(db: &Connection) -> Result<LanceDbTable> {

View File

@@ -6,8 +6,8 @@
use std::collections::HashMap;
use std::sync::Arc;
use arrow_array::RecordBatch;
use arrow_schema::SchemaRef;
use arrow_array::RecordBatchReader;
use arrow_schema::{Field, SchemaRef};
use lance::dataset::ReadParams;
use lance_namespace::models::{
CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
@@ -17,20 +17,24 @@ use lance_namespace::models::{
#[cfg(feature = "aws")]
use object_store::aws::AwsCredential;
use crate::connection::create_table::CreateTableBuilder;
use crate::data::scannable::Scannable;
use crate::database::listing::ListingDatabase;
use crate::database::{
CloneTableRequest, Database, DatabaseOptions, OpenTableRequest, ReadConsistency,
TableNamesRequest,
use crate::arrow::{IntoArrow, IntoArrowStream, SendableRecordBatchStream};
use crate::database::listing::{
ListingDatabase, OPT_NEW_TABLE_STORAGE_VERSION, OPT_NEW_TABLE_V2_MANIFEST_PATHS,
};
use crate::database::{
CloneTableRequest, CreateTableData, CreateTableMode, CreateTableRequest, Database,
DatabaseOptions, OpenTableRequest, ReadConsistency, TableNamesRequest,
};
use crate::embeddings::{
EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry, MemoryRegistry, WithEmbeddings,
};
use crate::embeddings::{EmbeddingRegistry, MemoryRegistry};
use crate::error::{Error, Result};
#[cfg(feature = "remote")]
use crate::remote::{
client::ClientConfig,
db::{OPT_REMOTE_API_KEY, OPT_REMOTE_HOST_OVERRIDE, OPT_REMOTE_REGION},
};
use crate::table::{TableDefinition, WriteOptions};
use crate::Table;
use lance::io::ObjectStoreParams;
pub use lance_encoding::version::LanceFileVersion;
@@ -38,8 +42,6 @@ pub use lance_encoding::version::LanceFileVersion;
use lance_io::object_store::StorageOptions;
use lance_io::object_store::{StorageOptionsAccessor, StorageOptionsProvider};
mod create_table;
fn merge_storage_options(
store_params: &mut ObjectStoreParams,
pairs: impl IntoIterator<Item = (String, String)>,
@@ -114,6 +116,337 @@ impl TableNamesBuilder {
}
}
pub struct NoData {}
impl IntoArrow for NoData {
fn into_arrow(self) -> Result<Box<dyn arrow_array::RecordBatchReader + Send>> {
unreachable!("NoData should never be converted to Arrow")
}
}
// Stores the value given from the initial CreateTableBuilder::new call
// and defers errors until `execute` is called
enum CreateTableBuilderInitialData {
None,
Iterator(Result<Box<dyn RecordBatchReader + Send>>),
Stream(Result<SendableRecordBatchStream>),
}
/// A builder for configuring a [`Connection::create_table`] operation
pub struct CreateTableBuilder<const HAS_DATA: bool> {
parent: Arc<dyn Database>,
embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
embedding_registry: Arc<dyn EmbeddingRegistry>,
request: CreateTableRequest,
// This is a bit clumsy but we defer errors until `execute` is called
// to maintain backwards compatibility
data: CreateTableBuilderInitialData,
}
// Builder methods that only apply when we have initial data
impl CreateTableBuilder<true> {
fn new<T: IntoArrow>(
parent: Arc<dyn Database>,
name: String,
data: T,
embedding_registry: Arc<dyn EmbeddingRegistry>,
) -> Self {
let dummy_schema = Arc::new(arrow_schema::Schema::new(Vec::<Field>::default()));
Self {
parent,
request: CreateTableRequest::new(
name,
CreateTableData::Empty(TableDefinition::new_from_schema(dummy_schema)),
),
embeddings: Vec::new(),
embedding_registry,
data: CreateTableBuilderInitialData::Iterator(data.into_arrow()),
}
}
fn new_streaming<T: IntoArrowStream>(
parent: Arc<dyn Database>,
name: String,
data: T,
embedding_registry: Arc<dyn EmbeddingRegistry>,
) -> Self {
let dummy_schema = Arc::new(arrow_schema::Schema::new(Vec::<Field>::default()));
Self {
parent,
request: CreateTableRequest::new(
name,
CreateTableData::Empty(TableDefinition::new_from_schema(dummy_schema)),
),
embeddings: Vec::new(),
embedding_registry,
data: CreateTableBuilderInitialData::Stream(data.into_arrow()),
}
}
/// Execute the create table operation
pub async fn execute(self) -> Result<Table> {
let embedding_registry = self.embedding_registry.clone();
let parent = self.parent.clone();
let request = self.into_request()?;
Ok(Table::new_with_embedding_registry(
parent.create_table(request).await?,
parent,
embedding_registry,
))
}
fn into_request(self) -> Result<CreateTableRequest> {
if self.embeddings.is_empty() {
match self.data {
CreateTableBuilderInitialData::Iterator(maybe_iter) => {
let data = maybe_iter?;
Ok(CreateTableRequest {
data: CreateTableData::Data(data),
..self.request
})
}
CreateTableBuilderInitialData::None => {
unreachable!("No data provided for CreateTableBuilder<true>")
}
CreateTableBuilderInitialData::Stream(maybe_stream) => {
let data = maybe_stream?;
Ok(CreateTableRequest {
data: CreateTableData::StreamingData(data),
..self.request
})
}
}
} else {
let CreateTableBuilderInitialData::Iterator(maybe_iter) = self.data else {
return Err(Error::NotSupported { message: "Creating a table with embeddings is currently not support when the input is streaming".to_string() });
};
let data = maybe_iter?;
let data = Box::new(WithEmbeddings::new(data, self.embeddings));
Ok(CreateTableRequest {
data: CreateTableData::Data(data),
..self.request
})
}
}
}
// Builder methods that only apply when we do not have initial data
impl CreateTableBuilder<false> {
fn new(
parent: Arc<dyn Database>,
name: String,
schema: SchemaRef,
embedding_registry: Arc<dyn EmbeddingRegistry>,
) -> Self {
let table_definition = TableDefinition::new_from_schema(schema);
Self {
parent,
request: CreateTableRequest::new(name, CreateTableData::Empty(table_definition)),
data: CreateTableBuilderInitialData::None,
embeddings: Vec::default(),
embedding_registry,
}
}
/// Execute the create table operation
pub async fn execute(self) -> Result<Table> {
let parent = self.parent.clone();
let embedding_registry = self.embedding_registry.clone();
let request = self.into_request()?;
Ok(Table::new_with_embedding_registry(
parent.create_table(request).await?,
parent,
embedding_registry,
))
}
fn into_request(self) -> Result<CreateTableRequest> {
if self.embeddings.is_empty() {
return Ok(self.request);
}
let CreateTableData::Empty(table_def) = self.request.data else {
unreachable!("CreateTableBuilder<false> should always have Empty data")
};
let schema = table_def.schema.clone();
let empty_batch = arrow_array::RecordBatch::new_empty(schema.clone());
let reader = Box::new(std::iter::once(Ok(empty_batch)).collect::<Vec<_>>());
let reader = arrow_array::RecordBatchIterator::new(reader.into_iter(), schema);
let with_embeddings = WithEmbeddings::new(reader, self.embeddings);
let table_definition = with_embeddings.table_definition()?;
Ok(CreateTableRequest {
data: CreateTableData::Empty(table_definition),
..self.request
})
}
}
impl<const HAS_DATA: bool> CreateTableBuilder<HAS_DATA> {
/// Set the mode for creating the table
///
/// This controls what happens if a table with the given name already exists
pub fn mode(mut self, mode: CreateTableMode) -> Self {
self.request.mode = mode;
self
}
/// Apply the given write options when writing the initial data
pub fn write_options(mut self, write_options: WriteOptions) -> Self {
self.request.write_options = write_options;
self
}
/// Set an option for the storage layer.
///
/// Options already set on the connection will be inherited by the table,
/// but can be overridden here.
///
/// See available options at <https://lancedb.com/docs/storage/>
pub fn storage_option(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
let store_params = self
.request
.write_options
.lance_write_params
.get_or_insert(Default::default())
.store_params
.get_or_insert(Default::default());
merge_storage_options(store_params, [(key.into(), value.into())]);
self
}
/// Set multiple options for the storage layer.
///
/// Options already set on the connection will be inherited by the table,
/// but can be overridden here.
///
/// See available options at <https://lancedb.com/docs/storage/>
pub fn storage_options(
mut self,
pairs: impl IntoIterator<Item = (impl Into<String>, impl Into<String>)>,
) -> Self {
let store_params = self
.request
.write_options
.lance_write_params
.get_or_insert(Default::default())
.store_params
.get_or_insert(Default::default());
let updates = pairs
.into_iter()
.map(|(key, value)| (key.into(), value.into()));
merge_storage_options(store_params, updates);
self
}
/// Add an embedding definition to the table.
///
/// The `embedding_name` must match the name of an embedding function that
/// was previously registered with the connection's [`EmbeddingRegistry`].
pub fn add_embedding(mut self, definition: EmbeddingDefinition) -> Result<Self> {
// Early verification of the embedding name
let embedding_func = self
.embedding_registry
.get(&definition.embedding_name)
.ok_or_else(|| Error::EmbeddingFunctionNotFound {
name: definition.embedding_name.clone(),
reason: "No embedding function found in the connection's embedding_registry"
.to_string(),
})?;
self.embeddings.push((definition, embedding_func));
Ok(self)
}
/// Set whether to use V2 manifest paths for the table. (default: false)
///
/// These paths provide more efficient opening of tables with many
/// versions on object stores.
///
/// <div class="warning">Turning this on will make the dataset unreadable
/// for older versions of LanceDB (prior to 0.10.0).</div>
///
/// To migrate an existing dataset, instead use the
/// [[NativeTable::migrate_manifest_paths_v2]].
///
/// This has no effect in LanceDB Cloud.
#[deprecated(since = "0.15.1", note = "Use `database_options` instead")]
pub fn enable_v2_manifest_paths(mut self, use_v2_manifest_paths: bool) -> Self {
let store_params = self
.request
.write_options
.lance_write_params
.get_or_insert_with(Default::default)
.store_params
.get_or_insert_with(Default::default);
let value = if use_v2_manifest_paths {
"true".to_string()
} else {
"false".to_string()
};
merge_storage_options(
store_params,
[(OPT_NEW_TABLE_V2_MANIFEST_PATHS.to_string(), value)],
);
self
}
/// Set the data storage version.
///
/// The default is `LanceFileVersion::Stable`.
#[deprecated(since = "0.15.1", note = "Use `database_options` instead")]
pub fn data_storage_version(mut self, data_storage_version: LanceFileVersion) -> Self {
let store_params = self
.request
.write_options
.lance_write_params
.get_or_insert_with(Default::default)
.store_params
.get_or_insert_with(Default::default);
merge_storage_options(
store_params,
[(
OPT_NEW_TABLE_STORAGE_VERSION.to_string(),
data_storage_version.to_string(),
)],
);
self
}
/// Set the namespace for the table
pub fn namespace(mut self, namespace: Vec<String>) -> Self {
self.request.namespace = namespace;
self
}
/// Set a custom location for the table.
///
/// If not set, the database will derive a location from its URI and the table name.
/// This is useful when integrating with namespace systems that manage table locations.
pub fn location(mut self, location: impl Into<String>) -> Self {
self.request.location = Some(location.into());
self
}
/// Set a storage options provider for automatic credential refresh.
///
/// This allows tables to automatically refresh cloud storage credentials
/// when they expire, enabling long-running operations on remote storage.
pub fn storage_options_provider(mut self, provider: Arc<dyn StorageOptionsProvider>) -> Self {
let store_params = self
.request
.write_options
.lance_write_params
.get_or_insert(Default::default())
.store_params
.get_or_insert(Default::default());
set_storage_options_provider(store_params, provider);
self
}
}
#[derive(Clone, Debug)]
pub struct OpenTableBuilder {
parent: Arc<dyn Database>,
@@ -351,17 +684,35 @@ impl Connection {
///
/// * `name` - The name of the table
/// * `initial_data` - The initial data to write to the table
pub fn create_table<T: Scannable + 'static>(
pub fn create_table<T: IntoArrow>(
&self,
name: impl Into<String>,
initial_data: T,
) -> CreateTableBuilder {
let initial_data = Box::new(initial_data);
CreateTableBuilder::new(
) -> CreateTableBuilder<true> {
CreateTableBuilder::<true>::new(
self.internal.clone(),
self.embedding_registry.clone(),
name.into(),
initial_data,
self.embedding_registry.clone(),
)
}
/// Create a new table from a stream of data
///
/// # Parameters
///
/// * `name` - The name of the table
/// * `initial_data` - The initial data to write to the table
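///
/// A minimal sketch (assuming `batches: Vec<RecordBatch>` and a matching
/// `schema` are already in scope; any stream implementing `IntoArrowStream` works):
///
/// ```ignore
/// let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream::new(
///     futures::stream::iter(batches.into_iter().map(Ok)),
///     schema,
/// ));
/// let table = db
///     .create_table_streaming("events", stream)
///     .execute()
///     .await?;
/// ```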
pub fn create_table_streaming<T: IntoArrowStream>(
&self,
name: impl Into<String>,
initial_data: T,
) -> CreateTableBuilder<true> {
CreateTableBuilder::<true>::new_streaming(
self.internal.clone(),
name.into(),
initial_data,
self.embedding_registry.clone(),
)
}
@@ -375,9 +726,13 @@ impl Connection {
&self,
name: impl Into<String>,
schema: SchemaRef,
) -> CreateTableBuilder {
let empty_batch = RecordBatch::new_empty(schema);
self.create_table(name, empty_batch)
) -> CreateTableBuilder<false> {
CreateTableBuilder::<false>::new(
self.internal.clone(),
name.into(),
schema,
self.embedding_registry.clone(),
)
}
/// Open an existing table in the database
@@ -566,8 +921,15 @@ pub struct ConnectBuilder {
}
#[cfg(feature = "remote")]
const ENV_VARS_TO_STORAGE_OPTS: [(&str, &str); 1] =
[("AZURE_STORAGE_ACCOUNT_NAME", "azure_storage_account_name")];
const ENV_VARS_TO_STORAGE_OPTS: [(&str, &str); 4] = [
("AZURE_STORAGE_ACCOUNT_NAME", "azure_storage_account_name"),
("AZURE_TENANT_ID", "azure_tenant_id"),
("AZURE_CLIENT_ID", "azure_client_id"),
(
"AZURE_FEDERATED_TOKEN_FILE",
"azure_federated_token_file",
),
];
impl ConnectBuilder {
/// Create a new [`ConnectOptions`] with the given database URI.
@@ -994,11 +1356,20 @@ mod test_utils {
#[cfg(test)]
mod tests {
use crate::database::listing::{ListingDatabaseOptions, NewTableConfig};
use crate::query::QueryBase;
use crate::query::{ExecutableQuery, QueryExecutionOptions};
use crate::test_utils::connection::new_test_connection;
use arrow::compute::concat_batches;
use arrow_array::RecordBatchReader;
use arrow_schema::{DataType, Field, Schema};
use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
use futures::{stream, TryStreamExt};
use lance_core::error::{ArrowResult, DataFusionResult};
use lance_testing::datagen::{BatchGenerator, IncrementingInt32};
use tempfile::tempdir;
use crate::test_utils::connection::new_test_connection;
use crate::arrow::SimpleRecordBatchStream;
use super::*;
@@ -1114,6 +1485,139 @@ mod tests {
assert_eq!(tables, vec!["table1".to_owned()]);
}
fn make_data() -> Box<dyn RecordBatchReader + Send + 'static> {
let id = Box::new(IncrementingInt32::new().named("id".to_string()));
Box::new(BatchGenerator::new().col(id).batches(10, 2000))
}
#[tokio::test]
async fn test_create_table_v2() {
let tmp_dir = tempdir().unwrap();
let uri = tmp_dir.path().to_str().unwrap();
let db = connect(uri)
.database_options(&ListingDatabaseOptions {
new_table_config: NewTableConfig {
data_storage_version: Some(LanceFileVersion::Legacy),
..Default::default()
},
..Default::default()
})
.execute()
.await
.unwrap();
let tbl = db
.create_table("v1_test", make_data())
.execute()
.await
.unwrap();
// In v1 the row group size will trump max_batch_length
let batches = tbl
.query()
.limit(20000)
.execute_with_options(QueryExecutionOptions {
max_batch_length: 50000,
..Default::default()
})
.await
.unwrap()
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(batches.len(), 20);
let db = connect(uri)
.database_options(&ListingDatabaseOptions {
new_table_config: NewTableConfig {
data_storage_version: Some(LanceFileVersion::Stable),
..Default::default()
},
..Default::default()
})
.execute()
.await
.unwrap();
let tbl = db
.create_table("v2_test", make_data())
.execute()
.await
.unwrap();
// In v2 the page size is much bigger than 50k so we should get a single batch
let batches = tbl
.query()
.execute_with_options(QueryExecutionOptions {
max_batch_length: 50000,
..Default::default()
})
.await
.unwrap()
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(batches.len(), 1);
}
#[tokio::test]
async fn test_create_table_streaming() {
let tmp_dir = tempdir().unwrap();
let uri = tmp_dir.path().to_str().unwrap();
let db = connect(uri).execute().await.unwrap();
let batches = make_data().collect::<ArrowResult<Vec<_>>>().unwrap();
let schema = batches.first().unwrap().schema();
let one_batch = concat_batches(&schema, batches.iter()).unwrap();
let ldb_stream = stream::iter(batches.clone().into_iter().map(Result::Ok));
let ldb_stream: SendableRecordBatchStream =
Box::pin(SimpleRecordBatchStream::new(ldb_stream, schema.clone()));
let tbl1 = db
.create_table_streaming("one", ldb_stream)
.execute()
.await
.unwrap();
let df_stream = stream::iter(batches.into_iter().map(DataFusionResult::Ok));
let df_stream: datafusion_physical_plan::SendableRecordBatchStream =
Box::pin(RecordBatchStreamAdapter::new(schema.clone(), df_stream));
let tbl2 = db
.create_table_streaming("two", df_stream)
.execute()
.await
.unwrap();
let tbl1_data = tbl1
.query()
.execute()
.await
.unwrap()
.try_collect::<Vec<_>>()
.await
.unwrap();
let tbl1_data = concat_batches(&schema, tbl1_data.iter()).unwrap();
assert_eq!(tbl1_data, one_batch);
let tbl2_data = tbl2
.query()
.execute()
.await
.unwrap()
.try_collect::<Vec<_>>()
.await
.unwrap();
let tbl2_data = concat_batches(&schema, tbl2_data.iter()).unwrap();
assert_eq!(tbl2_data, one_batch);
}
#[tokio::test]
async fn drop_table() {
let tc = new_test_connection().await.unwrap();
@@ -1143,6 +1647,41 @@ mod tests {
assert_eq!(tables.len(), 0);
}
#[tokio::test]
async fn test_create_table_already_exists() {
let tmp_dir = tempdir().unwrap();
let uri = tmp_dir.path().to_str().unwrap();
let db = connect(uri).execute().await.unwrap();
let schema = Arc::new(Schema::new(vec![Field::new("x", DataType::Int32, false)]));
db.create_empty_table("test", schema.clone())
.execute()
.await
.unwrap();
// TODO: None of the open table options are "inspectable" right now, but once one is,
// we should assert that these options are passed through correctly
db.create_empty_table("test", schema)
.mode(CreateTableMode::exist_ok(|mut req| {
req.index_cache_size = Some(16);
req
}))
.execute()
.await
.unwrap();
let other_schema = Arc::new(Schema::new(vec![Field::new("y", DataType::Int32, false)]));
assert!(db
.create_empty_table("test", other_schema.clone())
.execute()
.await
.is_err());
let overwritten = db
.create_empty_table("test", other_schema.clone())
.mode(CreateTableMode::Overwrite)
.execute()
.await
.unwrap();
assert_eq!(other_schema, overwritten.schema().await.unwrap());
}
#[tokio::test]
async fn test_clone_table() {
let tmp_dir = tempdir().unwrap();
@@ -1153,8 +1692,7 @@ mod tests {
let mut batch_gen = BatchGenerator::new()
.col(Box::new(IncrementingInt32::new().named("id")))
.col(Box::new(IncrementingInt32::new().named("value")));
let reader: Box<dyn arrow_array::RecordBatchReader + Send> =
Box::new(batch_gen.batches(5, 100));
let reader = batch_gen.batches(5, 100);
let source_table = db
.create_table("source_table", reader)
@@ -1189,4 +1727,128 @@ mod tests {
let cloned_count = cloned_table.count_rows(None).await.unwrap();
assert_eq!(source_count, cloned_count);
}
#[tokio::test]
async fn test_create_empty_table_with_embeddings() {
use crate::embeddings::{EmbeddingDefinition, EmbeddingFunction};
use arrow_array::{
Array, FixedSizeListArray, Float32Array, RecordBatch, RecordBatchIterator, StringArray,
};
use std::borrow::Cow;
#[derive(Debug, Clone)]
struct MockEmbedding {
dim: usize,
}
impl EmbeddingFunction for MockEmbedding {
fn name(&self) -> &str {
"test_embedding"
}
fn source_type(&self) -> Result<Cow<'_, DataType>> {
Ok(Cow::Owned(DataType::Utf8))
}
fn dest_type(&self) -> Result<Cow<'_, DataType>> {
Ok(Cow::Owned(DataType::new_fixed_size_list(
DataType::Float32,
self.dim as i32,
true,
)))
}
fn compute_source_embeddings(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
let len = source.len();
let values = vec![1.0f32; len * self.dim];
let values = Arc::new(Float32Array::from(values));
let field = Arc::new(Field::new("item", DataType::Float32, true));
Ok(Arc::new(FixedSizeListArray::new(
field,
self.dim as i32,
values,
None,
)))
}
fn compute_query_embeddings(&self, _input: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
unimplemented!()
}
}
let tmp_dir = tempdir().unwrap();
let uri = tmp_dir.path().to_str().unwrap();
let db = connect(uri).execute().await.unwrap();
let embed_func = Arc::new(MockEmbedding { dim: 128 });
db.embedding_registry()
.register("test_embedding", embed_func.clone())
.unwrap();
let schema = Arc::new(Schema::new(vec![Field::new("name", DataType::Utf8, true)]));
let ed = EmbeddingDefinition {
source_column: "name".to_owned(),
dest_column: Some("name_embedding".to_owned()),
embedding_name: "test_embedding".to_owned(),
};
let table = db
.create_empty_table("test", schema)
.mode(CreateTableMode::Overwrite)
.add_embedding(ed)
.unwrap()
.execute()
.await
.unwrap();
let table_schema = table.schema().await.unwrap();
assert!(table_schema.column_with_name("name").is_some());
assert!(table_schema.column_with_name("name_embedding").is_some());
let embedding_field = table_schema.field_with_name("name_embedding").unwrap();
assert_eq!(
embedding_field.data_type(),
&DataType::new_fixed_size_list(DataType::Float32, 128, true)
);
let input_schema = Arc::new(Schema::new(vec![Field::new("name", DataType::Utf8, true)]));
let input_batch = RecordBatch::try_new(
input_schema.clone(),
vec![Arc::new(StringArray::from(vec![
Some("Alice"),
Some("Bob"),
Some("Charlie"),
]))],
)
.unwrap();
let input_reader = Box::new(RecordBatchIterator::new(
vec![Ok(input_batch)].into_iter(),
input_schema,
));
table.add(input_reader).execute().await.unwrap();
let results = table
.query()
.execute()
.await
.unwrap()
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(results.len(), 1);
let batch = &results[0];
assert_eq!(batch.num_rows(), 3);
assert!(batch.column_by_name("name_embedding").is_some());
let embedding_col = batch
.column_by_name("name_embedding")
.unwrap()
.as_any()
.downcast_ref::<FixedSizeListArray>()
.unwrap();
assert_eq!(embedding_col.len(), 3);
}
}

View File

@@ -1,612 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
use std::sync::Arc;
use lance_io::object_store::StorageOptionsProvider;
use crate::{
connection::{merge_storage_options, set_storage_options_provider},
data::scannable::{Scannable, WithEmbeddingsScannable},
database::{CreateTableMode, CreateTableRequest, Database},
embeddings::{EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry},
table::WriteOptions,
Error, Result, Table,
};
pub struct CreateTableBuilder {
parent: Arc<dyn Database>,
embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
embedding_registry: Arc<dyn EmbeddingRegistry>,
request: CreateTableRequest,
}
impl CreateTableBuilder {
pub(super) fn new(
parent: Arc<dyn Database>,
embedding_registry: Arc<dyn EmbeddingRegistry>,
name: String,
data: Box<dyn Scannable>,
) -> Self {
Self {
parent,
embeddings: Vec::new(),
embedding_registry,
request: CreateTableRequest::new(name, data),
}
}
/// Set the mode for creating the table
///
/// This controls what happens if a table with the given name already exists
pub fn mode(mut self, mode: CreateTableMode) -> Self {
self.request.mode = mode;
self
}
/// Apply the given write options when writing the initial data
pub fn write_options(mut self, write_options: WriteOptions) -> Self {
self.request.write_options = write_options;
self
}
/// Set an option for the storage layer.
///
/// Options already set on the connection will be inherited by the table,
/// but can be overridden here.
///
/// See available options at <https://lancedb.com/docs/storage/>
pub fn storage_option(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
let store_params = self
.request
.write_options
.lance_write_params
.get_or_insert(Default::default())
.store_params
.get_or_insert(Default::default());
merge_storage_options(store_params, [(key.into(), value.into())]);
self
}
/// Set multiple options for the storage layer.
///
/// Options already set on the connection will be inherited by the table,
/// but can be overridden here.
///
/// See available options at <https://lancedb.com/docs/storage/>
pub fn storage_options(
mut self,
pairs: impl IntoIterator<Item = (impl Into<String>, impl Into<String>)>,
) -> Self {
let store_params = self
.request
.write_options
.lance_write_params
.get_or_insert(Default::default())
.store_params
.get_or_insert(Default::default());
let updates = pairs
.into_iter()
.map(|(key, value)| (key.into(), value.into()));
merge_storage_options(store_params, updates);
self
}
/// Add an embedding definition to the table.
///
/// The `embedding_name` must match the name of an embedding function that
/// was previously registered with the connection's [`EmbeddingRegistry`].
pub fn add_embedding(mut self, definition: EmbeddingDefinition) -> Result<Self> {
// Early verification of the embedding name
let embedding_func = self
.embedding_registry
.get(&definition.embedding_name)
.ok_or_else(|| Error::EmbeddingFunctionNotFound {
name: definition.embedding_name.clone(),
reason: "No embedding function found in the connection's embedding_registry"
.to_string(),
})?;
self.embeddings.push((definition, embedding_func));
Ok(self)
}
/// Set the namespace for the table
pub fn namespace(mut self, namespace: Vec<String>) -> Self {
self.request.namespace = namespace;
self
}
/// Set a custom location for the table.
///
/// If not set, the database will derive a location from its URI and the table name.
/// This is useful when integrating with namespace systems that manage table locations.
pub fn location(mut self, location: impl Into<String>) -> Self {
self.request.location = Some(location.into());
self
}
/// Set a storage options provider for automatic credential refresh.
///
/// This allows tables to automatically refresh cloud storage credentials
/// when they expire, enabling long-running operations on remote storage.
pub fn storage_options_provider(mut self, provider: Arc<dyn StorageOptionsProvider>) -> Self {
let store_params = self
.request
.write_options
.lance_write_params
.get_or_insert(Default::default())
.store_params
.get_or_insert(Default::default());
set_storage_options_provider(store_params, provider);
self
}
/// Execute the create table operation
pub async fn execute(mut self) -> Result<Table> {
let embedding_registry = self.embedding_registry.clone();
let parent = self.parent.clone();
// If embeddings were configured via add_embedding(), wrap the data
if !self.embeddings.is_empty() {
let wrapped_data: Box<dyn Scannable> = Box::new(WithEmbeddingsScannable::try_new(
self.request.data,
self.embeddings,
)?);
self.request.data = wrapped_data;
}
Ok(Table::new_with_embedding_registry(
parent.create_table(self.request).await?,
parent,
embedding_registry,
))
}
}
#[cfg(test)]
mod tests {
use arrow_array::{
record_batch, Array, FixedSizeListArray, Float32Array, RecordBatch, RecordBatchIterator,
};
use arrow_schema::{ArrowError, DataType, Field, Schema};
use futures::TryStreamExt;
use lance_file::version::LanceFileVersion;
use tempfile::tempdir;
use crate::{
arrow::{SendableRecordBatchStream, SimpleRecordBatchStream},
connect,
database::listing::{ListingDatabaseOptions, NewTableConfig},
embeddings::{EmbeddingDefinition, EmbeddingFunction, MemoryRegistry},
query::{ExecutableQuery, QueryBase, Select},
test_utils::embeddings::MockEmbed,
};
use std::borrow::Cow;
use super::*;
#[tokio::test]
async fn create_empty_table() {
let db = connect("memory://").execute().await.unwrap();
let schema = Arc::new(Schema::new(vec![
Field::new("id", DataType::Int64, false),
Field::new("value", DataType::Float64, false),
]));
db.create_empty_table("name", schema.clone())
.execute()
.await
.unwrap();
let table = db.open_table("name").execute().await.unwrap();
assert_eq!(table.schema().await.unwrap(), schema);
assert_eq!(table.count_rows(None).await.unwrap(), 0);
}
async fn test_create_table_with_data<T>(data: T)
where
T: Scannable + 'static,
{
let db = connect("memory://").execute().await.unwrap();
let schema = data.schema();
db.create_table("data_table", data).execute().await.unwrap();
let table = db.open_table("data_table").execute().await.unwrap();
assert_eq!(table.count_rows(None).await.unwrap(), 3);
assert_eq!(table.schema().await.unwrap(), schema);
}
#[tokio::test]
async fn create_table_with_batch() {
let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
test_create_table_with_data(batch).await;
}
#[tokio::test]
async fn test_create_table_with_vec_batch() {
let data = vec![
record_batch!(("id", Int64, [1, 2])).unwrap(),
record_batch!(("id", Int64, [3])).unwrap(),
];
test_create_table_with_data(data).await;
}
#[tokio::test]
async fn test_create_table_with_record_batch_reader() {
let data = vec![
record_batch!(("id", Int64, [1, 2])).unwrap(),
record_batch!(("id", Int64, [3])).unwrap(),
];
let schema = data[0].schema();
let reader: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(
RecordBatchIterator::new(data.into_iter().map(Ok), schema.clone()),
);
test_create_table_with_data(reader).await;
}
#[tokio::test]
async fn test_create_table_with_stream() {
let data = vec![
record_batch!(("id", Int64, [1, 2])).unwrap(),
record_batch!(("id", Int64, [3])).unwrap(),
];
let schema = data[0].schema();
let inner = futures::stream::iter(data.into_iter().map(Ok));
let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
schema,
stream: inner,
});
test_create_table_with_data(stream).await;
}
#[derive(Debug)]
struct MyError;
impl std::fmt::Display for MyError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "MyError occurred")
}
}
impl std::error::Error for MyError {}
#[tokio::test]
async fn test_create_preserves_reader_error() {
let first_batch = record_batch!(("id", Int64, [1, 2])).unwrap();
let schema = first_batch.schema();
let iterator = vec![
Ok(first_batch),
Err(ArrowError::ExternalError(Box::new(MyError))),
];
let reader: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(
RecordBatchIterator::new(iterator.into_iter(), schema.clone()),
);
let db = connect("memory://").execute().await.unwrap();
let result = db.create_table("failing_table", reader).execute().await;
assert!(result.is_err());
// TODO: when we upgrade to Lance 2.0.0, this should pass
// assert!(matches!(result, Err(Error::External { source})
// if source.downcast_ref::<MyError>().is_some()
// ));
}
#[tokio::test]
async fn test_create_preserves_stream_error() {
let first_batch = record_batch!(("id", Int64, [1, 2])).unwrap();
let schema = first_batch.schema();
let iterator = vec![
Ok(first_batch),
Err(Error::External {
source: Box::new(MyError),
}),
];
let stream = futures::stream::iter(iterator);
let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
schema: schema.clone(),
stream,
});
let db = connect("memory://").execute().await.unwrap();
let result = db
.create_table("failing_stream_table", stream)
.execute()
.await;
assert!(result.is_err());
// TODO: when we upgrade to Lance 2.0.0, this should pass
// assert!(matches!(result, Err(Error::External { source})
// if source.downcast_ref::<MyError>().is_some()
// ));
}
#[tokio::test]
#[allow(deprecated)]
async fn test_create_table_with_storage_options() {
let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
let db = connect("memory://").execute().await.unwrap();
let table = db
.create_table("options_table", batch)
.storage_option("timeout", "30s")
.storage_options([("retry_count", "3")])
.execute()
.await
.unwrap();
let final_options = table.storage_options().await.unwrap();
assert_eq!(final_options.get("timeout"), Some(&"30s".to_string()));
assert_eq!(final_options.get("retry_count"), Some(&"3".to_string()));
}
#[tokio::test]
async fn test_create_table_unregistered_embedding() {
let db = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("text", Utf8, ["hello", "world"])).unwrap();
// Try to add an embedding that doesn't exist in the registry
let result = db
.create_table("embed_table", batch)
.add_embedding(EmbeddingDefinition::new(
"text",
"nonexistent_embedding_function",
None::<&str>,
));
match result {
Err(Error::EmbeddingFunctionNotFound { name, .. }) => {
assert_eq!(name, "nonexistent_embedding_function");
}
Err(other) => panic!("Expected EmbeddingFunctionNotFound error, got: {:?}", other),
Ok(_) => panic!("Expected error, but got Ok"),
}
}
#[tokio::test]
async fn test_create_table_already_exists() {
let tmp_dir = tempdir().unwrap();
let uri = tmp_dir.path().to_str().unwrap();
let db = connect(uri).execute().await.unwrap();
let schema = Arc::new(Schema::new(vec![Field::new("x", DataType::Int32, false)]));
db.create_empty_table("test", schema.clone())
.execute()
.await
.unwrap();
db.create_empty_table("test", schema)
.mode(CreateTableMode::exist_ok(|mut req| {
req.index_cache_size = Some(16);
req
}))
.execute()
.await
.unwrap();
let other_schema = Arc::new(Schema::new(vec![Field::new("y", DataType::Int32, false)]));
assert!(db
.create_empty_table("test", other_schema.clone())
.execute()
.await
.is_err()); // TODO: assert what this error is
let overwritten = db
.create_empty_table("test", other_schema.clone())
.mode(CreateTableMode::Overwrite)
.execute()
.await
.unwrap();
assert_eq!(other_schema, overwritten.schema().await.unwrap());
}
#[tokio::test]
#[rstest::rstest]
#[case(LanceFileVersion::Legacy)]
#[case(LanceFileVersion::Stable)]
async fn test_create_table_with_storage_version(
#[case] data_storage_version: LanceFileVersion,
) {
let db = connect("memory://")
.database_options(&ListingDatabaseOptions {
new_table_config: NewTableConfig {
data_storage_version: Some(data_storage_version),
..Default::default()
},
..Default::default()
})
.execute()
.await
.unwrap();
let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
let table = db
.create_table("legacy_table", batch)
.execute()
.await
.unwrap();
let native_table = table.as_native().unwrap();
let storage_format = native_table
.manifest()
.await
.unwrap()
.data_storage_format
.lance_file_version()
.unwrap();
// Compare resolved versions since Stable/Next are aliases that resolve at storage time
assert_eq!(storage_format.resolve(), data_storage_version.resolve());
}
#[tokio::test]
async fn test_create_table_with_embedding() {
// Register the mock embedding function
let registry = Arc::new(MemoryRegistry::new());
let mock_embedding: Arc<dyn EmbeddingFunction> = Arc::new(MockEmbed::new("mock", 4));
registry.register("mock", mock_embedding).unwrap();
// Connect with the custom registry
let conn = connect("memory://")
.embedding_registry(registry)
.execute()
.await
.unwrap();
// Create data without the embedding column
let batch = record_batch!(("text", Utf8, ["hello", "world", "test"])).unwrap();
// Create table with add_embedding - embeddings should be computed automatically
let table = conn
.create_table("embed_test", batch)
.add_embedding(EmbeddingDefinition::new(
"text",
"mock",
Some("text_embedding"),
))
.unwrap()
.execute()
.await
.unwrap();
// Verify row count
assert_eq!(table.count_rows(None).await.unwrap(), 3);
// Verify the schema includes the embedding column
let result_schema = table.schema().await.unwrap();
assert_eq!(result_schema.fields().len(), 2);
assert_eq!(result_schema.field(0).name(), "text");
assert_eq!(result_schema.field(1).name(), "text_embedding");
// Verify the embedding column has the correct type
assert!(matches!(
result_schema.field(1).data_type(),
DataType::FixedSizeList(_, 4)
));
// Query to verify the embeddings were computed
let results: Vec<RecordBatch> = table
.query()
.select(Select::columns(&["text", "text_embedding"]))
.execute()
.await
.unwrap()
.try_collect()
.await
.unwrap();
let total_rows: usize = results.iter().map(|b| b.num_rows()).sum();
assert_eq!(total_rows, 3);
// Check that all rows have embedding values (not null)
for batch in &results {
let embedding_col = batch.column(1);
assert_eq!(embedding_col.null_count(), 0);
assert_eq!(embedding_col.len(), batch.num_rows());
}
// Verify the schema metadata contains the column definitions
assert!(
result_schema
.metadata
.contains_key("lancedb::column_definitions"),
"Schema metadata should contain column definitions"
);
}
#[tokio::test]
async fn test_create_empty_table_with_embeddings() {
#[derive(Debug, Clone)]
struct MockEmbedding {
dim: usize,
}
impl EmbeddingFunction for MockEmbedding {
fn name(&self) -> &str {
"test_embedding"
}
fn source_type(&self) -> Result<Cow<'_, DataType>> {
Ok(Cow::Owned(DataType::Utf8))
}
fn dest_type(&self) -> Result<Cow<'_, DataType>> {
Ok(Cow::Owned(DataType::new_fixed_size_list(
DataType::Float32,
self.dim as i32,
true,
)))
}
fn compute_source_embeddings(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
let len = source.len();
let values = vec![1.0f32; len * self.dim];
let values = Arc::new(Float32Array::from(values));
let field = Arc::new(Field::new("item", DataType::Float32, true));
Ok(Arc::new(FixedSizeListArray::new(
field,
self.dim as i32,
values,
None,
)))
}
fn compute_query_embeddings(&self, _input: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
unimplemented!()
}
}
let tmp_dir = tempdir().unwrap();
let uri = tmp_dir.path().to_str().unwrap();
let db = connect(uri).execute().await.unwrap();
let embed_func = Arc::new(MockEmbedding { dim: 128 });
db.embedding_registry()
.register("test_embedding", embed_func.clone())
.unwrap();
let schema = Arc::new(Schema::new(vec![Field::new("name", DataType::Utf8, true)]));
let ed = EmbeddingDefinition {
source_column: "name".to_owned(),
dest_column: Some("name_embedding".to_owned()),
embedding_name: "test_embedding".to_owned(),
};
let table = db
.create_empty_table("test", schema)
.mode(CreateTableMode::Overwrite)
.add_embedding(ed)
.unwrap()
.execute()
.await
.unwrap();
let table_schema = table.schema().await.unwrap();
assert!(table_schema.column_with_name("name").is_some());
assert!(table_schema.column_with_name("name_embedding").is_some());
let embedding_field = table_schema.field_with_name("name_embedding").unwrap();
assert_eq!(
embedding_field.data_type(),
&DataType::new_fixed_size_list(DataType::Float32, 128, true)
);
let input_batch = record_batch!(("name", Utf8, ["Alice", "Bob", "Charlie"])).unwrap();
table.add(input_batch).execute().await.unwrap();
let results = table
.query()
.execute()
.await
.unwrap()
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(results.len(), 1);
let batch = &results[0];
assert_eq!(batch.num_rows(), 3);
assert!(batch.column_by_name("name_embedding").is_some());
let embedding_col = batch
.column_by_name("name_embedding")
.unwrap()
.as_any()
.downcast_ref::<FixedSizeListArray>()
.unwrap();
assert_eq!(embedding_col.len(), 3);
}
}

View File

@@ -5,4 +5,3 @@
pub mod inspect;
pub mod sanitize;
pub mod scannable;

View File

@@ -1,580 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
//! Data source abstraction for LanceDB.
//!
//! This module provides a [`Scannable`] trait that allows input data sources to express
//! capabilities (row count, rescannability) so the insert pipeline can make
//! better decisions about write parallelism and retry strategies.
use std::sync::Arc;
use arrow_array::{RecordBatch, RecordBatchIterator, RecordBatchReader};
use arrow_schema::{ArrowError, SchemaRef};
use async_trait::async_trait;
use futures::stream::once;
use futures::StreamExt;
use lance_datafusion::utils::StreamingWriteSource;
use crate::arrow::{
SendableRecordBatchStream, SendableRecordBatchStreamExt, SimpleRecordBatchStream,
};
use crate::embeddings::{
compute_embeddings_for_batch, compute_output_schema, EmbeddingDefinition, EmbeddingFunction,
EmbeddingRegistry,
};
use crate::table::{ColumnDefinition, ColumnKind, TableDefinition};
use crate::{Error, Result};
pub trait Scannable: Send {
/// Returns the schema of the data.
fn schema(&self) -> SchemaRef;
/// Read data as a stream of record batches.
///
/// For rescannable sources (in-memory data like RecordBatch, Vec<RecordBatch>),
/// this can be called multiple times and returns cloned data each time.
///
/// For non-rescannable sources (streams, readers), this can only be called once.
/// Calling it a second time returns a stream whose first item is an error.
fn scan_as_stream(&mut self) -> SendableRecordBatchStream;
/// Optional hint about the number of rows.
///
/// When available, this allows the pipeline to estimate total data size
/// and choose appropriate partitioning.
fn num_rows(&self) -> Option<usize> {
None
}
/// Whether the source can be re-read from the beginning.
///
/// `true` for in-memory data (Tables, DataFrames) and disk-based sources (Datasets).
/// `false` for streaming sources (DuckDB results, network streams).
///
/// When true, the pipeline can retry failed writes by rescanning.
fn rescannable(&self) -> bool {
false
}
}
impl std::fmt::Debug for dyn Scannable {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Scannable")
.field("schema", &self.schema())
.field("num_rows", &self.num_rows())
.field("rescannable", &self.rescannable())
.finish()
}
}
impl Scannable for RecordBatch {
fn schema(&self) -> SchemaRef {
Self::schema(self)
}
fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
let batch = self.clone();
let schema = batch.schema();
Box::pin(SimpleRecordBatchStream {
schema,
stream: once(async move { Ok(batch) }),
})
}
fn num_rows(&self) -> Option<usize> {
Some(Self::num_rows(self))
}
fn rescannable(&self) -> bool {
true
}
}
impl Scannable for Vec<RecordBatch> {
fn schema(&self) -> SchemaRef {
if self.is_empty() {
Arc::new(arrow_schema::Schema::empty())
} else {
self[0].schema()
}
}
fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
if self.is_empty() {
let schema = Scannable::schema(self);
return Box::pin(SimpleRecordBatchStream {
schema,
stream: once(async {
Err(Error::InvalidInput {
message: "Cannot scan an empty Vec<RecordBatch>".to_string(),
})
}),
});
}
let schema = Scannable::schema(self);
let batches = self.clone();
let stream = futures::stream::iter(batches.into_iter().map(Ok));
Box::pin(SimpleRecordBatchStream { schema, stream })
}
fn num_rows(&self) -> Option<usize> {
Some(self.iter().map(|b| b.num_rows()).sum())
}
fn rescannable(&self) -> bool {
true
}
}
impl Scannable for Box<dyn RecordBatchReader + Send> {
fn schema(&self) -> SchemaRef {
RecordBatchReader::schema(self.as_ref())
}
fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
let schema = Scannable::schema(self);
// Swap self with a reader that errors on iteration, so a second call
// produces a clear error instead of silently returning empty data.
let err_reader: Box<dyn RecordBatchReader + Send> = Box::new(RecordBatchIterator::new(
vec![Err(ArrowError::InvalidArgumentError(
"Reader has already been consumed".into(),
))],
schema.clone(),
));
let reader = std::mem::replace(self, err_reader);
// Bridge the blocking RecordBatchReader to an async stream via a channel.
let (tx, rx) = tokio::sync::mpsc::channel::<crate::Result<RecordBatch>>(2);
tokio::task::spawn_blocking(move || {
for batch_result in reader {
let result = batch_result.map_err(Into::into);
if tx.blocking_send(result).is_err() {
break;
}
}
});
let stream = futures::stream::unfold(rx, |mut rx| async move {
rx.recv().await.map(|batch| (batch, rx))
})
.fuse();
Box::pin(SimpleRecordBatchStream { schema, stream })
}
}
impl Scannable for SendableRecordBatchStream {
fn schema(&self) -> SchemaRef {
self.as_ref().schema()
}
fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
let schema = Scannable::schema(self);
// Swap self with an error stream so a second call produces a clear error.
let error_stream = Box::pin(SimpleRecordBatchStream {
schema: schema.clone(),
stream: once(async {
Err(Error::InvalidInput {
message: "Stream has already been consumed".to_string(),
})
}),
});
std::mem::replace(self, error_stream)
}
}
#[async_trait]
impl StreamingWriteSource for Box<dyn Scannable> {
fn arrow_schema(&self) -> SchemaRef {
self.schema()
}
fn into_stream(mut self) -> datafusion_physical_plan::SendableRecordBatchStream {
self.scan_as_stream().into_df_stream()
}
}
/// A scannable that applies embeddings to the stream.
pub struct WithEmbeddingsScannable {
inner: Box<dyn Scannable>,
embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
output_schema: SchemaRef,
}
impl WithEmbeddingsScannable {
/// Create a new WithEmbeddingsScannable.
///
/// The embeddings are applied to the inner scannable's data as new columns.
pub fn try_new(
inner: Box<dyn Scannable>,
embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
) -> Result<Self> {
let output_schema = compute_output_schema(&inner.schema(), &embeddings)?;
// Build column definitions: Physical for base columns, Embedding for new ones
let base_col_count = inner.schema().fields().len();
let column_definitions: Vec<ColumnDefinition> = (0..base_col_count)
.map(|_| ColumnDefinition {
kind: ColumnKind::Physical,
})
.chain(embeddings.iter().map(|(ed, _)| ColumnDefinition {
kind: ColumnKind::Embedding(ed.clone()),
}))
.collect();
let table_definition = TableDefinition::new(output_schema, column_definitions);
let output_schema = table_definition.into_rich_schema();
Ok(Self {
inner,
embeddings,
output_schema,
})
}
}
impl Scannable for WithEmbeddingsScannable {
fn schema(&self) -> SchemaRef {
self.output_schema.clone()
}
fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
let inner_stream = self.inner.scan_as_stream();
let embeddings = self.embeddings.clone();
let output_schema = self.output_schema.clone();
let mapped_stream = inner_stream.then(move |batch_result| {
let embeddings = embeddings.clone();
async move {
let batch = batch_result?;
let result = tokio::task::spawn_blocking(move || {
compute_embeddings_for_batch(batch, &embeddings)
})
.await
.map_err(|e| Error::Runtime {
message: format!("Task panicked during embedding computation: {}", e),
})??;
Ok(result)
}
});
Box::pin(SimpleRecordBatchStream {
schema: output_schema,
stream: mapped_stream,
})
}
fn num_rows(&self) -> Option<usize> {
self.inner.num_rows()
}
fn rescannable(&self) -> bool {
self.inner.rescannable()
}
}
pub fn scannable_with_embeddings(
inner: Box<dyn Scannable>,
table_definition: &TableDefinition,
registry: Option<&Arc<dyn EmbeddingRegistry>>,
) -> Result<Box<dyn Scannable>> {
if let Some(registry) = registry {
let mut embeddings = Vec::with_capacity(table_definition.column_definitions.len());
for cd in table_definition.column_definitions.iter() {
if let ColumnKind::Embedding(embedding_def) = &cd.kind {
match registry.get(&embedding_def.embedding_name) {
Some(func) => {
embeddings.push((embedding_def.clone(), func));
}
None => {
return Err(Error::EmbeddingFunctionNotFound {
name: embedding_def.embedding_name.clone(),
reason: format!(
"Table was defined with an embedding column `{}` but no embedding function was found with that name within the registry.",
embedding_def.embedding_name
),
});
}
}
}
}
if !embeddings.is_empty() {
return Ok(Box::new(WithEmbeddingsScannable::try_new(
inner, embeddings,
)?));
}
}
Ok(inner)
}
#[cfg(test)]
mod tests {
use super::*;
use arrow_array::record_batch;
use futures::TryStreamExt;
#[tokio::test]
async fn test_record_batch_rescannable() {
let mut batch = record_batch!(("id", Int64, [0, 1, 2])).unwrap();
let stream1 = batch.scan_as_stream();
let batches1: Vec<RecordBatch> = stream1.try_collect().await.unwrap();
assert_eq!(batches1.len(), 1);
assert_eq!(batches1[0], batch);
assert!(batch.rescannable());
let stream2 = batch.scan_as_stream();
let batches2: Vec<RecordBatch> = stream2.try_collect().await.unwrap();
assert_eq!(batches2.len(), 1);
assert_eq!(batches2[0], batch);
}
#[tokio::test]
async fn test_vec_batch_rescannable() {
let mut batches = vec![
record_batch!(("id", Int64, [0, 1])).unwrap(),
record_batch!(("id", Int64, [2, 3, 4])).unwrap(),
];
let stream1 = batches.scan_as_stream();
let result1: Vec<RecordBatch> = stream1.try_collect().await.unwrap();
assert_eq!(result1.len(), 2);
assert_eq!(result1[0], batches[0]);
assert_eq!(result1[1], batches[1]);
assert!(batches.rescannable());
let stream2 = batches.scan_as_stream();
let result2: Vec<RecordBatch> = stream2.try_collect().await.unwrap();
assert_eq!(result2.len(), 2);
assert_eq!(result2[0], batches[0]);
assert_eq!(result2[1], batches[1]);
}
#[tokio::test]
async fn test_vec_batch_empty_errors() {
let mut empty: Vec<RecordBatch> = vec![];
let mut stream = empty.scan_as_stream();
let result = stream.next().await;
assert!(result.is_some());
assert!(result.unwrap().is_err());
}
#[tokio::test]
async fn test_reader_not_rescannable() {
let batch = record_batch!(("id", Int64, [0, 1, 2])).unwrap();
let schema = batch.schema();
let mut reader: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(
RecordBatchIterator::new(vec![Ok(batch.clone())], schema.clone()),
);
let stream1 = reader.scan_as_stream();
let result1: Vec<RecordBatch> = stream1.try_collect().await.unwrap();
assert_eq!(result1.len(), 1);
assert_eq!(result1[0], batch);
assert!(!reader.rescannable());
// Second call returns a stream whose first item is an error
let mut stream2 = reader.scan_as_stream();
let result2 = stream2.next().await;
assert!(result2.is_some());
assert!(result2.unwrap().is_err());
}
#[tokio::test]
async fn test_stream_not_rescannable() {
let batch = record_batch!(("id", Int64, [0, 1, 2])).unwrap();
let schema = batch.schema();
let inner_stream = futures::stream::iter(vec![Ok(batch.clone())]);
let mut stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
schema: schema.clone(),
stream: inner_stream,
});
let stream1 = stream.scan_as_stream();
let result1: Vec<RecordBatch> = stream1.try_collect().await.unwrap();
assert_eq!(result1.len(), 1);
assert_eq!(result1[0], batch);
assert!(!stream.rescannable());
// Second call returns a stream whose first item is an error
let mut stream2 = stream.scan_as_stream();
let result2 = stream2.next().await;
assert!(result2.is_some());
assert!(result2.unwrap().is_err());
}
mod embedding_tests {
use super::*;
use crate::embeddings::MemoryRegistry;
use crate::table::{ColumnDefinition, ColumnKind};
use crate::test_utils::embeddings::MockEmbed;
use arrow_array::Array as _;
use arrow_array::{ArrayRef, StringArray};
use arrow_schema::{DataType, Field, Schema};
#[tokio::test]
async fn test_with_embeddings_scannable() {
let schema = Arc::new(Schema::new(vec![Field::new("text", DataType::Utf8, false)]));
let text_array = StringArray::from(vec!["hello", "world", "test"]);
let batch =
RecordBatch::try_new(schema.clone(), vec![Arc::new(text_array) as ArrayRef])
.unwrap();
let mock_embedding: Arc<dyn EmbeddingFunction> = Arc::new(MockEmbed::new("mock", 4));
let embedding_def = EmbeddingDefinition::new("text", "mock", Some("text_embedding"));
let mut scannable = WithEmbeddingsScannable::try_new(
Box::new(batch.clone()),
vec![(embedding_def, mock_embedding)],
)
.unwrap();
// Check that schema has the embedding column
let output_schema = scannable.schema();
assert_eq!(output_schema.fields().len(), 2);
assert_eq!(output_schema.field(0).name(), "text");
assert_eq!(output_schema.field(1).name(), "text_embedding");
// Check num_rows and rescannable are preserved
assert_eq!(scannable.num_rows(), Some(3));
assert!(scannable.rescannable());
// Read the data
let stream = scannable.scan_as_stream();
let results: Vec<RecordBatch> = stream.try_collect().await.unwrap();
assert_eq!(results.len(), 1);
let result_batch = &results[0];
assert_eq!(result_batch.num_rows(), 3);
assert_eq!(result_batch.num_columns(), 2);
// Verify the embedding column is present and has the right shape
let embedding_col = result_batch.column(1);
assert_eq!(embedding_col.len(), 3);
}
#[tokio::test]
async fn test_maybe_embedded_scannable_no_embeddings() {
let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
// Create a table definition with no embedding columns
let table_def = TableDefinition::new_from_schema(batch.schema());
// Even with a registry, if there are no embedding columns, it's a passthrough
let registry: Arc<dyn EmbeddingRegistry> = Arc::new(MemoryRegistry::new());
let mut scannable =
scannable_with_embeddings(Box::new(batch.clone()), &table_def, Some(&registry))
.unwrap();
// Check that data passes through unchanged
let stream = scannable.scan_as_stream();
let results: Vec<RecordBatch> = stream.try_collect().await.unwrap();
assert_eq!(results.len(), 1);
assert_eq!(results[0], batch);
}
#[tokio::test]
async fn test_maybe_embedded_scannable_with_embeddings() {
let schema = Arc::new(Schema::new(vec![Field::new("text", DataType::Utf8, false)]));
let text_array = StringArray::from(vec!["hello", "world"]);
let batch =
RecordBatch::try_new(schema.clone(), vec![Arc::new(text_array) as ArrayRef])
.unwrap();
// Create a table definition with an embedding column
let embedding_def = EmbeddingDefinition::new("text", "mock", Some("text_embedding"));
let embedding_schema = Arc::new(Schema::new(vec![
Field::new("text", DataType::Utf8, false),
Field::new(
"text_embedding",
DataType::FixedSizeList(
Arc::new(Field::new("item", DataType::Float32, true)),
4,
),
false,
),
]));
let table_def = TableDefinition::new(
embedding_schema,
vec![
ColumnDefinition {
kind: ColumnKind::Physical,
},
ColumnDefinition {
kind: ColumnKind::Embedding(embedding_def.clone()),
},
],
);
// Register the mock embedding function
let registry: Arc<dyn EmbeddingRegistry> = Arc::new(MemoryRegistry::new());
let mock_embedding: Arc<dyn EmbeddingFunction> = Arc::new(MockEmbed::new("mock", 4));
registry.register("mock", mock_embedding).unwrap();
let mut scannable =
scannable_with_embeddings(Box::new(batch), &table_def, Some(&registry)).unwrap();
// Read and verify the data has embeddings
let stream = scannable.scan_as_stream();
let results: Vec<RecordBatch> = stream.try_collect().await.unwrap();
assert_eq!(results.len(), 1);
let result_batch = &results[0];
assert_eq!(result_batch.num_columns(), 2);
assert_eq!(result_batch.schema().field(1).name(), "text_embedding");
}
#[tokio::test]
async fn test_maybe_embedded_scannable_missing_function() {
let schema = Arc::new(Schema::new(vec![Field::new("text", DataType::Utf8, false)]));
let text_array = StringArray::from(vec!["hello"]);
let batch =
RecordBatch::try_new(schema.clone(), vec![Arc::new(text_array) as ArrayRef])
.unwrap();
// Create a table definition with an embedding column
let embedding_def =
EmbeddingDefinition::new("text", "nonexistent", Some("text_embedding"));
let embedding_schema = Arc::new(Schema::new(vec![
Field::new("text", DataType::Utf8, false),
Field::new(
"text_embedding",
DataType::FixedSizeList(
Arc::new(Field::new("item", DataType::Float32, true)),
4,
),
false,
),
]));
let table_def = TableDefinition::new(
embedding_schema,
vec![
ColumnDefinition {
kind: ColumnKind::Physical,
},
ColumnDefinition {
kind: ColumnKind::Embedding(embedding_def),
},
],
);
// Registry has no embedding functions registered
let registry: Arc<dyn EmbeddingRegistry> = Arc::new(MemoryRegistry::new());
let result = scannable_with_embeddings(Box::new(batch), &table_def, Some(&registry));
// Should fail because the embedding function is not found
assert!(result.is_err());
let err = result.err().unwrap();
assert!(
matches!(err, Error::EmbeddingFunctionNotFound { .. }),
"Expected EmbeddingFunctionNotFound"
);
}
}
}

View File

@@ -18,7 +18,12 @@ use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use arrow_array::RecordBatchReader;
use async_trait::async_trait;
use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
use futures::stream;
use lance::dataset::ReadParams;
use lance_datafusion::utils::StreamingWriteSource;
use lance_namespace::models::{
CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
DescribeNamespaceResponse, DropNamespaceRequest, DropNamespaceResponse, ListNamespacesRequest,
@@ -26,9 +31,9 @@ use lance_namespace::models::{
};
use lance_namespace::LanceNamespace;
use crate::data::scannable::Scannable;
use crate::arrow::{SendableRecordBatchStream, SendableRecordBatchStreamExt};
use crate::error::Result;
use crate::table::{BaseTable, WriteOptions};
use crate::table::{BaseTable, TableDefinition, WriteOptions};
pub mod listing;
pub mod namespace;
@@ -85,10 +90,8 @@ pub type TableBuilderCallback = Box<dyn FnOnce(OpenTableRequest) -> OpenTableReq
/// Describes what happens when creating a table and a table with
/// the same name already exists
#[derive(Default)]
pub enum CreateTableMode {
/// If the table already exists, an error is returned
#[default]
Create,
/// If the table already exists, it is opened. Any provided data is
/// ignored. The function will be passed an OpenTableBuilder to customize
@@ -106,14 +109,57 @@ impl CreateTableMode {
}
}
impl Default for CreateTableMode {
fn default() -> Self {
Self::Create
}
}
/// The initial data for a new table, or a table definition used to create an empty table
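///
/// A minimal construction sketch (assuming `batches: Vec<RecordBatch>` and a
/// matching `schema` are in scope):
///
/// ```ignore
/// let reader = Box::new(RecordBatchIterator::new(batches.into_iter().map(Ok), schema.clone()));
/// let with_data = CreateTableData::Data(reader);
/// let empty = CreateTableData::Empty(TableDefinition::new_from_schema(schema));
/// ```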
pub enum CreateTableData {
/// Creates a table from an iterator of data; the schema is obtained from the data
Data(Box<dyn RecordBatchReader + Send>),
/// Creates a table from a stream of data; the schema is obtained from the data
StreamingData(SendableRecordBatchStream),
/// Creates an empty table; the table definition (schema) must be provided separately
Empty(TableDefinition),
}
impl CreateTableData {
pub fn schema(&self) -> Arc<arrow_schema::Schema> {
match self {
Self::Data(reader) => reader.schema(),
Self::StreamingData(stream) => stream.schema(),
Self::Empty(definition) => definition.schema.clone(),
}
}
}
#[async_trait]
impl StreamingWriteSource for CreateTableData {
fn arrow_schema(&self) -> Arc<arrow_schema::Schema> {
self.schema()
}
fn into_stream(self) -> datafusion_physical_plan::SendableRecordBatchStream {
match self {
Self::Data(reader) => reader.into_stream(),
Self::StreamingData(stream) => stream.into_df_stream(),
Self::Empty(table_definition) => {
let schema = table_definition.schema.clone();
Box::pin(RecordBatchStreamAdapter::new(schema, stream::empty()))
}
}
}
}
/// A request to create a table
pub struct CreateTableRequest {
/// The name of the new table
pub name: String,
/// The namespace to create the table in. Empty list represents root namespace.
pub namespace: Vec<String>,
/// Initial data to write to the table, can be empty.
pub data: Box<dyn Scannable>,
/// Initial data to write to the table; use `CreateTableData::Empty` to create an empty table
pub data: CreateTableData,
/// The mode to use when creating the table
pub mode: CreateTableMode,
/// Options to use when writing data (only used when `data` contains initial data)
@@ -127,7 +173,7 @@ pub struct CreateTableRequest {
}
impl CreateTableRequest {
pub fn new(name: String, data: Box<dyn Scannable>) -> Self {
pub fn new(name: String, data: CreateTableData) -> Self {
Self {
name,
namespace: vec![],

View File

@@ -922,7 +922,7 @@ impl Database for ListingDatabase {
.with_read_params(read_params.clone())
.load()
.await
.map_err(|e| -> Error { e.into() })?;
.map_err(|e| Error::Lance { source: e })?;
let version_ref = match (request.source_version, request.source_tag) {
(Some(v), None) => Ok(Ref::Version(None, Some(v))),
@@ -937,7 +937,7 @@ impl Database for ListingDatabase {
source_dataset
.shallow_clone(&target_uri, version_ref, Some(storage_params))
.await
.map_err(|e| -> Error { e.into() })?;
.map_err(|e| Error::Lance { source: e })?;
let cloned_table = NativeTable::open_with_params(
&target_uri,
@@ -1098,10 +1098,8 @@ impl Database for ListingDatabase {
mod tests {
use super::*;
use crate::connection::ConnectRequest;
use crate::data::scannable::Scannable;
use crate::database::{CreateTableMode, CreateTableRequest};
use crate::table::WriteOptions;
use crate::Table;
use crate::database::{CreateTableData, CreateTableMode, CreateTableRequest, WriteOptions};
use crate::table::{Table, TableDefinition};
use arrow_array::{Int32Array, RecordBatch, StringArray};
use arrow_schema::{DataType, Field, Schema};
use std::path::PathBuf;
@@ -1141,7 +1139,7 @@ mod tests {
.create_table(CreateTableRequest {
name: "source_table".to_string(),
namespace: vec![],
data: Box::new(RecordBatch::new_empty(schema.clone())) as Box<dyn Scannable>,
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema.clone())),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,
@@ -1198,11 +1196,16 @@ mod tests {
)
.unwrap();
let reader = Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch)],
schema.clone(),
));
let source_table = db
.create_table(CreateTableRequest {
name: "source_with_data".to_string(),
namespace: vec![],
data: Box::new(batch) as Box<dyn Scannable>,
data: CreateTableData::Data(reader),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,
@@ -1261,7 +1264,7 @@ mod tests {
db.create_table(CreateTableRequest {
name: "source".to_string(),
namespace: vec![],
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,
@@ -1297,7 +1300,7 @@ mod tests {
db.create_table(CreateTableRequest {
name: "source".to_string(),
namespace: vec![],
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,
@@ -1337,7 +1340,7 @@ mod tests {
db.create_table(CreateTableRequest {
name: "source".to_string(),
namespace: vec![],
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,
@@ -1377,7 +1380,7 @@ mod tests {
db.create_table(CreateTableRequest {
name: "source".to_string(),
namespace: vec![],
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,
@@ -1432,7 +1435,7 @@ mod tests {
db.create_table(CreateTableRequest {
name: "source".to_string(),
namespace: vec![],
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,
@@ -1481,11 +1484,16 @@ mod tests {
)
.unwrap();
let reader = Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch1)],
schema.clone(),
));
let source_table = db
.create_table(CreateTableRequest {
name: "versioned_source".to_string(),
namespace: vec![],
data: Box::new(batch1) as Box<dyn Scannable>,
data: CreateTableData::Data(reader),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,
@@ -1509,7 +1517,14 @@ mod tests {
let db = Arc::new(db);
let source_table_obj = Table::new(source_table.clone(), db.clone());
source_table_obj.add(batch2).execute().await.unwrap();
source_table_obj
.add(Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch2)],
schema.clone(),
)))
.execute()
.await
.unwrap();
// Verify source table now has 4 rows
assert_eq!(source_table.count_rows(None).await.unwrap(), 4);
@@ -1555,11 +1570,16 @@ mod tests {
)
.unwrap();
let reader = Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch1)],
schema.clone(),
));
let source_table = db
.create_table(CreateTableRequest {
name: "tagged_source".to_string(),
namespace: vec![],
data: Box::new(batch1),
data: CreateTableData::Data(reader),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,
@@ -1587,7 +1607,14 @@ mod tests {
.unwrap();
let source_table_obj = Table::new(source_table.clone(), db.clone());
source_table_obj.add(batch2).execute().await.unwrap();
source_table_obj
.add(Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch2)],
schema.clone(),
)))
.execute()
.await
.unwrap();
// Source table should have 4 rows
assert_eq!(source_table.count_rows(None).await.unwrap(), 4);
@@ -1630,11 +1657,16 @@ mod tests {
)
.unwrap();
let reader = Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch1)],
schema.clone(),
));
let source_table = db
.create_table(CreateTableRequest {
name: "independent_source".to_string(),
namespace: vec![],
data: Box::new(batch1),
data: CreateTableData::Data(reader),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,
@@ -1674,7 +1706,14 @@ mod tests {
let db = Arc::new(db);
let cloned_table_obj = Table::new(cloned_table.clone(), db.clone());
cloned_table_obj.add(batch_clone).execute().await.unwrap();
cloned_table_obj
.add(Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch_clone)],
schema.clone(),
)))
.execute()
.await
.unwrap();
// Add different data to the source table
let batch_source = RecordBatch::try_new(
@@ -1687,7 +1726,14 @@ mod tests {
.unwrap();
let source_table_obj = Table::new(source_table.clone(), db);
source_table_obj.add(batch_source).execute().await.unwrap();
source_table_obj
.add(Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch_source)],
schema.clone(),
)))
.execute()
.await
.unwrap();
// Verify they have evolved independently
assert_eq!(source_table.count_rows(None).await.unwrap(), 4); // 2 + 2
@@ -1705,11 +1751,16 @@ mod tests {
RecordBatch::try_new(schema.clone(), vec![Arc::new(Int32Array::from(vec![1, 2]))])
.unwrap();
let reader = Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch1)],
schema.clone(),
));
let source_table = db
.create_table(CreateTableRequest {
name: "latest_version_source".to_string(),
namespace: vec![],
data: Box::new(batch1),
data: CreateTableData::Data(reader),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,
@@ -1728,7 +1779,14 @@ mod tests {
.unwrap();
let source_table_obj = Table::new(source_table.clone(), db.clone());
source_table_obj.add(batch).execute().await.unwrap();
source_table_obj
.add(Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch)],
schema.clone(),
)))
.execute()
.await
.unwrap();
}
// Source should have 8 rows total (2 + 2 + 2 + 2)
@@ -1791,11 +1849,16 @@ mod tests {
)
.unwrap();
let reader = Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch)],
schema.clone(),
));
let table = db
.create_table(CreateTableRequest {
name: "test_stable".to_string(),
namespace: vec![],
data: Box::new(batch),
data: CreateTableData::Data(reader),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,
@@ -1824,6 +1887,11 @@ mod tests {
)
.unwrap();
let reader = Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch)],
schema.clone(),
));
let mut storage_options = HashMap::new();
storage_options.insert(
OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
@@ -1846,7 +1914,7 @@ mod tests {
.create_table(CreateTableRequest {
name: "test_stable_table_level".to_string(),
namespace: vec![],
data: Box::new(batch),
data: CreateTableData::Data(reader),
mode: CreateTableMode::Create,
write_options,
location: None,
@@ -1895,6 +1963,11 @@ mod tests {
)
.unwrap();
let reader = Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch)],
schema.clone(),
));
let mut storage_options = HashMap::new();
storage_options.insert(
OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
@@ -1917,7 +1990,7 @@ mod tests {
.create_table(CreateTableRequest {
name: "test_override".to_string(),
namespace: vec![],
data: Box::new(batch),
data: CreateTableData::Data(reader),
mode: CreateTableMode::Create,
write_options,
location: None,
@@ -2035,7 +2108,7 @@ mod tests {
db.create_table(CreateTableRequest {
name: "table1".to_string(),
namespace: vec![],
data: Box::new(RecordBatch::new_empty(schema.clone())) as Box<dyn Scannable>,
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema.clone())),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,
@@ -2047,7 +2120,7 @@ mod tests {
db.create_table(CreateTableRequest {
name: "table2".to_string(),
namespace: vec![],
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
mode: CreateTableMode::Create,
write_options: Default::default(),
location: None,

View File

@@ -7,7 +7,6 @@ use std::collections::HashMap;
use std::sync::Arc;
use async_trait::async_trait;
use lance_io::object_store::{ObjectStoreParams, StorageOptionsAccessor};
use lance_namespace::{
models::{
CreateEmptyTableRequest, CreateNamespaceRequest, CreateNamespaceResponse,
@@ -213,75 +212,45 @@ impl Database for LanceNamespaceDatabase {
..Default::default()
};
let (location, initial_storage_options) =
match self.namespace.declare_table(declare_request).await {
Ok(response) => {
let loc = response.location.ok_or_else(|| Error::Runtime {
message: "Table location is missing from declare_table response"
.to_string(),
})?;
// Use storage options from response, fall back to self.storage_options
let opts = response
.storage_options
.or_else(|| Some(self.storage_options.clone()))
.filter(|o| !o.is_empty());
(loc, opts)
}
Err(e) => {
// Check if the error is "not supported" and try create_empty_table as fallback
let err_str = e.to_string().to_lowercase();
if err_str.contains("not supported") || err_str.contains("not implemented") {
warn!(
"declare_table is not supported by the namespace client, \
let location = match self.namespace.declare_table(declare_request).await {
Ok(response) => response.location.ok_or_else(|| Error::Runtime {
message: "Table location is missing from declare_table response".to_string(),
})?,
Err(e) => {
// Check if the error is "not supported" and try create_empty_table as fallback
let err_str = e.to_string().to_lowercase();
if err_str.contains("not supported") || err_str.contains("not implemented") {
warn!(
"declare_table is not supported by the namespace client, \
falling back to deprecated create_empty_table. \
create_empty_table is deprecated and will be removed in Lance 3.0.0. \
Please upgrade your namespace client to support declare_table."
);
#[allow(deprecated)]
let create_empty_request = CreateEmptyTableRequest {
id: Some(table_id.clone()),
..Default::default()
};
);
#[allow(deprecated)]
let create_empty_request = CreateEmptyTableRequest {
id: Some(table_id.clone()),
..Default::default()
};
#[allow(deprecated)]
let create_response = self
.namespace
.create_empty_table(create_empty_request)
.await
.map_err(|e| Error::Runtime {
message: format!("Failed to create empty table: {}", e),
})?;
let loc = create_response.location.ok_or_else(|| Error::Runtime {
message: "Table location is missing from create_empty_table response"
.to_string(),
#[allow(deprecated)]
let create_response = self
.namespace
.create_empty_table(create_empty_request)
.await
.map_err(|e| Error::Runtime {
message: format!("Failed to create empty table: {}", e),
})?;
// For deprecated path, use self.storage_options
let opts = if self.storage_options.is_empty() {
None
} else {
Some(self.storage_options.clone())
};
(loc, opts)
} else {
return Err(Error::Runtime {
message: format!("Failed to declare table: {}", e),
});
}
}
};
let write_params = if let Some(storage_opts) = initial_storage_options {
let mut params = request.write_options.lance_write_params.unwrap_or_default();
let store_params = params
.store_params
.get_or_insert_with(ObjectStoreParams::default);
store_params.storage_options_accessor = Some(Arc::new(
StorageOptionsAccessor::with_static_options(storage_opts),
));
Some(params)
} else {
request.write_options.lance_write_params
create_response.location.ok_or_else(|| Error::Runtime {
message: "Table location is missing from create_empty_table response"
.to_string(),
})?
} else {
return Err(Error::Runtime {
message: format!("Failed to declare table: {}", e),
});
}
}
};
let native_table = NativeTable::create_from_namespace(
@@ -291,7 +260,7 @@ impl Database for LanceNamespaceDatabase {
request.namespace.clone(),
request.data,
None, // write_store_wrapper not used for namespace connections
write_params,
request.write_options.lance_write_params,
self.read_consistency_interval,
self.server_side_query_enabled,
self.session.clone(),
@@ -385,13 +354,15 @@ mod tests {
use super::*;
use crate::connect_namespace;
use crate::query::ExecutableQuery;
use arrow_array::{Int32Array, RecordBatch, StringArray};
use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, StringArray};
use arrow_schema::{DataType, Field, Schema};
use futures::TryStreamExt;
use tempfile::tempdir;
/// Helper function to create test data
fn create_test_data() -> RecordBatch {
fn create_test_data() -> RecordBatchIterator<
std::vec::IntoIter<std::result::Result<RecordBatch, arrow_schema::ArrowError>>,
> {
let schema = Arc::new(Schema::new(vec![
Field::new("id", DataType::Int32, false),
Field::new("name", DataType::Utf8, false),
@@ -400,7 +371,12 @@ mod tests {
let id_array = Int32Array::from(vec![1, 2, 3, 4, 5]);
let name_array = StringArray::from(vec!["Alice", "Bob", "Charlie", "David", "Eve"]);
RecordBatch::try_new(schema, vec![Arc::new(id_array), Arc::new(name_array)]).unwrap()
let batch = RecordBatch::try_new(
schema.clone(),
vec![Arc::new(id_array), Arc::new(name_array)],
)
.unwrap();
RecordBatchIterator::new(vec![std::result::Result::Ok(batch)].into_iter(), schema)
}
#[tokio::test]
@@ -642,7 +618,13 @@ mod tests {
// Test: Overwrite the table
let table2 = conn
.create_table("overwrite_test", test_data2)
.create_table(
"overwrite_test",
RecordBatchIterator::new(
vec![std::result::Result::Ok(test_data2)].into_iter(),
schema,
),
)
.namespace(vec!["test_ns".into()])
.mode(CreateTableMode::Overwrite)
.execute()

View File

@@ -13,7 +13,7 @@ use lance_datafusion::exec::SessionContextExt;
use crate::{
arrow::{SendableRecordBatchStream, SendableRecordBatchStreamExt, SimpleRecordBatchStream},
connect,
database::{CreateTableRequest, Database},
database::{CreateTableData, CreateTableRequest, Database},
dataloader::permutation::{
shuffle::{Shuffler, ShufflerConfig},
split::{SplitStrategy, Splitter, SPLIT_ID_COLUMN},
@@ -57,7 +57,7 @@ pub struct PermutationConfig {
}
/// Strategy for shuffling the data.
#[derive(Debug, Clone, Default)]
#[derive(Debug, Clone)]
pub enum ShuffleStrategy {
/// The data is randomly shuffled
///
@@ -78,10 +78,15 @@ pub enum ShuffleStrategy {
/// The data is not shuffled
///
/// This is useful for debugging and testing.
#[default]
None,
}
impl Default for ShuffleStrategy {
fn default() -> Self {
Self::None
}
}
/// Builder for creating a permutation table.
///
/// A permutation table is a table that stores split assignments and a shuffled order of rows. This
@@ -308,8 +313,10 @@ impl PermutationBuilder {
}
};
let create_table_request =
CreateTableRequest::new(name.to_string(), Box::new(streaming_data));
let create_table_request = CreateTableRequest::new(
name.to_string(),
CreateTableData::StreamingData(streaming_data),
);
let table = database.create_table(create_table_request).await?;
@@ -340,7 +347,7 @@ mod tests {
.col("col_b", lance_datagen::array::step::<Int32Type>())
.into_ldb_stream(RowCount::from(100), BatchCount::from(10));
let data_table = db
.create_table("base_tbl", initial_data)
.create_table_streaming("base_tbl", initial_data)
.execute()
.await
.unwrap();
@@ -380,7 +387,7 @@ mod tests {
.col("some_value", lance_datagen::array::step::<Int32Type>())
.into_ldb_stream(RowCount::from(100), BatchCount::from(10));
let data_table = db
.create_table("mytbl", initial_data)
.create_table_streaming("mytbl", initial_data)
.execute()
.await
.unwrap();

View File

@@ -27,10 +27,9 @@ use crate::{
pub const SPLIT_ID_COLUMN: &str = "split_id";
/// Strategy for assigning rows to splits
#[derive(Debug, Clone, Default)]
#[derive(Debug, Clone)]
pub enum SplitStrategy {
/// All rows will have split id 0
#[default]
NoSplit,
/// Rows will be randomly assigned to splits
///
@@ -74,6 +73,15 @@ pub enum SplitStrategy {
Calculated { calculation: String },
}
// The default is not to split the data
//
// All data will be assigned to a single split.
impl Default for SplitStrategy {
fn default() -> Self {
Self::NoSplit
}
}
impl SplitStrategy {
pub fn validate(&self, num_rows: u64) -> Result<()> {
match self {

View File

@@ -18,7 +18,7 @@ use std::{
};
use arrow_array::{Array, RecordBatch, RecordBatchReader};
use arrow_schema::{DataType, Field, SchemaBuilder, SchemaRef};
use arrow_schema::{DataType, Field, SchemaBuilder};
// use async_trait::async_trait;
use serde::{Deserialize, Serialize};
@@ -190,112 +190,6 @@ impl<R: RecordBatchReader> WithEmbeddings<R> {
}
}
/// Compute embedding arrays for a batch.
///
/// When multiple embedding functions are defined, they are computed in parallel using
/// scoped threads. For a single embedding function, computation is done inline.
fn compute_embedding_arrays(
batch: &RecordBatch,
embeddings: &[(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)],
) -> Result<Vec<Arc<dyn Array>>> {
if embeddings.len() == 1 {
let (fld, func) = &embeddings[0];
let src_column =
batch
.column_by_name(&fld.source_column)
.ok_or_else(|| Error::InvalidInput {
message: format!("Source column '{}' not found", fld.source_column),
})?;
return Ok(vec![func.compute_source_embeddings(src_column.clone())?]);
}
// Parallel path: multiple embeddings
std::thread::scope(|s| {
let handles: Vec<_> = embeddings
.iter()
.map(|(fld, func)| {
let src_column = batch.column_by_name(&fld.source_column).ok_or_else(|| {
Error::InvalidInput {
message: format!("Source column '{}' not found", fld.source_column),
}
})?;
let handle = s.spawn(move || func.compute_source_embeddings(src_column.clone()));
Ok(handle)
})
.collect::<Result<_>>()?;
handles
.into_iter()
.map(|h| {
h.join().map_err(|e| Error::Runtime {
message: format!("Thread panicked during embedding computation: {:?}", e),
})?
})
.collect()
})
}
/// Compute the output schema when embeddings are applied to a base schema.
///
/// This returns the schema with embedding columns appended.
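///
/// For example (illustrative): a base schema of `{"text": Utf8}` combined with an
/// `EmbeddingDefinition` whose `dest_column` is `None` and whose function embeds into
/// `FixedSizeList<Float32, 4>` yields `{"text": Utf8, "text_embedding": FixedSizeList<Float32, 4>}`.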
pub fn compute_output_schema(
base_schema: &SchemaRef,
embeddings: &[(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)],
) -> Result<SchemaRef> {
let mut sb: SchemaBuilder = base_schema.as_ref().into();
for (ed, func) in embeddings {
let src_field = base_schema
.field_with_name(&ed.source_column)
.map_err(|_| Error::InvalidInput {
message: format!("Source column '{}' not found in schema", ed.source_column),
})?;
let field_name = ed
.dest_column
.clone()
.unwrap_or_else(|| format!("{}_embedding", &ed.source_column));
sb.push(Field::new(
field_name,
func.dest_type()?.into_owned(),
src_field.is_nullable(),
));
}
Ok(Arc::new(sb.finish()))
}
/// Compute embeddings for a batch and append as new columns.
///
/// This function computes embeddings using the provided embedding functions and
/// appends them as new columns to the batch.
pub fn compute_embeddings_for_batch(
batch: RecordBatch,
embeddings: &[(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)],
) -> Result<RecordBatch> {
let embedding_arrays = compute_embedding_arrays(&batch, embeddings)?;
let mut result = batch;
for ((fld, _), embedding) in embeddings.iter().zip(embedding_arrays.iter()) {
let dst_field_name = fld
.dest_column
.clone()
.unwrap_or_else(|| format!("{}_embedding", &fld.source_column));
let dst_field = Field::new(
dst_field_name,
embedding.data_type().clone(),
embedding.nulls().is_some(),
);
result = result.try_with_column(dst_field, embedding.clone())?;
}
Ok(result)
}
impl<R: RecordBatchReader> WithEmbeddings<R> {
fn dest_fields(&self) -> Result<Vec<Field>> {
let schema = self.inner.schema();
@@ -346,6 +240,48 @@ impl<R: RecordBatchReader> WithEmbeddings<R> {
column_definitions,
})
}
fn compute_embeddings_parallel(&self, batch: &RecordBatch) -> Result<Vec<Arc<dyn Array>>> {
if self.embeddings.len() == 1 {
let (fld, func) = &self.embeddings[0];
let src_column =
batch
.column_by_name(&fld.source_column)
.ok_or_else(|| Error::InvalidInput {
message: format!("Source column '{}' not found", fld.source_column),
})?;
return Ok(vec![func.compute_source_embeddings(src_column.clone())?]);
}
// Parallel path: multiple embeddings
std::thread::scope(|s| {
let handles: Vec<_> = self
.embeddings
.iter()
.map(|(fld, func)| {
let src_column = batch.column_by_name(&fld.source_column).ok_or_else(|| {
Error::InvalidInput {
message: format!("Source column '{}' not found", fld.source_column),
}
})?;
let handle =
s.spawn(move || func.compute_source_embeddings(src_column.clone()));
Ok(handle)
})
.collect::<Result<_>>()?;
handles
.into_iter()
.map(|h| {
h.join().map_err(|e| Error::Runtime {
message: format!("Thread panicked during embedding computation: {:?}", e),
})?
})
.collect()
})
}
}
impl<R: RecordBatchReader> Iterator for MaybeEmbedded<R> {
@@ -373,13 +309,37 @@ impl<R: RecordBatchReader> Iterator for WithEmbeddings<R> {
fn next(&mut self) -> Option<Self::Item> {
let batch = self.inner.next()?;
match batch {
Ok(batch) => match compute_embeddings_for_batch(batch, &self.embeddings) {
Ok(batch_with_embeddings) => Some(Ok(batch_with_embeddings)),
Err(e) => Some(Err(arrow_schema::ArrowError::ComputeError(format!(
"Error computing embedding: {}",
e
)))),
},
Ok(batch) => {
let embeddings = match self.compute_embeddings_parallel(&batch) {
Ok(emb) => emb,
Err(e) => {
return Some(Err(arrow_schema::ArrowError::ComputeError(format!(
"Error computing embedding: {}",
e
))))
}
};
let mut batch = batch;
for ((fld, _), embedding) in self.embeddings.iter().zip(embeddings.iter()) {
let dst_field_name = fld
.dest_column
.clone()
.unwrap_or_else(|| format!("{}_embedding", &fld.source_column));
let dst_field = Field::new(
dst_field_name,
embedding.data_type().clone(),
embedding.nulls().is_some(),
);
match batch.try_with_column(dst_field.clone(), embedding.clone()) {
Ok(b) => batch = b,
Err(e) => return Some(Err(e)),
};
}
Some(Ok(batch))
}
Err(e) => Some(Err(e)),
}
}

View File

@@ -6,7 +6,7 @@ use std::sync::PoisonError;
use arrow_schema::ArrowError;
use snafu::Snafu;
pub(crate) type BoxError = Box<dyn std::error::Error + Send + Sync>;
type BoxError = Box<dyn std::error::Error + Send + Sync>;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
@@ -80,9 +80,6 @@ pub enum Error {
Arrow { source: ArrowError },
#[snafu(display("LanceDBError: not supported: {message}"))]
NotSupported { message: String },
/// External error passed through from user code.
#[snafu(transparent)]
External { source: BoxError },
#[snafu(whatever, display("{message}"))]
Other {
message: String,
@@ -95,26 +92,15 @@ pub type Result<T> = std::result::Result<T, Error>;
impl From<ArrowError> for Error {
fn from(source: ArrowError) -> Self {
match source {
ArrowError::ExternalError(source) => match source.downcast::<Self>() {
Ok(e) => *e,
Err(source) => Self::External { source },
},
_ => Self::Arrow { source },
}
Self::Arrow { source }
}
}
impl From<lance::Error> for Error {
fn from(source: lance::Error) -> Self {
// Try to unwrap external errors that were wrapped by lance
match source {
lance::Error::Wrapped { error, .. } => match error.downcast::<Self>() {
Ok(e) => *e,
Err(source) => Self::External { source },
},
_ => Self::Lance { source },
}
// TODO: Once Lance is changed to preserve ObjectStore, DataFusion, and Arrow errors, we can
// pass those variants through here as well.
Self::Lance { source }
}
}

View File

@@ -195,11 +195,6 @@ mod test {
table::WriteOptions,
};
// This test is ignored because lance 3.0 introduced LocalWriter optimization
// that bypasses the object store wrapper for local writes. The mirroring feature
// still works for remote/cloud storage, but can't be tested with local storage.
// See lance commit c878af433 "perf: create local writer for efficient local writes"
#[ignore]
#[tokio::test]
async fn test_e2e() {
let dir1 = tempfile::tempdir().unwrap().keep().canonicalize().unwrap();
@@ -223,9 +218,8 @@ mod test {
datagen = datagen.col(Box::<IncrementingInt32>::default());
datagen = datagen.col(Box::new(RandomVector::default().named("vector".into())));
let data: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(datagen.batch(100));
let res = db
.create_table("test", data)
.create_table("test", Box::new(datagen.batch(100)))
.write_options(WriteOptions {
lance_write_params: Some(param),
})
@@ -255,38 +249,32 @@ mod test {
let primary_location = dir1.join("test.lance").canonicalize().unwrap();
let secondary_location = dir2.join(primary_location.strip_prefix("/").unwrap());
// Skip lance internal directories (_versions, _transactions) and manifest files
let should_skip = |path: &std::path::Path| -> bool {
let path_str = path.to_str().unwrap();
path_str.contains("_latest.manifest")
|| path_str.contains("_versions")
|| path_str.contains("_transactions")
};
let mut primary_iter = WalkDir::new(&primary_location).into_iter();
let mut secondary_iter = WalkDir::new(&secondary_location).into_iter();
let primary_files: Vec<_> = WalkDir::new(&primary_location)
.into_iter()
.filter_entry(|e| !should_skip(e.path()))
.filter_map(|e| e.ok())
.map(|e| {
e.path()
.strip_prefix(&primary_location)
.unwrap()
.to_path_buf()
})
.collect();
let mut primary_elem = primary_iter.next();
let mut secondary_elem = secondary_iter.next();
let secondary_files: Vec<_> = WalkDir::new(&secondary_location)
.into_iter()
.filter_entry(|e| !should_skip(e.path()))
.filter_map(|e| e.ok())
.map(|e| {
e.path()
.strip_prefix(&secondary_location)
.unwrap()
.to_path_buf()
})
.collect();
loop {
if primary_elem.is_none() && secondary_elem.is_none() {
break;
}
// primary has more data than secondary, so it should not run out before secondary
let primary_f = primary_elem.unwrap().unwrap();
// hit manifest, skip, _versions contains all the manifest and should not exist on secondary
let primary_raw_path = primary_f.file_name().to_str().unwrap();
if primary_raw_path.contains("_latest.manifest") {
primary_elem = primary_iter.next();
continue;
}
let secondary_f = secondary_elem.unwrap().unwrap();
assert_eq!(
primary_f.path().strip_prefix(&primary_location),
secondary_f.path().strip_prefix(&secondary_location)
);
assert_eq!(primary_files, secondary_files, "File lists should match");
primary_elem = primary_iter.next();
secondary_elem = secondary_iter.next();
}
}
}

View File

@@ -12,10 +12,10 @@ use arrow_schema::Schema;
use crate::{Error, Result};
/// Convert an Arrow IPC file to a batch reader
pub fn ipc_file_to_batches(buf: Vec<u8>) -> Result<Box<dyn RecordBatchReader + Send>> {
pub fn ipc_file_to_batches(buf: Vec<u8>) -> Result<impl RecordBatchReader> {
let buf_reader = Cursor::new(buf);
let reader = FileReader::try_new(buf_reader, None)?;
Ok(Box::new(reader))
Ok(reader)
}
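// Illustrative usage (not from the source): iterate the returned reader to consume the batches.
// for batch in ipc_file_to_batches(ipc_bytes)? {
//     let batch = batch?;
//     // ... use batch ...
// }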
/// Convert record batches to Arrow IPC file

View File

@@ -39,6 +39,7 @@
//! #### Connect to a database.
//!
//! ```rust
//! # use arrow_schema::{Field, Schema};
//! # tokio::runtime::Runtime::new().unwrap().block_on(async {
//! let db = lancedb::connect("data/sample-lancedb").execute().await.unwrap();
//! # });
@@ -73,10 +74,7 @@
//!
//! #### Create a table
//!
//! To create a Table, you need to provide an [`arrow_array::RecordBatch`]. The
//! schema of the `RecordBatch` determines the schema of the table.
//!
//! Vector columns should be represented as `FixedSizeList<Float16/Float32>` data type.
//! To create a Table, you need to provide an [`arrow_schema::Schema`] and an [`arrow_array::RecordBatch`] stream.
//!
//! ```rust
//! # use std::sync::Arc;
@@ -87,29 +85,34 @@
//! # tokio::runtime::Runtime::new().unwrap().block_on(async {
//! # let tmpdir = tempfile::tempdir().unwrap();
//! # let db = lancedb::connect(tmpdir.path().to_str().unwrap()).execute().await.unwrap();
//! let ndims = 128;
//! let schema = Arc::new(Schema::new(vec![
//! Field::new("id", DataType::Int32, false),
//! Field::new(
//! "vector",
//! DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float32, true)), ndims),
//! DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float32, true)), 128),
//! true,
//! ),
//! ]));
//! let data = RecordBatch::try_new(
//! // Create a RecordBatch stream.
//! let batches = RecordBatchIterator::new(
//! vec![RecordBatch::try_new(
//! schema.clone(),
//! vec![
//! Arc::new(Int32Array::from_iter_values(0..256)),
//! Arc::new(
//! FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
//! (0..256).map(|_| Some(vec![Some(1.0); ndims as usize])),
//! ndims,
//! (0..256).map(|_| Some(vec![Some(1.0); 128])),
//! 128,
//! ),
//! ),
//! ],
//! )
//! .unwrap();
//! db.create_table("my_table", data)
//! .unwrap()]
//! .into_iter()
//! .map(Ok),
//! schema.clone(),
//! );
//! db.create_table("my_table", Box::new(batches))
//! .execute()
//! .await
//! .unwrap();
@@ -148,18 +151,42 @@
//! #### Open table and search
//!
//! ```rust
//! # use std::sync::Arc;
//! # use futures::TryStreamExt;
//! # use arrow_schema::{DataType, Schema, Field};
//! # use arrow_array::{RecordBatch, RecordBatchIterator};
//! # use arrow_array::{FixedSizeListArray, Float32Array, Int32Array, types::Float32Type};
//! # use lancedb::query::{ExecutableQuery, QueryBase};
//! # async fn example(table: &lancedb::Table) -> lancedb::Result<()> {
//! # tokio::runtime::Runtime::new().unwrap().block_on(async {
//! # let tmpdir = tempfile::tempdir().unwrap();
//! # let db = lancedb::connect(tmpdir.path().to_str().unwrap()).execute().await.unwrap();
//! # let schema = Arc::new(Schema::new(vec![
//! # Field::new("id", DataType::Int32, false),
//! # Field::new("vector", DataType::FixedSizeList(
//! # Arc::new(Field::new("item", DataType::Float32, true)), 128), true),
//! # ]));
//! # let batches = RecordBatchIterator::new(vec![
//! # RecordBatch::try_new(schema.clone(),
//! # vec![
//! # Arc::new(Int32Array::from_iter_values(0..10)),
//! # Arc::new(FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
//! # (0..10).map(|_| Some(vec![Some(1.0); 128])), 128)),
//! # ]).unwrap()
//! # ].into_iter().map(Ok),
//! # schema.clone());
//! # db.create_table("my_table", Box::new(batches)).execute().await.unwrap();
//! # let table = db.open_table("my_table").execute().await.unwrap();
//! let results = table
//! .query()
//! .nearest_to(&[1.0; 128])?
//! .nearest_to(&[1.0; 128])
//! .unwrap()
//! .execute()
//! .await?
//! .await
//! .unwrap()
//! .try_collect::<Vec<_>>()
//! .await?;
//! # Ok(())
//! # }
//! .await
//! .unwrap();
//! # });
//! ```
pub mod arrow;
@@ -192,14 +219,13 @@ pub use error::{Error, Result};
use lance_linalg::distance::DistanceType as LanceDistanceType;
pub use table::Table;
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize, Default)]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[non_exhaustive]
#[serde(rename_all = "lowercase")]
pub enum DistanceType {
/// Euclidean distance. This is a very common distance metric that
/// accounts for both magnitude and direction when determining the distance
/// between vectors. l2 distance has a range of [0, ∞).
#[default]
L2,
/// Cosine distance. Cosine distance is a distance metric
/// calculated from the cosine similarity between two vectors. Cosine
@@ -221,6 +247,12 @@ pub enum DistanceType {
Hamming,
}
impl Default for DistanceType {
fn default() -> Self {
Self::L2
}
}
impl From<DistanceType> for LanceDistanceType {
fn from(value: DistanceType) -> Self {
match value {

View File

@@ -1381,7 +1381,7 @@ mod tests {
use arrow::{array::downcast_array, compute::concat_batches, datatypes::Int32Type};
use arrow_array::{
cast::AsArray, types::Float32Type, FixedSizeListArray, Float32Array, Int32Array,
RecordBatch, StringArray,
RecordBatch, RecordBatchIterator, RecordBatchReader, StringArray,
};
use arrow_schema::{DataType, Field as ArrowField, Schema as ArrowSchema};
use futures::{StreamExt, TryStreamExt};
@@ -1402,7 +1402,7 @@ mod tests {
let batches = make_test_batches();
let conn = connect(uri).execute().await.unwrap();
let table = conn
.create_table("my_table", batches)
.create_table("my_table", Box::new(batches))
.execute()
.await
.unwrap();
@@ -1463,7 +1463,7 @@ mod tests {
let batches = make_non_empty_batches();
let conn = connect(uri).execute().await.unwrap();
let table = conn
.create_table("my_table", batches)
.create_table("my_table", Box::new(batches))
.execute()
.await
.unwrap();
@@ -1525,7 +1525,7 @@ mod tests {
let batches = make_non_empty_batches();
let conn = connect(uri).execute().await.unwrap();
let table = conn
.create_table("my_table", batches)
.create_table("my_table", Box::new(batches))
.execute()
.await
.unwrap();
@@ -1578,7 +1578,7 @@ mod tests {
let batches = make_non_empty_batches();
let conn = connect(uri).execute().await.unwrap();
let table = conn
.create_table("my_table", batches)
.create_table("my_table", Box::new(batches))
.execute()
.await
.unwrap();
@@ -1599,13 +1599,13 @@ mod tests {
assert!(result.is_err());
}
fn make_non_empty_batches() -> Box<dyn arrow_array::RecordBatchReader + Send> {
fn make_non_empty_batches() -> impl RecordBatchReader + Send + 'static {
let vec = Box::new(RandomVector::new().named("vector".to_string()));
let id = Box::new(IncrementingInt32::new().named("id".to_string()));
Box::new(BatchGenerator::new().col(vec).col(id).batch(512))
BatchGenerator::new().col(vec).col(id).batch(512)
}
fn make_test_batches() -> RecordBatch {
fn make_test_batches() -> impl RecordBatchReader + Send + 'static {
let dim: usize = 128;
let schema = Arc::new(ArrowSchema::new(vec![
ArrowField::new("key", DataType::Int32, false),
@@ -1619,7 +1619,12 @@ mod tests {
),
ArrowField::new("uri", DataType::Utf8, true),
]));
RecordBatch::new_empty(schema)
RecordBatchIterator::new(
vec![RecordBatch::new_empty(schema.clone())]
.into_iter()
.map(Ok),
schema,
)
}
async fn make_test_table(tmp_dir: &tempfile::TempDir) -> Table {
@@ -1628,7 +1633,7 @@ mod tests {
let batches = make_non_empty_batches();
let conn = connect(uri).execute().await.unwrap();
conn.create_table("my_table", batches)
conn.create_table("my_table", Box::new(batches))
.execute()
.await
.unwrap()
@@ -1857,8 +1862,10 @@ mod tests {
let record_batch =
RecordBatch::try_new(schema.clone(), vec![Arc::new(text), Arc::new(vector)]).unwrap();
let record_batch_iter =
RecordBatchIterator::new(vec![record_batch].into_iter().map(Ok), schema.clone());
let table = conn
.create_table("my_table", record_batch)
.create_table("my_table", record_batch_iter)
.execute()
.await
.unwrap();
@@ -1942,8 +1949,10 @@ mod tests {
],
)
.unwrap();
let record_batch_iter =
RecordBatchIterator::new(vec![record_batch].into_iter().map(Ok), schema.clone());
let table = conn
.create_table("my_table", record_batch)
.create_table("my_table", record_batch_iter)
.mode(CreateTableMode::Overwrite)
.execute()
.await
@@ -2053,6 +2062,8 @@ mod tests {
async fn test_pagination_with_fts() {
let db = connect("memory://test").execute().await.unwrap();
let data = fts_test_data(400);
let schema = data.schema();
let data = RecordBatchIterator::new(vec![Ok(data)], schema);
let table = db.create_table("test_table", data).execute().await.unwrap();
table

View File

@@ -438,21 +438,26 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
);
}
if let Some(v) = options.0.get("account_name") {
headers.insert(
HeaderName::from_static("x-azure-storage-account-name"),
HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
message: format!("non-ascii storage account name '{}' provided", db_name),
})?,
);
}
if let Some(v) = options.0.get("azure_storage_account_name") {
headers.insert(
HeaderName::from_static("x-azure-storage-account-name"),
HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
message: format!("non-ascii storage account name '{}' provided", db_name),
})?,
);
// Map storage options to HTTP headers for Azure configuration.
const OPTION_TO_HEADER: &[(&str, &str)] = &[
("account_name", "x-azure-storage-account-name"),
("azure_storage_account_name", "x-azure-storage-account-name"),
("azure_tenant_id", "x-azure-tenant-id"),
("azure_client_id", "x-azure-client-id"),
(
"azure_federated_token_file",
"x-azure-federated-token-file",
),
];
for (opt_key, header_name) in OPTION_TO_HEADER {
if let Some(v) = options.get(opt_key) {
headers.insert(
HeaderName::from_static(header_name),
HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
message: format!("non-ascii value for '{}' provided", opt_key),
})?,
);
}
}
for (key, value) in &config.extra_headers {
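Sketched below is the effect of the option-to-header mapping above for a multi-tenant Azure configuration. The helper name azure_headers_from_options is hypothetical and written only for illustration; the option keys and header names are the ones listed in OPTION_TO_HEADER.

use std::collections::HashMap;
use reqwest::header::{HeaderMap, HeaderName, HeaderValue};

fn azure_headers_from_options(options: &HashMap<String, String>) -> HeaderMap {
    // Same key -> header translation as OPTION_TO_HEADER above.
    let mapping = [
        ("azure_storage_account_name", "x-azure-storage-account-name"),
        ("azure_tenant_id", "x-azure-tenant-id"),
        ("azure_client_id", "x-azure-client-id"),
    ];
    let mut headers = HeaderMap::new();
    for (key, header) in mapping {
        if let Some(v) = options.get(key) {
            // The real client maps invalid values to Error::InvalidInput; this sketch just panics.
            headers.insert(HeaderName::from_static(header), HeaderValue::from_str(v).unwrap());
        }
    }
    headers
}

A map containing only azure_tenant_id therefore yields a single x-azure-tenant-id request header; keys outside the mapping are ignored.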
@@ -491,7 +496,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
}
/// Apply dynamic headers from the header provider if configured
pub(crate) async fn apply_dynamic_headers(&self, mut request: Request) -> Result<Request> {
async fn apply_dynamic_headers(&self, mut request: Request) -> Result<Request> {
if let Some(ref provider) = self.header_provider {
let headers = provider.get_headers().await?;
let request_headers = request.headers_mut();
@@ -555,9 +560,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
message: "Attempted to retry a request that cannot be cloned".to_string(),
})?;
let (_, r) = tmp_req.build_split();
let mut r = r.map_err(|e| Error::Runtime {
message: format!("Failed to build request: {}", e),
})?;
let mut r = r.unwrap();
let request_id = self.extract_request_id(&mut r);
let mut retry_counter = RetryCounter::new(retry_config, request_id.clone());
@@ -573,9 +576,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
}
let (c, request) = req_builder.build_split();
let mut request = request.map_err(|e| Error::Runtime {
message: format!("Failed to build request: {}", e),
})?;
let mut request = request.unwrap();
self.set_request_id(&mut request, &request_id.clone());
// Apply dynamic headers before each retry attempt
@@ -625,7 +626,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
}
}
pub(crate) fn log_request(&self, request: &Request, request_id: &String) {
fn log_request(&self, request: &Request, request_id: &String) {
if log::log_enabled!(log::Level::Debug) {
let content_type = request
.headers()

View File

@@ -4,11 +4,13 @@
use std::collections::HashMap;
use std::sync::Arc;
use arrow_array::RecordBatchIterator;
use async_trait::async_trait;
use http::StatusCode;
use lance_io::object_store::StorageOptions;
use moka::future::Cache;
use reqwest::header::CONTENT_TYPE;
use tokio::task::spawn_blocking;
use lance_namespace::models::{
CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
@@ -17,17 +19,16 @@ use lance_namespace::models::{
};
use crate::database::{
CloneTableRequest, CreateTableMode, CreateTableRequest, Database, DatabaseOptions,
OpenTableRequest, ReadConsistency, TableNamesRequest,
CloneTableRequest, CreateTableData, CreateTableMode, CreateTableRequest, Database,
DatabaseOptions, OpenTableRequest, ReadConsistency, TableNamesRequest,
};
use crate::error::Result;
use crate::remote::util::stream_as_body;
use crate::table::BaseTable;
use crate::Error;
use super::client::{ClientConfig, HttpSend, RequestResultExt, RestfulLanceDbClient, Sender};
use super::table::RemoteTable;
use super::util::parse_server_version;
use super::util::{batches_to_ipc_bytes, parse_server_version};
use super::ARROW_STREAM_CONTENT_TYPE;
// Request structure for the remote clone table API
@@ -435,8 +436,26 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
Ok(response)
}
async fn create_table(&self, mut request: CreateTableRequest) -> Result<Arc<dyn BaseTable>> {
let body = stream_as_body(request.data.scan_as_stream())?;
async fn create_table(&self, request: CreateTableRequest) -> Result<Arc<dyn BaseTable>> {
let data = match request.data {
CreateTableData::Data(data) => data,
CreateTableData::StreamingData(_) => {
return Err(Error::NotSupported {
message: "Creating a remote table from a streaming source".to_string(),
})
}
CreateTableData::Empty(table_definition) => {
let schema = table_definition.schema.clone();
Box::new(RecordBatchIterator::new(vec![], schema))
}
};
// TODO: https://github.com/lancedb/lancedb/issues/1026
// We should accept data from an async source. In the meantime, spawn this as blocking
// to make sure we don't block the tokio runtime if the source is slow.
let data_buffer = spawn_blocking(move || batches_to_ipc_bytes(data))
.await
.unwrap()?;
let identifier =
build_table_identifier(&request.name, &request.namespace, &self.client.id_delimiter);
@@ -444,7 +463,7 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
.client
.post(&format!("/v1/table/{}/create/", identifier))
.query(&[("mode", Into::<&str>::into(&request.mode))])
.body(body)
.body(data_buffer)
.header(CONTENT_TYPE, ARROW_STREAM_CONTENT_TYPE);
let (request_id, rsp) = self.client.send(req).await?;
@@ -773,11 +792,21 @@ impl RemoteOptions {
pub fn new(options: HashMap<String, String>) -> Self {
Self(options)
}
pub fn get(&self, key: &str) -> Option<&String> {
self.0.get(key)
}
}
impl From<StorageOptions> for RemoteOptions {
fn from(options: StorageOptions) -> Self {
let supported_opts = vec!["account_name", "azure_storage_account_name"];
let supported_opts = vec![
"account_name",
"azure_storage_account_name",
"azure_tenant_id",
"azure_client_id",
"azure_federated_token_file",
];
let mut filtered = HashMap::new();
for opt in supported_opts {
if let Some(v) = options.0.get(opt) {
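A test-style sketch of the filtering above, using only the RemoteOptions API shown in this file. It assumes StorageOptions wraps a public HashMap (as the .0 access above suggests) and that only keys listed in supported_opts are copied into the filtered map; the values are made up.

#[test]
fn azure_tenant_option_survives_filtering() {
    let mut opts = std::collections::HashMap::new();
    opts.insert("azure_tenant_id".to_string(), "tenant-id-value".to_string());
    opts.insert("unrelated_key".to_string(), "dropped".to_string());
    let remote = RemoteOptions::from(StorageOptions(opts));
    assert!(remote.get("azure_tenant_id").is_some());
    assert!(remote.get("unrelated_key").is_none());
}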
@@ -794,7 +823,7 @@ mod tests {
use std::collections::HashMap;
use std::sync::{Arc, OnceLock};
use arrow_array::{Int32Array, RecordBatch};
use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator};
use arrow_schema::{DataType, Field, Schema};
use crate::connection::ConnectBuilder;
@@ -974,7 +1003,8 @@ mod tests {
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
)
.unwrap();
let table = conn.create_table("table1", data).execute().await.unwrap();
let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
let table = conn.create_table("table1", reader).execute().await.unwrap();
assert_eq!(table.name(), "table1");
}
@@ -991,7 +1021,8 @@ mod tests {
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
)
.unwrap();
let result = conn.create_table("table1", data).execute().await;
let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
let result = conn.create_table("table1", reader).execute().await;
assert!(result.is_err());
assert!(
matches!(result, Err(crate::Error::TableAlreadyExists { name }) if name == "table1")
@@ -1024,7 +1055,8 @@ mod tests {
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
)
.unwrap();
let mut builder = conn.create_table("table1", data.clone());
let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
let mut builder = conn.create_table("table1", reader);
if let Some(mode) = mode {
builder = builder.mode(mode);
}
@@ -1049,8 +1081,9 @@ mod tests {
.unwrap();
let called: Arc<OnceLock<bool>> = Arc::new(OnceLock::new());
let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
let called_in_cb = called.clone();
conn.create_table("table1", data)
conn.create_table("table1", reader)
.mode(CreateTableMode::ExistOk(Box::new(move |b| {
called_in_cb.clone().set(true).unwrap();
b
@@ -1239,8 +1272,9 @@ mod tests {
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
)
.unwrap();
let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
let table = conn
.create_table("table1", data)
.create_table("table1", reader)
.namespace(vec!["ns1".to_string()])
.execute()
.await
@@ -1706,8 +1740,10 @@ mod tests {
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
)
.unwrap();
let reader = RecordBatchIterator::new([Ok(data.clone())], schema.clone());
let table = conn
.create_table("test_table", data)
.create_table("test_table", reader)
.namespace(namespace.clone())
.execute()
.await;
@@ -1780,7 +1816,9 @@ mod tests {
let data =
RecordBatch::try_new(schema.clone(), vec![Arc::new(Int32Array::from(vec![i]))])
.unwrap();
conn.create_table(format!("table{}", i), data)
let reader = RecordBatchIterator::new([Ok(data.clone())], schema.clone());
conn.create_table(format!("table{}", i), reader)
.namespace(namespace.clone())
.execute()
.await

File diff suppressed because it is too large

View File

@@ -1,50 +1,29 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
use arrow_ipc::CompressionType;
use futures::{Stream, StreamExt};
use std::io::Cursor;
use arrow_array::RecordBatchReader;
use reqwest::Response;
use crate::{arrow::SendableRecordBatchStream, Result};
use crate::Result;
use super::db::ServerVersion;
pub fn stream_as_ipc(
data: SendableRecordBatchStream,
) -> Result<impl Stream<Item = Result<bytes::Bytes>>> {
let options = arrow_ipc::writer::IpcWriteOptions::default()
.try_with_compression(Some(CompressionType::LZ4_FRAME))?;
pub fn batches_to_ipc_bytes(batches: impl RecordBatchReader) -> Result<Vec<u8>> {
const WRITE_BUF_SIZE: usize = 4096;
let buf = Vec::with_capacity(WRITE_BUF_SIZE);
let writer =
arrow_ipc::writer::StreamWriter::try_new_with_options(buf, &data.schema(), options)?;
let stream = futures::stream::try_unfold(
(data, writer, false),
move |(mut data, mut writer, finished)| async move {
if finished {
return Ok(None);
}
match data.next().await {
Some(Ok(batch)) => {
writer.write(&batch)?;
let buffer = std::mem::take(writer.get_mut());
Ok(Some((bytes::Bytes::from(buffer), (data, writer, false))))
}
Some(Err(e)) => Err(e),
None => {
writer.finish()?;
let buffer = std::mem::take(writer.get_mut());
Ok(Some((bytes::Bytes::from(buffer), (data, writer, true))))
}
}
},
);
Ok(stream)
}
let mut buf = Cursor::new(buf);
{
let mut writer = arrow_ipc::writer::StreamWriter::try_new(&mut buf, &batches.schema())?;
pub fn stream_as_body(data: SendableRecordBatchStream) -> Result<reqwest::Body> {
let stream = stream_as_ipc(data)?;
Ok(reqwest::Body::wrap_stream(stream))
for batch in batches {
let batch = batch?;
writer.write(&batch)?;
}
writer.finish()?;
}
Ok(buf.into_inner())
}
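A brief usage sketch for the helper above; the schema and values are invented for illustration and mirror the style of test data used elsewhere in this diff.

use std::sync::Arc;
use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator};
use arrow_schema::{DataType, Field, Schema};

fn example_ipc_body() -> crate::Result<Vec<u8>> {
    let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
    let batch =
        RecordBatch::try_new(schema.clone(), vec![Arc::new(Int32Array::from(vec![1, 2, 3]))])?;
    let reader = RecordBatchIterator::new(vec![Ok(batch)], schema);
    // Serializes the reader into Arrow IPC stream bytes, ready to be used as a request body.
    batches_to_ipc_bytes(reader)
}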
pub fn parse_server_version(req_id: &str, rsp: &Response) -> Result<ServerVersion> {

File diff suppressed because it is too large

View File

@@ -1,343 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use crate::data::scannable::Scannable;
use crate::embeddings::EmbeddingRegistry;
use crate::Result;
use super::{BaseTable, WriteOptions};
#[derive(Debug, Clone, Default)]
pub enum AddDataMode {
/// Rows will be appended to the table (the default)
#[default]
Append,
/// The existing table will be overwritten with the new data
Overwrite,
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct AddResult {
/// The commit version associated with the operation.
/// A version of `0` indicates compatibility with legacy servers that do not return
/// a commit version.
#[serde(default)]
pub version: u64,
}
/// A builder for configuring a [`crate::table::Table::add`] operation
pub struct AddDataBuilder {
pub(crate) parent: Arc<dyn BaseTable>,
pub(crate) data: Box<dyn Scannable>,
pub(crate) mode: AddDataMode,
pub(crate) write_options: WriteOptions,
pub(crate) embedding_registry: Option<Arc<dyn EmbeddingRegistry>>,
}
impl std::fmt::Debug for AddDataBuilder {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("AddDataBuilder")
.field("parent", &self.parent)
.field("mode", &self.mode)
.field("write_options", &self.write_options)
.finish()
}
}
impl AddDataBuilder {
pub(crate) fn new(
parent: Arc<dyn BaseTable>,
data: Box<dyn Scannable>,
embedding_registry: Option<Arc<dyn EmbeddingRegistry>>,
) -> Self {
Self {
parent,
data,
mode: AddDataMode::Append,
write_options: WriteOptions::default(),
embedding_registry,
}
}
pub fn mode(mut self, mode: AddDataMode) -> Self {
self.mode = mode;
self
}
pub fn write_options(mut self, options: WriteOptions) -> Self {
self.write_options = options;
self
}
pub async fn execute(self) -> Result<AddResult> {
self.parent.clone().add(self).await
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use arrow_array::{record_batch, RecordBatch, RecordBatchIterator};
use arrow_schema::{ArrowError, DataType, Field, Schema};
use futures::TryStreamExt;
use lance::dataset::{WriteMode, WriteParams};
use crate::arrow::{SendableRecordBatchStream, SimpleRecordBatchStream};
use crate::connect;
use crate::data::scannable::Scannable;
use crate::embeddings::{
EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry, MemoryRegistry,
};
use crate::query::{ExecutableQuery, QueryBase, Select};
use crate::table::{ColumnDefinition, ColumnKind, Table, TableDefinition, WriteOptions};
use crate::test_utils::embeddings::MockEmbed;
use crate::Error;
use super::AddDataMode;
async fn create_test_table() -> Table {
let conn = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
conn.create_table("test", batch).execute().await.unwrap()
}
async fn test_add_with_data<T>(data: T)
where
T: Scannable + 'static,
{
let table = create_test_table().await;
let schema = data.schema();
table.add(data).execute().await.unwrap();
assert_eq!(table.count_rows(None).await.unwrap(), 5); // 3 initial + 2 added
assert_eq!(table.schema().await.unwrap(), schema);
}
#[tokio::test]
async fn test_add_with_batch() {
let batch = record_batch!(("id", Int64, [4, 5])).unwrap();
test_add_with_data(batch).await;
}
#[tokio::test]
async fn test_add_with_vec_batch() {
let data = vec![
record_batch!(("id", Int64, [4])).unwrap(),
record_batch!(("id", Int64, [5])).unwrap(),
];
test_add_with_data(data).await;
}
#[tokio::test]
async fn test_add_with_record_batch_reader() {
let data = vec![
record_batch!(("id", Int64, [4])).unwrap(),
record_batch!(("id", Int64, [5])).unwrap(),
];
let schema = data[0].schema();
let reader: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(
RecordBatchIterator::new(data.into_iter().map(Ok), schema.clone()),
);
test_add_with_data(reader).await;
}
#[tokio::test]
async fn test_add_with_stream() {
let data = vec![
record_batch!(("id", Int64, [4])).unwrap(),
record_batch!(("id", Int64, [5])).unwrap(),
];
let schema = data[0].schema();
let inner = futures::stream::iter(data.into_iter().map(Ok));
let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
schema,
stream: inner,
});
test_add_with_data(stream).await;
}
#[derive(Debug)]
struct MyError;
impl std::fmt::Display for MyError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "MyError occurred")
}
}
impl std::error::Error for MyError {}
#[tokio::test]
async fn test_add_preserves_reader_error() {
let table = create_test_table().await;
let first_batch = record_batch!(("id", Int64, [4])).unwrap();
let schema = first_batch.schema();
let iterator = vec![
Ok(first_batch),
Err(ArrowError::ExternalError(Box::new(MyError))),
];
let reader: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(
RecordBatchIterator::new(iterator.into_iter(), schema.clone()),
);
let result = table.add(reader).execute().await;
assert!(result.is_err());
}
#[tokio::test]
async fn test_add_preserves_stream_error() {
let table = create_test_table().await;
let first_batch = record_batch!(("id", Int64, [4])).unwrap();
let schema = first_batch.schema();
let iterator = vec![
Ok(first_batch),
Err(Error::External {
source: Box::new(MyError),
}),
];
let stream = futures::stream::iter(iterator);
let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
schema: schema.clone(),
stream,
});
let result = table.add(stream).execute().await;
assert!(result.is_err());
}
#[tokio::test]
async fn test_add() {
let conn = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("i", Int32, [0, 1, 2])).unwrap();
let table = conn
.create_table("test", batch.clone())
.execute()
.await
.unwrap();
assert_eq!(table.count_rows(None).await.unwrap(), 3);
let new_batch = record_batch!(("i", Int32, [3])).unwrap();
table.add(new_batch).execute().await.unwrap();
assert_eq!(table.count_rows(None).await.unwrap(), 4);
assert_eq!(table.schema().await.unwrap(), batch.schema());
}
#[tokio::test]
async fn test_add_overwrite() {
let conn = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("i", Int32, [0, 1, 2])).unwrap();
let table = conn
.create_table("test", batch.clone())
.execute()
.await
.unwrap();
assert_eq!(table.count_rows(None).await.unwrap(), batch.num_rows());
let new_batch = record_batch!(("x", Float32, [0.0, 1.0])).unwrap();
let res = table
.add(new_batch.clone())
.mode(AddDataMode::Overwrite)
.execute()
.await
.unwrap();
assert_eq!(res.version, table.version().await.unwrap());
assert_eq!(table.count_rows(None).await.unwrap(), new_batch.num_rows());
assert_eq!(table.schema().await.unwrap(), new_batch.schema());
// Can overwrite using underlying WriteParams (which
// take precedence over AddDataMode)
let param: WriteParams = WriteParams {
mode: WriteMode::Overwrite,
..Default::default()
};
table
.add(new_batch.clone())
.write_options(WriteOptions {
lance_write_params: Some(param),
})
.mode(AddDataMode::Append)
.execute()
.await
.unwrap();
assert_eq!(table.count_rows(None).await.unwrap(), new_batch.num_rows());
}
#[tokio::test]
async fn test_add_with_embeddings() {
let registry = Arc::new(MemoryRegistry::new());
let mock_embedding: Arc<dyn EmbeddingFunction> = Arc::new(MockEmbed::new("mock", 4));
registry.register("mock", mock_embedding).unwrap();
let conn = connect("memory://")
.embedding_registry(registry)
.execute()
.await
.unwrap();
let schema = Arc::new(Schema::new(vec![
Field::new("text", DataType::Utf8, false),
Field::new(
"text_embedding",
DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float32, true)), 4),
false,
),
]));
// Add embedding metadata to the schema
let embedding_def = EmbeddingDefinition::new("text", "mock", Some("text_embedding"));
let table_def = TableDefinition::new(
schema.clone(),
vec![
ColumnDefinition {
kind: ColumnKind::Physical,
},
ColumnDefinition {
kind: ColumnKind::Embedding(embedding_def),
},
],
);
let rich_schema = table_def.into_rich_schema();
let table = conn
.create_empty_table("embed_test", rich_schema)
.execute()
.await
.unwrap();
// Now add new data WITHOUT the embedding column - it should be computed automatically
let new_batch = record_batch!(("text", Utf8, ["hello", "world"])).unwrap();
table.add(new_batch).execute().await.unwrap();
assert_eq!(table.count_rows(None).await.unwrap(), 2);
// Query to verify the embeddings were computed for the new rows
let results: Vec<RecordBatch> = table
.query()
.select(Select::columns(&["text", "text_embedding"]))
.execute()
.await
.unwrap()
.try_collect()
.await
.unwrap();
let total_rows: usize = results.iter().map(|b| b.num_rows()).sum();
assert_eq!(total_rows, 2);
// Check that all rows have embedding values (not null)
for batch in &results {
let embedding_col = batch.column(1);
assert_eq!(embedding_col.null_count(), 0);
}
}
}

View File

@@ -287,7 +287,8 @@ pub mod tests {
use arrow::array::AsArray;
use arrow_array::{
BinaryArray, Float64Array, Int32Array, Int64Array, RecordBatch, StringArray, UInt32Array,
BinaryArray, Float64Array, Int32Array, Int64Array, RecordBatch, RecordBatchIterator,
RecordBatchReader, StringArray, UInt32Array,
};
use arrow_schema::{DataType, Field, Schema};
use datafusion::{
@@ -307,7 +308,7 @@ pub mod tests {
table::datafusion::BaseTableAdapter,
};
fn make_test_batches() -> RecordBatch {
fn make_test_batches() -> impl RecordBatchReader + Send + Sync + 'static {
let metadata = HashMap::from_iter(vec![("foo".to_string(), "bar".to_string())]);
let schema = Arc::new(
Schema::new(vec![
@@ -316,17 +317,19 @@ pub mod tests {
])
.with_metadata(metadata),
);
RecordBatch::try_new(
RecordBatchIterator::new(
vec![RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from_iter_values(0..10)),
Arc::new(UInt32Array::from_iter_values(0..10)),
],
)],
schema,
vec![
Arc::new(Int32Array::from_iter_values(0..10)),
Arc::new(UInt32Array::from_iter_values(0..10)),
],
)
.unwrap()
}
fn make_tbl_two_test_batches() -> RecordBatch {
fn make_tbl_two_test_batches() -> impl RecordBatchReader + Send + Sync + 'static {
let metadata = HashMap::from_iter(vec![("foo".to_string(), "bar".to_string())]);
let schema = Arc::new(
Schema::new(vec![
@@ -339,26 +342,28 @@ pub mod tests {
])
.with_metadata(metadata),
);
RecordBatch::try_new(
RecordBatchIterator::new(
vec![RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int64Array::from_iter_values(0..1000)),
Arc::new(StringArray::from_iter_values(
(0..1000).map(|i| i.to_string()),
)),
Arc::new(Float64Array::from_iter_values((0..1000).map(|i| i as f64))),
Arc::new(StringArray::from_iter_values(
(0..1000).map(|i| format!("{{\"i\":{}}}", i)),
)),
Arc::new(BinaryArray::from_iter_values(
(0..1000).map(|i| (i as u32).to_be_bytes().to_vec()),
)),
Arc::new(StringArray::from_iter_values(
(0..1000).map(|i| i.to_string()),
)),
],
)],
schema,
vec![
Arc::new(Int64Array::from_iter_values(0..1000)),
Arc::new(StringArray::from_iter_values(
(0..1000).map(|i| i.to_string()),
)),
Arc::new(Float64Array::from_iter_values((0..1000).map(|i| i as f64))),
Arc::new(StringArray::from_iter_values(
(0..1000).map(|i| format!("{{\"i\":{}}}", i)),
)),
Arc::new(BinaryArray::from_iter_values(
(0..1000).map(|i| (i as u32).to_be_bytes().to_vec()),
)),
Arc::new(StringArray::from_iter_values(
(0..1000).map(|i| i.to_string()),
)),
],
)
.unwrap()
}
struct TestFixture {

View File

@@ -200,7 +200,7 @@ impl ExecutionPlan for InsertExec {
let new_dataset = CommitBuilder::new(dataset.clone())
.execute(merged_txn)
.await?;
ds_wrapper.update(new_dataset);
ds_wrapper.set_latest(new_dataset).await;
}
}
@@ -222,7 +222,7 @@ mod tests {
use std::vec;
use super::*;
use arrow_array::{record_batch, RecordBatchIterator};
use arrow_array::{record_batch, Int32Array, RecordBatchIterator};
use datafusion::prelude::SessionContext;
use datafusion_catalog::MemTable;
use tempfile::tempdir;
@@ -238,8 +238,11 @@ mod tests {
// Create initial table
let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();
let reader = RecordBatchIterator::new(vec![Ok(batch)], schema);
let table = db
.create_table("test_insert", batch)
.create_table("test_insert", Box::new(reader))
.execute()
.await
.unwrap();
@@ -276,8 +279,11 @@ mod tests {
// Create initial table with 3 rows
let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();
let reader = RecordBatchIterator::new(vec![Ok(batch)], schema);
let table = db
.create_table("test_overwrite", batch)
.create_table("test_overwrite", Box::new(reader))
.execute()
.await
.unwrap();
@@ -312,9 +318,20 @@ mod tests {
let db = connect(uri).execute().await.unwrap();
// Create initial table
let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
let schema = Arc::new(ArrowSchema::new(vec![Field::new(
"id",
DataType::Int32,
false,
)]));
let batches = vec![RecordBatch::try_new(
schema.clone(),
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
)
.unwrap()];
let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema.clone());
let table = db
.create_table("test_empty", batch)
.create_table("test_empty", Box::new(reader))
.execute()
.await
.unwrap();
@@ -335,13 +352,12 @@ mod tests {
false,
)]));
// Empty batches
let source_reader: Box<dyn arrow_array::RecordBatchReader + Send> =
Box::new(RecordBatchIterator::new(
std::iter::empty::<Result<RecordBatch, arrow_schema::ArrowError>>(),
source_schema,
));
let source_reader = RecordBatchIterator::new(
std::iter::empty::<Result<RecordBatch, arrow_schema::ArrowError>>(),
source_schema,
);
let source_table = db
.create_table("empty_source", source_reader)
.create_table("empty_source", Box::new(source_reader))
.execute()
.await
.unwrap();
@@ -373,10 +389,20 @@ mod tests {
let db = connect(uri).execute().await.unwrap();
// Create initial table
let batch = record_batch!(("id", Int32, [1])).unwrap();
let schema = batch.schema();
let schema = Arc::new(ArrowSchema::new(vec![Field::new(
"id",
DataType::Int32,
true,
)]));
let batches =
vec![
RecordBatch::try_new(schema.clone(), vec![Arc::new(Int32Array::from(vec![1]))])
.unwrap(),
];
let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema.clone());
let table = db
.create_table("test_multi_batch", batch)
.create_table("test_multi_batch", Box::new(reader))
.execute()
.await
.unwrap();

View File

@@ -97,7 +97,7 @@ mod tests {
table::datafusion::BaseTableAdapter,
Connection, Table,
};
use arrow_array::{Int32Array, RecordBatch, StringArray};
use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, StringArray};
use arrow_schema::{DataType, Field, Schema as ArrowSchema};
use datafusion::prelude::SessionContext;
@@ -173,7 +173,14 @@ mod tests {
// Create LanceDB database and table
let db = crate::connect("memory://test").execute().await.unwrap();
let table = db.create_table("foo", batch).execute().await.unwrap();
let table = db
.create_table(
"foo",
RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
)
.execute()
.await
.unwrap();
// Create FTS index
table
@@ -316,7 +323,13 @@ mod tests {
RecordBatch::try_new(metadata_schema.clone(), vec![metadata_col, extra_col]).unwrap();
let _metadata_table = db
.create_table("metadata", metadata_batch.clone())
.create_table(
"metadata",
RecordBatchIterator::new(
vec![Ok(metadata_batch.clone())].into_iter(),
metadata_schema.clone(),
),
)
.execute()
.await
.unwrap();
@@ -380,7 +393,14 @@ mod tests {
let batch =
RecordBatch::try_new(schema.clone(), vec![id_col, text_col, category_col]).unwrap();
let table = db.create_table(table_name, batch).execute().await.unwrap();
let table = db
.create_table(
table_name,
RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
)
.execute()
.await
.unwrap();
// Create FTS index
table
@@ -526,7 +546,14 @@ mod tests {
]));
let batch = RecordBatch::try_new(schema.clone(), vec![id_col, text_col]).unwrap();
let table = db.create_table("docs", batch).execute().await.unwrap();
let table = db
.create_table(
"docs",
RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
)
.execute()
.await
.unwrap();
// Create FTS index with position information for phrase queries
table
@@ -664,7 +691,14 @@ mod tests {
let batch =
RecordBatch::try_new(schema.clone(), vec![id_col, title_col, content_col]).unwrap();
let table = db.create_table("multi_col", batch).execute().await.unwrap();
let table = db
.create_table(
"multi_col",
RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
)
.execute()
.await
.unwrap();
// Create FTS indices on both columns
table
@@ -929,7 +963,13 @@ mod tests {
let metadata_batch =
RecordBatch::try_new(metadata_schema.clone(), vec![metadata_id, extra_info]).unwrap();
let _metadata_table = db
.create_table("metadata", metadata_batch.clone())
.create_table(
"metadata",
RecordBatchIterator::new(
vec![Ok(metadata_batch.clone())].into_iter(),
metadata_schema,
),
)
.execute()
.await
.unwrap();
@@ -1318,7 +1358,14 @@ mod tests {
]));
let batch = RecordBatch::try_new(schema.clone(), vec![id_col, text_col]).unwrap();
let table = db.create_table("docs", batch).execute().await.unwrap();
let table = db
.create_table(
"docs",
RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
)
.execute()
.await
.unwrap();
// Create FTS index with position information
table
@@ -1463,7 +1510,14 @@ mod tests {
let batch =
RecordBatch::try_new(schema.clone(), vec![id_col, title_col, content_col]).unwrap();
let table = db.create_table("docs", batch).execute().await.unwrap();
let table = db
.create_table(
"docs",
RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
)
.execute()
.await
.unwrap();
// Create FTS indices on both columns
table
@@ -1537,7 +1591,14 @@ mod tests {
let batch =
RecordBatch::try_new(schema.clone(), vec![id_col, title_col, content_col]).unwrap();
let table = db.create_table("docs", batch).execute().await.unwrap();
let table = db
.create_table(
"docs",
RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
)
.execute()
.await
.unwrap();
// Create FTS indices
table
@@ -1663,23 +1724,36 @@ mod tests {
.unwrap();
// Create table with simple text for n-gram testing
let data = RecordBatch::try_new(
let data = RecordBatchIterator::new(
vec![RecordBatch::try_new(
Arc::new(ArrowSchema::new(vec![
Field::new("id", DataType::Int32, false),
Field::new("text", DataType::Utf8, false),
])),
vec![
Arc::new(Int32Array::from(vec![1, 2, 3])),
Arc::new(StringArray::from(vec![
"hello world",
"lance database",
"lance is cool",
])),
],
)
.unwrap()]
.into_iter()
.map(Ok),
Arc::new(ArrowSchema::new(vec![
Field::new("id", DataType::Int32, false),
Field::new("text", DataType::Utf8, false),
])),
vec![
Arc::new(Int32Array::from(vec![1, 2, 3])),
Arc::new(StringArray::from(vec![
"hello world",
"lance database",
"lance is cool",
])),
],
)
.unwrap();
);
let table = Arc::new(db.create_table("docs", data).execute().await.unwrap());
let table = Arc::new(
db.create_table("docs", Box::new(data))
.execute()
.await
.unwrap(),
);
// Create FTS index with n-gram tokenizer (default min_ngram_length=3)
table
@@ -1802,29 +1876,43 @@ mod tests {
.unwrap();
// Create table with two text columns
let data = RecordBatch::try_new(
let data = RecordBatchIterator::new(
vec![RecordBatch::try_new(
Arc::new(ArrowSchema::new(vec![
Field::new("id", DataType::Int32, false),
Field::new("title", DataType::Utf8, false),
Field::new("content", DataType::Utf8, false),
])),
vec![
Arc::new(Int32Array::from(vec![1, 2, 3])),
Arc::new(StringArray::from(vec![
"Important Document",
"Another Document",
"Random Text",
])),
Arc::new(StringArray::from(vec![
"This is important information",
"This has details",
"Nothing special here",
])),
],
)
.unwrap()]
.into_iter()
.map(Ok),
Arc::new(ArrowSchema::new(vec![
Field::new("id", DataType::Int32, false),
Field::new("title", DataType::Utf8, false),
Field::new("content", DataType::Utf8, false),
])),
vec![
Arc::new(Int32Array::from(vec![1, 2, 3])),
Arc::new(StringArray::from(vec![
"Important Document",
"Another Document",
"Random Text",
])),
Arc::new(StringArray::from(vec![
"This is important information",
"This has details",
"Nothing special here",
])),
],
)
.unwrap();
);
let table = Arc::new(db.create_table("docs", data).execute().await.unwrap());
let table = Arc::new(
db.create_table("docs", Box::new(data))
.execute()
.await
.unwrap(),
);
// Create FTS indices on both columns
table

View File

@@ -2,501 +2,321 @@
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
use std::{
sync::{Arc, Mutex},
time::Duration,
ops::{Deref, DerefMut},
sync::Arc,
time::{self, Duration, Instant},
};
use lance::{dataset::refs, Dataset};
use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use crate::{error::Result, utils::background_cache::BackgroundCache, Error};
use crate::error::Result;
/// A wrapper around a [Dataset] that provides lazy-loading and consistency checks.
///
/// This can be cloned cheaply. It supports concurrent reads or exclusive writes.
#[derive(Debug, Clone)]
pub struct DatasetConsistencyWrapper(Arc<RwLock<DatasetRef>>);
/// A wrapper around a [Dataset] that provides consistency checks.
///
/// This can be cloned cheaply. Callers get an [`Arc<Dataset>`] from [`get()`](Self::get)
/// and call [`update()`](Self::update) after writes to store the new version.
/// The dataset is lazily loaded, and starts off as None. On the first access,
/// the dataset is loaded.
#[derive(Debug, Clone)]
pub struct DatasetConsistencyWrapper {
state: Arc<Mutex<DatasetState>>,
consistency: ConsistencyMode,
enum DatasetRef {
/// In this mode, the dataset is always the latest version.
Latest {
dataset: Dataset,
read_consistency_interval: Option<Duration>,
last_consistency_check: Option<time::Instant>,
},
/// In this mode, the dataset is a specific version. It cannot be mutated.
TimeTravel { dataset: Dataset, version: u64 },
}
/// The current dataset and whether it is pinned to a specific version.
#[derive(Debug, Clone)]
struct DatasetState {
dataset: Arc<Dataset>,
/// `Some(version)` = pinned to a specific version (time travel),
/// `None` = tracking latest.
pinned_version: Option<u64>,
}
impl DatasetRef {
/// Reload the dataset to the appropriate version.
async fn reload(&mut self) -> Result<()> {
match self {
Self::Latest {
dataset,
last_consistency_check,
..
} => {
dataset.checkout_latest().await?;
last_consistency_check.replace(Instant::now());
}
Self::TimeTravel { dataset, version } => {
dataset.checkout_version(*version).await?;
}
}
Ok(())
}
#[derive(Debug, Clone)]
enum ConsistencyMode {
/// Only update table state when explicitly asked.
Lazy,
/// Always check for a new version on every read.
Strong,
/// Periodically check for a new version in the background. If the table is being
/// accessed regularly, the refresh happens in the background. If the table has been
/// idle for a while, the next access triggers a refresh before returning the dataset.
///
/// read_consistency_interval = TTL
/// refresh_window = min(3s, TTL/4)
///
/// | t < TTL - refresh_window | TTL - refresh_window <= t < TTL | t >= TTL |
/// | return cached value | background refresh, return cached value | synchronous refresh |
Eventual(BackgroundCache<Arc<Dataset>, Error>),
fn is_latest(&self) -> bool {
matches!(self, Self::Latest { .. })
}
async fn need_reload(&self) -> Result<bool> {
Ok(match self {
Self::Latest { dataset, .. } => {
dataset.latest_version_id().await? != dataset.version().version
}
Self::TimeTravel { dataset, version } => dataset.version().version != *version,
})
}
async fn as_latest(&mut self, read_consistency_interval: Option<Duration>) -> Result<()> {
match self {
Self::Latest { .. } => Ok(()),
Self::TimeTravel { dataset, .. } => {
dataset
.checkout_version(dataset.latest_version_id().await?)
.await?;
*self = Self::Latest {
dataset: dataset.clone(),
read_consistency_interval,
last_consistency_check: Some(Instant::now()),
};
Ok(())
}
}
}
async fn as_time_travel(&mut self, target_version: impl Into<refs::Ref>) -> Result<()> {
let target_ref = target_version.into();
match self {
Self::Latest { dataset, .. } => {
let new_dataset = dataset.checkout_version(target_ref.clone()).await?;
let version_value = new_dataset.version().version;
*self = Self::TimeTravel {
dataset: new_dataset,
version: version_value,
};
}
Self::TimeTravel { dataset, version } => {
let should_checkout = match &target_ref {
refs::Ref::Version(_, Some(target_ver)) => version != target_ver,
refs::Ref::Version(_, None) => true, // No specific version, always checkout
refs::Ref::VersionNumber(target_ver) => version != target_ver,
refs::Ref::Tag(_) => true, // Always checkout for tags
};
if should_checkout {
let new_dataset = dataset.checkout_version(target_ref).await?;
let version_value = new_dataset.version().version;
*self = Self::TimeTravel {
dataset: new_dataset,
version: version_value,
};
}
}
}
Ok(())
}
fn time_travel_version(&self) -> Option<u64> {
match self {
Self::Latest { .. } => None,
Self::TimeTravel { version, .. } => Some(*version),
}
}
fn set_latest(&mut self, dataset: Dataset) {
match self {
Self::Latest {
dataset: ref mut ds,
..
} => {
if dataset.manifest().version > ds.manifest().version {
*ds = dataset;
}
}
_ => unreachable!("Dataset should be in latest mode at this point"),
}
}
}
impl DatasetConsistencyWrapper {
/// Create a new wrapper in the latest version mode.
pub fn new_latest(dataset: Dataset, read_consistency_interval: Option<Duration>) -> Self {
let dataset = Arc::new(dataset);
let consistency = match read_consistency_interval {
Some(d) if d == Duration::ZERO => ConsistencyMode::Strong,
Some(d) => {
let refresh_window = std::cmp::min(std::time::Duration::from_secs(3), d / 4);
let cache = BackgroundCache::new(d, refresh_window);
cache.seed(dataset.clone());
ConsistencyMode::Eventual(cache)
}
None => ConsistencyMode::Lazy,
};
Self {
state: Arc::new(Mutex::new(DatasetState {
dataset,
pinned_version: None,
})),
consistency,
}
Self(Arc::new(RwLock::new(DatasetRef::Latest {
dataset,
read_consistency_interval,
last_consistency_check: Some(Instant::now()),
})))
}
/// Get the current dataset.
/// Get an immutable reference to the dataset.
pub async fn get(&self) -> Result<DatasetReadGuard<'_>> {
self.ensure_up_to_date().await?;
Ok(DatasetReadGuard {
guard: self.0.read().await,
})
}
/// Get a mutable reference to the dataset.
///
/// Behavior depends on the consistency mode:
/// - **Lazy** (`None`): returns the cached dataset immediately.
/// - **Strong** (`Some(ZERO)`): checks for a new version before returning.
/// - **Eventual** (`Some(d)` where `d > 0`): returns a cached value immediately
/// while refreshing in the background when the TTL expires.
///
/// If pinned to a specific version (time travel), always returns the
/// pinned dataset regardless of consistency mode.
pub async fn get(&self) -> Result<Arc<Dataset>> {
{
let state = self.state.lock().unwrap();
if state.pinned_version.is_some() {
return Ok(state.dataset.clone());
}
}
match &self.consistency {
ConsistencyMode::Eventual(bg_cache) => {
if let Some(dataset) = bg_cache.try_get() {
return Ok(dataset);
}
let state = self.state.clone();
bg_cache
.get(move || refresh_latest(state))
.await
.map_err(unwrap_shared_error)
}
ConsistencyMode::Strong => refresh_latest(self.state.clone()).await,
ConsistencyMode::Lazy => {
let state = self.state.lock().unwrap();
Ok(state.dataset.clone())
}
}
/// If the dataset is in time travel mode, this call will fail.
pub async fn get_mut(&self) -> Result<DatasetWriteGuard<'_>> {
self.ensure_mutable().await?;
self.ensure_up_to_date().await?;
Ok(DatasetWriteGuard {
guard: self.0.write().await,
})
}
/// Store a new dataset version after a write operation.
///
/// Only stores the dataset if its version is at least as new as the current one.
/// Same-version updates are accepted for operations like manifest path migration
/// that modify the dataset without creating a new version.
/// If the wrapper has since transitioned to time-travel mode (e.g. via a
/// concurrent [`as_time_travel`](Self::as_time_travel) call), the update
/// is silently ignored — the write already committed to storage.
pub fn update(&self, dataset: Dataset) {
let mut state = self.state.lock().unwrap();
if state.pinned_version.is_some() {
// A concurrent as_time_travel() beat us here. The write succeeded
// in storage, but since we're now pinned we don't advance the
// cached pointer.
return;
}
if dataset.manifest().version >= state.dataset.manifest().version {
state.dataset = Arc::new(dataset);
}
drop(state);
if let ConsistencyMode::Eventual(bg_cache) = &self.consistency {
bg_cache.invalidate();
}
/// Get a mutable reference to the dataset without requiring the
/// dataset to be in a Latest mode.
pub async fn get_mut_unchecked(&self) -> Result<DatasetWriteGuard<'_>> {
self.ensure_up_to_date().await?;
Ok(DatasetWriteGuard {
guard: self.0.write().await,
})
}
/// Checkout a branch and track its HEAD for new versions.
pub async fn as_branch(&self, _branch: impl Into<String>) -> Result<()> {
todo!("Branch support not yet implemented")
}
/// Check that the dataset is in a mutable mode (Latest).
pub fn ensure_mutable(&self) -> Result<()> {
let state = self.state.lock().unwrap();
if state.pinned_version.is_some() {
Err(crate::Error::InvalidInput {
message: "table cannot be modified when a specific version is checked out"
.to_string(),
})
} else {
Ok(())
}
}
/// Returns the version, if in time travel mode, or None otherwise.
pub fn time_travel_version(&self) -> Option<u64> {
self.state.lock().unwrap().pinned_version
}
/// Convert into a wrapper in latest version mode.
pub async fn as_latest(&self) -> Result<()> {
let dataset = {
let state = self.state.lock().unwrap();
if state.pinned_version.is_none() {
return Ok(());
}
state.dataset.clone()
};
let latest_version = dataset.latest_version_id().await?;
let new_dataset = dataset.checkout_version(latest_version).await?;
let mut state = self.state.lock().unwrap();
if state.pinned_version.is_some() {
state.dataset = Arc::new(new_dataset);
state.pinned_version = None;
}
drop(state);
if let ConsistencyMode::Eventual(bg_cache) = &self.consistency {
bg_cache.invalidate();
}
Ok(())
}
pub async fn as_time_travel(&self, target_version: impl Into<refs::Ref>) -> Result<()> {
let target_ref = target_version.into();
let (should_checkout, dataset) = {
let state = self.state.lock().unwrap();
let should = match state.pinned_version {
None => true,
Some(version) => match &target_ref {
refs::Ref::Version(_, Some(target_ver)) => version != *target_ver,
refs::Ref::Version(_, None) => true,
refs::Ref::VersionNumber(target_ver) => version != *target_ver,
refs::Ref::Tag(_) => true,
},
};
(should, state.dataset.clone())
};
if !should_checkout {
/// Convert into a wrapper in latest version mode
pub async fn as_latest(&self, read_consistency_interval: Option<Duration>) -> Result<()> {
if self.0.read().await.is_latest() {
return Ok(());
}
let new_dataset = dataset.checkout_version(target_ref).await?;
let version_value = new_dataset.version().version;
let mut write_guard = self.0.write().await;
if write_guard.is_latest() {
return Ok(());
}
let mut state = self.state.lock().unwrap();
state.dataset = Arc::new(new_dataset);
state.pinned_version = Some(version_value);
Ok(())
write_guard.as_latest(read_consistency_interval).await
}
pub async fn as_time_travel(&self, target_version: impl Into<refs::Ref>) -> Result<()> {
self.0.write().await.as_time_travel(target_version).await
}
/// Provide a known latest version of the dataset.
///
/// This is usually done after some write operation, which inherently will
/// have the latest version.
pub async fn set_latest(&self, dataset: Dataset) {
self.0.write().await.set_latest(dataset);
}
pub async fn reload(&self) -> Result<()> {
let (dataset, pinned_version) = {
let state = self.state.lock().unwrap();
(state.dataset.clone(), state.pinned_version)
};
match pinned_version {
None => {
refresh_latest(self.state.clone()).await?;
if let ConsistencyMode::Eventual(bg_cache) = &self.consistency {
bg_cache.invalidate();
}
}
Some(version) => {
if dataset.version().version == version {
return Ok(());
}
let new_dataset = dataset.checkout_version(version).await?;
let mut state = self.state.lock().unwrap();
if state.pinned_version == Some(version) {
state.dataset = Arc::new(new_dataset);
}
}
if !self.0.read().await.need_reload().await? {
return Ok(());
}
let mut write_guard = self.0.write().await;
// On lock escalation, check whether someone else has already reloaded.
if !write_guard.need_reload().await? {
return Ok(());
}
// actually need reloading
write_guard.reload().await
}
/// Returns the version, if in time travel mode, or None otherwise
pub async fn time_travel_version(&self) -> Option<u64> {
self.0.read().await.time_travel_version()
}
pub async fn ensure_mutable(&self) -> Result<()> {
let dataset_ref = self.0.read().await;
match &*dataset_ref {
DatasetRef::Latest { .. } => Ok(()),
DatasetRef::TimeTravel { .. } => Err(crate::Error::InvalidInput {
message: "table cannot be modified when a specific version is checked out"
.to_string(),
}),
}
}
async fn is_up_to_date(&self) -> Result<bool> {
let dataset_ref = self.0.read().await;
match &*dataset_ref {
DatasetRef::Latest {
read_consistency_interval,
last_consistency_check,
..
} => match (read_consistency_interval, last_consistency_check) {
(None, _) => Ok(true),
(Some(_), None) => Ok(false),
(Some(read_consistency_interval), Some(last_consistency_check)) => {
if &last_consistency_check.elapsed() < read_consistency_interval {
Ok(true)
} else {
Ok(false)
}
}
},
DatasetRef::TimeTravel { dataset, version } => {
Ok(dataset.version().version == *version)
}
}
}
/// Ensures that the dataset is loaded and up-to-date with consistency and
/// version parameters.
async fn ensure_up_to_date(&self) -> Result<()> {
if !self.is_up_to_date().await? {
self.reload().await?;
}
Ok(())
}
}
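A minimal sketch of the get()/update() write flow described above. It assumes placement inside the tests module further down, so the create_test_dataset helper is in scope; the delete predicate is purely illustrative.
#[tokio::test]
async fn get_update_write_flow_sketch() {
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap();
    let ds = create_test_dataset(uri).await;

    // None => Lazy, Some(Duration::ZERO) => Strong, Some(d) with d > 0 => Eventual
    // (with refresh_window = min(3s, d / 4)).
    let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);

    // Readers share the cached Arc<Dataset>.
    let snapshot = wrapper.get().await.unwrap();
    assert_eq!(snapshot.version().version, 1);

    // Writers clone the dataset, mutate their own copy, then publish it back.
    let mut working = (*wrapper.get().await.unwrap()).clone();
    working.delete("id = 1").await.unwrap();
    wrapper.update(working);
    assert_eq!(wrapper.get().await.unwrap().version().version, 2);
}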
async fn refresh_latest(state: Arc<Mutex<DatasetState>>) -> Result<Arc<Dataset>> {
let dataset = { state.lock().unwrap().dataset.clone() };
let mut ds = (*dataset).clone();
ds.checkout_latest().await?;
let new_arc = Arc::new(ds);
{
let mut state = state.lock().unwrap();
if state.pinned_version.is_none()
&& new_arc.manifest().version >= state.dataset.manifest().version
{
state.dataset = new_arc.clone();
}
}
Ok(new_arc)
pub struct DatasetReadGuard<'a> {
guard: RwLockReadGuard<'a, DatasetRef>,
}
fn unwrap_shared_error(arc: Arc<Error>) -> Error {
match Arc::try_unwrap(arc) {
Ok(err) => err,
Err(arc) => Error::Runtime {
message: arc.to_string(),
},
impl Deref for DatasetReadGuard<'_> {
type Target = Dataset;
fn deref(&self) -> &Self::Target {
match &*self.guard {
DatasetRef::Latest { dataset, .. } => dataset,
DatasetRef::TimeTravel { dataset, .. } => dataset,
}
}
}
pub struct DatasetWriteGuard<'a> {
guard: RwLockWriteGuard<'a, DatasetRef>,
}
impl Deref for DatasetWriteGuard<'_> {
type Target = Dataset;
fn deref(&self) -> &Self::Target {
match &*self.guard {
DatasetRef::Latest { dataset, .. } => dataset,
DatasetRef::TimeTravel { dataset, .. } => dataset,
}
}
}
impl DerefMut for DatasetWriteGuard<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
match &mut *self.guard {
DatasetRef::Latest { dataset, .. } => dataset,
DatasetRef::TimeTravel { dataset, .. } => dataset,
}
}
}
#[cfg(test)]
mod tests {
use std::time::Instant;
use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator};
use arrow_schema::{DataType, Field, Schema};
use lance::{
dataset::{WriteMode, WriteParams},
io::ObjectStoreParams,
};
use lance::{dataset::WriteParams, io::ObjectStoreParams};
use super::*;
use crate::{connect, io::object_store::io_tracking::IoStatsHolder, table::WriteOptions};
async fn create_test_dataset(uri: &str) -> Dataset {
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
let batch = RecordBatch::try_new(
schema.clone(),
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
)
.unwrap();
Dataset::write(
RecordBatchIterator::new(vec![Ok(batch)], schema),
uri,
Some(WriteParams::default()),
)
.await
.unwrap()
}
async fn append_to_dataset(uri: &str) -> Dataset {
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
let batch = RecordBatch::try_new(
schema.clone(),
vec![Arc::new(Int32Array::from(vec![4, 5, 6]))],
)
.unwrap();
Dataset::write(
RecordBatchIterator::new(vec![Ok(batch)], schema),
uri,
Some(WriteParams {
mode: WriteMode::Append,
..Default::default()
}),
)
.await
.unwrap()
}
#[tokio::test]
async fn test_get_returns_dataset() {
let dir = tempfile::tempdir().unwrap();
let uri = dir.path().to_str().unwrap();
let ds = create_test_dataset(uri).await;
let version = ds.version().version;
let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);
let ds1 = wrapper.get().await.unwrap();
let ds2 = wrapper.get().await.unwrap();
assert_eq!(ds1.version().version, version);
assert_eq!(ds2.version().version, version);
// Arc<Dataset> is independent — not borrowing from wrapper
drop(wrapper);
assert_eq!(ds1.version().version, version);
}
#[tokio::test]
async fn test_update_stores_newer_version() {
let dir = tempfile::tempdir().unwrap();
let uri = dir.path().to_str().unwrap();
let ds_v1 = create_test_dataset(uri).await;
assert_eq!(ds_v1.version().version, 1);
let wrapper = DatasetConsistencyWrapper::new_latest(ds_v1, None);
let ds_v2 = append_to_dataset(uri).await;
assert_eq!(ds_v2.version().version, 2);
wrapper.update(ds_v2);
let ds = wrapper.get().await.unwrap();
assert_eq!(ds.version().version, 2);
}
#[tokio::test]
async fn test_update_ignores_older_version() {
let dir = tempfile::tempdir().unwrap();
let uri = dir.path().to_str().unwrap();
let ds_v1 = create_test_dataset(uri).await;
let ds_v2 = append_to_dataset(uri).await;
let wrapper = DatasetConsistencyWrapper::new_latest(ds_v2, None);
wrapper.update(ds_v1);
let ds = wrapper.get().await.unwrap();
assert_eq!(ds.version().version, 2);
}
#[tokio::test]
async fn test_ensure_mutable_allows_latest() {
let dir = tempfile::tempdir().unwrap();
let uri = dir.path().to_str().unwrap();
let ds = create_test_dataset(uri).await;
let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);
assert!(wrapper.ensure_mutable().is_ok());
}
#[tokio::test]
async fn test_ensure_mutable_rejects_time_travel() {
let dir = tempfile::tempdir().unwrap();
let uri = dir.path().to_str().unwrap();
let ds = create_test_dataset(uri).await;
let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);
wrapper.as_time_travel(1u64).await.unwrap();
assert!(wrapper.ensure_mutable().is_err());
}
#[tokio::test]
async fn test_time_travel_version() {
let dir = tempfile::tempdir().unwrap();
let uri = dir.path().to_str().unwrap();
let ds = create_test_dataset(uri).await;
let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);
assert_eq!(wrapper.time_travel_version(), None);
wrapper.as_time_travel(1u64).await.unwrap();
assert_eq!(wrapper.time_travel_version(), Some(1));
}
#[tokio::test]
async fn test_as_latest_from_time_travel() {
let dir = tempfile::tempdir().unwrap();
let uri = dir.path().to_str().unwrap();
let ds = create_test_dataset(uri).await;
let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);
wrapper.as_time_travel(1u64).await.unwrap();
assert!(wrapper.ensure_mutable().is_err());
wrapper.as_latest().await.unwrap();
assert!(wrapper.ensure_mutable().is_ok());
assert_eq!(wrapper.time_travel_version(), None);
}
#[tokio::test]
async fn test_lazy_consistency_never_refreshes() {
let dir = tempfile::tempdir().unwrap();
let uri = dir.path().to_str().unwrap();
let ds = create_test_dataset(uri).await;
let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);
let v1 = wrapper.get().await.unwrap().version().version;
// External write
append_to_dataset(uri).await;
// Lazy consistency should not pick up external write
let v_after = wrapper.get().await.unwrap().version().version;
assert_eq!(v1, v_after);
}
#[tokio::test]
async fn test_strong_consistency_always_refreshes() {
let dir = tempfile::tempdir().unwrap();
let uri = dir.path().to_str().unwrap();
let ds = create_test_dataset(uri).await;
let wrapper = DatasetConsistencyWrapper::new_latest(ds, Some(Duration::ZERO));
let v1 = wrapper.get().await.unwrap().version().version;
// External write
append_to_dataset(uri).await;
// Strong consistency should pick up external write
let v_after = wrapper.get().await.unwrap().version().version;
assert_eq!(v_after, v1 + 1);
}
#[tokio::test]
async fn test_eventual_consistency_background_refresh() {
let dir = tempfile::tempdir().unwrap();
let uri = dir.path().to_str().unwrap();
let ds = create_test_dataset(uri).await;
let wrapper = DatasetConsistencyWrapper::new_latest(ds, Some(Duration::from_millis(200)));
// Populate the cache
let v1 = wrapper.get().await.unwrap().version().version;
assert_eq!(v1, 1);
// External write
append_to_dataset(uri).await;
// Should return cached value immediately (within TTL)
let v_cached = wrapper.get().await.unwrap().version().version;
assert_eq!(v_cached, 1);
// Wait for TTL to expire, then get() should trigger a refresh
tokio::time::sleep(Duration::from_millis(300)).await;
let v_after = wrapper.get().await.unwrap().version().version;
assert_eq!(v_after, 2);
}
#[tokio::test]
async fn test_eventual_consistency_update_invalidates_cache() {
let dir = tempfile::tempdir().unwrap();
let uri = dir.path().to_str().unwrap();
let ds_v1 = create_test_dataset(uri).await;
let wrapper = DatasetConsistencyWrapper::new_latest(ds_v1, Some(Duration::from_secs(60)));
// Simulate a write that produces v2
let ds_v2 = append_to_dataset(uri).await;
wrapper.update(ds_v2);
// get() should return v2 immediately (update invalidated the bg_cache,
// and the mutex state was updated)
let v = wrapper.get().await.unwrap().version().version;
assert_eq!(v, 2);
}
#[tokio::test]
async fn test_iops_open_strong_consistency() {
let db = connect("memory://")
@@ -512,7 +332,7 @@ mod tests {
.create_empty_table("test", schema)
.write_options(WriteOptions {
lance_write_params: Some(WriteParams {
store_params: Some(lance::io::ObjectStoreParams {
store_params: Some(ObjectStoreParams {
object_store_wrapper: Some(Arc::new(io_stats.clone())),
..Default::default()
}),
@@ -531,85 +351,4 @@ mod tests {
let stats = io_stats.incremental_stats();
assert_eq!(stats.read_iops, 1);
}
/// Regression test: a write that races with as_time_travel() must not panic.
///
/// Sequence: ensure_mutable() passes → as_time_travel() completes → write
/// calls update(). Previously the assert!() in update() would fire.
#[tokio::test]
async fn test_update_after_concurrent_time_travel_does_not_panic() {
let dir = tempfile::tempdir().unwrap();
let uri = dir.path().to_str().unwrap();
let ds_v1 = create_test_dataset(uri).await;
let wrapper = DatasetConsistencyWrapper::new_latest(ds_v1, None);
// Simulate: as_time_travel() completes just before the write's update().
wrapper.as_time_travel(1u64).await.unwrap();
assert_eq!(wrapper.time_travel_version(), Some(1));
// The write already committed to storage; now it calls update().
// This must not panic, and the wrapper must stay pinned.
let ds_v2 = append_to_dataset(uri).await;
wrapper.update(ds_v2);
let ds = wrapper.get().await.unwrap();
assert_eq!(ds.version().version, 1);
}
/// Regression test: before the fix, the reload fast-path (no version change)
/// did not reset `last_consistency_check`, causing a list call on every
/// subsequent query once the interval expired.
#[tokio::test]
async fn test_reload_resets_consistency_timer() {
let db = connect("memory://")
.read_consistency_interval(Duration::from_secs(1))
.execute()
.await
.unwrap();
let io_stats = IoStatsHolder::default();
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
let table = db
.create_empty_table("test", schema)
.write_options(WriteOptions {
lance_write_params: Some(WriteParams {
store_params: Some(ObjectStoreParams {
object_store_wrapper: Some(Arc::new(io_stats.clone())),
..Default::default()
}),
..Default::default()
}),
})
.execute()
.await
.unwrap();
let start = Instant::now();
io_stats.incremental_stats(); // reset
// Step 1: within interval — no list
table.schema().await.unwrap();
let s = io_stats.incremental_stats();
assert_eq!(s.read_iops, 0, "step 1, elapsed={:?}", start.elapsed());
// Step 2: still within interval — no list
table.schema().await.unwrap();
let s = io_stats.incremental_stats();
assert_eq!(s.read_iops, 0, "step 2, elapsed={:?}", start.elapsed());
// Step 3: sleep past the 1s boundary
tokio::time::sleep(Duration::from_secs(1)).await;
// Step 4: interval expired — exactly 1 list, timer resets
table.schema().await.unwrap();
let s = io_stats.incremental_stats();
assert_eq!(s.read_iops, 1, "step 4, elapsed={:?}", start.elapsed());
// Step 5: 10 more calls — timer just reset, no lists (THIS is the regression test).
for _ in 0..10 {
table.schema().await.unwrap();
}
let s = io_stats.incremental_stats();
assert_eq!(s.read_iops, 0, "step 5, elapsed={:?}", start.elapsed());
}
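The pinning calls exercised above compose into a single workflow; this sketch combines as_time_travel, ensure_mutable, and as_latest in one pass, using the same helpers as the tests above and adding nothing new to the API surface.
#[tokio::test]
async fn time_travel_workflow_sketch() {
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap();
    let ds = create_test_dataset(uri).await;
    let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);

    // Pin to version 1: reads return the pinned version, writes are rejected.
    wrapper.as_time_travel(1u64).await.unwrap();
    assert_eq!(wrapper.time_travel_version(), Some(1));
    assert!(wrapper.ensure_mutable().is_err());

    // Unpin and track the latest version again.
    wrapper.as_latest().await.unwrap();
    assert_eq!(wrapper.time_travel_version(), None);
    assert!(wrapper.ensure_mutable().is_ok());
}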
}

View File

@@ -18,18 +18,23 @@ pub struct DeleteResult {
///
/// This logic was moved from NativeTable::delete to keep table.rs clean.
pub(crate) async fn execute_delete(table: &NativeTable, predicate: &str) -> Result<DeleteResult> {
table.dataset.ensure_mutable()?;
let mut dataset = (*table.dataset.get().await?).clone();
// The table's 'dataset' field is pub(crate), so this module (under super) can access it directly.
let mut dataset = table.dataset.get_mut().await?;
// Perform the actual delete on the Lance dataset
dataset.delete(predicate).await?;
let version = dataset.version().version;
table.dataset.update(dataset);
Ok(DeleteResult { version })
// Return the result with the new version
Ok(DeleteResult {
version: dataset.version().version,
})
}
#[cfg(test)]
mod tests {
use crate::connect;
use arrow_array::{record_batch, Int32Array, RecordBatch};
use arrow_array::{record_batch, Int32Array, RecordBatch, RecordBatchIterator};
use arrow_schema::{DataType, Field, Schema};
use std::sync::Arc;
@@ -48,7 +53,10 @@ mod tests {
.unwrap();
let table = conn
.create_table("test_delete", batch)
.create_table(
"test_delete",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -94,7 +102,10 @@ mod tests {
let original_schema = batch.schema();
let table = conn
.create_table("test_delete_all", batch)
.create_table(
"test_delete_all",
RecordBatchIterator::new(vec![Ok(batch)], original_schema.clone()),
)
.execute()
.await
.unwrap();
@@ -115,8 +126,13 @@ mod tests {
// Create a table with 5 rows
let batch = record_batch!(("id", Int32, [1, 2, 3, 4, 5])).unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_delete_noop", batch)
.create_table(
"test_delete_noop",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
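For the caller-facing flow these tests back, a hedged sketch: the public Table::delete method is assumed here to route through execute_delete above, and the predicate is ordinary SQL.
#[tokio::test]
async fn delete_with_predicate_sketch() {
    let conn = connect("memory://").execute().await.unwrap();
    let batch = record_batch!(("id", Int32, [1, 2, 3, 4, 5])).unwrap();
    let schema = batch.schema();
    let table = conn
        .create_table(
            "delete_sketch",
            RecordBatchIterator::new(vec![Ok(batch)], schema),
        )
        .execute()
        .await
        .unwrap();

    // The public delete API (assumed) routes through execute_delete above.
    table.delete("id > 3").await.unwrap();
    assert_eq!(table.count_rows(None).await.unwrap(), 3);
}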

View File

@@ -1,45 +1,13 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
use std::sync::Arc;
use std::time::Duration;
use std::{sync::Arc, time::Duration};
use arrow_array::RecordBatchReader;
use futures::future::Either;
use futures::{FutureExt, TryFutureExt};
use lance::dataset::{
MergeInsertBuilder as LanceMergeInsertBuilder, WhenMatched, WhenNotMatchedBySource,
};
use serde::{Deserialize, Serialize};
use crate::error::{Error, Result};
use crate::Result;
use super::{BaseTable, NativeTable};
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct MergeResult {
/// The commit version associated with the operation.
/// A version of `0` indicates the response came from a legacy server that does not
/// return a commit version.
#[serde(default)]
pub version: u64,
/// Number of inserted rows (for user statistics)
#[serde(default)]
pub num_inserted_rows: u64,
/// Number of updated rows (for user statistics)
#[serde(default)]
pub num_updated_rows: u64,
/// Number of deleted rows (for user statistics)
/// Note: this differs from internal references to 'deleted_rows', since updated rows
/// are technically "deleted" during processing. However, those rows are not reported
/// to the user.
#[serde(default)]
pub num_deleted_rows: u64,
/// Number of attempts performed during the merge operation.
/// This includes the initial attempt plus any retries due to transaction conflicts.
/// A value of 1 means the operation succeeded on the first try.
#[serde(default)]
pub num_attempts: u32,
}
use super::{BaseTable, MergeResult};
/// A builder used to create and run a merge insert operation
///
@@ -156,172 +124,3 @@ impl MergeInsertBuilder {
self.table.clone().merge_insert(self, new_data).await
}
}
/// Internal implementation of the merge insert logic
///
/// This logic was moved from NativeTable::merge_insert to keep table.rs clean.
pub(crate) async fn execute_merge_insert(
table: &NativeTable,
params: MergeInsertBuilder,
new_data: Box<dyn RecordBatchReader + Send>,
) -> Result<MergeResult> {
let dataset = table.dataset.get().await?;
let mut builder = LanceMergeInsertBuilder::try_new(dataset.clone(), params.on)?;
match (
params.when_matched_update_all,
params.when_matched_update_all_filt,
) {
(false, _) => builder.when_matched(WhenMatched::DoNothing),
(true, None) => builder.when_matched(WhenMatched::UpdateAll),
(true, Some(filt)) => builder.when_matched(WhenMatched::update_if(&dataset, &filt)?),
};
if params.when_not_matched_insert_all {
builder.when_not_matched(lance::dataset::WhenNotMatched::InsertAll);
} else {
builder.when_not_matched(lance::dataset::WhenNotMatched::DoNothing);
}
if params.when_not_matched_by_source_delete {
let behavior = if let Some(filter) = params.when_not_matched_by_source_delete_filt {
WhenNotMatchedBySource::delete_if(dataset.as_ref(), &filter)?
} else {
WhenNotMatchedBySource::Delete
};
builder.when_not_matched_by_source(behavior);
} else {
builder.when_not_matched_by_source(WhenNotMatchedBySource::Keep);
}
builder.use_index(params.use_index);
let future = if let Some(timeout) = params.timeout {
let future = builder
.retry_timeout(timeout)
.try_build()?
.execute_reader(new_data);
Either::Left(tokio::time::timeout(timeout, future).map(|res| match res {
Ok(Ok((new_dataset, stats))) => Ok((new_dataset, stats)),
Ok(Err(e)) => Err(e.into()),
Err(_) => Err(Error::Runtime {
message: "merge insert timed out".to_string(),
}),
}))
} else {
let job = builder.try_build()?;
Either::Right(job.execute_reader(new_data).map_err(|e| e.into()))
};
let (new_dataset, stats) = future.await?;
let version = new_dataset.manifest().version;
table.dataset.update(new_dataset.as_ref().clone());
Ok(MergeResult {
version,
num_updated_rows: stats.num_updated_rows,
num_inserted_rows: stats.num_inserted_rows,
num_deleted_rows: stats.num_deleted_rows,
num_attempts: stats.num_attempts,
})
}
#[cfg(test)]
mod tests {
use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, RecordBatchReader};
use arrow_schema::{DataType, Field, Schema};
use std::sync::Arc;
use crate::connect;
fn merge_insert_test_batches(offset: i32, age: i32) -> Box<dyn RecordBatchReader + Send> {
let schema = Arc::new(Schema::new(vec![
Field::new("i", DataType::Int32, false),
Field::new("age", DataType::Int32, false),
]));
let batch = RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from_iter_values(offset..(offset + 10))),
Arc::new(Int32Array::from_iter_values(std::iter::repeat_n(age, 10))),
],
)
.unwrap();
Box::new(RecordBatchIterator::new(vec![Ok(batch)], schema))
}
#[tokio::test]
async fn test_merge_insert() {
let conn = connect("memory://").execute().await.unwrap();
// Create a dataset with i=0..10
let batches = merge_insert_test_batches(0, 0);
let table = conn
.create_table("my_table", batches)
.execute()
.await
.unwrap();
assert_eq!(table.count_rows(None).await.unwrap(), 10);
// Create new data with i=5..15
let new_batches = merge_insert_test_batches(5, 1);
// Perform a "insert if not exists"
let mut merge_insert_builder = table.merge_insert(&["i"]);
merge_insert_builder.when_not_matched_insert_all();
let result = merge_insert_builder.execute(new_batches).await.unwrap();
// Only 5 rows should actually be inserted
assert_eq!(table.count_rows(None).await.unwrap(), 15);
assert_eq!(result.num_inserted_rows, 5);
assert_eq!(result.num_updated_rows, 0);
assert_eq!(result.num_deleted_rows, 0);
assert_eq!(result.num_attempts, 1);
// Create new data with i=15..25 (no id matches)
let new_batches = merge_insert_test_batches(15, 2);
// Perform a "bulk update" (should not affect anything)
let mut merge_insert_builder = table.merge_insert(&["i"]);
merge_insert_builder.when_matched_update_all(None);
merge_insert_builder.execute(new_batches).await.unwrap();
// No new rows should have been inserted
assert_eq!(table.count_rows(None).await.unwrap(), 15);
assert_eq!(
table.count_rows(Some("age = 2".to_string())).await.unwrap(),
0
);
// Conditional update that only replaces the age=0 data
let new_batches = merge_insert_test_batches(5, 3);
let mut merge_insert_builder = table.merge_insert(&["i"]);
merge_insert_builder.when_matched_update_all(Some("target.age = 0".to_string()));
merge_insert_builder.execute(new_batches).await.unwrap();
assert_eq!(
table.count_rows(Some("age = 3".to_string())).await.unwrap(),
5
);
}
#[tokio::test]
async fn test_merge_insert_use_index() {
let conn = connect("memory://").execute().await.unwrap();
// Create a dataset with i=0..10
let batches = merge_insert_test_batches(0, 0);
let table = conn
.create_table("my_table", batches)
.execute()
.await
.unwrap();
assert_eq!(table.count_rows(None).await.unwrap(), 10);
// Test use_index=true (default behavior)
let new_batches = merge_insert_test_batches(5, 1);
let mut merge_insert_builder = table.merge_insert(&["i"]);
merge_insert_builder.when_not_matched_insert_all();
merge_insert_builder.use_index(true);
merge_insert_builder.execute(new_batches).await.unwrap();
assert_eq!(table.count_rows(None).await.unwrap(), 15);
// Test use_index=false (force table scan)
let new_batches = merge_insert_test_batches(15, 2);
let mut merge_insert_builder = table.merge_insert(&["i"]);
merge_insert_builder.when_not_matched_insert_all();
merge_insert_builder.use_index(false);
merge_insert_builder.execute(new_batches).await.unwrap();
assert_eq!(table.count_rows(None).await.unwrap(), 25);
}
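A compact upsert sketch showing how the MergeResult statistics line up when matched and unmatched rows are both handled; it reuses the merge_insert_test_batches helper defined above.
#[tokio::test]
async fn merge_insert_upsert_stats_sketch() {
    let conn = connect("memory://").execute().await.unwrap();
    let table = conn
        .create_table("upsert_sketch", merge_insert_test_batches(0, 0))
        .execute()
        .await
        .unwrap();

    // Rows 5..15 overlap the existing 0..10 on "i": 5 updates plus 5 inserts.
    let mut builder = table.merge_insert(&["i"]);
    builder.when_matched_update_all(None);
    builder.when_not_matched_insert_all();
    let result = builder.execute(merge_insert_test_batches(5, 1)).await.unwrap();

    assert_eq!(result.num_updated_rows, 5);
    assert_eq!(result.num_inserted_rows, 5);
    assert_eq!(result.num_deleted_rows, 0);
    assert_eq!(table.count_rows(None).await.unwrap(), 15);
}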
}

View File

@@ -26,10 +26,8 @@ use crate::error::Result;
/// optimize different parts of the table on disk.
///
/// By default, it optimizes everything, as [`OptimizeAction::All`].
#[derive(Default)]
pub enum OptimizeAction {
/// Run all optimizations with default values
#[default]
All,
/// Compacts files in the dataset
///
@@ -86,6 +84,12 @@ pub enum OptimizeAction {
Index(OptimizeOptions),
}
impl Default for OptimizeAction {
fn default() -> Self {
Self::All
}
}
/// Statistics about the optimization.
#[derive(Debug, Default)]
pub struct OptimizeStats {
@@ -101,10 +105,12 @@ pub struct OptimizeStats {
/// This logic was moved from NativeTable to keep table.rs clean.
pub(crate) async fn optimize_indices(table: &NativeTable, options: &OptimizeOptions) -> Result<()> {
info!("LanceDB: optimizing indices: {:?}", options);
table.dataset.ensure_mutable()?;
let mut dataset = (*table.dataset.get().await?).clone();
dataset.optimize_indices(options).await?;
table.dataset.update(dataset);
table
.dataset
.get_mut()
.await?
.optimize_indices(options)
.await?;
Ok(())
}
@@ -125,9 +131,10 @@ pub(crate) async fn cleanup_old_versions(
delete_unverified: Option<bool>,
error_if_tagged_old_versions: Option<bool>,
) -> Result<RemovalStats> {
table.dataset.ensure_mutable()?;
let dataset = table.dataset.get().await?;
Ok(dataset
Ok(table
.dataset
.get_mut()
.await?
.cleanup_old_versions(older_than, delete_unverified, error_if_tagged_old_versions)
.await?)
}
@@ -143,10 +150,8 @@ pub(crate) async fn compact_files_impl(
options: CompactionOptions,
remap_options: Option<Arc<dyn IndexRemapperOptions>>,
) -> Result<CompactionMetrics> {
table.dataset.ensure_mutable()?;
let mut dataset = (*table.dataset.get().await?).clone();
let metrics = compact_files(&mut dataset, options, remap_options).await?;
table.dataset.update(dataset);
let mut dataset_mut = table.dataset.get_mut().await?;
let metrics = compact_files(&mut dataset_mut, options, remap_options).await?;
Ok(metrics)
}
@@ -207,7 +212,7 @@ pub(crate) async fn execute_optimize(
#[cfg(test)]
mod tests {
use arrow_array::{Int32Array, RecordBatch, StringArray};
use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, StringArray};
use arrow_schema::{DataType, Field, Schema};
use rstest::rstest;
use std::sync::Arc;
@@ -231,7 +236,10 @@ mod tests {
.unwrap();
let table = conn
.create_table("test_compact", batch)
.create_table(
"test_compact",
RecordBatchIterator::new(vec![Ok(batch)], schema.clone()),
)
.execute()
.await
.unwrap();
@@ -245,7 +253,11 @@ mod tests {
))],
)
.unwrap();
table.add(batch).execute().await.unwrap();
table
.add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
.execute()
.await
.unwrap();
}
// Verify we have multiple fragments before compaction
@@ -310,7 +322,10 @@ mod tests {
.unwrap();
let table = conn
.create_table("test_prune", batch)
.create_table(
"test_prune",
RecordBatchIterator::new(vec![Ok(batch)], schema.clone()),
)
.execute()
.await
.unwrap();
@@ -324,7 +339,11 @@ mod tests {
))],
)
.unwrap();
table.add(batch).execute().await.unwrap();
table
.add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
.execute()
.await
.unwrap();
}
// Verify multiple versions exist
@@ -386,7 +405,10 @@ mod tests {
.unwrap();
let table = conn
.create_table("test_index_optimize", batch)
.create_table(
"test_index_optimize",
RecordBatchIterator::new(vec![Ok(batch)], schema.clone()),
)
.execute()
.await
.unwrap();
@@ -404,7 +426,11 @@ mod tests {
vec![Arc::new(Int32Array::from_iter_values(100..200))],
)
.unwrap();
table.add(batch).execute().await.unwrap();
table
.add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
.execute()
.await
.unwrap();
// Verify index stats before optimization
let indices = table.list_indices().await.unwrap();
@@ -448,7 +474,10 @@ mod tests {
.unwrap();
let table = conn
.create_table("test_optimize_all", batch)
.create_table(
"test_optimize_all",
RecordBatchIterator::new(vec![Ok(batch)], schema.clone()),
)
.execute()
.await
.unwrap();
@@ -462,7 +491,11 @@ mod tests {
))],
)
.unwrap();
table.add(batch).execute().await.unwrap();
table
.add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
.execute()
.await
.unwrap();
}
// Run all optimizations
@@ -526,13 +559,20 @@ mod tests {
.unwrap();
let table = conn
.create_table("test_deferred_remap", batch.clone())
.create_table(
"test_deferred_remap",
RecordBatchIterator::new(vec![Ok(batch.clone())], schema.clone()),
)
.execute()
.await
.unwrap();
// Add more data
table.add(batch).execute().await.unwrap();
table
.add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
.execute()
.await
.unwrap();
// Create an index
table
@@ -608,13 +648,20 @@ mod tests {
let original_schema = batch.schema();
let table = conn
.create_table("test_schema_preserved", batch.clone())
.create_table(
"test_schema_preserved",
RecordBatchIterator::new(vec![Ok(batch.clone())], schema.clone()),
)
.execute()
.await
.unwrap();
// Add more data
table.add(batch).execute().await.unwrap();
table
.add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
.execute()
.await
.unwrap();
// Run compaction
table
@@ -656,7 +703,10 @@ mod tests {
.unwrap();
let table = conn
.create_table("test_empty_optimize", batch)
.create_table(
"test_empty_optimize",
RecordBatchIterator::new(vec![Ok(batch)], schema.clone()),
)
.execute()
.await
.unwrap();
@@ -702,12 +752,19 @@ mod tests {
.unwrap();
let table = conn
.create_table("test_checkout_optimize", batch.clone())
.create_table(
"test_checkout_optimize",
RecordBatchIterator::new(vec![Ok(batch.clone())], schema.clone()),
)
.execute()
.await
.unwrap();
table.add(batch).execute().await.unwrap();
table
.add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
.execute()
.await
.unwrap();
table.checkout(1).await.unwrap();
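A hedged sketch of the caller-facing optimize flow: the Table::optimize entry point is an assumption here, while OptimizeAction::All matches the default documented above.
#[tokio::test]
async fn optimize_all_sketch() {
    let conn = connect("memory://").execute().await.unwrap();
    let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
    let batch = RecordBatch::try_new(
        schema.clone(),
        vec![Arc::new(Int32Array::from_iter_values(0..100))],
    )
    .unwrap();
    let table = conn
        .create_table(
            "optimize_sketch",
            RecordBatchIterator::new(vec![Ok(batch)], schema.clone()),
        )
        .execute()
        .await
        .unwrap();

    // Table::optimize is assumed to dispatch to execute_optimize above;
    // OptimizeAction::All covers compaction, pruning, and index optimization.
    table.optimize(OptimizeAction::All).await.unwrap();
    assert_eq!(table.count_rows(None).await.unwrap(), 100);
}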

View File

@@ -1,739 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
use std::sync::Arc;
use super::NativeTable;
use crate::error::{Error, Result};
use crate::query::{
QueryExecutionOptions, QueryFilter, QueryRequest, Select, VectorQueryRequest, DEFAULT_TOP_K,
};
use crate::utils::{default_vector_column, TimeoutStream};
use arrow::array::{AsArray, FixedSizeListBuilder, Float32Builder};
use arrow::datatypes::{Float32Type, UInt8Type};
use arrow_array::Array;
use arrow_schema::{DataType, Schema};
use datafusion_physical_plan::projection::ProjectionExec;
use datafusion_physical_plan::repartition::RepartitionExec;
use datafusion_physical_plan::union::UnionExec;
use datafusion_physical_plan::ExecutionPlan;
use futures::future::try_join_all;
use lance::dataset::scanner::DatasetRecordBatchStream;
use lance::dataset::scanner::Scanner;
use lance_datafusion::exec::{analyze_plan as lance_analyze_plan, execute_plan};
use lance_namespace::models::{
QueryTableRequest as NsQueryTableRequest, QueryTableRequestColumns,
QueryTableRequestFullTextQuery, QueryTableRequestVector, StringFtsQuery,
};
use lance_namespace::LanceNamespace;
#[derive(Debug, Clone)]
pub enum AnyQuery {
Query(QueryRequest),
VectorQuery(VectorQueryRequest),
}
// Decide between namespace (server-side) and local execution
pub async fn execute_query(
table: &NativeTable,
query: &AnyQuery,
options: QueryExecutionOptions,
) -> Result<DatasetRecordBatchStream> {
// If namespace client is configured, use server-side query execution
if let Some(ref namespace_client) = table.namespace_client {
return execute_namespace_query(table, namespace_client.clone(), query, options).await;
}
execute_generic_query(table, query, options).await
}
pub async fn analyze_query_plan(
table: &NativeTable,
query: &AnyQuery,
options: QueryExecutionOptions,
) -> Result<String> {
let plan = create_plan(table, query, options).await?;
Ok(lance_analyze_plan(plan, Default::default()).await?)
}
/// Local Execution Path (DataFusion)
async fn execute_generic_query(
table: &NativeTable,
query: &AnyQuery,
options: QueryExecutionOptions,
) -> Result<DatasetRecordBatchStream> {
let plan = create_plan(table, query, options.clone()).await?;
let inner = execute_plan(plan, Default::default())?;
let inner = if let Some(timeout) = options.timeout {
TimeoutStream::new_boxed(inner, timeout)
} else {
inner
};
Ok(DatasetRecordBatchStream::new(inner))
}
pub async fn create_plan(
table: &NativeTable,
query: &AnyQuery,
options: QueryExecutionOptions,
) -> Result<Arc<dyn ExecutionPlan>> {
let query = match query {
AnyQuery::VectorQuery(query) => query.clone(),
AnyQuery::Query(query) => VectorQueryRequest::from_plain_query(query.clone()),
};
let ds_ref = table.dataset.get().await?;
let schema = ds_ref.schema();
let mut column = query.column.clone();
let mut query_vector = query.query_vector.first().cloned();
if query.query_vector.len() > 1 {
if column.is_none() {
// Infer a vector column with the same dimension of the query vector.
let arrow_schema = Schema::from(ds_ref.schema());
column = Some(default_vector_column(
&arrow_schema,
Some(query.query_vector[0].len() as i32),
)?);
}
let vector_field = schema.field(column.as_ref().unwrap()).unwrap();
if let DataType::List(_) = vector_field.data_type() {
// Multivector handling: concatenate into FixedSizeList<FixedSizeList<_>>
let vectors = query
.query_vector
.iter()
.map(|arr| arr.as_ref())
.collect::<Vec<_>>();
let dim = vectors[0].len();
let mut fsl_builder = FixedSizeListBuilder::with_capacity(
Float32Builder::with_capacity(dim),
dim as i32,
vectors.len(),
);
for vec in vectors {
fsl_builder
.values()
.append_slice(vec.as_primitive::<Float32Type>().values());
fsl_builder.append(true);
}
query_vector = Some(Arc::new(fsl_builder.finish()));
} else {
// Multiple query vectors: create a plan for each and union them
let query_vecs = query.query_vector.clone();
let plan_futures = query_vecs
.into_iter()
.map(|query_vector| {
let mut sub_query = query.clone();
sub_query.query_vector = vec![query_vector];
let options_ref = options.clone();
async move {
create_plan(table, &AnyQuery::VectorQuery(sub_query), options_ref).await
}
})
.collect::<Vec<_>>();
let plans = try_join_all(plan_futures).await?;
return create_multi_vector_plan(plans);
}
}
let mut scanner: Scanner = ds_ref.scan();
if let Some(query_vector) = query_vector {
let column = if let Some(col) = column {
col
} else {
let arrow_schema = Schema::from(ds_ref.schema());
default_vector_column(&arrow_schema, Some(query_vector.len() as i32))?
};
let (_, element_type) = lance::index::vector::utils::get_vector_type(schema, &column)?;
let is_binary = matches!(element_type, DataType::UInt8);
let top_k = query.base.limit.unwrap_or(DEFAULT_TOP_K) + query.base.offset.unwrap_or(0);
if is_binary {
let query_vector = arrow::compute::cast(&query_vector, &DataType::UInt8)?;
let query_vector = query_vector.as_primitive::<UInt8Type>();
scanner.nearest(&column, query_vector, top_k)?;
} else {
scanner.nearest(&column, query_vector.as_ref(), top_k)?;
}
scanner.minimum_nprobes(query.minimum_nprobes);
if let Some(maximum_nprobes) = query.maximum_nprobes {
scanner.maximum_nprobes(maximum_nprobes);
}
}
scanner.limit(
query.base.limit.map(|limit| limit as i64),
query.base.offset.map(|offset| offset as i64),
)?;
if let Some(ef) = query.ef {
scanner.ef(ef);
}
scanner.distance_range(query.lower_bound, query.upper_bound);
scanner.use_index(query.use_index);
scanner.prefilter(query.base.prefilter);
match query.base.select {
Select::Columns(ref columns) => {
scanner.project(columns.as_slice())?;
}
Select::Dynamic(ref select_with_transform) => {
scanner.project_with_transform(select_with_transform.as_slice())?;
}
Select::All => {}
}
if query.base.with_row_id {
scanner.with_row_id();
}
scanner.batch_size(options.max_batch_length as usize);
if query.base.fast_search {
scanner.fast_search();
}
if let Some(filter) = &query.base.filter {
match filter {
QueryFilter::Sql(sql) => {
scanner.filter(sql)?;
}
QueryFilter::Substrait(substrait) => {
scanner.filter_substrait(substrait)?;
}
QueryFilter::Datafusion(expr) => {
scanner.filter_expr(expr.clone());
}
}
}
if let Some(fts) = &query.base.full_text_search {
scanner.full_text_search(fts.clone())?;
}
if let Some(refine_factor) = query.refine_factor {
scanner.refine(refine_factor);
}
if let Some(distance_type) = query.distance_type {
scanner.distance_metric(distance_type.into());
}
if query.base.disable_scoring_autoprojection {
scanner.disable_scoring_autoprojection();
}
Ok(scanner.create_plan().await?)
}
// Helper functions below
// Take many execution plans and map them into a single plan that adds
// a query_index column and unions them.
pub(crate) fn create_multi_vector_plan(
plans: Vec<Arc<dyn ExecutionPlan>>,
) -> Result<Arc<dyn ExecutionPlan>> {
if plans.is_empty() {
return Err(Error::InvalidInput {
message: "No plans provided".to_string(),
});
}
// Projection that keeps all existing columns
let first_plan = plans[0].clone();
let project_all_columns = first_plan
.schema()
.fields()
.iter()
.enumerate()
.map(|(i, field)| {
let expr = datafusion_physical_plan::expressions::Column::new(field.name().as_str(), i);
let expr = Arc::new(expr) as Arc<dyn datafusion_physical_plan::PhysicalExpr>;
(expr, field.name().clone())
})
.collect::<Vec<_>>();
let projected_plans = plans
.into_iter()
.enumerate()
.map(|(plan_i, plan)| {
let query_index = datafusion_common::ScalarValue::Int32(Some(plan_i as i32));
let query_index_expr = datafusion_physical_plan::expressions::Literal::new(query_index);
let query_index_expr =
Arc::new(query_index_expr) as Arc<dyn datafusion_physical_plan::PhysicalExpr>;
let mut projections = vec![(query_index_expr, "query_index".to_string())];
projections.extend_from_slice(&project_all_columns);
let projection = ProjectionExec::try_new(projections, plan).unwrap();
Arc::new(projection) as Arc<dyn datafusion_physical_plan::ExecutionPlan>
})
.collect::<Vec<_>>();
let unioned = UnionExec::try_new(projected_plans).map_err(|err| Error::Runtime {
message: err.to_string(),
})?;
// We require 1 partition in the final output
let repartitioned = RepartitionExec::try_new(
unioned,
datafusion_physical_plan::Partitioning::RoundRobinBatch(1),
)
.unwrap();
Ok(Arc::new(repartitioned))
}
/// Execute a query on the namespace server instead of locally.
async fn execute_namespace_query(
table: &NativeTable,
namespace_client: Arc<dyn LanceNamespace>,
query: &AnyQuery,
_options: QueryExecutionOptions,
) -> Result<DatasetRecordBatchStream> {
// Build table_id from namespace + table name
let mut table_id = table.namespace.clone();
table_id.push(table.name.clone());
// Convert AnyQuery to namespace QueryTableRequest
let mut ns_request = convert_to_namespace_query(query)?;
// Set the table ID on the request
ns_request.id = Some(table_id);
// Call the namespace query_table API
let response_bytes = namespace_client
.query_table(ns_request)
.await
.map_err(|e| Error::Runtime {
message: format!("Failed to execute server-side query: {}", e),
})?;
// Parse the Arrow IPC response into a RecordBatchStream
parse_arrow_ipc_response(response_bytes).await
}
/// Convert an AnyQuery to the namespace QueryTableRequest format.
fn convert_to_namespace_query(query: &AnyQuery) -> Result<NsQueryTableRequest> {
match query {
AnyQuery::VectorQuery(vq) => {
// Extract the query vector(s)
let vector = extract_query_vector(&vq.query_vector)?;
// Convert filter to SQL string
let filter = match &vq.base.filter {
Some(f) => Some(filter_to_sql(f)?),
None => None,
};
// Convert select to columns list
let columns = match &vq.base.select {
Select::All => None,
Select::Columns(cols) => Some(Box::new(QueryTableRequestColumns {
column_names: Some(cols.clone()),
column_aliases: None,
})),
Select::Dynamic(_) => {
return Err(Error::NotSupported {
message:
"Dynamic column selection is not supported for server-side queries"
.to_string(),
});
}
};
// Check for unsupported features
if vq.base.reranker.is_some() {
return Err(Error::NotSupported {
message: "Reranker is not supported for server-side queries".to_string(),
});
}
// Convert FTS query if present
let full_text_query = vq.base.full_text_search.as_ref().map(|fts| {
let columns = fts.columns();
let columns_vec = if columns.is_empty() {
None
} else {
Some(columns.into_iter().collect())
};
Box::new(QueryTableRequestFullTextQuery {
string_query: Some(Box::new(StringFtsQuery {
query: fts.query.to_string(),
columns: columns_vec,
})),
structured_query: None,
})
});
Ok(NsQueryTableRequest {
id: None, // Will be set in execute_namespace_query
k: vq.base.limit.unwrap_or(10) as i32,
vector: Box::new(vector),
vector_column: vq.column.clone(),
filter,
columns,
offset: vq.base.offset.map(|o| o as i32),
distance_type: vq.distance_type.map(|dt| dt.to_string()),
nprobes: Some(vq.minimum_nprobes as i32),
ef: vq.ef.map(|e| e as i32),
refine_factor: vq.refine_factor.map(|r| r as i32),
lower_bound: vq.lower_bound,
upper_bound: vq.upper_bound,
prefilter: Some(vq.base.prefilter),
fast_search: Some(vq.base.fast_search),
with_row_id: Some(vq.base.with_row_id),
bypass_vector_index: Some(!vq.use_index),
full_text_query,
..Default::default()
})
}
AnyQuery::Query(q) => {
// For non-vector queries, pass an empty vector (similar to remote table implementation)
if q.reranker.is_some() {
return Err(Error::NotSupported {
message: "Reranker is not supported for server-side query execution"
.to_string(),
});
}
let filter = q.filter.as_ref().map(filter_to_sql).transpose()?;
let columns = match &q.select {
Select::All => None,
Select::Columns(cols) => Some(Box::new(QueryTableRequestColumns {
column_names: Some(cols.clone()),
column_aliases: None,
})),
Select::Dynamic(_) => {
return Err(Error::NotSupported {
message: "Dynamic columns are not supported for server-side query"
.to_string(),
});
}
};
// Handle full text search if present
let full_text_query = q.full_text_search.as_ref().map(|fts| {
let columns_vec = if fts.columns().is_empty() {
None
} else {
Some(fts.columns().iter().cloned().collect())
};
Box::new(QueryTableRequestFullTextQuery {
string_query: Some(Box::new(StringFtsQuery {
query: fts.query.to_string(),
columns: columns_vec,
})),
structured_query: None,
})
});
// Empty vector for non-vector queries
let vector = Box::new(QueryTableRequestVector {
single_vector: Some(vec![]),
multi_vector: None,
});
Ok(NsQueryTableRequest {
id: None, // Will be set by caller
vector,
k: q.limit.unwrap_or(10) as i32,
filter,
columns,
prefilter: Some(q.prefilter),
offset: q.offset.map(|o| o as i32),
vector_column: None, // No vector column for plain queries
with_row_id: Some(q.with_row_id),
bypass_vector_index: Some(true), // No vector index for plain queries
full_text_query,
..Default::default()
})
}
}
}
fn filter_to_sql(filter: &QueryFilter) -> Result<String> {
match filter {
QueryFilter::Sql(sql) => Ok(sql.clone()),
QueryFilter::Substrait(_) => Err(Error::NotSupported {
message: "Substrait filters are not supported for server-side queries".to_string(),
}),
QueryFilter::Datafusion(_) => Err(Error::NotSupported {
message: "Datafusion expression filters are not supported for server-side queries. Use SQL filter instead.".to_string(),
}),
}
}
/// Extract query vector(s) from Arrow arrays into the namespace format.
fn extract_query_vector(
query_vectors: &[Arc<dyn arrow_array::Array>],
) -> Result<QueryTableRequestVector> {
if query_vectors.is_empty() {
return Err(Error::InvalidInput {
message: "Query vector is required for vector search".to_string(),
});
}
// Handle single vector case
if query_vectors.len() == 1 {
let arr = &query_vectors[0];
let single_vector = array_to_f32_vec(arr)?;
Ok(QueryTableRequestVector {
single_vector: Some(single_vector),
multi_vector: None,
})
} else {
// Handle multi-vector case
let multi_vector: Result<Vec<Vec<f32>>> =
query_vectors.iter().map(array_to_f32_vec).collect();
Ok(QueryTableRequestVector {
single_vector: None,
multi_vector: Some(multi_vector?),
})
}
}
/// Convert an Arrow array to a Vec<f32>.
fn array_to_f32_vec(arr: &Arc<dyn arrow_array::Array>) -> Result<Vec<f32>> {
// Handle FixedSizeList (common for vectors)
if let Some(fsl) = arr
.as_any()
.downcast_ref::<arrow_array::FixedSizeListArray>()
{
let values = fsl.values();
if let Some(f32_arr) = values.as_any().downcast_ref::<arrow_array::Float32Array>() {
return Ok(f32_arr.values().to_vec());
}
}
// Handle direct Float32Array
if let Some(f32_arr) = arr.as_any().downcast_ref::<arrow_array::Float32Array>() {
return Ok(f32_arr.values().to_vec());
}
Err(Error::InvalidInput {
message: "Query vector must be Float32 type".to_string(),
})
}
/// Parse Arrow IPC response from the namespace server.
async fn parse_arrow_ipc_response(bytes: bytes::Bytes) -> Result<DatasetRecordBatchStream> {
use arrow_ipc::reader::StreamReader;
use std::io::Cursor;
let cursor = Cursor::new(bytes);
let reader = StreamReader::try_new(cursor, None).map_err(|e| Error::Runtime {
message: format!("Failed to parse Arrow IPC response: {}", e),
})?;
// Collect all record batches
let schema = reader.schema();
let batches: Vec<_> = reader
.into_iter()
.collect::<std::result::Result<Vec<_>, _>>()
.map_err(|e| Error::Runtime {
message: format!("Failed to read Arrow IPC batches: {}", e),
})?;
// Create a stream from the batches
let stream = futures::stream::iter(batches.into_iter().map(Ok));
let record_batch_stream =
Box::pin(datafusion_physical_plan::stream::RecordBatchStreamAdapter::new(schema, stream));
Ok(DatasetRecordBatchStream::new(record_batch_stream))
}
#[cfg(test)]
#[allow(deprecated)]
mod tests {
use arrow_array::Float32Array;
use futures::TryStreamExt;
use std::sync::Arc;
use super::*;
use crate::query::QueryExecutionOptions;
#[test]
fn test_convert_to_namespace_query_vector() {
let query_vector = Arc::new(Float32Array::from(vec![1.0, 2.0, 3.0, 4.0]));
let vq = VectorQueryRequest {
base: QueryRequest {
limit: Some(10),
offset: Some(5),
filter: Some(QueryFilter::Sql("id > 0".to_string())),
select: Select::Columns(vec!["id".to_string()]),
..Default::default()
},
column: Some("vector".to_string()),
// We cast here to satisfy the struct definition
query_vector: vec![query_vector as Arc<dyn Array>],
minimum_nprobes: 20,
distance_type: Some(crate::DistanceType::L2),
..Default::default()
};
let any_query = AnyQuery::VectorQuery(vq);
let ns_request = convert_to_namespace_query(&any_query).unwrap();
assert_eq!(ns_request.k, 10);
assert_eq!(ns_request.offset, Some(5));
assert_eq!(ns_request.filter, Some("id > 0".to_string()));
assert_eq!(
ns_request
.columns
.as_ref()
.and_then(|c| c.column_names.as_ref()),
Some(&vec!["id".to_string()])
);
assert_eq!(ns_request.vector_column, Some("vector".to_string()));
assert_eq!(ns_request.distance_type, Some("l2".to_string()));
// Verify the vector data was extracted correctly
assert!(ns_request.vector.single_vector.is_some());
assert_eq!(
ns_request.vector.single_vector.as_ref().unwrap(),
&vec![1.0, 2.0, 3.0, 4.0]
);
}
#[test]
fn test_convert_to_namespace_query_plain_query() {
let q = QueryRequest {
limit: Some(20),
offset: Some(5),
filter: Some(QueryFilter::Sql("id > 5".to_string())),
select: Select::Columns(vec!["id".to_string()]),
with_row_id: true,
..Default::default()
};
let any_query = AnyQuery::Query(q);
let ns_request = convert_to_namespace_query(&any_query).unwrap();
assert_eq!(ns_request.k, 20);
assert_eq!(ns_request.offset, Some(5));
assert_eq!(ns_request.filter, Some("id > 5".to_string()));
assert_eq!(
ns_request
.columns
.as_ref()
.and_then(|c| c.column_names.as_ref()),
Some(&vec!["id".to_string()])
);
assert_eq!(ns_request.with_row_id, Some(true));
assert_eq!(ns_request.bypass_vector_index, Some(true));
assert!(ns_request.vector_column.is_none());
assert!(ns_request.vector.single_vector.as_ref().unwrap().is_empty());
}
#[tokio::test]
async fn test_execute_query_local_routing() {
use crate::connect;
use crate::table::query::execute_query;
use arrow_array::{Int32Array, RecordBatch};
use arrow_schema::{DataType, Field, Schema};
let conn = connect("memory://").execute().await.unwrap();
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
let batch = RecordBatch::try_new(
schema.clone(),
vec![Arc::new(Int32Array::from(vec![1, 2, 3, 4, 5]))],
)
.unwrap();
let table = conn
.create_table("test_routing", vec![batch])
.execute()
.await
.unwrap();
let native_table = table.as_native().unwrap();
// Set up a request
let req = QueryRequest {
filter: Some(QueryFilter::Sql("id > 3".to_string())),
..Default::default()
};
let query = AnyQuery::Query(req);
// Action: Call execute_query directly
// This validates that execute_query correctly routes to the local DataFusion engine
// when table.namespace_client is None.
let stream = execute_query(native_table, &query, QueryExecutionOptions::default())
.await
.unwrap();
// Verify results
let batches = stream.try_collect::<Vec<_>>().await.unwrap();
let count: usize = batches.iter().map(|b| b.num_rows()).sum();
assert_eq!(count, 2); // 4 and 5
}
#[tokio::test]
async fn test_create_plan_multivector_structure() {
use arrow_array::{Float32Array, RecordBatch};
use arrow_schema::{DataType, Field, Schema};
use datafusion_physical_plan::display::DisplayableExecutionPlan;
use crate::table::query::create_plan;
use crate::connect;
let conn = connect("memory://").execute().await.unwrap();
let schema = Arc::new(Schema::new(vec![
Field::new("id", DataType::Int32, false),
Field::new(
"vector",
DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float32, true)), 2),
false,
),
]));
let batch = RecordBatch::new_empty(schema.clone());
let table = conn
.create_table("test_plan", vec![batch])
.execute()
.await
.unwrap();
let native_table = table.as_native().unwrap();
// This triggers the "create_multi_vector_plan" logic branch
let q1 = Arc::new(Float32Array::from(vec![1.0, 2.0]));
let q2 = Arc::new(Float32Array::from(vec![3.0, 4.0]));
let req = VectorQueryRequest {
column: Some("vector".to_string()),
query_vector: vec![q1, q2],
..Default::default()
};
let query = AnyQuery::VectorQuery(req);
// Create the Plan
let plan = create_plan(native_table, &query, QueryExecutionOptions::default())
.await
.unwrap();
// Formatting the plan lets us inspect the node hierarchy
let display = DisplayableExecutionPlan::new(plan.as_ref())
.indent(true)
.to_string();
// We expect a RepartitionExec wrapping a UnionExec
assert!(
display.contains("RepartitionExec"),
"Plan should include Repartitioning"
);
assert!(
display.contains("UnionExec"),
"Plan should include a Union of multiple searches"
);
// We expect the projection to add the 'query_index' column (logic inside multi_vector_plan)
assert!(
display.contains("query_index"),
"Plan should add query_index column"
);
}
}

View File

@@ -52,12 +52,11 @@ pub(crate) async fn execute_add_columns(
transforms: NewColumnTransform,
read_columns: Option<Vec<String>>,
) -> Result<AddColumnsResult> {
table.dataset.ensure_mutable()?;
let mut dataset = (*table.dataset.get().await?).clone();
let mut dataset = table.dataset.get_mut().await?;
dataset.add_columns(transforms, read_columns, None).await?;
let version = dataset.version().version;
table.dataset.update(dataset);
Ok(AddColumnsResult { version })
Ok(AddColumnsResult {
version: dataset.version().version,
})
}
/// Internal implementation of the alter columns logic.
@@ -67,12 +66,11 @@ pub(crate) async fn execute_alter_columns(
table: &NativeTable,
alterations: &[ColumnAlteration],
) -> Result<AlterColumnsResult> {
table.dataset.ensure_mutable()?;
let mut dataset = (*table.dataset.get().await?).clone();
let mut dataset = table.dataset.get_mut().await?;
dataset.alter_columns(alterations).await?;
let version = dataset.version().version;
table.dataset.update(dataset);
Ok(AlterColumnsResult { version })
Ok(AlterColumnsResult {
version: dataset.version().version,
})
}
/// Internal implementation of the drop columns logic.
@@ -82,17 +80,16 @@ pub(crate) async fn execute_drop_columns(
table: &NativeTable,
columns: &[&str],
) -> Result<DropColumnsResult> {
table.dataset.ensure_mutable()?;
let mut dataset = (*table.dataset.get().await?).clone();
let mut dataset = table.dataset.get_mut().await?;
dataset.drop_columns(columns).await?;
let version = dataset.version().version;
table.dataset.update(dataset);
Ok(DropColumnsResult { version })
Ok(DropColumnsResult {
version: dataset.version().version,
})
}
#[cfg(test)]
mod tests {
use arrow_array::{record_batch, Int32Array, StringArray};
use arrow_array::{record_batch, Int32Array, RecordBatchIterator, StringArray};
use arrow_schema::DataType;
use futures::TryStreamExt;
use lance::dataset::ColumnAlteration;
@@ -108,9 +105,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("id", Int32, [1, 2, 3, 4, 5])).unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_add_columns", batch)
.create_table(
"test_add_columns",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -168,9 +169,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("x", Int32, [10, 20, 30])).unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_add_multi_columns", batch)
.create_table(
"test_add_multi_columns",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -200,9 +205,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_add_const_column", batch)
.create_table(
"test_add_const_column",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -246,9 +255,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("old_name", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_alter_rename", batch)
.create_table(
"test_alter_rename",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -291,7 +304,10 @@ mod tests {
.unwrap();
let table = conn
.create_table("test_alter_nullable", batch)
.create_table(
"test_alter_nullable",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -316,9 +332,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("num", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_cast_type", batch)
.create_table(
"test_cast_type",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -359,9 +379,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("num", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_invalid_cast", batch)
.create_table(
"test_invalid_cast",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -383,9 +407,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("a", Int32, [1, 2, 3]), ("b", Int32, [4, 5, 6])).unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_alter_multi", batch)
.create_table(
"test_alter_multi",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -413,9 +441,13 @@ mod tests {
let batch =
record_batch!(("keep", Int32, [1, 2, 3]), ("remove", Int32, [4, 5, 6])).unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_drop_single", batch)
.create_table(
"test_drop_single",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -446,9 +478,13 @@ mod tests {
("d", Int32, [7, 8])
)
.unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_drop_multi", batch)
.create_table(
"test_drop_multi",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -475,9 +511,13 @@ mod tests {
("extra", Int32, [10, 20, 30])
)
.unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_drop_preserves", batch)
.create_table(
"test_drop_preserves",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -527,9 +567,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("existing", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_drop_nonexistent", batch)
.create_table(
"test_drop_nonexistent",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -549,9 +593,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("existing", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_alter_nonexistent", batch)
.create_table(
"test_alter_nonexistent",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -575,8 +623,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();
let batch = record_batch!(("a", Int32, [1, 2, 3]), ("b", Int32, [4, 5, 6])).unwrap();
let schema = batch.schema();
let table = conn
.create_table("test_version_increment", batch)
.create_table(
"test_version_increment",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();

View File

@@ -78,13 +78,11 @@ pub(crate) async fn execute_update(
table: &NativeTable,
update: UpdateBuilder,
) -> Result<UpdateResult> {
table.dataset.ensure_mutable()?;
// 1. Snapshot the current dataset
let dataset = table.dataset.get().await?;
let dataset = table.dataset.get().await?.clone();
// 2. Initialize the Lance Core builder
let mut builder = LanceUpdateBuilder::new(dataset);
let mut builder = LanceUpdateBuilder::new(Arc::new(dataset));
// 3. Apply the filter (WHERE clause)
if let Some(predicate) = update.filter {
@@ -101,7 +99,10 @@ pub(crate) async fn execute_update(
let res = operation.execute().await?;
// 6. Update the table's view of the latest version
table.dataset.update(res.new_dataset.as_ref().clone());
table
.dataset
.set_latest(res.new_dataset.as_ref().clone())
.await;
Ok(UpdateResult {
rows_updated: res.rows_updated,
@@ -116,8 +117,9 @@ mod tests {
use crate::query::{ExecutableQuery, Select};
use arrow_array::{
record_batch, Array, BooleanArray, Date32Array, FixedSizeListArray, Float32Array,
Float64Array, Int32Array, Int64Array, LargeStringArray, RecordBatch, StringArray,
TimestampMillisecondArray, TimestampNanosecondArray, UInt32Array,
Float64Array, Int32Array, Int64Array, LargeStringArray, RecordBatch, RecordBatchIterator,
RecordBatchReader, StringArray, TimestampMillisecondArray, TimestampNanosecondArray,
UInt32Array,
};
use arrow_data::ArrayDataBuilder;
use arrow_schema::{ArrowError, DataType, Field, Schema, TimeUnit};
@@ -165,46 +167,51 @@ mod tests {
),
]));
let batch = RecordBatch::try_new(
let record_batch_iter = RecordBatchIterator::new(
vec![RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from_iter_values(0..10)),
Arc::new(Int64Array::from_iter_values(0..10)),
Arc::new(UInt32Array::from_iter_values(0..10)),
Arc::new(StringArray::from_iter_values(vec![
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
])),
Arc::new(LargeStringArray::from_iter_values(vec![
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
])),
Arc::new(Float32Array::from_iter_values((0..10).map(|i| i as f32))),
Arc::new(Float64Array::from_iter_values((0..10).map(|i| i as f64))),
Arc::new(Into::<BooleanArray>::into(vec![
true, false, true, false, true, false, true, false, true, false,
])),
Arc::new(Date32Array::from_iter_values(0..10)),
Arc::new(TimestampNanosecondArray::from_iter_values(0..10)),
Arc::new(TimestampMillisecondArray::from_iter_values(0..10)),
Arc::new(
create_fixed_size_list(
Float32Array::from_iter_values((0..20).map(|i| i as f32)),
2,
)
.unwrap(),
),
Arc::new(
create_fixed_size_list(
Float64Array::from_iter_values((0..20).map(|i| i as f64)),
2,
)
.unwrap(),
),
],
)
.unwrap()]
.into_iter()
.map(Ok),
schema.clone(),
vec![
Arc::new(Int32Array::from_iter_values(0..10)),
Arc::new(Int64Array::from_iter_values(0..10)),
Arc::new(UInt32Array::from_iter_values(0..10)),
Arc::new(StringArray::from_iter_values(vec![
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
])),
Arc::new(LargeStringArray::from_iter_values(vec![
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
])),
Arc::new(Float32Array::from_iter_values((0..10).map(|i| i as f32))),
Arc::new(Float64Array::from_iter_values((0..10).map(|i| i as f64))),
Arc::new(Into::<BooleanArray>::into(vec![
true, false, true, false, true, false, true, false, true, false,
])),
Arc::new(Date32Array::from_iter_values(0..10)),
Arc::new(TimestampNanosecondArray::from_iter_values(0..10)),
Arc::new(TimestampMillisecondArray::from_iter_values(0..10)),
Arc::new(
create_fixed_size_list(
Float32Array::from_iter_values((0..20).map(|i| i as f32)),
2,
)
.unwrap(),
),
Arc::new(
create_fixed_size_list(
Float64Array::from_iter_values((0..20).map(|i| i as f64)),
2,
)
.unwrap(),
),
],
)
.unwrap();
);
let table = conn
.create_table("my_table", batch)
.create_table("my_table", record_batch_iter)
.execute()
.await
.unwrap();
@@ -331,13 +338,15 @@ mod tests {
Ok(FixedSizeListArray::from(data))
}
fn make_test_batch() -> RecordBatch {
fn make_test_batches() -> impl RecordBatchReader + Send + Sync + 'static {
let schema = Arc::new(Schema::new(vec![Field::new("i", DataType::Int32, false)]));
RecordBatch::try_new(
schema.clone(),
vec![Arc::new(Int32Array::from_iter_values(0..10))],
RecordBatchIterator::new(
vec![RecordBatch::try_new(
schema.clone(),
vec![Arc::new(Int32Array::from_iter_values(0..10))],
)],
schema,
)
.unwrap()
}
#[tokio::test]
@@ -358,8 +367,12 @@ mod tests {
)
.unwrap();
let schema = batch.schema();
// create_table needs a RecordBatchReader, so wrap the batch in an iterator
let record_batch_iter = RecordBatchIterator::new(vec![Ok(batch)], schema);
let table = conn
.create_table("my_table", batch)
.create_table("my_table", record_batch_iter)
.execute()
.await
.unwrap();
@@ -417,7 +430,7 @@ mod tests {
.await
.unwrap();
let tbl = conn
.create_table("my_table", make_test_batch())
.create_table("my_table", make_test_batches())
.execute()
.await
.unwrap();

View File

@@ -3,4 +3,3 @@
pub mod connection;
pub mod datagen;
pub mod embeddings;

View File

@@ -34,7 +34,10 @@ impl LanceDbDatagenExt for BatchGeneratorBuilder {
schema,
));
let db = connect("memory:///").execute().await.unwrap();
db.create_table(table_name, stream).execute().await.unwrap()
db.create_table_streaming(table_name, stream)
.execute()
.await
.unwrap()
}
}
@@ -45,5 +48,8 @@ pub async fn virtual_table(name: &str, values: &RecordBatch) -> Table {
schema,
));
let db = connect("memory:///").execute().await.unwrap();
db.create_table(name, stream).execute().await.unwrap()
db.create_table_streaming(name, stream)
.execute()
.await
.unwrap()
}

View File

@@ -1,59 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
use std::{borrow::Cow, sync::Arc};
use arrow_array::{Array, FixedSizeListArray, Float32Array};
use arrow_schema::{DataType, Field};
use crate::embeddings::EmbeddingFunction;
use crate::Result;
#[derive(Debug, Clone)]
pub struct MockEmbed {
name: String,
dim: usize,
}
impl MockEmbed {
pub fn new(name: impl Into<String>, dim: usize) -> Self {
Self {
name: name.into(),
dim,
}
}
}
impl EmbeddingFunction for MockEmbed {
fn name(&self) -> &str {
&self.name
}
fn source_type(&self) -> Result<Cow<'_, DataType>> {
Ok(Cow::Borrowed(&DataType::Utf8))
}
fn dest_type(&self) -> Result<Cow<'_, DataType>> {
Ok(Cow::Owned(DataType::new_fixed_size_list(
DataType::Float32,
self.dim as _,
true,
)))
}
fn compute_source_embeddings(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
// We can't use the FixedSizeListBuilder here because it always adds a null bitmap
// and we want to explicitly work with non-nullable arrays.
let len = source.len();
let inner = Arc::new(Float32Array::from(vec![Some(1.0); len * self.dim]));
let field = Field::new("item", inner.data_type().clone(), false);
let arr = FixedSizeListArray::new(Arc::new(field), self.dim as _, inner, None);
Ok(Arc::new(arr))
}
#[allow(unused_variables)]
fn compute_query_embeddings(&self, input: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
todo!()
}
}

View File

@@ -1,8 +1,6 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
pub(crate) mod background_cache;
use std::sync::Arc;
use arrow_array::RecordBatch;

View File

@@ -1,593 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
//! A cache that refreshes values in the background before they expire.
//!
//! See [`BackgroundCache`] for details.
use std::future::Future;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use futures::future::{BoxFuture, Shared};
use futures::FutureExt;
type SharedFut<V, E> = Shared<BoxFuture<'static, Result<V, Arc<E>>>>;
enum State<V, E> {
Empty,
Current(V, clock::Instant),
Refreshing {
previous: Option<(V, clock::Instant)>,
future: SharedFut<V, E>,
},
}
impl<V: Clone, E> State<V, E> {
fn fresh_value(&self, ttl: Duration, refresh_window: Duration) -> Option<V> {
let fresh_threshold = ttl - refresh_window;
match self {
Self::Current(value, cached_at) => {
if clock::now().duration_since(*cached_at) < fresh_threshold {
Some(value.clone())
} else {
None
}
}
Self::Refreshing {
previous: Some((value, cached_at)),
..
} => {
if clock::now().duration_since(*cached_at) < fresh_threshold {
Some(value.clone())
} else {
None
}
}
_ => None,
}
}
}
struct CacheInner<V, E> {
state: State<V, E>,
/// Incremented on invalidation. Background fetches check this to avoid
/// overwriting with stale data after a concurrent invalidation.
generation: u64,
}
enum Action<V, E> {
Return(V),
Wait(SharedFut<V, E>),
}
/// A cache that refreshes values in the background before they expire.
///
/// The cache has three states:
/// - **Empty**: No cached value. The next [`get()`](Self::get) blocks until a fetch completes.
/// - **Current**: A valid cached value with a timestamp. Returns immediately if fresh.
/// - **Refreshing**: A fetch is in progress. Returns the previous value if still valid,
/// otherwise blocks until the fetch completes.
///
/// When the cached value enters the refresh window (close to TTL expiry),
/// [`get()`](Self::get) starts a background fetch and returns the current value
/// immediately. Multiple concurrent callers share a single in-flight fetch.
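///
/// A minimal usage sketch (illustrative only; the `String` value and
/// `std::io::Error` error types here are placeholders, not anything this
/// module requires):
///
/// ```ignore
/// // 30-second TTL; refreshes start in the background during the final 5 seconds.
/// let cache: BackgroundCache<String, std::io::Error> =
///     BackgroundCache::new(Duration::from_secs(30), Duration::from_secs(5));
/// // The first call blocks on the fetch closure; later calls within the TTL
/// // return the cached value without invoking the closure again.
/// let value = cache
///     .get(|| async { Ok("credential".to_string()) })
///     .await?;
/// ```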
pub struct BackgroundCache<V, E> {
inner: Arc<Mutex<CacheInner<V, E>>>,
ttl: Duration,
refresh_window: Duration,
}
impl<V, E> std::fmt::Debug for BackgroundCache<V, E> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("BackgroundCache")
.field("ttl", &self.ttl)
.field("refresh_window", &self.refresh_window)
.finish_non_exhaustive()
}
}
impl<V, E> Clone for BackgroundCache<V, E> {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
ttl: self.ttl,
refresh_window: self.refresh_window,
}
}
}
impl<V, E> BackgroundCache<V, E>
where
V: Clone + Send + Sync + 'static,
E: Send + Sync + 'static,
{
pub fn new(ttl: Duration, refresh_window: Duration) -> Self {
assert!(
refresh_window < ttl,
"refresh_window ({refresh_window:?}) must be less than ttl ({ttl:?})"
);
Self {
inner: Arc::new(Mutex::new(CacheInner {
state: State::Empty,
generation: 0,
})),
ttl,
refresh_window,
}
}
/// Returns the cached value if it's fresh (not in the refresh window).
///
/// This is a cheap synchronous check useful as a fast path before
/// constructing a fetch closure for [`get()`](Self::get).
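///
/// A small sketch (assumes a `cache` handle is in scope and that
/// `fetch_credentials()` is a hypothetical async helper):
///
/// ```ignore
/// // Skip building a fetch future when the cached value is still fresh.
/// let value = match cache.try_get() {
///     Some(value) => value,
///     None => cache.get(|| async { fetch_credentials().await }).await?,
/// };
/// ```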
pub fn try_get(&self) -> Option<V> {
let cache = self.inner.lock().unwrap();
cache.state.fresh_value(self.ttl, self.refresh_window)
}
/// Get the cached value, fetching if needed.
///
/// The closure is called to create the fetch future only when a new fetch
/// is needed. If the cache already has an in-flight fetch, the closure is
/// not called and the caller joins the existing fetch.
pub async fn get<F, Fut>(&self, fetch: F) -> Result<V, Arc<E>>
where
F: FnOnce() -> Fut + Send + 'static,
Fut: Future<Output = Result<V, E>> + Send + 'static,
{
// Fast path: check if cache is fresh
{
let cache = self.inner.lock().unwrap();
if let Some(value) = cache.state.fresh_value(self.ttl, self.refresh_window) {
return Ok(value);
}
}
// Slow path
let mut fetch = Some(fetch);
let action = {
let mut cache = self.inner.lock().unwrap();
self.determine_action(&mut cache, &mut fetch)
};
match action {
Action::Return(value) => Ok(value),
Action::Wait(fut) => fut.await,
}
}
/// Pre-populate the cache with an initial value.
///
/// This avoids a blocking fetch on the first [`get()`](Self::get) call.
pub fn seed(&self, value: V) {
let mut cache = self.inner.lock().unwrap();
cache.state = State::Current(value, clock::now());
}
/// Invalidate the cache. The next [`get()`](Self::get) will start a fresh fetch.
///
/// Any in-flight background fetch from before this call will not update the
/// cache (the generation counter prevents stale writes).
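///
/// A brief sketch (hypothetical string value; assumes a `cache` handle is in scope):
///
/// ```ignore
/// cache.invalidate();
/// // The next get() ignores anything cached before invalidate() and fetches anew;
/// // a background fetch started before the call cannot repopulate the cache.
/// let fresh = cache.get(|| async { Ok("fresh".to_string()) }).await?;
/// ```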
pub fn invalidate(&self) {
let mut cache = self.inner.lock().unwrap();
cache.state = State::Empty;
cache.generation += 1;
}
fn determine_action<F, Fut>(
&self,
cache: &mut CacheInner<V, E>,
fetch: &mut Option<F>,
) -> Action<V, E>
where
F: FnOnce() -> Fut + Send + 'static,
Fut: Future<Output = Result<V, E>> + Send + 'static,
{
match &cache.state {
State::Empty => {
let f = fetch
.take()
.expect("fetch closure required for empty cache");
let shared = self.start_fetch(cache, f, None);
Action::Wait(shared)
}
State::Current(value, cached_at) => {
let elapsed = clock::now().duration_since(*cached_at);
if elapsed < self.ttl - self.refresh_window {
Action::Return(value.clone())
} else if elapsed < self.ttl {
// In refresh window: start background fetch, return current value
let value = value.clone();
let previous = Some((value.clone(), *cached_at));
if let Some(f) = fetch.take() {
// The spawned task inside start_fetch drives the future;
// we don't need to await the returned handle here.
drop(self.start_fetch(cache, f, previous));
}
Action::Return(value)
} else {
// Expired: must wait for fetch
let previous = Some((value.clone(), *cached_at));
let f = fetch
.take()
.expect("fetch closure required for expired cache");
let shared = self.start_fetch(cache, f, previous);
Action::Wait(shared)
}
}
State::Refreshing { previous, future } => {
// If the background fetch already completed (spawned task hasn't
// run yet to update state), transition the state and re-evaluate.
if let Some(result) = future.peek() {
match result {
Ok(value) => {
cache.state = State::Current(value.clone(), clock::now());
}
Err(_) => {
cache.state = match previous.clone() {
Some((v, t)) => State::Current(v, t),
None => State::Empty,
};
}
}
return self.determine_action(cache, fetch);
}
if let Some((value, cached_at)) = previous {
if clock::now().duration_since(*cached_at) < self.ttl {
Action::Return(value.clone())
} else {
Action::Wait(future.clone())
}
} else {
Action::Wait(future.clone())
}
}
}
}
fn start_fetch<F, Fut>(
&self,
cache: &mut CacheInner<V, E>,
fetch: F,
previous: Option<(V, clock::Instant)>,
) -> SharedFut<V, E>
where
F: FnOnce() -> Fut + Send + 'static,
Fut: Future<Output = Result<V, E>> + Send + 'static,
{
let generation = cache.generation;
let shared = async move { (fetch)().await.map_err(Arc::new) }
.boxed()
.shared();
// Spawn task to eagerly drive the future and update state on completion
let inner = self.inner.clone();
let fut_for_spawn = shared.clone();
tokio::spawn(async move {
let result = fut_for_spawn.await;
let mut cache = inner.lock().unwrap();
// Only update if no invalidation has happened since we started
if cache.generation != generation {
return;
}
match result {
Ok(value) => {
cache.state = State::Current(value, clock::now());
}
Err(_) => {
let prev = match &cache.state {
State::Refreshing { previous, .. } => previous.clone(),
_ => None,
};
cache.state = match prev {
Some((v, t)) => State::Current(v, t),
None => State::Empty,
};
}
}
});
cache.state = State::Refreshing {
previous,
future: shared.clone(),
};
shared
}
}
#[cfg(test)]
pub mod clock {
use std::cell::Cell;
use std::time::Duration;
// Re-export Instant so callers use the same type
pub use std::time::Instant;
thread_local! {
static MOCK_NOW: Cell<Option<Instant>> = const { Cell::new(None) };
}
pub fn now() -> Instant {
MOCK_NOW.with(|mock| mock.get().unwrap_or_else(Instant::now))
}
pub fn advance_by(duration: Duration) {
MOCK_NOW.with(|mock| {
let current = mock.get().unwrap_or_else(Instant::now);
mock.set(Some(current + duration));
});
}
#[allow(dead_code)]
pub fn clear_mock() {
MOCK_NOW.with(|mock| mock.set(None));
}
}
#[cfg(not(test))]
mod clock {
// Re-export Instant so callers use the same type
pub use std::time::Instant;
pub fn now() -> Instant {
Instant::now()
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::atomic::{AtomicUsize, Ordering};
#[derive(Debug)]
struct TestError(String);
impl std::fmt::Display for TestError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
const TEST_TTL: Duration = Duration::from_secs(30);
const TEST_REFRESH_WINDOW: Duration = Duration::from_secs(5);
fn new_cache() -> BackgroundCache<String, TestError> {
BackgroundCache::new(TEST_TTL, TEST_REFRESH_WINDOW)
}
fn ok_fetcher(
counter: Arc<AtomicUsize>,
value: &str,
) -> impl FnOnce() -> BoxFuture<'static, Result<String, TestError>> + Send + 'static {
let value = value.to_string();
move || {
counter.fetch_add(1, Ordering::SeqCst);
async move { Ok(value) }.boxed()
}
}
fn err_fetcher(
counter: Arc<AtomicUsize>,
msg: &str,
) -> impl FnOnce() -> BoxFuture<'static, Result<String, TestError>> + Send + 'static {
let msg = msg.to_string();
move || {
counter.fetch_add(1, Ordering::SeqCst);
async move { Err(TestError(msg)) }.boxed()
}
}
#[tokio::test]
async fn test_basic_caching() {
let cache = new_cache();
let count = Arc::new(AtomicUsize::new(0));
let v1 = cache.get(ok_fetcher(count.clone(), "hello")).await.unwrap();
assert_eq!(v1, "hello");
assert_eq!(count.load(Ordering::SeqCst), 1);
// Second call triggers peek transition to Current, returns cached
let v2 = cache.get(ok_fetcher(count.clone(), "hello")).await.unwrap();
assert_eq!(v2, "hello");
assert_eq!(count.load(Ordering::SeqCst), 1);
// Third call still cached
let v3 = cache.get(ok_fetcher(count.clone(), "hello")).await.unwrap();
assert_eq!(v3, "hello");
assert_eq!(count.load(Ordering::SeqCst), 1);
}
#[tokio::test]
async fn test_try_get_returns_none_when_empty() {
let cache: BackgroundCache<String, TestError> = new_cache();
assert!(cache.try_get().is_none());
}
#[tokio::test]
async fn test_try_get_returns_value_when_fresh() {
let cache = new_cache();
let count = Arc::new(AtomicUsize::new(0));
cache.get(ok_fetcher(count.clone(), "hello")).await.unwrap();
// Peek transition
cache.get(ok_fetcher(count.clone(), "hello")).await.unwrap();
assert_eq!(cache.try_get().unwrap(), "hello");
}
#[tokio::test]
async fn test_try_get_returns_none_in_refresh_window() {
let cache = new_cache();
let count = Arc::new(AtomicUsize::new(0));
cache.get(ok_fetcher(count.clone(), "hello")).await.unwrap();
cache.get(ok_fetcher(count.clone(), "hello")).await.unwrap(); // peek
clock::advance_by(Duration::from_secs(26));
assert!(cache.try_get().is_none());
}
#[tokio::test]
async fn test_ttl_expiration() {
let cache = new_cache();
let count = Arc::new(AtomicUsize::new(0));
cache.get(ok_fetcher(count.clone(), "v1")).await.unwrap();
cache.get(ok_fetcher(count.clone(), "v1")).await.unwrap(); // peek
assert_eq!(count.load(Ordering::SeqCst), 1);
clock::advance_by(Duration::from_secs(31));
let v = cache.get(ok_fetcher(count.clone(), "v2")).await.unwrap();
assert_eq!(v, "v2");
assert_eq!(count.load(Ordering::SeqCst), 2);
}
#[tokio::test]
async fn test_invalidate_forces_refetch() {
let cache = new_cache();
let count = Arc::new(AtomicUsize::new(0));
cache.get(ok_fetcher(count.clone(), "v1")).await.unwrap();
cache.get(ok_fetcher(count.clone(), "v1")).await.unwrap(); // peek
assert_eq!(count.load(Ordering::SeqCst), 1);
cache.invalidate();
let v = cache.get(ok_fetcher(count.clone(), "v2")).await.unwrap();
assert_eq!(v, "v2");
assert_eq!(count.load(Ordering::SeqCst), 2);
}
#[tokio::test]
async fn test_concurrent_get_single_fetch() {
let cache = Arc::new(new_cache());
let count = Arc::new(AtomicUsize::new(0));
let mut handles = Vec::new();
for _ in 0..10 {
let cache = cache.clone();
let count = count.clone();
handles.push(tokio::spawn(async move {
cache.get(ok_fetcher(count, "hello")).await.unwrap()
}));
}
let results: Vec<String> = futures::future::try_join_all(handles).await.unwrap();
for r in &results {
assert_eq!(r, "hello");
}
assert_eq!(count.load(Ordering::SeqCst), 1);
}
#[tokio::test]
async fn test_background_refresh_in_window() {
let cache = new_cache();
let count = Arc::new(AtomicUsize::new(0));
// Populate and transition to Current
cache.get(ok_fetcher(count.clone(), "v1")).await.unwrap();
cache.get(ok_fetcher(count.clone(), "v1")).await.unwrap(); // peek
assert_eq!(count.load(Ordering::SeqCst), 1);
// Move into refresh window
clock::advance_by(Duration::from_secs(26));
// Returns cached value and starts background fetch
let v = cache.get(ok_fetcher(count.clone(), "v2")).await.unwrap();
assert_eq!(v, "v1"); // Still old value
assert_eq!(count.load(Ordering::SeqCst), 1); // bg task hasn't run yet
// Advance past TTL to force waiting on the shared future
clock::advance_by(Duration::from_secs(30));
let v = cache.get(ok_fetcher(count.clone(), "v3")).await.unwrap();
assert_eq!(count.load(Ordering::SeqCst), 2);
assert_eq!(v, "v2"); // Got the bg refresh result
}
#[tokio::test]
async fn test_no_duplicate_background_refreshes() {
let cache = new_cache();
let count = Arc::new(AtomicUsize::new(0));
// Populate and transition to Current
cache.get(ok_fetcher(count.clone(), "v1")).await.unwrap();
cache.get(ok_fetcher(count.clone(), "v1")).await.unwrap(); // peek
assert_eq!(count.load(Ordering::SeqCst), 1);
// Move into refresh window
clock::advance_by(Duration::from_secs(26));
// Multiple calls should all return cached, only one bg fetch
for _ in 0..5 {
let v = cache.get(ok_fetcher(count.clone(), "v2")).await.unwrap();
assert_eq!(v, "v1");
}
// Drive the shared future to completion
clock::advance_by(Duration::from_secs(30));
cache.get(ok_fetcher(count.clone(), "v3")).await.unwrap();
// Only 1 additional fetch (the background refresh)
assert_eq!(count.load(Ordering::SeqCst), 2);
}
#[tokio::test]
async fn test_background_refresh_error_preserves_cache() {
let cache = new_cache();
let count = Arc::new(AtomicUsize::new(0));
// Populate and transition to Current
cache.get(ok_fetcher(count.clone(), "v1")).await.unwrap();
cache.get(ok_fetcher(count.clone(), "v1")).await.unwrap(); // peek
assert_eq!(count.load(Ordering::SeqCst), 1);
// Move into refresh window
clock::advance_by(Duration::from_secs(26));
// Start bg refresh that will fail, returns cached value
let v = cache.get(err_fetcher(count.clone(), "fail")).await.unwrap();
assert_eq!(v, "v1");
// Still in refresh window, previous is valid
let v = cache.get(err_fetcher(count.clone(), "fail")).await.unwrap();
assert_eq!(v, "v1");
// Advance past TTL to drive the failed future
clock::advance_by(Duration::from_secs(30));
// The peek error path restores previous, but it's expired,
// so a new fetch is needed. This one also fails.
let result = cache.get(err_fetcher(count.clone(), "fail again")).await;
assert!(result.is_err());
assert_eq!(count.load(Ordering::SeqCst), 2);
}
#[tokio::test]
async fn test_invalidation_during_fetch_prevents_stale_update() {
let cache = new_cache();
let count = Arc::new(AtomicUsize::new(0));
// Populate and transition to Current
cache.get(ok_fetcher(count.clone(), "v1")).await.unwrap();
cache.get(ok_fetcher(count.clone(), "v1")).await.unwrap(); // peek
// Move into refresh window to start background fetch
clock::advance_by(Duration::from_secs(26));
cache.get(ok_fetcher(count.clone(), "stale")).await.unwrap();
// Invalidate before bg task completes
cache.invalidate();
// Advance past TTL
clock::advance_by(Duration::from_secs(30));
// Should get fresh data, not the stale background result
let v = cache.get(ok_fetcher(count.clone(), "fresh")).await.unwrap();
assert_eq!(v, "fresh");
}
}

View File

@@ -15,6 +15,7 @@ use arrow_array::{
use arrow_schema::{DataType, Field, Schema};
use futures::StreamExt;
use lancedb::{
arrow::IntoArrow,
connect,
embeddings::{EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry},
query::ExecutableQuery,
@@ -252,7 +253,7 @@ async fn test_no_func_in_registry_on_add() -> Result<()> {
Ok(())
}
fn create_some_records() -> Result<Box<dyn arrow_array::RecordBatchReader + Send>> {
fn create_some_records() -> Result<impl IntoArrow> {
const TOTAL: usize = 2;
let schema = Arc::new(Schema::new(vec![

View File

@@ -4,7 +4,7 @@
#![cfg(feature = "s3-test")]
use std::sync::Arc;
use arrow_array::{Int32Array, RecordBatch, StringArray};
use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, StringArray};
use arrow_schema::{DataType, Field, Schema};
use aws_config::{BehaviorVersion, ConfigLoader, Region, SdkConfig};
@@ -111,6 +111,7 @@ async fn test_minio_lifecycle() -> Result<()> {
.await?;
let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
let table = db.create_table("test_table", data).execute().await?;
@@ -126,6 +127,7 @@ async fn test_minio_lifecycle() -> Result<()> {
assert_eq!(row_count, 3);
let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
table.add(data).execute().await?;
db.drop_table("test_table", &[]).await?;
@@ -245,6 +247,7 @@ async fn test_encryption() -> Result<()> {
// Create a table with encryption
let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
let mut builder = db.create_table("test_table", data);
for (key, value) in CONFIG {
@@ -271,6 +274,7 @@ async fn test_encryption() -> Result<()> {
let table = db.open_table("test_table").execute().await?;
let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
table.add(data).execute().await?;
validate_objects_encrypted(&bucket.0, "test_table", &key.0).await;
@@ -296,6 +300,7 @@ async fn test_table_storage_options_override() -> Result<()> {
// Create table overriding with key2 encryption
let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
let _table = db
.create_table("test_override", data)
.storage_option("aws_sse_kms_key_id", &key2.0)
@@ -307,6 +312,7 @@ async fn test_table_storage_options_override() -> Result<()> {
// Also test that a table created without override uses connection settings
let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
let _table2 = db.create_table("test_inherit", data).execute().await?;
// Verify this table uses key1 from connection
@@ -413,6 +419,7 @@ async fn test_concurrent_dynamodb_commit() {
.unwrap();
let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
let table = db.create_table("test_table", data).execute().await.unwrap();
@@ -423,6 +430,7 @@ async fn test_concurrent_dynamodb_commit() {
let table = db.open_table("test_table").execute().await.unwrap();
let data = data.clone();
tasks.push(tokio::spawn(async move {
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
table.add(data).execute().await.unwrap();
}));
}