Mirror of https://github.com/lancedb/lancedb.git (synced 2026-03-28 03:20:39 +00:00)

Compare commits: python-v0. ... codex/upda (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | 237c0ae572 | |

.github/workflows/codex-fix-ci.yml (vendored): 173 deletions (file removed)

@@ -1,173 +0,0 @@
-name: Codex Fix CI
-
-on:
-  workflow_dispatch:
-    inputs:
-      workflow_run_url:
-        description: "Failing CI workflow run URL (e.g., https://github.com/lancedb/lancedb/actions/runs/12345678)"
-        required: true
-        type: string
-      branch:
-        description: "Branch to fix (e.g., main, release/v2.0, or feature-branch)"
-        required: true
-        type: string
-      guidelines:
-        description: "Additional guidelines for the fix (optional)"
-        required: false
-        type: string
-
-permissions:
-  contents: write
-  pull-requests: write
-  actions: read
-
-jobs:
-  fix-ci:
-    runs-on: warp-ubuntu-latest-x64-4x
-    timeout-minutes: 60
-    env:
-      CC: clang
-      CXX: clang++
-    steps:
-      - name: Show inputs
-        run: |
-          echo "workflow_run_url = ${{ inputs.workflow_run_url }}"
-          echo "branch = ${{ inputs.branch }}"
-          echo "guidelines = ${{ inputs.guidelines }}"
-
-      - name: Checkout Repo
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.branch }}
-          fetch-depth: 0
-          persist-credentials: true
-
-      - name: Set up Node.js
-        uses: actions/setup-node@v4
-        with:
-          node-version: 20
-
-      - name: Install Codex CLI
-        run: npm install -g @openai/codex
-
-      - name: Install Rust toolchain
-        uses: dtolnay/rust-toolchain@stable
-        with:
-          toolchain: stable
-          components: clippy, rustfmt
-
-      - uses: Swatinem/rust-cache@v2
-
-      - name: Install system dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y protobuf-compiler libssl-dev
-
-      - name: Set up Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: '3.11'
-
-      - name: Install Python dependencies
-        run: |
-          pip install maturin ruff pytest pyarrow pandas polars
-
-      - name: Set up Java
-        uses: actions/setup-java@v4
-        with:
-          distribution: temurin
-          java-version: '11'
-          cache: maven
-
-      - name: Install Node.js dependencies for TypeScript bindings
-        run: |
-          cd nodejs
-          npm ci
-
-      - name: Configure git user
-        run: |
-          git config user.name "lancedb automation"
-          git config user.email "robot@lancedb.com"
-
-      - name: Run Codex to fix CI failure
-        env:
-          WORKFLOW_RUN_URL: ${{ inputs.workflow_run_url }}
-          BRANCH: ${{ inputs.branch }}
-          GUIDELINES: ${{ inputs.guidelines }}
-          GITHUB_TOKEN: ${{ secrets.ROBOT_TOKEN }}
-          GH_TOKEN: ${{ secrets.ROBOT_TOKEN }}
-          OPENAI_API_KEY: ${{ secrets.CODEX_TOKEN }}
-        run: |
-          set -euo pipefail
-
-          cat <<EOF >/tmp/codex-prompt.txt
-          You are running inside the lancedb repository on a GitHub Actions runner. Your task is to fix a CI failure.
-
-          Input parameters:
-          - Failing workflow run URL: ${WORKFLOW_RUN_URL}
-          - Branch to fix: ${BRANCH}
-          - Additional guidelines: ${GUIDELINES:-"None provided"}
-
-          Follow these steps exactly:
-
-          1. Extract the run ID from the workflow URL. The URL format is https://github.com/lancedb/lancedb/actions/runs/<run_id>.
-
-          2. Use "gh run view <run_id> --json jobs,conclusion,name" to get information about the failed run.
-
-          3. Identify which jobs failed. For each failed job, use "gh run view <run_id> --job <job_id> --log-failed" to get the failure logs.
-
-          4. Analyze the failure logs to understand what went wrong. Common failures include:
-          - Compilation errors
-          - Test failures
-          - Clippy warnings treated as errors
-          - Formatting issues
-          - Dependency issues
-
-          5. Based on the analysis, fix the issues in the codebase:
-          - For compilation errors: Fix the code that doesn't compile
-          - For test failures: Fix the failing tests or the code they test
-          - For clippy warnings: Apply the suggested fixes
-          - For formatting issues: Run "cargo fmt --all"
-          - For other issues: Apply appropriate fixes
-
-          6. After making fixes, verify them locally:
-          - Run "cargo fmt --all" to ensure formatting is correct
-          - Run "cargo clippy --workspace --tests --all-features -- -D warnings" to check for issues
-          - Run ONLY the specific failing tests to confirm they pass now:
-            - For Rust test failures: Run the specific test with "cargo test -p <crate> <test_name>"
-            - For Python test failures: Build with "cd python && maturin develop" then run "pytest <specific_test_file>::<test_name>"
-            - For Java test failures: Run "cd java && mvn test -Dtest=<TestClass>#<testMethod>"
-            - For TypeScript test failures: Run "cd nodejs && npm run build && npm test -- --testNamePattern='<test_name>'"
-          - Do NOT run the full test suite - only run the tests that were failing
-
-          7. If the additional guidelines are provided, follow them as well.
-
-          8. Inspect "git status --short" and "git diff" to review your changes.
-
-          9. Create a fix branch: "git checkout -b codex/fix-ci-<run_id>".
-
-          10. Stage all changes with "git add -A" and commit with message "fix: resolve CI failures from run <run_id>".
-
-          11. Push the branch: "git push origin codex/fix-ci-<run_id>". If the remote branch exists, delete it first with "gh api -X DELETE repos/lancedb/lancedb/git/refs/heads/codex/fix-ci-<run_id>" then push. Do NOT use "git push --force" or "git push -f".
-
-          12. Create a pull request targeting "${BRANCH}":
-          - Title: "ci: <short summary describing the fix>" (e.g., "ci: fix clippy warnings in lancedb" or "ci: resolve test flakiness in vector search")
-          - First, write the PR body to /tmp/pr-body.md using a heredoc (cat <<'PREOF' > /tmp/pr-body.md). The body should include:
-            - Link to the failing workflow run
-            - Summary of what failed
-            - Description of the fixes applied
-          - Then run "gh pr create --base ${BRANCH} --body-file /tmp/pr-body.md".
-
-          13. Display the new PR URL, "git status --short", and a summary of what was fixed.
-
-          Constraints:
-          - Use bash commands for all operations.
-          - Do not merge the PR.
-          - Do not modify GitHub workflow files unless they are the cause of the failure.
-          - If any command fails, diagnose and attempt to fix the issue instead of aborting immediately.
-          - If you cannot fix the issue automatically, create the PR anyway with a clear explanation of what you tried and what remains to be fixed.
-          - env "GH_TOKEN" is available, use "gh" tools for GitHub-related operations.
-          EOF
-
-          printenv OPENAI_API_KEY | codex login --with-api-key
-          codex --config shell_environment_policy.ignore_default_excludes=true exec --dangerously-bypass-approvals-and-sandbox "$(cat /tmp/codex-prompt.txt)"
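
Step 1 of the deleted prompt has Codex parse the run ID out of the Actions URL. A minimal Python sketch of just that parsing step (the helper name is illustrative; nothing like it ships in the repository):

```python
import re

def extract_run_id(workflow_run_url: str) -> str:
    # URL format per the prompt: https://github.com/lancedb/lancedb/actions/runs/<run_id>
    match = re.search(r"/actions/runs/(\d+)", workflow_run_url)
    if match is None:
        raise ValueError(f"not a workflow run URL: {workflow_run_url}")
    return match.group(1)

assert extract_run_id("https://github.com/lancedb/lancedb/actions/runs/12345678") == "12345678"
```
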
Cargo.lock (generated): 85 lines changed

@@ -3072,9 +3072,8 @@ checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"

 [[package]]
 name = "fsst"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f9e5c0b1c67a38cb92b41535d44623483beb9511592ae23a3bf42ddec758690"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow-array",
  "rand 0.9.2",

@@ -4405,9 +4404,8 @@ dependencies = [

 [[package]]
 name = "lance"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b7f07b905df393a5554eba19055c620f9ea25a3e40a013bda4bd8dc4ca66f01"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow",
  "arrow-arith",

@@ -4472,9 +4470,8 @@ dependencies = [

 [[package]]
 name = "lance-arrow"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "100e076cb81c8f0c24cd2881c706fc53e037c7d6e81eb320e929e265d157effb"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow-array",
  "arrow-buffer",

@@ -4493,9 +4490,8 @@ dependencies = [

 [[package]]
 name = "lance-bitpacking"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "588318d3d1ba0f97162fab39a323a0a49866bb35b32af42572c6b6a12296fa27"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrayref",
  "paste",

@@ -4504,9 +4500,8 @@ dependencies = [

 [[package]]
 name = "lance-core"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6fa01d1cf490ccfd3b8eaeee2781415d0419e6be8366040e57e43677abf2644e"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow-array",
  "arrow-buffer",

@@ -4543,9 +4538,8 @@ dependencies = [

 [[package]]
 name = "lance-datafusion"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef89a39e3284eef76f79e63f23de8881a0583ad6feb20ed39f47eadd847a2b88"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow",
  "arrow-array",

@@ -4575,9 +4569,8 @@ dependencies = [

 [[package]]
 name = "lance-datagen"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc2a60eef5c47e65d91e2ffa8e7e1629c52e7190c8b88a371a1a60601dc49371"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow",
  "arrow-array",

@@ -4595,9 +4588,8 @@ dependencies = [

 [[package]]
 name = "lance-encoding"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95ce4a6631308aa681b2671af8f2a845ff781f8d4e755a2a7ccd012379467094"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow-arith",
  "arrow-array",

@@ -4634,9 +4626,8 @@ dependencies = [

 [[package]]
 name = "lance-file"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2d4d82357cbfaa1a18494226c15b1cb3c8ed0b6c84b91146323c82047ede419"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow-arith",
  "arrow-array",

@@ -4668,9 +4659,8 @@ dependencies = [

 [[package]]
 name = "lance-geo"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7183fc870da62826f0f97df8007b634da053eb310157856efe1dc74f446951c"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "datafusion",
  "geo-traits",

@@ -4684,9 +4674,8 @@ dependencies = [

 [[package]]
 name = "lance-index"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "20e9c5aa7024a63af9ae89ee8c0f23c8421b7896742e5cd4a271a60f9956cb80"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow",
  "arrow-arith",

@@ -4753,9 +4742,8 @@ dependencies = [

 [[package]]
 name = "lance-io"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7d2af0b17fb374a8181bcf1a10bce5703ae3ee4373c1587ce4bba23e15e45c8"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow",
  "arrow-arith",

@@ -4795,9 +4783,8 @@ dependencies = [

 [[package]]
 name = "lance-linalg"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5125aa62696e75a7475807564b4921f252d8815be606b84bc00e6def0f5c24bb"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow-array",
  "arrow-buffer",

@@ -4813,9 +4800,8 @@ dependencies = [

 [[package]]
 name = "lance-namespace"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "70545c2676ce954dfd801da5c6a631a70bba967826cd3a8f31b47d1f04bbfed3"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow",
  "async-trait",

@@ -4827,9 +4813,8 @@ dependencies = [

 [[package]]
 name = "lance-namespace-impls"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92519f9f27d62655030aac62ea0db9614b65f086ebe651c1b0a96e351b668022"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow",
  "arrow-ipc",

@@ -4872,9 +4857,8 @@ dependencies = [

 [[package]]
 name = "lance-table"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b06ad37bd90045de8ef533df170c6098e6ff6ecb427aade47d7db8e2c86f2678"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow",
  "arrow-array",

@@ -4913,9 +4897,8 @@ dependencies = [

 [[package]]
 name = "lance-testing"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd7f13b0f2b6337af015dcb1519645388dca08c970037aa77aff517687c4019f"
+version = "2.0.1-rc.1"
+source = "git+https://github.com/lance-format/lance.git?tag=v2.0.1-rc.1#230f285e1c0a72d0ccb723721591e78dd871a419"
 dependencies = [
  "arrow-array",
  "arrow-schema",

Cargo.toml: 28 lines changed

@@ -15,20 +15,20 @@ categories = ["database-implementations"]
 rust-version = "1.88.0"

 [workspace.dependencies]
-lance = { "version" = "=2.0.1", default-features = false }
-lance-core = "=2.0.1"
-lance-datagen = "=2.0.1"
-lance-file = "=2.0.1"
-lance-io = { "version" = "=2.0.1", default-features = false }
-lance-index = "=2.0.1"
-lance-linalg = "=2.0.1"
-lance-namespace = "=2.0.1"
-lance-namespace-impls = { "version" = "=2.0.1", default-features = false }
-lance-table = "=2.0.1"
-lance-testing = "=2.0.1"
-lance-datafusion = "=2.0.1"
-lance-encoding = "=2.0.1"
-lance-arrow = "=2.0.1"
+lance = { "version" = "=2.0.1-rc.1", default-features = false, "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
+lance-core = { "version" = "=2.0.1-rc.1", "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
+lance-datagen = { "version" = "=2.0.1-rc.1", "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
+lance-file = { "version" = "=2.0.1-rc.1", "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
+lance-io = { "version" = "=2.0.1-rc.1", default-features = false, "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
+lance-index = { "version" = "=2.0.1-rc.1", "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
+lance-linalg = { "version" = "=2.0.1-rc.1", "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
+lance-namespace = { "version" = "=2.0.1-rc.1", "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
+lance-namespace-impls = { "version" = "=2.0.1-rc.1", default-features = false, "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
+lance-table = { "version" = "=2.0.1-rc.1", "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
+lance-testing = { "version" = "=2.0.1-rc.1", "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
+lance-datafusion = { "version" = "=2.0.1-rc.1", "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
+lance-encoding = { "version" = "=2.0.1-rc.1", "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
+lance-arrow = { "version" = "=2.0.1-rc.1", "tag" = "v2.0.1-rc.1", "git" = "https://github.com/lance-format/lance.git" }
 ahash = "0.8"
 # Note that this one does not include pyarrow
 arrow = { version = "57.2", optional = false }

@@ -28,7 +28,7 @@
 <properties>
   <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
   <arrow.version>15.0.0</arrow.version>
-  <lance-core.version>2.0.1</lance-core.version>
+  <lance-core.version>2.0.1-rc.1</lance-core.version>
   <spotless.skip>false</spotless.skip>
   <spotless.version>2.30.0</spotless.version>
   <spotless.java.googlejavaformat.version>1.7</spotless.java.googlejavaformat.version>

@@ -13,7 +13,6 @@ use crate::header::JsHeaderProvider;
 use crate::table::Table;
 use crate::ConnectionOptions;
 use lancedb::connection::{ConnectBuilder, Connection as LanceDBConnection};
-
 use lancedb::ipc::{ipc_file_to_batches, ipc_file_to_schema};

 #[napi]

@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.30.0-beta.0"
+current_version = "0.29.2"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
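
The visible fragment of the `parse` pattern is a standard semver matcher. A sketch of how it tokenizes the new version string; the `patch` group is an assumption, since the hunk cuts the pattern off after `minor`:

```python
import re

# First two groups copied from the hunk; the patch group is assumed.
PARSE = re.compile(
    r"(?P<major>0|[1-9]\d*)\."
    r"(?P<minor>0|[1-9]\d*)\."
    r"(?P<patch>0|[1-9]\d*)"
)
match = PARSE.match("0.29.2")
assert match is not None
assert (match["major"], match["minor"], match["patch"]) == ("0", "29", "2")
```
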
@@ -1,6 +1,6 @@
 [package]
 name = "lancedb-python"
-version = "0.30.0-beta.0"
+version = "0.29.2"
 edition.workspace = true
 description = "Python bindings for LanceDB"
 license.workspace = true

@@ -9,7 +9,7 @@ import json
 from ._lancedb import async_permutation_builder, PermutationReader
 from .table import LanceTable
 from .background_loop import LOOP
-from .util import batch_to_tensor, batch_to_tensor_rows
+from .util import batch_to_tensor
 from typing import Any, Callable, Iterator, Literal, Optional, TYPE_CHECKING, Union

 if TYPE_CHECKING:

@@ -333,11 +333,7 @@ class Transforms:
     """

     @staticmethod
-    def arrow2python(batch: pa.RecordBatch) -> list[dict[str, Any]]:
-        return batch.to_pylist()
-
-    @staticmethod
-    def arrow2pythoncol(batch: pa.RecordBatch) -> dict[str, list[Any]]:
+    def arrow2python(batch: pa.RecordBatch) -> dict[str, list[Any]]:
         return batch.to_pydict()

     @staticmethod
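
The behavioral difference between the removed and surviving transform is exactly pyarrow's row-major versus column-major conversion:

```python
import pyarrow as pa

batch = pa.RecordBatch.from_pydict({"id": [1, 2], "value": ["a", "b"]})
# Removed arrow2python (row-major, one dict per row):
assert batch.to_pylist() == [{"id": 1, "value": "a"}, {"id": 2, "value": "b"}]
# Surviving arrow2python (column-major, formerly arrow2pythoncol):
assert batch.to_pydict() == {"id": [1, 2], "value": ["a", "b"]}
```
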
@@ -691,17 +687,7 @@ class Permutation:
         return

     def with_format(
-        self,
-        format: Literal[
-            "numpy",
-            "python",
-            "python_col",
-            "pandas",
-            "arrow",
-            "torch",
-            "torch_col",
-            "polars",
-        ],
+        self, format: Literal["numpy", "python", "pandas", "arrow", "torch", "polars"]
     ) -> "Permutation":
         """
         Set the format for batches

@@ -710,18 +696,16 @@ class Permutation:

         The format can be one of:
         - "numpy" - the batch will be a dict of numpy arrays (one per column)
-        - "python" - the batch will be a list of dicts (one per row)
-        - "python_col" - the batch will be a dict of lists (one entry per column)
+        - "python" - the batch will be a dict of lists (one per column)
         - "pandas" - the batch will be a pandas DataFrame
         - "arrow" - the batch will be a pyarrow RecordBatch
-        - "torch" - the batch will be a list of tensors, one per row
-        - "torch_col" - the batch will be a 2D torch tensor (first dim indexes columns)
+        - "torch" - the batch will be a two dimensional torch tensor
         - "polars" - the batch will be a polars DataFrame

         Conversion may or may not involve a data copy. Lance uses Arrow internally
-        and so it is able to zero-copy to the arrow and polars formats.
+        and so it is able to zero-copy to the arrow and polars.

-        Conversion to torch_col will be zero-copy but will only support a subset of data
+        Conversion to torch will be zero-copy but will only support a subset of data
         types (numeric types).

         Conversion to numpy and/or pandas will typically be zero-copy for numeric
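
A small usage sketch of the narrowed format set, pieced together from the updated tests further down; the temp directory, table name, and columns are illustrative, not part of the diff:

```python
import tempfile

import lancedb
import pyarrow as pa
from lancedb.permutation import Permutation

db = lancedb.connect(tempfile.mkdtemp())
tbl = db.create_table("demo", pa.table({"id": range(10), "value": range(10)}))
perm = Permutation.identity(tbl)

# "python" now yields one dict of lists per batch (column-major)
batch = next(perm.with_format("python").iter(10, skip_last_batch=False))
assert isinstance(batch, dict) and len(batch["id"]) == 10

# "torch" now yields one 2D tensor per batch: (num_columns, num_rows)
batch = next(perm.with_format("torch").iter(10, skip_last_batch=False))
assert batch.shape == (2, 10)
```
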
@@ -734,8 +718,6 @@ class Permutation:
         assert format is not None, "format is required"
         if format == "python":
             return self.with_transform(Transforms.arrow2python)
-        if format == "python_col":
-            return self.with_transform(Transforms.arrow2pythoncol)
         elif format == "numpy":
             return self.with_transform(Transforms.arrow2numpy)
         elif format == "pandas":

@@ -743,8 +725,6 @@
         elif format == "arrow":
             return self.with_transform(Transforms.arrow2arrow)
         elif format == "torch":
-            return self.with_transform(batch_to_tensor_rows)
-        elif format == "torch_col":
             return self.with_transform(batch_to_tensor)
         elif format == "polars":
             return self.with_transform(Transforms.arrow2polars())
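
Since `with_format` is sugar over `with_transform`, a caller who wants the removed row-major shape back can supply it as a one-line custom transform. Continuing the sketch above (`to_pylist` is plain pyarrow):

```python
rows_perm = perm.with_transform(lambda batch: batch.to_pylist())
rows = next(rows_perm.iter(10, skip_last_batch=False))
assert rows[0] == {"id": 0, "value": 0}
```
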
@@ -419,22 +419,3 @@ def batch_to_tensor(batch: pa.RecordBatch):
     """
     torch = attempt_import_or_raise("torch", "torch")
     return torch.stack([torch.from_dlpack(col) for col in batch.columns])
-
-
-def batch_to_tensor_rows(batch: pa.RecordBatch):
-    """
-    Convert a PyArrow RecordBatch to a list of PyTorch Tensor, one per row
-
-    Each column is converted to a tensor (using zero-copy via DLPack)
-    and the columns are then stacked into a single tensor. The 2D tensor
-    is then converted to a list of tensors, one per row
-
-    Fails if torch or numpy is not installed.
-    Fails if a column's data type is not supported by PyTorch.
-    """
-    torch = attempt_import_or_raise("torch", "torch")
-    numpy = attempt_import_or_raise("numpy", "numpy")
-    columns = [col.to_numpy(zero_copy_only=False) for col in batch.columns]
-    stacked = torch.tensor(numpy.column_stack(columns))
-    rows = list(stacked.unbind(dim=0))
-    return rows
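
What the surviving helper produces, shown standalone; assumes torch is installed and the batch holds null-free numeric columns, as its docstring requires:

```python
import pyarrow as pa
import torch

batch = pa.RecordBatch.from_pydict({"id": [1, 2, 3], "value": [4, 5, 6]})
# Same expression as batch_to_tensor: one tensor per column via DLPack,
# stacked into a (num_columns, num_rows) tensor.
stacked = torch.stack([torch.from_dlpack(col) for col in batch.columns])
assert stacked.shape == (2, 3)
assert stacked.dtype == torch.int64
```
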
@@ -664,20 +664,23 @@ def test_iter_basic(some_permutation: Permutation):
     expected_batches = (950 + batch_size - 1) // batch_size  # ceiling division
     assert len(batches) == expected_batches

-    # Check that all batches are lists of dicts (default python format)
-    assert all(isinstance(batch, list) for batch in batches)
+    # Check that all batches are dicts (default python format)
+    assert all(isinstance(batch, dict) for batch in batches)

     # Check that batches have the correct structure
     for batch in batches:
-        assert "id" in batch[0]
-        assert "value" in batch[0]
+        assert "id" in batch
+        assert "value" in batch
+        assert isinstance(batch["id"], list)
+        assert isinstance(batch["value"], list)

     # Check that all batches except the last have the correct size
     for batch in batches[:-1]:
-        assert len(batch) == batch_size
+        assert len(batch["id"]) == batch_size
+        assert len(batch["value"]) == batch_size

     # Last batch might be smaller
-    assert len(batches[-1]) <= batch_size
+    assert len(batches[-1]["id"]) <= batch_size


 def test_iter_skip_last_batch(some_permutation: Permutation):
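
In isolation, the batch-count arithmetic these tests encode: 950 rows at batch size 100 yield ten batches when the short tail is kept and nine when it is skipped.

```python
total_rows, batch_size = 950, 100
assert (total_rows + batch_size - 1) // batch_size == 10  # ceiling: skip_last_batch=False
assert total_rows // batch_size == 9                      # floor: skip_last_batch=True
```
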
@@ -696,11 +699,11 @@ def test_iter_skip_last_batch(some_permutation: Permutation):
     if 950 % batch_size != 0:
         assert len(batches_without_skip) == num_full_batches + 1
         # Last batch should be smaller
-        assert len(batches_without_skip[-1]) == 950 % batch_size
+        assert len(batches_without_skip[-1]["id"]) == 950 % batch_size

     # All batches with skip_last_batch should be full size
     for batch in batches_with_skip:
-        assert len(batch) == batch_size
+        assert len(batch["id"]) == batch_size


 def test_iter_different_batch_sizes(some_permutation: Permutation):
@@ -717,12 +720,12 @@ def test_iter_different_batch_sizes(some_permutation: Permutation):
     # Test with batch size equal to total rows
     single_batch = list(some_permutation.iter(950, skip_last_batch=False))
     assert len(single_batch) == 1
-    assert len(single_batch[0]) == 950
+    assert len(single_batch[0]["id"]) == 950

     # Test with batch size larger than total rows
     oversized_batch = list(some_permutation.iter(10000, skip_last_batch=False))
     assert len(oversized_batch) == 1
-    assert len(oversized_batch[0]) == 950
+    assert len(oversized_batch[0]["id"]) == 950


 def test_dunder_iter(some_permutation: Permutation):
@@ -735,13 +738,15 @@ def test_dunder_iter(some_permutation: Permutation):

     # All batches should be full size
     for batch in batches:
-        assert len(batch) == 100
+        assert len(batch["id"]) == 100
+        assert len(batch["value"]) == 100

     some_permutation = some_permutation.with_batch_size(400)
     batches = list(some_permutation)
     assert len(batches) == 2  # floor(950 / 400) since skip_last_batch=True
     for batch in batches:
-        assert len(batch) == 400
+        assert len(batch["id"]) == 400
+        assert len(batch["value"]) == 400


 def test_iter_with_different_formats(some_permutation: Permutation):
@@ -756,7 +761,7 @@ def test_iter_with_different_formats(some_permutation: Permutation):
     # Test with python format (default)
     python_perm = some_permutation.with_format("python")
     python_batches = list(python_perm.iter(batch_size, skip_last_batch=False))
-    assert all(isinstance(batch, list) for batch in python_batches)
+    assert all(isinstance(batch, dict) for batch in python_batches)

     # Test with pandas format
     pandas_perm = some_permutation.with_format("pandas")

@@ -775,8 +780,8 @@ def test_iter_with_column_selection(some_permutation: Permutation):

     # Check that batches only contain the id column
     for batch in batches:
-        assert "id" in batch[0]
-        assert "value" not in batch[0]
+        assert "id" in batch
+        assert "value" not in batch


 def test_iter_with_column_rename(some_permutation: Permutation):
@@ -786,9 +791,9 @@ def test_iter_with_column_rename(some_permutation: Permutation):

     # Check that batches have the renamed column
     for batch in batches:
-        assert "id" in batch[0]
-        assert "data" in batch[0]
-        assert "value" not in batch[0]
+        assert "id" in batch
+        assert "data" in batch
+        assert "value" not in batch


 def test_iter_with_limit_offset(some_permutation: Permutation):
@@ -807,14 +812,14 @@ def test_iter_with_limit_offset(some_permutation: Permutation):
     assert len(limit_batches) == 5

     no_skip = some_permutation.iter(101, skip_last_batch=False)
-    row_100 = next(no_skip)[100]["id"]
+    row_100 = next(no_skip)["id"][100]

     # Test with both limit and offset
     limited_perm = some_permutation.with_skip(100).with_take(300)
     limited_batches = list(limited_perm.iter(100, skip_last_batch=False))
     # Should have 3 batches (300 / 100)
     assert len(limited_batches) == 3
-    assert limited_batches[0][0]["id"] == row_100
+    assert limited_batches[0]["id"][0] == row_100


 def test_iter_empty_permutation(mem_db):

@@ -837,7 +842,7 @@ def test_iter_single_row(mem_db):
     # With skip_last_batch=False, should get one batch
     batches = list(perm.iter(10, skip_last_batch=False))
     assert len(batches) == 1
-    assert len(batches[0]) == 1
+    assert len(batches[0]["id"]) == 1

     # With skip_last_batch=True, should skip the single row (since it's < batch_size)
     batches_skip = list(perm.iter(10, skip_last_batch=True))

@@ -855,7 +860,8 @@ def test_identity_permutation(mem_db):

     batches = list(permutation.iter(10, skip_last_batch=False))
     assert len(batches) == 1
-    assert len(batches[0]) == 10
+    assert len(batches[0]["id"]) == 10
+    assert len(batches[0]["value"]) == 10

     permutation = permutation.remove_columns(["value"])
     assert permutation.num_columns == 1

@@ -898,10 +904,10 @@ def test_transform_fn(mem_db):
     py_result = list(permutation.with_format("python").iter(10, skip_last_batch=False))[
         0
     ]
-    assert len(py_result) == 10
-    assert "id" in py_result[0]
-    assert "value" in py_result[0]
-    assert isinstance(py_result, list)
+    assert len(py_result) == 2
+    assert len(py_result["id"]) == 10
+    assert len(py_result["value"]) == 10
+    assert isinstance(py_result, dict)

     try:
         import torch

@@ -909,11 +915,9 @@ def test_transform_fn(mem_db):
         torch_result = list(
             permutation.with_format("torch").iter(10, skip_last_batch=False)
         )[0]
-        assert isinstance(torch_result, list)
-        assert len(torch_result) == 10
-        assert isinstance(torch_result[0], torch.Tensor)
-        assert torch_result[0].shape == (2,)
-        assert torch_result[0].dtype == torch.int64
+        assert torch_result.shape == (2, 10)
+        assert torch_result.dtype == torch.int64
+        assert isinstance(torch_result, torch.Tensor)
     except ImportError:
         # Skip check if torch is not installed
         pass

@@ -946,16 +950,17 @@ def test_custom_transform(mem_db):

 def test_getitems_basic(some_permutation: Permutation):
     """Test __getitems__ returns correct rows by offset."""
     result = some_permutation.__getitems__([0, 1, 2])
-    assert isinstance(result, list)
-    assert "id" in result[0]
-    assert "value" in result[0]
-    assert len(result) == 3
+    assert isinstance(result, dict)
+    assert "id" in result
+    assert "value" in result
+    assert len(result["id"]) == 3


 def test_getitems_single_index(some_permutation: Permutation):
     """Test __getitems__ with a single index."""
     result = some_permutation.__getitems__([0])
-    assert len(result) == 1
+    assert len(result["id"]) == 1
+    assert len(result["value"]) == 1


 def test_getitems_preserves_order(some_permutation: Permutation):
@@ -965,40 +970,38 @@ def test_getitems_preserves_order(some_permutation: Permutation):
     # Get the same rows in reverse order
     reverse = some_permutation.__getitems__([4, 3, 2, 1, 0])

-    assert [r["id"] for r in forward] == list(reversed([r["id"] for r in reverse]))
-    assert [r["value"] for r in forward] == list(
-        reversed([r["value"] for r in reverse])
-    )
+    assert forward["id"] == list(reversed(reverse["id"]))
+    assert forward["value"] == list(reversed(reverse["value"]))


 def test_getitems_non_contiguous(some_permutation: Permutation):
     """Test __getitems__ with non-contiguous indices."""
     result = some_permutation.__getitems__([0, 10, 50, 100, 500])
-    assert len(result) == 5
+    assert len(result["id"]) == 5

     # Each id/value pair should match what we'd get individually
     for i, offset in enumerate([0, 10, 50, 100, 500]):
         single = some_permutation.__getitems__([offset])
-        assert result[i]["id"] == single[0]["id"]
-        assert result[i]["value"] == single[0]["value"]
+        assert result["id"][i] == single["id"][0]
+        assert result["value"][i] == single["value"][0]


 def test_getitems_with_column_selection(some_permutation: Permutation):
     """Test __getitems__ respects column selection."""
     id_only = some_permutation.select_columns(["id"])
     result = id_only.__getitems__([0, 1, 2])
-    assert "id" in result[0]
-    assert "value" not in result[0]
-    assert len(result) == 3
+    assert "id" in result
+    assert "value" not in result
+    assert len(result["id"]) == 3


 def test_getitems_with_column_rename(some_permutation: Permutation):
     """Test __getitems__ respects column renames."""
     renamed = some_permutation.rename_column("value", "data")
     result = renamed.__getitems__([0, 1])
-    assert "data" in result[0]
-    assert "value" not in result[0]
-    assert len(result) == 2
+    assert "data" in result
+    assert "value" not in result
+    assert len(result["data"]) == 2


 def test_getitems_with_format(some_permutation: Permutation):

@@ -1029,8 +1032,8 @@ def test_getitems_identity_permutation(mem_db):
     perm = Permutation.identity(tbl)

     result = perm.__getitems__([0, 5, 9])
-    assert [r["id"] for r in result] == [0, 5, 9]
-    assert [r["value"] for r in result] == [0, 5, 9]
+    assert result["id"] == [0, 5, 9]
+    assert result["value"] == [0, 5, 9]


 def test_getitems_with_limit_offset(some_permutation: Permutation):
@@ -1039,12 +1042,12 @@ def test_getitems_with_limit_offset(some_permutation: Permutation):

     # Should be able to access offsets within the limited range
     result = limited.__getitems__([0, 1, 199])
-    assert len(result) == 3
+    assert len(result["id"]) == 3

     # The first item of the limited permutation should match offset 100 of original
     full_result = some_permutation.__getitems__([100])
     limited_result = limited.__getitems__([0])
-    assert limited_result[0]["id"] == full_result[0]["id"]
+    assert limited_result["id"][0] == full_result["id"][0]


 def test_getitems_invalid_offset(some_permutation: Permutation):
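
`__getitems__` now follows the same column-major convention. Continuing the earlier sketch, batched random access returns one dict of lists rather than a list of row dicts:

```python
rows = perm.__getitems__([0, 5, 9])
assert rows == {"id": [0, 5, 9], "value": [0, 5, 9]}
```
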
@@ -4,7 +4,6 @@
 import pyarrow as pa
 import pytest
 from lancedb.util import tbl_to_tensor
-from lancedb.permutation import Permutation

 torch = pytest.importorskip("torch")

@@ -17,26 +16,3 @@ def test_table_dataloader(mem_db):
     for batch in dataloader:
         assert batch.size(0) == 1
         assert batch.size(1) == 10
-
-
-def test_permutation_dataloader(mem_db):
-    table = mem_db.create_table("test_table", pa.table({"a": range(1000)}))
-
-    permutation = Permutation.identity(table)
-    dataloader = torch.utils.data.DataLoader(permutation, batch_size=10, shuffle=True)
-    for batch in dataloader:
-        assert batch["a"].size(0) == 10
-
-    permutation = permutation.with_format("torch")
-    dataloader = torch.utils.data.DataLoader(permutation, batch_size=10, shuffle=True)
-    for batch in dataloader:
-        assert batch.size(0) == 10
-        assert batch.size(1) == 1
-
-    permutation = permutation.with_format("torch_col")
-    dataloader = torch.utils.data.DataLoader(
-        permutation, collate_fn=lambda x: x, batch_size=10, shuffle=True
-    )
-    for batch in dataloader:
-        assert batch.size(0) == 1
-        assert batch.size(1) == 10
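
The deleted test wired a Permutation straight into torch's DataLoader. A sketch of the same idea against the new column-major batches, continuing the earlier `perm` example; the identity `collate_fn` mirrors the deleted torch_col case, and since the remaining tests no longer cover DataLoader over permutations, treat this as an unverified assumption:

```python
import torch

loader = torch.utils.data.DataLoader(
    perm, collate_fn=lambda x: x, batch_size=5, shuffle=True
)
for batch in loader:
    # Each fetch goes through __getitems__, so a batch is one dict of lists.
    assert len(batch["id"]) == 5
```
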
@@ -121,8 +121,7 @@ impl Connection {

         let mode = Self::parse_create_mode_str(mode)?;

-        let batches: Box<dyn arrow::array::RecordBatchReader + Send> =
-            Box::new(ArrowArrayStreamReader::from_pyarrow_bound(&data)?);
+        let batches = ArrowArrayStreamReader::from_pyarrow_bound(&data)?;

         let mut builder = inner.create_table(name, batches).mode(mode);

@@ -296,8 +296,7 @@ impl Table {
         data: Bound<'_, PyAny>,
         mode: String,
     ) -> PyResult<Bound<'a, PyAny>> {
-        let batches: Box<dyn arrow::array::RecordBatchReader + Send> =
-            Box::new(ArrowArrayStreamReader::from_pyarrow_bound(&data)?);
+        let batches = ArrowArrayStreamReader::from_pyarrow_bound(&data)?;
         let mut op = self_.inner_ref()?.add(batches);
         if mode == "append" {
             op = op.mode(AddDataMode::Append);

@@ -3,12 +3,13 @@

 use std::{iter::once, sync::Arc};

-use arrow_array::{Float64Array, Int32Array, RecordBatch, StringArray};
+use arrow_array::{Float64Array, Int32Array, RecordBatch, RecordBatchIterator, StringArray};
 use arrow_schema::{DataType, Field, Schema};
 use aws_config::Region;
 use aws_sdk_bedrockruntime::Client;
 use futures::StreamExt;
 use lancedb::{
+    arrow::IntoArrow,
     connect,
     embeddings::{bedrock::BedrockEmbeddingFunction, EmbeddingDefinition, EmbeddingFunction},
     query::{ExecutableQuery, QueryBase},

@@ -66,7 +67,7 @@ async fn main() -> Result<()> {
     Ok(())
 }

-fn make_data() -> RecordBatch {
+fn make_data() -> impl IntoArrow {
     let schema = Schema::new(vec![
         Field::new("id", DataType::Int32, true),
         Field::new("text", DataType::Utf8, false),

@@ -82,9 +83,10 @@ fn make_data() -> RecordBatch {
     ]);
     let price = Float64Array::from(vec![10.0, 50.0, 100.0, 30.0]);
     let schema = Arc::new(schema);
-    RecordBatch::try_new(
+    let rb = RecordBatch::try_new(
         schema.clone(),
         vec![Arc::new(id), Arc::new(text), Arc::new(price)],
     )
-    .unwrap()
+    .unwrap();
+    Box::new(RecordBatchIterator::new(vec![Ok(rb)], schema))
 }

@@ -3,13 +3,12 @@

 use std::sync::Arc;

-use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, StringArray};
+use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, RecordBatchReader, StringArray};
 use arrow_schema::{DataType, Field, Schema};
 use futures::TryStreamExt;
 use lance_index::scalar::FullTextSearchQuery;
 use lancedb::connection::Connection;
-
 use lancedb::index::scalar::FtsIndexBuilder;
 use lancedb::index::Index;
 use lancedb::query::{ExecutableQuery, QueryBase};

@@ -30,7 +29,7 @@ async fn main() -> Result<()> {
     Ok(())
 }

-fn create_some_records() -> Result<Box<dyn arrow_array::RecordBatchReader + Send>> {
+fn create_some_records() -> Result<Box<dyn RecordBatchReader + Send>> {
     const TOTAL: usize = 1000;

     let schema = Arc::new(Schema::new(vec![

@@ -67,7 +66,7 @@ fn create_some_records() -> Result<Box<dyn arrow_array::RecordBatchReader + Send
 }

 async fn create_table(db: &Connection) -> Result<Table> {
-    let initial_data = create_some_records()?;
+    let initial_data: Box<dyn RecordBatchReader + Send> = create_some_records()?;
     let tbl = db.create_table("my_table", initial_data).execute().await?;
     Ok(tbl)
 }

@@ -1,13 +1,14 @@
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors

-use arrow_array::{RecordBatch, StringArray};
+use arrow_array::{RecordBatch, RecordBatchIterator, StringArray};
 use arrow_schema::{DataType, Field, Schema};
 use futures::TryStreamExt;
 use lance_index::scalar::FullTextSearchQuery;
 use lancedb::index::scalar::FtsIndexBuilder;
 use lancedb::index::Index;
 use lancedb::{
+    arrow::IntoArrow,
     connect,
     embeddings::{
         sentence_transformers::SentenceTransformersEmbeddings, EmbeddingDefinition,

@@ -69,7 +70,7 @@ async fn main() -> Result<()> {
     Ok(())
 }

-fn make_data() -> RecordBatch {
+fn make_data() -> impl IntoArrow {
     let schema = Schema::new(vec![Field::new("facts", DataType::Utf8, false)]);

     let facts = StringArray::from_iter_values(vec![

@@ -100,7 +101,8 @@ fn make_data() -> RecordBatch {
     "The first chatbot was ELIZA, created in the 1960s.",
     ]);
     let schema = Arc::new(schema);
-    RecordBatch::try_new(schema.clone(), vec![Arc::new(facts)]).unwrap()
+    let rb = RecordBatch::try_new(schema.clone(), vec![Arc::new(facts)]).unwrap();
+    Box::new(RecordBatchIterator::new(vec![Ok(rb)], schema))
 }

 async fn create_index(table: &Table) -> Result<()> {

@@ -8,12 +8,13 @@
 use std::sync::Arc;

 use arrow_array::types::Float32Type;
-use arrow_array::{FixedSizeListArray, Int32Array, RecordBatch, RecordBatchIterator};
+use arrow_array::{
+    FixedSizeListArray, Int32Array, RecordBatch, RecordBatchIterator, RecordBatchReader,
+};
 use arrow_schema::{DataType, Field, Schema};
 use futures::TryStreamExt;
 use lancedb::connection::Connection;
-
 use lancedb::index::vector::IvfPqIndexBuilder;
 use lancedb::index::Index;
 use lancedb::query::{ExecutableQuery, QueryBase};

@@ -33,7 +34,7 @@ async fn main() -> Result<()> {
     Ok(())
 }

-fn create_some_records() -> Result<Box<dyn arrow_array::RecordBatchReader + Send>> {
+fn create_some_records() -> Result<Box<dyn RecordBatchReader + Send>> {
     const TOTAL: usize = 1000;
     const DIM: usize = 128;

@@ -72,9 +73,9 @@ fn create_some_records() -> Result<Box<dyn arrow_array::RecordBatchReader + Send
 }

 async fn create_table(db: &Connection) -> Result<Table> {
-    let initial_data = create_some_records()?;
+    let initial_data: Box<dyn RecordBatchReader + Send> = create_some_records()?;
     let tbl = db
-        .create_table("my_table", initial_data)
+        .create_table("my_table", Box::new(initial_data))
         .execute()
         .await
         .unwrap();

@@ -5,9 +5,11 @@

 use std::{iter::once, sync::Arc};

-use arrow_array::{RecordBatch, StringArray};
+use arrow_array::{Float64Array, Int32Array, RecordBatch, RecordBatchIterator, StringArray};
 use arrow_schema::{DataType, Field, Schema};
 use futures::StreamExt;
 use lancedb::{
+    arrow::IntoArrow,
     connect,
     embeddings::{openai::OpenAIEmbeddingFunction, EmbeddingDefinition, EmbeddingFunction},
     query::{ExecutableQuery, QueryBase},

@@ -62,20 +64,26 @@ async fn main() -> Result<()> {
 }
 // --8<-- [end:openai_embeddings]

-fn make_data() -> RecordBatch {
-    arrow_array::record_batch!(
-        ("id", Int32, [1, 2, 3, 4]),
-        (
-            "text",
-            Utf8,
-            [
-                "Black T-Shirt",
-                "Leather Jacket",
-                "Winter Parka",
-                "Hooded Sweatshirt"
-            ]
-        ),
-        ("price", Float64, [10.0, 50.0, 100.0, 30.0])
+fn make_data() -> impl IntoArrow {
+    let schema = Schema::new(vec![
+        Field::new("id", DataType::Int32, true),
+        Field::new("text", DataType::Utf8, false),
+        Field::new("price", DataType::Float64, false),
+    ]);
+
+    let id = Int32Array::from(vec![1, 2, 3, 4]);
+    let text = StringArray::from_iter_values(vec![
+        "Black T-Shirt",
+        "Leather Jacket",
+        "Winter Parka",
+        "Hooded Sweatshirt",
+    ]);
+    let price = Float64Array::from(vec![10.0, 50.0, 100.0, 30.0]);
+    let schema = Arc::new(schema);
+    let rb = RecordBatch::try_new(
+        schema.clone(),
+        vec![Arc::new(id), Arc::new(text), Arc::new(price)],
     )
-    .unwrap()
+    .unwrap();
+    Box::new(RecordBatchIterator::new(vec![Ok(rb)], schema))
 }

@@ -3,10 +3,11 @@

 use std::{iter::once, sync::Arc};

-use arrow_array::{RecordBatch, StringArray};
+use arrow_array::{RecordBatch, RecordBatchIterator, StringArray};
 use arrow_schema::{DataType, Field, Schema};
 use futures::StreamExt;
 use lancedb::{
+    arrow::IntoArrow,
     connect,
     embeddings::{
         sentence_transformers::SentenceTransformersEmbeddings, EmbeddingDefinition,

@@ -58,7 +59,7 @@ async fn main() -> Result<()> {
     Ok(())
 }

-fn make_data() -> RecordBatch {
+fn make_data() -> impl IntoArrow {
     let schema = Schema::new(vec![Field::new("facts", DataType::Utf8, false)]);

     let facts = StringArray::from_iter_values(vec![

@@ -89,5 +90,6 @@ fn make_data() -> RecordBatch {
     "The first chatbot was ELIZA, created in the 1960s.",
     ]);
     let schema = Arc::new(schema);
-    RecordBatch::try_new(schema.clone(), vec![Arc::new(facts)]).unwrap()
+    let rb = RecordBatch::try_new(schema.clone(), vec![Arc::new(facts)]).unwrap();
+    Box::new(RecordBatchIterator::new(vec![Ok(rb)], schema))
 }

@@ -8,9 +8,11 @@
 use std::sync::Arc;

 use arrow_array::types::Float32Type;
-use arrow_array::{FixedSizeListArray, Int32Array, RecordBatch};
+use arrow_array::{FixedSizeListArray, Int32Array, RecordBatch, RecordBatchIterator};
 use arrow_schema::{DataType, Field, Schema};
 use futures::TryStreamExt;
+
+use lancedb::arrow::IntoArrow;
 use lancedb::connection::Connection;
 use lancedb::index::Index;
 use lancedb::query::{ExecutableQuery, QueryBase};

@@ -57,7 +59,7 @@ async fn open_with_existing_tbl() -> Result<()> {
     Ok(())
 }

-fn create_some_records() -> Result<RecordBatch> {
+fn create_some_records() -> Result<impl IntoArrow> {
     const TOTAL: usize = 1000;
     const DIM: usize = 128;

@@ -74,18 +76,25 @@ fn create_some_records() -> Result<RecordBatch> {
     ]));

     // Create a RecordBatch stream.
-    Ok(RecordBatch::try_new(
-        schema.clone(),
-        vec![
-            Arc::new(Int32Array::from_iter_values(0..TOTAL as i32)),
-            Arc::new(
-                FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
-                    (0..TOTAL).map(|_| Some(vec![Some(1.0); DIM])),
-                    DIM as i32,
-                ),
-            ),
-        ],
-    )?)
+    let batches = RecordBatchIterator::new(
+        vec![RecordBatch::try_new(
+            schema.clone(),
+            vec![
+                Arc::new(Int32Array::from_iter_values(0..TOTAL as i32)),
+                Arc::new(
+                    FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
+                        (0..TOTAL).map(|_| Some(vec![Some(1.0); DIM])),
+                        DIM as i32,
+                    ),
+                ),
+            ],
+        )
+        .unwrap()]
+        .into_iter()
+        .map(Ok),
+        schema.clone(),
+    );
+    Ok(Box::new(batches))
 }

 async fn create_table(db: &Connection) -> Result<LanceDbTable> {
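
All of these example rewrites funnel data through a RecordBatchIterator instead of returning a bare RecordBatch. The Python analogue is handing create_table a reader rather than a materialized table, which lets the data stream; a sketch with illustrative names:

```python
import tempfile

import lancedb
import pyarrow as pa

db = lancedb.connect(tempfile.mkdtemp())
schema = pa.schema([pa.field("id", pa.int32())])
batches = [pa.RecordBatch.from_pydict({"id": [1, 2]}, schema=schema)]
reader = pa.RecordBatchReader.from_batches(schema, iter(batches))
tbl = db.create_table("demo_stream", reader)
assert tbl.count_rows() == 2
```
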
@@ -6,8 +6,8 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow_array::RecordBatch;
|
||||
use arrow_schema::SchemaRef;
|
||||
use arrow_array::RecordBatchReader;
|
||||
use arrow_schema::{Field, SchemaRef};
|
||||
use lance::dataset::ReadParams;
|
||||
use lance_namespace::models::{
|
||||
CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
|
||||
@@ -17,20 +17,24 @@ use lance_namespace::models::{
|
||||
#[cfg(feature = "aws")]
|
||||
use object_store::aws::AwsCredential;
|
||||
|
||||
use crate::connection::create_table::CreateTableBuilder;
|
||||
use crate::data::scannable::Scannable;
|
||||
use crate::database::listing::ListingDatabase;
|
||||
use crate::database::{
|
||||
CloneTableRequest, Database, DatabaseOptions, OpenTableRequest, ReadConsistency,
|
||||
TableNamesRequest,
|
||||
use crate::arrow::{IntoArrow, IntoArrowStream, SendableRecordBatchStream};
|
||||
use crate::database::listing::{
|
||||
ListingDatabase, OPT_NEW_TABLE_STORAGE_VERSION, OPT_NEW_TABLE_V2_MANIFEST_PATHS,
|
||||
};
|
||||
use crate::database::{
|
||||
CloneTableRequest, CreateTableData, CreateTableMode, CreateTableRequest, Database,
|
||||
DatabaseOptions, OpenTableRequest, ReadConsistency, TableNamesRequest,
|
||||
};
|
||||
use crate::embeddings::{
|
||||
EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry, MemoryRegistry, WithEmbeddings,
|
||||
};
|
||||
use crate::embeddings::{EmbeddingRegistry, MemoryRegistry};
|
||||
use crate::error::{Error, Result};
|
||||
#[cfg(feature = "remote")]
|
||||
use crate::remote::{
|
||||
client::ClientConfig,
|
||||
db::{OPT_REMOTE_API_KEY, OPT_REMOTE_HOST_OVERRIDE, OPT_REMOTE_REGION},
|
||||
};
|
||||
use crate::table::{TableDefinition, WriteOptions};
|
||||
use crate::Table;
|
||||
use lance::io::ObjectStoreParams;
|
||||
pub use lance_encoding::version::LanceFileVersion;
|
||||
@@ -38,8 +42,6 @@ pub use lance_encoding::version::LanceFileVersion;
|
||||
use lance_io::object_store::StorageOptions;
|
||||
use lance_io::object_store::{StorageOptionsAccessor, StorageOptionsProvider};
|
||||
|
||||
mod create_table;
|
||||
|
||||
fn merge_storage_options(
|
||||
store_params: &mut ObjectStoreParams,
|
||||
pairs: impl IntoIterator<Item = (String, String)>,
|
||||
@@ -114,6 +116,337 @@ impl TableNamesBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct NoData {}
|
||||
|
||||
impl IntoArrow for NoData {
|
||||
fn into_arrow(self) -> Result<Box<dyn arrow_array::RecordBatchReader + Send>> {
|
||||
unreachable!("NoData should never be converted to Arrow")
|
||||
}
|
||||
}
|
||||
|
||||
// Stores the value given from the initial CreateTableBuilder::new call
|
||||
// and defers errors until `execute` is called
|
||||
enum CreateTableBuilderInitialData {
|
||||
None,
|
||||
Iterator(Result<Box<dyn RecordBatchReader + Send>>),
|
||||
Stream(Result<SendableRecordBatchStream>),
|
||||
}
|
||||
|
||||
/// A builder for configuring a [`Connection::create_table`] operation
|
||||
pub struct CreateTableBuilder<const HAS_DATA: bool> {
|
||||
parent: Arc<dyn Database>,
|
||||
embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
|
||||
embedding_registry: Arc<dyn EmbeddingRegistry>,
|
||||
request: CreateTableRequest,
|
||||
// This is a bit clumsy but we defer errors until `execute` is called
|
||||
// to maintain backwards compatibility
|
||||
data: CreateTableBuilderInitialData,
|
||||
}
|
||||
|
||||
// Builder methods that only apply when we have initial data
|
||||
impl CreateTableBuilder<true> {
|
||||
fn new<T: IntoArrow>(
|
||||
parent: Arc<dyn Database>,
|
||||
name: String,
|
||||
data: T,
|
||||
embedding_registry: Arc<dyn EmbeddingRegistry>,
|
||||
) -> Self {
|
||||
let dummy_schema = Arc::new(arrow_schema::Schema::new(Vec::<Field>::default()));
|
||||
Self {
|
||||
parent,
|
||||
request: CreateTableRequest::new(
|
||||
name,
|
||||
CreateTableData::Empty(TableDefinition::new_from_schema(dummy_schema)),
|
||||
),
|
||||
embeddings: Vec::new(),
|
||||
embedding_registry,
|
||||
data: CreateTableBuilderInitialData::Iterator(data.into_arrow()),
|
||||
}
|
||||
}
|
||||
|
||||
fn new_streaming<T: IntoArrowStream>(
|
||||
parent: Arc<dyn Database>,
|
||||
name: String,
|
||||
data: T,
|
||||
embedding_registry: Arc<dyn EmbeddingRegistry>,
|
||||
) -> Self {
|
||||
let dummy_schema = Arc::new(arrow_schema::Schema::new(Vec::<Field>::default()));
|
||||
Self {
|
||||
parent,
|
||||
request: CreateTableRequest::new(
|
||||
name,
|
||||
CreateTableData::Empty(TableDefinition::new_from_schema(dummy_schema)),
|
||||
),
|
||||
embeddings: Vec::new(),
|
||||
embedding_registry,
|
||||
data: CreateTableBuilderInitialData::Stream(data.into_arrow()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute the create table operation
|
||||
pub async fn execute(self) -> Result<Table> {
|
||||
let embedding_registry = self.embedding_registry.clone();
|
||||
let parent = self.parent.clone();
|
||||
let request = self.into_request()?;
|
||||
Ok(Table::new_with_embedding_registry(
|
||||
parent.create_table(request).await?,
|
||||
parent,
|
||||
embedding_registry,
|
||||
))
|
||||
}
|
||||
|
||||
fn into_request(self) -> Result<CreateTableRequest> {
|
||||
if self.embeddings.is_empty() {
|
||||
match self.data {
|
||||
CreateTableBuilderInitialData::Iterator(maybe_iter) => {
|
||||
let data = maybe_iter?;
|
||||
Ok(CreateTableRequest {
|
||||
data: CreateTableData::Data(data),
|
||||
..self.request
|
||||
})
|
||||
}
|
||||
CreateTableBuilderInitialData::None => {
|
||||
unreachable!("No data provided for CreateTableBuilder<true>")
|
||||
}
|
||||
CreateTableBuilderInitialData::Stream(maybe_stream) => {
|
||||
let data = maybe_stream?;
|
||||
Ok(CreateTableRequest {
|
||||
data: CreateTableData::StreamingData(data),
|
||||
..self.request
|
||||
})
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let CreateTableBuilderInitialData::Iterator(maybe_iter) = self.data else {
|
||||
return Err(Error::NotSupported { message: "Creating a table with embeddings is currently not support when the input is streaming".to_string() });
|
||||
};
|
||||
let data = maybe_iter?;
|
||||
let data = Box::new(WithEmbeddings::new(data, self.embeddings));
|
||||
Ok(CreateTableRequest {
|
||||
data: CreateTableData::Data(data),
|
||||
..self.request
|
||||
})
|
||||
}
|
||||
}
|
||||
}

// Builder methods that only apply when we do not have initial data
impl CreateTableBuilder<false> {
    fn new(
        parent: Arc<dyn Database>,
        name: String,
        schema: SchemaRef,
        embedding_registry: Arc<dyn EmbeddingRegistry>,
    ) -> Self {
        let table_definition = TableDefinition::new_from_schema(schema);
        Self {
            parent,
            request: CreateTableRequest::new(name, CreateTableData::Empty(table_definition)),
            data: CreateTableBuilderInitialData::None,
            embeddings: Vec::default(),
            embedding_registry,
        }
    }

    /// Execute the create table operation
    pub async fn execute(self) -> Result<Table> {
        let parent = self.parent.clone();
        let embedding_registry = self.embedding_registry.clone();
        let request = self.into_request()?;
        Ok(Table::new_with_embedding_registry(
            parent.create_table(request).await?,
            parent,
            embedding_registry,
        ))
    }

    fn into_request(self) -> Result<CreateTableRequest> {
        if self.embeddings.is_empty() {
            return Ok(self.request);
        }

        let CreateTableData::Empty(table_def) = self.request.data else {
            unreachable!("CreateTableBuilder<false> should always have Empty data")
        };

        let schema = table_def.schema.clone();
        let empty_batch = arrow_array::RecordBatch::new_empty(schema.clone());

        let reader = Box::new(std::iter::once(Ok(empty_batch)).collect::<Vec<_>>());
        let reader = arrow_array::RecordBatchIterator::new(reader.into_iter(), schema);
        let with_embeddings = WithEmbeddings::new(reader, self.embeddings);
        let table_definition = with_embeddings.table_definition()?;

        Ok(CreateTableRequest {
            data: CreateTableData::Empty(table_definition),
            ..self.request
        })
    }
}
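
// Illustrative sketch (not part of the original source): `create_empty_table` hands out
// this `CreateTableBuilder<false>`, so a schema-only table is created like so:
//
//     let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int64, false)]));
//     let table = db.create_empty_table("empty", schema).execute().await?;
//     assert_eq!(table.count_rows(None).await?, 0);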

impl<const HAS_DATA: bool> CreateTableBuilder<HAS_DATA> {
    /// Set the mode for creating the table
    ///
    /// This controls what happens if a table with the given name already exists
    pub fn mode(mut self, mode: CreateTableMode) -> Self {
        self.request.mode = mode;
        self
    }

    /// Apply the given write options when writing the initial data
    pub fn write_options(mut self, write_options: WriteOptions) -> Self {
        self.request.write_options = write_options;
        self
    }

    /// Set an option for the storage layer.
    ///
    /// Options already set on the connection will be inherited by the table,
    /// but can be overridden here.
    ///
    /// See available options at <https://lancedb.com/docs/storage/>
    pub fn storage_option(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
        let store_params = self
            .request
            .write_options
            .lance_write_params
            .get_or_insert(Default::default())
            .store_params
            .get_or_insert(Default::default());
        merge_storage_options(store_params, [(key.into(), value.into())]);
        self
    }

    /// Set multiple options for the storage layer.
    ///
    /// Options already set on the connection will be inherited by the table,
    /// but can be overridden here.
    ///
    /// See available options at <https://lancedb.com/docs/storage/>
    pub fn storage_options(
        mut self,
        pairs: impl IntoIterator<Item = (impl Into<String>, impl Into<String>)>,
    ) -> Self {
        let store_params = self
            .request
            .write_options
            .lance_write_params
            .get_or_insert(Default::default())
            .store_params
            .get_or_insert(Default::default());
        let updates = pairs
            .into_iter()
            .map(|(key, value)| (key.into(), value.into()));
        merge_storage_options(store_params, updates);
        self
    }
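
    // Illustrative sketch (not part of the original source): the two setters above can
    // be chained, mirroring test_create_table_with_storage_options further below:
    //
    //     let table = db
    //         .create_table("options_table", batch)
    //         .storage_option("timeout", "30s")
    //         .storage_options([("retry_count", "3")])
    //         .execute()
    //         .await?;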

    /// Add an embedding definition to the table.
    ///
    /// The `embedding_name` must match the name of an embedding function that
    /// was previously registered with the connection's [`EmbeddingRegistry`].
    pub fn add_embedding(mut self, definition: EmbeddingDefinition) -> Result<Self> {
        // Early verification of the embedding name
        let embedding_func = self
            .embedding_registry
            .get(&definition.embedding_name)
            .ok_or_else(|| Error::EmbeddingFunctionNotFound {
                name: definition.embedding_name.clone(),
                reason: "No embedding function found in the connection's embedding_registry"
                    .to_string(),
            })?;

        self.embeddings.push((definition, embedding_func));
        Ok(self)
    }
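
    // Illustrative sketch (not part of the original source): register a function, then
    // reference it by name, as in test_create_table_with_embedding further below:
    //
    //     registry.register("mock", Arc::new(MockEmbed::new("mock", 4)))?;
    //     let table = conn
    //         .create_table("embed_test", batch)
    //         .add_embedding(EmbeddingDefinition::new("text", "mock", Some("text_embedding")))?
    //         .execute()
    //         .await?;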

    /// Set whether to use V2 manifest paths for the table. (default: false)
    ///
    /// These paths provide more efficient opening of tables with many
    /// versions on object stores.
    ///
    /// <div class="warning">Turning this on will make the dataset unreadable
    /// for older versions of LanceDB (prior to 0.10.0).</div>
    ///
    /// To migrate an existing dataset, instead use
    /// [`NativeTable::migrate_manifest_paths_v2`].
    ///
    /// This has no effect in LanceDB Cloud.
    #[deprecated(since = "0.15.1", note = "Use `database_options` instead")]
    pub fn enable_v2_manifest_paths(mut self, use_v2_manifest_paths: bool) -> Self {
        let store_params = self
            .request
            .write_options
            .lance_write_params
            .get_or_insert_with(Default::default)
            .store_params
            .get_or_insert_with(Default::default);
        let value = if use_v2_manifest_paths {
            "true".to_string()
        } else {
            "false".to_string()
        };
        merge_storage_options(
            store_params,
            [(OPT_NEW_TABLE_V2_MANIFEST_PATHS.to_string(), value)],
        );
        self
    }

    /// Set the data storage version.
    ///
    /// The default is `LanceFileVersion::Stable`.
    #[deprecated(since = "0.15.1", note = "Use `database_options` instead")]
    pub fn data_storage_version(mut self, data_storage_version: LanceFileVersion) -> Self {
        let store_params = self
            .request
            .write_options
            .lance_write_params
            .get_or_insert_with(Default::default)
            .store_params
            .get_or_insert_with(Default::default);
        merge_storage_options(
            store_params,
            [(
                OPT_NEW_TABLE_STORAGE_VERSION.to_string(),
                data_storage_version.to_string(),
            )],
        );
        self
    }

    /// Set the namespace for the table
    pub fn namespace(mut self, namespace: Vec<String>) -> Self {
        self.request.namespace = namespace;
        self
    }

    /// Set a custom location for the table.
    ///
    /// If not set, the database will derive a location from its URI and the table name.
    /// This is useful when integrating with namespace systems that manage table locations.
    pub fn location(mut self, location: impl Into<String>) -> Self {
        self.request.location = Some(location.into());
        self
    }

    /// Set a storage options provider for automatic credential refresh.
    ///
    /// This allows tables to automatically refresh cloud storage credentials
    /// when they expire, enabling long-running operations on remote storage.
    pub fn storage_options_provider(mut self, provider: Arc<dyn StorageOptionsProvider>) -> Self {
        let store_params = self
            .request
            .write_options
            .lance_write_params
            .get_or_insert(Default::default())
            .store_params
            .get_or_insert(Default::default());
        set_storage_options_provider(store_params, provider);
        self
    }
}

#[derive(Clone, Debug)]
pub struct OpenTableBuilder {
    parent: Arc<dyn Database>,
@@ -351,17 +684,35 @@ impl Connection {
    ///
    /// * `name` - The name of the table
    /// * `initial_data` - The initial data to write to the table
    pub fn create_table<T: Scannable + 'static>(
    pub fn create_table<T: IntoArrow>(
        &self,
        name: impl Into<String>,
        initial_data: T,
    ) -> CreateTableBuilder {
        let initial_data = Box::new(initial_data);
        CreateTableBuilder::new(
    ) -> CreateTableBuilder<true> {
        CreateTableBuilder::<true>::new(
            self.internal.clone(),
            self.embedding_registry.clone(),
            name.into(),
            initial_data,
            self.embedding_registry.clone(),
        )
    }

    /// Create a new table from a stream of data
    ///
    /// # Parameters
    ///
    /// * `name` - The name of the table
    /// * `initial_data` - The initial data to write to the table
    pub fn create_table_streaming<T: IntoArrowStream>(
        &self,
        name: impl Into<String>,
        initial_data: T,
    ) -> CreateTableBuilder<true> {
        CreateTableBuilder::<true>::new_streaming(
            self.internal.clone(),
            name.into(),
            initial_data,
            self.embedding_registry.clone(),
        )
    }

@@ -375,9 +726,13 @@ impl Connection {
        &self,
        name: impl Into<String>,
        schema: SchemaRef,
    ) -> CreateTableBuilder {
        let empty_batch = RecordBatch::new_empty(schema);
        self.create_table(name, empty_batch)
    ) -> CreateTableBuilder<false> {
        CreateTableBuilder::<false>::new(
            self.internal.clone(),
            name.into(),
            schema,
            self.embedding_registry.clone(),
        )
    }

    /// Open an existing table in the database
@@ -994,11 +1349,20 @@ mod test_utils {

#[cfg(test)]
mod tests {
    use crate::database::listing::{ListingDatabaseOptions, NewTableConfig};
    use crate::query::QueryBase;
    use crate::query::{ExecutableQuery, QueryExecutionOptions};
    use crate::test_utils::connection::new_test_connection;
    use arrow::compute::concat_batches;
    use arrow_array::RecordBatchReader;
    use arrow_schema::{DataType, Field, Schema};
    use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
    use futures::{stream, TryStreamExt};
    use lance_core::error::{ArrowResult, DataFusionResult};
    use lance_testing::datagen::{BatchGenerator, IncrementingInt32};
    use tempfile::tempdir;

    use crate::test_utils::connection::new_test_connection;
    use crate::arrow::SimpleRecordBatchStream;

    use super::*;

@@ -1114,6 +1478,139 @@ mod tests {
        assert_eq!(tables, vec!["table1".to_owned()]);
    }

    fn make_data() -> Box<dyn RecordBatchReader + Send + 'static> {
        let id = Box::new(IncrementingInt32::new().named("id".to_string()));
        Box::new(BatchGenerator::new().col(id).batches(10, 2000))
    }

    #[tokio::test]
    async fn test_create_table_v2() {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();
        let db = connect(uri)
            .database_options(&ListingDatabaseOptions {
                new_table_config: NewTableConfig {
                    data_storage_version: Some(LanceFileVersion::Legacy),
                    ..Default::default()
                },
                ..Default::default()
            })
            .execute()
            .await
            .unwrap();

        let tbl = db
            .create_table("v1_test", make_data())
            .execute()
            .await
            .unwrap();

        // In v1 the row group size will trump max_batch_length
        let batches = tbl
            .query()
            .limit(20000)
            .execute_with_options(QueryExecutionOptions {
                max_batch_length: 50000,
                ..Default::default()
            })
            .await
            .unwrap()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        assert_eq!(batches.len(), 20);

        let db = connect(uri)
            .database_options(&ListingDatabaseOptions {
                new_table_config: NewTableConfig {
                    data_storage_version: Some(LanceFileVersion::Stable),
                    ..Default::default()
                },
                ..Default::default()
            })
            .execute()
            .await
            .unwrap();

        let tbl = db
            .create_table("v2_test", make_data())
            .execute()
            .await
            .unwrap();

        // In v2 the page size is much bigger than 50k so we should get a single batch
        let batches = tbl
            .query()
            .execute_with_options(QueryExecutionOptions {
                max_batch_length: 50000,
                ..Default::default()
            })
            .await
            .unwrap()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();

        assert_eq!(batches.len(), 1);
    }

    #[tokio::test]
    async fn test_create_table_streaming() {
        let tmp_dir = tempdir().unwrap();

        let uri = tmp_dir.path().to_str().unwrap();
        let db = connect(uri).execute().await.unwrap();

        let batches = make_data().collect::<ArrowResult<Vec<_>>>().unwrap();

        let schema = batches.first().unwrap().schema();
        let one_batch = concat_batches(&schema, batches.iter()).unwrap();

        let ldb_stream = stream::iter(batches.clone().into_iter().map(Result::Ok));
        let ldb_stream: SendableRecordBatchStream =
            Box::pin(SimpleRecordBatchStream::new(ldb_stream, schema.clone()));

        let tbl1 = db
            .create_table_streaming("one", ldb_stream)
            .execute()
            .await
            .unwrap();

        let df_stream = stream::iter(batches.into_iter().map(DataFusionResult::Ok));
        let df_stream: datafusion_physical_plan::SendableRecordBatchStream =
            Box::pin(RecordBatchStreamAdapter::new(schema.clone(), df_stream));

        let tbl2 = db
            .create_table_streaming("two", df_stream)
            .execute()
            .await
            .unwrap();

        let tbl1_data = tbl1
            .query()
            .execute()
            .await
            .unwrap()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();

        let tbl1_data = concat_batches(&schema, tbl1_data.iter()).unwrap();
        assert_eq!(tbl1_data, one_batch);

        let tbl2_data = tbl2
            .query()
            .execute()
            .await
            .unwrap()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();

        let tbl2_data = concat_batches(&schema, tbl2_data.iter()).unwrap();
        assert_eq!(tbl2_data, one_batch);
    }

    #[tokio::test]
    async fn drop_table() {
        let tc = new_test_connection().await.unwrap();
@@ -1143,6 +1640,41 @@ mod tests {
        assert_eq!(tables.len(), 0);
    }

    #[tokio::test]
    async fn test_create_table_already_exists() {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();
        let db = connect(uri).execute().await.unwrap();
        let schema = Arc::new(Schema::new(vec![Field::new("x", DataType::Int32, false)]));
        db.create_empty_table("test", schema.clone())
            .execute()
            .await
            .unwrap();
        // TODO: None of the open table options are "inspectable" right now but once one is we
        // should assert we are passing these options in correctly
        db.create_empty_table("test", schema)
            .mode(CreateTableMode::exist_ok(|mut req| {
                req.index_cache_size = Some(16);
                req
            }))
            .execute()
            .await
            .unwrap();
        let other_schema = Arc::new(Schema::new(vec![Field::new("y", DataType::Int32, false)]));
        assert!(db
            .create_empty_table("test", other_schema.clone())
            .execute()
            .await
            .is_err());
        let overwritten = db
            .create_empty_table("test", other_schema.clone())
            .mode(CreateTableMode::Overwrite)
            .execute()
            .await
            .unwrap();
        assert_eq!(other_schema, overwritten.schema().await.unwrap());
    }

    #[tokio::test]
    async fn test_clone_table() {
        let tmp_dir = tempdir().unwrap();
@@ -1153,8 +1685,7 @@ mod tests {
        let mut batch_gen = BatchGenerator::new()
            .col(Box::new(IncrementingInt32::new().named("id")))
            .col(Box::new(IncrementingInt32::new().named("value")));
        let reader: Box<dyn arrow_array::RecordBatchReader + Send> =
            Box::new(batch_gen.batches(5, 100));
        let reader = batch_gen.batches(5, 100);

        let source_table = db
            .create_table("source_table", reader)
@@ -1189,4 +1720,128 @@ mod tests {
        let cloned_count = cloned_table.count_rows(None).await.unwrap();
        assert_eq!(source_count, cloned_count);
    }

    #[tokio::test]
    async fn test_create_empty_table_with_embeddings() {
        use crate::embeddings::{EmbeddingDefinition, EmbeddingFunction};
        use arrow_array::{
            Array, FixedSizeListArray, Float32Array, RecordBatch, RecordBatchIterator, StringArray,
        };
        use std::borrow::Cow;

        #[derive(Debug, Clone)]
        struct MockEmbedding {
            dim: usize,
        }

        impl EmbeddingFunction for MockEmbedding {
            fn name(&self) -> &str {
                "test_embedding"
            }

            fn source_type(&self) -> Result<Cow<'_, DataType>> {
                Ok(Cow::Owned(DataType::Utf8))
            }

            fn dest_type(&self) -> Result<Cow<'_, DataType>> {
                Ok(Cow::Owned(DataType::new_fixed_size_list(
                    DataType::Float32,
                    self.dim as i32,
                    true,
                )))
            }

            fn compute_source_embeddings(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
                let len = source.len();
                let values = vec![1.0f32; len * self.dim];
                let values = Arc::new(Float32Array::from(values));
                let field = Arc::new(Field::new("item", DataType::Float32, true));
                Ok(Arc::new(FixedSizeListArray::new(
                    field,
                    self.dim as i32,
                    values,
                    None,
                )))
            }

            fn compute_query_embeddings(&self, _input: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
                unimplemented!()
            }
        }

        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();
        let db = connect(uri).execute().await.unwrap();

        let embed_func = Arc::new(MockEmbedding { dim: 128 });
        db.embedding_registry()
            .register("test_embedding", embed_func.clone())
            .unwrap();

        let schema = Arc::new(Schema::new(vec![Field::new("name", DataType::Utf8, true)]));
        let ed = EmbeddingDefinition {
            source_column: "name".to_owned(),
            dest_column: Some("name_embedding".to_owned()),
            embedding_name: "test_embedding".to_owned(),
        };

        let table = db
            .create_empty_table("test", schema)
            .mode(CreateTableMode::Overwrite)
            .add_embedding(ed)
            .unwrap()
            .execute()
            .await
            .unwrap();

        let table_schema = table.schema().await.unwrap();
        assert!(table_schema.column_with_name("name").is_some());
        assert!(table_schema.column_with_name("name_embedding").is_some());

        let embedding_field = table_schema.field_with_name("name_embedding").unwrap();
        assert_eq!(
            embedding_field.data_type(),
            &DataType::new_fixed_size_list(DataType::Float32, 128, true)
        );

        let input_schema = Arc::new(Schema::new(vec![Field::new("name", DataType::Utf8, true)]));
        let input_batch = RecordBatch::try_new(
            input_schema.clone(),
            vec![Arc::new(StringArray::from(vec![
                Some("Alice"),
                Some("Bob"),
                Some("Charlie"),
            ]))],
        )
        .unwrap();

        let input_reader = Box::new(RecordBatchIterator::new(
            vec![Ok(input_batch)].into_iter(),
            input_schema,
        ));

        table.add(input_reader).execute().await.unwrap();

        let results = table
            .query()
            .execute()
            .await
            .unwrap()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();

        assert_eq!(results.len(), 1);
        let batch = &results[0];
        assert_eq!(batch.num_rows(), 3);
        assert!(batch.column_by_name("name_embedding").is_some());

        let embedding_col = batch
            .column_by_name("name_embedding")
            .unwrap()
            .as_any()
            .downcast_ref::<FixedSizeListArray>()
            .unwrap();
        assert_eq!(embedding_col.len(), 3);
    }
}
@@ -1,612 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use std::sync::Arc;

use lance_io::object_store::StorageOptionsProvider;

use crate::{
    connection::{merge_storage_options, set_storage_options_provider},
    data::scannable::{Scannable, WithEmbeddingsScannable},
    database::{CreateTableMode, CreateTableRequest, Database},
    embeddings::{EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry},
    table::WriteOptions,
    Error, Result, Table,
};

pub struct CreateTableBuilder {
    parent: Arc<dyn Database>,
    embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
    embedding_registry: Arc<dyn EmbeddingRegistry>,
    request: CreateTableRequest,
}

impl CreateTableBuilder {
    pub(super) fn new(
        parent: Arc<dyn Database>,
        embedding_registry: Arc<dyn EmbeddingRegistry>,
        name: String,
        data: Box<dyn Scannable>,
    ) -> Self {
        Self {
            parent,
            embeddings: Vec::new(),
            embedding_registry,
            request: CreateTableRequest::new(name, data),
        }
    }

    /// Set the mode for creating the table
    ///
    /// This controls what happens if a table with the given name already exists
    pub fn mode(mut self, mode: CreateTableMode) -> Self {
        self.request.mode = mode;
        self
    }

    /// Apply the given write options when writing the initial data
    pub fn write_options(mut self, write_options: WriteOptions) -> Self {
        self.request.write_options = write_options;
        self
    }

    /// Set an option for the storage layer.
    ///
    /// Options already set on the connection will be inherited by the table,
    /// but can be overridden here.
    ///
    /// See available options at <https://lancedb.com/docs/storage/>
    pub fn storage_option(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
        let store_params = self
            .request
            .write_options
            .lance_write_params
            .get_or_insert(Default::default())
            .store_params
            .get_or_insert(Default::default());
        merge_storage_options(store_params, [(key.into(), value.into())]);
        self
    }

    /// Set multiple options for the storage layer.
    ///
    /// Options already set on the connection will be inherited by the table,
    /// but can be overridden here.
    ///
    /// See available options at <https://lancedb.com/docs/storage/>
    pub fn storage_options(
        mut self,
        pairs: impl IntoIterator<Item = (impl Into<String>, impl Into<String>)>,
    ) -> Self {
        let store_params = self
            .request
            .write_options
            .lance_write_params
            .get_or_insert(Default::default())
            .store_params
            .get_or_insert(Default::default());
        let updates = pairs
            .into_iter()
            .map(|(key, value)| (key.into(), value.into()));
        merge_storage_options(store_params, updates);
        self
    }

    /// Add an embedding definition to the table.
    ///
    /// The `embedding_name` must match the name of an embedding function that
    /// was previously registered with the connection's [`EmbeddingRegistry`].
    pub fn add_embedding(mut self, definition: EmbeddingDefinition) -> Result<Self> {
        // Early verification of the embedding name
        let embedding_func = self
            .embedding_registry
            .get(&definition.embedding_name)
            .ok_or_else(|| Error::EmbeddingFunctionNotFound {
                name: definition.embedding_name.clone(),
                reason: "No embedding function found in the connection's embedding_registry"
                    .to_string(),
            })?;

        self.embeddings.push((definition, embedding_func));
        Ok(self)
    }

    /// Set the namespace for the table
    pub fn namespace(mut self, namespace: Vec<String>) -> Self {
        self.request.namespace = namespace;
        self
    }

    /// Set a custom location for the table.
    ///
    /// If not set, the database will derive a location from its URI and the table name.
    /// This is useful when integrating with namespace systems that manage table locations.
    pub fn location(mut self, location: impl Into<String>) -> Self {
        self.request.location = Some(location.into());
        self
    }

    /// Set a storage options provider for automatic credential refresh.
    ///
    /// This allows tables to automatically refresh cloud storage credentials
    /// when they expire, enabling long-running operations on remote storage.
    pub fn storage_options_provider(mut self, provider: Arc<dyn StorageOptionsProvider>) -> Self {
        let store_params = self
            .request
            .write_options
            .lance_write_params
            .get_or_insert(Default::default())
            .store_params
            .get_or_insert(Default::default());
        set_storage_options_provider(store_params, provider);
        self
    }

    /// Execute the create table operation
    pub async fn execute(mut self) -> Result<Table> {
        let embedding_registry = self.embedding_registry.clone();
        let parent = self.parent.clone();

        // If embeddings were configured via add_embedding(), wrap the data
        if !self.embeddings.is_empty() {
            let wrapped_data: Box<dyn Scannable> = Box::new(WithEmbeddingsScannable::try_new(
                self.request.data,
                self.embeddings,
            )?);
            self.request.data = wrapped_data;
        }

        Ok(Table::new_with_embedding_registry(
            parent.create_table(self.request).await?,
            parent,
            embedding_registry,
        ))
    }
}

#[cfg(test)]
mod tests {
    use arrow_array::{
        record_batch, Array, FixedSizeListArray, Float32Array, RecordBatch, RecordBatchIterator,
    };
    use arrow_schema::{ArrowError, DataType, Field, Schema};
    use futures::TryStreamExt;
    use lance_file::version::LanceFileVersion;
    use tempfile::tempdir;

    use crate::{
        arrow::{SendableRecordBatchStream, SimpleRecordBatchStream},
        connect,
        database::listing::{ListingDatabaseOptions, NewTableConfig},
        embeddings::{EmbeddingDefinition, EmbeddingFunction, MemoryRegistry},
        query::{ExecutableQuery, QueryBase, Select},
        test_utils::embeddings::MockEmbed,
    };
    use std::borrow::Cow;

    use super::*;

    #[tokio::test]
    async fn create_empty_table() {
        let db = connect("memory://").execute().await.unwrap();
        let schema = Arc::new(Schema::new(vec![
            Field::new("id", DataType::Int64, false),
            Field::new("value", DataType::Float64, false),
        ]));
        db.create_empty_table("name", schema.clone())
            .execute()
            .await
            .unwrap();
        let table = db.open_table("name").execute().await.unwrap();
        assert_eq!(table.schema().await.unwrap(), schema);
        assert_eq!(table.count_rows(None).await.unwrap(), 0);
    }

    async fn test_create_table_with_data<T>(data: T)
    where
        T: Scannable + 'static,
    {
        let db = connect("memory://").execute().await.unwrap();
        let schema = data.schema();
        db.create_table("data_table", data).execute().await.unwrap();
        let table = db.open_table("data_table").execute().await.unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), 3);
        assert_eq!(table.schema().await.unwrap(), schema);
    }

    #[tokio::test]
    async fn create_table_with_batch() {
        let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
        test_create_table_with_data(batch).await;
    }

    #[tokio::test]
    async fn test_create_table_with_vec_batch() {
        let data = vec![
            record_batch!(("id", Int64, [1, 2])).unwrap(),
            record_batch!(("id", Int64, [3])).unwrap(),
        ];
        test_create_table_with_data(data).await;
    }

    #[tokio::test]
    async fn test_create_table_with_record_batch_reader() {
        let data = vec![
            record_batch!(("id", Int64, [1, 2])).unwrap(),
            record_batch!(("id", Int64, [3])).unwrap(),
        ];
        let schema = data[0].schema();
        let reader: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(
            RecordBatchIterator::new(data.into_iter().map(Ok), schema.clone()),
        );
        test_create_table_with_data(reader).await;
    }

    #[tokio::test]
    async fn test_create_table_with_stream() {
        let data = vec![
            record_batch!(("id", Int64, [1, 2])).unwrap(),
            record_batch!(("id", Int64, [3])).unwrap(),
        ];
        let schema = data[0].schema();
        let inner = futures::stream::iter(data.into_iter().map(Ok));
        let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
            schema,
            stream: inner,
        });
        test_create_table_with_data(stream).await;
    }

    #[derive(Debug)]
    struct MyError;

    impl std::fmt::Display for MyError {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "MyError occurred")
        }
    }

    impl std::error::Error for MyError {}

    #[tokio::test]
    async fn test_create_preserves_reader_error() {
        let first_batch = record_batch!(("id", Int64, [1, 2])).unwrap();
        let schema = first_batch.schema();
        let iterator = vec![
            Ok(first_batch),
            Err(ArrowError::ExternalError(Box::new(MyError))),
        ];
        let reader: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(
            RecordBatchIterator::new(iterator.into_iter(), schema.clone()),
        );

        let db = connect("memory://").execute().await.unwrap();
        let result = db.create_table("failing_table", reader).execute().await;

        assert!(result.is_err());
        // TODO: when we upgrade to Lance 2.0.0, this should pass
        // assert!(matches!(result, Err(Error::External { source})
        //     if source.downcast_ref::<MyError>().is_some()
        // ));
    }

    #[tokio::test]
    async fn test_create_preserves_stream_error() {
        let first_batch = record_batch!(("id", Int64, [1, 2])).unwrap();
        let schema = first_batch.schema();
        let iterator = vec![
            Ok(first_batch),
            Err(Error::External {
                source: Box::new(MyError),
            }),
        ];
        let stream = futures::stream::iter(iterator);
        let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
            schema: schema.clone(),
            stream,
        });

        let db = connect("memory://").execute().await.unwrap();
        let result = db
            .create_table("failing_stream_table", stream)
            .execute()
            .await;

        assert!(result.is_err());
        // TODO: when we upgrade to Lance 2.0.0, this should pass
        // assert!(matches!(result, Err(Error::External { source})
        //     if source.downcast_ref::<MyError>().is_some()
        // ));
    }

    #[tokio::test]
    #[allow(deprecated)]
    async fn test_create_table_with_storage_options() {
        let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
        let db = connect("memory://").execute().await.unwrap();

        let table = db
            .create_table("options_table", batch)
            .storage_option("timeout", "30s")
            .storage_options([("retry_count", "3")])
            .execute()
            .await
            .unwrap();

        let final_options = table.storage_options().await.unwrap();
        assert_eq!(final_options.get("timeout"), Some(&"30s".to_string()));
        assert_eq!(final_options.get("retry_count"), Some(&"3".to_string()));
    }

    #[tokio::test]
    async fn test_create_table_unregistered_embedding() {
        let db = connect("memory://").execute().await.unwrap();
        let batch = record_batch!(("text", Utf8, ["hello", "world"])).unwrap();

        // Try to add an embedding that doesn't exist in the registry
        let result = db
            .create_table("embed_table", batch)
            .add_embedding(EmbeddingDefinition::new(
                "text",
                "nonexistent_embedding_function",
                None::<&str>,
            ));

        match result {
            Err(Error::EmbeddingFunctionNotFound { name, .. }) => {
                assert_eq!(name, "nonexistent_embedding_function");
            }
            Err(other) => panic!("Expected EmbeddingFunctionNotFound error, got: {:?}", other),
            Ok(_) => panic!("Expected error, but got Ok"),
        }
    }

    #[tokio::test]
    async fn test_create_table_already_exists() {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();
        let db = connect(uri).execute().await.unwrap();
        let schema = Arc::new(Schema::new(vec![Field::new("x", DataType::Int32, false)]));
        db.create_empty_table("test", schema.clone())
            .execute()
            .await
            .unwrap();
        db.create_empty_table("test", schema)
            .mode(CreateTableMode::exist_ok(|mut req| {
                req.index_cache_size = Some(16);
                req
            }))
            .execute()
            .await
            .unwrap();
        let other_schema = Arc::new(Schema::new(vec![Field::new("y", DataType::Int32, false)]));
        assert!(db
            .create_empty_table("test", other_schema.clone())
            .execute()
            .await
            .is_err()); // TODO: assert what this error is
        let overwritten = db
            .create_empty_table("test", other_schema.clone())
            .mode(CreateTableMode::Overwrite)
            .execute()
            .await
            .unwrap();
        assert_eq!(other_schema, overwritten.schema().await.unwrap());
    }

    #[tokio::test]
    #[rstest::rstest]
    #[case(LanceFileVersion::Legacy)]
    #[case(LanceFileVersion::Stable)]
    async fn test_create_table_with_storage_version(
        #[case] data_storage_version: LanceFileVersion,
    ) {
        let db = connect("memory://")
            .database_options(&ListingDatabaseOptions {
                new_table_config: NewTableConfig {
                    data_storage_version: Some(data_storage_version),
                    ..Default::default()
                },
                ..Default::default()
            })
            .execute()
            .await
            .unwrap();

        let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
        let table = db
            .create_table("legacy_table", batch)
            .execute()
            .await
            .unwrap();

        let native_table = table.as_native().unwrap();
        let storage_format = native_table
            .manifest()
            .await
            .unwrap()
            .data_storage_format
            .lance_file_version()
            .unwrap();
        // Compare resolved versions since Stable/Next are aliases that resolve at storage time
        assert_eq!(storage_format.resolve(), data_storage_version.resolve());
    }

    #[tokio::test]
    async fn test_create_table_with_embedding() {
        // Register the mock embedding function
        let registry = Arc::new(MemoryRegistry::new());
        let mock_embedding: Arc<dyn EmbeddingFunction> = Arc::new(MockEmbed::new("mock", 4));
        registry.register("mock", mock_embedding).unwrap();

        // Connect with the custom registry
        let conn = connect("memory://")
            .embedding_registry(registry)
            .execute()
            .await
            .unwrap();

        // Create data without the embedding column
        let batch = record_batch!(("text", Utf8, ["hello", "world", "test"])).unwrap();

        // Create table with add_embedding - embeddings should be computed automatically
        let table = conn
            .create_table("embed_test", batch)
            .add_embedding(EmbeddingDefinition::new(
                "text",
                "mock",
                Some("text_embedding"),
            ))
            .unwrap()
            .execute()
            .await
            .unwrap();

        // Verify row count
        assert_eq!(table.count_rows(None).await.unwrap(), 3);

        // Verify the schema includes the embedding column
        let result_schema = table.schema().await.unwrap();
        assert_eq!(result_schema.fields().len(), 2);
        assert_eq!(result_schema.field(0).name(), "text");
        assert_eq!(result_schema.field(1).name(), "text_embedding");

        // Verify the embedding column has the correct type
        assert!(matches!(
            result_schema.field(1).data_type(),
            DataType::FixedSizeList(_, 4)
        ));

        // Query to verify the embeddings were computed
        let results: Vec<RecordBatch> = table
            .query()
            .select(Select::columns(&["text", "text_embedding"]))
            .execute()
            .await
            .unwrap()
            .try_collect()
            .await
            .unwrap();

        let total_rows: usize = results.iter().map(|b| b.num_rows()).sum();
        assert_eq!(total_rows, 3);

        // Check that all rows have embedding values (not null)
        for batch in &results {
            let embedding_col = batch.column(1);
            assert_eq!(embedding_col.null_count(), 0);
            assert_eq!(embedding_col.len(), batch.num_rows());
        }

        // Verify the schema metadata contains the column definitions
        assert!(
            result_schema
                .metadata
                .contains_key("lancedb::column_definitions"),
            "Schema metadata should contain column definitions"
        );
    }

    #[tokio::test]
    async fn test_create_empty_table_with_embeddings() {
        #[derive(Debug, Clone)]
        struct MockEmbedding {
            dim: usize,
        }

        impl EmbeddingFunction for MockEmbedding {
            fn name(&self) -> &str {
                "test_embedding"
            }

            fn source_type(&self) -> Result<Cow<'_, DataType>> {
                Ok(Cow::Owned(DataType::Utf8))
            }

            fn dest_type(&self) -> Result<Cow<'_, DataType>> {
                Ok(Cow::Owned(DataType::new_fixed_size_list(
                    DataType::Float32,
                    self.dim as i32,
                    true,
                )))
            }

            fn compute_source_embeddings(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
                let len = source.len();
                let values = vec![1.0f32; len * self.dim];
                let values = Arc::new(Float32Array::from(values));
                let field = Arc::new(Field::new("item", DataType::Float32, true));
                Ok(Arc::new(FixedSizeListArray::new(
                    field,
                    self.dim as i32,
                    values,
                    None,
                )))
            }

            fn compute_query_embeddings(&self, _input: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
                unimplemented!()
            }
        }

        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();
        let db = connect(uri).execute().await.unwrap();

        let embed_func = Arc::new(MockEmbedding { dim: 128 });
        db.embedding_registry()
            .register("test_embedding", embed_func.clone())
            .unwrap();

        let schema = Arc::new(Schema::new(vec![Field::new("name", DataType::Utf8, true)]));
        let ed = EmbeddingDefinition {
            source_column: "name".to_owned(),
            dest_column: Some("name_embedding".to_owned()),
            embedding_name: "test_embedding".to_owned(),
        };

        let table = db
            .create_empty_table("test", schema)
            .mode(CreateTableMode::Overwrite)
            .add_embedding(ed)
            .unwrap()
            .execute()
            .await
            .unwrap();

        let table_schema = table.schema().await.unwrap();
        assert!(table_schema.column_with_name("name").is_some());
        assert!(table_schema.column_with_name("name_embedding").is_some());

        let embedding_field = table_schema.field_with_name("name_embedding").unwrap();
        assert_eq!(
            embedding_field.data_type(),
            &DataType::new_fixed_size_list(DataType::Float32, 128, true)
        );

        let input_batch = record_batch!(("name", Utf8, ["Alice", "Bob", "Charlie"])).unwrap();
        table.add(input_batch).execute().await.unwrap();

        let results = table
            .query()
            .execute()
            .await
            .unwrap()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();

        assert_eq!(results.len(), 1);
        let batch = &results[0];
        assert_eq!(batch.num_rows(), 3);
        assert!(batch.column_by_name("name_embedding").is_some());

        let embedding_col = batch
            .column_by_name("name_embedding")
            .unwrap()
            .as_any()
            .downcast_ref::<FixedSizeListArray>()
            .unwrap();
        assert_eq!(embedding_col.len(), 3);
    }
}
@@ -5,4 +5,3 @@

pub mod inspect;
pub mod sanitize;
pub mod scannable;

@@ -1,580 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

//! Data source abstraction for LanceDB.
//!
//! This module provides a [`Scannable`] trait that allows input data sources to express
//! capabilities (row count, rescannability) so the insert pipeline can make
//! better decisions about write parallelism and retry strategies.

use std::sync::Arc;

use arrow_array::{RecordBatch, RecordBatchIterator, RecordBatchReader};
use arrow_schema::{ArrowError, SchemaRef};
use async_trait::async_trait;
use futures::stream::once;
use futures::StreamExt;
use lance_datafusion::utils::StreamingWriteSource;

use crate::arrow::{
    SendableRecordBatchStream, SendableRecordBatchStreamExt, SimpleRecordBatchStream,
};
use crate::embeddings::{
    compute_embeddings_for_batch, compute_output_schema, EmbeddingDefinition, EmbeddingFunction,
    EmbeddingRegistry,
};
use crate::table::{ColumnDefinition, ColumnKind, TableDefinition};
use crate::{Error, Result};

pub trait Scannable: Send {
    /// Returns the schema of the data.
    fn schema(&self) -> SchemaRef;

    /// Read data as a stream of record batches.
    ///
    /// For rescannable sources (in-memory data like RecordBatch, Vec<RecordBatch>),
    /// this can be called multiple times and returns cloned data each time.
    ///
    /// For non-rescannable sources (streams, readers), this can only be called once.
    /// Calling it a second time returns a stream whose first item is an error.
    fn scan_as_stream(&mut self) -> SendableRecordBatchStream;

    /// Optional hint about the number of rows.
    ///
    /// When available, this allows the pipeline to estimate total data size
    /// and choose appropriate partitioning.
    fn num_rows(&self) -> Option<usize> {
        None
    }

    /// Whether the source can be re-read from the beginning.
    ///
    /// `true` for in-memory data (Tables, DataFrames) and disk-based sources (Datasets).
    /// `false` for streaming sources (DuckDB results, network streams).
    ///
    /// When true, the pipeline can retry failed writes by rescanning.
    fn rescannable(&self) -> bool {
        false
    }
}
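
// Illustrative sketch (not part of the original file): a custom in-memory source can
// implement the trait by cloning its batch on each scan, much like the `RecordBatch`
// impl below:
//
//     struct SingleBatch(RecordBatch);
//
//     impl Scannable for SingleBatch {
//         fn schema(&self) -> SchemaRef {
//             self.0.schema()
//         }
//         fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
//             let batch = self.0.clone();
//             Box::pin(SimpleRecordBatchStream {
//                 schema: batch.schema(),
//                 stream: once(async move { Ok(batch) }),
//             })
//         }
//         fn num_rows(&self) -> Option<usize> {
//             Some(self.0.num_rows())
//         }
//         fn rescannable(&self) -> bool {
//             true
//         }
//     }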

impl std::fmt::Debug for dyn Scannable {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Scannable")
            .field("schema", &self.schema())
            .field("num_rows", &self.num_rows())
            .field("rescannable", &self.rescannable())
            .finish()
    }
}

impl Scannable for RecordBatch {
    fn schema(&self) -> SchemaRef {
        Self::schema(self)
    }

    fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
        let batch = self.clone();
        let schema = batch.schema();
        Box::pin(SimpleRecordBatchStream {
            schema,
            stream: once(async move { Ok(batch) }),
        })
    }

    fn num_rows(&self) -> Option<usize> {
        Some(Self::num_rows(self))
    }

    fn rescannable(&self) -> bool {
        true
    }
}

impl Scannable for Vec<RecordBatch> {
    fn schema(&self) -> SchemaRef {
        if self.is_empty() {
            Arc::new(arrow_schema::Schema::empty())
        } else {
            self[0].schema()
        }
    }

    fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
        if self.is_empty() {
            let schema = Scannable::schema(self);
            return Box::pin(SimpleRecordBatchStream {
                schema,
                stream: once(async {
                    Err(Error::InvalidInput {
                        message: "Cannot scan an empty Vec<RecordBatch>".to_string(),
                    })
                }),
            });
        }
        let schema = Scannable::schema(self);
        let batches = self.clone();
        let stream = futures::stream::iter(batches.into_iter().map(Ok));
        Box::pin(SimpleRecordBatchStream { schema, stream })
    }

    fn num_rows(&self) -> Option<usize> {
        Some(self.iter().map(|b| b.num_rows()).sum())
    }

    fn rescannable(&self) -> bool {
        true
    }
}

impl Scannable for Box<dyn RecordBatchReader + Send> {
    fn schema(&self) -> SchemaRef {
        RecordBatchReader::schema(self.as_ref())
    }

    fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
        let schema = Scannable::schema(self);

        // Swap self with a reader that errors on iteration, so a second call
        // produces a clear error instead of silently returning empty data.
        let err_reader: Box<dyn RecordBatchReader + Send> = Box::new(RecordBatchIterator::new(
            vec![Err(ArrowError::InvalidArgumentError(
                "Reader has already been consumed".into(),
            ))],
            schema.clone(),
        ));
        let reader = std::mem::replace(self, err_reader);

        // Bridge the blocking RecordBatchReader to an async stream via a channel.
        let (tx, rx) = tokio::sync::mpsc::channel::<crate::Result<RecordBatch>>(2);
        tokio::task::spawn_blocking(move || {
            for batch_result in reader {
                let result = batch_result.map_err(Into::into);
                if tx.blocking_send(result).is_err() {
                    break;
                }
            }
        });

        let stream = futures::stream::unfold(rx, |mut rx| async move {
            rx.recv().await.map(|batch| (batch, rx))
        })
        .fuse();

        Box::pin(SimpleRecordBatchStream { schema, stream })
    }
}

impl Scannable for SendableRecordBatchStream {
    fn schema(&self) -> SchemaRef {
        self.as_ref().schema()
    }

    fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
        let schema = Scannable::schema(self);

        // Swap self with an error stream so a second call produces a clear error.
        let error_stream = Box::pin(SimpleRecordBatchStream {
            schema: schema.clone(),
            stream: once(async {
                Err(Error::InvalidInput {
                    message: "Stream has already been consumed".to_string(),
                })
            }),
        });
        std::mem::replace(self, error_stream)
    }
}

#[async_trait]
impl StreamingWriteSource for Box<dyn Scannable> {
    fn arrow_schema(&self) -> SchemaRef {
        self.schema()
    }

    fn into_stream(mut self) -> datafusion_physical_plan::SendableRecordBatchStream {
        self.scan_as_stream().into_df_stream()
    }
}

/// A scannable that applies embeddings to the stream.
pub struct WithEmbeddingsScannable {
    inner: Box<dyn Scannable>,
    embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
    output_schema: SchemaRef,
}

impl WithEmbeddingsScannable {
    /// Create a new WithEmbeddingsScannable.
    ///
    /// The embeddings are applied to the inner scannable's data as new columns.
    pub fn try_new(
        inner: Box<dyn Scannable>,
        embeddings: Vec<(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)>,
    ) -> Result<Self> {
        let output_schema = compute_output_schema(&inner.schema(), &embeddings)?;

        // Build column definitions: Physical for base columns, Embedding for new ones
        let base_col_count = inner.schema().fields().len();
        let column_definitions: Vec<ColumnDefinition> = (0..base_col_count)
            .map(|_| ColumnDefinition {
                kind: ColumnKind::Physical,
            })
            .chain(embeddings.iter().map(|(ed, _)| ColumnDefinition {
                kind: ColumnKind::Embedding(ed.clone()),
            }))
            .collect();

        let table_definition = TableDefinition::new(output_schema, column_definitions);
        let output_schema = table_definition.into_rich_schema();

        Ok(Self {
            inner,
            embeddings,
            output_schema,
        })
    }
}
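
// Illustrative sketch (not part of the original file): wrapping a plain batch so the
// scan emits the extra embedding column, as the embedding_tests module below does:
//
//     let mut scannable = WithEmbeddingsScannable::try_new(
//         Box::new(batch),
//         vec![(EmbeddingDefinition::new("text", "mock", Some("text_embedding")), func)],
//     )?;
//     let batches: Vec<RecordBatch> = scannable.scan_as_stream().try_collect().await?;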
|
||||
|
||||
impl Scannable for WithEmbeddingsScannable {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.output_schema.clone()
|
||||
}
|
||||
|
||||
fn scan_as_stream(&mut self) -> SendableRecordBatchStream {
|
||||
let inner_stream = self.inner.scan_as_stream();
|
||||
let embeddings = self.embeddings.clone();
|
||||
let output_schema = self.output_schema.clone();
|
||||
|
||||
let mapped_stream = inner_stream.then(move |batch_result| {
|
||||
let embeddings = embeddings.clone();
|
||||
async move {
|
||||
let batch = batch_result?;
|
||||
let result = tokio::task::spawn_blocking(move || {
|
||||
compute_embeddings_for_batch(batch, &embeddings)
|
||||
})
|
||||
.await
|
||||
.map_err(|e| Error::Runtime {
|
||||
message: format!("Task panicked during embedding computation: {}", e),
|
||||
})??;
|
||||
Ok(result)
|
||||
}
|
||||
});
|
||||
|
||||
Box::pin(SimpleRecordBatchStream {
|
||||
schema: output_schema,
|
||||
stream: mapped_stream,
|
||||
})
|
||||
}
|
||||
|
||||
fn num_rows(&self) -> Option<usize> {
|
||||
self.inner.num_rows()
|
||||
}
|
||||
|
||||
fn rescannable(&self) -> bool {
|
||||
self.inner.rescannable()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn scannable_with_embeddings(
|
||||
inner: Box<dyn Scannable>,
|
||||
table_definition: &TableDefinition,
|
||||
registry: Option<&Arc<dyn EmbeddingRegistry>>,
|
||||
) -> Result<Box<dyn Scannable>> {
|
||||
if let Some(registry) = registry {
|
||||
let mut embeddings = Vec::with_capacity(table_definition.column_definitions.len());
|
||||
for cd in table_definition.column_definitions.iter() {
|
||||
if let ColumnKind::Embedding(embedding_def) = &cd.kind {
|
||||
match registry.get(&embedding_def.embedding_name) {
|
||||
Some(func) => {
|
||||
embeddings.push((embedding_def.clone(), func));
|
||||
}
|
||||
None => {
|
||||
return Err(Error::EmbeddingFunctionNotFound {
|
||||
name: embedding_def.embedding_name.clone(),
|
||||
reason: format!(
|
||||
"Table was defined with an embedding column `{}` but no embedding function was found with that name within the registry.",
|
||||
embedding_def.embedding_name
|
||||
),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !embeddings.is_empty() {
|
||||
return Ok(Box::new(WithEmbeddingsScannable::try_new(
|
||||
inner, embeddings,
|
||||
)?));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(inner)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use arrow_array::record_batch;
|
||||
use futures::TryStreamExt;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_record_batch_rescannable() {
|
||||
let mut batch = record_batch!(("id", Int64, [0, 1, 2])).unwrap();
|
||||
|
||||
let stream1 = batch.scan_as_stream();
|
||||
let batches1: Vec<RecordBatch> = stream1.try_collect().await.unwrap();
|
||||
assert_eq!(batches1.len(), 1);
|
||||
assert_eq!(batches1[0], batch);
|
||||
|
||||
assert!(batch.rescannable());
|
||||
let stream2 = batch.scan_as_stream();
|
||||
let batches2: Vec<RecordBatch> = stream2.try_collect().await.unwrap();
|
||||
assert_eq!(batches2.len(), 1);
|
||||
assert_eq!(batches2[0], batch);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_vec_batch_rescannable() {
|
||||
let mut batches = vec![
|
||||
record_batch!(("id", Int64, [0, 1])).unwrap(),
|
||||
record_batch!(("id", Int64, [2, 3, 4])).unwrap(),
|
||||
];
|
||||
|
||||
let stream1 = batches.scan_as_stream();
|
||||
let result1: Vec<RecordBatch> = stream1.try_collect().await.unwrap();
|
||||
assert_eq!(result1.len(), 2);
|
||||
assert_eq!(result1[0], batches[0]);
|
||||
assert_eq!(result1[1], batches[1]);
|
||||
|
||||
assert!(batches.rescannable());
|
||||
let stream2 = batches.scan_as_stream();
|
||||
let result2: Vec<RecordBatch> = stream2.try_collect().await.unwrap();
|
||||
assert_eq!(result2.len(), 2);
|
||||
assert_eq!(result2[0], batches[0]);
|
||||
assert_eq!(result2[1], batches[1]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_vec_batch_empty_errors() {
|
||||
let mut empty: Vec<RecordBatch> = vec![];
|
||||
let mut stream = empty.scan_as_stream();
|
||||
let result = stream.next().await;
|
||||
assert!(result.is_some());
|
||||
assert!(result.unwrap().is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_reader_not_rescannable() {
|
||||
let batch = record_batch!(("id", Int64, [0, 1, 2])).unwrap();
|
||||
let schema = batch.schema();
|
||||
let mut reader: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(
|
||||
RecordBatchIterator::new(vec![Ok(batch.clone())], schema.clone()),
|
||||
);
|
||||
|
||||
let stream1 = reader.scan_as_stream();
|
||||
let result1: Vec<RecordBatch> = stream1.try_collect().await.unwrap();
|
||||
assert_eq!(result1.len(), 1);
|
||||
assert_eq!(result1[0], batch);
|
||||
|
||||
assert!(!reader.rescannable());
|
||||
// Second call returns a stream whose first item is an error
|
||||
let mut stream2 = reader.scan_as_stream();
|
||||
let result2 = stream2.next().await;
|
||||
assert!(result2.is_some());
|
||||
assert!(result2.unwrap().is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_stream_not_rescannable() {
|
||||
let batch = record_batch!(("id", Int64, [0, 1, 2])).unwrap();
|
||||
let schema = batch.schema();
|
||||
let inner_stream = futures::stream::iter(vec![Ok(batch.clone())]);
|
||||
let mut stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
|
||||
schema: schema.clone(),
|
||||
stream: inner_stream,
|
||||
});
|
||||
|
||||
        let stream1 = stream.scan_as_stream();
        let result1: Vec<RecordBatch> = stream1.try_collect().await.unwrap();
        assert_eq!(result1.len(), 1);
        assert_eq!(result1[0], batch);

        assert!(!stream.rescannable());
        // Second call returns a stream whose first item is an error
        let mut stream2 = stream.scan_as_stream();
        let result2 = stream2.next().await;
        assert!(result2.is_some());
        assert!(result2.unwrap().is_err());
    }

    mod embedding_tests {
        use super::*;
        use crate::embeddings::MemoryRegistry;
        use crate::table::{ColumnDefinition, ColumnKind};
        use crate::test_utils::embeddings::MockEmbed;
        use arrow_array::Array as _;
        use arrow_array::{ArrayRef, StringArray};
        use arrow_schema::{DataType, Field, Schema};

        #[tokio::test]
        async fn test_with_embeddings_scannable() {
            let schema = Arc::new(Schema::new(vec![Field::new("text", DataType::Utf8, false)]));
            let text_array = StringArray::from(vec!["hello", "world", "test"]);
            let batch =
                RecordBatch::try_new(schema.clone(), vec![Arc::new(text_array) as ArrayRef])
                    .unwrap();

            let mock_embedding: Arc<dyn EmbeddingFunction> = Arc::new(MockEmbed::new("mock", 4));
            let embedding_def = EmbeddingDefinition::new("text", "mock", Some("text_embedding"));

            let mut scannable = WithEmbeddingsScannable::try_new(
                Box::new(batch.clone()),
                vec![(embedding_def, mock_embedding)],
            )
            .unwrap();

            // Check that schema has the embedding column
            let output_schema = scannable.schema();
            assert_eq!(output_schema.fields().len(), 2);
            assert_eq!(output_schema.field(0).name(), "text");
            assert_eq!(output_schema.field(1).name(), "text_embedding");

            // Check num_rows and rescannable are preserved
            assert_eq!(scannable.num_rows(), Some(3));
            assert!(scannable.rescannable());

            // Read the data
            let stream = scannable.scan_as_stream();
            let results: Vec<RecordBatch> = stream.try_collect().await.unwrap();
            assert_eq!(results.len(), 1);

            let result_batch = &results[0];
            assert_eq!(result_batch.num_rows(), 3);
            assert_eq!(result_batch.num_columns(), 2);

            // Verify the embedding column is present and has the right shape
            let embedding_col = result_batch.column(1);
            assert_eq!(embedding_col.len(), 3);
        }

        #[tokio::test]
        async fn test_maybe_embedded_scannable_no_embeddings() {
            let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();

            // Create a table definition with no embedding columns
            let table_def = TableDefinition::new_from_schema(batch.schema());

            // Even with a registry, if there are no embedding columns, it's a passthrough
            let registry: Arc<dyn EmbeddingRegistry> = Arc::new(MemoryRegistry::new());
            let mut scannable =
                scannable_with_embeddings(Box::new(batch.clone()), &table_def, Some(&registry))
                    .unwrap();

            // Check that data passes through unchanged
            let stream = scannable.scan_as_stream();
            let results: Vec<RecordBatch> = stream.try_collect().await.unwrap();
            assert_eq!(results.len(), 1);
            assert_eq!(results[0], batch);
        }

        #[tokio::test]
        async fn test_maybe_embedded_scannable_with_embeddings() {
            let schema = Arc::new(Schema::new(vec![Field::new("text", DataType::Utf8, false)]));
            let text_array = StringArray::from(vec!["hello", "world"]);
            let batch =
                RecordBatch::try_new(schema.clone(), vec![Arc::new(text_array) as ArrayRef])
                    .unwrap();

            // Create a table definition with an embedding column
            let embedding_def = EmbeddingDefinition::new("text", "mock", Some("text_embedding"));
            let embedding_schema = Arc::new(Schema::new(vec![
                Field::new("text", DataType::Utf8, false),
                Field::new(
                    "text_embedding",
                    DataType::FixedSizeList(
                        Arc::new(Field::new("item", DataType::Float32, true)),
                        4,
                    ),
                    false,
                ),
            ]));
            let table_def = TableDefinition::new(
                embedding_schema,
                vec![
                    ColumnDefinition {
                        kind: ColumnKind::Physical,
                    },
                    ColumnDefinition {
                        kind: ColumnKind::Embedding(embedding_def.clone()),
                    },
                ],
            );

            // Register the mock embedding function
            let registry: Arc<dyn EmbeddingRegistry> = Arc::new(MemoryRegistry::new());
            let mock_embedding: Arc<dyn EmbeddingFunction> = Arc::new(MockEmbed::new("mock", 4));
            registry.register("mock", mock_embedding).unwrap();

            let mut scannable =
                scannable_with_embeddings(Box::new(batch), &table_def, Some(&registry)).unwrap();

            // Read and verify the data has embeddings
            let stream = scannable.scan_as_stream();
            let results: Vec<RecordBatch> = stream.try_collect().await.unwrap();
            assert_eq!(results.len(), 1);

            let result_batch = &results[0];
            assert_eq!(result_batch.num_columns(), 2);
            assert_eq!(result_batch.schema().field(1).name(), "text_embedding");
        }

        #[tokio::test]
        async fn test_maybe_embedded_scannable_missing_function() {
            let schema = Arc::new(Schema::new(vec![Field::new("text", DataType::Utf8, false)]));
            let text_array = StringArray::from(vec!["hello"]);
            let batch =
                RecordBatch::try_new(schema.clone(), vec![Arc::new(text_array) as ArrayRef])
                    .unwrap();

            // Create a table definition with an embedding column
            let embedding_def =
                EmbeddingDefinition::new("text", "nonexistent", Some("text_embedding"));
            let embedding_schema = Arc::new(Schema::new(vec![
                Field::new("text", DataType::Utf8, false),
                Field::new(
                    "text_embedding",
                    DataType::FixedSizeList(
                        Arc::new(Field::new("item", DataType::Float32, true)),
                        4,
                    ),
                    false,
                ),
            ]));
            let table_def = TableDefinition::new(
                embedding_schema,
                vec![
                    ColumnDefinition {
                        kind: ColumnKind::Physical,
                    },
                    ColumnDefinition {
                        kind: ColumnKind::Embedding(embedding_def),
                    },
                ],
            );

            // Registry has no embedding functions registered
            let registry: Arc<dyn EmbeddingRegistry> = Arc::new(MemoryRegistry::new());

            let result = scannable_with_embeddings(Box::new(batch), &table_def, Some(&registry));

            // Should fail because the embedding function is not found
            assert!(result.is_err());
            let err = result.err().unwrap();
            assert!(
                matches!(err, Error::EmbeddingFunctionNotFound { .. }),
                "Expected EmbeddingFunctionNotFound"
            );
        }
    }
}
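// Illustrative sketch (not part of the upstream diff): the tests above rely on
// the embedding-registry pattern. A minimal sketch of wiring a function into a
// registry and declaring an embedding column, assuming the `MemoryRegistry`,
// `EmbeddingRegistry`, `EmbeddingDefinition`, and `EmbeddingFunction` types
// used in the tests:
//
//     use std::sync::Arc;
//
//     fn register_example(func: Arc<dyn EmbeddingFunction>) {
//         let registry: Arc<dyn EmbeddingRegistry> = Arc::new(MemoryRegistry::new());
//         // The function name must match the name referenced by the definition.
//         registry.register("mock", func).unwrap();
//         // Embed the "text" column into a new "text_embedding" column.
//         let _def = EmbeddingDefinition::new("text", "mock", Some("text_embedding"));
//     }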
@@ -18,7 +18,12 @@ use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;

use arrow_array::RecordBatchReader;
use async_trait::async_trait;
use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
use futures::stream;
use lance::dataset::ReadParams;
use lance_datafusion::utils::StreamingWriteSource;
use lance_namespace::models::{
    CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
    DescribeNamespaceResponse, DropNamespaceRequest, DropNamespaceResponse, ListNamespacesRequest,
@@ -26,9 +31,9 @@ use lance_namespace::models::{
};
use lance_namespace::LanceNamespace;

use crate::data::scannable::Scannable;
use crate::arrow::{SendableRecordBatchStream, SendableRecordBatchStreamExt};
use crate::error::Result;
use crate::table::{BaseTable, WriteOptions};
use crate::table::{BaseTable, TableDefinition, WriteOptions};

pub mod listing;
pub mod namespace;
@@ -110,14 +115,51 @@ impl Default for CreateTableMode {
    }
}

/// The data to start a table or a schema to create an empty table
pub enum CreateTableData {
    /// Creates a table using an iterator of data, the schema will be obtained from the data
    Data(Box<dyn RecordBatchReader + Send>),
    /// Creates a table using a stream of data, the schema will be obtained from the data
    StreamingData(SendableRecordBatchStream),
    /// Creates an empty table, the definition / schema must be provided separately
    Empty(TableDefinition),
}

impl CreateTableData {
    pub fn schema(&self) -> Arc<arrow_schema::Schema> {
        match self {
            Self::Data(reader) => reader.schema(),
            Self::StreamingData(stream) => stream.schema(),
            Self::Empty(definition) => definition.schema.clone(),
        }
    }
}

#[async_trait]
impl StreamingWriteSource for CreateTableData {
    fn arrow_schema(&self) -> Arc<arrow_schema::Schema> {
        self.schema()
    }
    fn into_stream(self) -> datafusion_physical_plan::SendableRecordBatchStream {
        match self {
            Self::Data(reader) => reader.into_stream(),
            Self::StreamingData(stream) => stream.into_df_stream(),
            Self::Empty(table_definition) => {
                let schema = table_definition.schema.clone();
                Box::pin(RecordBatchStreamAdapter::new(schema, stream::empty()))
            }
        }
    }
}
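// Illustrative sketch (not part of the upstream diff): constructing each
// `CreateTableData` variant for the `CreateTableRequest` API shown in the
// hunks below.
//
//     use arrow_array::{RecordBatch, RecordBatchIterator};
//
//     fn requests(schema: Arc<arrow_schema::Schema>, batch: RecordBatch) {
//         // From an iterator of batches; the schema comes from the reader.
//         let reader = Box::new(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()));
//         let _with_data =
//             CreateTableRequest::new("t1".to_string(), CreateTableData::Data(reader));
//         // Empty table: the schema must be supplied via a TableDefinition.
//         let _empty = CreateTableRequest::new(
//             "t2".to_string(),
//             CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
//         );
//     }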

/// A request to create a table
pub struct CreateTableRequest {
    /// The name of the new table
    pub name: String,
    /// The namespace to create the table in. Empty list represents root namespace.
    pub namespace: Vec<String>,
    /// Initial data to write to the table, can be empty.
    pub data: Box<dyn Scannable>,
    /// Initial data to write to the table, can be None to create an empty table
    pub data: CreateTableData,
    /// The mode to use when creating the table
    pub mode: CreateTableMode,
    /// Options to use when writing data (only used if `data` is not None)
@@ -131,7 +173,7 @@ pub struct CreateTableRequest {
}

impl CreateTableRequest {
    pub fn new(name: String, data: Box<dyn Scannable>) -> Self {
    pub fn new(name: String, data: CreateTableData) -> Self {
        Self {
            name,
            namespace: vec![],

@@ -922,7 +922,7 @@ impl Database for ListingDatabase {
            .with_read_params(read_params.clone())
            .load()
            .await
            .map_err(|e| -> Error { e.into() })?;
            .map_err(|e| Error::Lance { source: e })?;

        let version_ref = match (request.source_version, request.source_tag) {
            (Some(v), None) => Ok(Ref::Version(None, Some(v))),
@@ -937,7 +937,7 @@ impl Database for ListingDatabase {
        source_dataset
            .shallow_clone(&target_uri, version_ref, Some(storage_params))
            .await
            .map_err(|e| -> Error { e.into() })?;
            .map_err(|e| Error::Lance { source: e })?;

        let cloned_table = NativeTable::open_with_params(
            &target_uri,
@@ -1098,10 +1098,8 @@ impl Database for ListingDatabase {
mod tests {
    use super::*;
    use crate::connection::ConnectRequest;
    use crate::data::scannable::Scannable;
    use crate::database::{CreateTableMode, CreateTableRequest};
    use crate::table::WriteOptions;
    use crate::Table;
    use crate::database::{CreateTableData, CreateTableMode, CreateTableRequest, WriteOptions};
    use crate::table::{Table, TableDefinition};
    use arrow_array::{Int32Array, RecordBatch, StringArray};
    use arrow_schema::{DataType, Field, Schema};
    use std::path::PathBuf;
@@ -1141,7 +1139,7 @@ mod tests {
        .create_table(CreateTableRequest {
            name: "source_table".to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema.clone())) as Box<dyn Scannable>,
            data: CreateTableData::Empty(TableDefinition::new_from_schema(schema.clone())),
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
@@ -1198,11 +1196,16 @@ mod tests {
        )
        .unwrap();

        let reader = Box::new(arrow_array::RecordBatchIterator::new(
            vec![Ok(batch)],
            schema.clone(),
        ));

        let source_table = db
            .create_table(CreateTableRequest {
                name: "source_with_data".to_string(),
                namespace: vec![],
                data: Box::new(batch) as Box<dyn Scannable>,
                data: CreateTableData::Data(reader),
                mode: CreateTableMode::Create,
                write_options: Default::default(),
                location: None,
@@ -1261,7 +1264,7 @@ mod tests {
        db.create_table(CreateTableRequest {
            name: "source".to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
            data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
@@ -1297,7 +1300,7 @@ mod tests {
        db.create_table(CreateTableRequest {
            name: "source".to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
            data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
@@ -1337,7 +1340,7 @@ mod tests {
        db.create_table(CreateTableRequest {
            name: "source".to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
            data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
@@ -1377,7 +1380,7 @@ mod tests {
        db.create_table(CreateTableRequest {
            name: "source".to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
            data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
@@ -1432,7 +1435,7 @@ mod tests {
        db.create_table(CreateTableRequest {
            name: "source".to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
            data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
@@ -1481,11 +1484,16 @@ mod tests {
        )
        .unwrap();

        let reader = Box::new(arrow_array::RecordBatchIterator::new(
            vec![Ok(batch1)],
            schema.clone(),
        ));

        let source_table = db
            .create_table(CreateTableRequest {
                name: "versioned_source".to_string(),
                namespace: vec![],
                data: Box::new(batch1) as Box<dyn Scannable>,
                data: CreateTableData::Data(reader),
                mode: CreateTableMode::Create,
                write_options: Default::default(),
                location: None,
@@ -1509,7 +1517,14 @@ mod tests {

        let db = Arc::new(db);
        let source_table_obj = Table::new(source_table.clone(), db.clone());
        source_table_obj.add(batch2).execute().await.unwrap();
        source_table_obj
            .add(Box::new(arrow_array::RecordBatchIterator::new(
                vec![Ok(batch2)],
                schema.clone(),
            )))
            .execute()
            .await
            .unwrap();

        // Verify source table now has 4 rows
        assert_eq!(source_table.count_rows(None).await.unwrap(), 4);
@@ -1555,11 +1570,16 @@ mod tests {
        )
        .unwrap();

        let reader = Box::new(arrow_array::RecordBatchIterator::new(
            vec![Ok(batch1)],
            schema.clone(),
        ));

        let source_table = db
            .create_table(CreateTableRequest {
                name: "tagged_source".to_string(),
                namespace: vec![],
                data: Box::new(batch1),
                data: CreateTableData::Data(reader),
                mode: CreateTableMode::Create,
                write_options: Default::default(),
                location: None,
@@ -1587,7 +1607,14 @@ mod tests {
        .unwrap();

        let source_table_obj = Table::new(source_table.clone(), db.clone());
        source_table_obj.add(batch2).execute().await.unwrap();
        source_table_obj
            .add(Box::new(arrow_array::RecordBatchIterator::new(
                vec![Ok(batch2)],
                schema.clone(),
            )))
            .execute()
            .await
            .unwrap();

        // Source table should have 4 rows
        assert_eq!(source_table.count_rows(None).await.unwrap(), 4);
@@ -1630,11 +1657,16 @@ mod tests {
        )
        .unwrap();

        let reader = Box::new(arrow_array::RecordBatchIterator::new(
            vec![Ok(batch1)],
            schema.clone(),
        ));

        let source_table = db
            .create_table(CreateTableRequest {
                name: "independent_source".to_string(),
                namespace: vec![],
                data: Box::new(batch1),
                data: CreateTableData::Data(reader),
                mode: CreateTableMode::Create,
                write_options: Default::default(),
                location: None,
@@ -1674,7 +1706,14 @@ mod tests {

        let db = Arc::new(db);
        let cloned_table_obj = Table::new(cloned_table.clone(), db.clone());
        cloned_table_obj.add(batch_clone).execute().await.unwrap();
        cloned_table_obj
            .add(Box::new(arrow_array::RecordBatchIterator::new(
                vec![Ok(batch_clone)],
                schema.clone(),
            )))
            .execute()
            .await
            .unwrap();

        // Add different data to the source table
        let batch_source = RecordBatch::try_new(
@@ -1687,7 +1726,14 @@ mod tests {
        .unwrap();

        let source_table_obj = Table::new(source_table.clone(), db);
        source_table_obj.add(batch_source).execute().await.unwrap();
        source_table_obj
            .add(Box::new(arrow_array::RecordBatchIterator::new(
                vec![Ok(batch_source)],
                schema.clone(),
            )))
            .execute()
            .await
            .unwrap();

        // Verify they have evolved independently
        assert_eq!(source_table.count_rows(None).await.unwrap(), 4); // 2 + 2
@@ -1705,11 +1751,16 @@ mod tests {
        RecordBatch::try_new(schema.clone(), vec![Arc::new(Int32Array::from(vec![1, 2]))])
            .unwrap();

        let reader = Box::new(arrow_array::RecordBatchIterator::new(
            vec![Ok(batch1)],
            schema.clone(),
        ));

        let source_table = db
            .create_table(CreateTableRequest {
                name: "latest_version_source".to_string(),
                namespace: vec![],
                data: Box::new(batch1),
                data: CreateTableData::Data(reader),
                mode: CreateTableMode::Create,
                write_options: Default::default(),
                location: None,
@@ -1728,7 +1779,14 @@ mod tests {
            .unwrap();

            let source_table_obj = Table::new(source_table.clone(), db.clone());
            source_table_obj.add(batch).execute().await.unwrap();
            source_table_obj
                .add(Box::new(arrow_array::RecordBatchIterator::new(
                    vec![Ok(batch)],
                    schema.clone(),
                )))
                .execute()
                .await
                .unwrap();
        }

        // Source should have 8 rows total (2 + 2 + 2 + 2)
@@ -1791,11 +1849,16 @@ mod tests {
        )
        .unwrap();

        let reader = Box::new(arrow_array::RecordBatchIterator::new(
            vec![Ok(batch)],
            schema.clone(),
        ));

        let table = db
            .create_table(CreateTableRequest {
                name: "test_stable".to_string(),
                namespace: vec![],
                data: Box::new(batch),
                data: CreateTableData::Data(reader),
                mode: CreateTableMode::Create,
                write_options: Default::default(),
                location: None,
@@ -1824,6 +1887,11 @@ mod tests {
        )
        .unwrap();

        let reader = Box::new(arrow_array::RecordBatchIterator::new(
            vec![Ok(batch)],
            schema.clone(),
        ));

        let mut storage_options = HashMap::new();
        storage_options.insert(
            OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
@@ -1846,7 +1914,7 @@ mod tests {
            .create_table(CreateTableRequest {
                name: "test_stable_table_level".to_string(),
                namespace: vec![],
                data: Box::new(batch),
                data: CreateTableData::Data(reader),
                mode: CreateTableMode::Create,
                write_options,
                location: None,
@@ -1895,6 +1963,11 @@ mod tests {
        )
        .unwrap();

        let reader = Box::new(arrow_array::RecordBatchIterator::new(
            vec![Ok(batch)],
            schema.clone(),
        ));

        let mut storage_options = HashMap::new();
        storage_options.insert(
            OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
@@ -1917,7 +1990,7 @@ mod tests {
            .create_table(CreateTableRequest {
                name: "test_override".to_string(),
                namespace: vec![],
                data: Box::new(batch),
                data: CreateTableData::Data(reader),
                mode: CreateTableMode::Create,
                write_options,
                location: None,
@@ -2035,7 +2108,7 @@ mod tests {
        db.create_table(CreateTableRequest {
            name: "table1".to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema.clone())) as Box<dyn Scannable>,
            data: CreateTableData::Empty(TableDefinition::new_from_schema(schema.clone())),
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
@@ -2047,7 +2120,7 @@ mod tests {
        db.create_table(CreateTableRequest {
            name: "table2".to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
            data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,

@@ -354,13 +354,15 @@ mod tests {
    use super::*;
    use crate::connect_namespace;
    use crate::query::ExecutableQuery;
    use arrow_array::{Int32Array, RecordBatch, StringArray};
    use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, StringArray};
    use arrow_schema::{DataType, Field, Schema};
    use futures::TryStreamExt;
    use tempfile::tempdir;

    /// Helper function to create test data
    fn create_test_data() -> RecordBatch {
    fn create_test_data() -> RecordBatchIterator<
        std::vec::IntoIter<std::result::Result<RecordBatch, arrow_schema::ArrowError>>,
    > {
        let schema = Arc::new(Schema::new(vec![
            Field::new("id", DataType::Int32, false),
            Field::new("name", DataType::Utf8, false),
@@ -369,7 +371,12 @@ mod tests {
        let id_array = Int32Array::from(vec![1, 2, 3, 4, 5]);
        let name_array = StringArray::from(vec!["Alice", "Bob", "Charlie", "David", "Eve"]);

        RecordBatch::try_new(schema, vec![Arc::new(id_array), Arc::new(name_array)]).unwrap()
        let batch = RecordBatch::try_new(
            schema.clone(),
            vec![Arc::new(id_array), Arc::new(name_array)],
        )
        .unwrap();
        RecordBatchIterator::new(vec![std::result::Result::Ok(batch)].into_iter(), schema)
    }

    #[tokio::test]
@@ -611,7 +618,13 @@ mod tests {

        // Test: Overwrite the table
        let table2 = conn
            .create_table("overwrite_test", test_data2)
            .create_table(
                "overwrite_test",
                RecordBatchIterator::new(
                    vec![std::result::Result::Ok(test_data2)].into_iter(),
                    schema,
                ),
            )
            .namespace(vec!["test_ns".into()])
            .mode(CreateTableMode::Overwrite)
            .execute()

@@ -13,7 +13,7 @@ use lance_datafusion::exec::SessionContextExt;
use crate::{
    arrow::{SendableRecordBatchStream, SendableRecordBatchStreamExt, SimpleRecordBatchStream},
    connect,
    database::{CreateTableRequest, Database},
    database::{CreateTableData, CreateTableRequest, Database},
    dataloader::permutation::{
        shuffle::{Shuffler, ShufflerConfig},
        split::{SplitStrategy, Splitter, SPLIT_ID_COLUMN},
@@ -313,8 +313,10 @@ impl PermutationBuilder {
            }
        };

        let create_table_request =
            CreateTableRequest::new(name.to_string(), Box::new(streaming_data));
        let create_table_request = CreateTableRequest::new(
            name.to_string(),
            CreateTableData::StreamingData(streaming_data),
        );

        let table = database.create_table(create_table_request).await?;

@@ -345,7 +347,7 @@ mod tests {
            .col("col_b", lance_datagen::array::step::<Int32Type>())
            .into_ldb_stream(RowCount::from(100), BatchCount::from(10));
        let data_table = db
            .create_table("base_tbl", initial_data)
            .create_table_streaming("base_tbl", initial_data)
            .execute()
            .await
            .unwrap();
@@ -385,7 +387,7 @@ mod tests {
            .col("some_value", lance_datagen::array::step::<Int32Type>())
            .into_ldb_stream(RowCount::from(100), BatchCount::from(10));
        let data_table = db
            .create_table("mytbl", initial_data)
            .create_table_streaming("mytbl", initial_data)
            .execute()
            .await
            .unwrap();

@@ -18,7 +18,7 @@ use std::{
};

use arrow_array::{Array, RecordBatch, RecordBatchReader};
use arrow_schema::{DataType, Field, SchemaBuilder, SchemaRef};
use arrow_schema::{DataType, Field, SchemaBuilder};
// use async_trait::async_trait;
use serde::{Deserialize, Serialize};

@@ -190,112 +190,6 @@ impl<R: RecordBatchReader> WithEmbeddings<R> {
    }
}

/// Compute embedding arrays for a batch.
///
/// When multiple embedding functions are defined, they are computed in parallel using
/// scoped threads. For a single embedding function, computation is done inline.
fn compute_embedding_arrays(
    batch: &RecordBatch,
    embeddings: &[(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)],
) -> Result<Vec<Arc<dyn Array>>> {
    if embeddings.len() == 1 {
        let (fld, func) = &embeddings[0];
        let src_column =
            batch
                .column_by_name(&fld.source_column)
                .ok_or_else(|| Error::InvalidInput {
                    message: format!("Source column '{}' not found", fld.source_column),
                })?;
        return Ok(vec![func.compute_source_embeddings(src_column.clone())?]);
    }

    // Parallel path: multiple embeddings
    std::thread::scope(|s| {
        let handles: Vec<_> = embeddings
            .iter()
            .map(|(fld, func)| {
                let src_column = batch.column_by_name(&fld.source_column).ok_or_else(|| {
                    Error::InvalidInput {
                        message: format!("Source column '{}' not found", fld.source_column),
                    }
                })?;

                let handle = s.spawn(move || func.compute_source_embeddings(src_column.clone()));

                Ok(handle)
            })
            .collect::<Result<_>>()?;

        handles
            .into_iter()
            .map(|h| {
                h.join().map_err(|e| Error::Runtime {
                    message: format!("Thread panicked during embedding computation: {:?}", e),
                })?
            })
            .collect()
    })
}
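// Illustrative sketch (not part of the upstream diff): the scoped-thread
// fan-out used above, reduced to its core. `std::thread::scope` lets spawned
// closures borrow from the enclosing stack frame because every handle is
// joined before the scope returns:
//
//     fn parallel_lengths(words: &[String]) -> Vec<usize> {
//         std::thread::scope(|s| {
//             let handles: Vec<_> = words
//                 .iter()
//                 .map(|w| s.spawn(move || w.len()))
//                 .collect();
//             handles.into_iter().map(|h| h.join().unwrap()).collect()
//         })
//     }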

/// Compute the output schema when embeddings are applied to a base schema.
///
/// This returns the schema with embedding columns appended.
pub fn compute_output_schema(
    base_schema: &SchemaRef,
    embeddings: &[(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)],
) -> Result<SchemaRef> {
    let mut sb: SchemaBuilder = base_schema.as_ref().into();

    for (ed, func) in embeddings {
        let src_field = base_schema
            .field_with_name(&ed.source_column)
            .map_err(|_| Error::InvalidInput {
                message: format!("Source column '{}' not found in schema", ed.source_column),
            })?;

        let field_name = ed
            .dest_column
            .clone()
            .unwrap_or_else(|| format!("{}_embedding", &ed.source_column));

        sb.push(Field::new(
            field_name,
            func.dest_type()?.into_owned(),
            src_field.is_nullable(),
        ));
    }

    Ok(Arc::new(sb.finish()))
}

/// Compute embeddings for a batch and append as new columns.
///
/// This function computes embeddings using the provided embedding functions and
/// appends them as new columns to the batch.
pub fn compute_embeddings_for_batch(
    batch: RecordBatch,
    embeddings: &[(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)],
) -> Result<RecordBatch> {
    let embedding_arrays = compute_embedding_arrays(&batch, embeddings)?;

    let mut result = batch;
    for ((fld, _), embedding) in embeddings.iter().zip(embedding_arrays.iter()) {
        let dst_field_name = fld
            .dest_column
            .clone()
            .unwrap_or_else(|| format!("{}_embedding", &fld.source_column));

        let dst_field = Field::new(
            dst_field_name,
            embedding.data_type().clone(),
            embedding.nulls().is_some(),
        );

        result = result.try_with_column(dst_field, embedding.clone())?;
    }
    Ok(result)
}
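// Illustrative sketch (not part of the upstream diff): how the two free
// functions above compose. Compute the widened schema once up front via
// `compute_output_schema`, then append embedding columns batch by batch:
//
//     fn embed_all(
//         batches: Vec<RecordBatch>,
//         embeddings: &[(EmbeddingDefinition, Arc<dyn EmbeddingFunction>)],
//     ) -> Result<Vec<RecordBatch>> {
//         batches
//             .into_iter()
//             .map(|b| compute_embeddings_for_batch(b, embeddings))
//             .collect()
//     }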

impl<R: RecordBatchReader> WithEmbeddings<R> {
    fn dest_fields(&self) -> Result<Vec<Field>> {
        let schema = self.inner.schema();
@@ -346,6 +240,48 @@ impl<R: RecordBatchReader> WithEmbeddings<R> {
            column_definitions,
        })
    }

    fn compute_embeddings_parallel(&self, batch: &RecordBatch) -> Result<Vec<Arc<dyn Array>>> {
        if self.embeddings.len() == 1 {
            let (fld, func) = &self.embeddings[0];
            let src_column =
                batch
                    .column_by_name(&fld.source_column)
                    .ok_or_else(|| Error::InvalidInput {
                        message: format!("Source column '{}' not found", fld.source_column),
                    })?;
            return Ok(vec![func.compute_source_embeddings(src_column.clone())?]);
        }

        // Parallel path: multiple embeddings
        std::thread::scope(|s| {
            let handles: Vec<_> = self
                .embeddings
                .iter()
                .map(|(fld, func)| {
                    let src_column = batch.column_by_name(&fld.source_column).ok_or_else(|| {
                        Error::InvalidInput {
                            message: format!("Source column '{}' not found", fld.source_column),
                        }
                    })?;

                    let handle =
                        s.spawn(move || func.compute_source_embeddings(src_column.clone()));

                    Ok(handle)
                })
                .collect::<Result<_>>()?;

            handles
                .into_iter()
                .map(|h| {
                    h.join().map_err(|e| Error::Runtime {
                        message: format!("Thread panicked during embedding computation: {:?}", e),
                    })?
                })
                .collect()
        })
    }
}

impl<R: RecordBatchReader> Iterator for MaybeEmbedded<R> {
@@ -373,13 +309,37 @@ impl<R: RecordBatchReader> Iterator for WithEmbeddings<R> {
    fn next(&mut self) -> Option<Self::Item> {
        let batch = self.inner.next()?;
        match batch {
            Ok(batch) => match compute_embeddings_for_batch(batch, &self.embeddings) {
                Ok(batch_with_embeddings) => Some(Ok(batch_with_embeddings)),
                Err(e) => Some(Err(arrow_schema::ArrowError::ComputeError(format!(
                    "Error computing embedding: {}",
                    e
                )))),
            },
            Ok(batch) => {
                let embeddings = match self.compute_embeddings_parallel(&batch) {
                    Ok(emb) => emb,
                    Err(e) => {
                        return Some(Err(arrow_schema::ArrowError::ComputeError(format!(
                            "Error computing embedding: {}",
                            e
                        ))))
                    }
                };

                let mut batch = batch;
                for ((fld, _), embedding) in self.embeddings.iter().zip(embeddings.iter()) {
                    let dst_field_name = fld
                        .dest_column
                        .clone()
                        .unwrap_or_else(|| format!("{}_embedding", &fld.source_column));

                    let dst_field = Field::new(
                        dst_field_name,
                        embedding.data_type().clone(),
                        embedding.nulls().is_some(),
                    );

                    match batch.try_with_column(dst_field.clone(), embedding.clone()) {
                        Ok(b) => batch = b,
                        Err(e) => return Some(Err(e)),
                    };
                }
                Some(Ok(batch))
            }
            Err(e) => Some(Err(e)),
        }
    }

@@ -6,7 +6,7 @@ use std::sync::PoisonError;
use arrow_schema::ArrowError;
use snafu::Snafu;

pub(crate) type BoxError = Box<dyn std::error::Error + Send + Sync>;
type BoxError = Box<dyn std::error::Error + Send + Sync>;

#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
@@ -80,9 +80,6 @@ pub enum Error {
    Arrow { source: ArrowError },
    #[snafu(display("LanceDBError: not supported: {message}"))]
    NotSupported { message: String },
    /// External error pass through from user code.
    #[snafu(transparent)]
    External { source: BoxError },
    #[snafu(whatever, display("{message}"))]
    Other {
        message: String,
@@ -95,26 +92,15 @@ pub type Result<T> = std::result::Result<T, Error>;

impl From<ArrowError> for Error {
    fn from(source: ArrowError) -> Self {
        match source {
            ArrowError::ExternalError(source) => match source.downcast::<Self>() {
                Ok(e) => *e,
                Err(source) => Self::External { source },
            },
            _ => Self::Arrow { source },
        }
        Self::Arrow { source }
    }
}

impl From<lance::Error> for Error {
    fn from(source: lance::Error) -> Self {
        // Try to unwrap external errors that were wrapped by lance
        match source {
            lance::Error::Wrapped { error, .. } => match error.downcast::<Self>() {
                Ok(e) => *e,
                Err(source) => Self::External { source },
            },
            _ => Self::Lance { source },
        }
        // TODO: Once Lance is changed to preserve ObjectStore, DataFusion, and Arrow errors, we can
        // pass those variants through here as well.
        Self::Lance { source }
    }
}
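// Illustrative sketch (not part of the upstream diff): with the simplified
// `From` impls above, conversion is now unconditional; every `ArrowError`
// maps straight to `Error::Arrow` and every `lance::Error` to `Error::Lance`:
//
//     fn demo(arrow_err: arrow_schema::ArrowError) -> Error {
//         Error::from(arrow_err) // always Error::Arrow after this change
//     }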

@@ -218,9 +218,8 @@ mod test {
        datagen = datagen.col(Box::<IncrementingInt32>::default());
        datagen = datagen.col(Box::new(RandomVector::default().named("vector".into())));

        let data: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(datagen.batch(100));
        let res = db
            .create_table("test", data)
            .create_table("test", Box::new(datagen.batch(100)))
            .write_options(WriteOptions {
                lance_write_params: Some(param),
            })

@@ -12,10 +12,10 @@ use arrow_schema::Schema;
use crate::{Error, Result};

/// Convert an Arrow IPC file to a batch reader
pub fn ipc_file_to_batches(buf: Vec<u8>) -> Result<Box<dyn RecordBatchReader + Send>> {
pub fn ipc_file_to_batches(buf: Vec<u8>) -> Result<impl RecordBatchReader> {
    let buf_reader = Cursor::new(buf);
    let reader = FileReader::try_new(buf_reader, None)?;
    Ok(Box::new(reader))
    Ok(reader)
}

/// Convert record batches to Arrow IPC file

@@ -39,6 +39,7 @@
//! #### Connect to a database.
//!
//! ```rust
//! # use arrow_schema::{Field, Schema};
//! # tokio::runtime::Runtime::new().unwrap().block_on(async {
//! let db = lancedb::connect("data/sample-lancedb").execute().await.unwrap();
//! # });
@@ -73,10 +74,7 @@
//!
//! #### Create a table
//!
//! To create a Table, you need to provide an [`arrow_array::RecordBatch`]. The
//! schema of the `RecordBatch` determines the schema of the table.
//!
//! Vector columns should be represented as `FixedSizeList<Float16/Float32>` data type.
//! To create a Table, you need to provide a [`arrow_schema::Schema`] and a [`arrow_array::RecordBatch`] stream.
//!
//! ```rust
//! # use std::sync::Arc;
@@ -87,29 +85,34 @@
//! # tokio::runtime::Runtime::new().unwrap().block_on(async {
//! # let tmpdir = tempfile::tempdir().unwrap();
//! # let db = lancedb::connect(tmpdir.path().to_str().unwrap()).execute().await.unwrap();
//! let ndims = 128;
//! let schema = Arc::new(Schema::new(vec![
//!     Field::new("id", DataType::Int32, false),
//!     Field::new(
//!         "vector",
//!         DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float32, true)), ndims),
//!         DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float32, true)), 128),
//!         true,
//!     ),
//! ]));
//! let data = RecordBatch::try_new(
//! // Create a RecordBatch stream.
//! let batches = RecordBatchIterator::new(
//!     vec![RecordBatch::try_new(
//!         schema.clone(),
//!         vec![
//!             Arc::new(Int32Array::from_iter_values(0..256)),
//!             Arc::new(
//!                 FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
//!                     (0..256).map(|_| Some(vec![Some(1.0); ndims as usize])),
//!                     ndims,
//!                     (0..256).map(|_| Some(vec![Some(1.0); 128])),
//!                     128,
//!                 ),
//!             ),
//!         ],
//!     )
//!     .unwrap();
//! db.create_table("my_table", data)
//!     .unwrap()]
//!     .into_iter()
//!     .map(Ok),
//!     schema.clone(),
//! );
//! db.create_table("my_table", Box::new(batches))
//!     .execute()
//!     .await
//!     .unwrap();
@@ -148,18 +151,42 @@
//! #### Open table and search
//!
//! ```rust
//! # use std::sync::Arc;
//! # use futures::TryStreamExt;
//! # use arrow_schema::{DataType, Schema, Field};
//! # use arrow_array::{RecordBatch, RecordBatchIterator};
//! # use arrow_array::{FixedSizeListArray, Float32Array, Int32Array, types::Float32Type};
//! # use lancedb::query::{ExecutableQuery, QueryBase};
//! # async fn example(table: &lancedb::Table) -> lancedb::Result<()> {
//! # tokio::runtime::Runtime::new().unwrap().block_on(async {
//! # let tmpdir = tempfile::tempdir().unwrap();
//! # let db = lancedb::connect(tmpdir.path().to_str().unwrap()).execute().await.unwrap();
//! # let schema = Arc::new(Schema::new(vec![
//! #   Field::new("id", DataType::Int32, false),
//! #   Field::new("vector", DataType::FixedSizeList(
//! #     Arc::new(Field::new("item", DataType::Float32, true)), 128), true),
//! # ]));
//! # let batches = RecordBatchIterator::new(vec![
//! #   RecordBatch::try_new(schema.clone(),
//! #     vec![
//! #       Arc::new(Int32Array::from_iter_values(0..10)),
//! #       Arc::new(FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
//! #         (0..10).map(|_| Some(vec![Some(1.0); 128])), 128)),
//! #     ]).unwrap()
//! #   ].into_iter().map(Ok),
//! #   schema.clone());
//! # db.create_table("my_table", Box::new(batches)).execute().await.unwrap();
//! # let table = db.open_table("my_table").execute().await.unwrap();
//! let results = table
//!     .query()
//!     .nearest_to(&[1.0; 128])?
//!     .nearest_to(&[1.0; 128])
//!     .unwrap()
//!     .execute()
//!     .await?
//!     .await
//!     .unwrap()
//!     .try_collect::<Vec<_>>()
//!     .await?;
//! # Ok(())
//! # }
//!     .await
//!     .unwrap();
//! # });
//! ```

pub mod arrow;

@@ -1381,7 +1381,7 @@ mod tests {
    use arrow::{array::downcast_array, compute::concat_batches, datatypes::Int32Type};
    use arrow_array::{
        cast::AsArray, types::Float32Type, FixedSizeListArray, Float32Array, Int32Array,
        RecordBatch, StringArray,
        RecordBatch, RecordBatchIterator, RecordBatchReader, StringArray,
    };
    use arrow_schema::{DataType, Field as ArrowField, Schema as ArrowSchema};
    use futures::{StreamExt, TryStreamExt};
@@ -1402,7 +1402,7 @@ mod tests {
        let batches = make_test_batches();
        let conn = connect(uri).execute().await.unwrap();
        let table = conn
            .create_table("my_table", batches)
            .create_table("my_table", Box::new(batches))
            .execute()
            .await
            .unwrap();
@@ -1463,7 +1463,7 @@ mod tests {
        let batches = make_non_empty_batches();
        let conn = connect(uri).execute().await.unwrap();
        let table = conn
            .create_table("my_table", batches)
            .create_table("my_table", Box::new(batches))
            .execute()
            .await
            .unwrap();
@@ -1525,7 +1525,7 @@ mod tests {
        let batches = make_non_empty_batches();
        let conn = connect(uri).execute().await.unwrap();
        let table = conn
            .create_table("my_table", batches)
            .create_table("my_table", Box::new(batches))
            .execute()
            .await
            .unwrap();
@@ -1578,7 +1578,7 @@ mod tests {
        let batches = make_non_empty_batches();
        let conn = connect(uri).execute().await.unwrap();
        let table = conn
            .create_table("my_table", batches)
            .create_table("my_table", Box::new(batches))
            .execute()
            .await
            .unwrap();
@@ -1599,13 +1599,13 @@ mod tests {
        assert!(result.is_err());
    }

    fn make_non_empty_batches() -> Box<dyn arrow_array::RecordBatchReader + Send> {
    fn make_non_empty_batches() -> impl RecordBatchReader + Send + 'static {
        let vec = Box::new(RandomVector::new().named("vector".to_string()));
        let id = Box::new(IncrementingInt32::new().named("id".to_string()));
        Box::new(BatchGenerator::new().col(vec).col(id).batch(512))
        BatchGenerator::new().col(vec).col(id).batch(512)
    }

    fn make_test_batches() -> RecordBatch {
    fn make_test_batches() -> impl RecordBatchReader + Send + 'static {
        let dim: usize = 128;
        let schema = Arc::new(ArrowSchema::new(vec![
            ArrowField::new("key", DataType::Int32, false),
@@ -1619,7 +1619,12 @@ mod tests {
            ),
            ArrowField::new("uri", DataType::Utf8, true),
        ]));
        RecordBatch::new_empty(schema)
        RecordBatchIterator::new(
            vec![RecordBatch::new_empty(schema.clone())]
                .into_iter()
                .map(Ok),
            schema,
        )
    }

    async fn make_test_table(tmp_dir: &tempfile::TempDir) -> Table {
@@ -1628,7 +1633,7 @@ mod tests {

        let batches = make_non_empty_batches();
        let conn = connect(uri).execute().await.unwrap();
        conn.create_table("my_table", batches)
        conn.create_table("my_table", Box::new(batches))
            .execute()
            .await
            .unwrap()
@@ -1857,8 +1862,10 @@ mod tests {

        let record_batch =
            RecordBatch::try_new(schema.clone(), vec![Arc::new(text), Arc::new(vector)]).unwrap();
        let record_batch_iter =
            RecordBatchIterator::new(vec![record_batch].into_iter().map(Ok), schema.clone());
        let table = conn
            .create_table("my_table", record_batch)
            .create_table("my_table", record_batch_iter)
            .execute()
            .await
            .unwrap();
@@ -1942,8 +1949,10 @@ mod tests {
            ],
        )
        .unwrap();
        let record_batch_iter =
            RecordBatchIterator::new(vec![record_batch].into_iter().map(Ok), schema.clone());
        let table = conn
            .create_table("my_table", record_batch)
            .create_table("my_table", record_batch_iter)
            .mode(CreateTableMode::Overwrite)
            .execute()
            .await
@@ -2053,6 +2062,8 @@ mod tests {
    async fn test_pagination_with_fts() {
        let db = connect("memory://test").execute().await.unwrap();
        let data = fts_test_data(400);
        let schema = data.schema();
        let data = RecordBatchIterator::new(vec![Ok(data)], schema);
        let table = db.create_table("test_table", data).execute().await.unwrap();

        table

@@ -491,7 +491,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
    }

    /// Apply dynamic headers from the header provider if configured
    pub(crate) async fn apply_dynamic_headers(&self, mut request: Request) -> Result<Request> {
    async fn apply_dynamic_headers(&self, mut request: Request) -> Result<Request> {
        if let Some(ref provider) = self.header_provider {
            let headers = provider.get_headers().await?;
            let request_headers = request.headers_mut();
@@ -555,9 +555,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
            message: "Attempted to retry a request that cannot be cloned".to_string(),
        })?;
        let (_, r) = tmp_req.build_split();
        let mut r = r.map_err(|e| Error::Runtime {
            message: format!("Failed to build request: {}", e),
        })?;
        let mut r = r.unwrap();
        let request_id = self.extract_request_id(&mut r);
        let mut retry_counter = RetryCounter::new(retry_config, request_id.clone());

@@ -573,9 +571,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
            }

            let (c, request) = req_builder.build_split();
            let mut request = request.map_err(|e| Error::Runtime {
                message: format!("Failed to build request: {}", e),
            })?;
            let mut request = request.unwrap();
            self.set_request_id(&mut request, &request_id.clone());

            // Apply dynamic headers before each retry attempt
@@ -625,7 +621,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
        }
    }

    pub(crate) fn log_request(&self, request: &Request, request_id: &String) {
    fn log_request(&self, request: &Request, request_id: &String) {
        if log::log_enabled!(log::Level::Debug) {
            let content_type = request
                .headers()

@@ -4,11 +4,13 @@
use std::collections::HashMap;
use std::sync::Arc;

use arrow_array::RecordBatchIterator;
use async_trait::async_trait;
use http::StatusCode;
use lance_io::object_store::StorageOptions;
use moka::future::Cache;
use reqwest::header::CONTENT_TYPE;
use tokio::task::spawn_blocking;

use lance_namespace::models::{
    CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
@@ -17,17 +19,16 @@ use lance_namespace::models::{
};

use crate::database::{
    CloneTableRequest, CreateTableMode, CreateTableRequest, Database, DatabaseOptions,
    OpenTableRequest, ReadConsistency, TableNamesRequest,
    CloneTableRequest, CreateTableData, CreateTableMode, CreateTableRequest, Database,
    DatabaseOptions, OpenTableRequest, ReadConsistency, TableNamesRequest,
};
use crate::error::Result;
use crate::remote::util::stream_as_body;
use crate::table::BaseTable;
use crate::Error;

use super::client::{ClientConfig, HttpSend, RequestResultExt, RestfulLanceDbClient, Sender};
use super::table::RemoteTable;
use super::util::parse_server_version;
use super::util::{batches_to_ipc_bytes, parse_server_version};
use super::ARROW_STREAM_CONTENT_TYPE;

// Request structure for the remote clone table API
@@ -435,8 +436,26 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
        Ok(response)
    }

    async fn create_table(&self, mut request: CreateTableRequest) -> Result<Arc<dyn BaseTable>> {
        let body = stream_as_body(request.data.scan_as_stream())?;
    async fn create_table(&self, request: CreateTableRequest) -> Result<Arc<dyn BaseTable>> {
        let data = match request.data {
            CreateTableData::Data(data) => data,
            CreateTableData::StreamingData(_) => {
                return Err(Error::NotSupported {
                    message: "Creating a remote table from a streaming source".to_string(),
                })
            }
            CreateTableData::Empty(table_definition) => {
                let schema = table_definition.schema.clone();
                Box::new(RecordBatchIterator::new(vec![], schema))
            }
        };

        // TODO: https://github.com/lancedb/lancedb/issues/1026
        // We should accept data from an async source. In the meantime, spawn this as blocking
        // to make sure we don't block the tokio runtime if the source is slow.
        let data_buffer = spawn_blocking(move || batches_to_ipc_bytes(data))
            .await
            .unwrap()?;

        let identifier =
            build_table_identifier(&request.name, &request.namespace, &self.client.id_delimiter);
@@ -444,7 +463,7 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
            .client
            .post(&format!("/v1/table/{}/create/", identifier))
            .query(&[("mode", Into::<&str>::into(&request.mode))])
            .body(body)
            .body(data_buffer)
            .header(CONTENT_TYPE, ARROW_STREAM_CONTENT_TYPE);

        let (request_id, rsp) = self.client.send(req).await?;
@@ -794,7 +813,7 @@ mod tests {
    use std::collections::HashMap;
    use std::sync::{Arc, OnceLock};

    use arrow_array::{Int32Array, RecordBatch};
    use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator};
    use arrow_schema::{DataType, Field, Schema};

    use crate::connection::ConnectBuilder;
@@ -974,7 +993,8 @@ mod tests {
            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
        )
        .unwrap();
        let table = conn.create_table("table1", data).execute().await.unwrap();
        let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
        let table = conn.create_table("table1", reader).execute().await.unwrap();
        assert_eq!(table.name(), "table1");
    }

@@ -991,7 +1011,8 @@ mod tests {
            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
        )
        .unwrap();
        let result = conn.create_table("table1", data).execute().await;
        let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
        let result = conn.create_table("table1", reader).execute().await;
        assert!(result.is_err());
        assert!(
            matches!(result, Err(crate::Error::TableAlreadyExists { name }) if name == "table1")
@@ -1024,7 +1045,8 @@ mod tests {
            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
        )
        .unwrap();
        let mut builder = conn.create_table("table1", data.clone());
        let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
        let mut builder = conn.create_table("table1", reader);
        if let Some(mode) = mode {
            builder = builder.mode(mode);
        }
@@ -1049,8 +1071,9 @@ mod tests {
        .unwrap();

        let called: Arc<OnceLock<bool>> = Arc::new(OnceLock::new());
        let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
        let called_in_cb = called.clone();
        conn.create_table("table1", data)
        conn.create_table("table1", reader)
            .mode(CreateTableMode::ExistOk(Box::new(move |b| {
                called_in_cb.clone().set(true).unwrap();
                b
@@ -1239,8 +1262,9 @@ mod tests {
            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
        )
        .unwrap();
        let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
        let table = conn
            .create_table("table1", data)
            .create_table("table1", reader)
            .namespace(vec!["ns1".to_string()])
            .execute()
            .await
@@ -1706,8 +1730,10 @@ mod tests {
            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
        )
        .unwrap();
        let reader = RecordBatchIterator::new([Ok(data.clone())], schema.clone());

        let table = conn
            .create_table("test_table", data)
            .create_table("test_table", reader)
            .namespace(namespace.clone())
            .execute()
            .await;
@@ -1780,7 +1806,9 @@ mod tests {
        let data =
            RecordBatch::try_new(schema.clone(), vec![Arc::new(Int32Array::from(vec![i]))])
                .unwrap();
        conn.create_table(format!("table{}", i), data)
        let reader = RecordBatchIterator::new([Ok(data.clone())], schema.clone());

        conn.create_table(format!("table{}", i), reader)
            .namespace(namespace.clone())
            .execute()
            .await

(File diff suppressed because it is too large)
@@ -1,50 +1,29 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use arrow_ipc::CompressionType;
use futures::{Stream, StreamExt};
use std::io::Cursor;

use arrow_array::RecordBatchReader;
use reqwest::Response;

use crate::{arrow::SendableRecordBatchStream, Result};
use crate::Result;

use super::db::ServerVersion;

pub fn stream_as_ipc(
    data: SendableRecordBatchStream,
) -> Result<impl Stream<Item = Result<bytes::Bytes>>> {
    let options = arrow_ipc::writer::IpcWriteOptions::default()
        .try_with_compression(Some(CompressionType::LZ4_FRAME))?;
pub fn batches_to_ipc_bytes(batches: impl RecordBatchReader) -> Result<Vec<u8>> {
    const WRITE_BUF_SIZE: usize = 4096;
    let buf = Vec::with_capacity(WRITE_BUF_SIZE);
    let writer =
        arrow_ipc::writer::StreamWriter::try_new_with_options(buf, &data.schema(), options)?;
    let stream = futures::stream::try_unfold(
        (data, writer, false),
        move |(mut data, mut writer, finished)| async move {
            if finished {
                return Ok(None);
            }
            match data.next().await {
                Some(Ok(batch)) => {
                    writer.write(&batch)?;
                    let buffer = std::mem::take(writer.get_mut());
                    Ok(Some((bytes::Bytes::from(buffer), (data, writer, false))))
                }
                Some(Err(e)) => Err(e),
                None => {
                    writer.finish()?;
                    let buffer = std::mem::take(writer.get_mut());
                    Ok(Some((bytes::Bytes::from(buffer), (data, writer, true))))
                }
            }
        },
    );
    Ok(stream)
}
    let mut buf = Cursor::new(buf);
    {
        let mut writer = arrow_ipc::writer::StreamWriter::try_new(&mut buf, &batches.schema())?;

pub fn stream_as_body(data: SendableRecordBatchStream) -> Result<reqwest::Body> {
    let stream = stream_as_ipc(data)?;
    Ok(reqwest::Body::wrap_stream(stream))
        for batch in batches {
            let batch = batch?;
            writer.write(&batch)?;
        }
        writer.finish()?;
    }
    Ok(buf.into_inner())
}
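// Illustrative sketch (not part of the upstream diff): `batches_to_ipc_bytes`
// replaces the old streaming body on the remote create-table path. As in the
// remote database hunk above, heavy serialization is moved off the async
// runtime before the buffer is posted:
//
//     let data_buffer = tokio::task::spawn_blocking(move || batches_to_ipc_bytes(reader))
//         .await
//         .unwrap()?;
//     // data_buffer: Vec<u8> holding the Arrow IPC stream, used as the request body.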

pub fn parse_server_version(req_id: &str, rsp: &Response) -> Result<ServerVersion> {

@@ -5,7 +5,7 @@

use arrow::array::{AsArray, FixedSizeListBuilder, Float32Builder};
use arrow::datatypes::{Float32Type, UInt8Type};
use arrow_array::{RecordBatch, RecordBatchReader};
use arrow_array::{RecordBatchIterator, RecordBatchReader};
use arrow_schema::{DataType, Field, Schema, SchemaRef};
use async_trait::async_trait;
use datafusion_expr::Expr;
@@ -50,9 +50,10 @@ use std::format;
use std::path::Path;
use std::sync::Arc;

use crate::data::scannable::{scannable_with_embeddings, Scannable};
use crate::arrow::IntoArrow;
use crate::connection::NoData;
use crate::database::Database;
use crate::embeddings::{EmbeddingDefinition, EmbeddingRegistry, MemoryRegistry};
use crate::embeddings::{EmbeddingDefinition, EmbeddingRegistry, MaybeEmbedded, MemoryRegistry};
use crate::error::{Error, Result};
use crate::index::vector::VectorIndex;
use crate::index::IndexStatistics;
@@ -71,7 +72,6 @@ use crate::utils::{
use self::dataset::DatasetConsistencyWrapper;
use self::merge::MergeInsertBuilder;

mod add_data;
pub mod datafusion;
pub(crate) mod dataset;
pub mod delete;
@@ -80,8 +80,6 @@ pub mod optimize;
pub mod schema_evolution;
pub mod update;

pub use add_data::{AddDataBuilder, AddDataMode, AddResult};

use crate::index::waiter::wait_for_index;
pub use chrono::Duration;
pub use delete::DeleteResult;
@@ -198,6 +196,60 @@ pub struct WriteOptions {
    pub lance_write_params: Option<WriteParams>,
}

#[derive(Debug, Clone, Default)]
pub enum AddDataMode {
    /// Rows will be appended to the table (the default)
    #[default]
    Append,
    /// The existing table will be overwritten with the new data
    Overwrite,
}

/// A builder for configuring a [`crate::connection::Connection::create_table`] or [`Table::add`]
/// operation
pub struct AddDataBuilder<T: IntoArrow> {
    parent: Arc<dyn BaseTable>,
    pub(crate) data: T,
    pub(crate) mode: AddDataMode,
    pub(crate) write_options: WriteOptions,
    embedding_registry: Option<Arc<dyn EmbeddingRegistry>>,
}

impl<T: IntoArrow> std::fmt::Debug for AddDataBuilder<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("AddDataBuilder")
            .field("parent", &self.parent)
            .field("mode", &self.mode)
            .field("write_options", &self.write_options)
            .finish()
    }
}

impl<T: IntoArrow> AddDataBuilder<T> {
    pub fn mode(mut self, mode: AddDataMode) -> Self {
        self.mode = mode;
        self
    }

    pub fn write_options(mut self, options: WriteOptions) -> Self {
        self.write_options = options;
        self
    }

    pub async fn execute(self) -> Result<AddResult> {
        let parent = self.parent.clone();
        let data = self.data.into_arrow()?;
        let without_data = AddDataBuilder::<NoData> {
            data: NoData {},
            mode: self.mode,
            parent: self.parent,
            write_options: self.write_options,
            embedding_registry: self.embedding_registry,
        };
        parent.add(without_data, data).await
    }
}
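// Illustrative sketch (not part of the upstream diff): driving the builder
// above through `Table::add`, assuming an open `table` and an Arrow reader
// `batches` (any `T: IntoArrow`):
//
//     let result = table
//         .add(batches)
//         .mode(AddDataMode::Overwrite)
//         .write_options(WriteOptions::default())
//         .execute()
//         .await?;
//     // `result.version` is the commit version of the write (0 on legacy
//     // servers that do not report one).
//     println!("new table version: {}", result.version);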

/// Filters that can be used to limit the rows returned by a query
pub enum Filter {
    /// A SQL filter string
@@ -231,6 +283,15 @@ pub trait Tags: Send + Sync {
    async fn update(&mut self, tag: &str, version: u64) -> Result<()>;
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct AddResult {
    /// The commit version associated with the operation.
    /// A version of `0` indicates compatibility with legacy servers that do not return
    /// a commit version.
    #[serde(default)]
    pub version: u64,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct MergeResult {
    /// The commit version associated with the operation.
|
||||
@@ -303,7 +364,11 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {
|
||||
) -> Result<String>;
|
||||
|
||||
/// Add new records to the table.
|
||||
async fn add(&self, add: AddDataBuilder) -> Result<AddResult>;
|
||||
async fn add(
|
||||
&self,
|
||||
add: AddDataBuilder<NoData>,
|
||||
data: Box<dyn arrow_array::RecordBatchReader + Send>,
|
||||
) -> Result<AddResult>;
|
||||
/// Delete rows from the table.
|
||||
async fn delete(&self, predicate: &str) -> Result<DeleteResult>;
|
||||
/// Update rows in the table.
|
||||
@@ -448,30 +513,6 @@ mod test_utils {
                embedding_registry: Arc::new(MemoryRegistry::new()),
            }
        }

        pub fn new_with_handler_and_config<T>(
            name: impl Into<String>,
            handler: impl Fn(reqwest::Request) -> http::Response<T> + Clone + Send + Sync + 'static,
            config: crate::remote::ClientConfig,
        ) -> Self
        where
            T: Into<reqwest::Body>,
        {
            let inner = Arc::new(crate::remote::table::RemoteTable::new_mock_with_config(
                name.into(),
                handler.clone(),
                config.clone(),
            ));
            let database = Arc::new(crate::remote::db::RemoteDatabase::new_mock_with_config(
                handler, config,
            ));
            Self {
                inner,
                database: Some(database),
                // Registry is unused.
                embedding_registry: Arc::new(MemoryRegistry::new()),
            }
        }
    }
}

@@ -572,14 +613,16 @@ impl Table {
    ///
    /// # Arguments
    ///
    /// * `data` data to be added to the Table
    /// * `batches` data to be added to the Table
    /// * `options` options to control how data is added
    pub fn add<T: Scannable + 'static>(&self, data: T) -> AddDataBuilder {
        AddDataBuilder::new(
            self.inner.clone(),
            Box::new(data),
            Some(self.embedding_registry.clone()),
        )
    pub fn add<T: IntoArrow>(&self, batches: T) -> AddDataBuilder<T> {
        AddDataBuilder {
            parent: self.inner.clone(),
            data: batches,
            mode: AddDataMode::Append,
            write_options: WriteOptions::default(),
            embedding_registry: Some(self.embedding_registry.clone()),
        }
    }

    /// Update existing records in the Table
@@ -618,26 +661,31 @@ impl Table {
    /// .execute()
    /// .await
    /// .unwrap();
    /// let schema = Arc::new(Schema::new(vec![
    ///     Field::new("id", DataType::Int32, false),
    ///     Field::new("vector", DataType::FixedSizeList(
    ///         Arc::new(Field::new("item", DataType::Float32, true)), 128), true),
    /// ]));
    /// let data = RecordBatch::try_new(
    ///     schema.clone(),
    ///     vec![
    ///         Arc::new(Int32Array::from_iter_values(0..10)),
    ///         Arc::new(
    ///             FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
    ///                 (0..10).map(|_| Some(vec![Some(1.0); 128])),
    ///                 128,
    /// # let schema = Arc::new(Schema::new(vec![
    /// #     Field::new("id", DataType::Int32, false),
    /// #     Field::new("vector", DataType::FixedSizeList(
    /// #         Arc::new(Field::new("item", DataType::Float32, true)), 128), true),
    /// # ]));
    /// let batches = RecordBatchIterator::new(
    ///     vec![RecordBatch::try_new(
    ///         schema.clone(),
    ///         vec![
    ///             Arc::new(Int32Array::from_iter_values(0..10)),
    ///             Arc::new(
    ///                 FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
    ///                     (0..10).map(|_| Some(vec![Some(1.0); 128])),
    ///                     128,
    ///                 ),
    ///             ),
    ///         ),
    ///     ],
    /// )
    /// .unwrap();
    ///         ],
    ///     )
    ///     .unwrap()]
    ///     .into_iter()
    ///     .map(Ok),
    ///     schema.clone(),
    /// );
    /// let tbl = db
    ///     .create_table("delete_test", data)
    ///     .create_table("delete_test", Box::new(batches))
    ///     .execute()
    ///     .await
    ///     .unwrap();
@@ -1397,7 +1445,7 @@ impl NativeTable {
                name: name.to_string(),
                source: Box::new(e),
            },
            e => e.into(),
            source => Error::Lance { source },
        })?;

        let dataset = DatasetConsistencyWrapper::new_latest(dataset, read_consistency_interval);
@@ -1481,7 +1529,7 @@ impl NativeTable {
            lance::Error::Namespace { source, .. } => Error::Runtime {
                message: format!("Failed to get table info from namespace: {:?}", source),
            },
            e => e.into(),
            source => Error::Lance { source },
        })?;

        let dataset = builder
@@ -1493,7 +1541,7 @@ impl NativeTable {
                name: name.to_string(),
                source: Box::new(e),
            },
            e => e.into(),
            source => Error::Lance { source },
        })?;

        let uri = dataset.uri().to_string();
@@ -1587,7 +1635,7 @@ impl NativeTable {
            lance::Error::DatasetAlreadyExists { .. } => Error::TableAlreadyExists {
                name: name.to_string(),
            },
            e => e.into(),
            source => Error::Lance { source },
        })?;

        let id = Self::build_id(&namespace, name);
@@ -1614,12 +1662,12 @@ impl NativeTable {
        read_consistency_interval: Option<std::time::Duration>,
        namespace_client: Option<Arc<dyn LanceNamespace>>,
    ) -> Result<Self> {
        let data: Box<dyn Scannable> = Box::new(RecordBatch::new_empty(schema));
        let batches = RecordBatchIterator::new(vec![], schema);
        Self::create(
            uri,
            name,
            namespace,
            data,
            batches,
            write_store_wrapper,
            params,
            read_consistency_interval,
@@ -1708,7 +1756,7 @@ impl NativeTable {
            lance::Error::DatasetAlreadyExists { .. } => Error::TableAlreadyExists {
                name: name.to_string(),
            },
            e => e.into(),
            source => Error::Lance { source },
        })?;

        let id = Self::build_id(&namespace, name);
@@ -2490,7 +2538,17 @@ impl BaseTable for NativeTable {
        }
    }

    async fn add(&self, add: AddDataBuilder) -> Result<AddResult> {
    async fn add(
        &self,
        add: AddDataBuilder<NoData>,
        data: Box<dyn RecordBatchReader + Send>,
    ) -> Result<AddResult> {
        let data = Box::new(MaybeEmbedded::try_new(
            data,
            self.table_definition().await?,
            add.embedding_registry,
        )?) as Box<dyn RecordBatchReader + Send>;

        let lance_params = add.write_options.lance_write_params.unwrap_or(WriteParams {
            mode: match add.mode {
                AddDataMode::Append => WriteMode::Append,
@@ -2499,11 +2557,6 @@ impl BaseTable for NativeTable {
            ..Default::default()
        });

        // Apply embeddings if configured
        let table_def = self.table_definition().await?;
        let data =
            scannable_with_embeddings(add.data, &table_def, add.embedding_registry.as_ref())?;

        let dataset = {
            // Limited scope for the mutable borrow of self.dataset avoids deadlock.
            let ds = self.dataset.get_mut().await?;
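Every incoming reader is first wrapped in `MaybeEmbedded`, which forwards batches untouched unless the table definition declares embedding columns. A stripped-down sketch of such a pass-through `RecordBatchReader` adapter (illustrative only; the real type lives in the embeddings module and also appends computed columns):

    use arrow_array::{RecordBatch, RecordBatchReader};
    use arrow_schema::{ArrowError, SchemaRef};

    // Illustrative pass-through reader.
    struct PassThrough<R: RecordBatchReader> {
        inner: R,
    }

    impl<R: RecordBatchReader> Iterator for PassThrough<R> {
        type Item = Result<RecordBatch, ArrowError>;

        fn next(&mut self) -> Option<Self::Item> {
            // A real MaybeEmbedded would compute and attach embedding columns here.
            self.inner.next()
        }
    }

    impl<R: RecordBatchReader> RecordBatchReader for PassThrough<R> {
        fn schema(&self) -> SchemaRef {
            self.inner.schema()
        }
    }
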
@@ -3110,7 +3163,7 @@ mod tests {
    use arrow_array::{BinaryArray, LargeBinaryArray};
    use arrow_data::ArrayDataBuilder;
    use arrow_schema::{DataType, Field, Schema};
    use futures::TryStreamExt;
    use lance::dataset::WriteMode;
    use lance::io::{ObjectStoreParams, WrappingObjectStore};
    use lance::Dataset;
    use rand::Rng;
@@ -3121,17 +3174,14 @@ mod tests {
    use crate::connection::ConnectBuilder;
    use crate::index::scalar::{BTreeIndexBuilder, BitmapIndexBuilder};
    use crate::index::vector::{IvfHnswPqIndexBuilder, IvfHnswSqIndexBuilder};
    use crate::query::{ExecutableQuery, QueryBase};
    use crate::test_utils::connection::new_test_connection;

    #[tokio::test]
    async fn test_open() {
        let tmp_dir = tempdir().unwrap();
        let dataset_path = tmp_dir.path().join("test.lance");

        let batch = make_test_batches();
        let reader = RecordBatchIterator::new(vec![Ok(batch.clone())], batch.schema());
        Dataset::write(reader, dataset_path.to_str().unwrap(), None)
        let batches = make_test_batches();
        Dataset::write(batches, dataset_path.to_str().unwrap(), None)
            .await
            .unwrap();

@@ -3164,12 +3214,9 @@ mod tests {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();

        let batch = make_test_batches();
        let reader: Box<dyn RecordBatchReader + Send> = Box::new(RecordBatchIterator::new(
            vec![Ok(batch.clone())],
            batch.schema(),
        ));
        let table = NativeTable::create(uri, "test", vec![], reader, None, None, None, None)
        let batches = make_test_batches();
        let batches = Box::new(batches) as Box<dyn RecordBatchReader + Send>;
        let table = NativeTable::create(uri, "test", vec![], batches, None, None, None, None)
            .await
            .unwrap();

@@ -3183,6 +3230,33 @@ mod tests {
        );
    }

    #[tokio::test]
    async fn test_add() {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();
        let conn = connect(uri).execute().await.unwrap();

        let batches = make_test_batches();
        let schema = batches.schema().clone();
        let table = conn.create_table("test", batches).execute().await.unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), 10);

        let new_batches = RecordBatchIterator::new(
            vec![RecordBatch::try_new(
                schema.clone(),
                vec![Arc::new(Int32Array::from_iter_values(100..110))],
            )
            .unwrap()]
            .into_iter()
            .map(Ok),
            schema.clone(),
        );

        table.add(new_batches).execute().await.unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), 20);
        assert_eq!(table.name(), "test");
    }

    #[tokio::test]
    async fn test_merge_insert() {
        let tmp_dir = tempdir().unwrap();
@@ -3199,7 +3273,7 @@ mod tests {
        assert_eq!(table.count_rows(None).await.unwrap(), 10);

        // Create new data with i=5..15
        let new_batches = merge_insert_test_batches(5, 1);
        let new_batches = Box::new(merge_insert_test_batches(5, 1));

        // Perform an "insert if not exists"
        let mut merge_insert_builder = table.merge_insert(&["i"]);
@@ -3213,7 +3287,7 @@ mod tests {
        assert_eq!(result.num_attempts, 1);

        // Create new data with i=15..25 (no id matches)
        let new_batches = merge_insert_test_batches(15, 2);
        let new_batches = Box::new(merge_insert_test_batches(15, 2));
        // Perform a "bulk update" (should not affect anything)
        let mut merge_insert_builder = table.merge_insert(&["i"]);
        merge_insert_builder.when_matched_update_all(None);
@@ -3226,7 +3300,7 @@ mod tests {
        );

        // Conditional update that only replaces the age=0 data
        let new_batches = merge_insert_test_batches(5, 3);
        let new_batches = Box::new(merge_insert_test_batches(5, 3));
        let mut merge_insert_builder = table.merge_insert(&["i"]);
        merge_insert_builder.when_matched_update_all(Some("target.age = 0".to_string()));
        merge_insert_builder.execute(new_batches).await.unwrap();
@@ -3252,7 +3326,7 @@ mod tests {
        assert_eq!(table.count_rows(None).await.unwrap(), 10);

        // Test use_index=true (default behavior)
        let new_batches = merge_insert_test_batches(5, 1);
        let new_batches = Box::new(merge_insert_test_batches(5, 1));
        let mut merge_insert_builder = table.merge_insert(&["i"]);
        merge_insert_builder.when_not_matched_insert_all();
        merge_insert_builder.use_index(true);
@@ -3260,7 +3334,7 @@ mod tests {
        assert_eq!(table.count_rows(None).await.unwrap(), 15);

        // Test use_index=false (force table scan)
        let new_batches = merge_insert_test_batches(15, 2);
        let new_batches = Box::new(merge_insert_test_batches(15, 2));
        let mut merge_insert_builder = table.merge_insert(&["i"]);
        merge_insert_builder.when_not_matched_insert_all();
        merge_insert_builder.use_index(false);
@@ -3268,6 +3342,59 @@ mod tests {
        assert_eq!(table.count_rows(None).await.unwrap(), 25);
    }

    #[tokio::test]
    async fn test_add_overwrite() {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();
        let conn = connect(uri).execute().await.unwrap();

        let batches = make_test_batches();
        let schema = batches.schema().clone();
        let table = conn.create_table("test", batches).execute().await.unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), 10);

        let batches = vec![RecordBatch::try_new(
            schema.clone(),
            vec![Arc::new(Int32Array::from_iter_values(100..110))],
        )
        .unwrap()]
        .into_iter()
        .map(Ok);

        let new_batches = RecordBatchIterator::new(batches.clone(), schema.clone());

        // Can overwrite using AddDataOptions::mode
        table
            .add(new_batches)
            .mode(AddDataMode::Overwrite)
            .execute()
            .await
            .unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), 10);
        assert_eq!(table.name(), "test");

        // Can overwrite using underlying WriteParams (which
        // take precedence over AddDataOptions::mode)

        let param: WriteParams = WriteParams {
            mode: WriteMode::Overwrite,
            ..Default::default()
        };

        let new_batches = RecordBatchIterator::new(batches.clone(), schema.clone());
        table
            .add(new_batches)
            .write_options(WriteOptions {
                lance_write_params: Some(param),
            })
            .mode(AddDataMode::Append)
            .execute()
            .await
            .unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), 10);
        assert_eq!(table.name(), "test");
    }

    #[derive(Default, Debug)]
    struct NoOpCacheWrapper {
        called: AtomicBool,
@@ -3323,25 +3450,35 @@ mod tests {
        assert!(wrapper.called());
    }

    fn merge_insert_test_batches(offset: i32, age: i32) -> Box<dyn RecordBatchReader + Send> {
    fn merge_insert_test_batches(
        offset: i32,
        age: i32,
    ) -> impl RecordBatchReader + Send + Sync + 'static {
        let schema = Arc::new(Schema::new(vec![
            Field::new("i", DataType::Int32, false),
            Field::new("age", DataType::Int32, false),
        ]));
        let batch = RecordBatch::try_new(
            schema.clone(),
            vec![
                Arc::new(Int32Array::from_iter_values(offset..(offset + 10))),
                Arc::new(Int32Array::from_iter_values(std::iter::repeat_n(age, 10))),
            ],
        RecordBatchIterator::new(
            vec![RecordBatch::try_new(
                schema.clone(),
                vec![
                    Arc::new(Int32Array::from_iter_values(offset..(offset + 10))),
                    Arc::new(Int32Array::from_iter_values(std::iter::repeat_n(age, 10))),
                ],
            )],
            schema,
        )
        .unwrap();
        Box::new(RecordBatchIterator::new(vec![Ok(batch)], schema))
    }

    fn make_test_batches() -> RecordBatch {
    fn make_test_batches() -> impl RecordBatchReader + Send + Sync + 'static {
        let schema = Arc::new(Schema::new(vec![Field::new("i", DataType::Int32, false)]));
        RecordBatch::try_new(schema, vec![Arc::new(Int32Array::from_iter_values(0..10))]).unwrap()
        RecordBatchIterator::new(
            vec![RecordBatch::try_new(
                schema.clone(),
                vec![Arc::new(Int32Array::from_iter_values(0..10))],
            )],
            schema,
        )
    }

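The tests above repeat the same wrap-a-batch-in-a-reader dance many times. A small helper, sketched here as an assumption rather than something present in the diff, would keep that noise down:

    // Hypothetical convenience helper: wrap a single batch in a one-shot
    // reader so it satisfies the `RecordBatchReader` bound that `add`,
    // `create_table`, and `Dataset::write` all expect now.
    fn reader_of(batch: RecordBatch) -> impl RecordBatchReader + Send + Sync + 'static {
        let schema = batch.schema();
        RecordBatchIterator::new(vec![Ok(batch)], schema)
    }
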
    #[tokio::test]
@@ -3429,9 +3566,14 @@ mod tests {
        );

        let vectors = Arc::new(create_fixed_size_list(float_arr, dimension).unwrap());
        let batch = RecordBatch::try_new(schema.clone(), vec![vectors.clone()]).unwrap();
        let batches = RecordBatchIterator::new(
            vec![RecordBatch::try_new(schema.clone(), vec![vectors.clone()]).unwrap()]
                .into_iter()
                .map(Ok),
            schema,
        );

        let table = conn.create_table("test", batch).execute().await.unwrap();
        let table = conn.create_table("test", batches).execute().await.unwrap();

        assert_eq!(table.index_stats("my_index").await.unwrap(), None);

@@ -3462,31 +3604,6 @@ mod tests {
        assert_eq!(table.list_indices().await.unwrap().len(), 0);
    }

    #[tokio::test]
    async fn test_dynamic_select() {
        let tc = new_test_connection().await.unwrap();
        let db = tc.connection;

        let table = db
            .create_table("test", some_sample_data())
            .execute()
            .await
            .unwrap();

        let query = table.query().select(Select::dynamic(&[("i_alias", "i")]));

        let result = query.execute().await;
        let batches = result
            .expect("should have result")
            .try_collect::<Vec<_>>()
            .await
            .unwrap();

        for batch in batches {
            assert!(batch.column_by_name("i_alias").is_some());
        }
    }

    #[tokio::test]
    async fn test_ivf_pq_uses_default_partition_size_for_num_partitions() {
        use arrow_array::{Float32Array, RecordBatch};
@@ -3513,9 +3630,14 @@ mod tests {
        let float_arr =
            Float32Array::from_iter_values((0..(num_rows * dimension)).map(|v| v as f32));
        let vectors = Arc::new(create_fixed_size_list(float_arr, dimension as i32).unwrap());
        let batch = RecordBatch::try_new(schema.clone(), vec![vectors]).unwrap();
        let batches = RecordBatchIterator::new(
            vec![RecordBatch::try_new(schema.clone(), vec![vectors]).unwrap()]
                .into_iter()
                .map(Ok),
            schema,
        );

        let table = conn.create_table("test", batch).execute().await.unwrap();
        let table = conn.create_table("test", batches).execute().await.unwrap();
        let native_table = table.as_native().unwrap();
        let builder = IvfPqIndexBuilder::default();
        table
@@ -3585,9 +3707,14 @@ mod tests {
        );

        let vectors = Arc::new(create_fixed_size_list(float_arr, dimension).unwrap());
        let batch = RecordBatch::try_new(schema.clone(), vec![vectors.clone()]).unwrap();
        let batches = RecordBatchIterator::new(
            vec![RecordBatch::try_new(schema.clone(), vec![vectors.clone()]).unwrap()]
                .into_iter()
                .map(Ok),
            schema,
        );

        let table = conn.create_table("test", batch).execute().await.unwrap();
        let table = conn.create_table("test", batches).execute().await.unwrap();

        let stats = table.index_stats("my_index").await.unwrap();
        assert!(stats.is_none());
@@ -3645,9 +3772,14 @@ mod tests {
        );

        let vectors = Arc::new(create_fixed_size_list(float_arr, dimension).unwrap());
        let batch = RecordBatch::try_new(schema.clone(), vec![vectors.clone()]).unwrap();
        let batches = RecordBatchIterator::new(
            vec![RecordBatch::try_new(schema.clone(), vec![vectors.clone()]).unwrap()]
                .into_iter()
                .map(Ok),
            schema,
        );

        let table = conn.create_table("test", batch).execute().await.unwrap();
        let table = conn.create_table("test", batches).execute().await.unwrap();
        let stats = table.index_stats("my_index").await.unwrap();
        assert!(stats.is_none());

@@ -3690,7 +3822,7 @@ mod tests {
        Ok(FixedSizeListArray::from(data))
    }

    fn some_sample_data() -> Box<dyn arrow_array::RecordBatchReader + Send> {
    fn some_sample_data() -> Box<dyn RecordBatchReader + Send> {
        let batch = RecordBatch::try_new(
            Arc::new(Schema::new(vec![Field::new("i", DataType::Int32, false)])),
            vec![Arc::new(Int32Array::from(vec![1]))],
@@ -3714,7 +3846,10 @@ mod tests {
            .unwrap();
        let conn = ConnectBuilder::new(uri).execute().await.unwrap();
        let table = conn
            .create_table("my_table", batch.clone())
            .create_table(
                "my_table",
                RecordBatchIterator::new(vec![Ok(batch.clone())], batch.schema()),
            )
            .execute()
            .await
            .unwrap();
@@ -3793,7 +3928,10 @@ mod tests {
            .unwrap();

        let table = conn
            .create_table("test_bitmap", batch.clone())
            .create_table(
                "test_bitmap",
                RecordBatchIterator::new(vec![Ok(batch.clone())], batch.schema()),
            )
            .execute()
            .await
            .unwrap();
@@ -3894,7 +4032,10 @@ mod tests {
            .unwrap();

        let table = conn
            .create_table("test_bitmap", batch.clone())
            .create_table(
                "test_bitmap",
                RecordBatchIterator::new(vec![Ok(batch.clone())], batch.schema()),
            )
            .execute()
            .await
            .unwrap();
@@ -3954,7 +4095,10 @@ mod tests {
            .unwrap();

        let table = conn
            .create_table("test_bitmap", batch.clone())
            .create_table(
                "test_bitmap",
                RecordBatchIterator::new(vec![Ok(batch.clone())], batch.schema()),
            )
            .execute()
            .await
            .unwrap();
@@ -3999,7 +4143,7 @@ mod tests {

        let conn1 = ConnectBuilder::new(uri).execute().await.unwrap();
        let table1 = conn1
            .create_empty_table("my_table", RecordBatchReader::schema(&data))
            .create_empty_table("my_table", data.schema())
            .execute()
            .await
            .unwrap();
@@ -4269,7 +4413,10 @@ mod tests {
            .unwrap();

        let table = conn
            .create_table("test_stats", batch.clone())
            .create_table(
                "test_stats",
                RecordBatchIterator::new(vec![Ok(batch.clone())], batch.schema()),
            )
            .execute()
            .await
            .unwrap();
@@ -4282,11 +4429,21 @@ mod tests {
                ],
            )
            .unwrap();
            table.add(batch.clone()).execute().await.unwrap();
            table
                .add(RecordBatchIterator::new(
                    vec![Ok(batch.clone())],
                    batch.schema(),
                ))
                .execute()
                .await
                .unwrap();
        }

        let empty_table = conn
            .create_table("test_stats_empty", RecordBatch::new_empty(batch.schema()))
            .create_table(
                "test_stats_empty",
                RecordBatchIterator::new(vec![], batch.schema()),
            )
            .execute()
            .await
            .unwrap();
@@ -4360,12 +4517,22 @@ mod tests {
            .unwrap();

        let table = conn
            .create_table("test_list_indices_skip_frag_reuse", batch.clone())
            .create_table(
                "test_list_indices_skip_frag_reuse",
                RecordBatchIterator::new(vec![Ok(batch.clone())], batch.schema()),
            )
            .execute()
            .await
            .unwrap();

        table.add(batch.clone()).execute().await.unwrap();
        table
            .add(RecordBatchIterator::new(
                vec![Ok(batch.clone())],
                batch.schema(),
            ))
            .execute()
            .await
            .unwrap();

        table
            .create_index(&["id"], Index::Bitmap(BitmapIndexBuilder {}))
@@ -4395,9 +4562,8 @@ mod tests {
        let tmp_dir = tempdir().unwrap();
        let dataset_path = tmp_dir.path().join("test_ns_query.lance");

        let batch = make_test_batches();
        let reader = RecordBatchIterator::new(vec![Ok(batch.clone())], batch.schema());
        Dataset::write(reader, dataset_path.to_str().unwrap(), None)
        let batches = make_test_batches();
        Dataset::write(batches, dataset_path.to_str().unwrap(), None)
            .await
            .unwrap();

@@ -4449,9 +4615,8 @@ mod tests {
        let tmp_dir = tempdir().unwrap();
        let dataset_path = tmp_dir.path().join("test_ns_plain.lance");

        let batch = make_test_batches();
        let reader = RecordBatchIterator::new(vec![Ok(batch.clone())], batch.schema());
        Dataset::write(reader, dataset_path.to_str().unwrap(), None)
        let batches = make_test_batches();
        Dataset::write(batches, dataset_path.to_str().unwrap(), None)
            .await
            .unwrap();


@@ -1,343 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use std::sync::Arc;

use serde::{Deserialize, Serialize};

use crate::data::scannable::Scannable;
use crate::embeddings::EmbeddingRegistry;
use crate::Result;

use super::{BaseTable, WriteOptions};

#[derive(Debug, Clone, Default)]
pub enum AddDataMode {
    /// Rows will be appended to the table (the default)
    #[default]
    Append,
    /// The existing table will be overwritten with the new data
    Overwrite,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct AddResult {
    /// The commit version associated with the operation.
    /// A version of `0` indicates compatibility with legacy servers that do not return
    /// a commit version.
    #[serde(default)]
    pub version: u64,
}

/// A builder for configuring a [`crate::table::Table::add`] operation
pub struct AddDataBuilder {
    pub(crate) parent: Arc<dyn BaseTable>,
    pub(crate) data: Box<dyn Scannable>,
    pub(crate) mode: AddDataMode,
    pub(crate) write_options: WriteOptions,
    pub(crate) embedding_registry: Option<Arc<dyn EmbeddingRegistry>>,
}

impl std::fmt::Debug for AddDataBuilder {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("AddDataBuilder")
            .field("parent", &self.parent)
            .field("mode", &self.mode)
            .field("write_options", &self.write_options)
            .finish()
    }
}

impl AddDataBuilder {
    pub(crate) fn new(
        parent: Arc<dyn BaseTable>,
        data: Box<dyn Scannable>,
        embedding_registry: Option<Arc<dyn EmbeddingRegistry>>,
    ) -> Self {
        Self {
            parent,
            data,
            mode: AddDataMode::Append,
            write_options: WriteOptions::default(),
            embedding_registry,
        }
    }

    pub fn mode(mut self, mode: AddDataMode) -> Self {
        self.mode = mode;
        self
    }

    pub fn write_options(mut self, options: WriteOptions) -> Self {
        self.write_options = options;
        self
    }

    pub async fn execute(self) -> Result<AddResult> {
        self.parent.clone().add(self).await
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use arrow_array::{record_batch, RecordBatch, RecordBatchIterator};
    use arrow_schema::{ArrowError, DataType, Field, Schema};
    use futures::TryStreamExt;
    use lance::dataset::{WriteMode, WriteParams};

    use crate::arrow::{SendableRecordBatchStream, SimpleRecordBatchStream};
    use crate::connect;
    use crate::data::scannable::Scannable;
    use crate::embeddings::{
        EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry, MemoryRegistry,
    };
    use crate::query::{ExecutableQuery, QueryBase, Select};
    use crate::table::{ColumnDefinition, ColumnKind, Table, TableDefinition, WriteOptions};
    use crate::test_utils::embeddings::MockEmbed;
    use crate::Error;

    use super::AddDataMode;

    async fn create_test_table() -> Table {
        let conn = connect("memory://").execute().await.unwrap();
        let batch = record_batch!(("id", Int64, [1, 2, 3])).unwrap();
        conn.create_table("test", batch).execute().await.unwrap()
    }

    async fn test_add_with_data<T>(data: T)
    where
        T: Scannable + 'static,
    {
        let table = create_test_table().await;
        let schema = data.schema();
        table.add(data).execute().await.unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), 5); // 3 initial + 2 added
        assert_eq!(table.schema().await.unwrap(), schema);
    }

    #[tokio::test]
    async fn test_add_with_batch() {
        let batch = record_batch!(("id", Int64, [4, 5])).unwrap();
        test_add_with_data(batch).await;
    }

    #[tokio::test]
    async fn test_add_with_vec_batch() {
        let data = vec![
            record_batch!(("id", Int64, [4])).unwrap(),
            record_batch!(("id", Int64, [5])).unwrap(),
        ];
        test_add_with_data(data).await;
    }

    #[tokio::test]
    async fn test_add_with_record_batch_reader() {
        let data = vec![
            record_batch!(("id", Int64, [4])).unwrap(),
            record_batch!(("id", Int64, [5])).unwrap(),
        ];
        let schema = data[0].schema();
        let reader: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(
            RecordBatchIterator::new(data.into_iter().map(Ok), schema.clone()),
        );
        test_add_with_data(reader).await;
    }

    #[tokio::test]
    async fn test_add_with_stream() {
        let data = vec![
            record_batch!(("id", Int64, [4])).unwrap(),
            record_batch!(("id", Int64, [5])).unwrap(),
        ];
        let schema = data[0].schema();
        let inner = futures::stream::iter(data.into_iter().map(Ok));
        let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
            schema,
            stream: inner,
        });
        test_add_with_data(stream).await;
    }

    #[derive(Debug)]
    struct MyError;

    impl std::fmt::Display for MyError {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "MyError occurred")
        }
    }

    impl std::error::Error for MyError {}

    #[tokio::test]
    async fn test_add_preserves_reader_error() {
        let table = create_test_table().await;
        let first_batch = record_batch!(("id", Int64, [4])).unwrap();
        let schema = first_batch.schema();
        let iterator = vec![
            Ok(first_batch),
            Err(ArrowError::ExternalError(Box::new(MyError))),
        ];
        let reader: Box<dyn arrow_array::RecordBatchReader + Send> = Box::new(
            RecordBatchIterator::new(iterator.into_iter(), schema.clone()),
        );

        let result = table.add(reader).execute().await;

        assert!(result.is_err());
    }

    #[tokio::test]
    async fn test_add_preserves_stream_error() {
        let table = create_test_table().await;
        let first_batch = record_batch!(("id", Int64, [4])).unwrap();
        let schema = first_batch.schema();
        let iterator = vec![
            Ok(first_batch),
            Err(Error::External {
                source: Box::new(MyError),
            }),
        ];
        let stream = futures::stream::iter(iterator);
        let stream: SendableRecordBatchStream = Box::pin(SimpleRecordBatchStream {
            schema: schema.clone(),
            stream,
        });

        let result = table.add(stream).execute().await;

        assert!(result.is_err());
    }

    #[tokio::test]
    async fn test_add() {
        let conn = connect("memory://").execute().await.unwrap();

        let batch = record_batch!(("i", Int32, [0, 1, 2])).unwrap();
        let table = conn
            .create_table("test", batch.clone())
            .execute()
            .await
            .unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), 3);

        let new_batch = record_batch!(("i", Int32, [3])).unwrap();
        table.add(new_batch).execute().await.unwrap();

        assert_eq!(table.count_rows(None).await.unwrap(), 4);
        assert_eq!(table.schema().await.unwrap(), batch.schema());
    }

    #[tokio::test]
    async fn test_add_overwrite() {
        let conn = connect("memory://").execute().await.unwrap();

        let batch = record_batch!(("i", Int32, [0, 1, 2])).unwrap();
        let table = conn
            .create_table("test", batch.clone())
            .execute()
            .await
            .unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), batch.num_rows());

        let new_batch = record_batch!(("x", Float32, [0.0, 1.0])).unwrap();
        let res = table
            .add(new_batch.clone())
            .mode(AddDataMode::Overwrite)
            .execute()
            .await
            .unwrap();
        assert_eq!(res.version, table.version().await.unwrap());
        assert_eq!(table.count_rows(None).await.unwrap(), new_batch.num_rows());
        assert_eq!(table.schema().await.unwrap(), new_batch.schema());

        // Can overwrite using underlying WriteParams (which
        // take precedence over AddDataMode)
        let param: WriteParams = WriteParams {
            mode: WriteMode::Overwrite,
            ..Default::default()
        };

        table
            .add(new_batch.clone())
            .write_options(WriteOptions {
                lance_write_params: Some(param),
            })
            .mode(AddDataMode::Append)
            .execute()
            .await
            .unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), new_batch.num_rows());
    }

    #[tokio::test]
    async fn test_add_with_embeddings() {
        let registry = Arc::new(MemoryRegistry::new());
        let mock_embedding: Arc<dyn EmbeddingFunction> = Arc::new(MockEmbed::new("mock", 4));
        registry.register("mock", mock_embedding).unwrap();

        let conn = connect("memory://")
            .embedding_registry(registry)
            .execute()
            .await
            .unwrap();

        let schema = Arc::new(Schema::new(vec![
            Field::new("text", DataType::Utf8, false),
            Field::new(
                "text_embedding",
                DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float32, true)), 4),
                false,
            ),
        ]));

        // Add embedding metadata to the schema
        let embedding_def = EmbeddingDefinition::new("text", "mock", Some("text_embedding"));
        let table_def = TableDefinition::new(
            schema.clone(),
            vec![
                ColumnDefinition {
                    kind: ColumnKind::Physical,
                },
                ColumnDefinition {
                    kind: ColumnKind::Embedding(embedding_def),
                },
            ],
        );
        let rich_schema = table_def.into_rich_schema();

        let table = conn
            .create_empty_table("embed_test", rich_schema)
            .execute()
            .await
            .unwrap();

        // Now add new data WITHOUT the embedding column - it should be computed automatically
        let new_batch = record_batch!(("text", Utf8, ["hello", "world"])).unwrap();
        table.add(new_batch).execute().await.unwrap();

        assert_eq!(table.count_rows(None).await.unwrap(), 2);

        // Query to verify the embeddings were computed for the new rows
        let results: Vec<RecordBatch> = table
            .query()
            .select(Select::columns(&["text", "text_embedding"]))
            .execute()
            .await
            .unwrap()
            .try_collect()
            .await
            .unwrap();

        let total_rows: usize = results.iter().map(|b| b.num_rows()).sum();
        assert_eq!(total_rows, 2);

        // Check that all rows have embedding values (not null)
        for batch in &results {
            let embedding_col = batch.column(1);
            assert_eq!(embedding_col.null_count(), 0);
        }
    }
}
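With this file deleted, a bare `RecordBatch` (or `Vec<RecordBatch>`) no longer satisfies `Table::add` directly; callers wrap their data in a reader first. A minimal before/after sketch under that assumption (the `table` and `batch` bindings are placeholders, and the `?` requires a `Result`-returning context):

    // Before: the Scannable-based builder accepted a RecordBatch directly.
    // table.add(batch).execute().await?;

    // After: wrap the batch in a RecordBatchIterator so it implements
    // RecordBatchReader, which satisfies the IntoArrow bound.
    let schema = batch.schema();
    table
        .add(RecordBatchIterator::new(vec![Ok(batch)], schema))
        .execute()
        .await?;
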
@@ -287,7 +287,8 @@ pub mod tests {

    use arrow::array::AsArray;
    use arrow_array::{
        BinaryArray, Float64Array, Int32Array, Int64Array, RecordBatch, StringArray, UInt32Array,
        BinaryArray, Float64Array, Int32Array, Int64Array, RecordBatch, RecordBatchIterator,
        RecordBatchReader, StringArray, UInt32Array,
    };
    use arrow_schema::{DataType, Field, Schema};
    use datafusion::{
@@ -307,7 +308,7 @@ pub mod tests {
        table::datafusion::BaseTableAdapter,
    };

    fn make_test_batches() -> RecordBatch {
    fn make_test_batches() -> impl RecordBatchReader + Send + Sync + 'static {
        let metadata = HashMap::from_iter(vec![("foo".to_string(), "bar".to_string())]);
        let schema = Arc::new(
            Schema::new(vec![
@@ -316,17 +317,19 @@ pub mod tests {
            ])
            .with_metadata(metadata),
        );
        RecordBatch::try_new(
        RecordBatchIterator::new(
            vec![RecordBatch::try_new(
                schema.clone(),
                vec![
                    Arc::new(Int32Array::from_iter_values(0..10)),
                    Arc::new(UInt32Array::from_iter_values(0..10)),
                ],
            )],
            schema,
            vec![
                Arc::new(Int32Array::from_iter_values(0..10)),
                Arc::new(UInt32Array::from_iter_values(0..10)),
            ],
        )
        .unwrap()
    }

    fn make_tbl_two_test_batches() -> RecordBatch {
    fn make_tbl_two_test_batches() -> impl RecordBatchReader + Send + Sync + 'static {
        let metadata = HashMap::from_iter(vec![("foo".to_string(), "bar".to_string())]);
        let schema = Arc::new(
            Schema::new(vec![
@@ -339,26 +342,28 @@ pub mod tests {
            ])
            .with_metadata(metadata),
        );
        RecordBatch::try_new(
        RecordBatchIterator::new(
            vec![RecordBatch::try_new(
                schema.clone(),
                vec![
                    Arc::new(Int64Array::from_iter_values(0..1000)),
                    Arc::new(StringArray::from_iter_values(
                        (0..1000).map(|i| i.to_string()),
                    )),
                    Arc::new(Float64Array::from_iter_values((0..1000).map(|i| i as f64))),
                    Arc::new(StringArray::from_iter_values(
                        (0..1000).map(|i| format!("{{\"i\":{}}}", i)),
                    )),
                    Arc::new(BinaryArray::from_iter_values(
                        (0..1000).map(|i| (i as u32).to_be_bytes().to_vec()),
                    )),
                    Arc::new(StringArray::from_iter_values(
                        (0..1000).map(|i| i.to_string()),
                    )),
                ],
            )],
            schema,
            vec![
                Arc::new(Int64Array::from_iter_values(0..1000)),
                Arc::new(StringArray::from_iter_values(
                    (0..1000).map(|i| i.to_string()),
                )),
                Arc::new(Float64Array::from_iter_values((0..1000).map(|i| i as f64))),
                Arc::new(StringArray::from_iter_values(
                    (0..1000).map(|i| format!("{{\"i\":{}}}", i)),
                )),
                Arc::new(BinaryArray::from_iter_values(
                    (0..1000).map(|i| (i as u32).to_be_bytes().to_vec()),
                )),
                Arc::new(StringArray::from_iter_values(
                    (0..1000).map(|i| i.to_string()),
                )),
            ],
        )
        .unwrap()
    }

    struct TestFixture {

@@ -222,7 +222,7 @@ mod tests {
    use std::vec;

    use super::*;
    use arrow_array::{record_batch, RecordBatchIterator};
    use arrow_array::{record_batch, Int32Array, RecordBatchIterator};
    use datafusion::prelude::SessionContext;
    use datafusion_catalog::MemTable;
    use tempfile::tempdir;
@@ -238,8 +238,11 @@ mod tests {

        // Create initial table
        let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
        let schema = batch.schema();
        let reader = RecordBatchIterator::new(vec![Ok(batch)], schema);

        let table = db
            .create_table("test_insert", batch)
            .create_table("test_insert", Box::new(reader))
            .execute()
            .await
            .unwrap();
@@ -276,8 +279,11 @@ mod tests {

        // Create initial table with 3 rows
        let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
        let schema = batch.schema();
        let reader = RecordBatchIterator::new(vec![Ok(batch)], schema);

        let table = db
            .create_table("test_overwrite", batch)
            .create_table("test_overwrite", Box::new(reader))
            .execute()
            .await
            .unwrap();
@@ -312,9 +318,20 @@ mod tests {
        let db = connect(uri).execute().await.unwrap();

        // Create initial table
        let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
        let schema = Arc::new(ArrowSchema::new(vec![Field::new(
            "id",
            DataType::Int32,
            false,
        )]));
        let batches = vec![RecordBatch::try_new(
            schema.clone(),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
        )
        .unwrap()];
        let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema.clone());

        let table = db
            .create_table("test_empty", batch)
            .create_table("test_empty", Box::new(reader))
            .execute()
            .await
            .unwrap();
@@ -335,13 +352,12 @@ mod tests {
            false,
        )]));
        // Empty batches
        let source_reader: Box<dyn arrow_array::RecordBatchReader + Send> =
            Box::new(RecordBatchIterator::new(
                std::iter::empty::<Result<RecordBatch, arrow_schema::ArrowError>>(),
                source_schema,
            ));
        let source_reader = RecordBatchIterator::new(
            std::iter::empty::<Result<RecordBatch, arrow_schema::ArrowError>>(),
            source_schema,
        );
        let source_table = db
            .create_table("empty_source", source_reader)
            .create_table("empty_source", Box::new(source_reader))
            .execute()
            .await
            .unwrap();
@@ -373,10 +389,20 @@ mod tests {
        let db = connect(uri).execute().await.unwrap();

        // Create initial table
        let batch = record_batch!(("id", Int32, [1])).unwrap();
        let schema = batch.schema();
        let schema = Arc::new(ArrowSchema::new(vec![Field::new(
            "id",
            DataType::Int32,
            true,
        )]));
        let batches =
            vec![
                RecordBatch::try_new(schema.clone(), vec![Arc::new(Int32Array::from(vec![1]))])
                    .unwrap(),
            ];
        let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema.clone());

        let table = db
            .create_table("test_multi_batch", batch)
            .create_table("test_multi_batch", Box::new(reader))
            .execute()
            .await
            .unwrap();

@@ -97,7 +97,7 @@ mod tests {
        table::datafusion::BaseTableAdapter,
        Connection, Table,
    };
    use arrow_array::{Int32Array, RecordBatch, StringArray};
    use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, StringArray};
    use arrow_schema::{DataType, Field, Schema as ArrowSchema};
    use datafusion::prelude::SessionContext;

@@ -173,7 +173,14 @@ mod tests {

        // Create LanceDB database and table
        let db = crate::connect("memory://test").execute().await.unwrap();
        let table = db.create_table("foo", batch).execute().await.unwrap();
        let table = db
            .create_table(
                "foo",
                RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
            )
            .execute()
            .await
            .unwrap();

        // Create FTS index
        table
@@ -316,7 +323,13 @@ mod tests {
            RecordBatch::try_new(metadata_schema.clone(), vec![metadata_col, extra_col]).unwrap();

        let _metadata_table = db
            .create_table("metadata", metadata_batch.clone())
            .create_table(
                "metadata",
                RecordBatchIterator::new(
                    vec![Ok(metadata_batch.clone())].into_iter(),
                    metadata_schema.clone(),
                ),
            )
            .execute()
            .await
            .unwrap();
@@ -380,7 +393,14 @@ mod tests {
        let batch =
            RecordBatch::try_new(schema.clone(), vec![id_col, text_col, category_col]).unwrap();

        let table = db.create_table(table_name, batch).execute().await.unwrap();
        let table = db
            .create_table(
                table_name,
                RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
            )
            .execute()
            .await
            .unwrap();

        // Create FTS index
        table
@@ -526,7 +546,14 @@ mod tests {
        ]));
        let batch = RecordBatch::try_new(schema.clone(), vec![id_col, text_col]).unwrap();

        let table = db.create_table("docs", batch).execute().await.unwrap();
        let table = db
            .create_table(
                "docs",
                RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
            )
            .execute()
            .await
            .unwrap();

        // Create FTS index with position information for phrase queries
        table
@@ -664,7 +691,14 @@ mod tests {
        let batch =
            RecordBatch::try_new(schema.clone(), vec![id_col, title_col, content_col]).unwrap();

        let table = db.create_table("multi_col", batch).execute().await.unwrap();
        let table = db
            .create_table(
                "multi_col",
                RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
            )
            .execute()
            .await
            .unwrap();

        // Create FTS indices on both columns
        table
@@ -929,7 +963,13 @@ mod tests {
        let metadata_batch =
            RecordBatch::try_new(metadata_schema.clone(), vec![metadata_id, extra_info]).unwrap();
        let _metadata_table = db
            .create_table("metadata", metadata_batch.clone())
            .create_table(
                "metadata",
                RecordBatchIterator::new(
                    vec![Ok(metadata_batch.clone())].into_iter(),
                    metadata_schema,
                ),
            )
            .execute()
            .await
            .unwrap();
@@ -1318,7 +1358,14 @@ mod tests {
        ]));
        let batch = RecordBatch::try_new(schema.clone(), vec![id_col, text_col]).unwrap();

        let table = db.create_table("docs", batch).execute().await.unwrap();
        let table = db
            .create_table(
                "docs",
                RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
            )
            .execute()
            .await
            .unwrap();

        // Create FTS index with position information
        table
@@ -1463,7 +1510,14 @@ mod tests {
        let batch =
            RecordBatch::try_new(schema.clone(), vec![id_col, title_col, content_col]).unwrap();

        let table = db.create_table("docs", batch).execute().await.unwrap();
        let table = db
            .create_table(
                "docs",
                RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
            )
            .execute()
            .await
            .unwrap();

        // Create FTS indices on both columns
        table
@@ -1537,7 +1591,14 @@ mod tests {
        let batch =
            RecordBatch::try_new(schema.clone(), vec![id_col, title_col, content_col]).unwrap();

        let table = db.create_table("docs", batch).execute().await.unwrap();
        let table = db
            .create_table(
                "docs",
                RecordBatchIterator::new(vec![Ok(batch)].into_iter(), schema),
            )
            .execute()
            .await
            .unwrap();

        // Create FTS indices
        table
@@ -1663,23 +1724,36 @@ mod tests {
            .unwrap();

        // Create table with simple text for n-gram testing
        let data = RecordBatch::try_new(
        let data = RecordBatchIterator::new(
            vec![RecordBatch::try_new(
                Arc::new(ArrowSchema::new(vec![
                    Field::new("id", DataType::Int32, false),
                    Field::new("text", DataType::Utf8, false),
                ])),
                vec![
                    Arc::new(Int32Array::from(vec![1, 2, 3])),
                    Arc::new(StringArray::from(vec![
                        "hello world",
                        "lance database",
                        "lance is cool",
                    ])),
                ],
            )
            .unwrap()]
            .into_iter()
            .map(Ok),
            Arc::new(ArrowSchema::new(vec![
                Field::new("id", DataType::Int32, false),
                Field::new("text", DataType::Utf8, false),
            ])),
            vec![
                Arc::new(Int32Array::from(vec![1, 2, 3])),
                Arc::new(StringArray::from(vec![
                    "hello world",
                    "lance database",
                    "lance is cool",
                ])),
            ],
        )
        .unwrap();
        );

        let table = Arc::new(db.create_table("docs", data).execute().await.unwrap());
        let table = Arc::new(
            db.create_table("docs", Box::new(data))
                .execute()
                .await
                .unwrap(),
        );

        // Create FTS index with n-gram tokenizer (default min_ngram_length=3)
        table
@@ -1802,29 +1876,43 @@ mod tests {
            .unwrap();

        // Create table with two text columns
        let data = RecordBatch::try_new(
        let data = RecordBatchIterator::new(
            vec![RecordBatch::try_new(
                Arc::new(ArrowSchema::new(vec![
                    Field::new("id", DataType::Int32, false),
                    Field::new("title", DataType::Utf8, false),
                    Field::new("content", DataType::Utf8, false),
                ])),
                vec![
                    Arc::new(Int32Array::from(vec![1, 2, 3])),
                    Arc::new(StringArray::from(vec![
                        "Important Document",
                        "Another Document",
                        "Random Text",
                    ])),
                    Arc::new(StringArray::from(vec![
                        "This is important information",
                        "This has details",
                        "Nothing special here",
                    ])),
                ],
            )
            .unwrap()]
            .into_iter()
            .map(Ok),
            Arc::new(ArrowSchema::new(vec![
                Field::new("id", DataType::Int32, false),
                Field::new("title", DataType::Utf8, false),
                Field::new("content", DataType::Utf8, false),
            ])),
            vec![
                Arc::new(Int32Array::from(vec![1, 2, 3])),
                Arc::new(StringArray::from(vec![
                    "Important Document",
                    "Another Document",
                    "Random Text",
                ])),
                Arc::new(StringArray::from(vec![
                    "This is important information",
                    "This has details",
                    "Nothing special here",
                ])),
            ],
        )
        .unwrap();
        );

        let table = Arc::new(db.create_table("docs", data).execute().await.unwrap());
        let table = Arc::new(
            db.create_table("docs", Box::new(data))
                .execute()
                .await
                .unwrap(),
        );

        // Create FTS indices on both columns
        table

@@ -57,6 +57,15 @@ impl DatasetRef {
        matches!(self, Self::Latest { .. })
    }

    async fn need_reload(&self) -> Result<bool> {
        Ok(match self {
            Self::Latest { dataset, .. } => {
                dataset.latest_version_id().await? != dataset.version().version
            }
            Self::TimeTravel { dataset, version } => dataset.version().version != *version,
        })
    }

    async fn as_latest(&mut self, read_consistency_interval: Option<Duration>) -> Result<()> {
        match self {
            Self::Latest { .. } => Ok(()),
@@ -109,21 +118,6 @@ impl DatasetRef {
        Ok(())
    }

    fn is_up_to_date(&self) -> bool {
        match self {
            Self::Latest {
                read_consistency_interval,
                last_consistency_check,
                ..
            } => match (read_consistency_interval, last_consistency_check) {
                (None, _) => true,
                (Some(_), None) => false,
                (Some(interval), Some(last_check)) => last_check.elapsed() < *interval,
            },
            Self::TimeTravel { dataset, version } => dataset.version().version == *version,
        }
    }

    fn time_travel_version(&self) -> Option<u64> {
        match self {
            Self::Latest { .. } => None,
@@ -211,7 +205,18 @@ impl DatasetConsistencyWrapper {
    }

    pub async fn reload(&self) -> Result<()> {
        self.0.write().await.reload().await
        if !self.0.read().await.need_reload().await? {
            return Ok(());
        }

        let mut write_guard = self.0.write().await;
        // on lock escalation -- check if someone else has already reloaded
        if !write_guard.need_reload().await? {
            return Ok(());
        }

        // actually need reloading
        write_guard.reload().await
    }

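The rewritten `reload` is double-checked locking over `tokio::sync::RwLock`: a cheap shared-read check first, then a re-check after escalating to the write lock, because another task may have reloaded while this one waited. A standalone sketch of the same idiom (names here are illustrative, not the crate's API):

    use std::sync::Arc;
    use tokio::sync::RwLock;

    async fn refresh_if_stale(state: Arc<RwLock<u64>>, latest: u64) {
        // Fast path: most callers only pay for a shared read lock.
        if *state.read().await == latest {
            return;
        }
        let mut guard = state.write().await;
        // Re-check: another task may have refreshed while we waited.
        if *guard != latest {
            *guard = latest; // the expensive reload would happen here
        }
    }
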
    /// Returns the version, if in time travel mode, or None otherwise
@@ -230,20 +235,35 @@ impl DatasetConsistencyWrapper {
        }
    }

    async fn is_up_to_date(&self) -> bool {
        self.0.read().await.is_up_to_date()
    async fn is_up_to_date(&self) -> Result<bool> {
        let dataset_ref = self.0.read().await;
        match &*dataset_ref {
            DatasetRef::Latest {
                read_consistency_interval,
                last_consistency_check,
                ..
            } => match (read_consistency_interval, last_consistency_check) {
                (None, _) => Ok(true),
                (Some(_), None) => Ok(false),
                (Some(read_consistency_interval), Some(last_consistency_check)) => {
                    if &last_consistency_check.elapsed() < read_consistency_interval {
                        Ok(true)
                    } else {
                        Ok(false)
                    }
                }
            },
            DatasetRef::TimeTravel { dataset, version } => {
                Ok(dataset.version().version == *version)
            }
        }
    }

    /// Ensures that the dataset is loaded and up-to-date with consistency and
    /// version parameters.
    async fn ensure_up_to_date(&self) -> Result<()> {
        if !self.is_up_to_date().await {
            // Re-check under write lock — another task may have reloaded
            // while we waited for the lock.
            let mut write_guard = self.0.write().await;
            if !write_guard.is_up_to_date() {
                write_guard.reload().await?;
            }
        if !self.is_up_to_date().await? {
            self.reload().await?;
        }
        Ok(())
    }
@@ -331,60 +351,4 @@ mod tests {
        let stats = io_stats.incremental_stats();
        assert_eq!(stats.read_iops, 1);
    }

    /// Regression test: before the fix, the reload fast-path (no version change)
    /// did not reset `last_consistency_check`, causing a list call on every
    /// subsequent query once the interval expired.
    #[tokio::test]
    async fn test_reload_resets_consistency_timer() {
        let db = connect("memory://")
            .read_consistency_interval(Duration::from_secs(1))
            .execute()
            .await
            .unwrap();
        let io_stats = IoStatsHolder::default();
        let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
        let table = db
            .create_empty_table("test", schema)
            .write_options(WriteOptions {
                lance_write_params: Some(WriteParams {
                    store_params: Some(ObjectStoreParams {
                        object_store_wrapper: Some(Arc::new(io_stats.clone())),
                        ..Default::default()
                    }),
                    ..Default::default()
                }),
            })
            .execute()
            .await
            .unwrap();

        let start = Instant::now();
        io_stats.incremental_stats(); // reset

        // Step 1: within interval — no list
        table.schema().await.unwrap();
        let s = io_stats.incremental_stats();
        assert_eq!(s.read_iops, 0, "step 1, elapsed={:?}", start.elapsed());

        // Step 2: still within interval — no list
        table.schema().await.unwrap();
        let s = io_stats.incremental_stats();
        assert_eq!(s.read_iops, 0, "step 2, elapsed={:?}", start.elapsed());

        // Step 3: sleep past the 1s boundary
        tokio::time::sleep(Duration::from_secs(1)).await;

        // Step 4: interval expired — exactly 1 list, timer resets
        table.schema().await.unwrap();
        let s = io_stats.incremental_stats();
        assert_eq!(s.read_iops, 1, "step 4, elapsed={:?}", start.elapsed());

        // Step 5: 10 more calls — timer just reset, no lists (THIS is the regression test).
        for _ in 0..10 {
            table.schema().await.unwrap();
        }
        let s = io_stats.incremental_stats();
        assert_eq!(s.read_iops, 0, "step 5, elapsed={:?}", start.elapsed());
    }
}

@@ -34,7 +34,7 @@ pub(crate) async fn execute_delete(table: &NativeTable, predicate: &str) -> Resu
#[cfg(test)]
mod tests {
    use crate::connect;
    use arrow_array::{record_batch, Int32Array, RecordBatch};
    use arrow_array::{record_batch, Int32Array, RecordBatch, RecordBatchIterator};
    use arrow_schema::{DataType, Field, Schema};
    use std::sync::Arc;

@@ -53,7 +53,10 @@ mod tests {
        .unwrap();

        let table = conn
            .create_table("test_delete", batch)
            .create_table(
                "test_delete",
                RecordBatchIterator::new(vec![Ok(batch)], schema),
            )
            .execute()
            .await
            .unwrap();
@@ -99,7 +102,10 @@ mod tests {
        let original_schema = batch.schema();

        let table = conn
            .create_table("test_delete_all", batch)
            .create_table(
                "test_delete_all",
                RecordBatchIterator::new(vec![Ok(batch)], original_schema.clone()),
            )
            .execute()
            .await
            .unwrap();
@@ -120,8 +126,13 @@ mod tests {
        // Create a table with 5 rows
        let batch = record_batch!(("id", Int32, [1, 2, 3, 4, 5])).unwrap();

        let schema = batch.schema();

        let table = conn
            .create_table("test_delete_noop", batch)
            .create_table(
                "test_delete_noop",
                RecordBatchIterator::new(vec![Ok(batch)], schema),
            )
            .execute()
            .await
            .unwrap();

@@ -212,7 +212,7 @@ pub(crate) async fn execute_optimize(

#[cfg(test)]
mod tests {
    use arrow_array::{Int32Array, RecordBatch, StringArray};
    use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, StringArray};
    use arrow_schema::{DataType, Field, Schema};
    use rstest::rstest;
    use std::sync::Arc;
@@ -236,7 +236,10 @@ mod tests {
        .unwrap();

        let table = conn
            .create_table("test_compact", batch)
            .create_table(
                "test_compact",
                RecordBatchIterator::new(vec![Ok(batch)], schema.clone()),
            )
            .execute()
            .await
            .unwrap();
@@ -250,7 +253,11 @@ mod tests {
                ))],
            )
            .unwrap();
            table.add(batch).execute().await.unwrap();
            table
                .add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
                .execute()
                .await
                .unwrap();
        }

        // Verify we have multiple fragments before compaction
@@ -315,7 +322,10 @@ mod tests {
        .unwrap();

        let table = conn
            .create_table("test_prune", batch)
            .create_table(
                "test_prune",
                RecordBatchIterator::new(vec![Ok(batch)], schema.clone()),
            )
            .execute()
            .await
            .unwrap();
@@ -329,7 +339,11 @@ mod tests {
                ))],
            )
            .unwrap();
            table.add(batch).execute().await.unwrap();
            table
                .add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
                .execute()
                .await
                .unwrap();
        }

        // Verify multiple versions exist
@@ -391,7 +405,10 @@ mod tests {
        .unwrap();

        let table = conn
            .create_table("test_index_optimize", batch)
            .create_table(
                "test_index_optimize",
                RecordBatchIterator::new(vec![Ok(batch)], schema.clone()),
            )
            .execute()
            .await
            .unwrap();
@@ -409,7 +426,11 @@ mod tests {
            vec![Arc::new(Int32Array::from_iter_values(100..200))],
        )
        .unwrap();
        table.add(batch).execute().await.unwrap();
        table
            .add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
            .execute()
            .await
            .unwrap();

        // Verify index stats before optimization
        let indices = table.list_indices().await.unwrap();
@@ -453,7 +474,10 @@ mod tests {
        .unwrap();

        let table = conn
            .create_table("test_optimize_all", batch)
            .create_table(
                "test_optimize_all",
                RecordBatchIterator::new(vec![Ok(batch)], schema.clone()),
            )
            .execute()
            .await
            .unwrap();
@@ -467,7 +491,11 @@ mod tests {
                ))],
            )
            .unwrap();
            table.add(batch).execute().await.unwrap();
            table
                .add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
                .execute()
                .await
                .unwrap();
        }

        // Run all optimizations
@@ -531,13 +559,20 @@ mod tests {
        .unwrap();

        let table = conn
            .create_table("test_deferred_remap", batch.clone())
            .create_table(
                "test_deferred_remap",
                RecordBatchIterator::new(vec![Ok(batch.clone())], schema.clone()),
            )
            .execute()
            .await
            .unwrap();

        // Add more data
        table.add(batch).execute().await.unwrap();
        table
            .add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
            .execute()
            .await
            .unwrap();

        // Create an index
        table
@@ -613,13 +648,20 @@ mod tests {
        let original_schema = batch.schema();

        let table = conn
            .create_table("test_schema_preserved", batch.clone())
            .create_table(
                "test_schema_preserved",
                RecordBatchIterator::new(vec![Ok(batch.clone())], schema.clone()),
            )
            .execute()
            .await
            .unwrap();

        // Add more data
        table.add(batch).execute().await.unwrap();
        table
            .add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
            .execute()
            .await
            .unwrap();

        // Run compaction
        table
@@ -661,7 +703,10 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
let table = conn
|
||||
.create_table("test_empty_optimize", batch)
|
||||
.create_table(
|
||||
"test_empty_optimize",
|
||||
RecordBatchIterator::new(vec![Ok(batch)], schema.clone()),
|
||||
)
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -707,12 +752,19 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
let table = conn
|
||||
.create_table("test_checkout_optimize", batch.clone())
|
||||
.create_table(
|
||||
"test_checkout_optimize",
|
||||
RecordBatchIterator::new(vec![Ok(batch.clone())], schema.clone()),
|
||||
)
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
table.add(batch).execute().await.unwrap();
|
||||
table
|
||||
.add(RecordBatchIterator::new(vec![Ok(batch)], schema.clone()))
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
table.checkout(1).await.unwrap();
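
The add() call sites change the same way, and because a reader is consumed by the append, the loops above rebuild a RecordBatchIterator from a cloned batch on every iteration. A sketch of that pattern as a helper; append_n_times is a hypothetical name, and the builder chain is the one shown in these hunks:

use arrow_array::{RecordBatch, RecordBatchIterator};

// Appends `batch` to `table` n times; each append lands in its own commit,
// which is what the compaction and pruning tests above rely on.
async fn append_n_times(
    table: &lancedb::Table,
    batch: &RecordBatch,
    n: usize,
) -> lancedb::Result<()> {
    let schema = batch.schema();
    for _ in 0..n {
        // A reader can only be consumed once, so build a fresh one per call.
        table
            .add(RecordBatchIterator::new(
                vec![Ok(batch.clone())],
                schema.clone(),
            ))
            .execute()
            .await?;
    }
    Ok(())
}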


@@ -89,7 +89,7 @@ pub(crate) async fn execute_drop_columns(

#[cfg(test)]
mod tests {
use arrow_array::{record_batch, Int32Array, StringArray};
use arrow_array::{record_batch, Int32Array, RecordBatchIterator, StringArray};
use arrow_schema::DataType;
use futures::TryStreamExt;
use lance::dataset::ColumnAlteration;
@@ -105,9 +105,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();

let batch = record_batch!(("id", Int32, [1, 2, 3, 4, 5])).unwrap();
let schema = batch.schema();

let table = conn
.create_table("test_add_columns", batch)
.create_table(
"test_add_columns",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -165,9 +169,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();

let batch = record_batch!(("x", Int32, [10, 20, 30])).unwrap();
let schema = batch.schema();

let table = conn
.create_table("test_add_multi_columns", batch)
.create_table(
"test_add_multi_columns",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -197,9 +205,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();

let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();

let table = conn
.create_table("test_add_const_column", batch)
.create_table(
"test_add_const_column",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -243,9 +255,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();

let batch = record_batch!(("old_name", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();

let table = conn
.create_table("test_alter_rename", batch)
.create_table(
"test_alter_rename",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -288,7 +304,10 @@ mod tests {
.unwrap();

let table = conn
.create_table("test_alter_nullable", batch)
.create_table(
"test_alter_nullable",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -313,9 +332,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();

let batch = record_batch!(("num", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();

let table = conn
.create_table("test_cast_type", batch)
.create_table(
"test_cast_type",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -356,9 +379,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();

let batch = record_batch!(("num", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();

let table = conn
.create_table("test_invalid_cast", batch)
.create_table(
"test_invalid_cast",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -380,9 +407,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();

let batch = record_batch!(("a", Int32, [1, 2, 3]), ("b", Int32, [4, 5, 6])).unwrap();
let schema = batch.schema();

let table = conn
.create_table("test_alter_multi", batch)
.create_table(
"test_alter_multi",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -410,9 +441,13 @@ mod tests {

let batch =
record_batch!(("keep", Int32, [1, 2, 3]), ("remove", Int32, [4, 5, 6])).unwrap();
let schema = batch.schema();

let table = conn
.create_table("test_drop_single", batch)
.create_table(
"test_drop_single",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -443,9 +478,13 @@ mod tests {
("d", Int32, [7, 8])
)
.unwrap();
let schema = batch.schema();

let table = conn
.create_table("test_drop_multi", batch)
.create_table(
"test_drop_multi",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -472,9 +511,13 @@ mod tests {
("extra", Int32, [10, 20, 30])
)
.unwrap();
let schema = batch.schema();

let table = conn
.create_table("test_drop_preserves", batch)
.create_table(
"test_drop_preserves",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -524,9 +567,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();

let batch = record_batch!(("existing", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();

let table = conn
.create_table("test_drop_nonexistent", batch)
.create_table(
"test_drop_nonexistent",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -546,9 +593,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();

let batch = record_batch!(("existing", Int32, [1, 2, 3])).unwrap();
let schema = batch.schema();

let table = conn
.create_table("test_alter_nonexistent", batch)
.create_table(
"test_alter_nonexistent",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
@@ -572,8 +623,13 @@ mod tests {
let conn = connect("memory://").execute().await.unwrap();

let batch = record_batch!(("a", Int32, [1, 2, 3]), ("b", Int32, [4, 5, 6])).unwrap();
let schema = batch.schema();

let table = conn
.create_table("test_version_increment", batch)
.create_table(
"test_version_increment",
RecordBatchIterator::new(vec![Ok(batch)], schema),
)
.execute()
.await
.unwrap();
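
A detail repeated throughout these tests: batch.schema() is captured into a local before the batch is moved into the iterator, since RecordBatchIterator::new takes ownership of both arguments. A minimal sketch with plain arrow-rs; single_batch_reader is a hypothetical name, and record_batch! is the same macro the tests use:

use arrow_array::{record_batch, RecordBatchIterator, RecordBatchReader};

// Wraps a freshly built batch in a single-use reader. The schema must be
// read out first because the batch is moved into the iterator.
fn single_batch_reader() -> impl RecordBatchReader {
    let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
    let schema = batch.schema();
    RecordBatchIterator::new(vec![Ok(batch)], schema)
}

fn main() {
    let reader = single_batch_reader();
    assert_eq!(reader.schema().fields().len(), 1);
}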

@@ -117,8 +117,9 @@ mod tests {
use crate::query::{ExecutableQuery, Select};
use arrow_array::{
record_batch, Array, BooleanArray, Date32Array, FixedSizeListArray, Float32Array,
Float64Array, Int32Array, Int64Array, LargeStringArray, RecordBatch, StringArray,
TimestampMillisecondArray, TimestampNanosecondArray, UInt32Array,
Float64Array, Int32Array, Int64Array, LargeStringArray, RecordBatch, RecordBatchIterator,
RecordBatchReader, StringArray, TimestampMillisecondArray, TimestampNanosecondArray,
UInt32Array,
};
use arrow_data::ArrayDataBuilder;
use arrow_schema::{ArrowError, DataType, Field, Schema, TimeUnit};
@@ -166,46 +167,51 @@ mod tests {
),
]));

let batch = RecordBatch::try_new(
let record_batch_iter = RecordBatchIterator::new(
vec![RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from_iter_values(0..10)),
Arc::new(Int64Array::from_iter_values(0..10)),
Arc::new(UInt32Array::from_iter_values(0..10)),
Arc::new(StringArray::from_iter_values(vec![
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
])),
Arc::new(LargeStringArray::from_iter_values(vec![
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
])),
Arc::new(Float32Array::from_iter_values((0..10).map(|i| i as f32))),
Arc::new(Float64Array::from_iter_values((0..10).map(|i| i as f64))),
Arc::new(Into::<BooleanArray>::into(vec![
true, false, true, false, true, false, true, false, true, false,
])),
Arc::new(Date32Array::from_iter_values(0..10)),
Arc::new(TimestampNanosecondArray::from_iter_values(0..10)),
Arc::new(TimestampMillisecondArray::from_iter_values(0..10)),
Arc::new(
create_fixed_size_list(
Float32Array::from_iter_values((0..20).map(|i| i as f32)),
2,
)
.unwrap(),
),
Arc::new(
create_fixed_size_list(
Float64Array::from_iter_values((0..20).map(|i| i as f64)),
2,
)
.unwrap(),
),
],
)
.unwrap()]
.into_iter()
.map(Ok),
schema.clone(),
vec![
Arc::new(Int32Array::from_iter_values(0..10)),
Arc::new(Int64Array::from_iter_values(0..10)),
Arc::new(UInt32Array::from_iter_values(0..10)),
Arc::new(StringArray::from_iter_values(vec![
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
])),
Arc::new(LargeStringArray::from_iter_values(vec![
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
])),
Arc::new(Float32Array::from_iter_values((0..10).map(|i| i as f32))),
Arc::new(Float64Array::from_iter_values((0..10).map(|i| i as f64))),
Arc::new(Into::<BooleanArray>::into(vec![
true, false, true, false, true, false, true, false, true, false,
])),
Arc::new(Date32Array::from_iter_values(0..10)),
Arc::new(TimestampNanosecondArray::from_iter_values(0..10)),
Arc::new(TimestampMillisecondArray::from_iter_values(0..10)),
Arc::new(
create_fixed_size_list(
Float32Array::from_iter_values((0..20).map(|i| i as f32)),
2,
)
.unwrap(),
),
Arc::new(
create_fixed_size_list(
Float64Array::from_iter_values((0..20).map(|i| i as f64)),
2,
)
.unwrap(),
),
],
)
.unwrap();
);

let table = conn
.create_table("my_table", batch)
.create_table("my_table", record_batch_iter)
.execute()
.await
.unwrap();
@@ -332,13 +338,15 @@ mod tests {
Ok(FixedSizeListArray::from(data))
}

fn make_test_batch() -> RecordBatch {
fn make_test_batches() -> impl RecordBatchReader + Send + Sync + 'static {
let schema = Arc::new(Schema::new(vec![Field::new("i", DataType::Int32, false)]));
RecordBatch::try_new(
schema.clone(),
vec![Arc::new(Int32Array::from_iter_values(0..10))],
RecordBatchIterator::new(
vec![RecordBatch::try_new(
schema.clone(),
vec![Arc::new(Int32Array::from_iter_values(0..10))],
)],
schema,
)
.unwrap()
}

#[tokio::test]
@@ -359,8 +367,12 @@ mod tests {
)
.unwrap();

let schema = batch.schema();
// need the iterator for create table
let record_batch_iter = RecordBatchIterator::new(vec![Ok(batch)], schema);

let table = conn
.create_table("my_table", batch)
.create_table("my_table", record_batch_iter)
.execute()
.await
.unwrap();
@@ -418,7 +430,7 @@ mod tests {
.await
.unwrap();
let tbl = conn
.create_table("my_table", make_test_batch())
.create_table("my_table", make_test_batches())
.execute()
.await
.unwrap();
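
Assembled, the rewritten helper from the hunk above reads roughly as follows. The detail worth noticing is that RecordBatchIterator's item type is Result<RecordBatch, ArrowError>, so the fallible try_new call slots straight into the vec and the old trailing .unwrap() drops out:

use std::sync::Arc;

use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, RecordBatchReader};
use arrow_schema::{DataType, Field, Schema};

fn make_test_batches() -> impl RecordBatchReader + Send + Sync + 'static {
    let schema = Arc::new(Schema::new(vec![Field::new("i", DataType::Int32, false)]));
    // try_new returns Result<RecordBatch, ArrowError>, exactly the item type
    // the iterator expects, so the Result goes in unwrapped.
    RecordBatchIterator::new(
        vec![RecordBatch::try_new(
            schema.clone(),
            vec![Arc::new(Int32Array::from_iter_values(0..10))],
        )],
        schema,
    )
}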

@@ -3,4 +3,3 @@

pub mod connection;
pub mod datagen;
pub mod embeddings;

@@ -34,7 +34,10 @@ impl LanceDbDatagenExt for BatchGeneratorBuilder {
schema,
));
let db = connect("memory:///").execute().await.unwrap();
db.create_table(table_name, stream).execute().await.unwrap()
db.create_table_streaming(table_name, stream)
.execute()
.await
.unwrap()
}
}

@@ -45,5 +48,8 @@ pub async fn virtual_table(name: &str, values: &RecordBatch) -> Table {
schema,
));
let db = connect("memory:///").execute().await.unwrap();
db.create_table(name, stream).execute().await.unwrap()
db.create_table_streaming(name, stream)
.execute()
.await
.unwrap()
}

@@ -1,59 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use std::{borrow::Cow, sync::Arc};

use arrow_array::{Array, FixedSizeListArray, Float32Array};
use arrow_schema::{DataType, Field};

use crate::embeddings::EmbeddingFunction;
use crate::Result;

#[derive(Debug, Clone)]
pub struct MockEmbed {
name: String,
dim: usize,
}

impl MockEmbed {
pub fn new(name: impl Into<String>, dim: usize) -> Self {
Self {
name: name.into(),
dim,
}
}
}

impl EmbeddingFunction for MockEmbed {
fn name(&self) -> &str {
&self.name
}

fn source_type(&self) -> Result<Cow<'_, DataType>> {
Ok(Cow::Borrowed(&DataType::Utf8))
}

fn dest_type(&self) -> Result<Cow<'_, DataType>> {
Ok(Cow::Owned(DataType::new_fixed_size_list(
DataType::Float32,
self.dim as _,
true,
)))
}

fn compute_source_embeddings(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
// We can't use the FixedSizeListBuilder here because it always adds a null bitmap
// and we want to explicitly work with non-nullable arrays.
let len = source.len();
let inner = Arc::new(Float32Array::from(vec![Some(1.0); len * self.dim]));
let field = Field::new("item", inner.data_type().clone(), false);
let arr = FixedSizeListArray::new(Arc::new(field), self.dim as _, inner, None);

Ok(Arc::new(arr))
}

#[allow(unused_variables)]
fn compute_query_embeddings(&self, input: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
todo!()
}
}
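
The deleted mock had one subtlety worth keeping in view: it assembled the fixed-size-list column by hand because FixedSizeListBuilder always attaches a null bitmap. A standalone sketch of that construction with plain arrow-rs; non_null_embeddings is a hypothetical name:

use std::sync::Arc;

use arrow_array::{Array, FixedSizeListArray, Float32Array};
use arrow_schema::{DataType, Field};

// Builds a FixedSizeListArray with no null bitmap; passing None as the
// final argument is what keeps the array explicitly non-nullable.
fn non_null_embeddings(len: usize, dim: usize) -> FixedSizeListArray {
    let inner = Arc::new(Float32Array::from(vec![1.0_f32; len * dim]));
    let field = Arc::new(Field::new("item", DataType::Float32, false));
    FixedSizeListArray::new(field, dim as i32, inner, None)
}

fn main() {
    let arr = non_null_embeddings(3, 4);
    assert_eq!(arr.len(), 3);
    assert!(arr.nulls().is_none());
}
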
@@ -15,6 +15,7 @@ use arrow_array::{
use arrow_schema::{DataType, Field, Schema};
use futures::StreamExt;
use lancedb::{
arrow::IntoArrow,
connect,
embeddings::{EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry},
query::ExecutableQuery,
@@ -252,7 +253,7 @@ async fn test_no_func_in_registry_on_add() -> Result<()> {
Ok(())
}

fn create_some_records() -> Result<Box<dyn arrow_array::RecordBatchReader + Send>> {
fn create_some_records() -> Result<impl IntoArrow> {
const TOTAL: usize = 2;

let schema = Arc::new(Schema::new(vec![

@@ -4,7 +4,7 @@
#![cfg(feature = "s3-test")]
use std::sync::Arc;

use arrow_array::{Int32Array, RecordBatch, StringArray};
use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, StringArray};
use arrow_schema::{DataType, Field, Schema};

use aws_config::{BehaviorVersion, ConfigLoader, Region, SdkConfig};
@@ -111,6 +111,7 @@ async fn test_minio_lifecycle() -> Result<()> {
.await?;

let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());

let table = db.create_table("test_table", data).execute().await?;

@@ -126,6 +127,7 @@
assert_eq!(row_count, 3);

let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
table.add(data).execute().await?;

db.drop_table("test_table", &[]).await?;
@@ -245,6 +247,7 @@ async fn test_encryption() -> Result<()> {

// Create a table with encryption
let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());

let mut builder = db.create_table("test_table", data);
for (key, value) in CONFIG {
@@ -271,6 +274,7 @@
let table = db.open_table("test_table").execute().await?;

let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
table.add(data).execute().await?;
validate_objects_encrypted(&bucket.0, "test_table", &key.0).await;

@@ -296,6 +300,7 @@ async fn test_table_storage_options_override() -> Result<()> {

// Create table overriding with key2 encryption
let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
let _table = db
.create_table("test_override", data)
.storage_option("aws_sse_kms_key_id", &key2.0)
@@ -307,6 +312,7 @@

// Also test that a table created without override uses connection settings
let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
let _table2 = db.create_table("test_inherit", data).execute().await?;

// Verify this table uses key1 from connection
@@ -413,6 +419,7 @@ async fn test_concurrent_dynamodb_commit() {
.unwrap();

let data = test_data();
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());

let table = db.create_table("test_table", data).execute().await.unwrap();

@@ -423,6 +430,7 @@
let table = db.open_table("test_table").execute().await.unwrap();
let data = data.clone();
tasks.push(tokio::spawn(async move {
let data = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
table.add(data).execute().await.unwrap();
}));
}
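
The concurrent test above encodes the same single-use constraint: each spawned writer clones the source batch and builds its own RecordBatchIterator before calling add(). Extracted as a sketch; concurrent_appends is a hypothetical name, and the open_table/add builder chain is the one used in this file:

use arrow_array::{RecordBatch, RecordBatchIterator};

// Spawns `writers` tasks that append the same data concurrently; every task
// needs its own reader because a RecordBatchIterator cannot be shared.
async fn concurrent_appends(db: &lancedb::Connection, data: RecordBatch, writers: usize) {
    let mut tasks = Vec::new();
    for _ in 0..writers {
        let table = db.open_table("test_table").execute().await.unwrap();
        let data = data.clone();
        tasks.push(tokio::spawn(async move {
            let reader = RecordBatchIterator::new(vec![Ok(data.clone())], data.schema());
            table.add(reader).execute().await.unwrap();
        }));
    }
    for task in tasks {
        task.await.unwrap();
    }
}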