Mirror of https://github.com/lancedb/lancedb.git (synced 2026-03-27 19:10:40 +00:00)

Compare commits: python-v0. ... task/remot (52 commits)
| SHA1 |
|---|
| 3c8f9e4c9c |
| 70cbee6293 |
| 02783bf440 |
| 4323ca0147 |
| bd3dd6a8e5 |
| 3c1162612e |
| 53c7c560c9 |
| de4f77800d |
| b6ab721cf7 |
| 027d53500b |
| 9098f47e73 |
| 826a3e5ee9 |
| 9fac56252e |
| c55ca20c1b |
| 5cdb15feef |
| 7a3eea927f |
| 5dd9b072d8 |
| 6dde379d44 |
| 55f09ef1cd |
| e9d8651d18 |
| 071f467571 |
| f83aa25119 |
| 0a8fe4d026 |
| 3ad7be9825 |
| 589041d842 |
| 2e4cd56ab1 |
| 6fd8586fa7 |
| 6329b57604 |
| c51b13e70f |
| 0859312b83 |
| a6e8ec8d48 |
| bd2c6d0763 |
| fbf4a53475 |
| d3e15f3e17 |
| 9c017d8348 |
| c3cc2530b7 |
| 571295b0d9 |
| 972c682857 |
| 4f8ee82730 |
| 131024839f |
| 3c7ddf4d0c |
| 461176f9f2 |
| 3b8996bb69 |
| 3755064e93 |
| 8773b865a9 |
| 1ee29675b3 |
| 9be28448f5 |
| 357197bacc |
| ad51e2dd1f |
| e9e904783c |
| 8500b16eca |
| 57e7282342 |
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.24.0"
+current_version = "0.26.2"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
@@ -3,7 +3,7 @@ name: build-linux-wheel
 description: "Build a manylinux wheel for lance"
 inputs:
   python-minor-version:
-    description: "8, 9, 10, 11, 12"
+    description: "10, 11, 12, 13"
     required: true
   args:
     description: "--release"
2 changes: .github/workflows/build_mac_wheel/action.yml (vendored)
@@ -3,7 +3,7 @@ name: build_wheel
 description: "Build a lance wheel"
 inputs:
   python-minor-version:
-    description: "8, 9, 10, 11"
+    description: "10, 11, 12, 13"
     required: true
   args:
     description: "--release"
@@ -3,7 +3,7 @@ name: build_wheel
 description: "Build a lance wheel"
 inputs:
   python-minor-version:
-    description: "8, 9, 10, 11"
+    description: "10, 11, 12, 13, 14"
     required: true
   args:
     description: "--release"
2 changes: .github/workflows/cargo-publish.yml (vendored)
@@ -42,7 +42,7 @@ jobs:
     name: Report Workflow Failure
     runs-on: ubuntu-latest
     needs: [build]
-    if: always() && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
+    if: always() && failure() && startsWith(github.ref, 'refs/tags/v')
     permissions:
       contents: read
       issues: write
@@ -86,16 +86,17 @@ jobs:
     You are running inside the lancedb repository on a GitHub Actions runner. Update the Lance dependency to version ${VERSION} and prepare a pull request for maintainers to review.

     Follow these steps exactly:
-    1. Use script "ci/set_lance_version.py" to update Lance dependencies. The script already refreshes Cargo metadata, so allow it to finish even if it takes time.
-    2. Run "cargo clippy --workspace --tests --all-features -- -D warnings". If diagnostics appear, fix them yourself and rerun clippy until it exits cleanly. Do not skip any warnings.
-    3. After clippy succeeds, run "cargo fmt --all" to format the workspace.
-    4. Ensure the repository is clean except for intentional changes. Inspect "git status --short" and "git diff" to confirm the dependency update and any required fixes.
-    5. Create and switch to a new branch named "${BRANCH_NAME}" (replace any duplicated hyphens if necessary).
-    6. Stage all relevant files with "git add -A". Commit using the message "${COMMIT_TYPE}: update lance dependency to v${VERSION}".
-    7. Push the branch to origin. If the branch already exists, force-push your changes.
-    8. env "GH_TOKEN" is available, use "gh" tools for github related operations like creating pull request.
-    9. Create a pull request targeting "main" with title "${COMMIT_TYPE}: update lance dependency to v${VERSION}". First, write the PR body to /tmp/pr-body.md using a heredoc (cat <<'EOF' > /tmp/pr-body.md). The body should summarize the dependency bump, clippy/fmt verification, and link the triggering tag (${TAG}). Then run "gh pr create --body-file /tmp/pr-body.md".
-    10. After creating the PR, display the PR URL, "git status --short", and a concise summary of the commands run and their results.
+    1. Use script "ci/set_lance_version.py" to update Lance Rust dependencies. The script already refreshes Cargo metadata, so allow it to finish even if it takes time.
+    2. Update the Java lance-core dependency version in "java/pom.xml": change the "<lance-core.version>...</lance-core.version>" property to "${VERSION}".
+    3. Run "cargo clippy --workspace --tests --all-features -- -D warnings". If diagnostics appear, fix them yourself and rerun clippy until it exits cleanly. Do not skip any warnings.
+    4. After clippy succeeds, run "cargo fmt --all" to format the workspace.
+    5. Ensure the repository is clean except for intentional changes. Inspect "git status --short" and "git diff" to confirm the dependency update and any required fixes.
+    6. Create and switch to a new branch named "${BRANCH_NAME}" (replace any duplicated hyphens if necessary).
+    7. Stage all relevant files with "git add -A". Commit using the message "${COMMIT_TYPE}: update lance dependency to v${VERSION}".
+    8. Push the branch to origin. If the remote branch already exists, delete it first with "gh api -X DELETE repos/lancedb/lancedb/git/refs/heads/${BRANCH_NAME}" then push with "git push origin ${BRANCH_NAME}". Do NOT use "git push --force" or "git push -f".
+    9. env "GH_TOKEN" is available, use "gh" tools for github related operations like creating pull request.
+    10. Create a pull request targeting "main" with title "${COMMIT_TYPE}: update lance dependency to v${VERSION}". First, write the PR body to /tmp/pr-body.md using a heredoc (cat <<'EOF' > /tmp/pr-body.md). The body should summarize the dependency bump, clippy/fmt verification, and link the triggering tag (${TAG}). Then run "gh pr create --body-file /tmp/pr-body.md".
+    11. After creating the PR, display the PR URL, "git status --short", and a concise summary of the commands run and their results.

     Constraints:
     - Use bash commands; avoid modifying GitHub workflow files other than through the scripted task above.
2 changes: .github/workflows/docs.yml (vendored)
@@ -41,7 +41,7 @@ jobs:
         sudo apt install -y protobuf-compiler libssl-dev
         rustup update && rustup default
       - name: Set up Python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
           python-version: "3.10"
           cache: "pip"
1 change: .github/workflows/nodejs.yml (vendored)
@@ -8,6 +8,7 @@ on:
     paths:
       - Cargo.toml
       - nodejs/**
       - docs/src/js/**
       - .github/workflows/nodejs.yml
+      - docker-compose.yml
6 changes: .github/workflows/npm-publish.yml (vendored)
@@ -318,7 +318,7 @@ jobs:
       - name: Setup node
         uses: actions/setup-node@v4
         with:
-          node-version: 20
+          node-version: 24
           cache: npm
           cache-dependency-path: nodejs/package-lock.json
           registry-url: "https://registry.npmjs.org"
@@ -348,9 +348,9 @@ jobs:
         run: find npm
       - name: Publish
         env:
           NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
+          DRY_RUN: ${{ !startsWith(github.ref, 'refs/tags/v') }}
         run: |
           npm config set provenance true
           ARGS="--access public"
           if [[ $DRY_RUN == "true" ]]; then
             ARGS="$ARGS --dry-run"
@@ -363,7 +363,7 @@ jobs:
     name: Report Workflow Failure
     runs-on: ubuntu-latest
     needs: [build-lancedb, test-lancedb, publish]
-    if: always() && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
+    if: always() && failure() && startsWith(github.ref, 'refs/tags/v')
     permissions:
       contents: read
       issues: write
20 changes: .github/workflows/pypi-publish.yml (vendored)
@@ -44,12 +44,12 @@ jobs:
       fetch-depth: 0
       lfs: true
     - name: Set up Python
-      uses: actions/setup-python@v4
+      uses: actions/setup-python@v6
       with:
-        python-version: 3.8
+        python-version: "3.10"
     - uses: ./.github/workflows/build_linux_wheel
       with:
-        python-minor-version: 8
+        python-minor-version: 10
         args: "--release --strip ${{ matrix.config.extra_args }}"
         arm-build: ${{ matrix.config.platform == 'aarch64' }}
         manylinux: ${{ matrix.config.manylinux }}
@@ -74,12 +74,12 @@ jobs:
       fetch-depth: 0
       lfs: true
     - name: Set up Python
-      uses: actions/setup-python@v4
+      uses: actions/setup-python@v6
       with:
-        python-version: 3.12
+        python-version: "3.13"
     - uses: ./.github/workflows/build_mac_wheel
       with:
-        python-minor-version: 8
+        python-minor-version: 10
         args: "--release --strip --target ${{ matrix.config.target }} --features fp16kernels"
     - uses: ./.github/workflows/upload_wheel
       if: startsWith(github.ref, 'refs/tags/python-v')
@@ -95,12 +95,12 @@ jobs:
       fetch-depth: 0
       lfs: true
     - name: Set up Python
-      uses: actions/setup-python@v4
+      uses: actions/setup-python@v6
       with:
-        python-version: 3.12
+        python-version: "3.13"
     - uses: ./.github/workflows/build_windows_wheel
       with:
-        python-minor-version: 8
+        python-minor-version: 10
         args: "--release --strip"
         vcpkg_token: ${{ secrets.VCPKG_GITHUB_PACKAGES }}
     - uses: ./.github/workflows/upload_wheel
@@ -181,7 +181,7 @@ jobs:
     permissions:
       contents: read
       issues: write
-    if: always() && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
+    if: always() && failure() && startsWith(github.ref, 'refs/tags/python-v')
     steps:
       - uses: actions/checkout@v4
       - uses: ./.github/actions/create-failure-issue
28 changes: .github/workflows/python.yml (vendored)
@@ -36,9 +36,9 @@ jobs:
       fetch-depth: 0
       lfs: true
     - name: Set up Python
-      uses: actions/setup-python@v5
+      uses: actions/setup-python@v6
       with:
-        python-version: "3.12"
+        python-version: "3.13"
     - name: Install ruff
       run: |
         pip install ruff==0.9.9
@@ -61,9 +61,9 @@ jobs:
       fetch-depth: 0
       lfs: true
     - name: Set up Python
-      uses: actions/setup-python@v5
+      uses: actions/setup-python@v6
       with:
-        python-version: "3.12"
+        python-version: "3.13"
     - name: Install protobuf compiler
       run: |
         sudo apt update
@@ -90,9 +90,9 @@ jobs:
       fetch-depth: 0
       lfs: true
     - name: Set up Python
-      uses: actions/setup-python@v5
+      uses: actions/setup-python@v6
       with:
-        python-version: "3.12"
+        python-version: "3.13"
         cache: "pip"
     - name: Install protobuf
       run: |
@@ -110,7 +110,7 @@ jobs:
     timeout-minutes: 30
     strategy:
       matrix:
-        python-minor-version: ["9", "12"]
+        python-minor-version: ["10", "13"]
     runs-on: "ubuntu-24.04"
     defaults:
       run:
@@ -126,7 +126,7 @@ jobs:
         sudo apt update
         sudo apt install -y protobuf-compiler
     - name: Set up Python
-      uses: actions/setup-python@v5
+      uses: actions/setup-python@v6
       with:
        python-version: 3.${{ matrix.python-minor-version }}
     - uses: ./.github/workflows/build_linux_wheel
@@ -156,9 +156,9 @@ jobs:
       fetch-depth: 0
       lfs: true
     - name: Set up Python
-      uses: actions/setup-python@v5
+      uses: actions/setup-python@v6
       with:
-        python-version: "3.12"
+        python-version: "3.13"
     - uses: ./.github/workflows/build_mac_wheel
       with:
         args: --profile ci
@@ -185,9 +185,9 @@ jobs:
       fetch-depth: 0
       lfs: true
     - name: Set up Python
-      uses: actions/setup-python@v5
+      uses: actions/setup-python@v6
       with:
-        python-version: "3.12"
+        python-version: "3.13"
     - uses: ./.github/workflows/build_windows_wheel
       with:
         args: --profile ci
@@ -212,9 +212,9 @@ jobs:
         sudo apt update
         sudo apt install -y protobuf-compiler
     - name: Set up Python
-      uses: actions/setup-python@v5
+      uses: actions/setup-python@v6
       with:
-        python-version: 3.9
+        python-version: "3.10"
     - name: Install lancedb
       run: |
         pip install "pydantic<2"
853 changes: Cargo.lock (generated)
File diff suppressed because it is too large.
59 changes: Cargo.toml
@@ -15,39 +15,40 @@ categories = ["database-implementations"]
 rust-version = "1.88.0"

 [workspace.dependencies]
-lance = { "version" = "=1.0.4", default-features = false, "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-core = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-datagen = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-file = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-io = { "version" = "=1.0.4", default-features = false, "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-index = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-linalg = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-namespace = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-namespace-impls = { "version" = "=1.0.4", default-features = false, "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-table = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-testing = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-datafusion = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-encoding = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-arrow = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
+lance = { "version" = "=2.0.0", default-features = false }
+lance-core = "=2.0.0"
+lance-datagen = "=2.0.0"
+lance-file = "=2.0.0"
+lance-io = { "version" = "=2.0.0", default-features = false }
+lance-index = "=2.0.0"
+lance-linalg = "=2.0.0"
+lance-namespace = "=2.0.0"
+lance-namespace-impls = { "version" = "=2.0.0", default-features = false }
+lance-table = "=2.0.0"
+lance-testing = "=2.0.0"
+lance-datafusion = "=2.0.0"
+lance-encoding = "=2.0.0"
+lance-arrow = "=2.0.0"
 ahash = "0.8"
 # Note that this one does not include pyarrow
-arrow = { version = "56.2", optional = false }
-arrow-array = "56.2"
-arrow-data = "56.2"
-arrow-ipc = "56.2"
-arrow-ord = "56.2"
-arrow-schema = "56.2"
-arrow-select = "56.2"
-arrow-cast = "56.2"
+arrow = { version = "57.2", optional = false }
+arrow-array = "57.2"
+arrow-data = "57.2"
+arrow-ipc = "57.2"
+arrow-ord = "57.2"
+arrow-schema = "57.2"
+arrow-select = "57.2"
+arrow-cast = "57.2"
 async-trait = "0"
-datafusion = { version = "50.1", default-features = false }
-datafusion-catalog = "50.1"
-datafusion-common = { version = "50.1", default-features = false }
-datafusion-execution = "50.1"
-datafusion-expr = "50.1"
-datafusion-physical-plan = "50.1"
+datafusion = { version = "51.0", default-features = false }
+datafusion-catalog = "51.0"
+datafusion-common = { version = "51.0", default-features = false }
+datafusion-execution = "51.0"
+datafusion-expr = "51.0"
+datafusion-physical-plan = "51.0"
+datafusion-physical-expr = "51.0"
 env_logger = "0.11"
-half = { "version" = "2.6.0", default-features = false, features = [
+half = { "version" = "2.7.1", default-features = false, features = [
     "num-traits",
 ] }
 futures = "0"
9 changes: Makefile (new file)
@@ -0,0 +1,9 @@
.PHONY: licenses

licenses:
	cargo about generate about.hbs -o RUST_THIRD_PARTY_LICENSES.html -c about.toml
	cd python && cargo about generate ../about.hbs -o RUST_THIRD_PARTY_LICENSES.html -c ../about.toml
	cd python && uv sync --all-extras && uv tool run pip-licenses --python .venv/bin/python --format=markdown --with-urls --output-file=PYTHON_THIRD_PARTY_LICENSES.md
	cd nodejs && cargo about generate ../about.hbs -o RUST_THIRD_PARTY_LICENSES.html -c ../about.toml
	cd nodejs && npx license-checker --markdown --out NODEJS_THIRD_PARTY_LICENSES.md
	cd java && ./mvnw license:aggregate-add-third-party -q
@@ -66,7 +66,7 @@ Follow the [Quickstart](https://lancedb.com/docs/quickstart/) doc to set up Lanc
 | Python SDK | https://lancedb.github.io/lancedb/python/python/ |
 | Typescript SDK | https://lancedb.github.io/lancedb/js/globals/ |
 | Rust SDK | https://docs.rs/lancedb/latest/lancedb/index.html |
-| REST API | https://docs.lancedb.com/api-reference/introduction |
+| REST API | https://docs.lancedb.com/api-reference/rest |

 ## **Join Us and Contribute**
15276 changes: RUST_THIRD_PARTY_LICENSES.html (new file)
File diff suppressed because it is too large.
70 changes: about.hbs (new file)
@@ -0,0 +1,70 @@
<html>

<head>
  <style>
    @media (prefers-color-scheme: dark) {
      body {
        background: #333;
        color: white;
      }
      a {
        color: skyblue;
      }
    }
    .container {
      font-family: sans-serif;
      max-width: 800px;
      margin: 0 auto;
    }
    .intro {
      text-align: center;
    }
    .licenses-list {
      list-style-type: none;
      margin: 0;
      padding: 0;
    }
    .license-used-by {
      margin-top: -10px;
    }
    .license-text {
      max-height: 200px;
      overflow-y: scroll;
      white-space: pre-wrap;
    }
  </style>
</head>

<body>
  <main class="container">
    <div class="intro">
      <h1>Third Party Licenses</h1>
      <p>This page lists the licenses of the projects used in cargo-about.</p>
    </div>

    <h2>Overview of licenses:</h2>
    <ul class="licenses-overview">
      {{#each overview}}
      <li><a href="#{{id}}">{{name}}</a> ({{count}})</li>
      {{/each}}
    </ul>

    <h2>All license text:</h2>
    <ul class="licenses-list">
      {{#each licenses}}
      <li class="license">
        <h3 id="{{id}}">{{name}}</h3>
        <h4>Used by:</h4>
        <ul class="license-used-by">
          {{#each used_by}}
          <li><a href="{{#if crate.repository}} {{crate.repository}} {{else}} https://crates.io/crates/{{crate.name}} {{/if}}">{{crate.name}} {{crate.version}}</a></li>
          {{/each}}
        </ul>
        <pre class="license-text">{{text}}</pre>
      </li>
      {{/each}}
    </ul>
  </main>
</body>

</html>
18 changes: about.toml (new file)
@@ -0,0 +1,18 @@
accepted = [
    "0BSD",
    "Apache-2.0",
    "Apache-2.0 WITH LLVM-exception",
    "BSD-2-Clause",
    "BSD-3-Clause",
    "BSL-1.0",
    "bzip2-1.0.6",
    "CC0-1.0",
    "CDDL-1.0",
    "CDLA-Permissive-2.0",
    "ISC",
    "MIT",
    "MPL-2.0",
    "OpenSSL",
    "Unicode-3.0",
    "Zlib",
]
@@ -0,0 +1,62 @@
# VoyageAI Embeddings

Voyage AI provides cutting-edge embedding models and rerankers.

Using the VoyageAI API requires the voyageai package, which can be installed with `pip install voyageai`. VoyageAI embeddings generate embeddings for text data; the embeddings can be used for tasks such as semantic search, clustering, and classification.
You also need to set the `VOYAGE_API_KEY` environment variable to use the VoyageAI API.

Supported models are:

**Voyage-4 Series (Latest)**

- voyage-4 (1024 dims, general-purpose and multilingual retrieval, 320K batch tokens)
- voyage-4-lite (1024 dims, optimized for latency and cost, 1M batch tokens)
- voyage-4-large (1024 dims, best retrieval quality, 120K batch tokens)

**Voyage-3 Series**

- voyage-3
- voyage-3-lite

**Domain-Specific Models**

- voyage-finance-2
- voyage-multilingual-2
- voyage-law-2
- voyage-code-2

Supported parameters (to be passed to the `create` method) are:

| Parameter | Type | Default Value | Description |
|---|---|---|---|
| `name` | `str` | `None` | The model ID of the model to use. Supported base models for text embeddings: voyage-4, voyage-4-lite, voyage-4-large, voyage-3, voyage-3-lite, voyage-finance-2, voyage-multilingual-2, voyage-law-2, voyage-code-2 |
| `input_type` | `str` | `None` | Type of the input text. Defaults to None. Other options: query, document. |
| `truncation` | `bool` | `True` | Whether to truncate the input texts to fit within the context length. |

Usage example:

```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import EmbeddingFunctionRegistry

voyageai = (
    EmbeddingFunctionRegistry.get_instance()
    .get("voyageai")
    .create(name="voyage-3")
)

class TextModel(LanceModel):
    text: str = voyageai.SourceField()
    vector: Vector(voyageai.ndims()) = voyageai.VectorField()

data = [
    {"text": "hello world"},
    {"text": "goodbye world"},
]

db = lancedb.connect("~/.lancedb")
tbl = db.create_table("test", schema=TextModel, mode="overwrite")

tbl.add(data)
```
@@ -14,7 +14,7 @@ Add the following dependency to your `pom.xml`:
 <dependency>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-core</artifactId>
-    <version>0.24.0</version>
+    <version>0.26.2</version>
 </dependency>
 ```
@@ -367,6 +367,27 @@ Use [Table.listIndices](Table.md#listindices) to find the names of the indices.

 ***

+### initialStorageOptions()
+
+```ts
+abstract initialStorageOptions(): Promise<undefined | null | Record<string, string>>
+```
+
+Get the initial storage options that were passed in when opening this table.
+
+For dynamically refreshed options (e.g., credential vending), use
+[Table.latestStorageOptions](Table.md#lateststorageoptions).
+
+Warning: This is an internal API and the return value is subject to change.
+
+#### Returns
+
+`Promise`<`undefined` \| `null` \| `Record`<`string`, `string`>>
+
+The storage options, or undefined if no storage options were configured.
+
+***
+
 ### isOpen()

 ```ts
@@ -381,6 +402,28 @@ Return true if the table has not been closed

 ***

+### latestStorageOptions()
+
+```ts
+abstract latestStorageOptions(): Promise<undefined | null | Record<string, string>>
+```
+
+Get the latest storage options, refreshing from provider if configured.
+
+This method is useful for credential vending scenarios where storage options
+may be refreshed dynamically. If no dynamic provider is configured, this
+returns the initial static options.
+
+Warning: This is an internal API and the return value is subject to change.
+
+#### Returns
+
+`Promise`<`undefined` \| `null` \| `Record`<`string`, `string`>>
+
+The storage options, or undefined if no storage options were configured.
+
+***
+
 ### listIndices()

 ```ts
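The two storage-option accessors above pair naturally. A minimal TypeScript sketch, assuming a table opened with the SDK version these commits produce, and keeping in mind that both methods are documented above as internal APIs whose return values may change:

```ts
import * as lancedb from "@lancedb/lancedb";

// Hedged sketch: compare the options a table was opened with against the
// options after any configured provider refresh (e.g. vended credentials).
async function inspectStorageOptions(tbl: lancedb.Table): Promise<void> {
  // Static snapshot captured when the table was opened.
  const initial = await tbl.initialStorageOptions();
  // Refreshed from the dynamic provider if one is configured; per the docs
  // above, this falls back to the same initial static options otherwise.
  const latest = await tbl.latestStorageOptions();
  console.log("initial:", initial, "latest:", latest);
}
```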
@@ -705,8 +748,11 @@ Create a query that returns a subset of the rows in the table.

 #### Parameters

-* **rowIds**: `number`[]
+* **rowIds**: readonly (`number` \| `bigint`)[]
   The row ids of the rows to return.
+  Row ids returned by `withRowId()` are `bigint`, so `bigint[]` is supported.
+  For convenience / backwards compatibility, `number[]` is also accepted (for
+  small row ids that fit in a safe integer).

 #### Returns
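A hedged TypeScript sketch of the `bigint` round trip the parameter docs above describe. The excerpt does not show which method these docs belong to, so `takeRows` is a purely hypothetical stand-in name, and the `_rowid` column name is an assumption about what `withRowId()` adds:

```ts
import * as lancedb from "@lancedb/lancedb";

// Hedged sketch: row ids surfaced by withRowId() come back as bigint and can
// be passed straight back in; small ids may also be passed as plain numbers.
async function roundTripRowIds(tbl: lancedb.Table): Promise<void> {
  const rows = await tbl.query().withRowId().limit(5).toArray();
  // `_rowid` is assumed to be the column name added by withRowId().
  const ids = rows.map((r) => r._rowid as bigint);
  // `takeRows` is hypothetical -- substitute the method documented above.
  const subset = await (tbl as any).takeRows(ids).toArray();
  console.log(subset.length, "rows fetched by row id");
}
```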
71 changes: java/JAVA_THIRD_PARTY_LICENSES.md (new file)
@@ -0,0 +1,71 @@
List of third-party dependencies grouped by their license type.

Apache 2.0:

* error-prone annotations (com.google.errorprone:error_prone_annotations:2.28.0 - https://errorprone.info/error_prone_annotations)

Apache License 2.0:

* JsonNullable Jackson module (org.openapitools:jackson-databind-nullable:0.2.6 - https://github.com/OpenAPITools/jackson-databind-nullable)

Apache License V2.0:

* FlatBuffers Java API (com.google.flatbuffers:flatbuffers-java:23.5.26 - https://github.com/google/flatbuffers)

Apache License, Version 2.0:

* Apache Commons Codec (commons-codec:commons-codec:1.15 - https://commons.apache.org/proper/commons-codec/)
* Apache HttpClient (org.apache.httpcomponents.client5:httpclient5:5.2.1 - https://hc.apache.org/httpcomponents-client-5.0.x/5.2.1/httpclient5/)
* Apache HttpComponents Core HTTP/1.1 (org.apache.httpcomponents.core5:httpcore5:5.2 - https://hc.apache.org/httpcomponents-core-5.2.x/5.2/httpcore5/)
* Apache HttpComponents Core HTTP/2 (org.apache.httpcomponents.core5:httpcore5-h2:5.2 - https://hc.apache.org/httpcomponents-core-5.2.x/5.2/httpcore5-h2/)
* Arrow Format (org.apache.arrow:arrow-format:15.0.0 - https://arrow.apache.org/arrow-format/)
* Arrow Java C Data Interface (org.apache.arrow:arrow-c-data:15.0.0 - https://arrow.apache.org/arrow-c-data/)
* Arrow Java Dataset (org.apache.arrow:arrow-dataset:15.0.0 - https://arrow.apache.org/arrow-dataset/)
* Arrow Memory - Core (org.apache.arrow:arrow-memory-core:15.0.0 - https://arrow.apache.org/arrow-memory/arrow-memory-core/)
* Arrow Memory - Netty (org.apache.arrow:arrow-memory-netty:15.0.0 - https://arrow.apache.org/arrow-memory/arrow-memory-netty/)
* Arrow Vectors (org.apache.arrow:arrow-vector:15.0.0 - https://arrow.apache.org/arrow-vector/)
* Guava: Google Core Libraries for Java (com.google.guava:guava:33.3.1-jre - https://github.com/google/guava)
* J2ObjC Annotations (com.google.j2objc:j2objc-annotations:3.0.0 - https://github.com/google/j2objc/)
* Netty/Buffer (io.netty:netty-buffer:4.1.104.Final - https://netty.io/netty-buffer/)
* Netty/Common (io.netty:netty-common:4.1.104.Final - https://netty.io/netty-common/)

Apache-2.0:

* Apache Commons Lang (org.apache.commons:commons-lang3:3.18.0 - https://commons.apache.org/proper/commons-lang/)
* lance-namespace-apache-client (org.lance:lance-namespace-apache-client:0.4.5 - https://github.com/openapitools/openapi-generator)
* lance-namespace-core (org.lance:lance-namespace-core:0.4.5 - https://lance.org/format/namespace/lance-namespace-core/)

EDL 1.0:

* Jakarta Activation API jar (jakarta.activation:jakarta.activation-api:1.2.2 - https://github.com/eclipse-ee4j/jaf/jakarta.activation-api)

Eclipse Distribution License - v 1.0:

* Eclipse Collections API (org.eclipse.collections:eclipse-collections-api:11.1.0 - https://github.com/eclipse/eclipse-collections/eclipse-collections-api)
* Eclipse Collections Main Library (org.eclipse.collections:eclipse-collections:11.1.0 - https://github.com/eclipse/eclipse-collections/eclipse-collections)
* Jakarta XML Binding API (jakarta.xml.bind:jakarta.xml.bind-api:2.3.3 - https://github.com/eclipse-ee4j/jaxb-api/jakarta.xml.bind-api)

Eclipse Public License - v 1.0:

* Eclipse Collections API (org.eclipse.collections:eclipse-collections-api:11.1.0 - https://github.com/eclipse/eclipse-collections/eclipse-collections-api)
* Eclipse Collections Main Library (org.eclipse.collections:eclipse-collections:11.1.0 - https://github.com/eclipse/eclipse-collections/eclipse-collections)

The Apache Software License, Version 2.0:

* FindBugs-jsr305 (com.google.code.findbugs:jsr305:3.0.2 - http://findbugs.sourceforge.net/)
* Guava InternalFutureFailureAccess and InternalFutures (com.google.guava:failureaccess:1.0.2 - https://github.com/google/guava/failureaccess)
* Guava ListenableFuture only (com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava - https://github.com/google/guava/listenablefuture)
* Jackson datatype: JSR310 (com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.16.0 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jsr310)
* Jackson module: Old JAXB Annotations (javax.xml.bind) (com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.17.1 - https://github.com/FasterXML/jackson-modules-base)
* Jackson-annotations (com.fasterxml.jackson.core:jackson-annotations:2.16.0 - https://github.com/FasterXML/jackson)
* Jackson-core (com.fasterxml.jackson.core:jackson-core:2.16.0 - https://github.com/FasterXML/jackson-core)
* jackson-databind (com.fasterxml.jackson.core:jackson-databind:2.15.2 - https://github.com/FasterXML/jackson)
* Jackson-JAXRS: base (com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.17.1 - https://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-base)
* Jackson-JAXRS: JSON (com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.17.1 - https://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-json-provider)
* JAR JNI Loader (org.questdb:jar-jni:1.1.1 - https://github.com/questdb/rust-maven-plugin)
* Lance Core (org.lance:lance-core:2.0.0 - https://lance.org/)

The MIT License:

* Checker Qual (org.checkerframework:checker-qual:3.43.0 - https://checkerframework.org/)
71 changes: java/lancedb-core/JAVA_THIRD_PARTY_LICENSES.md (new file)
@@ -0,0 +1,71 @@
(Contents identical to java/JAVA_THIRD_PARTY_LICENSES.md above.)
@@ -8,7 +8,7 @@
   <parent>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-parent</artifactId>
-    <version>0.24.0-final.0</version>
+    <version>0.26.2-final.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
20 changes: java/pom.xml
@@ -6,7 +6,7 @@

   <groupId>com.lancedb</groupId>
   <artifactId>lancedb-parent</artifactId>
-  <version>0.24.0-final.0</version>
+  <version>0.26.2-final.0</version>
   <packaging>pom</packaging>
   <name>${project.artifactId}</name>
   <description>LanceDB Java SDK Parent POM</description>
@@ -28,7 +28,7 @@
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <arrow.version>15.0.0</arrow.version>
-    <lance-core.version>1.0.0-rc.2</lance-core.version>
+    <lance-core.version>2.0.0</lance-core.version>
     <spotless.skip>false</spotless.skip>
     <spotless.version>2.30.0</spotless.version>
     <spotless.java.googlejavaformat.version>1.7</spotless.java.googlejavaformat.version>
@@ -160,6 +160,19 @@
         <groupId>com.diffplug.spotless</groupId>
         <artifactId>spotless-maven-plugin</artifactId>
       </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>license-maven-plugin</artifactId>
+        <version>2.4.0</version>
+        <configuration>
+          <outputDirectory>${project.basedir}</outputDirectory>
+          <thirdPartyFilename>JAVA_THIRD_PARTY_LICENSES.md</thirdPartyFilename>
+          <fileTemplate>/org/codehaus/mojo/license/third-party-file-groupByLicense.ftl</fileTemplate>
+          <includedScopes>compile,runtime</includedScopes>
+          <excludedScopes>test,provided</excludedScopes>
+          <sortArtifactByName>true</sortArtifactByName>
+        </configuration>
+      </plugin>
     </plugins>
     <pluginManagement>
       <plugins>
@@ -292,11 +305,12 @@
       <plugin>
         <groupId>org.sonatype.central</groupId>
         <artifactId>central-publishing-maven-plugin</artifactId>
-        <version>0.4.0</version>
+        <version>0.8.0</version>
         <extensions>true</extensions>
         <configuration>
           <publishingServerId>ossrh</publishingServerId>
           <tokenAuth>true</tokenAuth>
+          <autoPublish>true</autoPublish>
         </configuration>
       </plugin>
       <plugin>
@@ -1,7 +1,7 @@
 [package]
 name = "lancedb-nodejs"
 edition.workspace = true
-version = "0.24.0"
+version = "0.26.2"
 license.workspace = true
 description.workspace = true
 repository.workspace = true
668 changes: nodejs/NODEJS_THIRD_PARTY_LICENSES.md (new file)
@@ -0,0 +1,668 @@
[@75lb/deep-merge@1.1.2](https://github.com/75lb/deep-merge) - MIT
[@aashutoshrathi/word-wrap@1.2.6](https://github.com/aashutoshrathi/word-wrap) - MIT
[@ampproject/remapping@2.2.1](https://github.com/ampproject/remapping) - Apache-2.0
[@aws-crypto/crc32@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
[@aws-crypto/crc32c@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
[@aws-crypto/ie11-detection@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
[@aws-crypto/sha1-browser@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
[@aws-crypto/sha256-browser@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
[@aws-crypto/sha256-browser@5.2.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
[@aws-crypto/sha256-js@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
[@aws-crypto/sha256-js@5.2.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
[@aws-crypto/supports-web-crypto@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
[@aws-crypto/supports-web-crypto@5.2.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
[@aws-crypto/util@3.0.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
[@aws-crypto/util@5.2.0](https://github.com/aws/aws-sdk-js-crypto-helpers) - Apache-2.0
[@aws-sdk/client-dynamodb@3.602.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/client-kms@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/client-s3@3.550.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/client-sso-oidc@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/client-sso-oidc@3.600.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/client-sso@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/client-sso@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/client-sts@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/client-sts@3.600.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/core@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/core@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-env@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-env@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-http@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-http@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-ini@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-ini@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-node@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-node@3.600.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-process@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-process@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-sso@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-sso@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-web-identity@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/credential-provider-web-identity@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/endpoint-cache@3.572.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-bucket-endpoint@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-endpoint-discovery@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-expect-continue@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-flexible-checksums@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-host-header@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-host-header@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-location-constraint@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-logger@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-logger@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-recursion-detection@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-recursion-detection@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-sdk-s3@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-signing@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-ssec@3.537.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-user-agent@3.540.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/middleware-user-agent@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/region-config-resolver@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/region-config-resolver@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/signature-v4-multi-region@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/token-providers@3.549.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/token-providers@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/types@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/types@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/util-arn-parser@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/util-endpoints@3.540.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/util-endpoints@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/util-locate-window@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/util-user-agent-browser@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/util-user-agent-browser@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/util-user-agent-node@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/util-user-agent-node@3.598.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/util-utf8-browser@3.259.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@aws-sdk/xml-builder@3.535.0](https://github.com/aws/aws-sdk-js-v3) - Apache-2.0
[@babel/code-frame@7.26.2](https://github.com/babel/babel) - MIT
[@babel/compat-data@7.23.5](https://github.com/babel/babel) - MIT
[@babel/core@7.23.7](https://github.com/babel/babel) - MIT
[@babel/generator@7.23.6](https://github.com/babel/babel) - MIT
[@babel/helper-compilation-targets@7.23.6](https://github.com/babel/babel) - MIT
[@babel/helper-environment-visitor@7.22.20](https://github.com/babel/babel) - MIT
[@babel/helper-function-name@7.23.0](https://github.com/babel/babel) - MIT
[@babel/helper-hoist-variables@7.22.5](https://github.com/babel/babel) - MIT
[@babel/helper-module-imports@7.22.15](https://github.com/babel/babel) - MIT
[@babel/helper-module-transforms@7.23.3](https://github.com/babel/babel) - MIT
[@babel/helper-plugin-utils@7.22.5](https://github.com/babel/babel) - MIT
[@babel/helper-simple-access@7.22.5](https://github.com/babel/babel) - MIT
[@babel/helper-split-export-declaration@7.22.6](https://github.com/babel/babel) - MIT
[@babel/helper-string-parser@7.25.9](https://github.com/babel/babel) - MIT
[@babel/helper-validator-identifier@7.25.9](https://github.com/babel/babel) - MIT
[@babel/helper-validator-option@7.23.5](https://github.com/babel/babel) - MIT
[@babel/helpers@7.27.0](https://github.com/babel/babel) - MIT
[@babel/parser@7.27.0](https://github.com/babel/babel) - MIT
[@babel/plugin-syntax-async-generators@7.8.4](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-async-generators) - MIT
[@babel/plugin-syntax-bigint@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-bigint) - MIT
[@babel/plugin-syntax-class-properties@7.12.13](https://github.com/babel/babel) - MIT
[@babel/plugin-syntax-import-meta@7.10.4](https://github.com/babel/babel) - MIT
[@babel/plugin-syntax-json-strings@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-json-strings) - MIT
[@babel/plugin-syntax-jsx@7.23.3](https://github.com/babel/babel) - MIT
[@babel/plugin-syntax-logical-assignment-operators@7.10.4](https://github.com/babel/babel) - MIT
[@babel/plugin-syntax-nullish-coalescing-operator@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-nullish-coalescing-operator) - MIT
[@babel/plugin-syntax-numeric-separator@7.10.4](https://github.com/babel/babel) - MIT
[@babel/plugin-syntax-object-rest-spread@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-object-rest-spread) - MIT
[@babel/plugin-syntax-optional-catch-binding@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-optional-catch-binding) - MIT
[@babel/plugin-syntax-optional-chaining@7.8.3](https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-optional-chaining) - MIT
[@babel/plugin-syntax-top-level-await@7.14.5](https://github.com/babel/babel) - MIT
[@babel/plugin-syntax-typescript@7.23.3](https://github.com/babel/babel) - MIT
[@babel/template@7.27.0](https://github.com/babel/babel) - MIT
[@babel/traverse@7.23.7](https://github.com/babel/babel) - MIT
[@babel/types@7.27.0](https://github.com/babel/babel) - MIT
[@bcoe/v8-coverage@0.2.3](https://github.com/demurgos/v8-coverage) - MIT
[@biomejs/biome@1.8.3](https://github.com/biomejs/biome) - MIT OR Apache-2.0
[@biomejs/cli-darwin-arm64@1.8.3](https://github.com/biomejs/biome) - MIT OR Apache-2.0
[@eslint-community/eslint-utils@4.4.0](https://github.com/eslint-community/eslint-utils) - MIT
[@eslint-community/regexpp@4.10.0](https://github.com/eslint-community/regexpp) - MIT
[@eslint/eslintrc@2.1.4](https://github.com/eslint/eslintrc) - MIT
[@eslint/js@8.57.0](https://github.com/eslint/eslint) - MIT
[@huggingface/jinja@0.3.2](https://github.com/huggingface/huggingface.js) - MIT
[@huggingface/transformers@3.0.2](https://github.com/huggingface/transformers.js) - Apache-2.0
[@humanwhocodes/config-array@0.11.14](https://github.com/humanwhocodes/config-array) - Apache-2.0
[@humanwhocodes/module-importer@1.0.1](https://github.com/humanwhocodes/module-importer) - Apache-2.0
[@humanwhocodes/object-schema@2.0.2](https://github.com/humanwhocodes/object-schema) - BSD-3-Clause
[@img/sharp-darwin-arm64@0.33.5](https://github.com/lovell/sharp) - Apache-2.0
[@img/sharp-libvips-darwin-arm64@1.0.4](https://github.com/lovell/sharp-libvips) - LGPL-3.0-or-later
[@isaacs/cliui@8.0.2](https://github.com/yargs/cliui) - ISC
[@isaacs/fs-minipass@4.0.1](https://github.com/npm/fs-minipass) - ISC
[@istanbuljs/load-nyc-config@1.1.0](https://github.com/istanbuljs/load-nyc-config) - ISC
[@istanbuljs/schema@0.1.3](https://github.com/istanbuljs/schema) - MIT
[@jest/console@29.7.0](https://github.com/jestjs/jest) - MIT
[@jest/core@29.7.0](https://github.com/jestjs/jest) - MIT
[@jest/environment@29.7.0](https://github.com/jestjs/jest) - MIT
[@jest/expect-utils@29.7.0](https://github.com/jestjs/jest) - MIT
[@jest/expect@29.7.0](https://github.com/jestjs/jest) - MIT
[@jest/fake-timers@29.7.0](https://github.com/jestjs/jest) - MIT
[@jest/globals@29.7.0](https://github.com/jestjs/jest) - MIT
[@jest/reporters@29.7.0](https://github.com/jestjs/jest) - MIT
[@jest/schemas@29.6.3](https://github.com/jestjs/jest) - MIT
[@jest/source-map@29.6.3](https://github.com/jestjs/jest) - MIT
[@jest/test-result@29.7.0](https://github.com/jestjs/jest) - MIT
[@jest/test-sequencer@29.7.0](https://github.com/jestjs/jest) - MIT
[@jest/transform@29.7.0](https://github.com/jestjs/jest) - MIT
[@jest/types@29.6.3](https://github.com/jestjs/jest) - MIT
[@jridgewell/gen-mapping@0.3.3](https://github.com/jridgewell/gen-mapping) - MIT
[@jridgewell/resolve-uri@3.1.1](https://github.com/jridgewell/resolve-uri) - MIT
[@jridgewell/set-array@1.1.2](https://github.com/jridgewell/set-array) - MIT
[@jridgewell/sourcemap-codec@1.4.15](https://github.com/jridgewell/sourcemap-codec) - MIT
[@jridgewell/trace-mapping@0.3.22](https://github.com/jridgewell/trace-mapping) - MIT
[@lancedb/lancedb@0.26.2](https://github.com/lancedb/lancedb) - Apache-2.0
[@napi-rs/cli@2.18.3](https://github.com/napi-rs/napi-rs) - MIT
[@nodelib/fs.scandir@2.1.5](https://github.com/nodelib/nodelib/tree/master/packages/fs/fs.scandir) - MIT
[@nodelib/fs.stat@2.0.5](https://github.com/nodelib/nodelib/tree/master/packages/fs/fs.stat) - MIT
[@nodelib/fs.walk@1.2.8](https://github.com/nodelib/nodelib/tree/master/packages/fs/fs.walk) - MIT
[@pkgjs/parseargs@0.11.0](https://github.com/pkgjs/parseargs) - MIT
[@protobufjs/aspromise@1.1.2](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
[@protobufjs/base64@1.1.2](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
[@protobufjs/codegen@2.0.4](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
[@protobufjs/eventemitter@1.1.0](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
[@protobufjs/fetch@1.1.0](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
[@protobufjs/float@1.0.2](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
[@protobufjs/inquire@1.1.0](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
[@protobufjs/path@1.1.2](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
[@protobufjs/pool@1.1.0](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
[@protobufjs/utf8@1.1.0](https://github.com/dcodeIO/protobuf.js) - BSD-3-Clause
[@shikijs/core@1.10.3](https://github.com/shikijs/shiki) - MIT
[@sinclair/typebox@0.27.8](https://github.com/sinclairzx81/typebox) - MIT
[@sinonjs/commons@3.0.1](https://github.com/sinonjs/commons) - BSD-3-Clause
[@sinonjs/fake-timers@10.3.0](https://github.com/sinonjs/fake-timers) - BSD-3-Clause
[@smithy/abort-controller@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/abort-controller@3.1.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/chunked-blob-reader-native@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/chunked-blob-reader@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/config-resolver@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/config-resolver@3.0.3](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/core@1.4.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/core@2.2.3](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/credential-provider-imds@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/credential-provider-imds@3.1.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/eventstream-codec@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/eventstream-serde-browser@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/eventstream-serde-config-resolver@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/eventstream-serde-node@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/eventstream-serde-universal@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/fetch-http-handler@2.5.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/fetch-http-handler@3.1.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/hash-blob-browser@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/hash-node@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/hash-node@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/hash-stream-node@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/invalid-dependency@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/invalid-dependency@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/is-array-buffer@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/is-array-buffer@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/md5-js@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/middleware-content-length@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/middleware-content-length@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/middleware-endpoint@2.5.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/middleware-endpoint@3.0.3](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/middleware-retry@2.3.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/middleware-retry@3.0.6](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/middleware-serde@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/middleware-serde@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/middleware-stack@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/middleware-stack@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/node-config-provider@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/node-config-provider@3.1.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/node-http-handler@2.5.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/node-http-handler@3.1.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/property-provider@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/property-provider@3.1.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/protocol-http@3.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/protocol-http@4.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/querystring-builder@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/querystring-builder@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/querystring-parser@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
[@smithy/querystring-parser@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/service-error-classification@2.1.5](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/service-error-classification@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/shared-ini-file-loader@2.4.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/shared-ini-file-loader@3.1.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/signature-v4@2.2.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/signature-v4@3.1.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/smithy-client@2.5.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/smithy-client@3.1.4](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/types@2.12.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/types@3.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/url-parser@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/url-parser@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-base64@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-base64@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-body-length-browser@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-body-length-browser@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-body-length-node@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-body-length-node@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-buffer-from@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-buffer-from@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-config-provider@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-config-provider@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-defaults-mode-browser@2.2.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-defaults-mode-browser@3.0.6](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-defaults-mode-node@2.3.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-defaults-mode-node@3.0.6](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-endpoints@1.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-endpoints@2.0.3](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-hex-encoding@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-hex-encoding@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-middleware@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-middleware@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-retry@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-retry@3.0.2](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-stream@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-stream@3.0.4](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-uri-escape@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-uri-escape@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-utf8@2.3.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-utf8@3.0.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-waiter@2.2.0](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@smithy/util-waiter@3.1.1](https://github.com/awslabs/smithy-typescript) - Apache-2.0
|
||||
[@swc/helpers@0.5.12](https://github.com/swc-project/swc) - Apache-2.0
|
||||
[@types/axios@0.14.0](https://github.com/mzabriskie/axios) - MIT
|
||||
[@types/babel__core@7.20.5](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/babel__generator@7.6.8](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/babel__template@7.4.4](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/babel__traverse@7.20.5](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/command-line-args@5.2.3](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/command-line-usage@5.0.2](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/command-line-usage@5.0.4](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/graceful-fs@4.1.9](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/hast@3.0.4](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/istanbul-lib-coverage@2.0.6](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/istanbul-lib-report@3.0.3](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/istanbul-reports@3.0.4](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/jest@29.5.12](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/json-schema@7.0.15](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/node-fetch@2.6.11](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/node@18.19.26](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/node@20.16.10](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/node@20.17.9](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/node@22.7.4](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/semver@7.5.6](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/stack-utils@2.0.3](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/tmp@0.2.6](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/unist@3.0.2](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/yargs-parser@21.0.3](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@types/yargs@17.0.32](https://github.com/DefinitelyTyped/DefinitelyTyped) - MIT
|
||||
[@typescript-eslint/eslint-plugin@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
|
||||
[@typescript-eslint/parser@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - BSD-2-Clause
|
||||
[@typescript-eslint/scope-manager@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
|
||||
[@typescript-eslint/type-utils@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
|
||||
[@typescript-eslint/types@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
|
||||
[@typescript-eslint/typescript-estree@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - BSD-2-Clause
|
||||
[@typescript-eslint/utils@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
|
||||
[@typescript-eslint/visitor-keys@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
|
||||
[@ungap/structured-clone@1.2.0](https://github.com/ungap/structured-clone) - ISC
|
||||
[abort-controller@3.0.0](https://github.com/mysticatea/abort-controller) - MIT
|
||||
[acorn-jsx@5.3.2](https://github.com/acornjs/acorn-jsx) - MIT
|
||||
[acorn@8.11.3](https://github.com/acornjs/acorn) - MIT
|
||||
[agentkeepalive@4.5.0](https://github.com/node-modules/agentkeepalive) - MIT
|
||||
[ajv@6.12.6](https://github.com/ajv-validator/ajv) - MIT
|
||||
[ansi-escapes@4.3.2](https://github.com/sindresorhus/ansi-escapes) - MIT
|
||||
[ansi-regex@5.0.1](https://github.com/chalk/ansi-regex) - MIT
|
||||
[ansi-regex@6.1.0](https://github.com/chalk/ansi-regex) - MIT
|
||||
[ansi-styles@4.3.0](https://github.com/chalk/ansi-styles) - MIT
|
||||
[ansi-styles@5.2.0](https://github.com/chalk/ansi-styles) - MIT
|
||||
[ansi-styles@6.2.1](https://github.com/chalk/ansi-styles) - MIT
|
||||
[anymatch@3.1.3](https://github.com/micromatch/anymatch) - ISC
|
||||
[apache-arrow@15.0.0](https://github.com/apache/arrow) - Apache-2.0
|
||||
[apache-arrow@16.0.0](https://github.com/apache/arrow) - Apache-2.0
|
||||
[apache-arrow@17.0.0](https://github.com/apache/arrow) - Apache-2.0
|
||||
[apache-arrow@18.0.0](https://github.com/apache/arrow) - Apache-2.0
|
||||
[argparse@1.0.10](https://github.com/nodeca/argparse) - MIT
|
||||
[argparse@2.0.1](https://github.com/nodeca/argparse) - Python-2.0
|
||||
[array-back@3.1.0](https://github.com/75lb/array-back) - MIT
|
||||
[array-back@6.2.2](https://github.com/75lb/array-back) - MIT
|
||||
[array-union@2.1.0](https://github.com/sindresorhus/array-union) - MIT
|
||||
[asynckit@0.4.0](https://github.com/alexindigo/asynckit) - MIT
|
||||
[axios@1.8.4](https://github.com/axios/axios) - MIT
|
||||
[babel-jest@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[babel-plugin-istanbul@6.1.1](https://github.com/istanbuljs/babel-plugin-istanbul) - BSD-3-Clause
|
||||
[babel-plugin-jest-hoist@29.6.3](https://github.com/jestjs/jest) - MIT
|
||||
[babel-preset-current-node-syntax@1.0.1](https://github.com/nicolo-ribaudo/babel-preset-current-node-syntax) - MIT
|
||||
[babel-preset-jest@29.6.3](https://github.com/jestjs/jest) - MIT
|
||||
[balanced-match@1.0.2](https://github.com/juliangruber/balanced-match) - MIT
|
||||
[base-64@0.1.0](https://github.com/mathiasbynens/base64) - MIT
|
||||
[bowser@2.11.0](https://github.com/lancedikson/bowser) - MIT
|
||||
[brace-expansion@1.1.11](https://github.com/juliangruber/brace-expansion) - MIT
|
||||
[brace-expansion@2.0.1](https://github.com/juliangruber/brace-expansion) - MIT
|
||||
[braces@3.0.3](https://github.com/micromatch/braces) - MIT
|
||||
[browserslist@4.22.2](https://github.com/browserslist/browserslist) - MIT
|
||||
[bs-logger@0.2.6](https://github.com/huafu/bs-logger) - MIT
|
||||
[bser@2.1.1](https://github.com/facebook/watchman) - Apache-2.0
|
||||
[buffer-from@1.1.2](https://github.com/LinusU/buffer-from) - MIT
|
||||
[callsites@3.1.0](https://github.com/sindresorhus/callsites) - MIT
|
||||
[camelcase@5.3.1](https://github.com/sindresorhus/camelcase) - MIT
|
||||
[camelcase@6.3.0](https://github.com/sindresorhus/camelcase) - MIT
|
||||
[caniuse-lite@1.0.30001579](https://github.com/browserslist/caniuse-lite) - CC-BY-4.0
|
||||
[chalk-template@0.4.0](https://github.com/chalk/chalk-template) - MIT
|
||||
[chalk@4.1.2](https://github.com/chalk/chalk) - MIT
|
||||
[char-regex@1.0.2](https://github.com/Richienb/char-regex) - MIT
|
||||
[charenc@0.0.2](https://github.com/pvorb/node-charenc) - BSD-3-Clause
|
||||
[chownr@3.0.0](https://github.com/isaacs/chownr) - BlueOak-1.0.0
|
||||
[ci-info@3.9.0](https://github.com/watson/ci-info) - MIT
|
||||
[cjs-module-lexer@1.2.3](https://github.com/nodejs/cjs-module-lexer) - MIT
|
||||
[cliui@8.0.1](https://github.com/yargs/cliui) - ISC
|
||||
[co@4.6.0](https://github.com/tj/co) - MIT
|
||||
[collect-v8-coverage@1.0.2](https://github.com/SimenB/collect-v8-coverage) - MIT
|
||||
[color-convert@2.0.1](https://github.com/Qix-/color-convert) - MIT
|
||||
[color-name@1.1.4](https://github.com/colorjs/color-name) - MIT
|
||||
[color-string@1.9.1](https://github.com/Qix-/color-string) - MIT
|
||||
[color@4.2.3](https://github.com/Qix-/color) - MIT
|
||||
[combined-stream@1.0.8](https://github.com/felixge/node-combined-stream) - MIT
|
||||
[command-line-args@5.2.1](https://github.com/75lb/command-line-args) - MIT
|
||||
[command-line-usage@7.0.1](https://github.com/75lb/command-line-usage) - MIT
|
||||
[concat-map@0.0.1](https://github.com/substack/node-concat-map) - MIT
|
||||
[convert-source-map@2.0.0](https://github.com/thlorenz/convert-source-map) - MIT
|
||||
[create-jest@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[cross-spawn@7.0.6](https://github.com/moxystudio/node-cross-spawn) - MIT
|
||||
[crypt@0.0.2](https://github.com/pvorb/node-crypt) - BSD-3-Clause
|
||||
[debug@4.3.4](https://github.com/debug-js/debug) - MIT
|
||||
[dedent@1.5.1](https://github.com/dmnd/dedent) - MIT
|
||||
[deep-is@0.1.4](https://github.com/thlorenz/deep-is) - MIT
|
||||
[deepmerge@4.3.1](https://github.com/TehShrike/deepmerge) - MIT
|
||||
[delayed-stream@1.0.0](https://github.com/felixge/node-delayed-stream) - MIT
|
||||
[detect-libc@2.0.3](https://github.com/lovell/detect-libc) - Apache-2.0
|
||||
[detect-newline@3.1.0](https://github.com/sindresorhus/detect-newline) - MIT
|
||||
[diff-sequences@29.6.3](https://github.com/jestjs/jest) - MIT
|
||||
[digest-fetch@1.3.0](https://github.com/devfans/digest-fetch) - ISC
|
||||
[dir-glob@3.0.1](https://github.com/kevva/dir-glob) - MIT
|
||||
[doctrine@3.0.0](https://github.com/eslint/doctrine) - Apache-2.0
|
||||
[eastasianwidth@0.2.0](https://github.com/komagata/eastasianwidth) - MIT
|
||||
[electron-to-chromium@1.4.642](https://github.com/kilian/electron-to-chromium) - ISC
|
||||
[emittery@0.13.1](https://github.com/sindresorhus/emittery) - MIT
|
||||
[emoji-regex@8.0.0](https://github.com/mathiasbynens/emoji-regex) - MIT
|
||||
[emoji-regex@9.2.2](https://github.com/mathiasbynens/emoji-regex) - MIT
|
||||
[entities@4.5.0](https://github.com/fb55/entities) - BSD-2-Clause
|
||||
[error-ex@1.3.2](https://github.com/qix-/node-error-ex) - MIT
|
||||
[escalade@3.1.1](https://github.com/lukeed/escalade) - MIT
|
||||
[escape-string-regexp@2.0.0](https://github.com/sindresorhus/escape-string-regexp) - MIT
|
||||
[escape-string-regexp@4.0.0](https://github.com/sindresorhus/escape-string-regexp) - MIT
|
||||
[eslint-scope@7.2.2](https://github.com/eslint/eslint-scope) - BSD-2-Clause
|
||||
[eslint-visitor-keys@3.4.3](https://github.com/eslint/eslint-visitor-keys) - Apache-2.0
|
||||
[eslint@8.57.0](https://github.com/eslint/eslint) - MIT
|
||||
[espree@9.6.1](https://github.com/eslint/espree) - BSD-2-Clause
|
||||
[esprima@4.0.1](https://github.com/jquery/esprima) - BSD-2-Clause
|
||||
[esquery@1.5.0](https://github.com/estools/esquery) - BSD-3-Clause
|
||||
[esrecurse@4.3.0](https://github.com/estools/esrecurse) - BSD-2-Clause
|
||||
[estraverse@5.3.0](https://github.com/estools/estraverse) - BSD-2-Clause
|
||||
[esutils@2.0.3](https://github.com/estools/esutils) - BSD-2-Clause
|
||||
[event-target-shim@5.0.1](https://github.com/mysticatea/event-target-shim) - MIT
|
||||
[execa@5.1.1](https://github.com/sindresorhus/execa) - MIT
|
||||
[exit@0.1.2](https://github.com/cowboy/node-exit) - MIT
|
||||
[expect@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[fast-deep-equal@3.1.3](https://github.com/epoberezkin/fast-deep-equal) - MIT
|
||||
[fast-glob@3.3.2](https://github.com/mrmlnc/fast-glob) - MIT
|
||||
[fast-json-stable-stringify@2.1.0](https://github.com/epoberezkin/fast-json-stable-stringify) - MIT
|
||||
[fast-levenshtein@2.0.6](https://github.com/hiddentao/fast-levenshtein) - MIT
|
||||
[fast-xml-parser@4.2.5](https://github.com/NaturalIntelligence/fast-xml-parser) - MIT
|
||||
[fastq@1.16.0](https://github.com/mcollina/fastq) - ISC
|
||||
[fb-watchman@2.0.2](https://github.com/facebook/watchman) - Apache-2.0
|
||||
[file-entry-cache@6.0.1](https://github.com/royriojas/file-entry-cache) - MIT
|
||||
[fill-range@7.1.1](https://github.com/jonschlinkert/fill-range) - MIT
|
||||
[find-replace@3.0.0](https://github.com/75lb/find-replace) - MIT
|
||||
[find-up@4.1.0](https://github.com/sindresorhus/find-up) - MIT
|
||||
[find-up@5.0.0](https://github.com/sindresorhus/find-up) - MIT
|
||||
[flat-cache@3.2.0](https://github.com/jaredwray/flat-cache) - MIT
|
||||
[flatbuffers@1.12.0](https://github.com/google/flatbuffers) - Apache*
|
||||
[flatbuffers@23.5.26](https://github.com/google/flatbuffers) - Apache*
|
||||
[flatbuffers@24.3.25](https://github.com/google/flatbuffers) - Apache-2.0
|
||||
[flatted@3.2.9](https://github.com/WebReflection/flatted) - ISC
|
||||
[follow-redirects@1.15.6](https://github.com/follow-redirects/follow-redirects) - MIT
|
||||
[foreground-child@3.3.0](https://github.com/tapjs/foreground-child) - ISC
|
||||
[form-data-encoder@1.7.2](https://github.com/octet-stream/form-data-encoder) - MIT
|
||||
[form-data@4.0.0](https://github.com/form-data/form-data) - MIT
|
||||
[formdata-node@4.4.1](https://github.com/octet-stream/form-data) - MIT
|
||||
[fs.realpath@1.0.0](https://github.com/isaacs/fs.realpath) - ISC
|
||||
[fsevents@2.3.3](https://github.com/fsevents/fsevents) - MIT
|
||||
[function-bind@1.1.2](https://github.com/Raynos/function-bind) - MIT
|
||||
[gensync@1.0.0-beta.2](https://github.com/loganfsmyth/gensync) - MIT
|
||||
[get-caller-file@2.0.5](https://github.com/stefanpenner/get-caller-file) - ISC
|
||||
[get-package-type@0.1.0](https://github.com/cfware/get-package-type) - MIT
|
||||
[get-stream@6.0.1](https://github.com/sindresorhus/get-stream) - MIT
|
||||
[glob-parent@5.1.2](https://github.com/gulpjs/glob-parent) - ISC
|
||||
[glob-parent@6.0.2](https://github.com/gulpjs/glob-parent) - ISC
|
||||
[glob@10.4.5](https://github.com/isaacs/node-glob) - ISC
|
||||
[glob@7.2.3](https://github.com/isaacs/node-glob) - ISC
|
||||
[globals@11.12.0](https://github.com/sindresorhus/globals) - MIT
|
||||
[globals@13.24.0](https://github.com/sindresorhus/globals) - MIT
|
||||
[globby@11.1.0](https://github.com/sindresorhus/globby) - MIT
|
||||
[graceful-fs@4.2.11](https://github.com/isaacs/node-graceful-fs) - ISC
|
||||
[graphemer@1.4.0](https://github.com/flmnt/graphemer) - MIT
|
||||
[guid-typescript@1.0.9](https://github.com/NicolasDeveloper/guid-typescript) - ISC
|
||||
[has-flag@4.0.0](https://github.com/sindresorhus/has-flag) - MIT
|
||||
[hasown@2.0.0](https://github.com/inspect-js/hasOwn) - MIT
|
||||
[html-escaper@2.0.2](https://github.com/WebReflection/html-escaper) - MIT
|
||||
[human-signals@2.1.0](https://github.com/ehmicky/human-signals) - Apache-2.0
|
||||
[humanize-ms@1.2.1](https://github.com/node-modules/humanize-ms) - MIT
|
||||
[ignore@5.3.0](https://github.com/kaelzhang/node-ignore) - MIT
|
||||
[import-fresh@3.3.0](https://github.com/sindresorhus/import-fresh) - MIT
|
||||
[import-local@3.1.0](https://github.com/sindresorhus/import-local) - MIT
|
||||
[imurmurhash@0.1.4](https://github.com/jensyt/imurmurhash-js) - MIT
|
||||
[inflight@1.0.6](https://github.com/npm/inflight) - ISC
|
||||
[inherits@2.0.4](https://github.com/isaacs/inherits) - ISC
|
||||
[interpret@1.4.0](https://github.com/gulpjs/interpret) - MIT
|
||||
[is-arrayish@0.2.1](https://github.com/qix-/node-is-arrayish) - MIT
|
||||
[is-arrayish@0.3.2](https://github.com/qix-/node-is-arrayish) - MIT
|
||||
[is-buffer@1.1.6](https://github.com/feross/is-buffer) - MIT
|
||||
[is-core-module@2.13.1](https://github.com/inspect-js/is-core-module) - MIT
|
||||
[is-extglob@2.1.1](https://github.com/jonschlinkert/is-extglob) - MIT
|
||||
[is-fullwidth-code-point@3.0.0](https://github.com/sindresorhus/is-fullwidth-code-point) - MIT
|
||||
[is-generator-fn@2.1.0](https://github.com/sindresorhus/is-generator-fn) - MIT
|
||||
[is-glob@4.0.3](https://github.com/micromatch/is-glob) - MIT
|
||||
[is-number@7.0.0](https://github.com/jonschlinkert/is-number) - MIT
|
||||
[is-path-inside@3.0.3](https://github.com/sindresorhus/is-path-inside) - MIT
|
||||
[is-stream@2.0.1](https://github.com/sindresorhus/is-stream) - MIT
|
||||
[isexe@2.0.0](https://github.com/isaacs/isexe) - ISC
|
||||
[istanbul-lib-coverage@3.2.2](https://github.com/istanbuljs/istanbuljs) - BSD-3-Clause
|
||||
[istanbul-lib-instrument@5.2.1](https://github.com/istanbuljs/istanbuljs) - BSD-3-Clause
|
||||
[istanbul-lib-instrument@6.0.1](https://github.com/istanbuljs/istanbuljs) - BSD-3-Clause
|
||||
[istanbul-lib-report@3.0.1](https://github.com/istanbuljs/istanbuljs) - BSD-3-Clause
|
||||
[istanbul-lib-source-maps@4.0.1](https://github.com/istanbuljs/istanbuljs) - BSD-3-Clause
|
||||
[istanbul-reports@3.1.6](https://github.com/istanbuljs/istanbuljs) - BSD-3-Clause
|
||||
[jackspeak@3.4.3](https://github.com/isaacs/jackspeak) - BlueOak-1.0.0
|
||||
[jest-changed-files@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-circus@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-cli@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-config@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-diff@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-docblock@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-each@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-environment-node@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-get-type@29.6.3](https://github.com/jestjs/jest) - MIT
|
||||
[jest-haste-map@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-leak-detector@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-matcher-utils@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-message-util@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-mock@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-pnp-resolver@1.2.3](https://github.com/arcanis/jest-pnp-resolver) - MIT
|
||||
[jest-regex-util@29.6.3](https://github.com/jestjs/jest) - MIT
|
||||
[jest-resolve-dependencies@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-resolve@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-runner@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-runtime@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-snapshot@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-util@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-validate@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-watcher@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest-worker@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[jest@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[js-tokens@4.0.0](https://github.com/lydell/js-tokens) - MIT
|
||||
[js-yaml@3.14.1](https://github.com/nodeca/js-yaml) - MIT
|
||||
[js-yaml@4.1.0](https://github.com/nodeca/js-yaml) - MIT
|
||||
[jsesc@2.5.2](https://github.com/mathiasbynens/jsesc) - MIT
|
||||
[json-bignum@0.0.3](https://github.com/datalanche/json-bignum) - MIT
|
||||
[json-buffer@3.0.1](https://github.com/dominictarr/json-buffer) - MIT
|
||||
[json-parse-even-better-errors@2.3.1](https://github.com/npm/json-parse-even-better-errors) - MIT
|
||||
[json-schema-traverse@0.4.1](https://github.com/epoberezkin/json-schema-traverse) - MIT
|
||||
[json-stable-stringify-without-jsonify@1.0.1](https://github.com/samn/json-stable-stringify) - MIT
|
||||
[json5@2.2.3](https://github.com/json5/json5) - MIT
|
||||
[keyv@4.5.4](https://github.com/jaredwray/keyv) - MIT
|
||||
[kleur@3.0.3](https://github.com/lukeed/kleur) - MIT
|
||||
[leven@3.1.0](https://github.com/sindresorhus/leven) - MIT
|
||||
[levn@0.4.1](https://github.com/gkz/levn) - MIT
|
||||
[lines-and-columns@1.2.4](https://github.com/eventualbuddha/lines-and-columns) - MIT
|
||||
[linkify-it@5.0.0](https://github.com/markdown-it/linkify-it) - MIT
|
||||
[locate-path@5.0.0](https://github.com/sindresorhus/locate-path) - MIT
|
||||
[locate-path@6.0.0](https://github.com/sindresorhus/locate-path) - MIT
|
||||
[lodash.camelcase@4.3.0](https://github.com/lodash/lodash) - MIT
|
||||
[lodash.memoize@4.1.2](https://github.com/lodash/lodash) - MIT
|
||||
[lodash.merge@4.6.2](https://github.com/lodash/lodash) - MIT
|
||||
[lodash@4.17.21](https://github.com/lodash/lodash) - MIT
|
||||
[long@5.2.3](https://github.com/dcodeIO/long.js) - Apache-2.0
|
||||
[lru-cache@10.4.3](https://github.com/isaacs/node-lru-cache) - ISC
|
||||
[lru-cache@5.1.1](https://github.com/isaacs/node-lru-cache) - ISC
|
||||
[lunr@2.3.9](https://github.com/olivernn/lunr.js) - MIT
|
||||
[make-dir@4.0.0](https://github.com/sindresorhus/make-dir) - MIT
|
||||
[make-error@1.3.6](https://github.com/JsCommunity/make-error) - ISC
|
||||
[makeerror@1.0.12](https://github.com/daaku/nodejs-makeerror) - BSD-3-Clause
|
||||
[markdown-it@14.1.0](https://github.com/markdown-it/markdown-it) - MIT
|
||||
[md5@2.3.0](https://github.com/pvorb/node-md5) - BSD-3-Clause
|
||||
[mdurl@2.0.0](https://github.com/markdown-it/mdurl) - MIT
|
||||
[merge-stream@2.0.0](https://github.com/grncdr/merge-stream) - MIT
|
||||
[merge2@1.4.1](https://github.com/teambition/merge2) - MIT
|
||||
[micromatch@4.0.8](https://github.com/micromatch/micromatch) - MIT
|
||||
[mime-db@1.52.0](https://github.com/jshttp/mime-db) - MIT
|
||||
[mime-types@2.1.35](https://github.com/jshttp/mime-types) - MIT
|
||||
[mimic-fn@2.1.0](https://github.com/sindresorhus/mimic-fn) - MIT
|
||||
[minimatch@3.1.2](https://github.com/isaacs/minimatch) - ISC
|
||||
[minimatch@9.0.3](https://github.com/isaacs/minimatch) - ISC
|
||||
[minimatch@9.0.5](https://github.com/isaacs/minimatch) - ISC
|
||||
[minimist@1.2.8](https://github.com/minimistjs/minimist) - MIT
|
||||
[minipass@7.1.2](https://github.com/isaacs/minipass) - ISC
|
||||
[minizlib@3.0.1](https://github.com/isaacs/minizlib) - MIT
|
||||
[mkdirp@3.0.1](https://github.com/isaacs/node-mkdirp) - MIT
|
||||
[mnemonist@0.38.3](https://github.com/yomguithereal/mnemonist) - MIT
|
||||
[ms@2.1.2](https://github.com/zeit/ms) - MIT
|
||||
[ms@2.1.3](https://github.com/vercel/ms) - MIT
|
||||
[natural-compare@1.4.0](https://github.com/litejs/natural-compare-lite) - MIT
|
||||
[node-domexception@1.0.0](https://github.com/jimmywarting/node-domexception) - MIT
|
||||
[node-fetch@2.7.0](https://github.com/bitinn/node-fetch) - MIT
|
||||
[node-int64@0.4.0](https://github.com/broofa/node-int64) - MIT
|
||||
[node-releases@2.0.14](https://github.com/chicoxyzzy/node-releases) - MIT
|
||||
[normalize-path@3.0.0](https://github.com/jonschlinkert/normalize-path) - MIT
|
||||
[npm-run-path@4.0.1](https://github.com/sindresorhus/npm-run-path) - MIT
|
||||
[obliterator@1.6.1](https://github.com/yomguithereal/obliterator) - MIT
|
||||
[once@1.4.0](https://github.com/isaacs/once) - ISC
|
||||
[onetime@5.1.2](https://github.com/sindresorhus/onetime) - MIT
|
||||
[onnxruntime-common@1.19.2](https://github.com/Microsoft/onnxruntime) - MIT
|
||||
[onnxruntime-common@1.20.0-dev.20241016-2b8fc5529b](https://github.com/Microsoft/onnxruntime) - MIT
|
||||
[onnxruntime-node@1.19.2](https://github.com/Microsoft/onnxruntime) - MIT
|
||||
[onnxruntime-web@1.21.0-dev.20241024-d9ca84ef96](https://github.com/Microsoft/onnxruntime) - MIT
|
||||
[openai@4.29.2](https://github.com/openai/openai-node) - Apache-2.0
|
||||
[optionator@0.9.3](https://github.com/gkz/optionator) - MIT
|
||||
[p-limit@2.3.0](https://github.com/sindresorhus/p-limit) - MIT
|
||||
[p-limit@3.1.0](https://github.com/sindresorhus/p-limit) - MIT
|
||||
[p-locate@4.1.0](https://github.com/sindresorhus/p-locate) - MIT
|
||||
[p-locate@5.0.0](https://github.com/sindresorhus/p-locate) - MIT
|
||||
[p-try@2.2.0](https://github.com/sindresorhus/p-try) - MIT
|
||||
[package-json-from-dist@1.0.1](https://github.com/isaacs/package-json-from-dist) - BlueOak-1.0.0
|
||||
[parent-module@1.0.1](https://github.com/sindresorhus/parent-module) - MIT
|
||||
[parse-json@5.2.0](https://github.com/sindresorhus/parse-json) - MIT
|
||||
[path-exists@4.0.0](https://github.com/sindresorhus/path-exists) - MIT
|
||||
[path-is-absolute@1.0.1](https://github.com/sindresorhus/path-is-absolute) - MIT
|
||||
[path-key@3.1.1](https://github.com/sindresorhus/path-key) - MIT
|
||||
[path-parse@1.0.7](https://github.com/jbgutierrez/path-parse) - MIT
|
||||
[path-scurry@1.11.1](https://github.com/isaacs/path-scurry) - BlueOak-1.0.0
|
||||
[path-type@4.0.0](https://github.com/sindresorhus/path-type) - MIT
|
||||
[picocolors@1.0.0](https://github.com/alexeyraspopov/picocolors) - ISC
|
||||
[picomatch@2.3.1](https://github.com/micromatch/picomatch) - MIT
|
||||
[pirates@4.0.6](https://github.com/danez/pirates) - MIT
|
||||
[pkg-dir@4.2.0](https://github.com/sindresorhus/pkg-dir) - MIT
|
||||
[platform@1.3.6](https://github.com/bestiejs/platform.js) - MIT
|
||||
[prelude-ls@1.2.1](https://github.com/gkz/prelude-ls) - MIT
|
||||
[pretty-format@29.7.0](https://github.com/jestjs/jest) - MIT
|
||||
[prompts@2.4.2](https://github.com/terkelg/prompts) - MIT
|
||||
[protobufjs@7.4.0](https://github.com/protobufjs/protobuf.js) - BSD-3-Clause
|
||||
[proxy-from-env@1.1.0](https://github.com/Rob--W/proxy-from-env) - MIT
|
||||
[punycode.js@2.3.1](https://github.com/mathiasbynens/punycode.js) - MIT
|
||||
[punycode@2.3.1](https://github.com/mathiasbynens/punycode.js) - MIT
|
||||
[pure-rand@6.0.4](https://github.com/dubzzz/pure-rand) - MIT
|
||||
[queue-microtask@1.2.3](https://github.com/feross/queue-microtask) - MIT
|
||||
[react-is@18.2.0](https://github.com/facebook/react) - MIT
|
||||
[rechoir@0.6.2](https://github.com/tkellen/node-rechoir) - MIT
|
||||
[reflect-metadata@0.2.2](https://github.com/rbuckton/reflect-metadata) - Apache-2.0
|
||||
[require-directory@2.1.1](https://github.com/troygoode/node-require-directory) - MIT
|
||||
[resolve-cwd@3.0.0](https://github.com/sindresorhus/resolve-cwd) - MIT
|
||||
[resolve-from@4.0.0](https://github.com/sindresorhus/resolve-from) - MIT
|
||||
[resolve-from@5.0.0](https://github.com/sindresorhus/resolve-from) - MIT
|
||||
[resolve.exports@2.0.2](https://github.com/lukeed/resolve.exports) - MIT
|
||||
[resolve@1.22.8](https://github.com/browserify/resolve) - MIT
|
||||
[reusify@1.0.4](https://github.com/mcollina/reusify) - MIT
|
||||
[rimraf@3.0.2](https://github.com/isaacs/rimraf) - ISC
|
||||
[rimraf@5.0.10](https://github.com/isaacs/rimraf) - ISC
|
||||
[run-parallel@1.2.0](https://github.com/feross/run-parallel) - MIT
|
||||
[semver@6.3.1](https://github.com/npm/node-semver) - ISC
|
||||
[semver@7.6.3](https://github.com/npm/node-semver) - ISC
|
||||
[sharp@0.33.5](https://github.com/lovell/sharp) - Apache-2.0
|
||||
[shebang-command@2.0.0](https://github.com/kevva/shebang-command) - MIT
|
||||
[shebang-regex@3.0.0](https://github.com/sindresorhus/shebang-regex) - MIT
|
||||
[shelljs@0.8.5](https://github.com/shelljs/shelljs) - BSD-3-Clause
|
||||
[shiki@1.10.3](https://github.com/shikijs/shiki) - MIT
|
||||
[shx@0.3.4](https://github.com/shelljs/shx) - MIT
|
||||
[signal-exit@3.0.7](https://github.com/tapjs/signal-exit) - ISC
|
||||
[signal-exit@4.1.0](https://github.com/tapjs/signal-exit) - ISC
|
||||
[simple-swizzle@0.2.2](https://github.com/qix-/node-simple-swizzle) - MIT
|
||||
[sisteransi@1.0.5](https://github.com/terkelg/sisteransi) - MIT
|
||||
[slash@3.0.0](https://github.com/sindresorhus/slash) - MIT
|
||||
[source-map-support@0.5.13](https://github.com/evanw/node-source-map-support) - MIT
|
||||
[source-map@0.6.1](https://github.com/mozilla/source-map) - BSD-3-Clause
|
||||
[sprintf-js@1.0.3](https://github.com/alexei/sprintf.js) - BSD-3-Clause
|
||||
[stack-utils@2.0.6](https://github.com/tapjs/stack-utils) - MIT
|
||||
[stream-read-all@3.0.1](https://github.com/75lb/stream-read-all) - MIT
|
||||
[string-length@4.0.2](https://github.com/sindresorhus/string-length) - MIT
|
||||
[string-width@4.2.3](https://github.com/sindresorhus/string-width) - MIT
|
||||
[string-width@5.1.2](https://github.com/sindresorhus/string-width) - MIT
|
||||
[strip-ansi@6.0.1](https://github.com/chalk/strip-ansi) - MIT
|
||||
[strip-ansi@7.1.0](https://github.com/chalk/strip-ansi) - MIT
|
||||
[strip-bom@4.0.0](https://github.com/sindresorhus/strip-bom) - MIT
|
||||
[strip-final-newline@2.0.0](https://github.com/sindresorhus/strip-final-newline) - MIT
|
||||
[strip-json-comments@3.1.1](https://github.com/sindresorhus/strip-json-comments) - MIT
|
||||
[strnum@1.0.5](https://github.com/NaturalIntelligence/strnum) - MIT
|
||||
[supports-color@7.2.0](https://github.com/chalk/supports-color) - MIT
|
||||
[supports-color@8.1.1](https://github.com/chalk/supports-color) - MIT
|
||||
[supports-preserve-symlinks-flag@1.0.0](https://github.com/inspect-js/node-supports-preserve-symlinks-flag) - MIT
|
||||
[table-layout@3.0.2](https://github.com/75lb/table-layout) - MIT
|
||||
[tar@7.4.3](https://github.com/isaacs/node-tar) - ISC
|
||||
[test-exclude@6.0.0](https://github.com/istanbuljs/test-exclude) - ISC
|
||||
[text-table@0.2.0](https://github.com/substack/text-table) - MIT
|
||||
[tmp@0.2.3](https://github.com/raszi/node-tmp) - MIT
|
||||
[tmpl@1.0.5](https://github.com/daaku/nodejs-tmpl) - BSD-3-Clause
|
||||
[to-regex-range@5.0.1](https://github.com/micromatch/to-regex-range) - MIT
|
||||
[tr46@0.0.3](https://github.com/Sebmaster/tr46.js) - MIT
|
||||
[ts-api-utils@1.0.3](https://github.com/JoshuaKGoldberg/ts-api-utils) - MIT
|
||||
[ts-jest@29.1.2](https://github.com/kulshekhar/ts-jest) - MIT
|
||||
[tslib@1.14.1](https://github.com/Microsoft/tslib) - 0BSD
|
||||
[tslib@2.6.2](https://github.com/Microsoft/tslib) - 0BSD
|
||||
[type-check@0.4.0](https://github.com/gkz/type-check) - MIT
|
||||
[type-detect@4.0.8](https://github.com/chaijs/type-detect) - MIT
|
||||
[type-fest@0.20.2](https://github.com/sindresorhus/type-fest) - (MIT OR CC0-1.0)
|
||||
[type-fest@0.21.3](https://github.com/sindresorhus/type-fest) - (MIT OR CC0-1.0)
|
||||
[typedoc-plugin-markdown@4.2.1](https://github.com/typedoc2md/typedoc-plugin-markdown) - MIT
|
||||
[typedoc@0.26.4](https://github.com/TypeStrong/TypeDoc) - Apache-2.0
|
||||
[typescript-eslint@7.1.0](https://github.com/typescript-eslint/typescript-eslint) - MIT
|
||||
[typescript@5.5.4](https://github.com/Microsoft/TypeScript) - Apache-2.0
|
||||
[typical@4.0.0](https://github.com/75lb/typical) - MIT
|
||||
[typical@7.1.1](https://github.com/75lb/typical) - MIT
|
||||
[uc.micro@2.1.0](https://github.com/markdown-it/uc.micro) - MIT
|
||||
[undici-types@5.26.5](https://github.com/nodejs/undici) - MIT
|
||||
[undici-types@6.19.8](https://github.com/nodejs/undici) - MIT
|
||||
[update-browserslist-db@1.0.13](https://github.com/browserslist/update-db) - MIT
|
||||
[uri-js@4.4.1](https://github.com/garycourt/uri-js) - BSD-2-Clause
|
||||
[uuid@9.0.1](https://github.com/uuidjs/uuid) - MIT
|
||||
[v8-to-istanbul@9.2.0](https://github.com/istanbuljs/v8-to-istanbul) - ISC
|
||||
[walker@1.0.8](https://github.com/daaku/nodejs-walker) - Apache-2.0
|
||||
[web-streams-polyfill@3.3.3](https://github.com/MattiasBuelens/web-streams-polyfill) - MIT
|
||||
[web-streams-polyfill@4.0.0-beta.3](https://github.com/MattiasBuelens/web-streams-polyfill) - MIT
|
||||
[webidl-conversions@3.0.1](https://github.com/jsdom/webidl-conversions) - BSD-2-Clause
|
||||
[whatwg-url@5.0.0](https://github.com/jsdom/whatwg-url) - MIT
|
||||
[which@2.0.2](https://github.com/isaacs/node-which) - ISC
|
||||
[wordwrapjs@5.1.0](https://github.com/75lb/wordwrapjs) - MIT
|
||||
[wrap-ansi@7.0.0](https://github.com/chalk/wrap-ansi) - MIT
|
||||
[wrap-ansi@8.1.0](https://github.com/chalk/wrap-ansi) - MIT
|
||||
[wrappy@1.0.2](https://github.com/npm/wrappy) - ISC
|
||||
[write-file-atomic@4.0.2](https://github.com/npm/write-file-atomic) - ISC
|
||||
[y18n@5.0.8](https://github.com/yargs/y18n) - ISC
|
||||
[yallist@3.1.1](https://github.com/isaacs/yallist) - ISC
|
||||
[yallist@5.0.0](https://github.com/isaacs/yallist) - BlueOak-1.0.0
|
||||
[yaml@2.4.5](https://github.com/eemeli/yaml) - ISC
|
||||
[yargs-parser@21.1.1](https://github.com/yargs/yargs-parser) - ISC
|
||||
[yargs@17.7.2](https://github.com/yargs/yargs) - MIT
|
||||
[yocto-queue@0.1.0](https://github.com/sindresorhus/yocto-queue) - MIT
|
||||
nodejs/RUST_THIRD_PARTY_LICENSES.html (new file, 14607 lines)
File diff suppressed because it is too large.
@@ -312,6 +312,66 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
   expect(res.getChild("id")?.toJSON()).toEqual([2, 3]);
 });

+it("should support takeRowIds with bigint array", async () => {
+  await table.add([{ id: 1 }, { id: 2 }, { id: 3 }]);
+  // Get actual row IDs using withRowId()
+  const allRows = await table.query().withRowId().toArray();
+  const rowIds = allRows.map((row) => row._rowid) as bigint[];
+
+  // Verify row IDs are bigint
+  expect(typeof rowIds[0]).toBe("bigint");
+
+  // Use takeRowIds with bigint array (the main use case from issue #2722)
+  const res = await table.takeRowIds([rowIds[0], rowIds[2]]).toArray();
+  expect(res.map((r) => r.id)).toEqual([1, 3]);
+});
+
+it("should support takeRowIds with number array for backwards compatibility", async () => {
+  await table.add([{ id: 1 }, { id: 2 }, { id: 3 }]);
+  // Small row IDs can be passed as numbers
+  const res = await table.takeRowIds([0, 2]).toArray();
+  expect(res.map((r) => r.id)).toEqual([1, 3]);
+});
+
+it("should support takeRowIds with mixed bigint and number array", async () => {
+  await table.add([{ id: 1 }, { id: 2 }, { id: 3 }]);
+  // Mixed array of bigint and number
+  const res = await table.takeRowIds([0n, 1, 2n]).toArray();
+  expect(res.map((r) => r.id)).toEqual([1, 2, 3]);
+});
+
+it("should throw for non-integer number in takeRowIds", () => {
+  expect(() => table.takeRowIds([1.5])).toThrow(
+    "Row id must be an integer (or bigint)",
+  );
+  expect(() => table.takeRowIds([0, 1.1, 2])).toThrow(
+    "Row id must be an integer (or bigint)",
+  );
+});
+
+it("should throw for negative number in takeRowIds", () => {
+  expect(() => table.takeRowIds([-1])).toThrow("Row id cannot be negative");
+  expect(() => table.takeRowIds([0, -5, 2])).toThrow(
+    "Row id cannot be negative",
+  );
+});
+
+it("should throw for unsafe large number in takeRowIds", () => {
+  // Number.MAX_SAFE_INTEGER + 1 is not safe
+  const unsafeNumber = Number.MAX_SAFE_INTEGER + 1;
+  expect(() => table.takeRowIds([unsafeNumber])).toThrow(
+    "Row id is too large for number; use bigint instead",
+  );
+});
+
+it("should reject negative bigint in takeRowIds", async () => {
+  await table.add([{ id: 1 }]);
+  // Negative bigint should be rejected by the Rust layer
+  expect(() => {
+    table.takeRowIds([-1n]);
+  }).toThrow("Row id cannot be negative");
+});
+
 it("should return the table as an instance of an arrow table", async () => {
   const arrowTbl = await table.toArrow();
   expect(arrowTbl).toBeInstanceOf(ArrowTable);
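Taken together, these tests pin down the new `takeRowIds` surface: `bigint[]`, `number[]`, and mixed arrays are all accepted. A minimal end-to-end sketch of the same flow (the database path and table name here are illustrative, not taken from the diff):

```ts
import * as lancedb from "@lancedb/lancedb";

// Hypothetical local path; any writable directory works.
const db = await lancedb.connect("/tmp/take-row-ids-demo");
const table = await db.createTable("demo", [{ id: 1 }, { id: 2 }, { id: 3 }]);

// withRowId() exposes each row's stable row id as a bigint `_rowid` column.
const rows = await table.query().withRowId().toArray();
const rowIds = rows.map((row) => row._rowid) as bigint[];

// bigint, number, or a mix of both may be passed to takeRowIds.
const subset = await table.takeRowIds([rowIds[0], rowIds[2]]).toArray();
console.log(subset.map((r) => r.id)); // e.g. [1, 3]
```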
@@ -1520,9 +1580,9 @@ describe("when optimizing a dataset", () => {
   it("delete unverified", async () => {
     const version = await table.version();
-    const versionFile = `${tmpDir.name}/${table.name}.lance/_versions/${
-      version - 1
-    }.manifest`;
+    const versionFile = `${tmpDir.name}/${table.name}.lance/_versions/${String(
+      18446744073709551615n - (BigInt(version) - 1n),
+    ).padStart(20, "0")}.manifest`;
     fs.rmSync(versionFile);

     let stats = await table.optimize({ deleteUnverified: false });
@@ -347,9 +347,13 @@ export abstract class Table {
   /**
    * Create a query that returns a subset of the rows in the table.
    * @param rowIds The row ids of the rows to return.
+   *
+   * Row ids returned by `withRowId()` are `bigint`, so `bigint[]` is supported.
+   * For convenience / backwards compatibility, `number[]` is also accepted (for
+   * small row ids that fit in a safe integer).
    * @returns A builder that can be used to parameterize the query.
    */
-  abstract takeRowIds(rowIds: number[]): TakeQuery;
+  abstract takeRowIds(rowIds: readonly (bigint | number)[]): TakeQuery;

   /**
    * Create a search query to find the nearest neighbors
@@ -538,6 +542,35 @@ export abstract class Table {
    *
    */
   abstract stats(): Promise<TableStatistics>;

+  /**
+   * Get the initial storage options that were passed in when opening this table.
+   *
+   * For dynamically refreshed options (e.g., credential vending), use
+   * {@link Table.latestStorageOptions}.
+   *
+   * Warning: This is an internal API and the return value is subject to change.
+   *
+   * @returns The storage options, or undefined if no storage options were configured.
+   */
+  abstract initialStorageOptions(): Promise<
+    Record<string, string> | null | undefined
+  >;
+
+  /**
+   * Get the latest storage options, refreshing from provider if configured.
+   *
+   * This method is useful for credential vending scenarios where storage options
+   * may be refreshed dynamically. If no dynamic provider is configured, this
+   * returns the initial static options.
+   *
+   * Warning: This is an internal API and the return value is subject to change.
+   *
+   * @returns The storage options, or undefined if no storage options were configured.
+   */
+  abstract latestStorageOptions(): Promise<
+    Record<string, string> | null | undefined
+  >;
 }

 export class LocalTable extends Table {
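The two accessors are deliberately split: `initialStorageOptions` reflects what was passed at open time, while `latestStorageOptions` consults a dynamic provider when one is configured. A short sketch of how a caller might use them (continuing the hypothetical `table` from the earlier sketch; both methods are flagged internal, so treat the returned shape as unstable):

```ts
// Options captured when the table was opened (undefined if none were given).
const initial = await table.initialStorageOptions();

// Refreshed options; with a credential-vending provider these may rotate,
// otherwise this returns the same static options as `initial`.
const latest = await table.latestStorageOptions();

if (initial && latest) {
  // A difference here suggests credentials were re-vended since open time.
  console.log(JSON.stringify(initial) !== JSON.stringify(latest));
}
```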
@@ -686,8 +719,24 @@ export class LocalTable extends Table {
     return new TakeQuery(this.inner.takeOffsets(offsets));
   }

-  takeRowIds(rowIds: number[]): TakeQuery {
-    return new TakeQuery(this.inner.takeRowIds(rowIds));
+  takeRowIds(rowIds: readonly (bigint | number)[]): TakeQuery {
+    const ids = rowIds.map((id) => {
+      if (typeof id === "bigint") {
+        return id;
+      }
+      if (!Number.isInteger(id)) {
+        throw new Error("Row id must be an integer (or bigint)");
+      }
+      if (id < 0) {
+        throw new Error("Row id cannot be negative");
+      }
+      if (!Number.isSafeInteger(id)) {
+        throw new Error("Row id is too large for number; use bigint instead");
+      }
+      return BigInt(id);
+    });
+
+    return new TakeQuery(this.inner.takeRowIds(ids));
   }

   query(): Query {
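The conversion above accepts `bigint` untouched and promotes `number` only when it is a non-negative safe integer, so precision can never be lost silently. A small sketch of each branch (same hypothetical `table` as before; the throwing calls are wrapped so the snippet runs straight through):

```ts
table.takeRowIds([42n, 7]); // ok: 7 is a safe integer, converted with BigInt(7)

for (const bad of [1.5, -1, Number.MAX_SAFE_INTEGER + 1]) {
  try {
    table.takeRowIds([bad]);
  } catch (e) {
    // "Row id must be an integer (or bigint)" for 1.5,
    // "Row id cannot be negative" for -1,
    // "Row id is too large for number; use bigint instead" for 2^53
    console.log((e as Error).message);
  }
}
```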
@@ -858,6 +907,18 @@ export class LocalTable extends Table {
     return await this.inner.stats();
   }

+  async initialStorageOptions(): Promise<
+    Record<string, string> | null | undefined
+  > {
+    return await this.inner.initialStorageOptions();
+  }
+
+  async latestStorageOptions(): Promise<
+    Record<string, string> | null | undefined
+  > {
+    return await this.inner.latestStorageOptions();
+  }
+
   mergeInsert(on: string | string[]): MergeInsertBuilder {
     on = Array.isArray(on) ? on : [on];
     return new MergeInsertBuilder(this.inner.mergeInsert(on), this.schema());
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-arm64",
-  "version": "0.24.0",
+  "version": "0.26.2",
   "os": ["darwin"],
   "cpu": ["arm64"],
   "main": "lancedb.darwin-arm64.node",
@@ -8,5 +8,9 @@
   "license": "Apache-2.0",
   "engines": {
     "node": ">= 18"
-  }
+  },
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
+  }
 }

@@ -1,3 +0,0 @@
-# `@lancedb/lancedb-darwin-x64`
-
-This is the **x86_64-apple-darwin** binary for `@lancedb/lancedb`

@@ -1,12 +0,0 @@
-{
-  "name": "@lancedb/lancedb-darwin-x64",
-  "version": "0.24.0",
-  "os": ["darwin"],
-  "cpu": ["x64"],
-  "main": "lancedb.darwin-x64.node",
-  "files": ["lancedb.darwin-x64.node"],
-  "license": "Apache-2.0",
-  "engines": {
-    "node": ">= 18"
-  }
-}

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-gnu",
-  "version": "0.24.0",
+  "version": "0.26.2",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-gnu.node",
@@ -9,5 +9,9 @@
   "engines": {
     "node": ">= 18"
   },
-  "libc": ["glibc"]
+  "libc": ["glibc"],
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
+  }
 }

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-musl",
-  "version": "0.24.0",
+  "version": "0.26.2",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-musl.node",
@@ -9,5 +9,9 @@
   "engines": {
     "node": ">= 18"
   },
-  "libc": ["musl"]
+  "libc": ["musl"],
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
+  }
 }

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-gnu",
-  "version": "0.24.0",
+  "version": "0.26.2",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-gnu.node",
@@ -9,5 +9,9 @@
   "engines": {
     "node": ">= 18"
   },
-  "libc": ["glibc"]
+  "libc": ["glibc"],
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
+  }
 }

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-musl",
-  "version": "0.24.0",
+  "version": "0.26.2",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-musl.node",
@@ -9,5 +9,9 @@
   "engines": {
     "node": ">= 18"
   },
-  "libc": ["musl"]
+  "libc": ["musl"],
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
+  }
 }

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-arm64-msvc",
-  "version": "0.24.0",
+  "version": "0.26.2",
   "os": [
     "win32"
   ],
@@ -14,5 +14,9 @@
   "license": "Apache-2.0",
   "engines": {
     "node": ">= 18"
-  }
+  },
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
+  }
 }

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-x64-msvc",
-  "version": "0.24.0",
+  "version": "0.26.2",
   "os": ["win32"],
   "cpu": ["x64"],
   "main": "lancedb.win32-x64-msvc.node",
@@ -8,5 +8,9 @@
   "license": "Apache-2.0",
   "engines": {
     "node": ">= 18"
-  }
+  },
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
+  }
 }

nodejs/package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
 {
   "name": "@lancedb/lancedb",
-  "version": "0.24.0",
+  "version": "0.26.2",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@lancedb/lancedb",
-      "version": "0.24.0",
+      "version": "0.26.2",
       "cpu": [
         "x64",
         "arm64"

@@ -11,7 +11,7 @@
     "ann"
   ],
   "private": false,
-  "version": "0.24.0",
+  "version": "0.26.2",
   "main": "dist/index.js",
   "exports": {
     ".": "./dist/index.js",
@@ -25,7 +25,6 @@
   "triples": {
     "defaults": false,
     "additional": [
-      "x86_64-apple-darwin",
       "aarch64-apple-darwin",
       "x86_64-unknown-linux-gnu",
       "aarch64-unknown-linux-gnu",
@@ -37,6 +36,10 @@
     }
   },
   "license": "Apache-2.0",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/lancedb/lancedb"
+  },
   "devDependencies": {
     "@aws-sdk/client-dynamodb": "^3.33.0",
     "@aws-sdk/client-kms": "^3.33.0",
@@ -166,6 +166,19 @@ impl Table {
         Ok(stats.into())
     }

+    #[napi(catch_unwind)]
+    pub async fn initial_storage_options(&self) -> napi::Result<Option<HashMap<String, String>>> {
+        Ok(self.inner_ref()?.initial_storage_options().await)
+    }
+
+    #[napi(catch_unwind)]
+    pub async fn latest_storage_options(&self) -> napi::Result<Option<HashMap<String, String>>> {
+        self.inner_ref()?
+            .latest_storage_options()
+            .await
+            .default_error()
+    }
+
     #[napi(catch_unwind)]
     pub async fn update(
         &self,
@@ -208,18 +221,24 @@ impl Table {
     }

     #[napi(catch_unwind)]
-    pub fn take_row_ids(&self, row_ids: Vec<i64>) -> napi::Result<TakeQuery> {
+    pub fn take_row_ids(&self, row_ids: Vec<BigInt>) -> napi::Result<TakeQuery> {
         Ok(TakeQuery::new(
             self.inner_ref()?.take_row_ids(
                 row_ids
                     .into_iter()
-                    .map(|o| {
-                        u64::try_from(o).map_err(|e| {
-                            napi::Error::from_reason(format!(
-                                "Failed to convert row id to u64: {}",
-                                e
-                            ))
-                        })
-                    })
+                    .map(|id| {
+                        let (negative, value, lossless) = id.get_u64();
+                        if negative {
+                            Err(napi::Error::from_reason(
+                                "Row id cannot be negative".to_string(),
+                            ))
+                        } else if !lossless {
+                            Err(napi::Error::from_reason(
+                                "Row id is too large to fit in u64".to_string(),
+                            ))
+                        } else {
+                            Ok(value)
+                        }
+                    })
                     .collect::<Result<Vec<_>>>()?,
             ),
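On the Rust side, napi-rs's `BigInt::get_u64()` yields the sign, the low 64 bits, and a losslessness flag, so ids outside the `u64` range are rejected before they reach the query engine. Roughly what that means for a JS caller (a hedged sketch with hypothetical values; whether any row actually carries a given id is a separate question):

```ts
// u64::MAX (18446744073709551615n) still converts losslessly, so it is accepted.
table.takeRowIds([(1n << 64n) - 1n]);

try {
  table.takeRowIds([1n << 64n]); // 2^64 would need a 65th bit
} catch (e) {
  console.log((e as Error).message); // "Row id is too large to fit in u64"
}
```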
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.27.1"
+current_version = "0.29.2"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
@@ -16,7 +16,7 @@ The Python package is a wrapper around the Rust library, `lancedb`. We use
 To set up your development environment, you will need to install the following:

-1. Python 3.9 or later
+1. Python 3.10 or later
 2. Cargo (Rust's package manager). Use [rustup](https://rustup.rs/) to install.
 3. [protoc](https://grpc.io/docs/protoc-installation/) (Protocol Buffers compiler)
@@ -1,6 +1,6 @@
 [package]
 name = "lancedb-python"
-version = "0.27.1"
+version = "0.29.2"
 edition.workspace = true
 description = "Python bindings for LanceDB"
 license.workspace = true
@@ -14,15 +14,15 @@ name = "_lancedb"
 crate-type = ["cdylib"]

 [dependencies]
-arrow = { version = "56.2", features = ["pyarrow"] }
+arrow = { version = "57.2", features = ["pyarrow"] }
 async-trait = "0.1"
 lancedb = { path = "../rust/lancedb", default-features = false }
 lance-core.workspace = true
 lance-namespace.workspace = true
 lance-io.workspace = true
 env_logger.workspace = true
-pyo3 = { version = "0.25", features = ["extension-module", "abi3-py39"] }
-pyo3-async-runtimes = { version = "0.25", features = [
+pyo3 = { version = "0.26", features = ["extension-module", "abi3-py39"] }
+pyo3-async-runtimes = { version = "0.26", features = [
     "attributes",
     "tokio-runtime",
 ] }
@@ -32,7 +32,7 @@ snafu.workspace = true
 tokio = { version = "1.40", features = ["sync"] }

 [build-dependencies]
-pyo3-build-config = { version = "0.25", features = [
+pyo3-build-config = { version = "0.26", features = [
     "extension-module",
     "abi3-py39",
 ] }
python/PYTHON_THIRD_PARTY_LICENSES.md (new file, 206 lines)
@@ -0,0 +1,206 @@
| Name | Version | License | URL |
|------|---------|---------|-----|
| InstructorEmbedding | 1.0.1 | Apache License 2.0 | https://github.com/HKUNLP/instructor-embedding |
| Jinja2 | 3.1.6 | BSD License | https://github.com/pallets/jinja/ |
| Markdown | 3.10.2 | BSD-3-Clause | https://Python-Markdown.github.io/ |
| MarkupSafe | 3.0.3 | BSD-3-Clause | https://github.com/pallets/markupsafe/ |
| PyJWT | 2.11.0 | MIT | https://github.com/jpadilla/pyjwt |
| PyYAML | 6.0.3 | MIT License | https://pyyaml.org/ |
| Pygments | 2.19.2 | BSD License | https://pygments.org |
| accelerate | 1.12.0 | Apache Software License | https://github.com/huggingface/accelerate |
| adlfs | 2026.2.0 | BSD License | UNKNOWN |
| aiohappyeyeballs | 2.6.1 | Python Software Foundation License | https://github.com/aio-libs/aiohappyeyeballs |
| aiohttp | 3.13.3 | Apache-2.0 AND MIT | https://github.com/aio-libs/aiohttp |
| aiosignal | 1.4.0 | Apache Software License | https://github.com/aio-libs/aiosignal |
| annotated-types | 0.7.0 | MIT License | https://github.com/annotated-types/annotated-types |
| anyio | 4.12.1 | MIT | https://anyio.readthedocs.io/en/stable/versionhistory.html |
| appnope | 0.1.4 | BSD License | http://github.com/minrk/appnope |
| asttokens | 3.0.1 | Apache 2.0 | https://github.com/gristlabs/asttokens |
| attrs | 25.4.0 | MIT | https://www.attrs.org/en/stable/changelog.html |
| awscli | 1.44.35 | Apache Software License | http://aws.amazon.com/cli/ |
| azure-core | 1.38.0 | MIT License | https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/core/azure-core |
| azure-datalake-store | 0.0.53 | MIT License | https://github.com/Azure/azure-data-lake-store-python |
| azure-identity | 1.25.1 | MIT | https://github.com/Azure/azure-sdk-for-python |
| azure-storage-blob | 12.28.0 | MIT License | https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob |
| babel | 2.18.0 | BSD License | https://babel.pocoo.org/ |
| backrefs | 6.1 | MIT | https://github.com/facelessuser/backrefs |
| beautifulsoup4 | 4.14.3 | MIT License | https://www.crummy.com/software/BeautifulSoup/bs4/ |
| bleach | 6.3.0 | Apache Software License | https://github.com/mozilla/bleach |
| boto3 | 1.42.45 | Apache-2.0 | https://github.com/boto/boto3 |
| botocore | 1.42.45 | Apache-2.0 | https://github.com/boto/botocore |
| cachetools | 7.0.0 | MIT | https://github.com/tkem/cachetools/ |
| certifi | 2026.1.4 | Mozilla Public License 2.0 (MPL 2.0) | https://github.com/certifi/python-certifi |
| cffi | 2.0.0 | MIT | https://cffi.readthedocs.io/en/latest/whatsnew.html |
| cfgv | 3.5.0 | MIT | https://github.com/asottile/cfgv |
| charset-normalizer | 3.4.4 | MIT | https://github.com/jawah/charset_normalizer/blob/master/CHANGELOG.md |
| click | 8.3.1 | BSD-3-Clause | https://github.com/pallets/click/ |
| cohere | 5.20.4 | MIT License | https://github.com/cohere-ai/cohere-python |
| colorama | 0.4.6 | BSD License | https://github.com/tartley/colorama |
| colpali_engine | 0.3.13 | MIT License | https://github.com/illuin-tech/colpali |
| comm | 0.2.3 | BSD License | https://github.com/ipython/comm |
| cryptography | 46.0.4 | Apache-2.0 OR BSD-3-Clause | https://github.com/pyca/cryptography |
| datafusion | 51.0.0 | Apache Software License | https://datafusion.apache.org/python |
| debugpy | 1.8.20 | MIT License | https://aka.ms/debugpy |
| decorator | 5.2.1 | BSD License | UNKNOWN |
| defusedxml | 0.7.1 | Python Software Foundation License | https://github.com/tiran/defusedxml |
| deprecation | 2.1.0 | Apache Software License | http://deprecation.readthedocs.io/ |
| distlib | 0.4.0 | Python Software Foundation License | https://github.com/pypa/distlib |
| distro | 1.9.0 | Apache Software License | https://github.com/python-distro/distro |
| docutils | 0.19 | BSD License; GNU General Public License (GPL); Public Domain; Python Software Foundation License | https://docutils.sourceforge.io/ |
| duckdb | 1.4.4 | MIT License | https://github.com/duckdb/duckdb-python |
| executing | 2.2.1 | MIT License | https://github.com/alexmojaki/executing |
| fastavro | 1.12.1 | MIT | https://github.com/fastavro/fastavro |
| fastjsonschema | 2.21.2 | BSD License | https://github.com/horejsek/python-fastjsonschema |
| filelock | 3.20.3 | Unlicense | https://github.com/tox-dev/py-filelock |
| frozenlist | 1.8.0 | Apache-2.0 | https://github.com/aio-libs/frozenlist |
| fsspec | 2026.2.0 | BSD-3-Clause | https://github.com/fsspec/filesystem_spec |
| ftfy | 6.3.1 | Apache-2.0 | https://ftfy.readthedocs.io/en/latest/ |
| ghp-import | 2.1.0 | Apache Software License | https://github.com/c-w/ghp-import |
| google-ai-generativelanguage | 0.6.15 | Apache Software License | https://github.com/googleapis/google-cloud-python/tree/main/packages/google-ai-generativelanguage |
| google-api-core | 2.25.2 | Apache Software License | https://github.com/googleapis/python-api-core |
| google-api-python-client | 2.189.0 | Apache Software License | https://github.com/googleapis/google-api-python-client/ |
| google-auth | 2.48.0 | Apache Software License | https://github.com/googleapis/google-auth-library-python |
| google-auth-httplib2 | 0.3.0 | Apache Software License | https://github.com/GoogleCloudPlatform/google-auth-library-python-httplib2 |
| google-generativeai | 0.8.6 | Apache Software License | https://github.com/google/generative-ai-python |
| googleapis-common-protos | 1.72.0 | Apache Software License | https://github.com/googleapis/google-cloud-python/tree/main/packages/googleapis-common-protos |
| griffe | 2.0.0 | ISC | https://mkdocstrings.github.io/griffe |
| griffecli | 2.0.0 | ISC | UNKNOWN |
| griffelib | 2.0.0 | ISC | UNKNOWN |
| grpcio | 1.78.0 | Apache-2.0 | https://grpc.io |
| grpcio-status | 1.71.2 | Apache Software License | https://grpc.io |
| h11 | 0.16.0 | MIT License | https://github.com/python-hyper/h11 |
| hf-xet | 1.2.0 | Apache-2.0 | https://github.com/huggingface/xet-core |
| httpcore | 1.0.9 | BSD-3-Clause | https://www.encode.io/httpcore/ |
| httplib2 | 0.31.2 | MIT License | https://github.com/httplib2/httplib2 |
| httpx | 0.28.1 | BSD License | https://github.com/encode/httpx |
| huggingface_hub | 0.36.2 | Apache Software License | https://github.com/huggingface/huggingface_hub |
| ibm-cos-sdk | 2.14.3 | Apache Software License | https://github.com/ibm/ibm-cos-sdk-python |
| ibm-cos-sdk-core | 2.14.3 | Apache Software License | https://github.com/ibm/ibm-cos-sdk-python-core |
| ibm-cos-sdk-s3transfer | 2.14.3 | Apache Software License | https://github.com/IBM/ibm-cos-sdk-python-s3transfer |
| ibm_watsonx_ai | 1.5.1 | BSD License | https://ibm.github.io/watsonx-ai-python-sdk/changelog.html |
| identify | 2.6.16 | MIT | https://github.com/pre-commit/identify |
| idna | 3.11 | BSD-3-Clause | https://github.com/kjd/idna |
| iniconfig | 2.3.0 | MIT | https://github.com/pytest-dev/iniconfig |
| ipykernel | 6.31.0 | BSD-3-Clause | https://ipython.org |
| ipython | 9.10.0 | BSD-3-Clause | https://ipython.org |
| ipython_pygments_lexers | 1.1.1 | BSD License | https://github.com/ipython/ipython-pygments-lexers |
| isodate | 0.7.2 | BSD License | https://github.com/gweis/isodate/ |
| jedi | 0.19.2 | MIT License | https://github.com/davidhalter/jedi |
| jiter | 0.13.0 | MIT License | https://github.com/pydantic/jiter/ |
| jmespath | 1.0.1 | MIT License | https://github.com/jmespath/jmespath.py |
| joblib | 1.5.3 | BSD-3-Clause | https://joblib.readthedocs.io |
| jsonschema | 4.26.0 | MIT | https://github.com/python-jsonschema/jsonschema |
| jsonschema-specifications | 2025.9.1 | MIT | https://github.com/python-jsonschema/jsonschema-specifications |
| jupyter_client | 8.8.0 | BSD License | https://jupyter.org |
| jupyter_core | 5.9.1 | BSD-3-Clause | https://jupyter.org |
| jupyterlab_pygments | 0.3.0 | BSD License | https://github.com/jupyterlab/jupyterlab_pygments |
| jupytext | 1.19.1 | MIT License | https://github.com/mwouts/jupytext |
| lance-namespace | 0.4.5 | Apache-2.0 | https://github.com/lance-format/lance-namespace |
| lance-namespace-urllib3-client | 0.4.5 | Apache-2.0 | https://github.com/lance-format/lance-namespace |
| lancedb | 0.29.2 | Apache Software License | https://github.com/lancedb/lancedb |
| lomond | 0.3.3 | BSD License | https://github.com/wildfoundry/dataplicity-lomond |
| markdown-it-py | 4.0.0 | MIT License | https://github.com/executablebooks/markdown-it-py |
| matplotlib-inline | 0.2.1 | UNKNOWN | https://github.com/ipython/matplotlib-inline |
| mdit-py-plugins | 0.5.0 | MIT License | https://github.com/executablebooks/mdit-py-plugins |
| mdurl | 0.1.2 | MIT License | https://github.com/executablebooks/mdurl |
| mergedeep | 1.3.4 | MIT License | https://github.com/clarketm/mergedeep |
| mistune | 3.2.0 | BSD License | https://github.com/lepture/mistune |
| mkdocs | 1.6.1 | BSD-2-Clause | https://github.com/mkdocs/mkdocs |
| mkdocs-autorefs | 1.4.3 | ISC | https://mkdocstrings.github.io/autorefs |
| mkdocs-get-deps | 0.2.0 | MIT | https://github.com/mkdocs/get-deps |
| mkdocs-jupyter | 0.25.1 | Apache-2.0 | https://github.com/danielfrg/mkdocs-jupyter |
| mkdocs-material | 9.7.1 | MIT | https://github.com/squidfunk/mkdocs-material |
| mkdocs-material-extensions | 1.3.1 | MIT | https://github.com/facelessuser/mkdocs-material-extensions |
| mkdocstrings | 1.0.3 | ISC | https://mkdocstrings.github.io |
| mkdocstrings-python | 2.0.2 | ISC | https://mkdocstrings.github.io/python |
| mpmath | 1.3.0 | BSD License | http://mpmath.org/ |
| msal | 1.34.0 | MIT License | https://github.com/AzureAD/microsoft-authentication-library-for-python |
| msal-extensions | 1.3.1 | MIT License | https://github.com/AzureAD/microsoft-authentication-extensions-for-python/releases |
| multidict | 6.7.1 | Apache License 2.0 | https://github.com/aio-libs/multidict |
| nbclient | 0.10.4 | BSD License | https://jupyter.org |
| nbconvert | 7.17.0 | BSD License | https://jupyter.org |
| nbformat | 5.10.4 | BSD License | https://jupyter.org |
| nest-asyncio | 1.6.0 | BSD License | https://github.com/erdewit/nest_asyncio |
| networkx | 3.6.1 | BSD-3-Clause | https://networkx.org/ |
| nodeenv | 1.10.0 | BSD License | https://github.com/ekalinin/nodeenv |
| numpy | 2.4.2 | BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0 | https://numpy.org |
| ollama | 0.6.1 | MIT | https://ollama.com |
| open_clip_torch | 3.2.0 | MIT License | https://github.com/mlfoundations/open_clip |
| openai | 2.18.0 | Apache Software License | https://github.com/openai/openai-python |
| packaging | 26.0 | Apache-2.0 OR BSD-2-Clause | https://github.com/pypa/packaging |
| paginate | 0.5.7 | MIT License | https://github.com/Signum/paginate |
| pandas | 2.3.3 | BSD License | https://pandas.pydata.org |
| pandocfilters | 1.5.1 | BSD License | http://github.com/jgm/pandocfilters |
| parso | 0.8.6 | MIT License | https://github.com/davidhalter/parso |
| pathspec | 1.0.4 | Mozilla Public License 2.0 (MPL 2.0) | UNKNOWN |
| peft | 0.17.1 | Apache Software License | https://github.com/huggingface/peft |
| pexpect | 4.9.0 | ISC License (ISCL) | https://pexpect.readthedocs.io/ |
| pillow | 12.1.0 | MIT-CMU | https://python-pillow.github.io |
| platformdirs | 4.5.1 | MIT | https://github.com/tox-dev/platformdirs |
| pluggy | 1.6.0 | MIT License | UNKNOWN |
| polars | 1.3.0 | MIT License | https://www.pola.rs/ |
| pre_commit | 4.5.1 | MIT | https://github.com/pre-commit/pre-commit |
| prompt_toolkit | 3.0.52 | BSD License | https://github.com/prompt-toolkit/python-prompt-toolkit |
| propcache | 0.4.1 | Apache Software License | https://github.com/aio-libs/propcache |
| proto-plus | 1.27.1 | Apache Software License | https://github.com/googleapis/proto-plus-python |
| protobuf | 5.29.6 | 3-Clause BSD License | https://developers.google.com/protocol-buffers/ |
| psutil | 7.2.2 | BSD-3-Clause | https://github.com/giampaolo/psutil |
| ptyprocess | 0.7.0 | ISC License (ISCL) | https://github.com/pexpect/ptyprocess |
| pure_eval | 0.2.3 | MIT License | http://github.com/alexmojaki/pure_eval |
| pyarrow | 23.0.0 | Apache-2.0 | https://arrow.apache.org/ |
| pyarrow-stubs | 20.0.0.20251215 | BSD-2-Clause | https://github.com/zen-xu/pyarrow-stubs |
| pyasn1 | 0.6.2 | BSD-2-Clause | https://github.com/pyasn1/pyasn1 |
| pyasn1_modules | 0.4.2 | BSD License | https://github.com/pyasn1/pyasn1-modules |
| pycparser | 3.0 | BSD-3-Clause | https://github.com/eliben/pycparser |
| pydantic | 2.12.5 | MIT | https://github.com/pydantic/pydantic |
| pydantic_core | 2.41.5 | MIT | https://github.com/pydantic/pydantic-core |
| pylance | 2.0.0 | Apache Software License | UNKNOWN |
| pymdown-extensions | 10.20.1 | MIT | https://github.com/facelessuser/pymdown-extensions |
| pyparsing | 3.3.2 | MIT | https://github.com/pyparsing/pyparsing/ |
| pyright | 1.1.408 | MIT | https://github.com/RobertCraigie/pyright-python |
| pytest | 9.0.2 | MIT | https://docs.pytest.org/en/latest/ |
| pytest-asyncio | 1.3.0 | Apache-2.0 | https://github.com/pytest-dev/pytest-asyncio |
| pytest-mock | 3.15.1 | MIT License | https://github.com/pytest-dev/pytest-mock/ |
| python-dateutil | 2.9.0.post0 | Apache Software License; BSD License | https://github.com/dateutil/dateutil |
| pytz | 2025.2 | MIT License | http://pythonhosted.org/pytz |
| pyyaml_env_tag | 1.1 | MIT | https://github.com/waylan/pyyaml-env-tag |
| pyzmq | 27.1.0 | BSD License | https://pyzmq.readthedocs.org |
| referencing | 0.37.0 | MIT | https://github.com/python-jsonschema/referencing |
| regex | 2026.1.15 | Apache-2.0 AND CNRI-Python | https://github.com/mrabarnett/mrab-regex |
| requests | 2.32.5 | Apache Software License | https://requests.readthedocs.io |
| rpds-py | 0.30.0 | MIT | https://github.com/crate-py/rpds |
| rsa | 4.7.2 | Apache Software License | https://stuvel.eu/rsa |
| ruff | 0.15.0 | MIT License | https://docs.astral.sh/ruff |
| s3transfer | 0.16.0 | Apache Software License | https://github.com/boto/s3transfer |
| safetensors | 0.7.0 | Apache Software License | https://github.com/huggingface/safetensors |
| scikit-learn | 1.8.0 | BSD-3-Clause | https://scikit-learn.org |
| scipy | 1.17.0 | BSD License | https://scipy.org/ |
| sentence-transformers | 5.2.2 | Apache Software License | https://www.SBERT.net |
| sentencepiece | 0.2.1 | UNKNOWN | https://github.com/google/sentencepiece |
| six | 1.17.0 | MIT License | https://github.com/benjaminp/six |
| sniffio | 1.3.1 | Apache Software License; MIT License | https://github.com/python-trio/sniffio |
| soupsieve | 2.8.3 | MIT | https://github.com/facelessuser/soupsieve |
| stack-data | 0.6.3 | MIT License | http://github.com/alexmojaki/stack_data |
| sympy | 1.14.0 | BSD License | https://sympy.org |
| tabulate | 0.9.0 | MIT License | https://github.com/astanin/python-tabulate |
| tantivy | 0.25.1 | UNKNOWN | UNKNOWN |
| threadpoolctl | 3.6.0 | BSD License | https://github.com/joblib/threadpoolctl |
| timm | 1.0.24 | Apache Software License | https://github.com/huggingface/pytorch-image-models |
| tinycss2 | 1.4.0 | BSD License | https://www.courtbouillon.org/tinycss2 |
| tokenizers | 0.22.2 | Apache Software License | https://github.com/huggingface/tokenizers |
| torch | 2.8.0 | BSD License | https://pytorch.org/ |
| torchvision | 0.23.0 | BSD | https://github.com/pytorch/vision |
| tornado | 6.5.4 | Apache Software License | http://www.tornadoweb.org/ |
| tqdm | 4.67.3 | MPL-2.0 AND MIT | https://tqdm.github.io |
| traitlets | 5.14.3 | BSD License | https://github.com/ipython/traitlets |
| transformers | 4.57.6 | Apache Software License | https://github.com/huggingface/transformers |
| types-requests | 2.32.4.20260107 | Apache-2.0 | https://github.com/python/typeshed |
| typing-inspection | 0.4.2 | MIT | https://github.com/pydantic/typing-inspection |
| typing_extensions | 4.15.0 | PSF-2.0 | https://github.com/python/typing_extensions |
| tzdata | 2025.3 | Apache-2.0 | https://github.com/python/tzdata |
| uritemplate | 4.2.0 | BSD 3-Clause OR Apache-2.0 | https://uritemplate.readthedocs.org |
| urllib3 | 2.6.3 | MIT | https://github.com/urllib3/urllib3/blob/main/CHANGES.rst |
| virtualenv | 20.36.1 | MIT | https://github.com/pypa/virtualenv |
| watchdog | 6.0.0 | Apache Software License | https://github.com/gorakhargosh/watchdog |
| webencodings | 0.5.1 | BSD License | https://github.com/SimonSapin/python-webencodings |
| yarl | 1.22.0 | Apache Software License | https://github.com/aio-libs/yarl |
python/RUST_THIRD_PARTY_LICENSES.html (new file, 14687 lines)
File diff suppressed because it is too large.
@@ -16,7 +16,7 @@ description = "lancedb"
 authors = [{ name = "LanceDB Devs", email = "dev@lancedb.com" }]
 license = { file = "LICENSE" }
 readme = "README.md"
-requires-python = ">=3.9"
+requires-python = ">=3.10"
 keywords = [
     "data-format",
     "data-science",
@@ -33,10 +33,10 @@ classifiers = [
     "Programming Language :: Python",
     "Programming Language :: Python :: 3",
     "Programming Language :: Python :: 3 :: Only",
-    "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
+    "Programming Language :: Python :: 3.13",
     "Topic :: Scientific/Engineering",
 ]

@@ -137,4 +137,4 @@ include = [
     "python/lancedb/_lancedb.pyi",
 ]
 exclude = ["python/tests/"]
-pythonVersion = "3.12"
+pythonVersion = "3.13"
@@ -180,6 +180,8 @@ class Table:
         delete_unverified: Optional[bool] = None,
     ) -> OptimizeStats: ...
     async def uri(self) -> str: ...
+    async def initial_storage_options(self) -> Optional[Dict[str, str]]: ...
+    async def latest_storage_options(self) -> Optional[Dict[str, str]]: ...
     @property
     def tags(self) -> Tags: ...
     def query(self) -> Query: ...
@@ -22,7 +22,12 @@ class BackgroundEventLoop:
         self.thread.start()

     def run(self, future):
-        return asyncio.run_coroutine_threadsafe(future, self.loop).result()
+        concurrent_future = asyncio.run_coroutine_threadsafe(future, self.loop)
+        try:
+            return concurrent_future.result()
+        except BaseException:
+            concurrent_future.cancel()
+            raise


 LOOP = BackgroundEventLoop()
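The new run() body matters when the calling thread is interrupted (for example by Ctrl-C or a timeout) while the coroutine is still executing on the background loop: without the cancel, the coroutine would keep running detached. A minimal standalone sketch of the same pattern — this is an illustration, not LanceDB's actual class:

    import asyncio
    import threading

    loop = asyncio.new_event_loop()
    threading.Thread(target=loop.run_forever, daemon=True).start()

    def run(coro, timeout=None):
        fut = asyncio.run_coroutine_threadsafe(coro, loop)
        try:
            return fut.result(timeout)
        except BaseException:
            fut.cancel()  # also cancels the coroutine still pending on the loop
            raise

    print(run(asyncio.sleep(0.1, result="done")))  # -> "done"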
@@ -275,7 +275,7 @@ class ColPaliEmbeddings(EmbeddingFunction):
         """
         Convert image inputs to PIL Images.
         """
-        PIL = attempt_import_or_raise("PIL", "pillow")
+        PIL_Image = attempt_import_or_raise("PIL.Image", "pillow")
         requests = attempt_import_or_raise("requests", "requests")
         images = self.sanitize_input(images)
         pil_images = []
@@ -285,12 +285,12 @@ class ColPaliEmbeddings(EmbeddingFunction):
             if image.startswith(("http://", "https://")):
                 response = requests.get(image, timeout=10)
                 response.raise_for_status()
-                pil_images.append(PIL.Image.open(io.BytesIO(response.content)))
+                pil_images.append(PIL_Image.open(io.BytesIO(response.content)))
             else:
-                with PIL.Image.open(image) as im:
+                with PIL_Image.open(image) as im:
                     pil_images.append(im.copy())
         elif isinstance(image, bytes):
-            pil_images.append(PIL.Image.open(io.BytesIO(image)))
+            pil_images.append(PIL_Image.open(io.BytesIO(image)))
         else:
             # Assume it's a PIL Image; will raise if invalid
             pil_images.append(image)
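The recurring PIL → PIL_Image change in these embedding hunks guards against a genuine import pitfall: importing the top-level "PIL" package does not guarantee that the PIL.Image submodule has been loaded, so a later PIL.Image.open can fail with AttributeError. A standalone illustration of the difference, independent of lancedb's attempt_import_or_raise helper:

    import importlib

    PIL = importlib.import_module("PIL")              # may not expose PIL.Image yet
    PIL_Image = importlib.import_module("PIL.Image")  # guarantees the submodule is loaded

    img = PIL_Image.new("RGB", (4, 4))  # safe: PIL_Image is the Image module itself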
@@ -77,8 +77,8 @@ class JinaEmbeddings(EmbeddingFunction):
         if isinstance(inputs, list):
             inputs = inputs
         else:
-            PIL = attempt_import_or_raise("PIL", "pillow")
-            if isinstance(inputs, PIL.Image.Image):
+            PIL_Image = attempt_import_or_raise("PIL.Image", "pillow")
+            if isinstance(inputs, PIL_Image.Image):
                 inputs = [inputs]
         return inputs

@@ -89,13 +89,13 @@ class JinaEmbeddings(EmbeddingFunction):
         elif isinstance(image, (str, Path)):
             parsed = urlparse.urlparse(image)
             # TODO handle drive letter on windows.
-            PIL = attempt_import_or_raise("PIL", "pillow")
+            PIL_Image = attempt_import_or_raise("PIL.Image", "pillow")
             if parsed.scheme == "file":
-                pil_image = PIL.Image.open(parsed.path)
+                pil_image = PIL_Image.open(parsed.path)
             elif parsed.scheme == "":
-                pil_image = PIL.Image.open(image if os.name == "nt" else parsed.path)
+                pil_image = PIL_Image.open(image if os.name == "nt" else parsed.path)
             elif parsed.scheme.startswith("http"):
-                pil_image = PIL.Image.open(io.BytesIO(url_retrieve(image)))
+                pil_image = PIL_Image.open(io.BytesIO(url_retrieve(image)))
             else:
                 raise NotImplementedError("Only local and http(s) urls are supported")
             buffered = io.BytesIO()
@@ -103,9 +103,9 @@ class JinaEmbeddings(EmbeddingFunction):
             image_bytes = buffered.getvalue()
             image_dict = {"image": base64.b64encode(image_bytes).decode("utf-8")}
         else:
-            PIL = attempt_import_or_raise("PIL", "pillow")
+            PIL_Image = attempt_import_or_raise("PIL.Image", "pillow")

-            if isinstance(image, PIL.Image.Image):
+            if isinstance(image, PIL_Image.Image):
                 buffered = io.BytesIO()
                 image.save(buffered, format="PNG")
                 image_bytes = buffered.getvalue()
@@ -136,9 +136,9 @@ class JinaEmbeddings(EmbeddingFunction):
         elif isinstance(query, (Path, bytes)):
             return [self.generate_image_embedding(query)]
         else:
-            PIL = attempt_import_or_raise("PIL", "pillow")
+            PIL_Image = attempt_import_or_raise("PIL.Image", "pillow")

-            if isinstance(query, PIL.Image.Image):
+            if isinstance(query, PIL_Image.Image):
                 return [self.generate_image_embedding(query)]
             else:
                 raise TypeError(
@@ -71,8 +71,8 @@ class OpenClipEmbeddings(EmbeddingFunction):
         if isinstance(query, str):
             return [self.generate_text_embeddings(query)]
         else:
-            PIL = attempt_import_or_raise("PIL", "pillow")
-            if isinstance(query, PIL.Image.Image):
+            PIL_Image = attempt_import_or_raise("PIL.Image", "pillow")
+            if isinstance(query, PIL_Image.Image):
                 return [self.generate_image_embedding(query)]
             else:
                 raise TypeError("OpenClip supports str or PIL Image as query")
@@ -145,20 +145,20 @@ class OpenClipEmbeddings(EmbeddingFunction):
         return self._encode_and_normalize_image(image)

     def _to_pil(self, image: Union[str, bytes]):
-        PIL = attempt_import_or_raise("PIL", "pillow")
+        PIL_Image = attempt_import_or_raise("PIL.Image", "pillow")
         if isinstance(image, bytes):
-            return PIL.Image.open(io.BytesIO(image))
-        if isinstance(image, PIL.Image.Image):
+            return PIL_Image.open(io.BytesIO(image))
+        if isinstance(image, PIL_Image.Image):
             return image
         elif isinstance(image, str):
             parsed = urlparse.urlparse(image)
             # TODO handle drive letter on windows.
             if parsed.scheme == "file":
-                return PIL.Image.open(parsed.path)
+                return PIL_Image.open(parsed.path)
             elif parsed.scheme == "":
-                return PIL.Image.open(image if os.name == "nt" else parsed.path)
+                return PIL_Image.open(image if os.name == "nt" else parsed.path)
             elif parsed.scheme.startswith("http"):
-                return PIL.Image.open(io.BytesIO(url_retrieve(image)))
+                return PIL_Image.open(io.BytesIO(url_retrieve(image)))
             else:
                 raise NotImplementedError("Only local and http(s) urls are supported")
@@ -56,8 +56,8 @@ class SigLipEmbeddings(EmbeddingFunction):
         if isinstance(query, str):
             return [self.generate_text_embeddings(query)]
         else:
-            PIL = attempt_import_or_raise("PIL", "pillow")
-            if isinstance(query, PIL.Image.Image):
+            PIL_Image = attempt_import_or_raise("PIL.Image", "pillow")
+            if isinstance(query, PIL_Image.Image):
                 return [self.generate_image_embedding(query)]
             else:
                 raise TypeError("SigLIP supports str or PIL Image as query")
@@ -127,21 +127,21 @@ class SigLipEmbeddings(EmbeddingFunction):
         return image_features.cpu().detach().numpy().squeeze()

     def _to_pil(self, image: Union[str, bytes, "PIL.Image.Image"]):
-        PIL = attempt_import_or_raise("PIL", "pillow")
-        if isinstance(image, PIL.Image.Image):
+        PIL_Image = attempt_import_or_raise("PIL.Image", "pillow")
+        if isinstance(image, PIL_Image.Image):
            return image.convert("RGB") if image.mode != "RGB" else image
         elif isinstance(image, bytes):
-            return PIL.Image.open(io.BytesIO(image)).convert("RGB")
+            return PIL_Image.open(io.BytesIO(image)).convert("RGB")
         elif isinstance(image, str):
             parsed = urlparse.urlparse(image)
             if parsed.scheme == "file":
-                return PIL.Image.open(parsed.path).convert("RGB")
+                return PIL_Image.open(parsed.path).convert("RGB")
             elif parsed.scheme == "":
                 path = image if os.name == "nt" else parsed.path
-                return PIL.Image.open(path).convert("RGB")
+                return PIL_Image.open(path).convert("RGB")
             elif parsed.scheme.startswith("http"):
                 image_bytes = url_retrieve(image)
-                return PIL.Image.open(io.BytesIO(image_bytes)).convert("RGB")
+                return PIL_Image.open(io.BytesIO(image_bytes)).convert("RGB")
             else:
                 raise NotImplementedError("Only local and http(s) urls are supported")
         else:
@@ -21,6 +21,9 @@ if TYPE_CHECKING:

 # Token limits for different VoyageAI models
 VOYAGE_TOTAL_TOKEN_LIMITS = {
+    "voyage-4": 320_000,
+    "voyage-4-lite": 1_000_000,
+    "voyage-4-large": 120_000,
     "voyage-context-3": 32_000,
     "voyage-3.5-lite": 1_000_000,
     "voyage-3.5": 320_000,
@@ -61,7 +64,7 @@ def is_video_path(path: Path) -> bool:


 def transform_input(input_data: Union[str, bytes, Path]):
-    PIL = attempt_import_or_raise("PIL", "pillow")
+    PIL_Image = attempt_import_or_raise("PIL.Image", "pillow")
     if isinstance(input_data, str):
         if is_valid_url(input_data):
             if is_video_url(input_data):
@@ -70,7 +73,7 @@ def transform_input(input_data: Union[str, bytes, Path]):
                 content = {"type": "image_url", "image_url": input_data}
             else:
                 content = {"type": "text", "text": input_data}
-    elif isinstance(input_data, PIL.Image.Image):
+    elif isinstance(input_data, PIL_Image.Image):
         buffered = BytesIO()
         input_data.save(buffered, format="JPEG")
         img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
@@ -79,7 +82,7 @@ def transform_input(input_data: Union[str, bytes, Path]):
             "image_base64": "data:image/jpeg;base64," + img_str,
         }
     elif isinstance(input_data, bytes):
-        img = PIL.Image.open(BytesIO(input_data))
+        img = PIL_Image.open(BytesIO(input_data))
         buffered = BytesIO()
         img.save(buffered, format="JPEG")
         img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
@@ -98,7 +101,7 @@ def transform_input(input_data: Union[str, bytes, Path]):
                 "video_base64": video_str,
             }
         else:
-            img = PIL.Image.open(input_data)
+            img = PIL_Image.open(input_data)
             buffered = BytesIO()
             img.save(buffered, format="JPEG")
             img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
@@ -116,8 +119,8 @@ def sanitize_multimodal_input(inputs: Union[TEXT, IMAGES]) -> List[Any]:
     """
     Sanitize the input to the embedding function.
     """
-    PIL = attempt_import_or_raise("PIL", "pillow")
-    if isinstance(inputs, (str, bytes, Path, PIL.Image.Image)):
+    PIL_Image = attempt_import_or_raise("PIL.Image", "pillow")
+    if isinstance(inputs, (str, bytes, Path, PIL_Image.Image)):
         inputs = [inputs]
     elif isinstance(inputs, list):
         pass  # Already a list, use as-is
@@ -130,7 +133,7 @@ def sanitize_multimodal_input(inputs: Union[TEXT, IMAGES]) -> List[Any]:
             f"Input type {type(inputs)} not allowed with multimodal model."
         )

-    if not all(isinstance(x, (str, bytes, Path, PIL.Image.Image)) for x in inputs):
+    if not all(isinstance(x, (str, bytes, Path, PIL_Image.Image)) for x in inputs):
         raise ValueError("Each input should be either str, bytes, Path or Image.")

     return [transform_input(i) for i in inputs]
@@ -167,6 +170,9 @@ class VoyageAIEmbeddingFunction(EmbeddingFunction):
     name: str
         The name of the model to use. List of acceptable models:

+            * voyage-4 (1024 dims, general-purpose and multilingual retrieval)
+            * voyage-4-lite (1024 dims, optimized for latency and cost)
+            * voyage-4-large (1024 dims, best retrieval quality)
             * voyage-context-3
             * voyage-3.5
             * voyage-3.5-lite
@@ -215,6 +221,9 @@ class VoyageAIEmbeddingFunction(EmbeddingFunction):
     _FLEXIBLE_DIM_MODELS: ClassVar[list] = ["voyage-multimodal-3.5"]
     _VALID_DIMENSIONS: ClassVar[list] = [256, 512, 1024, 2048]
     text_embedding_models: list = [
+        "voyage-4",
+        "voyage-4-lite",
+        "voyage-4-large",
         "voyage-3.5",
         "voyage-3.5-lite",
         "voyage-3",
@@ -252,6 +261,9 @@ class VoyageAIEmbeddingFunction(EmbeddingFunction):
         elif self.name == "voyage-code-2":
             return 1536
         elif self.name in [
+            "voyage-4",
+            "voyage-4-lite",
+            "voyage-4-large",
             "voyage-context-3",
             "voyage-3.5",
             "voyage-3.5-lite",
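For context, the newly listed voyage-4 family slots into the same registry-based usage pattern the tests further down exercise; a minimal sketch (assumes VOYAGE_API_KEY is set in the environment):

    from lancedb.embeddings import get_registry

    func = get_registry().get("voyageai").create(name="voyage-4-lite", max_retries=0)
    print(func.ndims())  # 1024 for the voyage-4 family, per the additions above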
@@ -9,7 +9,7 @@ import json
 from ._lancedb import async_permutation_builder, PermutationReader
 from .table import LanceTable
 from .background_loop import LOOP
-from .util import batch_to_tensor
+from .util import batch_to_tensor, batch_to_tensor_rows
 from typing import Any, Callable, Iterator, Literal, Optional, TYPE_CHECKING, Union

 if TYPE_CHECKING:
@@ -333,7 +333,11 @@ class Transforms:
     """

     @staticmethod
-    def arrow2python(batch: pa.RecordBatch) -> dict[str, list[Any]]:
+    def arrow2python(batch: pa.RecordBatch) -> list[dict[str, Any]]:
         return batch.to_pylist()

+    @staticmethod
+    def arrow2pythoncol(batch: pa.RecordBatch) -> dict[str, list[Any]]:
+        return batch.to_pydict()
+
     @staticmethod
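The two transforms differ only in orientation: to_pylist yields row dicts, to_pydict yields column lists. A quick standalone illustration with pyarrow:

    import pyarrow as pa

    batch = pa.RecordBatch.from_pydict({"id": [1, 2], "value": ["a", "b"]})
    print(batch.to_pylist())  # [{'id': 1, 'value': 'a'}, {'id': 2, 'value': 'b'}]
    print(batch.to_pydict())  # {'id': [1, 2], 'value': ['a', 'b']}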
@@ -687,7 +691,17 @@ class Permutation:
         return

     def with_format(
-        self, format: Literal["numpy", "python", "pandas", "arrow", "torch", "polars"]
+        self,
+        format: Literal[
+            "numpy",
+            "python",
+            "python_col",
+            "pandas",
+            "arrow",
+            "torch",
+            "torch_col",
+            "polars",
+        ],
     ) -> "Permutation":
         """
         Set the format for batches
@@ -696,16 +710,18 @@ class Permutation:

         The format can be one of:
         - "numpy" - the batch will be a dict of numpy arrays (one per column)
-        - "python" - the batch will be a dict of lists (one per column)
+        - "python" - the batch will be a list of dicts (one per row)
+        - "python_col" - the batch will be a dict of lists (one entry per column)
         - "pandas" - the batch will be a pandas DataFrame
         - "arrow" - the batch will be a pyarrow RecordBatch
-        - "torch" - the batch will be a two dimensional torch tensor
+        - "torch" - the batch will be a list of tensors, one per row
+        - "torch_col" - the batch will be a 2D torch tensor (first dim indexes columns)
         - "polars" - the batch will be a polars DataFrame

         Conversion may or may not involve a data copy. Lance uses Arrow internally
-        and so it is able to zero-copy to the arrow and polars.
+        and so it is able to zero-copy to the arrow and polars formats.

-        Conversion to torch will be zero-copy but will only support a subset of data
+        Conversion to torch_col will be zero-copy but will only support a subset of data
         types (numeric types).

         Conversion to numpy and/or pandas will typically be zero-copy for numeric
@@ -718,6 +734,8 @@ class Permutation:
         assert format is not None, "format is required"
         if format == "python":
             return self.with_transform(Transforms.arrow2python)
+        if format == "python_col":
+            return self.with_transform(Transforms.arrow2pythoncol)
         elif format == "numpy":
             return self.with_transform(Transforms.arrow2numpy)
         elif format == "pandas":
@@ -725,6 +743,8 @@ class Permutation:
         elif format == "arrow":
             return self.with_transform(Transforms.arrow2arrow)
         elif format == "torch":
+            return self.with_transform(batch_to_tensor_rows)
+        elif format == "torch_col":
             return self.with_transform(batch_to_tensor)
         elif format == "polars":
             return self.with_transform(Transforms.arrow2polars())
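A short usage sketch of the expanded format options; the table and permutation setup is assumed, not shown:

    # perm is an existing Permutation over a table with numeric columns
    rows = perm.with_format("python")        # each batch: [{"id": 1, ...}, ...]
    cols = perm.with_format("python_col")    # each batch: {"id": [1, 2, ...], ...}
    tensors = perm.with_format("torch_col")  # each batch: one 2D tensor
    for batch in tensors.iter(256):
        ...  # consume batches of 256 rows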
@@ -746,15 +766,20 @@ class Permutation:

     def __getitem__(self, index: int) -> Any:
         """
-        Return a single row from the permutation
-
-        The output will always be a python dictionary regardless of the format.
-
-        This method is mostly useful for debugging and exploration. For actual
-        processing use [iter](#iter) or a torch data loader to perform batched
-        processing.
+        Returns a single row from the permutation by offset
         """
-        pass
+        return self.__getitems__([index])
+
+    def __getitems__(self, indices: list[int]) -> Any:
+        """
+        Returns rows from the permutation by offset
+        """
+
+        async def do_getitems():
+            return await self.reader.take_offsets(indices, selection=self.selection)
+
+        batch = LOOP.run(do_getitems())
+        return self.transform_fn(batch)

     @deprecated(details="Use with_skip instead")
     def skip(self, skip: int) -> "Permutation":
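Worth noting: __getitems__ (plural) is the optional batched-fetch hook that torch.utils.data probes for, so a Permutation can back a DataLoader without one round trip per row. A hedged sketch — it assumes the Permutation also exposes __len__ for the default sampler, which this diff does not show:

    from torch.utils.data import DataLoader

    # The default fetcher calls __getitems__ with a whole batch of offsets at once
    loader = DataLoader(perm, batch_size=64, collate_fn=lambda batch: batch)
    first_batch = next(iter(loader))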
@@ -275,7 +275,7 @@ def _py_type_to_arrow_type(py_type: Type[Any], field: FieldInfo) -> pa.DataType:
         return pa.timestamp("us", tz=tz)
     elif getattr(py_type, "__origin__", None) in (list, tuple):
         child = py_type.__args__[0]
-        return pa.list_(_py_type_to_arrow_type(child, field))
+        return _pydantic_list_child_to_arrow(child, field)
     raise TypeError(
         f"Converting Pydantic type to Arrow Type: unsupported type {py_type}."
     )
@@ -298,12 +298,18 @@ else:


 def _pydantic_type_to_arrow_type(tp: Any, field: FieldInfo) -> pa.DataType:
+    def _safe_issubclass(candidate: Any, base: type) -> bool:
+        try:
+            return issubclass(candidate, base)
+        except TypeError:
+            return False
+
     if inspect.isclass(tp):
-        if issubclass(tp, pydantic.BaseModel):
+        if _safe_issubclass(tp, pydantic.BaseModel):
             # Struct
             fields = _pydantic_model_to_fields(tp)
             return pa.struct(fields)
-        if issubclass(tp, FixedSizeListMixin):
+        if _safe_issubclass(tp, FixedSizeListMixin):
             if getattr(tp, "is_multi_vector", lambda: False)():
                 return pa.list_(pa.list_(tp.value_arrow_type(), tp.dim()))
             # For regular Vector
@@ -311,45 +317,67 @@ def _pydantic_type_to_arrow_type(tp: Any, field: FieldInfo) -> pa.DataType:
     return _py_type_to_arrow_type(tp, field)


+def _pydantic_list_child_to_arrow(child: Any, field: FieldInfo) -> pa.DataType:
+    unwrapped = _unwrap_optional_annotation(child)
+    if unwrapped is not None:
+        return pa.list_(
+            pa.field("item", _pydantic_type_to_arrow_type(unwrapped, field), True)
+        )
+    return pa.list_(_pydantic_type_to_arrow_type(child, field))
+
+
+def _unwrap_optional_annotation(annotation: Any) -> Any | None:
+    if isinstance(annotation, (_GenericAlias, GenericAlias)):
+        origin = annotation.__origin__
+        args = annotation.__args__
+        if origin == Union:
+            non_none = [arg for arg in args if arg is not type(None)]
+            if len(non_none) == 1 and len(non_none) != len(args):
+                return non_none[0]
+    elif sys.version_info >= (3, 10) and isinstance(annotation, types.UnionType):
+        args = annotation.__args__
+        non_none = [arg for arg in args if arg is not type(None)]
+        if len(non_none) == 1 and len(non_none) != len(args):
+            return non_none[0]
+    return None
+
+
 def _pydantic_to_arrow_type(field: FieldInfo) -> pa.DataType:
     """Convert a Pydantic FieldInfo to Arrow DataType"""
+    unwrapped = _unwrap_optional_annotation(field.annotation)
+    if unwrapped is not None:
+        return _pydantic_type_to_arrow_type(unwrapped, field)
     if isinstance(field.annotation, (_GenericAlias, GenericAlias)):
         origin = field.annotation.__origin__
         args = field.annotation.__args__

         if origin is list:
             child = args[0]
-            return pa.list_(_py_type_to_arrow_type(child, field))
-        elif origin == Union:
-            if len(args) == 2 and args[1] is type(None):
-                return _pydantic_type_to_arrow_type(args[0], field)
-    elif sys.version_info >= (3, 10) and isinstance(field.annotation, types.UnionType):
-        args = field.annotation.__args__
-        if len(args) == 2:
-            for typ in args:
-                if typ is type(None):
-                    continue
-                return _py_type_to_arrow_type(typ, field)
+            return _pydantic_list_child_to_arrow(child, field)
     return _pydantic_type_to_arrow_type(field.annotation, field)


 def is_nullable(field: FieldInfo) -> bool:
     """Check if a Pydantic FieldInfo is nullable."""
+    if _unwrap_optional_annotation(field.annotation) is not None:
+        return True
     if isinstance(field.annotation, (_GenericAlias, GenericAlias)):
         origin = field.annotation.__origin__
         args = field.annotation.__args__
         if origin == Union:
-            if len(args) == 2 and args[1] is type(None):
+            if any(typ is type(None) for typ in args):
                 return True
     elif sys.version_info >= (3, 10) and isinstance(field.annotation, types.UnionType):
         args = field.annotation.__args__
         for typ in args:
             if typ is type(None):
                 return True
-    elif inspect.isclass(field.annotation) and issubclass(
-        field.annotation, FixedSizeListMixin
-    ):
-        return field.annotation.nullable()
+    elif inspect.isclass(field.annotation):
+        try:
+            if issubclass(field.annotation, FixedSizeListMixin):
+                return field.annotation.nullable()
+        except TypeError:
+            return False
     return False
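Two behaviors these helpers codify are easy to check in isolation: issubclass raises TypeError for subscripted generics (hence the _safe_issubclass guard), and both typing.Optional[X] and PEP 604 X | None unwrap to the same inner type. A standalone sketch that mirrors the idea, not the exact lancedb code (assumes Python 3.10+):

    import types
    from typing import Optional, Union, get_args, get_origin

    try:
        issubclass(list[int], list)  # TypeError: parameterized generics are not classes
    except TypeError:
        pass

    def unwrap_optional(ann):
        # illustrative counterpart of _unwrap_optional_annotation
        if get_origin(ann) in (Union, types.UnionType):
            non_none = [a for a in get_args(ann) if a is not type(None)]
            if len(non_none) == 1:
                return non_none[0]
        return None

    assert unwrap_optional(Optional[int]) is int
    assert unwrap_optional(int | None) is int  # PEP 604 unions unwrap the same way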
@@ -961,22 +961,27 @@ class LanceQueryBuilder(ABC):
        >>> query = [100, 100]
        >>> plan = table.search(query).analyze_plan()
        >>> print(plan) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-       AnalyzeExec verbose=true, metrics=[], cumulative_cpu=...
-       TracedExec, metrics=[], cumulative_cpu=...
-       ProjectionExec: expr=[...], metrics=[...], cumulative_cpu=...
-       GlobalLimitExec: skip=0, fetch=10, metrics=[...], cumulative_cpu=...
-       FilterExec: _distance@2 IS NOT NULL,
-       metrics=[output_rows=..., elapsed_compute=...], cumulative_cpu=...
-       SortExec: TopK(fetch=10), expr=[...],
+       AnalyzeExec verbose=true, elapsed=..., metrics=...
+       TracedExec, elapsed=..., metrics=...
+       ProjectionExec: elapsed=..., expr=[...],
+       metrics=[output_rows=..., elapsed_compute=..., output_bytes=...]
+       GlobalLimitExec: elapsed=..., skip=0, fetch=10,
+       metrics=[output_rows=..., elapsed_compute=..., output_bytes=...]
+       FilterExec: elapsed=..., _distance@2 IS NOT NULL, metrics=[...]
+       SortExec: elapsed=..., TopK(fetch=10), expr=[...],
        preserve_partitioning=[...],
-       metrics=[output_rows=..., elapsed_compute=..., row_replacements=...],
-       cumulative_cpu=...
-       KNNVectorDistance: metric=l2,
-       metrics=[output_rows=..., elapsed_compute=..., output_batches=...],
-       cumulative_cpu=...
-       LanceRead: uri=..., projection=[vector], ...
-       metrics=[output_rows=..., elapsed_compute=...,
-       bytes_read=..., iops=..., requests=...], cumulative_cpu=...
+       metrics=[output_rows=..., elapsed_compute=...,
+       output_bytes=..., row_replacements=...]
+       KNNVectorDistance: elapsed=..., metric=l2,
+       metrics=[output_rows=..., elapsed_compute=...,
+       output_bytes=..., output_batches=...]
+       LanceRead: elapsed=..., uri=..., projection=[vector],
+       num_fragments=..., range_before=None, range_after=None,
+       row_id=true, row_addr=false,
+       full_filter=--, refine_filter=--,
+       metrics=[output_rows=..., elapsed_compute=..., output_bytes=...,
+       fragments_scanned=..., ranges_scanned=1, rows_scanned=1,
+       bytes_read=..., iops=..., requests=..., task_wait_time=...]

        Returns
        -------
@@ -1428,6 +1433,19 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
         self._bypass_vector_index = True
         return self

+    def fast_search(self) -> LanceVectorQueryBuilder:
+        """
+        Skip a flat search of unindexed data. This will improve
+        search performance but search results will not include unindexed data.
+
+        Returns
+        -------
+        LanceVectorQueryBuilder
+            The LanceVectorQueryBuilder object.
+        """
+        self._fast_search = True
+        return self
+

 class LanceFtsQueryBuilder(LanceQueryBuilder):
     """A builder for full text search for LanceDB."""
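A quick sketch of where fast_search slots into a query chain (table setup assumed, not shown):

    # Only consult the vector index; rows added since the last
    # index build are skipped entirely
    results = (
        table.search([0.1, 0.2])
        .fast_search()
        .limit(10)
        .to_pandas()
    )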
@@ -2100,19 +2118,17 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
         """  # noqa: E501
         self._create_query_builders()

-        results = ["Vector Search Plan:"]
-        results.append(
-            self._table._explain_plan(
-                self._vector_query.to_query_object(), verbose=verbose
-            )
+        reranker_label = str(self._reranker) if self._reranker else "No reranker"
+        vector_plan = self._table._explain_plan(
+            self._vector_query.to_query_object(), verbose=verbose
         )
-        results.append("FTS Search Plan:")
-        results.append(
-            self._table._explain_plan(
-                self._fts_query.to_query_object(), verbose=verbose
-            )
+        fts_plan = self._table._explain_plan(
+            self._fts_query.to_query_object(), verbose=verbose
         )
-        return "\n".join(results)
+        # Indent sub-plans under the reranker
+        indented_vector = "\n".join("  " + line for line in vector_plan.splitlines())
+        indented_fts = "\n".join("  " + line for line in fts_plan.splitlines())
+        return f"{reranker_label}\n  {indented_vector}\n  {indented_fts}"

     def analyze_plan(self):
         """Execute the query and display with runtime metrics.
@@ -3146,23 +3162,20 @@ class AsyncHybridQuery(AsyncStandardQuery, AsyncVectorQueryBase):
    ...     plan = await table.query().nearest_to([1.0, 2.0]).nearest_to_text("hello").explain_plan(True)
    ...     print(plan)
    >>> asyncio.run(doctest_example()) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-   Vector Search Plan:
-   ProjectionExec: expr=[vector@0 as vector, text@3 as text, _distance@2 as _distance]
-     Take: columns="vector, _rowid, _distance, (text)"
-       CoalesceBatchesExec: target_batch_size=1024
-         GlobalLimitExec: skip=0, fetch=10
-           FilterExec: _distance@2 IS NOT NULL
-             SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST, _rowid@1 ASC NULLS LAST], preserve_partitioning=[false]
-               KNNVectorDistance: metric=l2
-                 LanceRead: uri=..., projection=[vector], ...
-   <BLANKLINE>
-   FTS Search Plan:
-   ProjectionExec: expr=[vector@2 as vector, text@3 as text, _score@1 as _score]
-     Take: columns="_rowid, _score, (vector), (text)"
-       CoalesceBatchesExec: target_batch_size=1024
-         GlobalLimitExec: skip=0, fetch=10
-           MatchQuery: column=text, query=hello
-   <BLANKLINE>
+   RRFReranker(K=60)
+     ProjectionExec: expr=[vector@0 as vector, text@3 as text, _distance@2 as _distance]
+       Take: columns="vector, _rowid, _distance, (text)"
+         CoalesceBatchesExec: target_batch_size=1024
+           GlobalLimitExec: skip=0, fetch=10
+             FilterExec: _distance@2 IS NOT NULL
+               SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST, _rowid@1 ASC NULLS LAST], preserve_partitioning=[false]
+                 KNNVectorDistance: metric=l2
+                   LanceRead: uri=..., projection=[vector], ...
+     ProjectionExec: expr=[vector@2 as vector, text@3 as text, _score@1 as _score]
+       Take: columns="_rowid, _score, (vector), (text)"
+         CoalesceBatchesExec: target_batch_size=1024
+           GlobalLimitExec: skip=0, fetch=10
+             MatchQuery: column=text, query=hello

    Parameters
    ----------
@@ -3174,12 +3187,12 @@ class AsyncHybridQuery(AsyncStandardQuery, AsyncVectorQueryBase):
        plan : str
        """  # noqa: E501

-        results = ["Vector Search Plan:"]
-        results.append(await self._inner.to_vector_query().explain_plan(verbose))
-        results.append("FTS Search Plan:")
-        results.append(await self._inner.to_fts_query().explain_plan(verbose))
-
-        return "\n".join(results)
+        vector_plan = await self._inner.to_vector_query().explain_plan(verbose)
+        fts_plan = await self._inner.to_fts_query().explain_plan(verbose)
+        # Indent sub-plans under the reranker
+        indented_vector = "\n".join("  " + line for line in vector_plan.splitlines())
+        indented_fts = "\n".join("  " + line for line in fts_plan.splitlines())
+        return f"{self._reranker}\n  {indented_vector}\n  {indented_fts}"

     async def analyze_plan(self):
         """
@@ -42,10 +42,18 @@ class AnswerdotaiRerankers(Reranker):
         rerankers = attempt_import_or_raise(
             "rerankers"
         )  # import here for faster ops later
+        self.model_name = model_name
+        self.model_type = model_type
         self.reranker = rerankers.Reranker(
             model_name=model_name, model_type=model_type, **kwargs
         )

+    def __str__(self):
+        return (
+            f"AnswerdotaiRerankers(model_type={self.model_type}, "
+            f"model_name={self.model_name})"
+        )
+
     def _rerank(self, result_set: pa.Table, query: str):
         result_set = self._handle_empty_results(result_set)
         if len(result_set) == 0:
@@ -40,6 +40,9 @@ class Reranker(ABC):
         if ARROW_VERSION.major <= 13:
             self._concat_tables_args = {"promote": True}

+    def __str__(self):
+        return self.__class__.__name__
+
     def rerank_vector(
         self,
         query: str,
@@ -44,6 +44,9 @@ class CohereReranker(Reranker):
         self.top_n = top_n
         self.api_key = api_key

+    def __str__(self):
+        return f"CohereReranker(model_name={self.model_name})"
+
     @cached_property
     def _client(self):
         cohere = attempt_import_or_raise("cohere")
@@ -50,6 +50,9 @@ class CrossEncoderReranker(Reranker):
         if self.device is None:
             self.device = "cuda" if torch.cuda.is_available() else "cpu"

+    def __str__(self):
+        return f"CrossEncoderReranker(model_name={self.model_name})"
+
     @cached_property
     def model(self):
         sbert = attempt_import_or_raise("sentence_transformers")
@@ -45,6 +45,9 @@ class JinaReranker(Reranker):
         self.top_n = top_n
         self.api_key = api_key

+    def __str__(self):
+        return f"JinaReranker(model_name={self.model_name})"
+
     @cached_property
     def _client(self):
         import requests
@@ -38,6 +38,9 @@ class LinearCombinationReranker(Reranker):
         self.weight = weight
         self.fill = fill

+    def __str__(self):
+        return f"LinearCombinationReranker(weight={self.weight}, fill={self.fill})"
+
     def rerank_hybrid(
         self,
         query: str,  # noqa: F821
@@ -54,6 +54,12 @@ class MRRReranker(Reranker):
         self.weight_vector = weight_vector
         self.weight_fts = weight_fts

+    def __str__(self):
+        return (
+            f"MRRReranker(weight_vector={self.weight_vector}, "
+            f"weight_fts={self.weight_fts})"
+        )
+
     def rerank_hybrid(
         self,
         query: str,  # noqa: F821
@@ -43,6 +43,9 @@ class OpenaiReranker(Reranker):
         self.column = column
         self.api_key = api_key

+    def __str__(self):
+        return f"OpenaiReranker(model_name={self.model_name})"
+
     def _rerank(self, result_set: pa.Table, query: str):
         result_set = self._handle_empty_results(result_set)
         if len(result_set) == 0:
@@ -36,6 +36,9 @@ class RRFReranker(Reranker):
         super().__init__(return_score)
         self.K = K

+    def __str__(self):
+        return f"RRFReranker(K={self.K})"
+
     def rerank_hybrid(
         self,
         query: str,  # noqa: F821
@@ -52,6 +52,9 @@ class VoyageAIReranker(Reranker):
         self.api_key = api_key
         self.truncation = truncation

+    def __str__(self):
+        return f"VoyageAIReranker(model_name={self.model_name})"
+
     @cached_property
     def _client(self):
         voyageai = attempt_import_or_raise("voyageai")
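These __str__ implementations supply the header line that the reworked explain_plan output above leans on. For example:

    from lancedb.rerankers import RRFReranker

    print(str(RRFReranker()))  # "RRFReranker(K=60)" with the default K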
@@ -904,7 +904,9 @@ class Table:
         ----------
         field_names: str or list of str
             The name(s) of the field to index.
-            can be only str if use_tantivy=True for now.
+            If ``use_tantivy`` is False (default), only a single field name
+            (str) is supported. To index multiple fields, create a separate
+            FTS index for each field.
         replace: bool, default False
             If True, replace the existing index if it exists. Note that this is
             not yet an atomic operation; the index will be temporarily
@@ -2222,6 +2224,37 @@ class LanceTable(Table):
     def uri(self) -> str:
         return LOOP.run(self._table.uri())

+    def initial_storage_options(self) -> Optional[Dict[str, str]]:
+        """Get the initial storage options that were passed in when opening this table.
+
+        For dynamically refreshed options (e.g., credential vending), use
+        :meth:`latest_storage_options`.
+
+        Warning: This is an internal API and the return value is subject to change.
+
+        Returns
+        -------
+        Optional[Dict[str, str]]
+            The storage options, or None if no storage options were configured.
+        """
+        return LOOP.run(self._table.initial_storage_options())
+
+    def latest_storage_options(self) -> Optional[Dict[str, str]]:
+        """Get the latest storage options, refreshing from provider if configured.
+
+        This method is useful for credential vending scenarios where storage options
+        may be refreshed dynamically. If no dynamic provider is configured, this
+        returns the initial static options.
+
+        Warning: This is an internal API and the return value is subject to change.
+
+        Returns
+        -------
+        Optional[Dict[str, str]]
+            The storage options, or None if no storage options were configured.
+        """
+        return LOOP.run(self._table.latest_storage_options())
+
     def create_scalar_index(
         self,
         column: str,
@@ -2267,7 +2300,11 @@ class LanceTable(Table):
     ):
         if not use_tantivy:
             if not isinstance(field_names, str):
-                raise ValueError("field_names must be a string when use_tantivy=False")
+                raise ValueError(
+                    "Native FTS indexes can only be created on a single field "
+                    "at a time. To search over multiple text fields, create a "
+                    "separate FTS index for each field."
+                )

         if tokenizer_name is None:
             tokenizer_configs = {
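Following the new error message's advice, indexing multiple text columns with the native engine means one create_fts_index call per field; a sketch (table setup assumed):

    # One native FTS index per text column
    for field in ["title", "body"]:
        table.create_fts_index(field, use_tantivy=False, replace=True)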
@@ -3624,6 +3661,37 @@ class AsyncTable:
         """
         return await self._inner.uri()

+    async def initial_storage_options(self) -> Optional[Dict[str, str]]:
+        """Get the initial storage options that were passed in when opening this table.
+
+        For dynamically refreshed options (e.g., credential vending), use
+        :meth:`latest_storage_options`.
+
+        Warning: This is an internal API and the return value is subject to change.
+
+        Returns
+        -------
+        Optional[Dict[str, str]]
+            The storage options, or None if no storage options were configured.
+        """
+        return await self._inner.initial_storage_options()
+
+    async def latest_storage_options(self) -> Optional[Dict[str, str]]:
+        """Get the latest storage options, refreshing from provider if configured.
+
+        This method is useful for credential vending scenarios where storage options
+        may be refreshed dynamically. If no dynamic provider is configured, this
+        returns the initial static options.
+
+        Warning: This is an internal API and the return value is subject to change.
+
+        Returns
+        -------
+        Optional[Dict[str, str]]
+            The storage options, or None if no storage options were configured.
+        """
+        return await self._inner.latest_storage_options()
+
     async def add(
         self,
         data: DATA,
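A hedged sketch of how the two accessors differ in a credential-vending setup; the provider wiring is assumed and the URI is hypothetical:

    import lancedb

    db = lancedb.connect("s3://bucket/db")  # hypothetical URI
    tbl = db.open_table("my_table")

    static_opts = tbl.initial_storage_options()  # what open_table was given; may be None
    fresh_opts = tbl.latest_storage_options()    # re-fetched from the provider, if one is set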
@@ -419,3 +419,22 @@ def batch_to_tensor(batch: pa.RecordBatch):
     """
     torch = attempt_import_or_raise("torch", "torch")
     return torch.stack([torch.from_dlpack(col) for col in batch.columns])
+
+
+def batch_to_tensor_rows(batch: pa.RecordBatch):
+    """
+    Convert a PyArrow RecordBatch to a list of PyTorch Tensors, one per row
+
+    Each column is converted to a NumPy array and the columns are stacked
+    into a single 2D tensor. The 2D tensor is then split into a list of
+    tensors, one per row.
+
+    Fails if torch or numpy is not installed.
+    Fails if a column's data type is not supported by PyTorch.
+    """
+    torch = attempt_import_or_raise("torch", "torch")
+    numpy = attempt_import_or_raise("numpy", "numpy")
+    columns = [col.to_numpy(zero_copy_only=False) for col in batch.columns]
+    stacked = torch.tensor(numpy.column_stack(columns))
+    rows = list(stacked.unbind(dim=0))
+    return rows
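A standalone sanity check of the row-wise conversion (assumes torch and numpy are installed):

    import pyarrow as pa
    from lancedb.util import batch_to_tensor_rows

    batch = pa.RecordBatch.from_pydict({"x": [1.0, 2.0], "y": [3.0, 4.0]})
    rows = batch_to_tensor_rows(batch)
    print(rows[0])  # tensor([1., 3.], dtype=torch.float64) -- one tensor per row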
@@ -517,19 +517,36 @@ def test_ollama_embedding(tmp_path):
 @pytest.mark.skipif(
     os.environ.get("VOYAGE_API_KEY") is None, reason="VOYAGE_API_KEY not set"
 )
-def test_voyageai_embedding_function():
-    voyageai = get_registry().get("voyageai").create(name="voyage-3", max_retries=0)
+@pytest.mark.parametrize(
+    "model_name,expected_dims",
+    [
+        ("voyage-3", 1024),
+        ("voyage-4", 1024),
+        ("voyage-4-lite", 1024),
+        ("voyage-4-large", 1024),
+    ],
+)
+def test_voyageai_embedding_function(model_name, expected_dims, tmp_path):
+    """Integration test for VoyageAI text embedding models with real API calls."""
+    voyageai = get_registry().get("voyageai").create(name=model_name, max_retries=0)

     class TextModel(LanceModel):
         text: str = voyageai.SourceField()
         vector: Vector(voyageai.ndims()) = voyageai.VectorField()

     df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
-    db = lancedb.connect("~/lancedb")
+    db = lancedb.connect(tmp_path)
     tbl = db.create_table("test", schema=TextModel, mode="overwrite")

     tbl.add(df)
     assert len(tbl.to_pandas()["vector"][0]) == voyageai.ndims()
+    assert voyageai.ndims() == expected_dims, (
+        f"{model_name} should have {expected_dims} dimensions"
+    )
+
+    # Test search functionality
+    result = tbl.search("hello").limit(1).to_pandas()
+    assert result["text"][0] == "hello world"


 @pytest.mark.slow
@@ -163,9 +163,7 @@ async def test_explain_plan(table: AsyncTable):
         table.query().nearest_to_text("dog").nearest_to([0.1, 0.1]).explain_plan(True)
     )

-    assert "Vector Search Plan" in plan
     assert "KNNVectorDistance" in plan
-    assert "FTS Search Plan" in plan
     assert "LanceRead" in plan

@@ -438,11 +438,15 @@ def test_filter_with_splits(mem_db):
|
||||
row_count = permutation_tbl.count_rows()
|
||||
assert row_count == 67
|
||||
|
||||
data = permutation_tbl.search(None).to_arrow().to_pydict()
|
||||
# Verify the permutation table only contains row_id and split_id
|
||||
assert set(permutation_tbl.schema.names) == {"row_id", "split_id"}
|
||||
|
||||
row_ids = permutation_tbl.search(None).to_arrow().to_pydict()["row_id"]
|
||||
data = tbl.take_row_ids(row_ids).to_arrow().to_pydict()
|
||||
categories = data["category"]
|
||||
|
||||
# All categories should be A or B
|
||||
assert all(cat in ["A", "B"] for cat in categories)
|
||||
assert all(cat in ("A", "B") for cat in categories)
|
||||
|
||||
|
||||
def test_filter_with_shuffle(mem_db):
|
||||
@@ -660,23 +664,20 @@ def test_iter_basic(some_permutation: Permutation):
    expected_batches = (950 + batch_size - 1) // batch_size  # ceiling division
    assert len(batches) == expected_batches

    # Check that all batches are dicts (default python format)
    assert all(isinstance(batch, dict) for batch in batches)
    # Check that all batches are lists of dicts (default python format)
    assert all(isinstance(batch, list) for batch in batches)

    # Check that batches have the correct structure
    for batch in batches:
        assert "id" in batch
        assert "value" in batch
        assert isinstance(batch["id"], list)
        assert isinstance(batch["value"], list)
        assert "id" in batch[0]
        assert "value" in batch[0]

    # Check that all batches except the last have the correct size
    for batch in batches[:-1]:
        assert len(batch["id"]) == batch_size
        assert len(batch["value"]) == batch_size
        assert len(batch) == batch_size

    # Last batch might be smaller
    assert len(batches[-1]["id"]) <= batch_size
    assert len(batches[-1]) <= batch_size


def test_iter_skip_last_batch(some_permutation: Permutation):
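The edits in this hunk, and in the iterator hunks that follow, all track a single behavioral change: the default "python" format now yields each batch as a list of row dicts rather than one dict of column lists. A minimal sketch of the two shapes (values illustrative):

# Before: one dict per batch, keyed by column name.
old_batch = {"id": [0, 1, 2], "value": [10, 11, 12]}

# After: one list per batch, one dict per row.
new_batch = [
    {"id": 0, "value": 10},
    {"id": 1, "value": 11},
    {"id": 2, "value": 12},
]

assert len(new_batch) == len(old_batch["id"])
assert new_batch[0]["id"] == old_batch["id"][0]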
@@ -695,11 +696,11 @@ def test_iter_skip_last_batch(some_permutation: Permutation):
    if 950 % batch_size != 0:
        assert len(batches_without_skip) == num_full_batches + 1
        # Last batch should be smaller
        assert len(batches_without_skip[-1]["id"]) == 950 % batch_size
        assert len(batches_without_skip[-1]) == 950 % batch_size

    # All batches with skip_last_batch should be full size
    for batch in batches_with_skip:
        assert len(batch["id"]) == batch_size
        assert len(batch) == batch_size


def test_iter_different_batch_sizes(some_permutation: Permutation):

@@ -716,12 +717,12 @@ def test_iter_different_batch_sizes(some_permutation: Permutation):
    # Test with batch size equal to total rows
    single_batch = list(some_permutation.iter(950, skip_last_batch=False))
    assert len(single_batch) == 1
    assert len(single_batch[0]["id"]) == 950
    assert len(single_batch[0]) == 950

    # Test with batch size larger than total rows
    oversized_batch = list(some_permutation.iter(10000, skip_last_batch=False))
    assert len(oversized_batch) == 1
    assert len(oversized_batch[0]["id"]) == 950
    assert len(oversized_batch[0]) == 950


def test_dunder_iter(some_permutation: Permutation):

@@ -734,15 +735,13 @@ def test_dunder_iter(some_permutation: Permutation):

    # All batches should be full size
    for batch in batches:
        assert len(batch["id"]) == 100
        assert len(batch["value"]) == 100
        assert len(batch) == 100

    some_permutation = some_permutation.with_batch_size(400)
    batches = list(some_permutation)
    assert len(batches) == 2  # floor(950 / 400) since skip_last_batch=True
    for batch in batches:
        assert len(batch["id"]) == 400
        assert len(batch["value"]) == 400
        assert len(batch) == 400


def test_iter_with_different_formats(some_permutation: Permutation):

@@ -757,7 +756,7 @@ def test_iter_with_different_formats(some_permutation: Permutation):
    # Test with python format (default)
    python_perm = some_permutation.with_format("python")
    python_batches = list(python_perm.iter(batch_size, skip_last_batch=False))
    assert all(isinstance(batch, dict) for batch in python_batches)
    assert all(isinstance(batch, list) for batch in python_batches)

    # Test with pandas format
    pandas_perm = some_permutation.with_format("pandas")

@@ -776,8 +775,8 @@ def test_iter_with_column_selection(some_permutation: Permutation):

    # Check that batches only contain the id column
    for batch in batches:
        assert "id" in batch
        assert "value" not in batch
        assert "id" in batch[0]
        assert "value" not in batch[0]


def test_iter_with_column_rename(some_permutation: Permutation):

@@ -787,9 +786,9 @@ def test_iter_with_column_rename(some_permutation: Permutation):

    # Check that batches have the renamed column
    for batch in batches:
        assert "id" in batch
        assert "data" in batch
        assert "value" not in batch
        assert "id" in batch[0]
        assert "data" in batch[0]
        assert "value" not in batch[0]


def test_iter_with_limit_offset(some_permutation: Permutation):

@@ -808,14 +807,14 @@ def test_iter_with_limit_offset(some_permutation: Permutation):
    assert len(limit_batches) == 5

    no_skip = some_permutation.iter(101, skip_last_batch=False)
    row_100 = next(no_skip)["id"][100]
    row_100 = next(no_skip)[100]["id"]

    # Test with both limit and offset
    limited_perm = some_permutation.with_skip(100).with_take(300)
    limited_batches = list(limited_perm.iter(100, skip_last_batch=False))
    # Should have 3 batches (300 / 100)
    assert len(limited_batches) == 3
    assert limited_batches[0]["id"][0] == row_100
    assert limited_batches[0][0]["id"] == row_100


def test_iter_empty_permutation(mem_db):

@@ -838,7 +837,7 @@ def test_iter_single_row(mem_db):
    # With skip_last_batch=False, should get one batch
    batches = list(perm.iter(10, skip_last_batch=False))
    assert len(batches) == 1
    assert len(batches[0]["id"]) == 1
    assert len(batches[0]) == 1

    # With skip_last_batch=True, should skip the single row (since it's < batch_size)
    batches_skip = list(perm.iter(10, skip_last_batch=True))

@@ -856,8 +855,7 @@ def test_identity_permutation(mem_db):

    batches = list(permutation.iter(10, skip_last_batch=False))
    assert len(batches) == 1
    assert len(batches[0]["id"]) == 10
    assert len(batches[0]["value"]) == 10
    assert len(batches[0]) == 10

    permutation = permutation.remove_columns(["value"])
    assert permutation.num_columns == 1

@@ -900,10 +898,10 @@ def test_transform_fn(mem_db):
    py_result = list(permutation.with_format("python").iter(10, skip_last_batch=False))[
        0
    ]
    assert len(py_result) == 2
    assert len(py_result["id"]) == 10
    assert len(py_result["value"]) == 10
    assert isinstance(py_result, dict)
    assert len(py_result) == 10
    assert "id" in py_result[0]
    assert "value" in py_result[0]
    assert isinstance(py_result, list)

    try:
        import torch

@@ -911,9 +909,11 @@ def test_transform_fn(mem_db):
        torch_result = list(
            permutation.with_format("torch").iter(10, skip_last_batch=False)
        )[0]
        assert torch_result.shape == (2, 10)
        assert torch_result.dtype == torch.int64
        assert isinstance(torch_result, torch.Tensor)
        assert isinstance(torch_result, list)
        assert len(torch_result) == 10
        assert isinstance(torch_result[0], torch.Tensor)
        assert torch_result[0].shape == (2,)
        assert torch_result[0].dtype == torch.int64
    except ImportError:
        # Skip check if torch is not installed
        pass
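The "torch" format changes shape the same way: a batch is now a list of per-row tensors (each of length num_columns) instead of a single (num_columns, batch_size) tensor. A sketch mirroring the assertions above, assuming a two-column integer permutation:

# Illustrative: first batch of 10 rows in "torch" format.
batch = list(permutation.with_format("torch").iter(10, skip_last_batch=False))[0]
assert isinstance(batch, list) and len(batch) == 10   # one tensor per row
assert tuple(batch[0].shape) == (2,)                  # one value per column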
@@ -941,3 +941,113 @@ def test_custom_transform(mem_db):
    batch = batches[0]

    assert batch == pa.record_batch([range(10)], ["id"])


def test_getitems_basic(some_permutation: Permutation):
    """Test __getitems__ returns correct rows by offset."""
    result = some_permutation.__getitems__([0, 1, 2])
    assert isinstance(result, list)
    assert "id" in result[0]
    assert "value" in result[0]
    assert len(result) == 3


def test_getitems_single_index(some_permutation: Permutation):
    """Test __getitems__ with a single index."""
    result = some_permutation.__getitems__([0])
    assert len(result) == 1


def test_getitems_preserves_order(some_permutation: Permutation):
    """Test __getitems__ returns rows in the requested order."""
    # Get rows in forward order
    forward = some_permutation.__getitems__([0, 1, 2, 3, 4])
    # Get the same rows in reverse order
    reverse = some_permutation.__getitems__([4, 3, 2, 1, 0])

    assert [r["id"] for r in forward] == list(reversed([r["id"] for r in reverse]))
    assert [r["value"] for r in forward] == list(
        reversed([r["value"] for r in reverse])
    )


def test_getitems_non_contiguous(some_permutation: Permutation):
    """Test __getitems__ with non-contiguous indices."""
    result = some_permutation.__getitems__([0, 10, 50, 100, 500])
    assert len(result) == 5

    # Each id/value pair should match what we'd get individually
    for i, offset in enumerate([0, 10, 50, 100, 500]):
        single = some_permutation.__getitems__([offset])
        assert result[i]["id"] == single[0]["id"]
        assert result[i]["value"] == single[0]["value"]


def test_getitems_with_column_selection(some_permutation: Permutation):
    """Test __getitems__ respects column selection."""
    id_only = some_permutation.select_columns(["id"])
    result = id_only.__getitems__([0, 1, 2])
    assert "id" in result[0]
    assert "value" not in result[0]
    assert len(result) == 3


def test_getitems_with_column_rename(some_permutation: Permutation):
    """Test __getitems__ respects column renames."""
    renamed = some_permutation.rename_column("value", "data")
    result = renamed.__getitems__([0, 1])
    assert "data" in result[0]
    assert "value" not in result[0]
    assert len(result) == 2


def test_getitems_with_format(some_permutation: Permutation):
    """Test __getitems__ applies the transform function."""
    arrow_perm = some_permutation.with_format("arrow")
    result = arrow_perm.__getitems__([0, 1, 2])
    assert isinstance(result, pa.RecordBatch)
    assert result.num_rows == 3


def test_getitems_with_custom_transform(some_permutation: Permutation):
    """Test __getitems__ with a custom transform."""

    def transform(batch: pa.RecordBatch) -> list:
        return batch.column("id").to_pylist()

    custom = some_permutation.with_transform(transform)
    result = custom.__getitems__([0, 1, 2])
    assert isinstance(result, list)
    assert len(result) == 3


def test_getitems_identity_permutation(mem_db):
    """Test __getitems__ on an identity permutation."""
    tbl = mem_db.create_table(
        "test_table", pa.table({"id": range(10), "value": range(10)})
    )
    perm = Permutation.identity(tbl)

    result = perm.__getitems__([0, 5, 9])
    assert [r["id"] for r in result] == [0, 5, 9]
    assert [r["value"] for r in result] == [0, 5, 9]


def test_getitems_with_limit_offset(some_permutation: Permutation):
    """Test __getitems__ on a permutation with skip/take applied."""
    limited = some_permutation.with_skip(100).with_take(200)

    # Should be able to access offsets within the limited range
    result = limited.__getitems__([0, 1, 199])
    assert len(result) == 3

    # The first item of the limited permutation should match offset 100 of original
    full_result = some_permutation.__getitems__([100])
    limited_result = limited.__getitems__([0])
    assert limited_result[0]["id"] == full_result[0]["id"]


def test_getitems_invalid_offset(some_permutation: Permutation):
    """Test __getitems__ with an out-of-range offset raises an error."""
    with pytest.raises(Exception):
        some_permutation.__getitems__([999999])
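`__getitems__` is the batched-fetch hook that `torch.utils.data.DataLoader` probes for, which is what these tests are really pinning down; the dataloader test later in this diff exercises it end to end. A hedged sketch of that integration (batch size and column name are illustrative):

import torch

# `permutation` is a lancedb Permutation as in the tests above.
loader = torch.utils.data.DataLoader(permutation, batch_size=32, shuffle=True)
for batch in loader:
    # Default collation stacks the per-row dicts into a dict of tensors.
    ids = batch["id"]
    break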
@@ -2,7 +2,6 @@
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

import json
import sys
from datetime import date, datetime
from typing import List, Optional, Tuple

@@ -20,10 +19,6 @@ from pydantic import BaseModel
from pydantic import Field


@pytest.mark.skipif(
    sys.version_info < (3, 9),
    reason="using native type alias requires python3.9 or higher",
)
def test_pydantic_to_arrow():
    class StructModel(pydantic.BaseModel):
        a: str

@@ -83,10 +78,6 @@ def test_pydantic_to_arrow():
    assert schema == expect_schema


@pytest.mark.skipif(
    sys.version_info < (3, 10),
    reason="using | type syntax requires python3.10 or higher",
)
def test_optional_types_py310():
    class TestModel(pydantic.BaseModel):
        a: str | None

@@ -105,10 +96,233 @@ def test_optional_types_py310():
    assert schema == expect_schema


@pytest.mark.skipif(
    sys.version_info > (3, 8),
    reason="using native type alias requires python3.9 or higher",
)
def test_optional_structs():
    class SplitInfo(pydantic.BaseModel):
        start_frame: int
        end_frame: int

    class TestModel(pydantic.BaseModel):
        id: str
        split: SplitInfo | None = None

    schema = pydantic_to_schema(TestModel)

    expect_schema = pa.schema(
        [
            pa.field("id", pa.utf8(), False),
            pa.field(
                "split",
                pa.struct(
                    [
                        pa.field("start_frame", pa.int64(), False),
                        pa.field("end_frame", pa.int64(), False),
                    ]
                ),
                True,
            ),
        ]
    )
    assert schema == expect_schema


def test_optional_struct_list_py310():
    class SplitInfo(pydantic.BaseModel):
        start_frame: int
        end_frame: int

    class TestModel(pydantic.BaseModel):
        id: str
        splits: list[SplitInfo] | None = None

    schema = pydantic_to_schema(TestModel)

    expect_schema = pa.schema(
        [
            pa.field("id", pa.utf8(), False),
            pa.field(
                "splits",
                pa.list_(
                    pa.struct(
                        [
                            pa.field("start_frame", pa.int64(), False),
                            pa.field("end_frame", pa.int64(), False),
                        ]
                    )
                ),
                True,
            ),
        ]
    )
    assert schema == expect_schema


def test_nested_struct_list():
    class SplitInfo(pydantic.BaseModel):
        start_frame: int
        end_frame: int

    class TestModel(pydantic.BaseModel):
        id: str
        splits: list[SplitInfo]

    schema = pydantic_to_schema(TestModel)

    expect_schema = pa.schema(
        [
            pa.field("id", pa.utf8(), False),
            pa.field(
                "splits",
                pa.list_(
                    pa.struct(
                        [
                            pa.field("start_frame", pa.int64(), False),
                            pa.field("end_frame", pa.int64(), False),
                        ]
                    )
                ),
                False,
            ),
        ]
    )
    assert schema == expect_schema


def test_nested_struct_list_optional():
    class SplitInfo(pydantic.BaseModel):
        start_frame: int
        end_frame: int

    class TestModel(pydantic.BaseModel):
        id: str
        splits: Optional[list[SplitInfo]] = None

    schema = pydantic_to_schema(TestModel)

    expect_schema = pa.schema(
        [
            pa.field("id", pa.utf8(), False),
            pa.field(
                "splits",
                pa.list_(
                    pa.struct(
                        [
                            pa.field("start_frame", pa.int64(), False),
                            pa.field("end_frame", pa.int64(), False),
                        ]
                    )
                ),
                True,
            ),
        ]
    )
    assert schema == expect_schema


def test_nested_struct_list_optional_items():
    class SplitInfo(pydantic.BaseModel):
        start_frame: int
        end_frame: int

    class TestModel(pydantic.BaseModel):
        id: str
        splits: list[Optional[SplitInfo]]

    schema = pydantic_to_schema(TestModel)

    expect_schema = pa.schema(
        [
            pa.field("id", pa.utf8(), False),
            pa.field(
                "splits",
                pa.list_(
                    pa.field(
                        "item",
                        pa.struct(
                            [
                                pa.field("start_frame", pa.int64(), False),
                                pa.field("end_frame", pa.int64(), False),
                            ]
                        ),
                        True,
                    )
                ),
                False,
            ),
        ]
    )
    assert schema == expect_schema


def test_nested_struct_list_optional_container_and_items():
    class SplitInfo(pydantic.BaseModel):
        start_frame: int
        end_frame: int

    class TestModel(pydantic.BaseModel):
        id: str
        splits: Optional[list[Optional[SplitInfo]]] = None

    schema = pydantic_to_schema(TestModel)

    expect_schema = pa.schema(
        [
            pa.field("id", pa.utf8(), False),
            pa.field(
                "splits",
                pa.list_(
                    pa.field(
                        "item",
                        pa.struct(
                            [
                                pa.field("start_frame", pa.int64(), False),
                                pa.field("end_frame", pa.int64(), False),
                            ]
                        ),
                        True,
                    )
                ),
                True,
            ),
        ]
    )
    assert schema == expect_schema


def test_nested_struct_list_optional_items_pep604():
    class SplitInfo(pydantic.BaseModel):
        start_frame: int
        end_frame: int

    class TestModel(pydantic.BaseModel):
        id: str
        splits: list[SplitInfo | None]

    schema = pydantic_to_schema(TestModel)

    expect_schema = pa.schema(
        [
            pa.field("id", pa.utf8(), False),
            pa.field(
                "splits",
                pa.list_(
                    pa.field(
                        "item",
                        pa.struct(
                            [
                                pa.field("start_frame", pa.int64(), False),
                                pa.field("end_frame", pa.int64(), False),
                            ]
                        ),
                        True,
                    )
                ),
                False,
            ),
        ]
    )
    assert schema == expect_schema


def test_pydantic_to_arrow_py38():
    class StructModel(pydantic.BaseModel):
        a: str
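The rule these new tests pin down: `pydantic_to_schema` marks an Arrow field nullable exactly where the Pydantic annotation is optional, and it applies the rule independently to a list and to its items. A compact restatement (the `Clip` model is hypothetical, for illustration only):

import pydantic
from typing import Optional

class SplitInfo(pydantic.BaseModel):
    start_frame: int
    end_frame: int

class Clip(pydantic.BaseModel):  # hypothetical model
    required: list[SplitInfo]                     # -> non-nullable list, non-nullable items
    maybe_list: Optional[list[SplitInfo]] = None  # -> nullable list, non-nullable items
    maybe_items: list[Optional[SplitInfo]]        # -> non-nullable list, nullable items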
@@ -1499,3 +1499,30 @@ def test_search_empty_table(mem_db):
    # Search on empty table should return empty results, not crash
    results = table.search([1.0, 2.0]).limit(5).to_list()
    assert results == []


def test_fast_search(tmp_path):
    db = lancedb.connect(tmp_path)

    # Generate data matching the async test style
    vectors = pa.FixedShapeTensorArray.from_numpy_ndarray(
        np.random.rand(256, 32)
    ).storage

    table = db.create_table("test", pa.table({"vector": vectors}))

    # FIX: Pass arguments directly instead of using 'config=IvfPq(...)'
    table.create_index(vector_column_name="vector", num_partitions=1, num_sub_vectors=1)

    # Add data to ensure table has enough segments/rows
    table.add(pa.table({"vector": vectors}))

    q = [1.0] * 32

    # 1. Normal Search -> Should include "LanceScan" (Brute Force / Scan)
    plan = table.search(q).explain_plan(True)
    assert "LanceScan" in plan

    # 2. Fast Search -> Should NOT include "LanceScan" (Uses Index)
    plan = table.search(q).fast_search().explain_plan(True)
    assert "LanceScan" not in plan
@@ -8,7 +8,7 @@ import http.server
import json
import threading
import time
from unittest.mock import MagicMock
from unittest.mock import MagicMock, patch
import uuid
from packaging.version import Version

@@ -601,7 +601,6 @@ def test_head():
def test_query_sync_minimal():
    def handler(body):
        assert body == {
            "distance_type": "l2",
            "k": 10,
            "prefilter": True,
            "refine_factor": None,

@@ -685,7 +684,6 @@ def test_query_sync_maximal():
def test_query_sync_nprobes():
    def handler(body):
        assert body == {
            "distance_type": "l2",
            "k": 10,
            "prefilter": True,
            "fast_search": True,

@@ -715,7 +713,6 @@ def test_query_sync_nprobes():
def test_query_sync_no_max_nprobes():
    def handler(body):
        assert body == {
            "distance_type": "l2",
            "k": 10,
            "prefilter": True,
            "fast_search": True,

@@ -838,7 +835,6 @@ def test_query_sync_hybrid():
    else:
        # Vector query
        assert body == {
            "distance_type": "l2",
            "k": 42,
            "prefilter": True,
            "refine_factor": None,

@@ -1203,3 +1199,22 @@ async def test_header_provider_overrides_static_headers():
        extra_headers={"X-API-Key": "static-key", "X-Extra": "extra-value"},
    ) as db:
        await db.table_names()


@pytest.mark.parametrize("exception", [KeyboardInterrupt, SystemExit, GeneratorExit])
def test_background_loop_cancellation(exception):
    """Test that BackgroundEventLoop.run() cancels the future on interrupt."""
    from lancedb.background_loop import BackgroundEventLoop

    mock_future = MagicMock()
    mock_future.result.side_effect = exception()

    with (
        patch.object(BackgroundEventLoop, "__init__", return_value=None),
        patch("asyncio.run_coroutine_threadsafe", return_value=mock_future),
    ):
        loop = BackgroundEventLoop()
        loop.loop = MagicMock()
        with pytest.raises(exception):
            loop.run(None)
        mock_future.cancel.assert_called_once()

@@ -1880,8 +1880,13 @@ async def test_optimize_delete_unverified(tmp_db_async: AsyncConnection, tmp_pat
        ],
    )
    version = await table.version()
    path = tmp_path / "test.lance" / "_versions" / f"{version - 1}.manifest"
    assert version == 2

    # By removing a manifest file, we make the data files we just inserted unverified
    version_name = 18446744073709551615 - (version - 1)
    path = tmp_path / "test.lance" / "_versions" / f"{version_name:020}.manifest"
    os.remove(path)

    stats = await table.optimize(delete_unverified=False)
    assert stats.prune.old_versions_removed == 0
    stats = await table.optimize(

@@ -4,6 +4,7 @@
import pyarrow as pa
import pytest
from lancedb.util import tbl_to_tensor
from lancedb.permutation import Permutation

torch = pytest.importorskip("torch")

@@ -16,3 +17,26 @@ def test_table_dataloader(mem_db):
    for batch in dataloader:
        assert batch.size(0) == 1
        assert batch.size(1) == 10


def test_permutation_dataloader(mem_db):
    table = mem_db.create_table("test_table", pa.table({"a": range(1000)}))

    permutation = Permutation.identity(table)
    dataloader = torch.utils.data.DataLoader(permutation, batch_size=10, shuffle=True)
    for batch in dataloader:
        assert batch["a"].size(0) == 10

    permutation = permutation.with_format("torch")
    dataloader = torch.utils.data.DataLoader(permutation, batch_size=10, shuffle=True)
    for batch in dataloader:
        assert batch.size(0) == 10
        assert batch.size(1) == 1

    permutation = permutation.with_format("torch_col")
    dataloader = torch.utils.data.DataLoader(
        permutation, collate_fn=lambda x: x, batch_size=10, shuffle=True
    )
    for batch in dataloader:
        assert batch.size(0) == 1
        assert batch.size(1) == 10
python/python/tests/test_voyageai_embeddings.py (new file, 108 lines)
@@ -0,0 +1,108 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

"""Unit tests for VoyageAI embedding function.

These tests verify model registration and configuration without requiring API calls.
"""

import pytest
from unittest.mock import MagicMock, patch

from lancedb.embeddings import get_registry


@pytest.fixture(autouse=True)
def reset_voyageai_client():
    """Reset VoyageAI client before and after each test to avoid state pollution."""
    from lancedb.embeddings.voyageai import VoyageAIEmbeddingFunction

    VoyageAIEmbeddingFunction.client = None
    yield
    VoyageAIEmbeddingFunction.client = None


class TestVoyageAIModelRegistration:
    """Tests for VoyageAI model registration and configuration."""

    @pytest.fixture
    def mock_voyageai_client(self):
        """Mock VoyageAI client to avoid API calls."""
        with patch.dict("os.environ", {"VOYAGE_API_KEY": "test-key"}):
            with patch("lancedb.embeddings.voyageai.attempt_import_or_raise") as mock:
                mock_client = MagicMock()
                mock_voyageai = MagicMock()
                mock_voyageai.Client.return_value = mock_client
                mock.return_value = mock_voyageai
                yield mock_client

    def test_voyageai_registered(self):
        """Test that VoyageAI is registered in the embedding function registry."""
        registry = get_registry()
        assert registry.get("voyageai") is not None

    @pytest.mark.parametrize(
        "model_name,expected_dims",
        [
            # Voyage-4 series (all 1024 dims)
            ("voyage-4", 1024),
            ("voyage-4-lite", 1024),
            ("voyage-4-large", 1024),
            # Voyage-3 series
            ("voyage-3", 1024),
            ("voyage-3-lite", 512),
            # Domain-specific models
            ("voyage-finance-2", 1024),
            ("voyage-multilingual-2", 1024),
            ("voyage-law-2", 1024),
            ("voyage-code-2", 1536),
            # Multimodal
            ("voyage-multimodal-3", 1024),
        ],
    )
    def test_model_dimensions(self, model_name, expected_dims, mock_voyageai_client):
        """Test that each model returns the correct dimensions."""
        registry = get_registry()
        func = registry.get("voyageai").create(name=model_name)
        assert func.ndims() == expected_dims, (
            f"Model {model_name} should have {expected_dims} dimensions"
        )

    def test_unsupported_model_raises_error(self, mock_voyageai_client):
        """Test that unsupported models raise ValueError."""
        registry = get_registry()
        func = registry.get("voyageai").create(name="unsupported-model")
        with pytest.raises(ValueError, match="not supported"):
            func.ndims()

    @pytest.mark.parametrize(
        "model_name",
        [
            "voyage-4",
            "voyage-4-lite",
            "voyage-4-large",
        ],
    )
    def test_voyage4_models_are_text_models(self, model_name, mock_voyageai_client):
        """Test that voyage-4 models are classified as text models (not multimodal)."""
        registry = get_registry()
        func = registry.get("voyageai").create(name=model_name)
        assert not func._is_multimodal_model(model_name), (
            f"{model_name} should be a text model, not multimodal"
        )

    def test_voyage4_models_in_text_embedding_list(self, mock_voyageai_client):
        """Test that voyage-4 models are in the text_embedding_models list."""
        registry = get_registry()
        func = registry.get("voyageai").create(name="voyage-4")
        assert "voyage-4" in func.text_embedding_models
        assert "voyage-4-lite" in func.text_embedding_models
        assert "voyage-4-large" in func.text_embedding_models

    def test_voyage4_models_not_in_multimodal_list(self, mock_voyageai_client):
        """Test that voyage-4 models are NOT in the multimodal_embedding_models list."""
        registry = get_registry()
        func = registry.get("voyageai").create(name="voyage-4")
        assert "voyage-4" not in func.multimodal_embedding_models
        assert "voyage-4-lite" not in func.multimodal_embedding_models
        assert "voyage-4-large" not in func.multimodal_embedding_models
@@ -10,8 +10,7 @@ use arrow::{
use futures::stream::StreamExt;
use lancedb::arrow::SendableRecordBatchStream;
use pyo3::{
    exceptions::PyStopAsyncIteration, pyclass, pymethods, Bound, PyAny, PyObject, PyRef, PyResult,
    Python,
    exceptions::PyStopAsyncIteration, pyclass, pymethods, Bound, Py, PyAny, PyRef, PyResult, Python,
};
use pyo3_async_runtimes::tokio::future_into_py;

@@ -36,8 +35,11 @@ impl RecordBatchStream {
#[pymethods]
impl RecordBatchStream {
    #[getter]
    pub fn schema(&self, py: Python) -> PyResult<PyObject> {
        (*self.schema).clone().into_pyarrow(py)
    pub fn schema(&self, py: Python) -> PyResult<Py<PyAny>> {
        (*self.schema)
            .clone()
            .into_pyarrow(py)
            .map(|obj| obj.unbind())
    }

    pub fn __aiter__(self_: PyRef<'_, Self>) -> PyRef<'_, Self> {

@@ -53,7 +55,12 @@ impl RecordBatchStream {
                .next()
                .await
                .ok_or_else(|| PyStopAsyncIteration::new_err(""))?;
            Python::with_gil(|py| inner_next.infer_error()?.to_pyarrow(py))
            Python::attach(|py| {
                inner_next
                    .infer_error()?
                    .to_pyarrow(py)
                    .map(|obj| obj.unbind())
            })
        })
    }
}

@@ -12,7 +12,7 @@ use pyo3::{
    exceptions::{PyRuntimeError, PyValueError},
    pyclass, pyfunction, pymethods,
    types::{PyDict, PyDictMethods},
    Bound, FromPyObject, Py, PyAny, PyObject, PyRef, PyResult, Python,
    Bound, FromPyObject, Py, PyAny, PyRef, PyResult, Python,
};
use pyo3_async_runtimes::tokio::future_into_py;

@@ -114,7 +114,7 @@ impl Connection {
        data: Bound<'_, PyAny>,
        namespace: Vec<String>,
        storage_options: Option<HashMap<String, String>>,
        storage_options_provider: Option<PyObject>,
        storage_options_provider: Option<Py<PyAny>>,
        location: Option<String>,
    ) -> PyResult<Bound<'a, PyAny>> {
        let inner = self_.get_inner()?.clone();

@@ -152,7 +152,7 @@ impl Connection {
        schema: Bound<'_, PyAny>,
        namespace: Vec<String>,
        storage_options: Option<HashMap<String, String>>,
        storage_options_provider: Option<PyObject>,
        storage_options_provider: Option<Py<PyAny>>,
        location: Option<String>,
    ) -> PyResult<Bound<'a, PyAny>> {
        let inner = self_.get_inner()?.clone();

@@ -187,7 +187,7 @@ impl Connection {
        name: String,
        namespace: Vec<String>,
        storage_options: Option<HashMap<String, String>>,
        storage_options_provider: Option<PyObject>,
        storage_options_provider: Option<Py<PyAny>>,
        index_cache_size: Option<u32>,
        location: Option<String>,
    ) -> PyResult<Bound<'_, PyAny>> {

@@ -307,7 +307,7 @@ impl Connection {
            ..Default::default()
        };
        let response = inner.list_namespaces(request).await.infer_error()?;
        Python::with_gil(|py| -> PyResult<Py<PyDict>> {
        Python::attach(|py| -> PyResult<Py<PyDict>> {
            let dict = PyDict::new(py);
            dict.set_item("namespaces", response.namespaces)?;
            dict.set_item("page_token", response.page_token)?;

@@ -345,7 +345,7 @@ impl Connection {
            ..Default::default()
        };
        let response = inner.create_namespace(request).await.infer_error()?;
        Python::with_gil(|py| -> PyResult<Py<PyDict>> {
        Python::attach(|py| -> PyResult<Py<PyDict>> {
            let dict = PyDict::new(py);
            dict.set_item("properties", response.properties)?;
            Ok(dict.unbind())

@@ -386,7 +386,7 @@ impl Connection {
            ..Default::default()
        };
        let response = inner.drop_namespace(request).await.infer_error()?;
        Python::with_gil(|py| -> PyResult<Py<PyDict>> {
        Python::attach(|py| -> PyResult<Py<PyDict>> {
            let dict = PyDict::new(py);
            dict.set_item("properties", response.properties)?;
            dict.set_item("transaction_id", response.transaction_id)?;

@@ -413,7 +413,7 @@ impl Connection {
            ..Default::default()
        };
        let response = inner.describe_namespace(request).await.infer_error()?;
        Python::with_gil(|py| -> PyResult<Py<PyDict>> {
        Python::attach(|py| -> PyResult<Py<PyDict>> {
            let dict = PyDict::new(py);
            dict.set_item("properties", response.properties)?;
            Ok(dict.unbind())

@@ -443,7 +443,7 @@ impl Connection {
            ..Default::default()
        };
        let response = inner.list_tables(request).await.infer_error()?;
        Python::with_gil(|py| -> PyResult<Py<PyDict>> {
        Python::attach(|py| -> PyResult<Py<PyDict>> {
            let dict = PyDict::new(py);
            dict.set_item("tables", response.tables)?;
            dict.set_item("page_token", response.page_token)?;

@@ -40,7 +40,7 @@ impl<T> PythonErrorExt<T> for std::result::Result<T, LanceError> {
            request_id,
            source,
            status_code,
        } => Python::with_gil(|py| {
        } => Python::attach(|py| {
            let message = err.to_string();
            let http_err_cls = py
                .import(intern!(py, "lancedb.remote.errors"))?

@@ -75,7 +75,7 @@ impl<T> PythonErrorExt<T> for std::result::Result<T, LanceError> {
            max_read_failures,
            source,
            status_code,
        } => Python::with_gil(|py| {
        } => Python::attach(|py| {
            let cause_err = http_from_rust_error(
                py,
                source.as_ref(),

@@ -12,7 +12,7 @@ pub struct PyHeaderProvider {

impl Clone for PyHeaderProvider {
    fn clone(&self) -> Self {
        Python::with_gil(|py| Self {
        Python::attach(|py| Self {
            provider: self.provider.clone_ref(py),
        })
    }

@@ -25,7 +25,7 @@ impl PyHeaderProvider {

    /// Get headers from the Python provider (internal implementation)
    fn get_headers_internal(&self) -> Result<HashMap<String, String>, String> {
        Python::with_gil(|py| {
        Python::attach(|py| {
            // Call the get_headers method
            let result = self.provider.call_method0(py, "get_headers");


@@ -6,7 +6,7 @@ use std::sync::{Arc, Mutex};
use crate::{
    arrow::RecordBatchStream, connection::Connection, error::PythonErrorExt, table::Table,
};
use arrow::pyarrow::ToPyArrow;
use arrow::pyarrow::{PyArrowType, ToPyArrow};
use lancedb::{
    dataloader::permutation::{
        builder::{PermutationBuilder as LancePermutationBuilder, ShuffleStrategy},

@@ -281,7 +281,7 @@ impl PyPermutationReader {
        let reader = slf.reader.clone();
        future_into_py(slf.py(), async move {
            let schema = reader.output_schema(selection).await.infer_error()?;
            Python::with_gil(|py| schema.to_pyarrow(py))
            Python::attach(|py| schema.to_pyarrow(py).map(|obj| obj.unbind()))
        })
    }

@@ -328,4 +328,21 @@ impl PyPermutationReader {
            Ok(RecordBatchStream::new(stream))
        })
    }

    #[pyo3(signature = (indices, *, selection=None))]
    pub fn take_offsets<'py>(
        slf: PyRef<'py, Self>,
        indices: Vec<u64>,
        selection: Option<Bound<'py, PyAny>>,
    ) -> PyResult<Bound<'py, PyAny>> {
        let selection = Self::parse_selection(selection)?;
        let reader = slf.reader.clone();
        future_into_py(slf.py(), async move {
            let batch = reader
                .take_offsets(&indices, selection)
                .await
                .infer_error()?;
            Ok(PyArrowType(batch))
        })
    }
}

@@ -453,7 +453,7 @@ impl Query {
        let inner = self_.inner.clone();
        future_into_py(self_.py(), async move {
            let schema = inner.output_schema().await.infer_error()?;
            Python::with_gil(|py| schema.to_pyarrow(py))
            Python::attach(|py| schema.to_pyarrow(py).map(|obj| obj.unbind()))
        })
    }

@@ -532,7 +532,7 @@ impl TakeQuery {
        let inner = self_.inner.clone();
        future_into_py(self_.py(), async move {
            let schema = inner.output_schema().await.infer_error()?;
            Python::with_gil(|py| schema.to_pyarrow(py))
            Python::attach(|py| schema.to_pyarrow(py).map(|obj| obj.unbind()))
        })
    }

@@ -627,7 +627,7 @@ impl FTSQuery {
        let inner = self_.inner.clone();
        future_into_py(self_.py(), async move {
            let schema = inner.output_schema().await.infer_error()?;
            Python::with_gil(|py| schema.to_pyarrow(py))
            Python::attach(|py| schema.to_pyarrow(py).map(|obj| obj.unbind()))
        })
    }

@@ -806,7 +806,7 @@ impl VectorQuery {
        let inner = self_.inner.clone();
        future_into_py(self_.py(), async move {
            let schema = inner.output_schema().await.infer_error()?;
            Python::with_gil(|py| schema.to_pyarrow(py))
            Python::attach(|py| schema.to_pyarrow(py).map(|obj| obj.unbind()))
        })
    }


@@ -17,20 +17,20 @@ use pyo3::types::PyDict;
/// Internal wrapper around a Python object implementing StorageOptionsProvider
pub struct PyStorageOptionsProvider {
    /// The Python object implementing fetch_storage_options()
    inner: PyObject,
    inner: Py<PyAny>,
}

impl Clone for PyStorageOptionsProvider {
    fn clone(&self) -> Self {
        Python::with_gil(|py| Self {
        Python::attach(|py| Self {
            inner: self.inner.clone_ref(py),
        })
    }
}

impl PyStorageOptionsProvider {
    pub fn new(obj: PyObject) -> PyResult<Self> {
        Python::with_gil(|py| {
    pub fn new(obj: Py<PyAny>) -> PyResult<Self> {
        Python::attach(|py| {
            // Verify the object has a fetch_storage_options method
            if !obj.bind(py).hasattr("fetch_storage_options")? {
                return Err(pyo3::exceptions::PyTypeError::new_err(

@@ -60,7 +60,7 @@ impl StorageOptionsProvider for PyStorageOptionsProviderWrapper {
        let py_provider = self.py_provider.clone();

        tokio::task::spawn_blocking(move || {
            Python::with_gil(|py| {
            Python::attach(|py| {
                // Call the Python fetch_storage_options method
                let result = py_provider
                    .inner

@@ -119,7 +119,7 @@ impl StorageOptionsProvider for PyStorageOptionsProviderWrapper {
    }

    fn provider_id(&self) -> String {
        Python::with_gil(|py| {
        Python::attach(|py| {
            // Call provider_id() method on the Python object
            let obj = self.py_provider.inner.bind(py);
            obj.call_method0("provider_id")

@@ -143,7 +143,7 @@ impl std::fmt::Debug for PyStorageOptionsProviderWrapper {
/// This is the main entry point for converting Python StorageOptionsProvider objects
/// to Rust trait objects that can be used by the Lance ecosystem.
pub fn py_object_to_storage_options_provider(
    py_obj: PyObject,
    py_obj: Py<PyAny>,
) -> PyResult<Arc<dyn StorageOptionsProvider>> {
    let py_provider = PyStorageOptionsProvider::new(py_obj)?;
    Ok(Arc::new(PyStorageOptionsProviderWrapper::new(py_provider)))

@@ -287,7 +287,7 @@ impl Table {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            let schema = inner.schema().await.infer_error()?;
            Python::with_gil(|py| schema.to_pyarrow(py))
            Python::attach(|py| schema.to_pyarrow(py).map(|obj| obj.unbind()))
        })
    }

@@ -437,7 +437,7 @@ impl Table {
        future_into_py(self_.py(), async move {
            let stats = inner.index_stats(&index_name).await.infer_error()?;
            if let Some(stats) = stats {
                Python::with_gil(|py| {
                Python::attach(|py| {
                    let dict = PyDict::new(py);
                    dict.set_item("num_indexed_rows", stats.num_indexed_rows)?;
                    dict.set_item("num_unindexed_rows", stats.num_unindexed_rows)?;

@@ -467,7 +467,7 @@ impl Table {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            let stats = inner.stats().await.infer_error()?;
            Python::with_gil(|py| {
            Python::attach(|py| {
                let dict = PyDict::new(py);
                dict.set_item("total_bytes", stats.total_bytes)?;
                dict.set_item("num_rows", stats.num_rows)?;

@@ -502,6 +502,20 @@ impl Table {
        future_into_py(self_.py(), async move { inner.uri().await.infer_error() })
    }

    pub fn initial_storage_options(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            Ok(inner.initial_storage_options().await)
        })
    }

    pub fn latest_storage_options(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            inner.latest_storage_options().await.infer_error()
        })
    }

    pub fn __repr__(&self) -> String {
        match &self.inner {
            None => format!("ClosedTable({})", self.name),

@@ -521,7 +535,7 @@ impl Table {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            let versions = inner.list_versions().await.infer_error()?;
            let versions_as_dict = Python::with_gil(|py| {
            let versions_as_dict = Python::attach(|py| {
                versions
                    .iter()
                    .map(|v| {

@@ -872,7 +886,7 @@ impl Tags {
        let tags = inner.tags().await.infer_error()?;
        let res = tags.list().await.infer_error()?;

        Python::with_gil(|py| {
        Python::attach(|py| {
            let py_dict = PyDict::new(py);
            for (key, contents) in res {
                let value_dict = PyDict::new(py);
python/uv.lock (generated, new file, 5349 lines)
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
[package]
name = "lancedb"
version = "0.24.0"
version = "0.26.2"
edition.workspace = true
description = "LanceDB: A serverless, low-latency vector database for AI applications"
license.workspace = true

@@ -25,6 +25,7 @@ datafusion-catalog.workspace = true
datafusion-common.workspace = true
datafusion-execution.workspace = true
datafusion-expr.workspace = true
datafusion-physical-expr.workspace = true
datafusion-physical-plan.workspace = true
datafusion.workspace = true
object_store = { workspace = true }

@@ -251,8 +251,36 @@ impl CreateTableBuilder<false> {
    /// Execute the create table operation
    pub async fn execute(self) -> Result<Table> {
        let parent = self.parent.clone();
        let table = parent.create_table(self.request).await?;
        Ok(Table::new(table, parent))
        let embedding_registry = self.embedding_registry.clone();
        let request = self.into_request()?;
        Ok(Table::new_with_embedding_registry(
            parent.create_table(request).await?,
            parent,
            embedding_registry,
        ))
    }

    fn into_request(self) -> Result<CreateTableRequest> {
        if self.embeddings.is_empty() {
            return Ok(self.request);
        }

        let CreateTableData::Empty(table_def) = self.request.data else {
            unreachable!("CreateTableBuilder<false> should always have Empty data")
        };

        let schema = table_def.schema.clone();
        let empty_batch = arrow_array::RecordBatch::new_empty(schema.clone());

        let reader = Box::new(std::iter::once(Ok(empty_batch)).collect::<Vec<_>>());
        let reader = arrow_array::RecordBatchIterator::new(reader.into_iter(), schema);
        let with_embeddings = WithEmbeddings::new(reader, self.embeddings);
        let table_definition = with_embeddings.table_definition()?;

        Ok(CreateTableRequest {
            data: CreateTableData::Empty(table_definition),
            ..self.request
        })
    }
}

@@ -893,8 +921,15 @@ pub struct ConnectBuilder {
}

#[cfg(feature = "remote")]
const ENV_VARS_TO_STORAGE_OPTS: [(&str, &str); 1] =
    [("AZURE_STORAGE_ACCOUNT_NAME", "azure_storage_account_name")];
const ENV_VARS_TO_STORAGE_OPTS: [(&str, &str); 4] = [
    ("AZURE_STORAGE_ACCOUNT_NAME", "azure_storage_account_name"),
    ("AZURE_TENANT_ID", "azure_tenant_id"),
    ("AZURE_CLIENT_ID", "azure_client_id"),
    (
        "AZURE_FEDERATED_TOKEN_FILE",
        "azure_federated_token_file",
    ),
];

impl ConnectBuilder {
    /// Create a new [`ConnectOptions`] with the given database URI.
|
||||
let cloned_count = cloned_table.count_rows(None).await.unwrap();
|
||||
assert_eq!(source_count, cloned_count);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_create_empty_table_with_embeddings() {
|
||||
use crate::embeddings::{EmbeddingDefinition, EmbeddingFunction};
|
||||
use arrow_array::{
|
||||
Array, FixedSizeListArray, Float32Array, RecordBatch, RecordBatchIterator, StringArray,
|
||||
};
|
||||
use std::borrow::Cow;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct MockEmbedding {
|
||||
dim: usize,
|
||||
}
|
||||
|
||||
impl EmbeddingFunction for MockEmbedding {
|
||||
fn name(&self) -> &str {
|
||||
"test_embedding"
|
||||
}
|
||||
|
||||
fn source_type(&self) -> Result<Cow<'_, DataType>> {
|
||||
Ok(Cow::Owned(DataType::Utf8))
|
||||
}
|
||||
|
||||
fn dest_type(&self) -> Result<Cow<'_, DataType>> {
|
||||
Ok(Cow::Owned(DataType::new_fixed_size_list(
|
||||
DataType::Float32,
|
||||
self.dim as i32,
|
||||
true,
|
||||
)))
|
||||
}
|
||||
|
||||
fn compute_source_embeddings(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
|
||||
let len = source.len();
|
||||
let values = vec![1.0f32; len * self.dim];
|
||||
let values = Arc::new(Float32Array::from(values));
|
||||
let field = Arc::new(Field::new("item", DataType::Float32, true));
|
||||
Ok(Arc::new(FixedSizeListArray::new(
|
||||
field,
|
||||
self.dim as i32,
|
||||
values,
|
||||
None,
|
||||
)))
|
||||
}
|
||||
|
||||
fn compute_query_embeddings(&self, _input: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
let tmp_dir = tempdir().unwrap();
|
||||
let uri = tmp_dir.path().to_str().unwrap();
|
||||
let db = connect(uri).execute().await.unwrap();
|
||||
|
||||
let embed_func = Arc::new(MockEmbedding { dim: 128 });
|
||||
db.embedding_registry()
|
||||
.register("test_embedding", embed_func.clone())
|
||||
.unwrap();
|
||||
|
||||
let schema = Arc::new(Schema::new(vec![Field::new("name", DataType::Utf8, true)]));
|
||||
let ed = EmbeddingDefinition {
|
||||
source_column: "name".to_owned(),
|
||||
dest_column: Some("name_embedding".to_owned()),
|
||||
embedding_name: "test_embedding".to_owned(),
|
||||
};
|
||||
|
||||
let table = db
|
||||
.create_empty_table("test", schema)
|
||||
.mode(CreateTableMode::Overwrite)
|
||||
.add_embedding(ed)
|
||||
.unwrap()
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let table_schema = table.schema().await.unwrap();
|
||||
assert!(table_schema.column_with_name("name").is_some());
|
||||
assert!(table_schema.column_with_name("name_embedding").is_some());
|
||||
|
||||
let embedding_field = table_schema.field_with_name("name_embedding").unwrap();
|
||||
assert_eq!(
|
||||
embedding_field.data_type(),
|
||||
&DataType::new_fixed_size_list(DataType::Float32, 128, true)
|
||||
);
|
||||
|
||||
let input_schema = Arc::new(Schema::new(vec![Field::new("name", DataType::Utf8, true)]));
|
||||
let input_batch = RecordBatch::try_new(
|
||||
input_schema.clone(),
|
||||
vec![Arc::new(StringArray::from(vec![
|
||||
Some("Alice"),
|
||||
Some("Bob"),
|
||||
Some("Charlie"),
|
||||
]))],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let input_reader = Box::new(RecordBatchIterator::new(
|
||||
vec![Ok(input_batch)].into_iter(),
|
||||
input_schema,
|
||||
));
|
||||
|
||||
table.add(input_reader).execute().await.unwrap();
|
||||
|
||||
let results = table
|
||||
.query()
|
||||
.execute()
|
||||
.await
|
||||
.unwrap()
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(results.len(), 1);
|
||||
let batch = &results[0];
|
||||
assert_eq!(batch.num_rows(), 3);
|
||||
assert!(batch.column_by_name("name_embedding").is_some());
|
||||
|
||||
let embedding_col = batch
|
||||
.column_by_name("name_embedding")
|
||||
.unwrap()
|
||||
.as_any()
|
||||
.downcast_ref::<FixedSizeListArray>()
|
||||
.unwrap();
|
||||
assert_eq!(embedding_col.len(), 3);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ use crate::{
|
||||
split::{SplitStrategy, Splitter, SPLIT_ID_COLUMN},
|
||||
util::{rename_column, TemporaryDirectory},
|
||||
},
|
||||
query::{ExecutableQuery, QueryBase},
|
||||
query::{ExecutableQuery, QueryBase, Select},
|
||||
Error, Result, Table,
|
||||
};
|
||||
|
||||
@@ -27,6 +27,8 @@ pub const SRC_ROW_ID_COL: &str = "row_id";
|
||||
|
||||
pub const SPLIT_NAMES_CONFIG_KEY: &str = "split_names";
|
||||
|
||||
pub const DEFAULT_MEMORY_LIMIT: usize = 100 * 1024 * 1024;
|
||||
|
||||
/// Where to store the permutation table
|
||||
#[derive(Debug, Clone, Default)]
|
||||
enum PermutationDestination {
|
||||
@@ -167,10 +169,20 @@ impl PermutationBuilder {
|
||||
&self,
|
||||
data: SendableRecordBatchStream,
|
||||
) -> Result<SendableRecordBatchStream> {
|
||||
let memory_limit = std::env::var("LANCEDB_PERM_BUILDER_MEMORY_LIMIT")
|
||||
.unwrap_or_else(|_| DEFAULT_MEMORY_LIMIT.to_string())
|
||||
.parse::<usize>()
|
||||
.unwrap_or_else(|_| {
|
||||
log::error!(
|
||||
"Failed to parse LANCEDB_PERM_BUILDER_MEMORY_LIMIT, using default: {}",
|
||||
DEFAULT_MEMORY_LIMIT
|
||||
);
|
||||
DEFAULT_MEMORY_LIMIT
|
||||
});
|
||||
let ctx = SessionContext::new_with_config_rt(
|
||||
SessionConfig::default(),
|
||||
RuntimeEnvBuilder::new()
|
||||
.with_memory_limit(100 * 1024 * 1024, 1.0)
|
||||
.with_memory_limit(memory_limit, 1.0)
|
||||
.with_disk_manager_builder(
|
||||
DiskManagerBuilder::default()
|
||||
.with_mode(self.config.temp_dir.to_disk_manager_mode()),
|
||||
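The spill budget for permutation building is now tunable via `LANCEDB_PERM_BUILDER_MEMORY_LIMIT` (parsed as bytes), falling back to the 100 MiB `DEFAULT_MEMORY_LIMIT` when the variable is absent or unparseable. A sketch of overriding it from Python before a build (the 512 MiB figure is arbitrary):

import os

# Must be set before the permutation build starts; the value is in bytes.
os.environ["LANCEDB_PERM_BUILDER_MEMORY_LIMIT"] = str(512 * 1024 * 1024)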
@@ -232,7 +244,7 @@ impl PermutationBuilder {
|
||||
/// Builds the permutation table and stores it in the given database.
|
||||
pub async fn build(self) -> Result<Table> {
|
||||
// First pass, apply filter and load row ids
|
||||
let mut rows = self.base_table.query().with_row_id();
|
||||
let mut rows = self.base_table.query().select(Select::columns(&[ROW_ID]));
|
||||
|
||||
if let Some(filter) = &self.config.filter {
|
||||
rows = rows.only_if(filter);
|
||||
@@ -321,6 +333,47 @@ mod tests {
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_permutation_table_only_stores_row_id_and_split_id() {
|
||||
let temp_dir = tempfile::tempdir().unwrap();
|
||||
|
||||
let db = connect(temp_dir.path().to_str().unwrap())
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let initial_data = lance_datagen::gen_batch()
|
||||
.col("col_a", lance_datagen::array::step::<Int32Type>())
|
||||
.col("col_b", lance_datagen::array::step::<Int32Type>())
|
||||
.into_ldb_stream(RowCount::from(100), BatchCount::from(10));
|
||||
let data_table = db
|
||||
.create_table_streaming("base_tbl", initial_data)
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let permutation_table = PermutationBuilder::new(data_table.clone())
|
||||
.with_split_strategy(
|
||||
SplitStrategy::Sequential {
|
||||
sizes: SplitSizes::Percentages(vec![0.5, 0.5]),
|
||||
},
|
||||
None,
|
||||
)
|
||||
.with_filter("col_a > 57".to_string())
|
||||
.build()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let schema = permutation_table.schema().await.unwrap();
|
||||
let field_names: Vec<&str> = schema.fields().iter().map(|f| f.name().as_str()).collect();
|
||||
assert_eq!(
|
||||
field_names,
|
||||
vec!["row_id", "split_id"],
|
||||
"Permutation table should only contain row_id and split_id columns, but found: {:?}",
|
||||
field_names,
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_permutation_builder() {
|
||||
let temp_dir = tempfile::tempdir().unwrap();
|
||||
@@ -352,8 +405,6 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
println!("permutation_table: {:?}", permutation_table);
|
||||
|
||||
// Potentially brittle seed-dependent values below
|
||||
assert_eq!(permutation_table.count_rows(None).await.unwrap(), 330);
|
||||
assert_eq!(
|
||||
|
||||
@@ -39,6 +39,9 @@ pub struct PermutationReader {
|
||||
limit: Option<u64>,
|
||||
available_rows: u64,
|
||||
split: u64,
|
||||
// Cached map of offset to row id for the split
|
||||
#[allow(clippy::type_complexity)]
|
||||
offset_map: Arc<tokio::sync::Mutex<Option<Arc<HashMap<u64, u64>>>>>,
}

impl std::fmt::Debug for PermutationReader {
@@ -72,6 +75,7 @@ impl PermutationReader {
limit: None,
available_rows: 0,
split,
offset_map: Arc::new(tokio::sync::Mutex::new(None)),
};
slf.validate().await?;
// Calculate the number of available rows
@@ -157,6 +161,7 @@ impl PermutationReader {
let available_rows = self.verify_limit_offset(self.limit, Some(offset)).await?;
self.offset = Some(offset);
self.available_rows = available_rows;
self.offset_map = Arc::new(tokio::sync::Mutex::new(None));
Ok(self)
}

@@ -164,6 +169,7 @@ impl PermutationReader {
let available_rows = self.verify_limit_offset(Some(limit), self.offset).await?;
self.available_rows = available_rows;
self.limit = Some(limit);
self.offset_map = Arc::new(tokio::sync::Mutex::new(None));
Ok(self)
}

@@ -180,8 +186,9 @@ impl PermutationReader {
base_table: &Arc<dyn BaseTable>,
row_ids: RecordBatch,
selection: Select,
has_row_id: bool,
) -> Result<RecordBatch> {
let has_row_id = Self::has_row_id(&selection)?;

let num_rows = row_ids.num_rows();
let row_ids = row_ids
.column(0)
@@ -282,14 +289,13 @@ impl PermutationReader {
row_ids: DatasetRecordBatchStream,
selection: Select,
) -> Result<SendableRecordBatchStream> {
let has_row_id = Self::has_row_id(&selection)?;
let mut stream = row_ids
.map_err(Error::from)
.try_filter_map(move |batch| {
let selection = selection.clone();
let base_table = base_table.clone();
async move {
Self::load_batch(&base_table, batch, selection, has_row_id)
Self::load_batch(&base_table, batch, selection)
.await
.map(Some)
}
@@ -397,6 +403,82 @@ impl PermutationReader {
Self::row_ids_to_batches(self.base_table.clone(), row_ids, selection).await
}

/// If we are going to use `take` then we load the offset -> row id map once for the split and cache it
///
/// This method fetches the map with find-or-create semantics.
async fn get_offset_map(
&self,
permutation_table: &Arc<dyn BaseTable>,
) -> Result<Arc<HashMap<u64, u64>>> {
let mut offset_map_ref = self.offset_map.lock().await;
if let Some(offset_map) = &*offset_map_ref {
return Ok(offset_map.clone());
}
let mut offset_map = HashMap::new();
let mut row_ids_query = Table::from(permutation_table.clone())
.query()
.select(Select::Columns(vec![SRC_ROW_ID_COL.to_string()]))
.only_if(format!("{} = {}", SPLIT_ID_COLUMN, self.split));
if let Some(offset) = self.offset {
row_ids_query = row_ids_query.offset(offset as usize);
}
if let Some(limit) = self.limit {
row_ids_query = row_ids_query.limit(limit as usize);
}
let mut row_ids = row_ids_query.execute().await?;
while let Some(batch) = row_ids.try_next().await? {
let row_ids = batch
.column(0)
.as_primitive::<UInt64Type>()
.values()
.to_vec();
for (i, row_id) in row_ids.iter().enumerate() {
offset_map.insert(i as u64, *row_id);
}
}
let offset_map = Arc::new(offset_map);
*offset_map_ref = Some(offset_map.clone());
Ok(offset_map)
}
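
The find-or-create pattern above holds the async mutex across the await points of the table scan, so concurrent callers block until the first caller has populated the cache, after which every caller shares the same `Arc`. A minimal standalone sketch of the same pattern (the names here are illustrative, not from this crate):

use std::collections::HashMap;
use std::sync::Arc;

// Hypothetical cache: maps offsets to row ids, built at most once.
struct OffsetCache {
    slot: tokio::sync::Mutex<Option<Arc<HashMap<u64, u64>>>>,
}

impl OffsetCache {
    async fn get_or_build(&self) -> Arc<HashMap<u64, u64>> {
        let mut guard = self.slot.lock().await;
        if let Some(map) = &*guard {
            return map.clone(); // already built; cheap Arc clone
        }
        // Build the map while holding the lock so it happens only once.
        let map: Arc<HashMap<u64, u64>> =
            Arc::new((0u64..4).map(|i| (i, i * 10)).collect());
        *guard = Some(map.clone());
        map
    }
}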

pub async fn take_offsets(&self, offsets: &[u64], selection: Select) -> Result<RecordBatch> {
if let Some(permutation_table) = &self.permutation_table {
let offset_map = self.get_offset_map(permutation_table).await?;
let row_ids = offsets
.iter()
.map(|o| offset_map.get(o).copied().expect_ok().map_err(Error::from))
.collect::<Result<Vec<_>>>()?;
let row_ids = RecordBatch::try_new(
Arc::new(arrow_schema::Schema::new(vec![arrow_schema::Field::new(
"row_id",
arrow_schema::DataType::UInt64,
false,
)])),
vec![Arc::new(UInt64Array::from(row_ids))],
)?;
Self::load_batch(&self.base_table, row_ids, selection).await
} else {
let table = Table::from(self.base_table.clone());
let batches = table
.take_offsets(offsets.to_vec())
.select(selection.clone())
.execute()
.await?
.try_collect::<Vec<_>>()
.await?;
if let Some(first_batch) = batches.first() {
let schema = first_batch.schema();
let batch = arrow::compute::concat_batches(&schema, &batches)?;
Ok(batch)
} else {
Ok(RecordBatch::try_new(
self.output_schema(selection).await?,
vec![],
)?)
}
}
}
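
A hedged usage sketch of `take_offsets`, assuming a reader built with `try_from_tables` as in the tests further down; offsets index into the permutation order, not the base table:

// Sketch only: `reader` is a PermutationReader for split 0.
let batch = reader.take_offsets(&[0, 2, 4], Select::All).await?;
assert_eq!(batch.num_rows(), 3); // one row per requested offset, in order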

pub async fn output_schema(&self, selection: Select) -> Result<SchemaRef> {
let table = Table::from(self.base_table.clone());
table.query().select(selection).output_schema().await
@@ -543,4 +625,224 @@ mod tests {
check_batch(&mut stream, &row_ids[7..9]).await;
assert!(stream.try_next().await.unwrap().is_none());
}

/// Helper to create a base table and permutation table for take_offsets tests.
/// Returns (base_table, row_ids_table, shuffled_row_ids).
async fn setup_permutation_tables(num_rows: usize) -> (Table, Table, Vec<u64>) {
let base_table = lance_datagen::gen_batch()
.col("idx", lance_datagen::array::step::<Int32Type>())
.col("other_col", lance_datagen::array::step::<UInt64Type>())
.into_mem_table("tbl", RowCount::from(num_rows as u64), BatchCount::from(1))
.await;

let mut row_ids = collect_column::<UInt64Type>(&base_table, "_rowid").await;
row_ids.shuffle(&mut rand::rng());

let split_ids = UInt64Array::from_iter_values(std::iter::repeat_n(0u64, row_ids.len()));
let permutation_batch = RecordBatch::try_new(
Arc::new(Schema::new(vec![
Field::new("row_id", DataType::UInt64, false),
Field::new(SPLIT_ID_COLUMN, DataType::UInt64, false),
])),
vec![
Arc::new(UInt64Array::from(row_ids.clone())),
Arc::new(split_ids),
],
)
.unwrap();
let row_ids_table = virtual_table("row_ids", &permutation_batch).await;

(base_table, row_ids_table, row_ids)
}

#[tokio::test]
async fn test_take_offsets_with_permutation_table() {
let (base_table, row_ids_table, row_ids) = setup_permutation_tables(10).await;

let reader = PermutationReader::try_from_tables(
base_table.base_table().clone(),
row_ids_table.base_table().clone(),
0,
)
.await
.unwrap();

// Take specific offsets and verify the returned rows match the permutation order
let offsets = vec![0, 2, 4];
let batch = reader.take_offsets(&offsets, Select::All).await.unwrap();

assert_eq!(batch.num_rows(), 3);

let idx_values = batch
.column(0)
.as_primitive::<Int32Type>()
.values()
.to_vec();
let expected: Vec<i32> = offsets
.iter()
.map(|&o| row_ids[o as usize] as i32)
.collect();
assert_eq!(idx_values, expected);
}

#[tokio::test]
async fn test_take_offsets_preserves_order() {
let (base_table, row_ids_table, row_ids) = setup_permutation_tables(10).await;

let reader = PermutationReader::try_from_tables(
base_table.base_table().clone(),
row_ids_table.base_table().clone(),
0,
)
.await
.unwrap();

// Take offsets in reverse order and verify returned rows match that order
let offsets = vec![5, 3, 1, 0];
let batch = reader.take_offsets(&offsets, Select::All).await.unwrap();

assert_eq!(batch.num_rows(), 4);

let idx_values = batch
.column(0)
.as_primitive::<Int32Type>()
.values()
.to_vec();
let expected: Vec<i32> = offsets
.iter()
.map(|&o| row_ids[o as usize] as i32)
.collect();
assert_eq!(idx_values, expected);
}

#[tokio::test]
async fn test_take_offsets_with_column_selection() {
let (base_table, row_ids_table, row_ids) = setup_permutation_tables(10).await;

let reader = PermutationReader::try_from_tables(
base_table.base_table().clone(),
row_ids_table.base_table().clone(),
0,
)
.await
.unwrap();

let offsets = vec![1, 3];
let batch = reader
.take_offsets(&offsets, Select::Columns(vec!["idx".to_string()]))
.await
.unwrap();

assert_eq!(batch.num_rows(), 2);
assert_eq!(batch.num_columns(), 1);
assert_eq!(batch.schema().field(0).name(), "idx");

let idx_values = batch
.column(0)
.as_primitive::<Int32Type>()
.values()
.to_vec();
let expected: Vec<i32> = offsets
.iter()
.map(|&o| row_ids[o as usize] as i32)
.collect();
assert_eq!(idx_values, expected);
}

#[tokio::test]
async fn test_take_offsets_invalid_offset() {
let (base_table, row_ids_table, _) = setup_permutation_tables(5).await;

let reader = PermutationReader::try_from_tables(
base_table.base_table().clone(),
row_ids_table.base_table().clone(),
0,
)
.await
.unwrap();

// Offset 999 doesn't exist in the offset map
let result = reader.take_offsets(&[0, 999], Select::All).await;
assert!(result.is_err());
}

#[tokio::test]
async fn test_take_offsets_identity_reader() {
let base_table = lance_datagen::gen_batch()
.col("idx", lance_datagen::array::step::<Int32Type>())
.into_mem_table("tbl", RowCount::from(10), BatchCount::from(1))
.await;

let reader = PermutationReader::identity(base_table.base_table().clone()).await;

// With no permutation table, take_offsets uses the base table directly
let offsets = vec![0, 2, 4, 6];
let batch = reader.take_offsets(&offsets, Select::All).await.unwrap();

assert_eq!(batch.num_rows(), 4);

let idx_values = batch
.column(0)
.as_primitive::<Int32Type>()
.values()
.to_vec();
assert_eq!(idx_values, vec![0, 2, 4, 6]);
}

#[tokio::test]
async fn test_take_offsets_caches_offset_map() {
let (base_table, row_ids_table, row_ids) = setup_permutation_tables(10).await;

let reader = PermutationReader::try_from_tables(
base_table.base_table().clone(),
row_ids_table.base_table().clone(),
0,
)
.await
.unwrap();

// First call populates the cache
let batch1 = reader.take_offsets(&[0, 1], Select::All).await.unwrap();

// Second call should use the cached offset map and produce consistent results
let batch2 = reader.take_offsets(&[0, 1], Select::All).await.unwrap();

let values1 = batch1
.column(0)
.as_primitive::<Int32Type>()
.values()
.to_vec();
let values2 = batch2
.column(0)
.as_primitive::<Int32Type>()
.values()
.to_vec();
assert_eq!(values1, values2);

let expected: Vec<i32> = vec![row_ids[0] as i32, row_ids[1] as i32];
assert_eq!(values1, expected);
}

#[tokio::test]
async fn test_take_offsets_single_offset() {
let (base_table, row_ids_table, row_ids) = setup_permutation_tables(5).await;

let reader = PermutationReader::try_from_tables(
base_table.base_table().clone(),
row_ids_table.base_table().clone(),
0,
)
.await
.unwrap();

let batch = reader.take_offsets(&[2], Select::All).await.unwrap();

assert_eq!(batch.num_rows(), 1);
let idx_values = batch
.column(0)
.as_primitive::<Int32Type>()
.values()
.to_vec();
assert_eq!(idx_values, vec![row_ids[2] as i32]);
}
}

@@ -12,6 +12,8 @@ use datafusion_common::hash_utils::create_hashes;
use futures::{StreamExt, TryStreamExt};
use lance_arrow::SchemaExt;
use lance_core::ROW_ID;

use crate::{
arrow::{SendableRecordBatchStream, SimpleRecordBatchStream},
dataloader::{
@@ -360,11 +362,15 @@ impl Splitter {

pub fn project(&self, query: Query) -> Query {
match &self.strategy {
SplitStrategy::Calculated { calculation } => query.select(Select::Dynamic(vec![(
SPLIT_ID_COLUMN.to_string(),
calculation.clone(),
)])),
SplitStrategy::Hash { columns, .. } => query.select(Select::Columns(columns.clone())),
SplitStrategy::Calculated { calculation } => query.select(Select::Dynamic(vec![
(SPLIT_ID_COLUMN.to_string(), calculation.clone()),
(ROW_ID.to_string(), ROW_ID.to_string()),
])),
SplitStrategy::Hash { columns, .. } => {
let mut cols = columns.clone();
cols.push(ROW_ID.to_string());
query.select(Select::Columns(cols))
}
_ => query,
}
}

@@ -438,21 +438,26 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
);
}

if let Some(v) = options.0.get("account_name") {
headers.insert(
HeaderName::from_static("x-azure-storage-account-name"),
HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
message: format!("non-ascii storage account name '{}' provided", db_name),
})?,
);
}
if let Some(v) = options.0.get("azure_storage_account_name") {
headers.insert(
HeaderName::from_static("x-azure-storage-account-name"),
HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
message: format!("non-ascii storage account name '{}' provided", db_name),
})?,
);
// Map storage options to HTTP headers for Azure configuration.
const OPTION_TO_HEADER: &[(&str, &str)] = &[
("account_name", "x-azure-storage-account-name"),
("azure_storage_account_name", "x-azure-storage-account-name"),
("azure_tenant_id", "x-azure-tenant-id"),
("azure_client_id", "x-azure-client-id"),
(
"azure_federated_token_file",
"x-azure-federated-token-file",
),
];
for (opt_key, header_name) in OPTION_TO_HEADER {
if let Some(v) = options.get(opt_key) {
headers.insert(
HeaderName::from_static(header_name),
HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
message: format!("non-ascii value for '{}' provided", opt_key),
})?,
);
}
}
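
With this table-driven mapping, any supported key present in the connection's storage options becomes the matching `x-azure-*` request header. A small sketch of the caller side, using only the `RemoteOptions` API defined later in this diff:

use std::collections::HashMap;

// Sketch: options that the client translates into request headers.
let mut opts = HashMap::new();
opts.insert("azure_tenant_id".to_string(), "my-tenant".to_string());
opts.insert("azure_client_id".to_string(), "my-client".to_string());
let remote = RemoteOptions::new(opts);
// Each supported key becomes the matching x-azure-* header above.
assert_eq!(remote.get("azure_tenant_id").map(String::as_str), Some("my-tenant"));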

for (key, value) in &config.extra_headers {

@@ -792,11 +792,21 @@ impl RemoteOptions {
pub fn new(options: HashMap<String, String>) -> Self {
Self(options)
}

pub fn get(&self, key: &str) -> Option<&String> {
self.0.get(key)
}
}

impl From<StorageOptions> for RemoteOptions {
fn from(options: StorageOptions) -> Self {
let supported_opts = vec!["account_name", "azure_storage_account_name"];
let supported_opts = vec![
"account_name",
"azure_storage_account_name",
"azure_tenant_id",
"azure_client_id",
"azure_federated_token_file",
];
let mut filtered = HashMap::new();
for opt in supported_opts {
if let Some(v) = options.0.get(opt) {

@@ -1,6 +1,8 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

pub mod insert;

use crate::index::Index;
use crate::index::IndexStatistics;
use crate::query::{QueryFilter, QueryRequest, Select, VectorQueryRequest};
@@ -468,7 +470,9 @@ impl<S: HttpSend> RemoteTable<S> {
self.apply_query_params(&mut body, &query.base)?;

// Apply general parameters, before we dispatch based on number of query vectors.
body["distance_type"] = serde_json::json!(query.distance_type.unwrap_or_default());
if let Some(distance_type) = query.distance_type {
body["distance_type"] = serde_json::json!(distance_type);
}
// In 0.23.1 we migrated from `nprobes` to `minimum_nprobes` and `maximum_nprobes`.
// Old client / new server: since minimum_nprobes is missing, fallback to nprobes
// New client / old server: old server will only see nprobes, make sure to set both
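
A hedged sketch of the request body under that compatibility rule (the values are illustrative; compare the expected body in the test hunk below):

// Sketch: a new client sets all three fields so that an old server,
// which only understands `nprobes`, still honors the minimum.
let body = serde_json::json!({
    "nprobes": 20,          // read by old servers
    "minimum_nprobes": 20,  // read by new servers
    "maximum_nprobes": 20,
});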
@@ -1493,6 +1497,14 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
None
}

async fn initial_storage_options(&self) -> Option<HashMap<String, String>> {
None
}

async fn latest_storage_options(&self) -> Result<Option<HashMap<String, String>>> {
Ok(None)
}

async fn stats(&self) -> Result<TableStatistics> {
let request = self
.client
@@ -1508,6 +1520,21 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
})?;
Ok(stats)
}

async fn create_insert_exec(
&self,
input: Arc<dyn ExecutionPlan>,
write_params: lance::dataset::WriteParams,
) -> Result<Arc<dyn ExecutionPlan>> {
let overwrite = matches!(write_params.mode, lance::dataset::WriteMode::Overwrite);
Ok(Arc::new(insert::RemoteInsertExec::new(
self.name.clone(),
self.identifier.clone(),
self.client.clone(),
input,
overwrite,
)))
}
}

#[derive(Serialize)]
@@ -2230,7 +2257,6 @@ mod tests {
let body: serde_json::Value = serde_json::from_slice(body).unwrap();
let mut expected_body = serde_json::json!({
"prefilter": true,
"distance_type": "l2",
"nprobes": 20,
"minimum_nprobes": 20,
"maximum_nprobes": 20,

rust/lancedb/src/remote/table/insert.rs (new file, 438 lines)
@@ -0,0 +1,438 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

//! DataFusion ExecutionPlan for inserting data into remote LanceDB tables.

use std::any::Any;
use std::sync::{Arc, Mutex};

use arrow_array::{ArrayRef, RecordBatch, UInt64Array};
use arrow_ipc::CompressionType;
use arrow_schema::ArrowError;
use datafusion_common::{DataFusionError, Result as DataFusionResult};
use datafusion_execution::{SendableRecordBatchStream, TaskContext};
use datafusion_physical_expr::EquivalenceProperties;
use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
use datafusion_physical_plan::{DisplayAs, DisplayFormatType, ExecutionPlan, PlanProperties};
use futures::StreamExt;
use http::header::CONTENT_TYPE;

use crate::remote::client::{HttpSend, RestfulLanceDbClient, Sender};
use crate::remote::table::RemoteTable;
use crate::remote::ARROW_STREAM_CONTENT_TYPE;
use crate::table::datafusion::insert::COUNT_SCHEMA;
use crate::table::AddResult;
use crate::Error;

/// ExecutionPlan for inserting data into a remote LanceDB table.
///
/// This plan:
/// 1. Requires single partition (no parallel remote inserts yet)
/// 2. Streams data as Arrow IPC to `/v1/table/{id}/insert/` endpoint
/// 3. Stores AddResult for retrieval after execution
#[derive(Debug)]
pub struct RemoteInsertExec<S: HttpSend = Sender> {
table_name: String,
identifier: String,
client: RestfulLanceDbClient<S>,
input: Arc<dyn ExecutionPlan>,
overwrite: bool,
properties: PlanProperties,
add_result: Arc<Mutex<Option<AddResult>>>,
}

impl<S: HttpSend + 'static> RemoteInsertExec<S> {
/// Create a new RemoteInsertExec.
pub fn new(
table_name: String,
identifier: String,
client: RestfulLanceDbClient<S>,
input: Arc<dyn ExecutionPlan>,
overwrite: bool,
) -> Self {
let schema = COUNT_SCHEMA.clone();
let properties = PlanProperties::new(
EquivalenceProperties::new(schema),
datafusion_physical_plan::Partitioning::UnknownPartitioning(1),
datafusion_physical_plan::execution_plan::EmissionType::Final,
datafusion_physical_plan::execution_plan::Boundedness::Bounded,
);

Self {
table_name,
identifier,
client,
input,
overwrite,
properties,
add_result: Arc::new(Mutex::new(None)),
}
}

/// Get the add result after execution.
// TODO: this will be used when we wire this up to Table::add().
#[allow(dead_code)]
pub fn add_result(&self) -> Option<AddResult> {
self.add_result.lock().unwrap().clone()
}

fn stream_as_body(data: SendableRecordBatchStream) -> DataFusionResult<reqwest::Body> {
let options = arrow_ipc::writer::IpcWriteOptions::default()
.try_with_compression(Some(CompressionType::LZ4_FRAME))?;
let writer = arrow_ipc::writer::StreamWriter::try_new_with_options(
Vec::new(),
&data.schema(),
options,
)?;

let stream = futures::stream::try_unfold((data, writer), move |(mut data, mut writer)| {
async move {
match data.next().await {
Some(Ok(batch)) => {
writer.write(&batch)?;
let buffer = std::mem::take(writer.get_mut());
Ok(Some((buffer, (data, writer))))
}
Some(Err(e)) => Err(e),
None => {
if let Err(ArrowError::IpcError(_msg)) = writer.finish() {
// Will error if already closed.
return Ok(None);
};
let buffer = std::mem::take(writer.get_mut());
Ok(Some((buffer, (data, writer))))
}
}
}
});

Ok(reqwest::Body::wrap_stream(stream))
}
}
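
As a sanity check on the framing above, an LZ4-compressed IPC stream produced this way can be decoded with the standard Arrow stream reader. A minimal sketch (synchronous and in-memory; not part of this diff, and it assumes arrow-ipc is built with the lz4 feature):

use std::io::Cursor;

// Sketch: decode the bytes the request body stream would produce.
fn decode_ipc(bytes: Vec<u8>) -> Result<Vec<arrow_array::RecordBatch>, arrow_schema::ArrowError> {
    let reader = arrow_ipc::reader::StreamReader::try_new(Cursor::new(bytes), None)?;
    reader.collect() // each item is a Result<RecordBatch, ArrowError>
}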

impl<S: HttpSend + 'static> DisplayAs for RemoteInsertExec<S> {
fn fmt_as(&self, t: DisplayFormatType, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match t {
DisplayFormatType::Default | DisplayFormatType::Verbose => {
write!(
f,
"RemoteInsertExec: table={}, overwrite={}",
self.table_name, self.overwrite
)
}
DisplayFormatType::TreeRender => {
write!(f, "RemoteInsertExec")
}
}
}
}

impl<S: HttpSend + 'static> ExecutionPlan for RemoteInsertExec<S> {
fn name(&self) -> &str {
Self::static_name()
}

fn as_any(&self) -> &dyn Any {
self
}

fn properties(&self) -> &PlanProperties {
&self.properties
}

fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
vec![&self.input]
}

fn maintains_input_order(&self) -> Vec<bool> {
vec![false]
}

fn required_input_distribution(&self) -> Vec<datafusion_physical_plan::Distribution> {
// Until we have a separate commit endpoint, we need to do all inserts in a single partition
vec![datafusion_physical_plan::Distribution::SinglePartition]
}

fn benefits_from_input_partitioning(&self) -> Vec<bool> {
vec![false]
}

fn with_new_children(
self: Arc<Self>,
children: Vec<Arc<dyn ExecutionPlan>>,
) -> DataFusionResult<Arc<dyn ExecutionPlan>> {
if children.len() != 1 {
return Err(DataFusionError::Internal(
"RemoteInsertExec requires exactly one child".to_string(),
));
}
Ok(Arc::new(Self::new(
self.table_name.clone(),
self.identifier.clone(),
self.client.clone(),
children[0].clone(),
self.overwrite,
)))
}

fn execute(
&self,
partition: usize,
context: Arc<TaskContext>,
) -> DataFusionResult<SendableRecordBatchStream> {
if partition != 0 {
return Err(DataFusionError::Internal(
"RemoteInsertExec only supports single partition execution".to_string(),
));
}

let input_stream = self.input.execute(0, context)?;
let client = self.client.clone();
let identifier = self.identifier.clone();
let overwrite = self.overwrite;
let add_result = self.add_result.clone();
let table_name = self.table_name.clone();

let stream = futures::stream::once(async move {
let mut request = client
.post(&format!("/v1/table/{}/insert/", identifier))
.header(CONTENT_TYPE, ARROW_STREAM_CONTENT_TYPE);

if overwrite {
request = request.query(&[("mode", "overwrite")]);
}

let body = Self::stream_as_body(input_stream)?;
let request = request.body(body);

let (request_id, response) = client
.send(request)
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;

let response =
RemoteTable::<Sender>::handle_table_not_found(&table_name, response, &request_id)
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;

let response = client
.check_response(&request_id, response)
.await
.map_err(|e| DataFusionError::External(Box::new(e)))?;

let body_text = response.text().await.map_err(|e| {
DataFusionError::External(Box::new(Error::Http {
source: Box::new(e),
request_id: request_id.clone(),
status_code: None,
}))
})?;

let parsed_result = if body_text.trim().is_empty() {
// Backward compatible with old servers
AddResult { version: 0 }
} else {
serde_json::from_str(&body_text).map_err(|e| {
DataFusionError::External(Box::new(Error::Http {
source: format!("Failed to parse add response: {}", e).into(),
request_id: request_id.clone(),
status_code: None,
}))
})?
};

{
let mut res_lock = add_result.lock().map_err(|_| {
DataFusionError::Execution("Failed to acquire lock for add_result".to_string())
})?;
*res_lock = Some(parsed_result);
}

// Return a single batch with count 0 (actual count is tracked in add_result)
let count_array: ArrayRef = Arc::new(UInt64Array::from(vec![0u64]));
let batch = RecordBatch::try_new(COUNT_SCHEMA.clone(), vec![count_array])?;
Ok::<_, DataFusionError>(batch)
});

Ok(Box::pin(RecordBatchStreamAdapter::new(
COUNT_SCHEMA.clone(),
stream,
)))
}
}

#[cfg(test)]
mod tests {
use arrow_array::record_batch;
use arrow_schema::{DataType, Field, Schema as ArrowSchema};
use datafusion::prelude::SessionContext;
use datafusion_catalog::MemTable;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

use crate::remote::ARROW_STREAM_CONTENT_TYPE;
use crate::table::datafusion::BaseTableAdapter;
use crate::Table;

fn schema_json() -> &'static str {
r#"{"fields": [{"name": "id", "type": {"type": "int32"}, "nullable": true}]}"#
}

#[tokio::test]
async fn test_remote_insert_exec_execute_empty() {
let request_count = Arc::new(AtomicUsize::new(0));
let request_count_clone = request_count.clone();

let table = Table::new_with_handler("my_table", move |request| {
let path = request.url().path();

if path == "/v1/table/my_table/describe/" {
// Return schema for BaseTableAdapter::try_new
return http::Response::builder()
.status(200)
.body(format!(r#"{{"version": 1, "schema": {}}}"#, schema_json()))
.unwrap();
}

if path == "/v1/table/my_table/insert/" {
assert_eq!(request.method(), "POST");
assert_eq!(
request.headers().get("Content-Type").unwrap(),
ARROW_STREAM_CONTENT_TYPE
);
request_count_clone.fetch_add(1, Ordering::SeqCst);

return http::Response::builder()
.status(200)
.body(r#"{"version": 2}"#.to_string())
.unwrap();
}

panic!("Unexpected request path: {}", path);
});

let schema = Arc::new(ArrowSchema::new(vec![Field::new(
"id",
DataType::Int32,
true,
)]));

// Create empty MemTable (no batches)
let source_table = MemTable::try_new(schema, vec![vec![]]).unwrap();

let ctx = SessionContext::new();

// Register the remote table as insert target
let provider = BaseTableAdapter::try_new(table.base_table().clone())
.await
.unwrap();
ctx.register_table("my_table", Arc::new(provider)).unwrap();

// Register empty source
ctx.register_table("empty_source", Arc::new(source_table))
.unwrap();

// Execute the INSERT
ctx.sql("INSERT INTO my_table SELECT * FROM empty_source")
.await
.unwrap()
.collect()
.await
.unwrap();

// Verify: should have made exactly one HTTP request even with empty input
assert_eq!(request_count.load(Ordering::SeqCst), 1);
}

#[tokio::test]
async fn test_remote_insert_exec_multi_partition() {
let request_count = Arc::new(AtomicUsize::new(0));
let request_count_clone = request_count.clone();

let table = Table::new_with_handler("my_table", move |request| {
let path = request.url().path();

if path == "/v1/table/my_table/describe/" {
// Return schema for BaseTableAdapter::try_new
return http::Response::builder()
.status(200)
.body(format!(r#"{{"version": 1, "schema": {}}}"#, schema_json()))
.unwrap();
}

if path == "/v1/table/my_table/insert/" {
assert_eq!(request.method(), "POST");
assert_eq!(
request.headers().get("Content-Type").unwrap(),
ARROW_STREAM_CONTENT_TYPE
);
request_count_clone.fetch_add(1, Ordering::SeqCst);

return http::Response::builder()
.status(200)
.body(r#"{"version": 2}"#.to_string())
.unwrap();
}

panic!("Unexpected request path: {}", path);
});

let schema = Arc::new(ArrowSchema::new(vec![Field::new(
"id",
DataType::Int32,
true,
)]));

// Create MemTable with multiple partitions and multiple batches
let source_table = MemTable::try_new(
schema,
vec![
// Partition 0
vec![
record_batch!(("id", Int32, [1, 2])).unwrap(),
record_batch!(("id", Int32, [3, 4])).unwrap(),
],
// Partition 1
vec![record_batch!(("id", Int32, [5, 6, 7])).unwrap()],
// Partition 2
vec![record_batch!(("id", Int32, [8])).unwrap()],
],
)
.unwrap();

let ctx = SessionContext::new();

// Register the remote table as insert target
let provider = BaseTableAdapter::try_new(table.base_table().clone())
.await
.unwrap();
ctx.register_table("my_table", Arc::new(provider)).unwrap();

// Register multi-partition source
ctx.register_table("multi_partition_source", Arc::new(source_table))
.unwrap();

// Get the physical plan and verify it includes a repartition to 1
let df = ctx
.sql("INSERT INTO my_table SELECT * FROM multi_partition_source")
.await
.unwrap();
let plan = df.clone().create_physical_plan().await.unwrap();
let plan_str = datafusion::physical_plan::displayable(plan.as_ref())
.indent(true)
.to_string();

// The plan should include a CoalescePartitionsExec to merge partitions
assert!(
plan_str.contains("CoalescePartitionsExec"),
"Expected CoalescePartitionsExec in plan:\n{}",
plan_str
);

// Execute the INSERT
df.collect().await.unwrap();

// Verify: should have made exactly one HTTP request despite multiple input partitions
assert_eq!(request_count.load(Ordering::SeqCst), 1);
}
}
@@ -16,16 +16,12 @@ use datafusion_physical_plan::union::UnionExec;
use datafusion_physical_plan::ExecutionPlan;
use futures::{FutureExt, StreamExt, TryFutureExt};
use lance::dataset::builder::DatasetBuilder;
use lance::dataset::cleanup::RemovalStats;
use lance::dataset::optimize::{compact_files, CompactionMetrics, IndexRemapperOptions};
use lance::dataset::scanner::Scanner;
pub use lance::dataset::ColumnAlteration;
pub use lance::dataset::NewColumnTransform;
pub use lance::dataset::ReadParams;
pub use lance::dataset::Version;
use lance::dataset::{
InsertBuilder, UpdateBuilder as LanceUpdateBuilder, WhenMatched, WriteMode, WriteParams,
};
use lance::dataset::{InsertBuilder, WhenMatched, WriteMode, WriteParams};
use lance::dataset::{MergeInsertBuilder as LanceMergeInsertBuilder, WhenNotMatchedBySource};
use lance::index::vector::utils::infer_vector_dim;
use lance::index::vector::VectorIndexParams;
@@ -48,7 +44,6 @@ use lance_namespace::models::{
use lance_namespace::LanceNamespace;
use lance_table::format::Manifest;
use lance_table::io::commit::ManifestNamingScheme;
use log::info;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::format;
@@ -79,18 +74,25 @@ use self::merge::MergeInsertBuilder;

pub mod datafusion;
pub(crate) mod dataset;
pub mod delete;
pub mod merge;
pub mod optimize;
pub mod schema_evolution;
pub mod update;

use crate::index::waiter::wait_for_index;
pub use chrono::Duration;
pub use delete::DeleteResult;
use futures::future::{join_all, Either};
pub use lance::dataset::optimize::CompactionOptions;
pub use lance::dataset::refs::{TagContents, Tags as LanceTags};
pub use lance::dataset::scanner::DatasetRecordBatchStream;
use lance::dataset::statistics::DatasetStatisticsExt;
use lance_index::frag_reuse::FRAG_REUSE_INDEX_NAME;
pub use lance_index::optimize::OptimizeOptions;
pub use optimize::{CompactionOptions, OptimizeAction, OptimizeStats};
pub use schema_evolution::{AddColumnsResult, AlterColumnsResult, DropColumnsResult};
use serde_with::skip_serializing_none;
pub use update::{UpdateBuilder, UpdateResult};

/// Defines the type of column
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -165,85 +167,6 @@ impl TableDefinition {
}
}

/// Optimize the dataset.
///
/// Similar to `VACUUM` in PostgreSQL, it offers different options to
/// optimize different parts of the table on disk.
///
/// By default, it optimizes everything, as [`OptimizeAction::All`].
pub enum OptimizeAction {
/// Run all optimizations with default values
All,
/// Compacts files in the dataset
///
/// LanceDb treats data files as read-only for performance and safe concurrency. Every time
/// new data is added it will be added into new files. Small files
/// can hurt both read and write performance. Compaction will merge small files
/// into larger ones.
///
/// All operations that modify data (add, delete, update, merge insert, etc.) will create
/// new files. If these operations are run frequently then compaction should run frequently.
///
/// If these operations are never run (search only) then compaction is not necessary.
Compact {
options: CompactionOptions,
remap_options: Option<Arc<dyn IndexRemapperOptions>>,
},
/// Prune old versions of the dataset
///
/// Every change in LanceDb is additive. When data is removed from a dataset a new version is
/// created that doesn't contain the removed data. However, the old version, which does contain
/// the removed data, is left in place. This is necessary for consistency and concurrency and
/// also enables time travel functionality like the ability to check out an older version of the
/// dataset to undo changes.
///
/// Over time, these old versions can consume a lot of disk space. The prune operation will
/// remove versions of the dataset that are older than a certain age. This will free up the
/// space used by that old data.
///
/// Once a version is pruned it can no longer be checked out.
Prune {
/// The duration of time to keep versions of the dataset.
older_than: Option<Duration>,
/// Because they may be part of an in-progress transaction, files newer than 7 days old are not deleted by default.
/// If you are sure that there are no in-progress transactions, then you can set this to true to delete all files older than `older_than`.
delete_unverified: Option<bool>,
/// If true, an error will be returned if there are any old versions that are still tagged.
error_if_tagged_old_versions: Option<bool>,
},
/// Optimize the indices
///
/// This operation optimizes all indices in the table. When new data is added to LanceDb
/// it is not added to the indices. However, it can still turn up in searches because the search
/// function will scan both the indexed data and the unindexed data in parallel. Over time, the
/// unindexed data can become large enough that the search performance is slow. This operation
/// will add the unindexed data to the indices without rerunning the full index creation process.
///
/// Optimizing an index is faster than re-training the index but it does not typically adjust the
/// underlying model relied upon by the index. This can eventually lead to poor search accuracy
/// and so users may still want to occasionally retrain the index after adding a large amount of
/// data.
///
/// For example, when using IVF, an index will create clusters. Optimizing an index assigns unindexed
/// data to the existing clusters, but it does not move the clusters or create new clusters.
Index(OptimizeOptions),
}

impl Default for OptimizeAction {
fn default() -> Self {
Self::All
}
}
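
A hedged usage sketch, assuming the `Table::optimize` wrapper that delegates to the `BaseTable::optimize` implementation shown later in this diff:

// Sketch: run compaction, pruning, and index optimization with defaults,
// as OptimizeAction::All does. Assumes `table: lancedb::Table` is open.
let stats = table.optimize(OptimizeAction::default()).await?;
if let Some(compaction) = stats.compaction {
    println!("fragments removed: {}", compaction.fragments_removed);
}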

/// Statistics about the optimization.
pub struct OptimizeStats {
/// Stats of the file compaction.
pub compaction: Option<CompactionMetrics>,

/// Stats of the version pruning
pub prune: Option<RemovalStats>,
}

/// Describes what happens when a vector either contains NaN or
/// does not have enough values
#[derive(Clone, Debug, Default)]
@@ -327,72 +250,6 @@ impl<T: IntoArrow> AddDataBuilder<T> {
}
}

/// A builder for configuring a [`Table::update`] operation
#[derive(Debug, Clone)]
pub struct UpdateBuilder {
parent: Arc<dyn BaseTable>,
pub(crate) filter: Option<String>,
pub(crate) columns: Vec<(String, String)>,
}

impl UpdateBuilder {
fn new(parent: Arc<dyn BaseTable>) -> Self {
Self {
parent,
filter: None,
columns: Vec::new(),
}
}

/// Limits the update operation to rows matching the given filter
///
/// If a row does not match the filter then it will be left unchanged.
pub fn only_if(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}

/// Specifies a column to update
///
/// This method may be called multiple times to update multiple columns
///
/// The `update_expr` should be an SQL expression explaining how to calculate
/// the new value for the column. The expression will be evaluated against the
/// previous row's value.
///
/// # Examples
///
/// ```
/// # use lancedb::Table;
/// # async fn doctest_helper(tbl: Table) {
/// let mut operation = tbl.update();
/// // Increments the `bird_count` value by 1
/// operation = operation.column("bird_count", "bird_count + 1");
/// operation.execute().await.unwrap();
/// # }
/// ```
pub fn column(
mut self,
column_name: impl Into<String>,
update_expr: impl Into<String>,
) -> Self {
self.columns.push((column_name.into(), update_expr.into()));
self
}

/// Executes the update operation.
/// Returns the update result
pub async fn execute(self) -> Result<UpdateResult> {
if self.columns.is_empty() {
Err(Error::InvalidInput {
message: "at least one column must be specified in an update operation".to_string(),
})
} else {
self.parent.clone().update(self).await
}
}
}

/// Filters that can be used to limit the rows returned by a query
pub enum Filter {
/// A SQL filter string
@@ -426,17 +283,6 @@ pub trait Tags: Send + Sync {
async fn update(&mut self, tag: &str, version: u64) -> Result<()>;
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct UpdateResult {
#[serde(default)]
pub rows_updated: u64,
/// The commit version associated with the operation.
/// A version of `0` indicates compatibility with legacy servers that do not return
/// a commit version.
#[serde(default)]
pub version: u64,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct AddResult {
/// The commit version associated with the operation.
@@ -446,15 +292,6 @@ pub struct AddResult {
pub version: u64,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct DeleteResult {
/// The commit version associated with the operation.
/// A version of `0` indicates compatibility with legacy servers that do not return
/// a commit version.
#[serde(default)]
pub version: u64,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct MergeResult {
/// The commit version associated with the operation.
@@ -480,33 +317,6 @@ pub struct MergeResult {
pub num_attempts: u32,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct AddColumnsResult {
/// The commit version associated with the operation.
/// A version of `0` indicates compatibility with legacy servers that do not return
/// a commit version.
#[serde(default)]
pub version: u64,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct AlterColumnsResult {
/// The commit version associated with the operation.
/// A version of `0` indicates compatibility with legacy servers that do not return
/// a commit version.
#[serde(default)]
pub version: u64,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct DropColumnsResult {
/// The commit version associated with the operation.
/// A version of `0` indicates compatibility with legacy servers that do not return
/// a commit version.
#[serde(default)]
pub version: u64,
}

/// A trait for anything "table-like". This is used for both native tables (which target
/// Lance datasets) and remote tables (which target LanceDB cloud)
///
@@ -611,7 +421,17 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {
/// Get the table URI (storage location)
async fn uri(&self) -> Result<String>;
/// Get the storage options used when opening this table, if any.
#[deprecated(since = "0.25.0", note = "Use initial_storage_options() instead")]
async fn storage_options(&self) -> Option<HashMap<String, String>>;
/// Get the initial storage options that were passed in when opening this table.
///
/// For dynamically refreshed options (e.g., credential vending), use [`Self::latest_storage_options`].
async fn initial_storage_options(&self) -> Option<HashMap<String, String>>;
/// Get the latest storage options, refreshing from the provider if configured.
///
/// Returns `Ok(Some(options))` if storage options are available (static or refreshed),
/// `Ok(None)` if no storage options were configured, or `Err(...)` if the refresh failed.
async fn latest_storage_options(&self) -> Result<Option<HashMap<String, String>>>;
/// Poll until the columns are fully indexed. Will return Error::Timeout if the columns
/// are not fully indexed within the timeout.
async fn wait_for_index(
@@ -621,6 +441,19 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {
) -> Result<()>;
/// Get statistics on the table
async fn stats(&self) -> Result<TableStatistics>;
/// Create an ExecutionPlan for inserting data into the table.
///
/// This is used by the DataFusion TableProvider implementation to support
/// INSERT INTO statements.
async fn create_insert_exec(
&self,
_input: Arc<dyn datafusion_physical_plan::ExecutionPlan>,
_write_params: WriteParams,
) -> Result<Arc<dyn datafusion_physical_plan::ExecutionPlan>> {
Err(Error::NotSupported {
message: "create_insert_exec not implemented".to_string(),
})
}
}
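
A hedged sketch of how the INSERT INTO path is exercised end to end (it mirrors the insert.rs tests above; `table` is any lancedb::Table, and DataFusion's INSERT VALUES support is assumed):

let ctx = datafusion::prelude::SessionContext::new();
let provider = BaseTableAdapter::try_new(table.base_table().clone()).await?;
ctx.register_table("tbl", std::sync::Arc::new(provider))?;
ctx.sql("INSERT INTO tbl VALUES (1), (2)").await?.collect().await?;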

/// A Table is a collection of strongly typed Rows.
@@ -1328,10 +1161,32 @@ impl Table {
/// Get the storage options used when opening this table, if any.
///
/// Warning: This is an internal API and the return value is subject to change.
#[deprecated(since = "0.25.0", note = "Use initial_storage_options() instead")]
pub async fn storage_options(&self) -> Option<HashMap<String, String>> {
#[allow(deprecated)]
self.inner.storage_options().await
}

/// Get the initial storage options that were passed in when opening this table.
///
/// For dynamically refreshed options (e.g., credential vending), use [`Self::latest_storage_options`].
///
/// Warning: This is an internal API and the return value is subject to change.
pub async fn initial_storage_options(&self) -> Option<HashMap<String, String>> {
self.inner.initial_storage_options().await
}

/// Get the latest storage options, refreshing from the provider if configured.
///
/// This method is useful for credential vending scenarios where storage options
/// may be refreshed dynamically. If no dynamic provider is configured, this
/// returns the initial static options.
///
/// Warning: This is an internal API and the return value is subject to change.
pub async fn latest_storage_options(&self) -> Result<Option<HashMap<String, String>>> {
self.inner.latest_storage_options().await
}
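
A hedged usage sketch of the new accessors (the table handle is assumed to be open already):

// Sketch: prefer latest_storage_options when credentials may be re-vended.
let initial = table.initial_storage_options().await; // static snapshot
let latest = table.latest_storage_options().await?; // refreshed if a provider exists
if initial != latest {
    // Credentials were refreshed since the table was opened.
}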
|
||||
|
||||
/// Get statistics about an index.
|
||||
/// Returns None if the index does not exist.
|
||||
pub async fn index_stats(
|
||||
@@ -1425,7 +1280,9 @@ impl Table {
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let unioned = Arc::new(UnionExec::new(projected_plans));
|
||||
let unioned = UnionExec::try_new(projected_plans).map_err(|err| Error::Runtime {
|
||||
message: err.to_string(),
|
||||
})?;
|
||||
// We require 1 partition in the final output
|
||||
let repartitioned = RepartitionExec::try_new(
|
||||
unioned,
|
||||
@@ -1921,16 +1778,6 @@ impl NativeTable {
|
||||
})
|
||||
}
|
||||
|
||||
async fn optimize_indices(&self, options: &OptimizeOptions) -> Result<()> {
|
||||
info!("LanceDB: optimizing indices: {:?}", options);
|
||||
self.dataset
|
||||
.get_mut()
|
||||
.await?
|
||||
.optimize_indices(options)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Merge new data into this table.
|
||||
pub async fn merge(
|
||||
&mut self,
|
||||
@@ -1946,47 +1793,6 @@ impl NativeTable {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Remove old versions of the dataset from disk.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `older_than` - The duration of time to keep versions of the dataset.
|
||||
/// * `delete_unverified` - Because they may be part of an in-progress
|
||||
/// transaction, files newer than 7 days old are not deleted by default.
|
||||
/// If you are sure that there are no in-progress transactions, then you
|
||||
/// can set this to True to delete all files older than `older_than`.
|
||||
///
|
||||
/// This calls into [lance::dataset::Dataset::cleanup_old_versions] and
|
||||
/// returns the result.
|
||||
async fn cleanup_old_versions(
|
||||
&self,
|
||||
older_than: Duration,
|
||||
delete_unverified: Option<bool>,
|
||||
error_if_tagged_old_versions: Option<bool>,
|
||||
) -> Result<RemovalStats> {
|
||||
Ok(self
|
||||
.dataset
|
||||
.get_mut()
|
||||
.await?
|
||||
.cleanup_old_versions(older_than, delete_unverified, error_if_tagged_old_versions)
|
||||
.await?)
|
||||
}
|
||||
|
||||
/// Compact files in the dataset.
|
||||
///
|
||||
/// This can be run after making several small appends to optimize the table
|
||||
/// for faster reads.
|
||||
///
|
||||
/// This calls into [lance::dataset::optimize::compact_files].
|
||||
async fn compact_files(
|
||||
&self,
|
||||
options: CompactionOptions,
|
||||
remap_options: Option<Arc<dyn IndexRemapperOptions>>,
|
||||
) -> Result<CompactionMetrics> {
|
||||
let mut dataset_mut = self.dataset.get_mut().await?;
|
||||
let metrics = compact_files(&mut dataset_mut, options, remap_options).await?;
|
||||
Ok(metrics)
|
||||
}
|
||||
|
||||
// TODO: why are these individual methods and not some single "get_stats" method?
|
||||
pub async fn count_fragments(&self) -> Result<usize> {
|
||||
Ok(self.dataset.get().await?.count_fragments())
|
||||
@@ -2802,25 +2608,8 @@ impl BaseTable for NativeTable {
|
||||
}
|
||||
|
||||
async fn update(&self, update: UpdateBuilder) -> Result<UpdateResult> {
|
||||
let dataset = self.dataset.get().await?.clone();
|
||||
let mut builder = LanceUpdateBuilder::new(Arc::new(dataset));
|
||||
if let Some(predicate) = update.filter {
|
||||
builder = builder.update_where(&predicate)?;
|
||||
}
|
||||
|
||||
for (column, value) in update.columns {
|
||||
builder = builder.set(column, &value)?;
|
||||
}
|
||||
|
||||
let operation = builder.build()?;
|
||||
let res = operation.execute().await?;
|
||||
self.dataset
|
||||
.set_latest(res.new_dataset.as_ref().clone())
|
||||
.await;
|
||||
Ok(UpdateResult {
|
||||
rows_updated: res.rows_updated,
|
||||
version: res.new_dataset.version().version,
|
||||
})
|
||||
// Delegate to the submodule implementation
|
||||
update::execute_update(self, update).await
|
||||
}
|
||||
|
||||
async fn create_plan(
|
||||
@@ -3078,11 +2867,8 @@ impl BaseTable for NativeTable {
|
||||
|
||||
/// Delete rows from the table
|
||||
async fn delete(&self, predicate: &str) -> Result<DeleteResult> {
|
||||
let mut dataset = self.dataset.get_mut().await?;
|
||||
dataset.delete(predicate).await?;
|
||||
Ok(DeleteResult {
|
||||
version: dataset.version().version,
|
||||
})
|
||||
// Delegate to the submodule implementation
|
||||
delete::execute_delete(self, predicate).await
|
||||
}
|
||||
|
||||
async fn tags(&self) -> Result<Box<dyn Tags + '_>> {
|
||||
@@ -3092,55 +2878,8 @@ impl BaseTable for NativeTable {
|
||||
}
|
||||
|
||||
async fn optimize(&self, action: OptimizeAction) -> Result<OptimizeStats> {
|
||||
let mut stats = OptimizeStats {
|
||||
compaction: None,
|
||||
prune: None,
|
||||
};
|
||||
match action {
|
||||
OptimizeAction::All => {
|
||||
stats.compaction = self
|
||||
.optimize(OptimizeAction::Compact {
|
||||
options: CompactionOptions::default(),
|
||||
remap_options: None,
|
||||
})
|
||||
.await?
|
||||
.compaction;
|
||||
stats.prune = self
|
||||
.optimize(OptimizeAction::Prune {
|
||||
older_than: None,
|
||||
delete_unverified: None,
|
||||
error_if_tagged_old_versions: None,
|
||||
})
|
||||
.await?
|
||||
.prune;
|
||||
self.optimize(OptimizeAction::Index(OptimizeOptions::default()))
|
||||
.await?;
|
||||
}
|
||||
OptimizeAction::Compact {
|
||||
options,
|
||||
remap_options,
|
||||
} => {
|
||||
stats.compaction = Some(self.compact_files(options, remap_options).await?);
|
||||
}
|
||||
OptimizeAction::Prune {
|
||||
older_than,
|
||||
delete_unverified,
|
||||
error_if_tagged_old_versions,
|
||||
} => {
|
||||
stats.prune = Some(
|
||||
self.cleanup_old_versions(
|
||||
older_than.unwrap_or(Duration::try_days(7).expect("valid delta")),
|
||||
delete_unverified,
|
||||
error_if_tagged_old_versions,
|
||||
)
|
||||
.await?,
|
||||
);
|
||||
}
|
||||
OptimizeAction::Index(options) => {
|
||||
self.optimize_indices(&options).await?;
|
||||
}
|
||||
}
|
||||
Ok(stats)
|
||||
// Delegate to the submodule implementation
|
||||
optimize::execute_optimize(self, action).await
|
||||
}
|
||||
|
||||
async fn add_columns(
|
||||
@@ -3148,27 +2887,15 @@ impl BaseTable for NativeTable {
|
||||
transforms: NewColumnTransform,
|
||||
read_columns: Option<Vec<String>>,
|
||||
) -> Result<AddColumnsResult> {
|
||||
let mut dataset = self.dataset.get_mut().await?;
|
||||
dataset.add_columns(transforms, read_columns, None).await?;
|
||||
Ok(AddColumnsResult {
|
||||
version: dataset.version().version,
|
||||
})
|
||||
schema_evolution::execute_add_columns(self, transforms, read_columns).await
|
||||
}
|
||||
|
||||
async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<AlterColumnsResult> {
|
||||
let mut dataset = self.dataset.get_mut().await?;
|
||||
dataset.alter_columns(alterations).await?;
|
||||
Ok(AlterColumnsResult {
|
||||
version: dataset.version().version,
|
||||
})
|
||||
schema_evolution::execute_alter_columns(self, alterations).await
|
||||
}
|
||||
|
||||
async fn drop_columns(&self, columns: &[&str]) -> Result<DropColumnsResult> {
|
||||
let mut dataset = self.dataset.get_mut().await?;
|
||||
dataset.drop_columns(columns).await?;
|
||||
Ok(DropColumnsResult {
|
||||
version: dataset.version().version,
|
||||
})
|
||||
schema_evolution::execute_drop_columns(self, columns).await
|
||||
}
|
||||
|
||||
async fn list_indices(&self) -> Result<Vec<IndexConfig>> {
|
||||
@@ -3231,6 +2958,10 @@ impl BaseTable for NativeTable {
|
||||
}
|
||||
|
||||
async fn storage_options(&self) -> Option<HashMap<String, String>> {
|
||||
self.initial_storage_options().await
|
||||
}
|
||||
|
||||
async fn initial_storage_options(&self) -> Option<HashMap<String, String>> {
|
||||
self.dataset
|
||||
.get()
|
||||
.await
|
||||
@@ -3238,6 +2969,11 @@ impl BaseTable for NativeTable {
|
||||
.and_then(|dataset| dataset.initial_storage_options().cloned())
|
||||
}
|
||||
|
||||
async fn latest_storage_options(&self) -> Result<Option<HashMap<String, String>>> {
|
||||
let dataset = self.dataset.get().await?;
|
||||
Ok(dataset.latest_storage_options().await?.map(|o| o.0))
|
||||
}
|
||||
|
||||
async fn index_stats(&self, index_name: &str) -> Result<Option<IndexStatistics>> {
|
||||
let stats = match self
|
||||
.dataset
|
||||
@@ -3351,6 +3087,21 @@ impl BaseTable for NativeTable {
|
||||
};
|
||||
Ok(stats)
|
||||
}
|
||||
|
||||
async fn create_insert_exec(
|
||||
&self,
|
||||
input: Arc<dyn datafusion_physical_plan::ExecutionPlan>,
|
||||
write_params: WriteParams,
|
||||
) -> Result<Arc<dyn datafusion_physical_plan::ExecutionPlan>> {
|
||||
let ds = self.dataset.get().await?;
|
||||
let dataset = Arc::new((*ds).clone());
|
||||
Ok(Arc::new(datafusion::insert::InsertExec::new(
|
||||
self.dataset.clone(),
|
||||
dataset,
|
||||
input,
|
||||
write_params,
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
#[skip_serializing_none]
|
||||
@@ -3406,15 +3157,12 @@ mod tests {
|
||||
|
||||
use arrow_array::{
|
||||
builder::{ListBuilder, StringBuilder},
|
||||
Array, BooleanArray, Date32Array, FixedSizeListArray, Float32Array, Float64Array,
|
||||
Int32Array, Int64Array, LargeStringArray, RecordBatch, RecordBatchIterator,
|
||||
RecordBatchReader, StringArray, TimestampMillisecondArray, TimestampNanosecondArray,
|
||||
UInt32Array,
|
||||
Array, BooleanArray, FixedSizeListArray, Float32Array, Int32Array, LargeStringArray,
|
||||
RecordBatch, RecordBatchIterator, RecordBatchReader, StringArray,
|
||||
};
|
||||
use arrow_array::{BinaryArray, LargeBinaryArray};
|
||||
use arrow_data::ArrayDataBuilder;
|
||||
use arrow_schema::{DataType, Field, Schema, TimeUnit};
|
||||
use futures::TryStreamExt;
|
||||
use arrow_schema::{DataType, Field, Schema};
|
||||
use lance::dataset::WriteMode;
|
||||
use lance::io::{ObjectStoreParams, WrappingObjectStore};
|
||||
use lance::Dataset;
|
||||
@@ -3426,7 +3174,6 @@ mod tests {
|
||||
use crate::connection::ConnectBuilder;
|
||||
use crate::index::scalar::{BTreeIndexBuilder, BitmapIndexBuilder};
|
||||
use crate::index::vector::{IvfHnswPqIndexBuilder, IvfHnswSqIndexBuilder};
|
||||
use crate::query::{ExecutableQuery, QueryBase};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_open() {
|
||||
@@ -3648,306 +3395,6 @@ mod tests {
|
||||
assert_eq!(table.name(), "test");
|
||||
}
|
||||
|
||||
    #[tokio::test]
    async fn test_update_with_predicate() {
        let tmp_dir = tempdir().unwrap();
        let dataset_path = tmp_dir.path().join("test.lance");
        let uri = dataset_path.to_str().unwrap();
        let conn = connect(uri)
            .read_consistency_interval(Duration::from_secs(0))
            .execute()
            .await
            .unwrap();

        let schema = Arc::new(Schema::new(vec![
            Field::new("id", DataType::Int32, false),
            Field::new("name", DataType::Utf8, false),
        ]));

        let record_batch_iter = RecordBatchIterator::new(
            vec![RecordBatch::try_new(
                schema.clone(),
                vec![
                    Arc::new(Int32Array::from_iter_values(0..10)),
                    Arc::new(StringArray::from_iter_values(vec![
                        "a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
                    ])),
                ],
            )
            .unwrap()]
            .into_iter()
            .map(Ok),
            schema.clone(),
        );

        let table = conn
            .create_table("my_table", record_batch_iter)
            .execute()
            .await
            .unwrap();

        table
            .update()
            .only_if("id > 5")
            .column("name", "'foo'")
            .execute()
            .await
            .unwrap();

        let mut batches = table
            .query()
            .select(Select::columns(&["id", "name"]))
            .execute()
            .await
            .unwrap()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();

        while let Some(batch) = batches.pop() {
            let ids = batch
                .column(0)
                .as_any()
                .downcast_ref::<Int32Array>()
                .unwrap()
                .iter()
                .collect::<Vec<_>>();
            let names = batch
                .column(1)
                .as_any()
                .downcast_ref::<StringArray>()
                .unwrap()
                .iter()
                .collect::<Vec<_>>();
            for (i, name) in names.iter().enumerate() {
                let id = ids[i].unwrap();
                let name = name.unwrap();
                if id > 5 {
                    assert_eq!(name, "foo");
                } else {
                    assert_eq!(name, &format!("{}", (b'a' + id as u8) as char));
                }
            }
        }
    }

    #[tokio::test]
    async fn test_update_all_types() {
        let tmp_dir = tempdir().unwrap();
        let dataset_path = tmp_dir.path().join("test.lance");
        let uri = dataset_path.to_str().unwrap();
        let conn = connect(uri)
            .read_consistency_interval(Duration::from_secs(0))
            .execute()
            .await
            .unwrap();

        let schema = Arc::new(Schema::new(vec![
            Field::new("int32", DataType::Int32, false),
            Field::new("int64", DataType::Int64, false),
            Field::new("uint32", DataType::UInt32, false),
            Field::new("string", DataType::Utf8, false),
            Field::new("large_string", DataType::LargeUtf8, false),
            Field::new("float32", DataType::Float32, false),
            Field::new("float64", DataType::Float64, false),
            Field::new("bool", DataType::Boolean, false),
            Field::new("date32", DataType::Date32, false),
            Field::new(
                "timestamp_ns",
                DataType::Timestamp(TimeUnit::Nanosecond, None),
                false,
            ),
            Field::new(
                "timestamp_ms",
                DataType::Timestamp(TimeUnit::Millisecond, None),
                false,
            ),
            Field::new(
                "vec_f32",
                DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float32, true)), 2),
                false,
            ),
            Field::new(
                "vec_f64",
                DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float64, true)), 2),
                false,
            ),
        ]));

        let record_batch_iter = RecordBatchIterator::new(
            vec![RecordBatch::try_new(
                schema.clone(),
                vec![
                    Arc::new(Int32Array::from_iter_values(0..10)),
                    Arc::new(Int64Array::from_iter_values(0..10)),
                    Arc::new(UInt32Array::from_iter_values(0..10)),
                    Arc::new(StringArray::from_iter_values(vec![
                        "a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
                    ])),
                    Arc::new(LargeStringArray::from_iter_values(vec![
                        "a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
                    ])),
                    Arc::new(Float32Array::from_iter_values((0..10).map(|i| i as f32))),
                    Arc::new(Float64Array::from_iter_values((0..10).map(|i| i as f64))),
                    Arc::new(Into::<BooleanArray>::into(vec![
                        true, false, true, false, true, false, true, false, true, false,
                    ])),
                    Arc::new(Date32Array::from_iter_values(0..10)),
                    Arc::new(TimestampNanosecondArray::from_iter_values(0..10)),
                    Arc::new(TimestampMillisecondArray::from_iter_values(0..10)),
                    Arc::new(
                        create_fixed_size_list(
                            Float32Array::from_iter_values((0..20).map(|i| i as f32)),
                            2,
                        )
                        .unwrap(),
                    ),
                    Arc::new(
                        create_fixed_size_list(
                            Float64Array::from_iter_values((0..20).map(|i| i as f64)),
                            2,
                        )
                        .unwrap(),
                    ),
                ],
            )
            .unwrap()]
            .into_iter()
            .map(Ok),
            schema.clone(),
        );

        let table = conn
            .create_table("my_table", record_batch_iter)
            .execute()
            .await
            .unwrap();

        // check it can do update for each type
        let updates: Vec<(&str, &str)> = vec![
            ("string", "'foo'"),
            ("large_string", "'large_foo'"),
            ("int32", "1"),
            ("int64", "1"),
            ("uint32", "1"),
            ("float32", "1.0"),
            ("float64", "1.0"),
            ("bool", "true"),
            ("date32", "1"),
            ("timestamp_ns", "1"),
            ("timestamp_ms", "1"),
            ("vec_f32", "[1.0, 1.0]"),
            ("vec_f64", "[1.0, 1.0]"),
        ];

        let mut update_op = table.update();
        for (column, value) in updates {
            update_op = update_op.column(column, value);
        }
        update_op.execute().await.unwrap();

        let mut batches = table
            .query()
            .select(Select::columns(&[
                "string",
                "large_string",
                "int32",
                "int64",
                "uint32",
                "float32",
                "float64",
                "bool",
                "date32",
                "timestamp_ns",
                "timestamp_ms",
                "vec_f32",
                "vec_f64",
            ]))
            .execute()
            .await
            .unwrap()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        let batch = batches.pop().unwrap();

        macro_rules! assert_column {
            ($column:expr, $array_type:ty, $expected:expr) => {
                let array = $column
                    .as_any()
                    .downcast_ref::<$array_type>()
                    .unwrap()
                    .iter()
                    .collect::<Vec<_>>();
                for v in array {
                    assert_eq!(v, Some($expected));
                }
            };
        }

        assert_column!(batch.column(0), StringArray, "foo");
        assert_column!(batch.column(1), LargeStringArray, "large_foo");
        assert_column!(batch.column(2), Int32Array, 1);
        assert_column!(batch.column(3), Int64Array, 1);
        assert_column!(batch.column(4), UInt32Array, 1);
        assert_column!(batch.column(5), Float32Array, 1.0);
        assert_column!(batch.column(6), Float64Array, 1.0);
        assert_column!(batch.column(7), BooleanArray, true);
        assert_column!(batch.column(8), Date32Array, 1);
        assert_column!(batch.column(9), TimestampNanosecondArray, 1);
        assert_column!(batch.column(10), TimestampMillisecondArray, 1);

        let array = batch
            .column(11)
            .as_any()
            .downcast_ref::<FixedSizeListArray>()
            .unwrap()
            .iter()
            .collect::<Vec<_>>();
        for v in array {
            let v = v.unwrap();
            let f32array = v.as_any().downcast_ref::<Float32Array>().unwrap();
            for v in f32array {
                assert_eq!(v, Some(1.0));
            }
        }

        let array = batch
            .column(12)
            .as_any()
            .downcast_ref::<FixedSizeListArray>()
            .unwrap()
            .iter()
            .collect::<Vec<_>>();
        for v in array {
            let v = v.unwrap();
            let f64array = v.as_any().downcast_ref::<Float64Array>().unwrap();
            for v in f64array {
                assert_eq!(v, Some(1.0));
            }
        }
    }

    #[tokio::test]
    async fn test_update_via_expr() {
        let tmp_dir = tempdir().unwrap();
        let dataset_path = tmp_dir.path().join("test.lance");
        let uri = dataset_path.to_str().unwrap();
        let conn = connect(uri)
            .read_consistency_interval(Duration::from_secs(0))
            .execute()
            .await
            .unwrap();
        let tbl = conn
            .create_table("my_table", make_test_batches())
            .execute()
            .await
            .unwrap();
        assert_eq!(1, tbl.count_rows(Some("i == 0".to_string())).await.unwrap());
        tbl.update().column("i", "i+1").execute().await.unwrap();
        assert_eq!(0, tbl.count_rows(Some("i == 0".to_string())).await.unwrap());
    }

    #[derive(Default, Debug)]
    struct NoOpCacheWrapper {
        called: AtomicBool,

@@ -3,6 +3,7 @@

//! This module contains adapters to allow LanceDB tables to be used as DataFusion table providers.

pub mod insert;
pub mod udtf;

use std::{collections::HashMap, sync::Arc};
@@ -13,11 +14,12 @@ use async_trait::async_trait;
use datafusion_catalog::{Session, TableProvider};
use datafusion_common::{DataFusionError, Result as DataFusionResult, Statistics};
use datafusion_execution::{SendableRecordBatchStream, TaskContext};
use datafusion_expr::{Expr, TableProviderFilterPushDown, TableType};
use datafusion_expr::{dml::InsertOp, Expr, TableProviderFilterPushDown, TableType};
use datafusion_physical_plan::{
    stream::RecordBatchStreamAdapter, DisplayAs, DisplayFormatType, ExecutionPlan, PlanProperties,
};
use futures::{TryFutureExt, TryStreamExt};
use lance::dataset::{WriteMode, WriteParams};

use super::{AnyQuery, BaseTable};
use crate::{
@@ -250,6 +252,33 @@ impl TableProvider for BaseTableAdapter {
        // TODO
        None
    }

    async fn insert_into(
        &self,
        _state: &dyn Session,
        input: Arc<dyn ExecutionPlan>,
        insert_op: InsertOp,
    ) -> DataFusionResult<Arc<dyn ExecutionPlan>> {
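        // Map DataFusion's insert semantics onto Lance write modes. Replace
        // (row-level upsert) has no WriteMode counterpart; LanceDB covers that
        // use case through its separate merge-insert API instead.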
        let mode = match insert_op {
            InsertOp::Append => WriteMode::Append,
            InsertOp::Overwrite => WriteMode::Overwrite,
            InsertOp::Replace => {
                return Err(DataFusionError::NotImplemented(
                    "Replace mode is not supported for LanceDB tables".to_string(),
                ))
            }
        };

        let write_params = WriteParams {
            mode,
            ..Default::default()
        };

        self.table
            .create_insert_exec(input, write_params)
            .await
            .map_err(|e| DataFusionError::External(e.into()))
    }
}

#[cfg(test)]

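For reviewers, a condensed sketch of how this new insert_into path is exercised, mirroring the tests added in insert.rs below (error handling elided):

    let db = connect(uri).execute().await?;
    let table = db.create_table("t", reader).execute().await?;

    let ctx = SessionContext::new();
    let provider = BaseTableAdapter::try_new(table.base_table().clone()).await?;
    ctx.register_table("t", Arc::new(provider))?;

    // DataFusion resolves the INSERT against the registered provider, calls
    // insert_into above, and runs the resulting InsertExec to completion.
    ctx.sql("INSERT INTO t VALUES (1), (2)").await?.collect().await?;
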
rust/lancedb/src/table/datafusion/insert.rs (new file, 446 lines)
@@ -0,0 +1,446 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

//! DataFusion ExecutionPlan for inserting data into LanceDB tables.

use std::any::Any;
use std::sync::{Arc, LazyLock, Mutex};

use arrow_array::{RecordBatch, UInt64Array};
use arrow_schema::{DataType, Field, Schema as ArrowSchema, SchemaRef};
use datafusion_common::{DataFusionError, Result as DataFusionResult};
use datafusion_execution::{SendableRecordBatchStream, TaskContext};
use datafusion_physical_expr::{EquivalenceProperties, Partitioning};
use datafusion_physical_plan::execution_plan::{Boundedness, EmissionType};
use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
use datafusion_physical_plan::{
    DisplayAs, DisplayFormatType, ExecutionPlan, ExecutionPlanProperties, PlanProperties,
};
use lance::dataset::transaction::{Operation, Transaction};
use lance::dataset::{CommitBuilder, InsertBuilder, WriteParams};
use lance::Dataset;
use lance_table::format::Fragment;

use crate::table::dataset::DatasetConsistencyWrapper;

pub(crate) static COUNT_SCHEMA: LazyLock<SchemaRef> = LazyLock::new(|| {
    Arc::new(ArrowSchema::new(vec![Field::new(
        "count",
        DataType::UInt64,
        false,
    )]))
});

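/// Returns the new fragments recorded by a write operation. This plan only
/// produces Append and Overwrite operations; anything else yields an empty
/// slice.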
fn operation_fragments(operation: &Operation) -> &[Fragment] {
    match operation {
        Operation::Append { fragments } => fragments,
        Operation::Overwrite { fragments, .. } => fragments,
        _ => &[],
    }
}

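/// Sums the physical row counts of an operation's fragments, counting
/// fragments with unknown row counts as zero.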
fn count_rows_from_operation(operation: &Operation) -> u64 {
    operation_fragments(operation)
        .iter()
        .map(|f| f.num_rows().unwrap_or(0) as u64)
        .sum()
}

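/// Mutable counterpart of `operation_fragments`; panics on operation kinds
/// this plan never constructs.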
fn operation_fragments_mut(operation: &mut Operation) -> &mut Vec<Fragment> {
    match operation {
        Operation::Append { fragments } => fragments,
        Operation::Overwrite { fragments, .. } => fragments,
        _ => panic!("Unsupported operation type for getting mutable fragments"),
    }
}

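/// Collapses the per-partition transactions into a single transaction by
/// concatenating their fragment lists onto the transaction popped from the
/// end of the list. Assumes every transaction carries the same operation
/// kind; returns `None` if no partition produced a transaction.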
fn merge_transactions(mut transactions: Vec<Transaction>) -> Option<Transaction> {
    let mut first = transactions.pop()?;

    for txn in transactions {
        let first_fragments = operation_fragments_mut(&mut first.operation);
        let txn_fragments = operation_fragments(&txn.operation);
        first_fragments.extend_from_slice(txn_fragments);
    }

    Some(first)
}

/// ExecutionPlan for inserting data into a native LanceDB table.
///
/// This plan executes inserts by:
/// 1. Each partition writes data independently using InsertBuilder::execute_uncommitted_stream
/// 2. The last partition to complete commits all transactions atomically
/// 3. Returns the count of inserted rows per partition
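///
/// "Last to complete" is coordinated through a mutex-guarded Vec of partial
/// transactions: whichever partition pushes the final entry merges them all
/// and performs the single commit.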
#[derive(Debug)]
pub struct InsertExec {
    ds_wrapper: DatasetConsistencyWrapper,
    dataset: Arc<Dataset>,
    input: Arc<dyn ExecutionPlan>,
    write_params: WriteParams,
    properties: PlanProperties,
    partial_transactions: Arc<Mutex<Vec<Transaction>>>,
}

impl InsertExec {
    pub fn new(
        ds_wrapper: DatasetConsistencyWrapper,
        dataset: Arc<Dataset>,
        input: Arc<dyn ExecutionPlan>,
        write_params: WriteParams,
    ) -> Self {
        let schema = COUNT_SCHEMA.clone();
        let num_partitions = input.output_partitioning().partition_count();
        let properties = PlanProperties::new(
            EquivalenceProperties::new(schema),
            Partitioning::UnknownPartitioning(num_partitions),
            EmissionType::Final,
            Boundedness::Bounded,
        );

        Self {
            ds_wrapper,
            dataset,
            input,
            write_params,
            properties,
            partial_transactions: Arc::new(Mutex::new(Vec::with_capacity(num_partitions))),
        }
    }
}

impl DisplayAs for InsertExec {
    fn fmt_as(&self, t: DisplayFormatType, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match t {
            DisplayFormatType::Default | DisplayFormatType::Verbose => {
                write!(f, "InsertExec: mode={:?}", self.write_params.mode)
            }
            DisplayFormatType::TreeRender => {
                write!(f, "InsertExec")
            }
        }
    }
}

impl ExecutionPlan for InsertExec {
    fn name(&self) -> &str {
        Self::static_name()
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    fn properties(&self) -> &PlanProperties {
        &self.properties
    }

    fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
        vec![&self.input]
    }

    fn maintains_input_order(&self) -> Vec<bool> {
        vec![false]
    }

    fn benefits_from_input_partitioning(&self) -> Vec<bool> {
        vec![false]
    }

    fn with_new_children(
        self: Arc<Self>,
        children: Vec<Arc<dyn ExecutionPlan>>,
    ) -> DataFusionResult<Arc<dyn ExecutionPlan>> {
        if children.len() != 1 {
            return Err(DataFusionError::Internal(
                "InsertExec requires exactly one child".to_string(),
            ));
        }
        Ok(Arc::new(Self::new(
            self.ds_wrapper.clone(),
            self.dataset.clone(),
            children[0].clone(),
            self.write_params.clone(),
        )))
    }

    fn execute(
        &self,
        partition: usize,
        context: Arc<TaskContext>,
    ) -> DataFusionResult<SendableRecordBatchStream> {
        let input_stream = self.input.execute(partition, context)?;
        let dataset = self.dataset.clone();
        let write_params = self.write_params.clone();
        let partial_transactions = self.partial_transactions.clone();
        let total_partitions = self.input.output_partitioning().partition_count();
        let ds_wrapper = self.ds_wrapper.clone();

        let stream = futures::stream::once(async move {
            let transaction = InsertBuilder::new(dataset.clone())
                .with_params(&write_params)
                .execute_uncommitted_stream(input_stream)
                .await?;

            let num_rows = count_rows_from_operation(&transaction.operation);

            let to_commit = {
                // Don't hold the lock over an await point.
                let mut txns = partial_transactions.lock().unwrap();
                txns.push(transaction);
                if txns.len() == total_partitions {
                    Some(std::mem::take(&mut *txns))
                } else {
                    None
                }
            };

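            // Only the partition that pushed the final transaction reaches
            // this branch; it commits on behalf of every partition and then
            // points the consistency wrapper at the new dataset version.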
            if let Some(transactions) = to_commit {
                if let Some(merged_txn) = merge_transactions(transactions) {
                    let new_dataset = CommitBuilder::new(dataset.clone())
                        .execute(merged_txn)
                        .await?;
                    ds_wrapper.set_latest(new_dataset).await;
                }
            }

            Ok(RecordBatch::try_new(
                COUNT_SCHEMA.clone(),
                vec![Arc::new(UInt64Array::from(vec![num_rows]))],
            )?)
        });

        Ok(Box::pin(RecordBatchStreamAdapter::new(
            COUNT_SCHEMA.clone(),
            stream,
        )))
    }
}

#[cfg(test)]
mod tests {
    use std::vec;

    use super::*;
    use arrow_array::{record_batch, Int32Array, RecordBatchIterator};
    use datafusion::prelude::SessionContext;
    use datafusion_catalog::MemTable;
    use tempfile::tempdir;

    use crate::connect;

    #[tokio::test]
    async fn test_insert_via_sql() {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();

        let db = connect(uri).execute().await.unwrap();

        // Create initial table
        let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
        let schema = batch.schema();
        let reader = RecordBatchIterator::new(vec![Ok(batch)], schema);

        let table = db
            .create_table("test_insert", Box::new(reader))
            .execute()
            .await
            .unwrap();

        // Verify initial count
        assert_eq!(table.count_rows(None).await.unwrap(), 3);

        let ctx = SessionContext::new();
        let provider =
            crate::table::datafusion::BaseTableAdapter::try_new(table.base_table().clone())
                .await
                .unwrap();
        ctx.register_table("test_insert", Arc::new(provider))
            .unwrap();

        ctx.sql("INSERT INTO test_insert VALUES (4), (5), (6)")
            .await
            .unwrap()
            .collect()
            .await
            .unwrap();

        // Verify final count
        table.checkout_latest().await.unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), 6);
    }

    #[tokio::test]
    async fn test_insert_overwrite_via_sql() {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();

        let db = connect(uri).execute().await.unwrap();

        // Create initial table with 3 rows
        let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
        let schema = batch.schema();
        let reader = RecordBatchIterator::new(vec![Ok(batch)], schema);

        let table = db
            .create_table("test_overwrite", Box::new(reader))
            .execute()
            .await
            .unwrap();

        assert_eq!(table.count_rows(None).await.unwrap(), 3);

        let ctx = SessionContext::new();
        let provider =
            crate::table::datafusion::BaseTableAdapter::try_new(table.base_table().clone())
                .await
                .unwrap();
        ctx.register_table("test_overwrite", Arc::new(provider))
            .unwrap();

        ctx.sql("INSERT OVERWRITE INTO test_overwrite VALUES (10), (20)")
            .await
            .unwrap()
            .collect()
            .await
            .unwrap();

        // Verify: should have 2 rows (overwritten, not appended)
        table.checkout_latest().await.unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), 2);
    }

    #[tokio::test]
    async fn test_insert_empty_batch() {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();

        let db = connect(uri).execute().await.unwrap();

        // Create initial table
        let schema = Arc::new(ArrowSchema::new(vec![Field::new(
            "id",
            DataType::Int32,
            false,
        )]));
        let batches = vec![RecordBatch::try_new(
            schema.clone(),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
        )
        .unwrap()];
        let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema.clone());

        let table = db
            .create_table("test_empty", Box::new(reader))
            .execute()
            .await
            .unwrap();

        assert_eq!(table.count_rows(None).await.unwrap(), 3);

        let ctx = SessionContext::new();
        let provider =
            crate::table::datafusion::BaseTableAdapter::try_new(table.base_table().clone())
                .await
                .unwrap();
        ctx.register_table("test_empty", Arc::new(provider))
            .unwrap();

        let source_schema = Arc::new(ArrowSchema::new(vec![Field::new(
            "id",
            DataType::Int32,
            false,
        )]));
        // Empty batches
        let source_reader = RecordBatchIterator::new(
            std::iter::empty::<Result<RecordBatch, arrow_schema::ArrowError>>(),
            source_schema,
        );
        let source_table = db
            .create_table("empty_source", Box::new(source_reader))
            .execute()
            .await
            .unwrap();
        let source_provider =
            crate::table::datafusion::BaseTableAdapter::try_new(source_table.base_table().clone())
                .await
                .unwrap();
        ctx.register_table("empty_source", Arc::new(source_provider))
            .unwrap();

        // Execute INSERT with empty source
        ctx.sql("INSERT INTO test_empty SELECT * FROM empty_source")
            .await
            .unwrap()
            .collect()
            .await
            .unwrap();

        // Verify: should still have 3 rows (nothing inserted)
        table.checkout_latest().await.unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), 3);
    }

    #[tokio::test]
    async fn test_insert_multiple_batches() {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();

        let db = connect(uri).execute().await.unwrap();

        // Create initial table
        let schema = Arc::new(ArrowSchema::new(vec![Field::new(
            "id",
            DataType::Int32,
            true,
        )]));
        let batches =
            vec![
                RecordBatch::try_new(schema.clone(), vec![Arc::new(Int32Array::from(vec![1]))])
                    .unwrap(),
            ];
        let reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema.clone());

        let table = db
            .create_table("test_multi_batch", Box::new(reader))
            .execute()
            .await
            .unwrap();

        let ctx = SessionContext::new();
        let provider =
            crate::table::datafusion::BaseTableAdapter::try_new(table.base_table().clone())
                .await
                .unwrap();
        ctx.register_table("test_multi_batch", Arc::new(provider))
            .unwrap();

        // Memtable with multiple batches and multiple partitions
        let source_table = MemTable::try_new(
            schema.clone(),
            vec![
                // Partition 0
                vec![
                    record_batch!(("id", Int32, [2, 3])).unwrap(),
                    record_batch!(("id", Int32, [4, 5])).unwrap(),
                ],
                // Partition 1
                vec![record_batch!(("id", Int32, [6, 7, 8])).unwrap()],
            ],
        )
        .unwrap();
        ctx.register_table("multi_batch_source", Arc::new(source_table))
            .unwrap();

        ctx.sql("INSERT INTO test_multi_batch SELECT * FROM multi_batch_source")
            .await
            .unwrap()
            .collect()
            .await
            .unwrap();

        // Verify: should have 1 + 2 + 2 + 3 = 8 rows
        table.checkout_latest().await.unwrap();
        assert_eq!(table.count_rows(None).await.unwrap(), 8);
    }
}
Some files were not shown because too many files have changed in this diff.