mirror of
https://github.com/lancedb/lancedb.git
synced 2025-12-25 14:29:56 +00:00
Compare commits
1 Commits
python-v0.
...
codex/upda
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
aa0da4f0f3 |
@@ -1,5 +1,5 @@
|
||||
[tool.bumpversion]
|
||||
current_version = "0.23.0-beta.0"
|
||||
current_version = "0.22.4-beta.2"
|
||||
parse = """(?x)
|
||||
(?P<major>0|[1-9]\\d*)\\.
|
||||
(?P<minor>0|[1-9]\\d*)\\.
|
||||
@@ -72,9 +72,3 @@ search = "\nversion = \"{current_version}\""
|
||||
filename = "nodejs/Cargo.toml"
|
||||
replace = "\nversion = \"{new_version}\""
|
||||
search = "\nversion = \"{current_version}\""
|
||||
|
||||
# Java documentation
|
||||
[[tool.bumpversion.files]]
|
||||
filename = "docs/src/java/java.md"
|
||||
replace = "<version>{new_version}</version>"
|
||||
search = "<version>{current_version}</version>"
|
||||
|
||||
@@ -19,7 +19,7 @@ rustflags = [
|
||||
"-Wclippy::string_add_assign",
|
||||
"-Wclippy::string_add",
|
||||
"-Wclippy::string_lit_as_bytes",
|
||||
"-Wclippy::implicit_clone",
|
||||
"-Wclippy::string_to_string",
|
||||
"-Wclippy::use_self",
|
||||
"-Dclippy::cargo",
|
||||
"-Dclippy::dbg_macro",
|
||||
|
||||
@@ -31,7 +31,7 @@ runs:
|
||||
with:
|
||||
command: build
|
||||
working-directory: python
|
||||
docker-options: "-e PIP_EXTRA_INDEX_URL='https://pypi.fury.io/lance-format/ https://pypi.fury.io/lancedb/'"
|
||||
docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
|
||||
target: x86_64-unknown-linux-gnu
|
||||
manylinux: ${{ inputs.manylinux }}
|
||||
args: ${{ inputs.args }}
|
||||
@@ -46,7 +46,7 @@ runs:
|
||||
with:
|
||||
command: build
|
||||
working-directory: python
|
||||
docker-options: "-e PIP_EXTRA_INDEX_URL='https://pypi.fury.io/lance-format/ https://pypi.fury.io/lancedb/'"
|
||||
docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
|
||||
target: aarch64-unknown-linux-gnu
|
||||
manylinux: ${{ inputs.manylinux }}
|
||||
args: ${{ inputs.args }}
|
||||
|
||||
2
.github/workflows/build_mac_wheel/action.yml
vendored
2
.github/workflows/build_mac_wheel/action.yml
vendored
@@ -22,5 +22,5 @@ runs:
|
||||
command: build
|
||||
# TODO: pass through interpreter
|
||||
args: ${{ inputs.args }}
|
||||
docker-options: "-e PIP_EXTRA_INDEX_URL='https://pypi.fury.io/lance-format/ https://pypi.fury.io/lancedb/'"
|
||||
docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
|
||||
working-directory: python
|
||||
|
||||
@@ -26,7 +26,7 @@ runs:
|
||||
with:
|
||||
command: build
|
||||
args: ${{ inputs.args }}
|
||||
docker-options: "-e PIP_EXTRA_INDEX_URL='https://pypi.fury.io/lance-format/ https://pypi.fury.io/lancedb/'"
|
||||
docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
|
||||
working-directory: python
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
|
||||
@@ -98,30 +98,3 @@ jobs:
|
||||
|
||||
printenv OPENAI_API_KEY | codex login --with-api-key
|
||||
codex --config shell_environment_policy.ignore_default_excludes=true exec --dangerously-bypass-approvals-and-sandbox "$(cat /tmp/codex-prompt.txt)"
|
||||
|
||||
- name: Trigger sophon dependency update
|
||||
env:
|
||||
TAG: ${{ inputs.tag }}
|
||||
GH_TOKEN: ${{ secrets.ROBOT_TOKEN }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
VERSION="${TAG#refs/tags/}"
|
||||
VERSION="${VERSION#v}"
|
||||
LANCEDB_BRANCH="codex/update-lance-${VERSION//[^a-zA-Z0-9]/-}"
|
||||
|
||||
echo "Triggering sophon workflow with:"
|
||||
echo " lance_ref: ${TAG#refs/tags/}"
|
||||
echo " lancedb_ref: ${LANCEDB_BRANCH}"
|
||||
|
||||
gh workflow run codex-bump-lancedb-lance.yml \
|
||||
--repo lancedb/sophon \
|
||||
-f lance_ref="${TAG#refs/tags/}" \
|
||||
-f lancedb_ref="${LANCEDB_BRANCH}"
|
||||
|
||||
- name: Show latest sophon workflow run
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.ROBOT_TOKEN }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Latest sophon workflow run:"
|
||||
gh run list --repo lancedb/sophon --workflow codex-bump-lancedb-lance.yml --limit 1 --json databaseId,url,displayTitle
|
||||
|
||||
6
.github/workflows/docs.yml
vendored
6
.github/workflows/docs.yml
vendored
@@ -24,7 +24,7 @@ env:
|
||||
# according to: https://matklad.github.io/2021/09/04/fast-rust-builds.html
|
||||
# CI builds are faster with incremental disabled.
|
||||
CARGO_INCREMENTAL: "0"
|
||||
PIP_EXTRA_INDEX_URL: "https://pypi.fury.io/lance-format/ https://pypi.fury.io/lancedb/"
|
||||
PIP_EXTRA_INDEX_URL: "https://pypi.fury.io/lancedb/"
|
||||
|
||||
jobs:
|
||||
# Single deploy job since we're just deploying
|
||||
@@ -50,8 +50,8 @@ jobs:
|
||||
- name: Build Python
|
||||
working-directory: python
|
||||
run: |
|
||||
python -m pip install --extra-index-url https://pypi.fury.io/lance-format/ --extra-index-url https://pypi.fury.io/lancedb/ -e .
|
||||
python -m pip install --extra-index-url https://pypi.fury.io/lance-format/ --extra-index-url https://pypi.fury.io/lancedb/ -r ../docs/requirements.txt
|
||||
python -m pip install --extra-index-url https://pypi.fury.io/lancedb/ -e .
|
||||
python -m pip install --extra-index-url https://pypi.fury.io/lancedb/ -r ../docs/requirements.txt
|
||||
- name: Set up node
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
|
||||
107
.github/workflows/java-publish.yml
vendored
107
.github/workflows/java-publish.yml
vendored
@@ -1,35 +1,76 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: Build and publish Java packages
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*"
|
||||
release:
|
||||
types: [released]
|
||||
pull_request:
|
||||
paths:
|
||||
- .github/workflows/java-publish.yml
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
name: Build and Publish
|
||||
runs-on: ubuntu-24.04
|
||||
macos-arm64:
|
||||
name: Build on MacOS Arm64
|
||||
runs-on: macos-14
|
||||
timeout-minutes: 45
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ./java/core/lancedb-jni
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
brew install protobuf
|
||||
- name: Build release
|
||||
run: |
|
||||
cargo build --release
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: liblancedb_jni_darwin_aarch64.zip
|
||||
path: target/release/liblancedb_jni.dylib
|
||||
retention-days: 1
|
||||
if-no-files-found: error
|
||||
linux-arm64:
|
||||
name: Build on Linux Arm64
|
||||
runs-on: warp-ubuntu-2204-arm64-8x
|
||||
timeout-minutes: 45
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ./java/core/lancedb-jni
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
cache-workspaces: "./java/core/lancedb-jni"
|
||||
# Disable full debug symbol generation to speed up CI build and keep memory down
|
||||
# "1" means line tables only, which is useful for panic tracebacks.
|
||||
rustflags: "-C debuginfo=1"
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt -y -qq update
|
||||
sudo apt install -y protobuf-compiler libssl-dev pkg-config
|
||||
- name: Build release
|
||||
run: |
|
||||
cargo build --release
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: liblancedb_jni_linux_aarch64.zip
|
||||
path: target/release/liblancedb_jni.so
|
||||
retention-days: 1
|
||||
if-no-files-found: error
|
||||
linux-x86:
|
||||
runs-on: warp-ubuntu-2204-x64-8x
|
||||
timeout-minutes: 30
|
||||
needs: [macos-arm64, linux-arm64]
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ./java
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: Set up Java 8
|
||||
uses: actions/setup-java@v4
|
||||
with:
|
||||
@@ -41,30 +82,40 @@ jobs:
|
||||
server-password: SONATYPE_TOKEN
|
||||
gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }}
|
||||
gpg-passphrase: ${{ secrets.GPG_PASSPHRASE }}
|
||||
- name: Set git config
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
git config --global user.email "dev+gha@lancedb.com"
|
||||
git config --global user.name "LanceDB Github Runner"
|
||||
sudo apt -y -qq update
|
||||
sudo apt install -y protobuf-compiler libssl-dev pkg-config
|
||||
- name: Download artifact
|
||||
uses: actions/download-artifact@v4
|
||||
- name: Copy native libs
|
||||
run: |
|
||||
mkdir -p ./core/target/classes/nativelib/darwin-aarch64 ./core/target/classes/nativelib/linux-aarch64
|
||||
cp ../liblancedb_jni_darwin_aarch64.zip/liblancedb_jni.dylib ./core/target/classes/nativelib/darwin-aarch64/liblancedb_jni.dylib
|
||||
cp ../liblancedb_jni_linux_aarch64.zip/liblancedb_jni.so ./core/target/classes/nativelib/linux-aarch64/liblancedb_jni.so
|
||||
- name: Dry run
|
||||
if: github.event_name == 'pull_request'
|
||||
run: |
|
||||
./mvnw --batch-mode -DskipTests package -pl lancedb-core -am
|
||||
- name: Publish
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
mvn --batch-mode -DskipTests -Drust.release.build=true package
|
||||
- name: Set github
|
||||
run: |
|
||||
git config --global user.email "LanceDB Github Runner"
|
||||
git config --global user.name "dev+gha@lancedb.com"
|
||||
- name: Publish with Java 8
|
||||
if: github.event_name == 'release'
|
||||
run: |
|
||||
echo "use-agent" >> ~/.gnupg/gpg.conf
|
||||
echo "pinentry-mode loopback" >> ~/.gnupg/gpg.conf
|
||||
export GPG_TTY=$(tty)
|
||||
./mvnw --batch-mode -DskipTests -DpushChanges=false -Dgpg.passphrase=${{ secrets.GPG_PASSPHRASE }} deploy -pl lancedb-core -am -P deploy-to-ossrh
|
||||
mvn --batch-mode -DskipTests -Drust.release.build=true -DpushChanges=false -Dgpg.passphrase=${{ secrets.GPG_PASSPHRASE }} deploy -P deploy-to-ossrh
|
||||
env:
|
||||
SONATYPE_USER: ${{ secrets.SONATYPE_USER }}
|
||||
SONATYPE_TOKEN: ${{ secrets.SONATYPE_TOKEN }}
|
||||
|
||||
report-failure:
|
||||
name: Report Workflow Failure
|
||||
runs-on: ubuntu-latest
|
||||
needs: [publish]
|
||||
if: always() && failure() && startsWith(github.ref, 'refs/tags/v')
|
||||
needs: [linux-arm64, linux-x86, macos-arm64]
|
||||
if: always() && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
|
||||
118
.github/workflows/java.yml
vendored
118
.github/workflows/java.yml
vendored
@@ -1,46 +1,118 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: Build Java LanceDB Core
|
||||
|
||||
name: Build and Run Java JNI Tests
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- java/**
|
||||
- .github/workflows/java.yml
|
||||
pull_request:
|
||||
paths:
|
||||
- java/**
|
||||
- rust/**
|
||||
- .github/workflows/java.yml
|
||||
|
||||
env:
|
||||
# This env var is used by Swatinem/rust-cache@v2 for the cache
|
||||
# key, so we set it to make sure it is always consistent.
|
||||
CARGO_TERM_COLOR: always
|
||||
# Disable full debug symbol generation to speed up CI build and keep memory down
|
||||
# "1" means line tables only, which is useful for panic tracebacks.
|
||||
RUSTFLAGS: "-C debuginfo=1"
|
||||
RUST_BACKTRACE: "1"
|
||||
# according to: https://matklad.github.io/2021/09/04/fast-rust-builds.html
|
||||
# CI builds are faster with incremental disabled.
|
||||
CARGO_INCREMENTAL: "0"
|
||||
CARGO_BUILD_JOBS: "1"
|
||||
jobs:
|
||||
build-java:
|
||||
runs-on: ubuntu-24.04
|
||||
name: Build
|
||||
linux-build-java-11:
|
||||
runs-on: ubuntu-22.04
|
||||
name: ubuntu-22.04 + Java 11
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ./java
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up Java 17
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
workspaces: java/core/lancedb-jni
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
components: rustfmt
|
||||
- name: Run cargo fmt
|
||||
run: cargo fmt --check
|
||||
working-directory: ./java/core/lancedb-jni
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt update
|
||||
sudo apt install -y protobuf-compiler libssl-dev
|
||||
- name: Install Java 11
|
||||
uses: actions/setup-java@v4
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: 11
|
||||
cache: "maven"
|
||||
- name: Java Style Check
|
||||
run: mvn checkstyle:check
|
||||
# Disable because of issues in lancedb rust core code
|
||||
# - name: Rust Clippy
|
||||
# working-directory: java/core/lancedb-jni
|
||||
# run: cargo clippy --all-targets -- -D warnings
|
||||
- name: Running tests with Java 11
|
||||
run: mvn clean test
|
||||
linux-build-java-17:
|
||||
runs-on: ubuntu-22.04
|
||||
name: ubuntu-22.04 + Java 17
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ./java
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
workspaces: java/core/lancedb-jni
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
components: rustfmt
|
||||
- name: Run cargo fmt
|
||||
run: cargo fmt --check
|
||||
working-directory: ./java/core/lancedb-jni
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt update
|
||||
sudo apt install -y protobuf-compiler libssl-dev
|
||||
- name: Install Java 17
|
||||
uses: actions/setup-java@v4
|
||||
with:
|
||||
distribution: temurin
|
||||
java-version: 17
|
||||
cache: "maven"
|
||||
- run: echo "JAVA_17=$JAVA_HOME" >> $GITHUB_ENV
|
||||
- name: Java Style Check
|
||||
run: ./mvnw checkstyle:check
|
||||
- name: Build and install
|
||||
run: ./mvnw clean install
|
||||
run: mvn checkstyle:check
|
||||
# Disable because of issues in lancedb rust core code
|
||||
# - name: Rust Clippy
|
||||
# working-directory: java/core/lancedb-jni
|
||||
# run: cargo clippy --all-targets -- -D warnings
|
||||
- name: Running tests with Java 17
|
||||
run: |
|
||||
export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS \
|
||||
-XX:+IgnoreUnrecognizedVMOptions \
|
||||
--add-opens=java.base/java.lang=ALL-UNNAMED \
|
||||
--add-opens=java.base/java.lang.invoke=ALL-UNNAMED \
|
||||
--add-opens=java.base/java.lang.reflect=ALL-UNNAMED \
|
||||
--add-opens=java.base/java.io=ALL-UNNAMED \
|
||||
--add-opens=java.base/java.net=ALL-UNNAMED \
|
||||
--add-opens=java.base/java.nio=ALL-UNNAMED \
|
||||
--add-opens=java.base/java.util=ALL-UNNAMED \
|
||||
--add-opens=java.base/java.util.concurrent=ALL-UNNAMED \
|
||||
--add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED \
|
||||
--add-opens=java.base/jdk.internal.ref=ALL-UNNAMED \
|
||||
--add-opens=java.base/sun.nio.ch=ALL-UNNAMED \
|
||||
--add-opens=java.base/sun.nio.cs=ALL-UNNAMED \
|
||||
--add-opens=java.base/sun.security.action=ALL-UNNAMED \
|
||||
--add-opens=java.base/sun.util.calendar=ALL-UNNAMED \
|
||||
--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED \
|
||||
-Djdk.reflect.useDirectMethodHandle=false \
|
||||
-Dio.netty.tryReflectionSetAccessible=true"
|
||||
JAVA_HOME=$JAVA_17 mvn clean test
|
||||
|
||||
2
.github/workflows/lance-release-timer.yml
vendored
2
.github/workflows/lance-release-timer.yml
vendored
@@ -59,4 +59,4 @@ jobs:
|
||||
GH_TOKEN: ${{ secrets.ROBOT_TOKEN }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
gh run list --workflow codex-update-lance-dependency.yml --limit 1 --json databaseId,url,displayTitle
|
||||
gh run list --workflow codex-update-lance-dependency.yml --limit 1 --json databaseId,htmlUrl,displayTitle
|
||||
|
||||
6
.github/workflows/npm-publish.yml
vendored
6
.github/workflows/npm-publish.yml
vendored
@@ -97,6 +97,12 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
settings:
|
||||
- target: x86_64-apple-darwin
|
||||
host: macos-latest
|
||||
features: ","
|
||||
pre_build: |-
|
||||
brew install protobuf
|
||||
rustup target add x86_64-apple-darwin
|
||||
- target: aarch64-apple-darwin
|
||||
host: macos-latest
|
||||
features: fp16kernels
|
||||
|
||||
4
.github/workflows/pypi-publish.yml
vendored
4
.github/workflows/pypi-publish.yml
vendored
@@ -11,7 +11,7 @@ on:
|
||||
- Cargo.toml # Change in dependency frequently breaks builds
|
||||
|
||||
env:
|
||||
PIP_EXTRA_INDEX_URL: "https://pypi.fury.io/lance-format/ https://pypi.fury.io/lancedb/"
|
||||
PIP_EXTRA_INDEX_URL: "https://pypi.fury.io/lancedb/"
|
||||
|
||||
jobs:
|
||||
linux:
|
||||
@@ -64,6 +64,8 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
config:
|
||||
- target: x86_64-apple-darwin
|
||||
runner: macos-13
|
||||
- target: aarch64-apple-darwin
|
||||
runner: warp-macos-14-arm64-6x
|
||||
env:
|
||||
|
||||
19
.github/workflows/python.yml
vendored
19
.github/workflows/python.yml
vendored
@@ -18,7 +18,7 @@ env:
|
||||
# Color output for pytest is off by default.
|
||||
PYTEST_ADDOPTS: "--color=yes"
|
||||
FORCE_COLOR: "1"
|
||||
PIP_EXTRA_INDEX_URL: "https://pypi.fury.io/lance-format/ https://pypi.fury.io/lancedb/"
|
||||
PIP_EXTRA_INDEX_URL: "https://pypi.fury.io/lancedb/"
|
||||
RUST_BACKTRACE: "1"
|
||||
|
||||
jobs:
|
||||
@@ -79,7 +79,7 @@ jobs:
|
||||
doctest:
|
||||
name: "Doctest"
|
||||
timeout-minutes: 30
|
||||
runs-on: ubuntu-2404-8x-x64
|
||||
runs-on: "ubuntu-24.04"
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
@@ -100,7 +100,7 @@ jobs:
|
||||
sudo apt install -y protobuf-compiler
|
||||
- name: Install
|
||||
run: |
|
||||
pip install --extra-index-url https://pypi.fury.io/lance-format/ --extra-index-url https://pypi.fury.io/lancedb/ -e .[tests,dev,embeddings]
|
||||
pip install --extra-index-url https://pypi.fury.io/lancedb/ -e .[tests,dev,embeddings]
|
||||
pip install tantivy
|
||||
pip install mlx
|
||||
- name: Doctest
|
||||
@@ -143,9 +143,16 @@ jobs:
|
||||
- name: Delete wheels
|
||||
run: rm -rf target/wheels
|
||||
platform:
|
||||
name: "Mac"
|
||||
name: "Mac: ${{ matrix.config.name }}"
|
||||
timeout-minutes: 30
|
||||
runs-on: macos-14
|
||||
strategy:
|
||||
matrix:
|
||||
config:
|
||||
- name: x86
|
||||
runner: macos-13
|
||||
- name: Arm
|
||||
runner: macos-14
|
||||
runs-on: "${{ matrix.config.runner }}"
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
@@ -219,7 +226,7 @@ jobs:
|
||||
run: |
|
||||
pip install "pydantic<2"
|
||||
pip install pyarrow==16
|
||||
pip install --extra-index-url https://pypi.fury.io/lance-format/ --extra-index-url https://pypi.fury.io/lancedb/ -e .[tests]
|
||||
pip install --extra-index-url https://pypi.fury.io/lancedb/ -e .[tests]
|
||||
pip install tantivy
|
||||
- name: Run tests
|
||||
run: pytest -m "not slow and not s3_test" -x -v --durations=30 python/tests
|
||||
|
||||
2
.github/workflows/run_tests/action.yml
vendored
2
.github/workflows/run_tests/action.yml
vendored
@@ -15,7 +15,7 @@ runs:
|
||||
- name: Install lancedb
|
||||
shell: bash
|
||||
run: |
|
||||
pip3 install --extra-index-url https://pypi.fury.io/lance-format/ --extra-index-url https://pypi.fury.io/lancedb/ $(ls target/wheels/lancedb-*.whl)[tests,dev]
|
||||
pip3 install --extra-index-url https://pypi.fury.io/lancedb/ $(ls target/wheels/lancedb-*.whl)[tests,dev]
|
||||
- name: Setup localstack for integration tests
|
||||
if: ${{ inputs.integration == 'true' }}
|
||||
shell: bash
|
||||
|
||||
2
.github/workflows/rust.yml
vendored
2
.github/workflows/rust.yml
vendored
@@ -122,7 +122,7 @@ jobs:
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
matrix:
|
||||
mac-runner: ["macos-14", "macos-15"]
|
||||
mac-runner: ["macos-13", "macos-14"]
|
||||
runs-on: "${{ matrix.mac-runner }}"
|
||||
defaults:
|
||||
run:
|
||||
|
||||
892
Cargo.lock
generated
892
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
30
Cargo.toml
30
Cargo.toml
@@ -1,5 +1,5 @@
|
||||
[workspace]
|
||||
members = ["rust/lancedb", "nodejs", "python"]
|
||||
members = ["rust/lancedb", "nodejs", "python", "java/core/lancedb-jni"]
|
||||
# Python package needs to be built by maturin.
|
||||
exclude = ["python"]
|
||||
resolver = "2"
|
||||
@@ -15,20 +15,20 @@ categories = ["database-implementations"]
|
||||
rust-version = "1.78.0"
|
||||
|
||||
[workspace.dependencies]
|
||||
lance = { "version" = "=1.1.0-beta.1", default-features = false, "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-core = { "version" = "=1.1.0-beta.1", "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-datagen = { "version" = "=1.1.0-beta.1", "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-file = { "version" = "=1.1.0-beta.1", "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-io = { "version" = "=1.1.0-beta.1", default-features = false, "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-index = { "version" = "=1.1.0-beta.1", "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-linalg = { "version" = "=1.1.0-beta.1", "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-namespace = { "version" = "=1.1.0-beta.1", "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-namespace-impls = { "version" = "=1.1.0-beta.1", default-features = false, "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-table = { "version" = "=1.1.0-beta.1", "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-testing = { "version" = "=1.1.0-beta.1", "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-datafusion = { "version" = "=1.1.0-beta.1", "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-encoding = { "version" = "=1.1.0-beta.1", "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-arrow = { "version" = "=1.1.0-beta.1", "tag" = "v1.1.0-beta.1", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance = { "version" = "=1.0.0-beta.6", default-features = false, "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-core = { "version" = "=1.0.0-beta.6", "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-datagen = { "version" = "=1.0.0-beta.6", "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-file = { "version" = "=1.0.0-beta.6", "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-io = { "version" = "=1.0.0-beta.6", default-features = false, "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-index = { "version" = "=1.0.0-beta.6", "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-linalg = { "version" = "=1.0.0-beta.6", "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-namespace = { "version" = "=1.0.0-beta.6", "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-namespace-impls = { "version" = "=1.0.0-beta.6", "features" = ["dir-aws", "dir-gcp", "dir-azure", "dir-oss", "rest"], "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-table = { "version" = "=1.0.0-beta.6", "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-testing = { "version" = "=1.0.0-beta.6", "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-datafusion = { "version" = "=1.0.0-beta.6", "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-encoding = { "version" = "=1.0.0-beta.6", "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-arrow = { "version" = "=1.0.0-beta.6", "tag" = "v1.0.0-beta.6", "git" = "https://github.com/lance-format/lance.git" }
|
||||
ahash = "0.8"
|
||||
# Note that this one does not include pyarrow
|
||||
arrow = { version = "56.2", optional = false }
|
||||
|
||||
@@ -123,7 +123,6 @@ nav:
|
||||
- Overview: index.md
|
||||
- Python: python/python.md
|
||||
- Javascript/TypeScript: js/globals.md
|
||||
- Java: java/java.md
|
||||
- Rust: https://docs.rs/lancedb/latest/lancedb/index.html
|
||||
|
||||
extra_css:
|
||||
|
||||
@@ -4,5 +4,4 @@ This page contains the API reference for the SDKs supported by the LanceDB team.
|
||||
|
||||
- [Python](python/python.md)
|
||||
- [JavaScript/TypeScript](js/globals.md)
|
||||
- [Java](java/java.md)
|
||||
- [Rust](https://docs.rs/lancedb/latest/lancedb/index.html)
|
||||
@@ -1,499 +0,0 @@
|
||||
# Java SDK
|
||||
|
||||
The LanceDB Java SDK provides a convenient way to interact with LanceDB Cloud and Enterprise deployments using the Lance REST Namespace API.
|
||||
|
||||
!!! note
|
||||
The Java SDK currently only works for LanceDB remote database that connects to LanceDB Cloud and Enterprise.
|
||||
Local database support is a work in progress. Check [LANCEDB-2848](https://github.com/lancedb/lancedb/issues/2848) for the latest progress.
|
||||
|
||||
## Installation
|
||||
|
||||
Add the following dependency to your `pom.xml`:
|
||||
|
||||
```xml
|
||||
<dependency>
|
||||
<groupId>com.lancedb</groupId>
|
||||
<artifactId>lancedb-core</artifactId>
|
||||
<version>0.23.0-beta.0</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Connecting to LanceDB Cloud
|
||||
|
||||
```java
|
||||
import com.lancedb.LanceDbNamespaceClientBuilder;
|
||||
import org.lance.namespace.LanceNamespace;
|
||||
|
||||
// If your DB url is db://example-db, then your database here is example-db
|
||||
LanceNamespace namespaceClient = LanceDbNamespaceClientBuilder.newBuilder()
|
||||
.apiKey("your_lancedb_cloud_api_key")
|
||||
.database("your_database_name")
|
||||
.build();
|
||||
```
|
||||
|
||||
### Connecting to LanceDB Enterprise
|
||||
|
||||
For LanceDB Enterprise deployments with a custom endpoint:
|
||||
|
||||
```java
|
||||
LanceNamespace namespaceClient = LanceDbNamespaceClientBuilder.newBuilder()
|
||||
.apiKey("your_lancedb_enterprise_api_key")
|
||||
.database("your_database_name")
|
||||
.endpoint("<your_enterprise_endpoint>")
|
||||
.build();
|
||||
```
|
||||
|
||||
### Configuration Options
|
||||
|
||||
| Method | Description | Required |
|
||||
|--------|-------------|----------|
|
||||
| `apiKey(String)` | LanceDB API key | Yes |
|
||||
| `database(String)` | Database name | Yes |
|
||||
| `endpoint(String)` | Custom endpoint URL for Enterprise deployments | No |
|
||||
| `region(String)` | AWS region (default: "us-east-1") | No |
|
||||
| `config(String, String)` | Additional configuration parameters | No |
|
||||
|
||||
## Metadata Operations
|
||||
|
||||
### Creating a Namespace
|
||||
|
||||
Namespaces organize tables hierarchically. Create a namespace before creating tables within it:
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.CreateNamespaceRequest;
|
||||
import org.lance.namespace.model.CreateNamespaceResponse;
|
||||
|
||||
// Create a child namespace
|
||||
CreateNamespaceRequest request = new CreateNamespaceRequest();
|
||||
request.setId(Arrays.asList("my_namespace"));
|
||||
|
||||
CreateNamespaceResponse response = namespaceClient.createNamespace(request);
|
||||
```
|
||||
|
||||
You can also create nested namespaces:
|
||||
|
||||
```java
|
||||
// Create a nested namespace: parent/child
|
||||
CreateNamespaceRequest request = new CreateNamespaceRequest();
|
||||
request.setId(Arrays.asList("parent_namespace", "child_namespace"));
|
||||
|
||||
CreateNamespaceResponse response = namespaceClient.createNamespace(request);
|
||||
```
|
||||
|
||||
### Describing a Namespace
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.DescribeNamespaceRequest;
|
||||
import org.lance.namespace.model.DescribeNamespaceResponse;
|
||||
|
||||
DescribeNamespaceRequest request = new DescribeNamespaceRequest();
|
||||
request.setId(Arrays.asList("my_namespace"));
|
||||
|
||||
DescribeNamespaceResponse response = namespaceClient.describeNamespace(request);
|
||||
System.out.println("Namespace properties: " + response.getProperties());
|
||||
```
|
||||
|
||||
### Listing Namespaces
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.ListNamespacesRequest;
|
||||
import org.lance.namespace.model.ListNamespacesResponse;
|
||||
|
||||
// List all namespaces at root level
|
||||
ListNamespacesRequest request = new ListNamespacesRequest();
|
||||
request.setId(Arrays.asList()); // Empty for root
|
||||
|
||||
ListNamespacesResponse response = namespaceClient.listNamespaces(request);
|
||||
for (String ns : response.getNamespaces()) {
|
||||
System.out.println("Namespace: " + ns);
|
||||
}
|
||||
|
||||
// List child namespaces under a parent
|
||||
ListNamespacesRequest childRequest = new ListNamespacesRequest();
|
||||
childRequest.setId(Arrays.asList("parent_namespace"));
|
||||
|
||||
ListNamespacesResponse childResponse = namespaceClient.listNamespaces(childRequest);
|
||||
```
|
||||
|
||||
### Listing Tables
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.ListTablesRequest;
|
||||
import org.lance.namespace.model.ListTablesResponse;
|
||||
|
||||
// List tables in a namespace
|
||||
ListTablesRequest request = new ListTablesRequest();
|
||||
request.setId(Arrays.asList("my_namespace"));
|
||||
|
||||
ListTablesResponse response = namespaceClient.listTables(request);
|
||||
for (String table : response.getTables()) {
|
||||
System.out.println("Table: " + table);
|
||||
}
|
||||
```
|
||||
|
||||
### Dropping a Namespace
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.DropNamespaceRequest;
|
||||
import org.lance.namespace.model.DropNamespaceResponse;
|
||||
|
||||
DropNamespaceRequest request = new DropNamespaceRequest();
|
||||
request.setId(Arrays.asList("my_namespace"));
|
||||
|
||||
DropNamespaceResponse response = namespaceClient.dropNamespace(request);
|
||||
```
|
||||
|
||||
### Describing a Table
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.DescribeTableRequest;
|
||||
import org.lance.namespace.model.DescribeTableResponse;
|
||||
|
||||
DescribeTableRequest request = new DescribeTableRequest();
|
||||
request.setId(Arrays.asList("my_namespace", "my_table"));
|
||||
|
||||
DescribeTableResponse response = namespaceClient.describeTable(request);
|
||||
System.out.println("Table version: " + response.getVersion());
|
||||
System.out.println("Schema fields: " + response.getSchema().getFields());
|
||||
```
|
||||
|
||||
### Dropping a Table
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.DropTableRequest;
|
||||
import org.lance.namespace.model.DropTableResponse;
|
||||
|
||||
DropTableRequest request = new DropTableRequest();
|
||||
request.setId(Arrays.asList("my_namespace", "my_table"));
|
||||
|
||||
DropTableResponse response = namespaceClient.dropTable(request);
|
||||
```
|
||||
|
||||
## Writing Data
|
||||
|
||||
### Creating a Table
|
||||
|
||||
Tables are created within a namespace by providing data in Apache Arrow IPC format:
|
||||
|
||||
```java
|
||||
import org.lance.namespace.LanceNamespace;
|
||||
import org.lance.namespace.model.CreateTableRequest;
|
||||
import org.lance.namespace.model.CreateTableResponse;
|
||||
import org.apache.arrow.memory.BufferAllocator;
|
||||
import org.apache.arrow.memory.RootAllocator;
|
||||
import org.apache.arrow.vector.IntVector;
|
||||
import org.apache.arrow.vector.VarCharVector;
|
||||
import org.apache.arrow.vector.VectorSchemaRoot;
|
||||
import org.apache.arrow.vector.complex.FixedSizeListVector;
|
||||
import org.apache.arrow.vector.Float4Vector;
|
||||
import org.apache.arrow.vector.ipc.ArrowStreamWriter;
|
||||
import org.apache.arrow.vector.types.FloatingPointPrecision;
|
||||
import org.apache.arrow.vector.types.pojo.ArrowType;
|
||||
import org.apache.arrow.vector.types.pojo.Field;
|
||||
import org.apache.arrow.vector.types.pojo.FieldType;
|
||||
import org.apache.arrow.vector.types.pojo.Schema;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.nio.channels.Channels;
|
||||
import java.util.Arrays;
|
||||
|
||||
// Create schema with id, name, and embedding fields
|
||||
Schema schema = new Schema(Arrays.asList(
|
||||
new Field("id", FieldType.nullable(new ArrowType.Int(32, true)), null),
|
||||
new Field("name", FieldType.nullable(new ArrowType.Utf8()), null),
|
||||
new Field("embedding",
|
||||
FieldType.nullable(new ArrowType.FixedSizeList(128)),
|
||||
Arrays.asList(new Field("item",
|
||||
FieldType.nullable(new ArrowType.FloatingPoint(FloatingPointPrecision.SINGLE)),
|
||||
null)))
|
||||
));
|
||||
|
||||
try (BufferAllocator allocator = new RootAllocator();
|
||||
VectorSchemaRoot root = VectorSchemaRoot.create(schema, allocator)) {
|
||||
|
||||
// Populate data
|
||||
root.setRowCount(3);
|
||||
IntVector idVector = (IntVector) root.getVector("id");
|
||||
VarCharVector nameVector = (VarCharVector) root.getVector("name");
|
||||
FixedSizeListVector embeddingVector = (FixedSizeListVector) root.getVector("embedding");
|
||||
Float4Vector embeddingData = (Float4Vector) embeddingVector.getDataVector();
|
||||
|
||||
for (int i = 0; i < 3; i++) {
|
||||
idVector.setSafe(i, i + 1);
|
||||
nameVector.setSafe(i, ("item_" + i).getBytes());
|
||||
embeddingVector.setNotNull(i);
|
||||
for (int j = 0; j < 128; j++) {
|
||||
embeddingData.setSafe(i * 128 + j, (float) i);
|
||||
}
|
||||
}
|
||||
idVector.setValueCount(3);
|
||||
nameVector.setValueCount(3);
|
||||
embeddingData.setValueCount(3 * 128);
|
||||
embeddingVector.setValueCount(3);
|
||||
|
||||
// Serialize to Arrow IPC format
|
||||
ByteArrayOutputStream out = new ByteArrayOutputStream();
|
||||
try (ArrowStreamWriter writer = new ArrowStreamWriter(root, null, Channels.newChannel(out))) {
|
||||
writer.start();
|
||||
writer.writeBatch();
|
||||
writer.end();
|
||||
}
|
||||
byte[] tableData = out.toByteArray();
|
||||
|
||||
// Create table in a namespace
|
||||
CreateTableRequest request = new CreateTableRequest();
|
||||
request.setId(Arrays.asList("my_namespace", "my_table"));
|
||||
CreateTableResponse response = namespaceClient.createTable(request, tableData);
|
||||
}
|
||||
```
|
||||
|
||||
### Insert
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.InsertIntoTableRequest;
|
||||
import org.lance.namespace.model.InsertIntoTableResponse;
|
||||
|
||||
// Prepare data in Arrow IPC format (similar to create table example)
|
||||
byte[] insertData = prepareArrowData();
|
||||
|
||||
InsertIntoTableRequest request = new InsertIntoTableRequest();
|
||||
request.setId(Arrays.asList("my_namespace", "my_table"));
|
||||
request.setMode(InsertIntoTableRequest.ModeEnum.APPEND);
|
||||
|
||||
InsertIntoTableResponse response = namespaceClient.insertIntoTable(request, insertData);
|
||||
System.out.println("New version: " + response.getVersion());
|
||||
```
|
||||
|
||||
### Update
|
||||
|
||||
Update rows matching a predicate condition:
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.UpdateTableRequest;
|
||||
import org.lance.namespace.model.UpdateTableResponse;
|
||||
|
||||
UpdateTableRequest request = new UpdateTableRequest();
|
||||
request.setId(Arrays.asList("my_namespace", "my_table"));
|
||||
|
||||
// Predicate to select rows to update
|
||||
request.setPredicate("id = 1");
|
||||
|
||||
// Set new values using SQL expressions as [column_name, expression] pairs
|
||||
request.setUpdates(Arrays.asList(
|
||||
Arrays.asList("name", "'updated_name'")
|
||||
));
|
||||
|
||||
UpdateTableResponse response = namespaceClient.updateTable(request);
|
||||
System.out.println("Updated rows: " + response.getUpdatedRows());
|
||||
```
|
||||
|
||||
### Delete
|
||||
|
||||
Delete rows matching a predicate condition:
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.DeleteFromTableRequest;
|
||||
import org.lance.namespace.model.DeleteFromTableResponse;
|
||||
|
||||
DeleteFromTableRequest request = new DeleteFromTableRequest();
|
||||
request.setId(Arrays.asList("my_namespace", "my_table"));
|
||||
|
||||
// Predicate to select rows to delete
|
||||
request.setPredicate("id > 100");
|
||||
|
||||
DeleteFromTableResponse response = namespaceClient.deleteFromTable(request);
|
||||
System.out.println("New version: " + response.getVersion());
|
||||
```
|
||||
|
||||
### Merge Insert (Upsert)
|
||||
|
||||
Merge insert allows you to update existing rows and insert new rows in a single operation based on a key column:
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.MergeInsertIntoTableRequest;
|
||||
import org.lance.namespace.model.MergeInsertIntoTableResponse;
|
||||
|
||||
// Prepare data with rows to update (id=2,3) and new rows (id=4)
|
||||
byte[] mergeData = prepareArrowData(); // Contains rows with id=2,3,4
|
||||
|
||||
MergeInsertIntoTableRequest request = new MergeInsertIntoTableRequest();
|
||||
request.setId(Arrays.asList("my_namespace", "my_table"));
|
||||
|
||||
// Match on the "id" column
|
||||
request.setOn("id");
|
||||
|
||||
// Update all columns when a matching row is found
|
||||
request.setWhenMatchedUpdateAll(true);
|
||||
|
||||
// Insert new rows when no match is found
|
||||
request.setWhenNotMatchedInsertAll(true);
|
||||
|
||||
MergeInsertIntoTableResponse response = namespaceClient.mergeInsertIntoTable(request, mergeData);
|
||||
|
||||
System.out.println("Updated rows: " + response.getNumUpdatedRows());
|
||||
System.out.println("Inserted rows: " + response.getNumInsertedRows());
|
||||
```
|
||||
|
||||
## Querying Data
|
||||
|
||||
### Counting Rows
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.CountTableRowsRequest;
|
||||
|
||||
CountTableRowsRequest request = new CountTableRowsRequest();
|
||||
request.setId(Arrays.asList("my_namespace", "my_table"));
|
||||
|
||||
Long rowCount = namespaceClient.countTableRows(request);
|
||||
System.out.println("Row count: " + rowCount);
|
||||
```
|
||||
|
||||
### Vector Search
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.QueryTableRequest;
|
||||
import org.lance.namespace.model.QueryTableRequestVector;
|
||||
|
||||
QueryTableRequest query = new QueryTableRequest();
|
||||
query.setId(Arrays.asList("my_namespace", "my_table"));
|
||||
query.setK(10); // Return top 10 results
|
||||
|
||||
// Set the query vector
|
||||
List<Float> queryVector = new ArrayList<>();
|
||||
for (int i = 0; i < 128; i++) {
|
||||
queryVector.add(1.0f);
|
||||
}
|
||||
QueryTableRequestVector vector = new QueryTableRequestVector();
|
||||
vector.setSingleVector(queryVector);
|
||||
query.setVector(vector);
|
||||
|
||||
// Specify columns to return
|
||||
query.setColumns(Arrays.asList("id", "name", "embedding"));
|
||||
|
||||
// Execute query - returns Arrow IPC format
|
||||
byte[] result = namespaceClient.queryTable(query);
|
||||
```
|
||||
|
||||
### Full Text Search
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.QueryTableRequest;
|
||||
import org.lance.namespace.model.QueryTableRequestFullTextQuery;
|
||||
import org.lance.namespace.model.StringFtsQuery;
|
||||
|
||||
QueryTableRequest query = new QueryTableRequest();
|
||||
query.setId(Arrays.asList("my_namespace", "my_table"));
|
||||
query.setK(10);
|
||||
|
||||
// Set full text search query
|
||||
StringFtsQuery stringQuery = new StringFtsQuery();
|
||||
stringQuery.setQuery("search terms");
|
||||
stringQuery.setColumns(Arrays.asList("text_column"));
|
||||
|
||||
QueryTableRequestFullTextQuery fts = new QueryTableRequestFullTextQuery();
|
||||
fts.setStringQuery(stringQuery);
|
||||
query.setFullTextQuery(fts);
|
||||
|
||||
// Specify columns to return
|
||||
query.setColumns(Arrays.asList("id", "text_column"));
|
||||
|
||||
byte[] result = namespaceClient.queryTable(query);
|
||||
```
|
||||
|
||||
### Query with Filter
|
||||
|
||||
```java
|
||||
QueryTableRequest query = new QueryTableRequest();
|
||||
query.setId(Arrays.asList("my_namespace", "my_table"));
|
||||
query.setK(10);
|
||||
query.setFilter("id > 50");
|
||||
query.setColumns(Arrays.asList("id", "name"));
|
||||
|
||||
byte[] result = namespaceClient.queryTable(query);
|
||||
```
|
||||
|
||||
### Query with Prefilter
|
||||
|
||||
```java
|
||||
QueryTableRequest query = new QueryTableRequest();
|
||||
query.setId(Arrays.asList("my_namespace", "my_table"));
|
||||
query.setK(5);
|
||||
query.setPrefilter(true); // Apply filter before vector search
|
||||
query.setFilter("category = 'electronics'");
|
||||
|
||||
// Set query vector
|
||||
QueryTableRequestVector vector = new QueryTableRequestVector();
|
||||
vector.setSingleVector(queryVector);
|
||||
query.setVector(vector);
|
||||
|
||||
byte[] result = namespaceClient.queryTable(query);
|
||||
```
|
||||
|
||||
### Reading Query Results
|
||||
|
||||
Query results are returned in Apache Arrow IPC file format. Here's how to read them:
|
||||
|
||||
```java
|
||||
import org.apache.arrow.vector.ipc.ArrowFileReader;
|
||||
import org.apache.arrow.vector.VectorSchemaRoot;
|
||||
import org.apache.arrow.memory.BufferAllocator;
|
||||
import org.apache.arrow.memory.RootAllocator;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.channels.SeekableByteChannel;
|
||||
|
||||
// Helper class to read Arrow data from byte array
|
||||
class ByteArraySeekableByteChannel implements SeekableByteChannel {
|
||||
private final byte[] data;
|
||||
private long position = 0;
|
||||
private boolean isOpen = true;
|
||||
|
||||
public ByteArraySeekableByteChannel(byte[] data) {
|
||||
this.data = data;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int read(ByteBuffer dst) {
|
||||
int remaining = dst.remaining();
|
||||
int available = (int) (data.length - position);
|
||||
if (available <= 0) return -1;
|
||||
int toRead = Math.min(remaining, available);
|
||||
dst.put(data, (int) position, toRead);
|
||||
position += toRead;
|
||||
return toRead;
|
||||
}
|
||||
|
||||
@Override public long position() { return position; }
|
||||
@Override public SeekableByteChannel position(long newPosition) { position = newPosition; return this; }
|
||||
@Override public long size() { return data.length; }
|
||||
@Override public boolean isOpen() { return isOpen; }
|
||||
@Override public void close() { isOpen = false; }
|
||||
@Override public int write(ByteBuffer src) { throw new UnsupportedOperationException(); }
|
||||
@Override public SeekableByteChannel truncate(long size) { throw new UnsupportedOperationException(); }
|
||||
}
|
||||
|
||||
// Read query results
|
||||
byte[] queryResult = namespaceClient.queryTable(query);
|
||||
|
||||
try (BufferAllocator allocator = new RootAllocator();
|
||||
ArrowFileReader reader = new ArrowFileReader(
|
||||
new ByteArraySeekableByteChannel(queryResult), allocator)) {
|
||||
|
||||
for (int i = 0; i < reader.getRecordBlocks().size(); i++) {
|
||||
reader.loadRecordBatch(reader.getRecordBlocks().get(i));
|
||||
VectorSchemaRoot root = reader.getVectorSchemaRoot();
|
||||
|
||||
// Access data
|
||||
IntVector idVector = (IntVector) root.getVector("id");
|
||||
VarCharVector nameVector = (VarCharVector) root.getVector("name");
|
||||
|
||||
for (int row = 0; row < root.getRowCount(); row++) {
|
||||
int id = idVector.get(row);
|
||||
String name = new String(nameVector.get(row));
|
||||
System.out.println("Row " + row + ": id=" + id + ", name=" + name);
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -34,7 +34,7 @@ const results = await table.vectorSearch([0.1, 0.3]).limit(20).toArray();
|
||||
console.log(results);
|
||||
```
|
||||
|
||||
The [quickstart](https://lancedb.com/docs/quickstart/basic-usage/) contains more complete examples.
|
||||
The [quickstart](https://lancedb.com/docs/quickstart/basic-usage/) contains a more complete example.
|
||||
|
||||
## Development
|
||||
|
||||
|
||||
@@ -147,7 +147,7 @@ A new PermutationBuilder instance
|
||||
#### Example
|
||||
|
||||
```ts
|
||||
builder.splitCalculated({ calculation: "user_id % 3" });
|
||||
builder.splitCalculated("user_id % 3");
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
.PHONY: build-lancedb
|
||||
build-lancedb:
|
||||
./mvnw spotless:apply -pl lancedb-core -am
|
||||
./mvnw install -pl lancedb-core -am
|
||||
|
||||
.PHONY: test-lancedb
|
||||
test-lancedb:
|
||||
# Requires LANCEDB_DB and LANCEDB_API_KEY environment variables
|
||||
./mvnw test -pl lancedb-core -P integration-tests
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
./mvnw clean
|
||||
|
||||
.PHONY: build
|
||||
build: build-lancedb
|
||||
@@ -7,11 +7,10 @@
|
||||
For LanceDB Cloud, use the simplified builder API:
|
||||
|
||||
```java
|
||||
import com.lancedb.LanceDbNamespaceClientBuilder;
|
||||
import org.lance.namespace.LanceNamespace;
|
||||
import com.lancedb.lance.namespace.LanceRestNamespace;
|
||||
|
||||
// If your DB url is db://example-db, then your database here is example-db
|
||||
LanceNamespace namespaceClient = LanceDbNamespaceClientBuilder.newBuilder()
|
||||
LanceRestNamespace namespace = LanceDBRestNamespaces.builder()
|
||||
.apiKey("your_lancedb_cloud_api_key")
|
||||
.database("your_database_name")
|
||||
.build();
|
||||
@@ -19,13 +18,13 @@ LanceNamespace namespaceClient = LanceDbNamespaceClientBuilder.newBuilder()
|
||||
|
||||
### LanceDB Enterprise
|
||||
|
||||
For Enterprise deployments, use your custom endpoint:
|
||||
For Enterprise deployments, use your VPC endpoint:
|
||||
|
||||
```java
|
||||
LanceNamespace namespaceClient = LanceDbNamespaceClientBuilder.newBuilder()
|
||||
LanceRestNamespace namespace = LanceDBRestNamespaces.builder()
|
||||
.apiKey("your_lancedb_enterprise_api_key")
|
||||
.database("your_database_name")
|
||||
.endpoint("<your_enterprise_endpoint>")
|
||||
.database("your-top-dir") // Your top level folder under your cloud bucket, e.g. s3://your-bucket/your-top-dir/
|
||||
.hostOverride("http://<vpc_endpoint_dns_name>:80")
|
||||
.build();
|
||||
```
|
||||
|
||||
@@ -34,11 +33,5 @@ LanceNamespace namespaceClient = LanceDbNamespaceClientBuilder.newBuilder()
|
||||
Build:
|
||||
|
||||
```shell
|
||||
./mvnw install -pl lancedb-core -am
|
||||
```
|
||||
|
||||
Run tests:
|
||||
|
||||
```shell
|
||||
./mvnw test -pl lancedb-core
|
||||
```
|
||||
./mvnw install
|
||||
```
|
||||
30
java/core/lancedb-jni/Cargo.toml
Normal file
30
java/core/lancedb-jni/Cargo.toml
Normal file
@@ -0,0 +1,30 @@
|
||||
[package]
|
||||
name = "lancedb-jni"
|
||||
description = "JNI bindings for LanceDB"
|
||||
# TODO modify lancedb/Cargo.toml for version and dependencies
|
||||
version = "0.10.0"
|
||||
edition.workspace = true
|
||||
repository.workspace = true
|
||||
readme.workspace = true
|
||||
license.workspace = true
|
||||
keywords.workspace = true
|
||||
categories.workspace = true
|
||||
publish = false
|
||||
|
||||
[lib]
|
||||
crate-type = ["cdylib"]
|
||||
|
||||
[dependencies]
|
||||
lancedb = { path = "../../../rust/lancedb", default-features = false }
|
||||
lance = { workspace = true }
|
||||
arrow = { workspace = true, features = ["ffi"] }
|
||||
arrow-schema.workspace = true
|
||||
tokio = "1.46"
|
||||
jni = "0.21.1"
|
||||
snafu.workspace = true
|
||||
lazy_static.workspace = true
|
||||
serde = { version = "^1" }
|
||||
serde_json = { version = "1" }
|
||||
|
||||
[features]
|
||||
default = ["lancedb/default"]
|
||||
133
java/core/lancedb-jni/src/connection.rs
Normal file
133
java/core/lancedb-jni/src/connection.rs
Normal file
@@ -0,0 +1,133 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
||||
|
||||
use crate::ffi::JNIEnvExt;
|
||||
use crate::traits::IntoJava;
|
||||
use crate::{Error, RT};
|
||||
use jni::objects::{JObject, JString, JValue};
|
||||
use jni::JNIEnv;
|
||||
pub const NATIVE_CONNECTION: &str = "nativeConnectionHandle";
|
||||
use crate::Result;
|
||||
use lancedb::connection::{connect, Connection};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BlockingConnection {
|
||||
pub(crate) inner: Connection,
|
||||
}
|
||||
|
||||
impl BlockingConnection {
|
||||
pub fn create(dataset_uri: &str) -> Result<Self> {
|
||||
let inner = RT.block_on(connect(dataset_uri).execute())?;
|
||||
Ok(Self { inner })
|
||||
}
|
||||
|
||||
pub fn table_names(
|
||||
&self,
|
||||
start_after: Option<String>,
|
||||
limit: Option<i32>,
|
||||
) -> Result<Vec<String>> {
|
||||
let mut op = self.inner.table_names();
|
||||
if let Some(start_after) = start_after {
|
||||
op = op.start_after(start_after);
|
||||
}
|
||||
if let Some(limit) = limit {
|
||||
op = op.limit(limit as u32);
|
||||
}
|
||||
Ok(RT.block_on(op.execute())?)
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoJava for BlockingConnection {
|
||||
fn into_java<'a>(self, env: &mut JNIEnv<'a>) -> JObject<'a> {
|
||||
attach_native_connection(env, self)
|
||||
}
|
||||
}
|
||||
|
||||
fn attach_native_connection<'local>(
|
||||
env: &mut JNIEnv<'local>,
|
||||
connection: BlockingConnection,
|
||||
) -> JObject<'local> {
|
||||
let j_connection = create_java_connection_object(env);
|
||||
// This block sets a native Rust object (Connection) as a field in the Java object (j_Connection).
|
||||
// Caution: This creates a potential for memory leaks. The Rust object (Connection) is not
|
||||
// automatically garbage-collected by Java, and its memory will not be freed unless
|
||||
// explicitly handled.
|
||||
//
|
||||
// To prevent memory leaks, ensure the following:
|
||||
// 1. The Java object (`j_Connection`) should implement the `java.io.Closeable` interface.
|
||||
// 2. Users of this Java object should be instructed to always use it within a try-with-resources
|
||||
// statement (or manually call the `close()` method) to ensure that `self.close()` is invoked.
|
||||
match unsafe { env.set_rust_field(&j_connection, NATIVE_CONNECTION, connection) } {
|
||||
Ok(_) => j_connection,
|
||||
Err(err) => {
|
||||
env.throw_new(
|
||||
"java/lang/RuntimeException",
|
||||
format!("Failed to set native handle for Connection: {}", err),
|
||||
)
|
||||
.expect("Error throwing exception");
|
||||
JObject::null()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn create_java_connection_object<'a>(env: &mut JNIEnv<'a>) -> JObject<'a> {
|
||||
env.new_object("com/lancedb/lancedb/Connection", "()V", &[])
|
||||
.expect("Failed to create Java Lance Connection instance")
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "system" fn Java_com_lancedb_lancedb_Connection_releaseNativeConnection(
|
||||
mut env: JNIEnv,
|
||||
j_connection: JObject,
|
||||
) {
|
||||
let _: BlockingConnection = unsafe {
|
||||
env.take_rust_field(j_connection, NATIVE_CONNECTION)
|
||||
.expect("Failed to take native Connection handle")
|
||||
};
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "system" fn Java_com_lancedb_lancedb_Connection_connect<'local>(
|
||||
mut env: JNIEnv<'local>,
|
||||
_obj: JObject,
|
||||
dataset_uri_object: JString,
|
||||
) -> JObject<'local> {
|
||||
let dataset_uri: String = ok_or_throw!(env, env.get_string(&dataset_uri_object)).into();
|
||||
let blocking_connection = ok_or_throw!(env, BlockingConnection::create(&dataset_uri));
|
||||
blocking_connection.into_java(&mut env)
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "system" fn Java_com_lancedb_lancedb_Connection_tableNames<'local>(
|
||||
mut env: JNIEnv<'local>,
|
||||
j_connection: JObject,
|
||||
start_after_obj: JObject, // Optional<String>
|
||||
limit_obj: JObject, // Optional<Integer>
|
||||
) -> JObject<'local> {
|
||||
ok_or_throw!(
|
||||
env,
|
||||
inner_table_names(&mut env, j_connection, start_after_obj, limit_obj)
|
||||
)
|
||||
}
|
||||
|
||||
fn inner_table_names<'local>(
|
||||
env: &mut JNIEnv<'local>,
|
||||
j_connection: JObject,
|
||||
start_after_obj: JObject, // Optional<String>
|
||||
limit_obj: JObject, // Optional<Integer>
|
||||
) -> Result<JObject<'local>> {
|
||||
let start_after = env.get_string_opt(&start_after_obj)?;
|
||||
let limit = env.get_int_opt(&limit_obj)?;
|
||||
let conn =
|
||||
unsafe { env.get_rust_field::<_, _, BlockingConnection>(j_connection, NATIVE_CONNECTION) }?;
|
||||
let table_names = conn.table_names(start_after, limit)?;
|
||||
drop(conn);
|
||||
let j_names = env.new_object("java/util/ArrayList", "()V", &[])?;
|
||||
for item in table_names {
|
||||
let jstr_item = env.new_string(item)?;
|
||||
let item_jobj = JObject::from(jstr_item);
|
||||
let item_gen = JValue::Object(&item_jobj);
|
||||
env.call_method(&j_names, "add", "(Ljava/lang/Object;)Z", &[item_gen])?;
|
||||
}
|
||||
Ok(j_names)
|
||||
}
|
||||
217
java/core/lancedb-jni/src/error.rs
Normal file
217
java/core/lancedb-jni/src/error.rs
Normal file
@@ -0,0 +1,217 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use std::str::Utf8Error;

use arrow_schema::ArrowError;
use jni::errors::Error as JniError;
use serde_json::Error as JsonError;
use snafu::{Location, Snafu};

type BoxedError = Box<dyn std::error::Error + Send + Sync + 'static>;

/// Java Exception types
pub enum JavaException {
    IllegalArgumentException,
    IOException,
    RuntimeException,
}

impl JavaException {
    pub fn as_str(&self) -> &str {
        match self {
            Self::IllegalArgumentException => "java/lang/IllegalArgumentException",
            Self::IOException => "java/io/IOException",
            Self::RuntimeException => "java/lang/RuntimeException",
        }
    }
}
/// TODO(lu) change to lancedb-jni
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    #[snafu(display("JNI error: {message}, {location}"))]
    Jni { message: String, location: Location },
    #[snafu(display("Invalid argument: {message}, {location}"))]
    InvalidArgument { message: String, location: Location },
    #[snafu(display("IO error: {source}, {location}"))]
    IO {
        source: BoxedError,
        location: Location,
    },
    #[snafu(display("Arrow error: {message}, {location}"))]
    Arrow { message: String, location: Location },
    #[snafu(display("Index error: {message}, {location}"))]
    Index { message: String, location: Location },
    #[snafu(display("JSON error: {message}, {location}"))]
    JSON { message: String, location: Location },
    #[snafu(display("Dataset at path {path} was not found, {location}"))]
    DatasetNotFound { path: String, location: Location },
    #[snafu(display("Dataset already exists: {uri}, {location}"))]
    DatasetAlreadyExists { uri: String, location: Location },
    #[snafu(display("Table '{name}' already exists"))]
    TableAlreadyExists { name: String },
    #[snafu(display("Table '{name}' was not found: {source}"))]
    TableNotFound {
        name: String,
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    #[snafu(display("Invalid table name '{name}': {reason}"))]
    InvalidTableName { name: String, reason: String },
    #[snafu(display("Embedding function '{name}' was not found: {reason}, {location}"))]
    EmbeddingFunctionNotFound {
        name: String,
        reason: String,
        location: Location,
    },
    #[snafu(display("Other Lance error: {message}, {location}"))]
    OtherLance { message: String, location: Location },
    #[snafu(display("Other LanceDB error: {message}, {location}"))]
    OtherLanceDB { message: String, location: Location },
}

impl Error {
    /// Throw as Java Exception
    pub fn throw(&self, env: &mut jni::JNIEnv) {
        match self {
            Self::InvalidArgument { .. }
            | Self::DatasetNotFound { .. }
            | Self::DatasetAlreadyExists { .. }
            | Self::TableAlreadyExists { .. }
            | Self::TableNotFound { .. }
            | Self::InvalidTableName { .. }
            | Self::EmbeddingFunctionNotFound { .. } => {
                self.throw_as(env, JavaException::IllegalArgumentException)
            }
            Self::IO { .. } | Self::Index { .. } => self.throw_as(env, JavaException::IOException),
            Self::Arrow { .. }
            | Self::JSON { .. }
            | Self::OtherLance { .. }
            | Self::OtherLanceDB { .. }
            | Self::Jni { .. } => self.throw_as(env, JavaException::RuntimeException),
        }
    }

    /// Throw as a concrete Java Exception
    pub fn throw_as(&self, env: &mut jni::JNIEnv, exception: JavaException) {
        let message = &format!(
            "Error when throwing Java exception: {}:{}",
            exception.as_str(),
            self
        );
        env.throw_new(exception.as_str(), self.to_string())
            .expect(message);
    }
}

pub type Result<T> = std::result::Result<T, Error>;

trait ToSnafuLocation {
    fn to_snafu_location(&'static self) -> snafu::Location;
}

impl ToSnafuLocation for std::panic::Location<'static> {
    fn to_snafu_location(&'static self) -> snafu::Location {
        snafu::Location::new(self.file(), self.line(), self.column())
    }
}

impl From<JniError> for Error {
    #[track_caller]
    fn from(source: JniError) -> Self {
        Self::Jni {
            message: source.to_string(),
            location: std::panic::Location::caller().to_snafu_location(),
        }
    }
}

impl From<Utf8Error> for Error {
    #[track_caller]
    fn from(source: Utf8Error) -> Self {
        Self::InvalidArgument {
            message: source.to_string(),
            location: std::panic::Location::caller().to_snafu_location(),
        }
    }
}

impl From<ArrowError> for Error {
    #[track_caller]
    fn from(source: ArrowError) -> Self {
        Self::Arrow {
            message: source.to_string(),
            location: std::panic::Location::caller().to_snafu_location(),
        }
    }
}

impl From<JsonError> for Error {
    #[track_caller]
    fn from(source: JsonError) -> Self {
        Self::JSON {
            message: source.to_string(),
            location: std::panic::Location::caller().to_snafu_location(),
        }
    }
}

impl From<lance::Error> for Error {
    #[track_caller]
    fn from(source: lance::Error) -> Self {
        match source {
            lance::Error::DatasetNotFound {
                path,
                source: _,
                location,
            } => Self::DatasetNotFound { path, location },
            lance::Error::DatasetAlreadyExists { uri, location } => {
                Self::DatasetAlreadyExists { uri, location }
            }
            lance::Error::IO { source, location } => Self::IO { source, location },
            lance::Error::Arrow { message, location } => Self::Arrow { message, location },
            lance::Error::Index { message, location } => Self::Index { message, location },
            lance::Error::InvalidInput { source, location } => Self::InvalidArgument {
                message: source.to_string(),
                location,
            },
            _ => Self::OtherLance {
                message: source.to_string(),
                location: std::panic::Location::caller().to_snafu_location(),
            },
        }
    }
}

impl From<lancedb::Error> for Error {
    #[track_caller]
    fn from(source: lancedb::Error) -> Self {
        match source {
            lancedb::Error::InvalidTableName { name, reason } => {
                Self::InvalidTableName { name, reason }
            }
            lancedb::Error::InvalidInput { message } => Self::InvalidArgument {
                message,
                location: std::panic::Location::caller().to_snafu_location(),
            },
            lancedb::Error::TableNotFound { name, source } => Self::TableNotFound { name, source },
            lancedb::Error::TableAlreadyExists { name } => Self::TableAlreadyExists { name },
            lancedb::Error::EmbeddingFunctionNotFound { name, reason } => {
                Self::EmbeddingFunctionNotFound {
                    name,
                    reason,
                    location: std::panic::Location::caller().to_snafu_location(),
                }
            }
            lancedb::Error::Arrow { source } => Self::Arrow {
                message: source.to_string(),
                location: std::panic::Location::caller().to_snafu_location(),
            },
            lancedb::Error::Lance { source } => Self::from(source),
            _ => Self::OtherLanceDB {
                message: source.to_string(),
                location: std::panic::Location::caller().to_snafu_location(),
            },
        }
    }
}
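This mapping decides which exception class a Java caller actually sees when a native call fails. A minimal, hypothetical sketch of that from the Java side (the URI is a placeholder; IO and Index errors additionally surface as java.io.IOException, which is omitted here because the native method signatures introduced later in this change do not declare it):

```java
import com.lancedb.lancedb.Connection;

import java.util.List;

public class ErrorMappingSketch {
  public static void main(String[] args) {
    // Placeholder URI; any failure inside the native layer is re-thrown by
    // error.rs as one of the Java exception classes listed above.
    try (Connection conn = Connection.connect("/tmp/example_db")) {
      List<String> names = conn.tableNames();
      names.forEach(System.out::println);
    } catch (IllegalArgumentException e) {
      // InvalidArgument, DatasetNotFound, TableNotFound, InvalidTableName, ...
      System.err.println("bad request: " + e.getMessage());
    } catch (RuntimeException e) {
      // Arrow, JSON, JNI and other internal errors
      System.err.println("internal error: " + e.getMessage());
    }
  }
}
```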
194
java/core/lancedb-jni/src/ffi.rs
Normal file
@@ -0,0 +1,194 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use core::slice;

use jni::objects::{JByteBuffer, JObjectArray, JString};
use jni::sys::jobjectArray;
use jni::{objects::JObject, JNIEnv};

use crate::error::{Error, Result};

/// TODO(lu) import from lance-jni without duplicate
/// Extend JNIEnv with helper functions.
pub trait JNIEnvExt {
    /// Get integers from Java List<Integer> object.
    fn get_integers(&mut self, obj: &JObject) -> Result<Vec<i32>>;

    /// Get strings from Java List<String> object.
    #[allow(dead_code)]
    fn get_strings(&mut self, obj: &JObject) -> Result<Vec<String>>;

    /// Get strings from Java String[] object.
    /// Note that get Option<Vec<String>> from Java Optional<String[]> just doesn't work.
    #[allow(unused)]
    fn get_strings_array(&mut self, obj: jobjectArray) -> Result<Vec<String>>;

    /// Get Option<String> from Java Optional<String>.
    fn get_string_opt(&mut self, obj: &JObject) -> Result<Option<String>>;

    /// Get Option<Vec<String>> from Java Optional<List<String>>.
    #[allow(unused)]
    fn get_strings_opt(&mut self, obj: &JObject) -> Result<Option<Vec<String>>>;

    /// Get Option<i32> from Java Optional<Integer>.
    fn get_int_opt(&mut self, obj: &JObject) -> Result<Option<i32>>;

    /// Get Option<Vec<i32>> from Java Optional<List<Integer>>.
    fn get_ints_opt(&mut self, obj: &JObject) -> Result<Option<Vec<i32>>>;

    /// Get Option<i64> from Java Optional<Long>.
    #[allow(unused)]
    fn get_long_opt(&mut self, obj: &JObject) -> Result<Option<i64>>;

    /// Get Option<u64> from Java Optional<Long>.
    #[allow(unused)]
    fn get_u64_opt(&mut self, obj: &JObject) -> Result<Option<u64>>;

    /// Get Option<&[u8]> from Java Optional<ByteBuffer>.
    #[allow(unused)]
    fn get_bytes_opt(&mut self, obj: &JObject) -> Result<Option<&[u8]>>;

    fn get_optional<T, F>(&mut self, obj: &JObject, f: F) -> Result<Option<T>>
    where
        F: FnOnce(&mut JNIEnv, &JObject) -> Result<T>;
}

impl JNIEnvExt for JNIEnv<'_> {
    fn get_integers(&mut self, obj: &JObject) -> Result<Vec<i32>> {
        let list = self.get_list(obj)?;
        let mut iter = list.iter(self)?;
        let mut results = Vec::with_capacity(list.size(self)? as usize);
        while let Some(elem) = iter.next(self)? {
            let int_obj = self.call_method(elem, "intValue", "()I", &[])?;
            let int_value = int_obj.i()?;
            results.push(int_value);
        }
        Ok(results)
    }

    fn get_strings(&mut self, obj: &JObject) -> Result<Vec<String>> {
        let list = self.get_list(obj)?;
        let mut iter = list.iter(self)?;
        let mut results = Vec::with_capacity(list.size(self)? as usize);
        while let Some(elem) = iter.next(self)? {
            let jstr = JString::from(elem);
            let val = self.get_string(&jstr)?;
            results.push(val.to_str()?.to_string())
        }
        Ok(results)
    }

    fn get_strings_array(&mut self, obj: jobjectArray) -> Result<Vec<String>> {
        let jobject_array = unsafe { JObjectArray::from_raw(obj) };
        let array_len = self.get_array_length(&jobject_array)?;
        let mut res: Vec<String> = Vec::new();
        for i in 0..array_len {
            let item: JString = self.get_object_array_element(&jobject_array, i)?.into();
            res.push(self.get_string(&item)?.into());
        }
        Ok(res)
    }

    fn get_string_opt(&mut self, obj: &JObject) -> Result<Option<String>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_string_obj = java_obj_gen.l()?;
            let jstr = JString::from(java_string_obj);
            let val = env.get_string(&jstr)?;
            Ok(val.to_str()?.to_string())
        })
    }

    fn get_strings_opt(&mut self, obj: &JObject) -> Result<Option<Vec<String>>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_list_obj = java_obj_gen.l()?;
            env.get_strings(&java_list_obj)
        })
    }

    fn get_int_opt(&mut self, obj: &JObject) -> Result<Option<i32>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_int_obj = java_obj_gen.l()?;
            let int_obj = env.call_method(java_int_obj, "intValue", "()I", &[])?;
            let int_value = int_obj.i()?;
            Ok(int_value)
        })
    }

    fn get_ints_opt(&mut self, obj: &JObject) -> Result<Option<Vec<i32>>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_list_obj = java_obj_gen.l()?;
            env.get_integers(&java_list_obj)
        })
    }

    fn get_long_opt(&mut self, obj: &JObject) -> Result<Option<i64>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_long_obj = java_obj_gen.l()?;
            let long_obj = env.call_method(java_long_obj, "longValue", "()J", &[])?;
            let long_value = long_obj.j()?;
            Ok(long_value)
        })
    }

    fn get_u64_opt(&mut self, obj: &JObject) -> Result<Option<u64>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_long_obj = java_obj_gen.l()?;
            let long_obj = env.call_method(java_long_obj, "longValue", "()J", &[])?;
            let long_value = long_obj.j()?;
            Ok(long_value as u64)
        })
    }

    fn get_bytes_opt(&mut self, obj: &JObject) -> Result<Option<&[u8]>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_byte_buffer_obj = java_obj_gen.l()?;
            let j_byte_buffer = JByteBuffer::from(java_byte_buffer_obj);
            let raw_data = env.get_direct_buffer_address(&j_byte_buffer)?;
            let capacity = env.get_direct_buffer_capacity(&j_byte_buffer)?;
            let data = unsafe { slice::from_raw_parts(raw_data, capacity) };
            Ok(data)
        })
    }

    fn get_optional<T, F>(&mut self, obj: &JObject, f: F) -> Result<Option<T>>
    where
        F: FnOnce(&mut JNIEnv, &JObject) -> Result<T>,
    {
        if obj.is_null() {
            return Ok(None);
        }
        let is_present = self.call_method(obj, "isPresent", "()Z", &[])?;
        if !is_present.z()? {
            // TODO(lu): put get java object into here cuz can only get java Object
            Ok(None)
        } else {
            f(self, obj).map(Some)
        }
    }
}

#[no_mangle]
pub extern "system" fn Java_com_lancedb_lance_test_JniTestHelper_parseInts(
    mut env: JNIEnv,
    _obj: JObject,
    list_obj: JObject, // List<Integer>
) {
    ok_or_throw_without_return!(env, env.get_integers(&list_obj));
}

#[no_mangle]
pub extern "system" fn Java_com_lancedb_lance_test_JniTestHelper_parseIntsOpt(
    mut env: JNIEnv,
    _obj: JObject,
    list_obj: JObject, // Optional<List<Integer>>
) {
    ok_or_throw_without_return!(env, env.get_ints_opt(&list_obj));
}
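The two exported symbols at the end follow standard JNI name mangling (Java_com_lancedb_lance_test_JniTestHelper_parseInts / parseIntsOpt), so the Java counterpart is presumably shaped like the hypothetical sketch below. The actual helper class is not part of this diff, and whether the methods are static or instance methods is an assumption:

```java
package com.lancedb.lance.test;

import java.util.List;
import java.util.Optional;

// Hypothetical sketch inferred from the JNI symbol names in ffi.rs.
public class JniTestHelper {
  // Exercises JNIEnvExt::get_integers (List<Integer> -> Vec<i32>).
  public native void parseInts(List<Integer> ints);

  // Exercises JNIEnvExt::get_ints_opt (Optional<List<Integer>> -> Option<Vec<i32>>).
  public native void parseIntsOpt(Optional<List<Integer>> ints);
}
```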
57
java/core/lancedb-jni/src/lib.rs
Normal file
@@ -0,0 +1,57 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use lazy_static::lazy_static;

// TODO import from lance-jni without duplicate
#[macro_export]
macro_rules! ok_or_throw {
    ($env:expr, $result:expr) => {
        match $result {
            Ok(value) => value,
            Err(err) => {
                Error::from(err).throw(&mut $env);
                return JObject::null();
            }
        }
    };
}

macro_rules! ok_or_throw_without_return {
    ($env:expr, $result:expr) => {
        match $result {
            Ok(value) => value,
            Err(err) => {
                Error::from(err).throw(&mut $env);
                return;
            }
        }
    };
}

#[macro_export]
macro_rules! ok_or_throw_with_return {
    ($env:expr, $result:expr, $ret:expr) => {
        match $result {
            Ok(value) => value,
            Err(err) => {
                Error::from(err).throw(&mut $env);
                return $ret;
            }
        }
    };
}

mod connection;
pub mod error;
mod ffi;
mod traits;

pub use error::{Error, Result};

lazy_static! {
    static ref RT: tokio::runtime::Runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .expect("Failed to create tokio runtime");
}
114
java/core/lancedb-jni/src/traits.rs
Normal file
@@ -0,0 +1,114 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use jni::objects::{JMap, JObject, JString, JValue};
use jni::JNIEnv;

use crate::Result;

#[allow(dead_code)]
pub trait FromJObject<T> {
    fn extract(&self) -> Result<T>;
}

/// Convert a Rust type into a Java Object.
pub trait IntoJava {
    fn into_java<'a>(self, env: &mut JNIEnv<'a>) -> JObject<'a>;
}

impl FromJObject<i32> for JObject<'_> {
    fn extract(&self) -> Result<i32> {
        Ok(JValue::from(self).i()?)
    }
}

impl FromJObject<i64> for JObject<'_> {
    fn extract(&self) -> Result<i64> {
        Ok(JValue::from(self).j()?)
    }
}

impl FromJObject<f32> for JObject<'_> {
    fn extract(&self) -> Result<f32> {
        Ok(JValue::from(self).f()?)
    }
}

impl FromJObject<f64> for JObject<'_> {
    fn extract(&self) -> Result<f64> {
        Ok(JValue::from(self).d()?)
    }
}

#[allow(dead_code)]
pub trait FromJString {
    fn extract(&self, env: &mut JNIEnv) -> Result<String>;
}

impl FromJString for JString<'_> {
    fn extract(&self, env: &mut JNIEnv) -> Result<String> {
        Ok(env.get_string(self)?.into())
    }
}

pub trait JMapExt {
    #[allow(dead_code)]
    fn get_string(&self, env: &mut JNIEnv, key: &str) -> Result<Option<String>>;

    #[allow(dead_code)]
    fn get_i32(&self, env: &mut JNIEnv, key: &str) -> Result<Option<i32>>;

    #[allow(dead_code)]
    fn get_i64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<i64>>;

    #[allow(dead_code)]
    fn get_f32(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f32>>;

    #[allow(dead_code)]
    fn get_f64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f64>>;
}

#[allow(dead_code)]
fn get_map_value<T>(env: &mut JNIEnv, map: &JMap, key: &str) -> Result<Option<T>>
where
    for<'a> JObject<'a>: FromJObject<T>,
{
    let key_obj: JObject = env.new_string(key)?.into();
    if let Some(value) = map.get(env, &key_obj)? {
        if value.is_null() {
            Ok(None)
        } else {
            Ok(Some(value.extract()?))
        }
    } else {
        Ok(None)
    }
}

impl JMapExt for JMap<'_, '_, '_> {
    fn get_string(&self, env: &mut JNIEnv, key: &str) -> Result<Option<String>> {
        let key_obj: JObject = env.new_string(key)?.into();
        if let Some(value) = self.get(env, &key_obj)? {
            let value_str: JString = value.into();
            Ok(Some(value_str.extract(env)?))
        } else {
            Ok(None)
        }
    }

    fn get_i32(&self, env: &mut JNIEnv, key: &str) -> Result<Option<i32>> {
        get_map_value(env, self, key)
    }

    fn get_i64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<i64>> {
        get_map_value(env, self, key)
    }

    fn get_f32(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f32>> {
        get_map_value(env, self, key)
    }

    fn get_f64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f64>> {
        get_map_value(env, self, key)
    }
}
103
java/core/pom.xml
Normal file
@@ -0,0 +1,103 @@
<?xml version="1.0" encoding="UTF-8"?>

<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <parent>
    <groupId>com.lancedb</groupId>
    <artifactId>lancedb-parent</artifactId>
    <version>0.22.4-beta.2</version>
    <relativePath>../pom.xml</relativePath>
  </parent>

  <artifactId>lancedb-core</artifactId>
  <name>${project.artifactId}</name>
  <description>LanceDB Core</description>
  <packaging>jar</packaging>
  <properties>
    <rust.release.build>false</rust.release.build>
  </properties>

  <dependencies>
    <dependency>
      <groupId>com.lancedb</groupId>
      <artifactId>lance-namespace-core</artifactId>
      <version>0.0.1</version>
    </dependency>
    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-vector</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-memory-netty</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-c-data</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-dataset</artifactId>
    </dependency>
    <dependency>
      <groupId>org.json</groupId>
      <artifactId>json</artifactId>
    </dependency>
    <dependency>
      <groupId>org.questdb</groupId>
      <artifactId>jar-jni</artifactId>
    </dependency>
    <dependency>
      <groupId>org.junit.jupiter</groupId>
      <artifactId>junit-jupiter</artifactId>
      <scope>test</scope>
    </dependency>
  </dependencies>

  <profiles>
    <profile>
      <id>build-jni</id>
      <activation>
        <activeByDefault>true</activeByDefault>
      </activation>
      <build>
        <plugins>
          <plugin>
            <groupId>org.questdb</groupId>
            <artifactId>rust-maven-plugin</artifactId>
            <version>1.1.1</version>
            <executions>
              <execution>
                <id>lancedb-jni</id>
                <goals>
                  <goal>build</goal>
                </goals>
                <configuration>
                  <path>lancedb-jni</path>
                  <release>${rust.release.build}</release>
                  <!-- Copy native libraries to target/classes for runtime access -->
                  <copyTo>${project.build.directory}/classes/nativelib</copyTo>
                  <copyWithPlatformDir>true</copyWithPlatformDir>
                </configuration>
              </execution>
              <execution>
                <id>lancedb-jni-test</id>
                <goals>
                  <goal>test</goal>
                </goals>
                <configuration>
                  <path>lancedb-jni</path>
                  <release>false</release>
                  <verbosity>-v</verbosity>
                </configuration>
              </execution>
            </executions>
          </plugin>
        </plugins>
      </build>
    </profile>
  </profiles>
</project>
108
java/core/src/main/java/com/lancedb/lancedb/Connection.java
Normal file
@@ -0,0 +1,108 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.lancedb.lancedb;

import io.questdb.jar.jni.JarJniLoader;

import java.io.Closeable;
import java.util.List;
import java.util.Optional;

/** Represents a LanceDB database. */
public class Connection implements Closeable {
  static {
    JarJniLoader.loadLib(Connection.class, "/nativelib", "lancedb_jni");
  }

  private long nativeConnectionHandle;

  /** Connect to a LanceDB instance. */
  public static native Connection connect(String uri);

  /**
   * Get the names of all tables in the database. The names are sorted in ascending order.
   *
   * @return the table names
   */
  public List<String> tableNames() {
    return tableNames(Optional.empty(), Optional.empty());
  }

  /**
   * Get the names of filtered tables in the database. The names are sorted in ascending order.
   *
   * @param limit The number of results to return.
   * @return the table names
   */
  public List<String> tableNames(int limit) {
    return tableNames(Optional.empty(), Optional.of(limit));
  }

  /**
   * Get the names of filtered tables in the database. The names are sorted in ascending order.
   *
   * @param startAfter If present, only return names that come lexicographically after the supplied
   *     value. This can be combined with limit to implement pagination by setting this to the last
   *     table name from the previous page.
   * @return the table names
   */
  public List<String> tableNames(String startAfter) {
    return tableNames(Optional.of(startAfter), Optional.empty());
  }

  /**
   * Get the names of filtered tables in the database. The names are sorted in ascending order.
   *
   * @param startAfter If present, only return names that come lexicographically after the supplied
   *     value. This can be combined with limit to implement pagination by setting this to the last
   *     table name from the previous page.
   * @param limit The number of results to return.
   * @return the table names
   */
  public List<String> tableNames(String startAfter, int limit) {
    return tableNames(Optional.of(startAfter), Optional.of(limit));
  }

  /**
   * Get the names of filtered tables in the database. The names are sorted in ascending order.
   *
   * @param startAfter If present, only return names that come lexicographically after the supplied
   *     value. This can be combined with limit to implement pagination by setting this to the last
   *     table name from the previous page.
   * @param limit The number of results to return.
   * @return the table names
   */
  public native List<String> tableNames(Optional<String> startAfter, Optional<Integer> limit);

  /**
   * Closes this connection and releases any system resources associated with it. If the connection
   * is already closed, then invoking this method has no effect.
   */
  @Override
  public void close() {
    if (nativeConnectionHandle != 0) {
      releaseNativeConnection(nativeConnectionHandle);
      nativeConnectionHandle = 0;
    }
  }

  /**
   * Native method to release the Lance connection resources associated with the given handle.
   *
   * @param handle The native handle to the connection resource.
   */
  private native void releaseNativeConnection(long handle);

  private Connection() {}
}
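The overloads above compose into a simple pagination loop: pass the last name of the previous page as startAfter. A minimal sketch, assuming an existing database (the path and page size are arbitrary placeholders):

```java
import com.lancedb.lancedb.Connection;

import java.util.List;

public class ListTablesExample {
  public static void main(String[] args) {
    try (Connection conn = Connection.connect("/tmp/example_db")) {
      int pageSize = 2;
      List<String> page = conn.tableNames(pageSize); // first page
      while (!page.isEmpty()) {
        page.forEach(System.out::println);
        String last = page.get(page.size() - 1);
        page = conn.tableNames(last, pageSize); // next page starts after the last name seen
      }
    }
  }
}
```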
135
java/core/src/test/java/com/lancedb/lancedb/ConnectionTest.java
Normal file
@@ -0,0 +1,135 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.lancedb.lancedb;

import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import java.net.URL;
import java.nio.file.Path;
import java.util.List;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

public class ConnectionTest {
  private static final String[] TABLE_NAMES = {
    "dataset_version", "new_empty_dataset", "test", "write_stream"
  };

  @TempDir static Path tempDir; // Temporary directory for the tests
  private static URL lanceDbURL;

  @BeforeAll
  static void setUp() {
    ClassLoader classLoader = ConnectionTest.class.getClassLoader();
    lanceDbURL = classLoader.getResource("example_db");
  }

  @Test
  void emptyDB() {
    String databaseUri = tempDir.resolve("emptyDB").toString();
    try (Connection conn = Connection.connect(databaseUri)) {
      List<String> tableNames = conn.tableNames();
      assertTrue(tableNames.isEmpty());
    }
  }

  @Test
  void tableNames() {
    try (Connection conn = Connection.connect(lanceDbURL.toString())) {
      List<String> tableNames = conn.tableNames();
      assertEquals(4, tableNames.size());
      for (int i = 0; i < TABLE_NAMES.length; i++) {
        assertEquals(TABLE_NAMES[i], tableNames.get(i));
      }
    }
  }

  @Test
  void tableNamesStartAfter() {
    try (Connection conn = Connection.connect(lanceDbURL.toString())) {
      assertTableNamesStartAfter(
          conn, TABLE_NAMES[0], 3, TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
      assertTableNamesStartAfter(conn, TABLE_NAMES[1], 2, TABLE_NAMES[2], TABLE_NAMES[3]);
      assertTableNamesStartAfter(conn, TABLE_NAMES[2], 1, TABLE_NAMES[3]);
      assertTableNamesStartAfter(conn, TABLE_NAMES[3], 0);
      assertTableNamesStartAfter(
          conn, "a_dataset", 4, TABLE_NAMES[0], TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
      assertTableNamesStartAfter(conn, "o_dataset", 2, TABLE_NAMES[2], TABLE_NAMES[3]);
      assertTableNamesStartAfter(conn, "v_dataset", 1, TABLE_NAMES[3]);
      assertTableNamesStartAfter(conn, "z_dataset", 0);
    }
  }

  private void assertTableNamesStartAfter(
      Connection conn, String startAfter, int expectedSize, String... expectedNames) {
    List<String> tableNames = conn.tableNames(startAfter);
    assertEquals(expectedSize, tableNames.size());
    for (int i = 0; i < expectedNames.length; i++) {
      assertEquals(expectedNames[i], tableNames.get(i));
    }
  }

  @Test
  void tableNamesLimit() {
    try (Connection conn = Connection.connect(lanceDbURL.toString())) {
      for (int i = 0; i <= TABLE_NAMES.length; i++) {
        List<String> tableNames = conn.tableNames(i);
        assertEquals(i, tableNames.size());
        for (int j = 0; j < i; j++) {
          assertEquals(TABLE_NAMES[j], tableNames.get(j));
        }
      }
    }
  }

  @Test
  void tableNamesStartAfterLimit() {
    try (Connection conn = Connection.connect(lanceDbURL.toString())) {
      List<String> tableNames = conn.tableNames(TABLE_NAMES[0], 2);
      assertEquals(2, tableNames.size());
      assertEquals(TABLE_NAMES[1], tableNames.get(0));
      assertEquals(TABLE_NAMES[2], tableNames.get(1));
      tableNames = conn.tableNames(TABLE_NAMES[1], 1);
      assertEquals(1, tableNames.size());
      assertEquals(TABLE_NAMES[2], tableNames.get(0));
      tableNames = conn.tableNames(TABLE_NAMES[2], 2);
      assertEquals(1, tableNames.size());
      assertEquals(TABLE_NAMES[3], tableNames.get(0));
      tableNames = conn.tableNames(TABLE_NAMES[3], 2);
      assertEquals(0, tableNames.size());
      tableNames = conn.tableNames(TABLE_NAMES[0], 0);
      assertEquals(0, tableNames.size());

      // Limit larger than the number of remaining tables
      tableNames = conn.tableNames(TABLE_NAMES[0], 10);
      assertEquals(3, tableNames.size());
      assertEquals(TABLE_NAMES[1], tableNames.get(0));
      assertEquals(TABLE_NAMES[2], tableNames.get(1));
      assertEquals(TABLE_NAMES[3], tableNames.get(2));

      // Start after a value not in the list
      tableNames = conn.tableNames("non_existent_table", 2);
      assertEquals(2, tableNames.size());
      assertEquals(TABLE_NAMES[2], tableNames.get(0));
      assertEquals(TABLE_NAMES[3], tableNames.get(1));

      // Start after the last table with a limit
      tableNames = conn.tableNames(TABLE_NAMES[3], 1);
      assertEquals(0, tableNames.size());
    }
  }
}
[Binary test fixture files not shown: Lance dataset data and transaction files for the example databases. The readable transaction fragments only record schemas such as (id: int32, name: string) and (item: string, price: double, vector: fixed_size_list<float>[2]).]
26
java/lance-namespace/pom.xml
Normal file
@@ -0,0 +1,26 @@
<?xml version="1.0" encoding="UTF-8"?>

<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <parent>
    <groupId>com.lancedb</groupId>
    <artifactId>lancedb-parent</artifactId>
    <version>0.22.4-beta.2</version>
    <relativePath>../pom.xml</relativePath>
  </parent>

  <artifactId>lancedb-lance-namespace</artifactId>
  <name>${project.artifactId}</name>
  <description>LanceDB Java Integration with Lance Namespace</description>
  <packaging>jar</packaging>

  <dependencies>
    <dependency>
      <groupId>com.lancedb</groupId>
      <artifactId>lance-namespace-core</artifactId>
    </dependency>
  </dependencies>
</project>
@@ -11,58 +11,35 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.lancedb;
package com.lancedb.lancedb;

import org.lance.namespace.LanceNamespace;
import com.lancedb.lance.namespace.LanceRestNamespace;
import com.lancedb.lance.namespace.client.apache.ApiClient;

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

/**
 * Util class to help construct a {@link LanceNamespace} for LanceDB.
 *
 * <p>For LanceDB Cloud, use the simplified builder API:
 *
 * <pre>{@code
 * import org.lance.namespace.LanceNamespace;
 *
 * // If your DB url is db://example-db, then your database here is example-db
 * LanceNamespace namespaceClient = LanceDbNamespaceClientBuilder.newBuilder()
 *     .apiKey("your_lancedb_cloud_api_key")
 *     .database("your_database_name")
 *     .build();
 * }</pre>
 *
 * <p>For LanceDB Enterprise deployments, use your custom endpoint:
 *
 * <pre>{@code
 * LanceNamespace namespaceClient = LanceDbNamespaceClientBuilder.newBuilder()
 *     .apiKey("your_lancedb_enterprise_api_key")
 *     .database("your_database_name")
 *     .endpoint("<your_enterprise_endpoint>")
 *     .build();
 * }</pre>
 */
public class LanceDbNamespaceClientBuilder {
/** Util class to help construct a {@link LanceRestNamespace} for LanceDB. */
public class LanceDbRestNamespaces {
  private static final String DEFAULT_REGION = "us-east-1";
  private static final String CLOUD_URL_PATTERN = "https://%s.%s.api.lancedb.com";

  private String apiKey;
  private String database;
  private Optional<String> endpoint = Optional.empty();
  private Optional<String> hostOverride = Optional.empty();
  private Optional<String> region = Optional.empty();
  private Map<String, String> additionalConfig = new HashMap<>();

  private LanceDbNamespaceClientBuilder() {}
  private LanceDbRestNamespaces() {}

  /**
   * Create a new builder instance.
   *
   * @return A new LanceDbNamespaceClientBuilder
   * @return A new LanceRestNamespaceBuilder
   */
  public static LanceDbNamespaceClientBuilder newBuilder() {
    return new LanceDbNamespaceClientBuilder();
  public static LanceDbRestNamespaces builder() {
    return new LanceDbRestNamespaces();
  }

  /**
@@ -71,7 +48,7 @@ public class LanceDbNamespaceClientBuilder {
   * @param apiKey The LanceDB API key
   * @return This builder
   */
  public LanceDbNamespaceClientBuilder apiKey(String apiKey) {
  public LanceDbRestNamespaces apiKey(String apiKey) {
    if (apiKey == null || apiKey.trim().isEmpty()) {
      throw new IllegalArgumentException("API key cannot be null or empty");
    }
@@ -85,7 +62,7 @@ public class LanceDbNamespaceClientBuilder {
   * @param database The database name
   * @return This builder
   */
  public LanceDbNamespaceClientBuilder database(String database) {
  public LanceDbRestNamespaces database(String database) {
    if (database == null || database.trim().isEmpty()) {
      throw new IllegalArgumentException("Database cannot be null or empty");
    }
@@ -94,25 +71,25 @@ public class LanceDbNamespaceClientBuilder {
  }

  /**
   * Set a custom endpoint URL (optional). When set, this overrides the default LanceDB Cloud URL
   * Set a custom host override (optional). When set, this overrides the default LanceDB Cloud URL
   * construction. Use this for LanceDB Enterprise deployments.
   *
   * @param endpoint The complete base URL for your LanceDB Enterprise deployment
   * @param hostOverride The complete base URL (e.g., "http://your-vpc-endpoint:80")
   * @return This builder
   */
  public LanceDbNamespaceClientBuilder endpoint(String endpoint) {
    this.endpoint = Optional.ofNullable(endpoint);
  public LanceDbRestNamespaces hostOverride(String hostOverride) {
    this.hostOverride = Optional.ofNullable(hostOverride);
    return this;
  }

  /**
   * Set the region for LanceDB Cloud (optional). Defaults to "us-east-1" if not specified. This is
   * ignored when endpoint is set.
   * ignored when hostOverride is set.
   *
   * @param region The AWS region (e.g., "us-east-1", "eu-west-1")
   * @return This builder
   */
  public LanceDbNamespaceClientBuilder region(String region) {
  public LanceDbRestNamespaces region(String region) {
    this.region = Optional.ofNullable(region);
    return this;
  }
@@ -124,18 +101,18 @@ public class LanceDbNamespaceClientBuilder {
   * @param value The configuration value
   * @return This builder
   */
  public LanceDbNamespaceClientBuilder config(String key, String value) {
  public LanceDbRestNamespaces config(String key, String value) {
    this.additionalConfig.put(key, value);
    return this;
  }

  /**
   * Build the LanceNamespace instance.
   * Build the LanceRestNamespace instance.
   *
   * @return A configured LanceNamespace
   * @return A configured LanceRestNamespace
   * @throws IllegalStateException if required parameters are missing
   */
  public LanceNamespace build() {
  public LanceRestNamespace build() {
    // Validate required fields
    if (apiKey == null) {
      throw new IllegalStateException("API key is required");
@@ -146,19 +123,24 @@ public class LanceDbNamespaceClientBuilder {

    // Build configuration map
    Map<String, String> config = new HashMap<>(additionalConfig);
    config.put("header.x-lancedb-database", database);
    config.put("header.x-api-key", apiKey);
    config.put("headers.x-lancedb-database", database);
    config.put("headers.x-api-key", apiKey);

    // Determine base URL
    String uri;
    if (endpoint.isPresent()) {
      uri = endpoint.get();
    String baseUrl;
    if (hostOverride.isPresent()) {
      baseUrl = hostOverride.get();
      config.put("host_override", hostOverride.get());
    } else {
      String effectiveRegion = region.orElse(DEFAULT_REGION);
      uri = String.format(CLOUD_URL_PATTERN, database, effectiveRegion);
      baseUrl = String.format(CLOUD_URL_PATTERN, database, effectiveRegion);
      config.put("region", effectiveRegion);
    }
    config.put("uri", uri);

    return LanceNamespace.connect("rest", config, null);
    // Create and configure ApiClient
    ApiClient apiClient = new ApiClient();
    apiClient.setBasePath(baseUrl);

    return new LanceRestNamespace(apiClient, config);
  }
}
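With this rename the entry point becomes LanceDbRestNamespaces.builder() and the Enterprise endpoint setter is now hostOverride. A minimal sketch of both paths against the new surface (API keys, database name, and the VPC endpoint are placeholders):

```java
import com.lancedb.lance.namespace.LanceRestNamespace;
import com.lancedb.lancedb.LanceDbRestNamespaces;

public class RestNamespaceSketch {
  public static void main(String[] args) {
    // LanceDB Cloud: the base URL is derived from database + region.
    LanceRestNamespace cloud =
        LanceDbRestNamespaces.builder()
            .apiKey("your_lancedb_cloud_api_key")
            .database("example-db")
            .region("us-east-1") // optional; ignored when hostOverride is set
            .build();

    // LanceDB Enterprise: point at the deployment's own endpoint instead.
    LanceRestNamespace enterprise =
        LanceDbRestNamespaces.builder()
            .apiKey("your_lancedb_enterprise_api_key")
            .database("example-db")
            .hostOverride("http://your-vpc-endpoint:80")
            .build();
  }
}
```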
@@ -1,99 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>

<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <parent>
    <groupId>com.lancedb</groupId>
    <artifactId>lancedb-parent</artifactId>
    <version>0.23.0-beta.0</version>
    <relativePath>../pom.xml</relativePath>
  </parent>

  <artifactId>lancedb-core</artifactId>
  <name>${project.artifactId}</name>
  <description>Utilities to work with LanceDB Cloud and Enterprise via Lance REST Namespace</description>
  <packaging>jar</packaging>

  <dependencies>
    <dependency>
      <groupId>org.lance</groupId>
      <artifactId>lance-core</artifactId>
    </dependency>

    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-vector</artifactId>
    </dependency>

    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-memory-netty</artifactId>
    </dependency>

    <dependency>
      <groupId>org.junit.jupiter</groupId>
      <artifactId>junit-jupiter</artifactId>
      <scope>test</scope>
    </dependency>

    <dependency>
      <groupId>org.mockito</groupId>
      <artifactId>mockito-junit-jupiter</artifactId>
      <version>5.18.0</version>
      <scope>test</scope>
    </dependency>

    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-api</artifactId>
      <version>2.0.16</version>
      <scope>test</scope>
    </dependency>

    <dependency>
      <groupId>org.apache.logging.log4j</groupId>
      <artifactId>log4j-slf4j2-impl</artifactId>
      <version>2.24.3</version>
      <scope>test</scope>
    </dependency>

    <dependency>
      <groupId>org.apache.logging.log4j</groupId>
      <artifactId>log4j-core</artifactId>
      <version>2.24.3</version>
      <scope>test</scope>
    </dependency>

    <dependency>
      <groupId>org.apache.logging.log4j</groupId>
      <artifactId>log4j-api</artifactId>
      <version>2.24.3</version>
      <scope>test</scope>
    </dependency>
  </dependencies>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-source-plugin</artifactId>
        <version>3.3.0</version>
        <executions>
          <execution>
            <id>attach-sources</id>
            <goals>
              <goal>jar-no-fork</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-surefire-plugin</artifactId>
      </plugin>
    </plugins>
  </build>
</project>
@@ -1,96 +0,0 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.lancedb;

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.*;

/** Unit tests for LanceDbNamespaceClientBuilder. */
public class LanceDbNamespaceClientBuilderTest {

  @Test
  public void testBuilderRequiresApiKey() {
    LanceDbNamespaceClientBuilder builder =
        LanceDbNamespaceClientBuilder.newBuilder().database("test-db");

    IllegalStateException exception = assertThrows(IllegalStateException.class, builder::build);
    assertEquals("API key is required", exception.getMessage());
  }

  @Test
  public void testBuilderRequiresDatabase() {
    LanceDbNamespaceClientBuilder builder =
        LanceDbNamespaceClientBuilder.newBuilder().apiKey("test-api-key");

    IllegalStateException exception = assertThrows(IllegalStateException.class, builder::build);
    assertEquals("Database is required", exception.getMessage());
  }

  @Test
  public void testApiKeyCannotBeNull() {
    IllegalArgumentException exception =
        assertThrows(
            IllegalArgumentException.class,
            () -> LanceDbNamespaceClientBuilder.newBuilder().apiKey(null));
    assertEquals("API key cannot be null or empty", exception.getMessage());
  }

  @Test
  public void testApiKeyCannotBeEmpty() {
    IllegalArgumentException exception =
        assertThrows(
            IllegalArgumentException.class,
            () -> LanceDbNamespaceClientBuilder.newBuilder().apiKey(" "));
    assertEquals("API key cannot be null or empty", exception.getMessage());
  }

  @Test
  public void testDatabaseCannotBeNull() {
    IllegalArgumentException exception =
        assertThrows(
            IllegalArgumentException.class,
            () -> LanceDbNamespaceClientBuilder.newBuilder().database(null));
    assertEquals("Database cannot be null or empty", exception.getMessage());
  }

  @Test
  public void testDatabaseCannotBeEmpty() {
    IllegalArgumentException exception =
        assertThrows(
            IllegalArgumentException.class,
            () -> LanceDbNamespaceClientBuilder.newBuilder().database(" "));
    assertEquals("Database cannot be null or empty", exception.getMessage());
  }

  @Test
  public void testBuilderFluentApi() {
    // Verify the builder returns itself for chaining
    LanceDbNamespaceClientBuilder builder = LanceDbNamespaceClientBuilder.newBuilder();

    assertSame(builder, builder.apiKey("test-key"));
    assertSame(builder, builder.database("test-db"));
    assertSame(builder, builder.endpoint("http://localhost:8080"));
    assertSame(builder, builder.region("eu-west-1"));
    assertSame(builder, builder.config("custom-key", "custom-value"));
  }

  @Test
  public void testNewBuilderCreatesNewInstance() {
    LanceDbNamespaceClientBuilder builder1 = LanceDbNamespaceClientBuilder.newBuilder();
    LanceDbNamespaceClientBuilder builder2 = LanceDbNamespaceClientBuilder.newBuilder();

    assertNotSame(builder1, builder2);
  }
}
@@ -1,32 +0,0 @@
<?xml version='1.0' encoding='UTF-8'?>

<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing,
  software distributed under the License is distributed on an
  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  KIND, either express or implied. See the License for the
  specific language governing permissions and limitations
  under the License.
-->

<configuration monitorInterval="30">
  <appenders>
    <Console name='Console' target='SYSTEM_ERR'>
      <PatternLayout pattern='%d{HH:mm:ss.SSS} %p [%t] %C{1}.%M: %m%n'/>
    </Console>
  </appenders>
  <loggers>
    <logger name='com.lancedb' level='DEBUG' additivity='false'>
      <appender-ref ref='Console'/>
    </logger>
    <root level='INFO'>
      <appender-ref ref='Console'/>
    </root>
  </loggers>
</configuration>
23
java/pom.xml
@@ -6,7 +6,7 @@

  <groupId>com.lancedb</groupId>
  <artifactId>lancedb-parent</artifactId>
  <version>0.23.0-beta.0</version>
  <version>0.22.4-beta.2</version>
  <packaging>pom</packaging>
  <name>${project.artifactId}</name>
  <description>LanceDB Java SDK Parent POM</description>
@@ -28,7 +28,7 @@
  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <arrow.version>15.0.0</arrow.version>
    <lance-core.version>1.0.0-rc.2</lance-core.version>
    <lance-namespace.verison>0.0.1</lance-namespace.verison>
    <spotless.skip>false</spotless.skip>
    <spotless.version>2.30.0</spotless.version>
    <spotless.java.googlejavaformat.version>1.7</spotless.java.googlejavaformat.version>
@@ -51,7 +51,8 @@
  </properties>

  <modules>
    <module>lancedb-core</module>
    <module>core</module>
    <module>lance-namespace</module>
  </modules>

  <scm>
@@ -63,9 +64,9 @@
  <dependencyManagement>
    <dependencies>
      <dependency>
        <groupId>org.lance</groupId>
        <artifactId>lance-core</artifactId>
        <version>${lance-core.version}</version>
        <groupId>com.lancedb</groupId>
        <artifactId>lance-namespace-core</artifactId>
        <version>${lance-namespace.verison}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.arrow</groupId>
@@ -87,11 +88,21 @@
        <artifactId>arrow-dataset</artifactId>
        <version>${arrow.version}</version>
      </dependency>
      <dependency>
        <groupId>org.questdb</groupId>
        <artifactId>jar-jni</artifactId>
        <version>1.1.1</version>
      </dependency>
      <dependency>
        <groupId>org.junit.jupiter</groupId>
        <artifactId>junit-jupiter</artifactId>
        <version>5.10.1</version>
      </dependency>
      <dependency>
        <groupId>org.json</groupId>
        <artifactId>json</artifactId>
        <version>20210307</version>
      </dependency>
    </dependencies>
  </dependencyManagement>

@@ -1,7 +1,7 @@
[package]
name = "lancedb-nodejs"
edition.workspace = true
version = "0.23.0-beta.0"
version = "0.22.4-beta.2"
license.workspace = true
description.workspace = true
repository.workspace = true

@@ -118,7 +118,7 @@ export class PermutationBuilder {
   * @returns A new PermutationBuilder instance
   * @example
   * ```ts
   * builder.splitCalculated({ calculation: "user_id % 3" });
   * builder.splitCalculated("user_id % 3");
   * ```
   */
  splitCalculated(options: SplitCalculatedOptions): PermutationBuilder {

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-arm64",
  "version": "0.23.0-beta.0",
  "version": "0.22.4-beta.2",
  "os": ["darwin"],
  "cpu": ["arm64"],
  "main": "lancedb.darwin-arm64.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-x64",
  "version": "0.23.0-beta.0",
  "version": "0.22.4-beta.2",
  "os": ["darwin"],
  "cpu": ["x64"],
  "main": "lancedb.darwin-x64.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-arm64-gnu",
  "version": "0.23.0-beta.0",
  "version": "0.22.4-beta.2",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "lancedb.linux-arm64-gnu.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-arm64-musl",
  "version": "0.23.0-beta.0",
  "version": "0.22.4-beta.2",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "lancedb.linux-arm64-musl.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-x64-gnu",
  "version": "0.23.0-beta.0",
  "version": "0.22.4-beta.2",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "lancedb.linux-x64-gnu.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-x64-musl",
  "version": "0.23.0-beta.0",
  "version": "0.22.4-beta.2",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "lancedb.linux-x64-musl.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-win32-arm64-msvc",
  "version": "0.23.0-beta.0",
  "version": "0.22.4-beta.2",
  "os": [
    "win32"
  ],

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-win32-x64-msvc",
  "version": "0.23.0-beta.0",
  "version": "0.22.4-beta.2",
  "os": ["win32"],
  "cpu": ["x64"],
  "main": "lancedb.win32-x64-msvc.node",
4
nodejs/package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb",
|
||||
"version": "0.23.0-beta.0",
|
||||
"version": "0.22.4-beta.2",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@lancedb/lancedb",
|
||||
"version": "0.23.0-beta.0",
|
||||
"version": "0.22.4-beta.2",
|
||||
"cpu": [
|
||||
"x64",
|
||||
"arm64"
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"ann"
|
||||
],
|
||||
"private": false,
|
||||
"version": "0.23.0-beta.0",
|
||||
"version": "0.22.4-beta.2",
|
||||
"main": "dist/index.js",
|
||||
"exports": {
|
||||
".": "./dist/index.js",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
[tool.bumpversion]
|
||||
current_version = "0.26.0-beta.1"
|
||||
current_version = "0.25.4-beta.2"
|
||||
parse = """(?x)
|
||||
(?P<major>0|[1-9]\\d*)\\.
|
||||
(?P<minor>0|[1-9]\\d*)\\.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lancedb-python"
|
||||
version = "0.26.0-beta.1"
|
||||
version = "0.25.4-beta.2"
|
||||
edition.workspace = true
|
||||
description = "Python bindings for LanceDB"
|
||||
license.workspace = true
|
||||
@@ -18,7 +18,6 @@ arrow = { version = "56.2", features = ["pyarrow"] }
|
||||
async-trait = "0.1"
|
||||
lancedb = { path = "../rust/lancedb", default-features = false }
|
||||
lance-core.workspace = true
|
||||
lance-namespace.workspace = true
|
||||
lance-io.workspace = true
|
||||
env_logger.workspace = true
|
||||
pyo3 = { version = "0.25", features = ["extension-module", "abi3-py39"] }
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
PIP_EXTRA_INDEX_URL ?= https://pypi.fury.io/lance-format/ https://pypi.fury.io/lancedb/
|
||||
PIP_EXTRA_INDEX_URL ?= https://pypi.fury.io/lancedb/
|
||||
|
||||
help: ## Show this help.
|
||||
@sed -ne '/@sed/!s/## //p' $(MAKEFILE_LIST)
|
||||
|
||||
.PHONY: develop
|
||||
develop: ## Install the package in development mode.
|
||||
PIP_EXTRA_INDEX_URL="$(PIP_EXTRA_INDEX_URL)" maturin develop --extras tests,dev,embeddings
|
||||
PIP_EXTRA_INDEX_URL=$(PIP_EXTRA_INDEX_URL) maturin develop --extras tests,dev,embeddings
|
||||
|
||||
.PHONY: format
|
||||
format: ## Format the code.
|
||||
|
||||
@@ -10,7 +10,7 @@ dependencies = [
|
||||
"pyarrow>=16",
|
||||
"pydantic>=1.10",
|
||||
"tqdm>=4.27.0",
|
||||
"lance-namespace>=0.2.1"
|
||||
"lance-namespace>=0.0.21"
|
||||
]
|
||||
description = "lancedb"
|
||||
authors = [{ name = "LanceDB Devs", email = "dev@lancedb.com" }]
|
||||
@@ -45,7 +45,7 @@ repository = "https://github.com/lancedb/lancedb"
|
||||
|
||||
[project.optional-dependencies]
|
||||
pylance = [
|
||||
"pylance>=1.0.0b14",
|
||||
"pylance>=0.25",
|
||||
]
|
||||
tests = [
|
||||
"aiohttp",
|
||||
@@ -59,7 +59,7 @@ tests = [
|
||||
"polars>=0.19, <=1.3.0",
|
||||
"tantivy",
|
||||
"pyarrow-stubs",
|
||||
"pylance>=1.0.0b14",
|
||||
"pylance>=1.0.0b4",
|
||||
"requests",
|
||||
"datafusion",
|
||||
]
|
||||
|
||||
@@ -3,30 +3,10 @@ from typing import Dict, List, Optional, Tuple, Any, TypedDict, Union, Literal

import pyarrow as pa

from .index import (
BTree,
IvfFlat,
IvfPq,
IvfSq,
Bitmap,
LabelList,
HnswPq,
HnswSq,
FTS,
)
from .index import BTree, IvfFlat, IvfPq, Bitmap, LabelList, HnswPq, HnswSq, FTS
from .io import StorageOptionsProvider
from lance_namespace import (
ListNamespacesResponse,
CreateNamespaceResponse,
DropNamespaceResponse,
DescribeNamespaceResponse,
ListTablesResponse,
)
from .remote import ClientConfig

IvfHnswPq: type[HnswPq] = HnswPq
IvfHnswSq: type[HnswSq] = HnswSq

class Session:
def __init__(
self,

@@ -46,38 +26,18 @@ class Connection(object):
async def close(self): ...
async def list_namespaces(
self,
namespace: Optional[List[str]] = None,
page_token: Optional[str] = None,
limit: Optional[int] = None,
) -> ListNamespacesResponse: ...
async def create_namespace(
self,
namespace: List[str],
mode: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
) -> CreateNamespaceResponse: ...
async def drop_namespace(
self,
namespace: List[str],
mode: Optional[str] = None,
behavior: Optional[str] = None,
) -> DropNamespaceResponse: ...
async def describe_namespace(
self,
namespace: List[str],
) -> DescribeNamespaceResponse: ...
async def list_tables(
self,
namespace: Optional[List[str]] = None,
page_token: Optional[str] = None,
limit: Optional[int] = None,
) -> ListTablesResponse: ...
namespace: Optional[List[str]],
page_token: Optional[str],
limit: Optional[int],
) -> List[str]: ...
async def create_namespace(self, namespace: List[str]) -> None: ...
async def drop_namespace(self, namespace: List[str]) -> None: ...
async def table_names(
self,
namespace: Optional[List[str]],
start_after: Optional[str],
limit: Optional[int],
) -> list[str]: ... # Deprecated: Use list_tables instead
) -> list[str]: ...
async def create_table(
self,
name: str,

@@ -144,17 +104,7 @@ class Table:
async def create_index(
self,
column: str,
index: Union[
IvfFlat,
IvfSq,
IvfPq,
HnswPq,
HnswSq,
BTree,
Bitmap,
LabelList,
FTS,
],
index: Union[IvfFlat, IvfPq, HnswPq, HnswSq, BTree, Bitmap, LabelList, FTS],
replace: Optional[bool],
wait_timeout: Optional[object],
*,
@@ -22,13 +22,6 @@ from lancedb.embeddings.registry import EmbeddingFunctionRegistry

from lancedb.common import data_to_reader, sanitize_uri, validate_schema
from lancedb.background_loop import LOOP
from lance_namespace import (
ListNamespacesResponse,
CreateNamespaceResponse,
DropNamespaceResponse,
DescribeNamespaceResponse,
ListTablesResponse,
)

from . import __version__
from ._lancedb import connect as lancedb_connect  # type: ignore

@@ -55,12 +48,6 @@ if TYPE_CHECKING:
from .io import StorageOptionsProvider
from ._lancedb import Session

from .namespace_utils import (
_normalize_create_namespace_mode,
_normalize_drop_namespace_mode,
_normalize_drop_namespace_behavior,
)


class DBConnection(EnforceOverrides):
"""An active LanceDB connection interface."""

@@ -69,8 +56,8 @@ class DBConnection(EnforceOverrides):
self,
namespace: Optional[List[str]] = None,
page_token: Optional[str] = None,
limit: Optional[int] = None,
) -> ListNamespacesResponse:
limit: int = 10,
) -> Iterable[str]:
"""List immediate child namespace names in the given namespace.

Parameters

@@ -79,119 +66,43 @@ class DBConnection(EnforceOverrides):
The parent namespace to list namespaces in.
Empty list represents root namespace.
page_token: str, optional
Token for pagination. Use the token from a previous response
to get the next page of results.
limit: int, optional
The maximum number of results to return.
The token to use for pagination. If not present, start from the beginning.
limit: int, default 10
The size of the page to return.

Returns
-------
ListNamespacesResponse
Response containing namespace names and optional page_token for pagination.
Iterable of str
List of immediate child namespace names
"""
if namespace is None:
namespace = []
return ListNamespacesResponse(namespaces=[], page_token=None)
return []

def create_namespace(
self,
namespace: List[str],
mode: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
) -> CreateNamespaceResponse:
def create_namespace(self, namespace: List[str]) -> None:
"""Create a new namespace.

Parameters
----------
namespace: List[str]
The namespace identifier to create.
mode: str, optional
Creation mode - "create" (fail if exists), "exist_ok" (skip if exists),
or "overwrite" (replace if exists). Case insensitive.
properties: Dict[str, str], optional
Properties to set on the namespace.

Returns
-------
CreateNamespaceResponse
Response containing the properties of the created namespace.
"""
raise NotImplementedError(
"Namespace operations are not supported for this connection type"
)

def drop_namespace(
self,
namespace: List[str],
mode: Optional[str] = None,
behavior: Optional[str] = None,
) -> DropNamespaceResponse:
def drop_namespace(self, namespace: List[str]) -> None:
"""Drop a namespace.

Parameters
----------
namespace: List[str]
The namespace identifier to drop.
mode: str, optional
Whether to skip if not exists ("SKIP") or fail ("FAIL"). Case insensitive.
behavior: str, optional
Whether to restrict drop if not empty ("RESTRICT") or cascade ("CASCADE").
Case insensitive.

Returns
-------
DropNamespaceResponse
Response containing properties and transaction_id if applicable.
"""
raise NotImplementedError(
"Namespace operations are not supported for this connection type"
)

def describe_namespace(self, namespace: List[str]) -> DescribeNamespaceResponse:
"""Describe a namespace.

Parameters
----------
namespace: List[str]
The namespace identifier to describe.

Returns
-------
DescribeNamespaceResponse
Response containing the namespace properties.
"""
raise NotImplementedError(
"Namespace operations are not supported for this connection type"
)

def list_tables(
self,
namespace: Optional[List[str]] = None,
page_token: Optional[str] = None,
limit: Optional[int] = None,
) -> ListTablesResponse:
"""List all tables in this database with pagination support.

Parameters
----------
namespace: List[str], optional
The namespace to list tables in.
None or empty list represents root namespace.
page_token: str, optional
Token for pagination. Use the token from a previous response
to get the next page of results.
limit: int, optional
The maximum number of results to return.

Returns
-------
ListTablesResponse
Response containing table names and optional page_token for pagination.
"""
raise NotImplementedError(
"list_tables is not supported for this connection type"
)

@abstractmethod
def table_names(
self,

@@ -283,10 +194,6 @@ class DBConnection(EnforceOverrides):
connection will be inherited by the table, but can be overridden here.
See available options at
<https://lancedb.com/docs/storage/>

To enable stable row IDs (row IDs remain stable after compaction,
update, delete, and merges), set `new_table_enable_stable_row_ids`
to `"true"` in storage_options when connecting to the database.
data_storage_version: optional, str, default "stable"
Deprecated. Set `storage_options` when connecting to the database and set
`new_table_data_storage_version` in the options.
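The hunks above replace the richer namespace surface (creation modes, drop behaviors, typed responses) with the older, simpler one. For orientation, a minimal sketch of how the richer side is meant to be called, using only names documented in the docstrings above; the local path and namespace values are illustrative:

import lancedb

# Assumes a connection type that implements the namespace methods shown above.
db = lancedb.connect("./.lancedb")

# "exist_ok" skips creation when the namespace already exists (case insensitive).
db.create_namespace(["prod"], mode="exist_ok", properties={"owner": "search-team"})

# ListTablesResponse carries table names plus an optional page_token for the next page.
page = db.list_tables(namespace=["prod"], limit=100)
print(page.tables, page.page_token)

# RESTRICT refuses to drop a non-empty namespace; CASCADE would drop children as well.
db.drop_namespace(["prod"], mode="SKIP", behavior="RESTRICT")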
@@ -646,8 +553,8 @@ class LanceDBConnection(DBConnection):
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListNamespacesResponse:
|
||||
limit: int = 10,
|
||||
) -> Iterable[str]:
|
||||
"""List immediate child namespace names in the given namespace.
|
||||
|
||||
Parameters
|
||||
@@ -656,15 +563,14 @@ class LanceDBConnection(DBConnection):
|
||||
The parent namespace to list namespaces in.
|
||||
None or empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
Token for pagination. Use the token from a previous response
|
||||
to get the next page of results.
|
||||
limit: int, optional
|
||||
The maximum number of results to return.
|
||||
The token to use for pagination. If not present, start from the beginning.
|
||||
limit: int, default 10
|
||||
The size of the page to return.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ListNamespacesResponse
|
||||
Response containing namespace names and optional page_token for pagination.
|
||||
Iterable of str
|
||||
List of immediate child namespace names
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
@@ -675,111 +581,26 @@ class LanceDBConnection(DBConnection):
|
||||
)
|
||||
|
||||
@override
|
||||
def create_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
mode: Optional[str] = None,
|
||||
properties: Optional[Dict[str, str]] = None,
|
||||
) -> CreateNamespaceResponse:
|
||||
def create_namespace(self, namespace: List[str]) -> None:
|
||||
"""Create a new namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
The namespace identifier to create.
|
||||
mode: str, optional
|
||||
Creation mode - "create" (fail if exists), "exist_ok" (skip if exists),
|
||||
or "overwrite" (replace if exists). Case insensitive.
|
||||
properties: Dict[str, str], optional
|
||||
Properties to set on the namespace.
|
||||
|
||||
Returns
|
||||
-------
|
||||
CreateNamespaceResponse
|
||||
Response containing the properties of the created namespace.
|
||||
"""
|
||||
return LOOP.run(
|
||||
self._conn.create_namespace(
|
||||
namespace=namespace, mode=mode, properties=properties
|
||||
)
|
||||
)
|
||||
LOOP.run(self._conn.create_namespace(namespace=namespace))
|
||||
|
||||
@override
|
||||
def drop_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
mode: Optional[str] = None,
|
||||
behavior: Optional[str] = None,
|
||||
) -> DropNamespaceResponse:
|
||||
def drop_namespace(self, namespace: List[str]) -> None:
|
||||
"""Drop a namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
The namespace identifier to drop.
|
||||
mode: str, optional
|
||||
Whether to skip if not exists ("SKIP") or fail ("FAIL"). Case insensitive.
|
||||
behavior: str, optional
|
||||
Whether to restrict drop if not empty ("RESTRICT") or cascade ("CASCADE").
|
||||
Case insensitive.
|
||||
|
||||
Returns
|
||||
-------
|
||||
DropNamespaceResponse
|
||||
Response containing properties and transaction_id if applicable.
|
||||
"""
|
||||
return LOOP.run(
|
||||
self._conn.drop_namespace(namespace=namespace, mode=mode, behavior=behavior)
|
||||
)
|
||||
|
||||
@override
|
||||
def describe_namespace(self, namespace: List[str]) -> DescribeNamespaceResponse:
|
||||
"""Describe a namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
The namespace identifier to describe.
|
||||
|
||||
Returns
|
||||
-------
|
||||
DescribeNamespaceResponse
|
||||
Response containing the namespace properties.
|
||||
"""
|
||||
return LOOP.run(self._conn.describe_namespace(namespace=namespace))
|
||||
|
||||
@override
|
||||
def list_tables(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListTablesResponse:
|
||||
"""List all tables in this database with pagination support.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
The namespace to list tables in.
|
||||
None or empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
Token for pagination. Use the token from a previous response
|
||||
to get the next page of results.
|
||||
limit: int, optional
|
||||
The maximum number of results to return.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ListTablesResponse
|
||||
Response containing table names and optional page_token for pagination.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
return LOOP.run(
|
||||
self._conn.list_tables(
|
||||
namespace=namespace, page_token=page_token, limit=limit
|
||||
)
|
||||
)
|
||||
return LOOP.run(self._conn.drop_namespace(namespace=namespace))
|
||||
|
||||
@override
|
||||
def table_names(
|
||||
@@ -791,9 +612,6 @@ class LanceDBConnection(DBConnection):
|
||||
) -> Iterable[str]:
|
||||
"""Get the names of all tables in the database. The names are sorted.
|
||||
|
||||
.. deprecated::
|
||||
Use :meth:`list_tables` instead, which provides proper pagination support.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
@@ -808,13 +626,6 @@ class LanceDBConnection(DBConnection):
|
||||
Iterator of str.
|
||||
A list of table names.
|
||||
"""
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
"table_names() is deprecated, use list_tables() instead",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
return LOOP.run(
|
||||
@@ -1129,8 +940,8 @@ class AsyncConnection(object):
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListNamespacesResponse:
|
||||
limit: int = 10,
|
||||
) -> Iterable[str]:
|
||||
"""List immediate child namespace names in the given namespace.
|
||||
|
||||
Parameters
|
||||
@@ -1140,128 +951,39 @@ class AsyncConnection(object):
|
||||
None or empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
The token to use for pagination. If not present, start from the beginning.
|
||||
limit: int, optional
|
||||
The maximum number of results to return.
|
||||
limit: int, default 10
|
||||
The size of the page to return.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ListNamespacesResponse
|
||||
Response containing namespace names and optional pagination token
|
||||
Iterable of str
|
||||
List of immediate child namespace names (not full paths)
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
result = await self._inner.list_namespaces(
|
||||
return await self._inner.list_namespaces(
|
||||
namespace=namespace, page_token=page_token, limit=limit
|
||||
)
|
||||
return ListNamespacesResponse(**result)
|
||||
|
||||
async def create_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
mode: Optional[str] = None,
|
||||
properties: Optional[Dict[str, str]] = None,
|
||||
) -> CreateNamespaceResponse:
|
||||
async def create_namespace(self, namespace: List[str]) -> None:
|
||||
"""Create a new namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
The namespace identifier to create.
|
||||
mode: str, optional
|
||||
Creation mode - "create", "exist_ok", or "overwrite". Case insensitive.
|
||||
properties: Dict[str, str], optional
|
||||
Properties to associate with the namespace
|
||||
|
||||
Returns
|
||||
-------
|
||||
CreateNamespaceResponse
|
||||
Response containing namespace properties
|
||||
"""
|
||||
result = await self._inner.create_namespace(
|
||||
namespace,
|
||||
mode=_normalize_create_namespace_mode(mode),
|
||||
properties=properties,
|
||||
)
|
||||
return CreateNamespaceResponse(**result)
|
||||
await self._inner.create_namespace(namespace)
|
||||
|
||||
async def drop_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
mode: Optional[str] = None,
|
||||
behavior: Optional[str] = None,
|
||||
) -> DropNamespaceResponse:
|
||||
async def drop_namespace(self, namespace: List[str]) -> None:
|
||||
"""Drop a namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
The namespace identifier to drop.
|
||||
mode: str, optional
|
||||
Whether to skip if not exists ("SKIP") or fail ("FAIL"). Case insensitive.
|
||||
behavior: str, optional
|
||||
Whether to restrict drop if not empty ("RESTRICT") or cascade ("CASCADE").
|
||||
Case insensitive.
|
||||
|
||||
Returns
|
||||
-------
|
||||
DropNamespaceResponse
|
||||
Response containing properties and transaction_id if applicable.
|
||||
"""
|
||||
result = await self._inner.drop_namespace(
|
||||
namespace,
|
||||
mode=_normalize_drop_namespace_mode(mode),
|
||||
behavior=_normalize_drop_namespace_behavior(behavior),
|
||||
)
|
||||
return DropNamespaceResponse(**result)
|
||||
|
||||
async def describe_namespace(
|
||||
self, namespace: List[str]
|
||||
) -> DescribeNamespaceResponse:
|
||||
"""Describe a namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
The namespace identifier to describe.
|
||||
|
||||
Returns
|
||||
-------
|
||||
DescribeNamespaceResponse
|
||||
Response containing the namespace properties.
|
||||
"""
|
||||
result = await self._inner.describe_namespace(namespace)
|
||||
return DescribeNamespaceResponse(**result)
|
||||
|
||||
async def list_tables(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListTablesResponse:
|
||||
"""List all tables in this database with pagination support.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
The namespace to list tables in.
|
||||
None or empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
Token for pagination. Use the token from a previous response
|
||||
to get the next page of results.
|
||||
limit: int, optional
|
||||
The maximum number of results to return.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ListTablesResponse
|
||||
Response containing table names and optional page_token for pagination.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
result = await self._inner.list_tables(
|
||||
namespace=namespace, page_token=page_token, limit=limit
|
||||
)
|
||||
return ListTablesResponse(**result)
|
||||
await self._inner.drop_namespace(namespace)
|
||||
|
||||
async def table_names(
|
||||
self,
|
||||
@@ -1272,9 +994,6 @@ class AsyncConnection(object):
|
||||
) -> Iterable[str]:
|
||||
"""List all tables in this database, in sorted order
|
||||
|
||||
.. deprecated::
|
||||
Use :meth:`list_tables` instead, which provides proper pagination support.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
@@ -1293,13 +1012,6 @@ class AsyncConnection(object):
|
||||
-------
|
||||
Iterable of str
|
||||
"""
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
"table_names() is deprecated, use list_tables() instead",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
return await self._inner.table_names(
|
||||
@@ -1367,10 +1079,6 @@ class AsyncConnection(object):
|
||||
See available options at
|
||||
<https://lancedb.com/docs/storage/>
|
||||
|
||||
To enable stable row IDs (row IDs remain stable after compaction,
|
||||
update, delete, and merges), set `new_table_enable_stable_row_ids`
|
||||
to `"true"` in storage_options when connecting to the database.
|
||||
|
||||
Returns
|
||||
-------
|
||||
AsyncTable
|
||||
|
||||
@@ -376,11 +376,6 @@ class HnswSq:
target_partition_size: Optional[int] = None


# Backwards-compatible aliases
IvfHnswPq = HnswPq
IvfHnswSq = HnswSq


@dataclass
class IvfFlat:
"""Describes an IVF Flat Index

@@ -480,36 +475,6 @@ class IvfFlat:
target_partition_size: Optional[int] = None


@dataclass
class IvfSq:
"""Describes an IVF Scalar Quantization (SQ) index.

This index applies scalar quantization to compress vectors and organizes the
quantized vectors into IVF partitions. It offers a balance between search
speed and storage efficiency while keeping good recall.

Attributes
----------
distance_type: str, default "l2"
The distance metric used to train and search the index. Supported values
are "l2", "cosine", and "dot".
num_partitions: int, default sqrt(num_rows)
Number of IVF partitions to create.
max_iterations: int, default 50
Maximum iterations for kmeans during partition training.
sample_rate: int, default 256
Controls the number of training vectors: sample_rate * num_partitions.
target_partition_size: int, optional
Target size for each partition; adjusts the balance between speed and accuracy.
"""

distance_type: Literal["l2", "cosine", "dot"] = "l2"
num_partitions: Optional[int] = None
max_iterations: int = 50
sample_rate: int = 256
target_partition_size: Optional[int] = None


@dataclass
class IvfPq:
"""Describes an IVF PQ Index

@@ -644,19 +609,9 @@ class IvfPq:
class IvfRq:
"""Describes an IVF RQ Index

IVF-RQ (RabitQ Quantization) compresses vectors using RabitQ quantization
and organizes them into IVF partitions.

The compression scheme is called RabitQ quantization. Each dimension is
quantized into a small number of bits. The parameters `num_bits` and
`num_partitions` control this process, providing a tradeoff between
index size (and thus search speed) and index accuracy.

The partitioning process is called IVF and the `num_partitions` parameter
controls how many groups to create.

Note that training an IVF RQ index on a large dataset is a slow operation
and currently is also a memory intensive operation.
IVF-RQ (Residual Quantization) stores a compressed copy of each vector using
residual quantization and organizes them into IVF partitions. Parameters
largely mirror IVF-PQ for consistency.

Attributes
----------

@@ -673,7 +628,7 @@ class IvfRq:
Number of IVF partitions to create.

num_bits: int, default 1
Number of bits to encode each dimension in the RabitQ codebook.
Number of bits to encode each dimension.

max_iterations: int, default 50
Max iterations to train kmeans when computing IVF partitions.

@@ -696,9 +651,6 @@ class IvfRq:
__all__ = [
"BTree",
"IvfPq",
"IvfHnswPq",
"IvfHnswSq",
"IvfSq",
"IvfRq",
"IvfFlat",
"HnswPq",
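Since IVF_SQ exists only on one side of this diff, here is a short sketch of how the IvfSq config above would be passed to the async create_index API; the table name, column name, and parameter values are placeholders:

import asyncio
import lancedb
from lancedb.index import IvfSq

async def build_sq_index():
    db = await lancedb.connect_async("./.lancedb")
    tbl = await db.open_table("my_table")
    # Arguments mirror the dataclass fields documented above.
    await tbl.create_index(
        "vector",
        config=IvfSq(distance_type="cosine", num_partitions=256, sample_rate=256),
    )

asyncio.run(build_sq_index())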
@@ -23,29 +23,7 @@ from datetime import timedelta
|
||||
import pyarrow as pa
|
||||
|
||||
from lancedb.db import DBConnection, LanceDBConnection
|
||||
from lancedb.namespace_utils import (
|
||||
_normalize_create_namespace_mode,
|
||||
_normalize_drop_namespace_mode,
|
||||
_normalize_drop_namespace_behavior,
|
||||
)
|
||||
from lancedb.io import StorageOptionsProvider
|
||||
from lance_namespace import (
|
||||
LanceNamespace,
|
||||
connect as namespace_connect,
|
||||
CreateNamespaceResponse,
|
||||
DescribeNamespaceResponse,
|
||||
DropNamespaceResponse,
|
||||
ListNamespacesResponse,
|
||||
ListTablesResponse,
|
||||
ListTablesRequest,
|
||||
DescribeTableRequest,
|
||||
DescribeNamespaceRequest,
|
||||
DropTableRequest,
|
||||
ListNamespacesRequest,
|
||||
CreateNamespaceRequest,
|
||||
DropNamespaceRequest,
|
||||
CreateEmptyTableRequest,
|
||||
)
|
||||
from lancedb.table import AsyncTable, LanceTable, Table
|
||||
from lancedb.util import validate_table_name
|
||||
from lancedb.common import DATA
|
||||
@@ -53,9 +31,19 @@ from lancedb.pydantic import LanceModel
|
||||
from lancedb.embeddings import EmbeddingFunctionConfig
|
||||
from ._lancedb import Session
|
||||
|
||||
from lance_namespace_urllib3_client.models.json_arrow_schema import JsonArrowSchema
|
||||
from lance_namespace_urllib3_client.models.json_arrow_field import JsonArrowField
|
||||
from lance_namespace_urllib3_client.models.json_arrow_data_type import JsonArrowDataType
|
||||
from lance_namespace import LanceNamespace, connect as namespace_connect
|
||||
from lance_namespace_urllib3_client.models import (
|
||||
ListTablesRequest,
|
||||
DescribeTableRequest,
|
||||
DropTableRequest,
|
||||
ListNamespacesRequest,
|
||||
CreateNamespaceRequest,
|
||||
DropNamespaceRequest,
|
||||
CreateEmptyTableRequest,
|
||||
JsonArrowSchema,
|
||||
JsonArrowField,
|
||||
JsonArrowDataType,
|
||||
)
|
||||
|
||||
|
||||
def _convert_pyarrow_type_to_json(arrow_type: pa.DataType) -> JsonArrowDataType:
|
||||
@@ -139,17 +127,13 @@ class LanceNamespaceStorageOptionsProvider(StorageOptionsProvider):
|
||||
|
||||
Examples
|
||||
--------
|
||||
Create a provider and fetch storage options::
|
||||
|
||||
from lance_namespace import connect as namespace_connect
|
||||
|
||||
# Connect to namespace (requires a running namespace server)
|
||||
namespace = namespace_connect("rest", {"uri": "https://..."})
|
||||
provider = LanceNamespaceStorageOptionsProvider(
|
||||
namespace=namespace,
|
||||
table_id=["my_namespace", "my_table"]
|
||||
)
|
||||
options = provider.fetch_storage_options()
|
||||
>>> from lance_namespace import connect as namespace_connect
|
||||
>>> namespace = namespace_connect("rest", {"url": "https://..."})
|
||||
>>> provider = LanceNamespaceStorageOptionsProvider(
|
||||
... namespace=namespace,
|
||||
... table_id=["my_namespace", "my_table"]
|
||||
... )
|
||||
>>> options = provider.fetch_storage_options()
|
||||
"""
|
||||
|
||||
def __init__(self, namespace: LanceNamespace, table_id: List[str]):
|
||||
@@ -253,19 +237,6 @@ class LanceNamespaceDBConnection(DBConnection):
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
) -> Iterable[str]:
|
||||
"""
|
||||
List table names in the database.
|
||||
|
||||
.. deprecated::
|
||||
Use :meth:`list_tables` instead, which provides proper pagination support.
|
||||
"""
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
"table_names() is deprecated, use list_tables() instead",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
request = ListTablesRequest(id=namespace, page_token=page_token, limit=limit)
|
||||
@@ -458,8 +429,8 @@ class LanceNamespaceDBConnection(DBConnection):
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListNamespacesResponse:
|
||||
limit: int = 10,
|
||||
) -> Iterable[str]:
|
||||
"""
|
||||
List child namespaces under the given namespace.
|
||||
|
||||
@@ -469,15 +440,14 @@ class LanceNamespaceDBConnection(DBConnection):
|
||||
The parent namespace to list children from.
|
||||
If None, lists root-level namespaces.
|
||||
page_token : Optional[str]
|
||||
Token for pagination. Use the token from a previous response
|
||||
to get the next page of results.
|
||||
limit : int, optional
|
||||
Pagination token for listing results.
|
||||
limit : int
|
||||
Maximum number of namespaces to return.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ListNamespacesResponse
|
||||
Response containing namespace names and optional page_token for pagination.
|
||||
Iterable[str]
|
||||
Names of child namespaces.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
@@ -485,18 +455,10 @@ class LanceNamespaceDBConnection(DBConnection):
|
||||
id=namespace, page_token=page_token, limit=limit
|
||||
)
|
||||
response = self._ns.list_namespaces(request)
|
||||
return ListNamespacesResponse(
|
||||
namespaces=response.namespaces if response.namespaces else [],
|
||||
page_token=response.page_token,
|
||||
)
|
||||
return response.namespaces if response.namespaces else []
|
||||
|
||||
@override
|
||||
def create_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
mode: Optional[str] = None,
|
||||
properties: Optional[Dict[str, str]] = None,
|
||||
) -> CreateNamespaceResponse:
|
||||
def create_namespace(self, namespace: List[str]) -> None:
|
||||
"""
|
||||
Create a new namespace.
|
||||
|
||||
@@ -504,34 +466,12 @@ class LanceNamespaceDBConnection(DBConnection):
|
||||
----------
|
||||
namespace : List[str]
|
||||
The namespace path to create.
|
||||
mode : str, optional
|
||||
Creation mode - "create" (fail if exists), "exist_ok" (skip if exists),
|
||||
or "overwrite" (replace if exists). Case insensitive.
|
||||
properties : Dict[str, str], optional
|
||||
Properties to set on the namespace.
|
||||
|
||||
Returns
|
||||
-------
|
||||
CreateNamespaceResponse
|
||||
Response containing the properties of the created namespace.
|
||||
"""
|
||||
request = CreateNamespaceRequest(
|
||||
id=namespace,
|
||||
mode=_normalize_create_namespace_mode(mode),
|
||||
properties=properties,
|
||||
)
|
||||
response = self._ns.create_namespace(request)
|
||||
return CreateNamespaceResponse(
|
||||
properties=response.properties if hasattr(response, "properties") else None
|
||||
)
|
||||
request = CreateNamespaceRequest(id=namespace)
|
||||
self._ns.create_namespace(request)
|
||||
|
||||
@override
|
||||
def drop_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
mode: Optional[str] = None,
|
||||
behavior: Optional[str] = None,
|
||||
) -> DropNamespaceResponse:
|
||||
def drop_namespace(self, namespace: List[str]) -> None:
|
||||
"""
|
||||
Drop a namespace.
|
||||
|
||||
@@ -539,87 +479,9 @@ class LanceNamespaceDBConnection(DBConnection):
|
||||
----------
|
||||
namespace : List[str]
|
||||
The namespace path to drop.
|
||||
mode : str, optional
|
||||
Whether to skip if not exists ("SKIP") or fail ("FAIL"). Case insensitive.
|
||||
behavior : str, optional
|
||||
Whether to restrict drop if not empty ("RESTRICT") or cascade ("CASCADE").
|
||||
Case insensitive.
|
||||
|
||||
Returns
|
||||
-------
|
||||
DropNamespaceResponse
|
||||
Response containing properties and transaction_id if applicable.
|
||||
"""
|
||||
request = DropNamespaceRequest(
|
||||
id=namespace,
|
||||
mode=_normalize_drop_namespace_mode(mode),
|
||||
behavior=_normalize_drop_namespace_behavior(behavior),
|
||||
)
|
||||
response = self._ns.drop_namespace(request)
|
||||
return DropNamespaceResponse(
|
||||
properties=(
|
||||
response.properties if hasattr(response, "properties") else None
|
||||
),
|
||||
transaction_id=(
|
||||
response.transaction_id if hasattr(response, "transaction_id") else None
|
||||
),
|
||||
)
|
||||
|
||||
@override
|
||||
def describe_namespace(self, namespace: List[str]) -> DescribeNamespaceResponse:
|
||||
"""
|
||||
Describe a namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace : List[str]
|
||||
The namespace identifier to describe.
|
||||
|
||||
Returns
|
||||
-------
|
||||
DescribeNamespaceResponse
|
||||
Response containing the namespace properties.
|
||||
"""
|
||||
request = DescribeNamespaceRequest(id=namespace)
|
||||
response = self._ns.describe_namespace(request)
|
||||
return DescribeNamespaceResponse(
|
||||
properties=response.properties if hasattr(response, "properties") else None
|
||||
)
|
||||
|
||||
@override
|
||||
def list_tables(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListTablesResponse:
|
||||
"""
|
||||
List all tables in this database with pagination support.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace : List[str], optional
|
||||
The namespace to list tables in.
|
||||
None or empty list represents root namespace.
|
||||
page_token : str, optional
|
||||
Token for pagination. Use the token from a previous response
|
||||
to get the next page of results.
|
||||
limit : int, optional
|
||||
The maximum number of results to return.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ListTablesResponse
|
||||
Response containing table names and optional page_token for pagination.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
request = ListTablesRequest(id=namespace, page_token=page_token, limit=limit)
|
||||
response = self._ns.list_tables(request)
|
||||
return ListTablesResponse(
|
||||
tables=response.tables if response.tables else [],
|
||||
page_token=response.page_token,
|
||||
)
|
||||
request = DropNamespaceRequest(id=namespace)
|
||||
self._ns.drop_namespace(request)
|
||||
|
||||
def _lance_table_from_uri(
|
||||
self,
|
||||
@@ -697,19 +559,7 @@ class AsyncLanceNamespaceDBConnection:
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
) -> Iterable[str]:
|
||||
"""
|
||||
List table names in the namespace.
|
||||
|
||||
.. deprecated::
|
||||
Use :meth:`list_tables` instead, which provides proper pagination support.
|
||||
"""
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
"table_names() is deprecated, use list_tables() instead",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
"""List table names in the namespace."""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
request = ListTablesRequest(id=namespace, page_token=page_token, limit=limit)
|
||||
@@ -917,8 +767,8 @@ class AsyncLanceNamespaceDBConnection:
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListNamespacesResponse:
|
||||
limit: int = 10,
|
||||
) -> Iterable[str]:
|
||||
"""
|
||||
List child namespaces under the given namespace.
|
||||
|
||||
@@ -928,15 +778,14 @@ class AsyncLanceNamespaceDBConnection:
|
||||
The parent namespace to list children from.
|
||||
If None, lists root-level namespaces.
|
||||
page_token : Optional[str]
|
||||
Token for pagination. Use the token from a previous response
|
||||
to get the next page of results.
|
||||
limit : int, optional
|
||||
Pagination token for listing results.
|
||||
limit : int
|
||||
Maximum number of namespaces to return.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ListNamespacesResponse
|
||||
Response containing namespace names and optional page_token for pagination.
|
||||
Iterable[str]
|
||||
Names of child namespaces.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
@@ -944,17 +793,9 @@ class AsyncLanceNamespaceDBConnection:
|
||||
id=namespace, page_token=page_token, limit=limit
|
||||
)
|
||||
response = self._ns.list_namespaces(request)
|
||||
return ListNamespacesResponse(
|
||||
namespaces=response.namespaces if response.namespaces else [],
|
||||
page_token=response.page_token,
|
||||
)
|
||||
return response.namespaces if response.namespaces else []
|
||||
|
||||
async def create_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
mode: Optional[str] = None,
|
||||
properties: Optional[Dict[str, str]] = None,
|
||||
) -> CreateNamespaceResponse:
|
||||
async def create_namespace(self, namespace: List[str]) -> None:
|
||||
"""
|
||||
Create a new namespace.
|
||||
|
||||
@@ -962,33 +803,11 @@ class AsyncLanceNamespaceDBConnection:
|
||||
----------
|
||||
namespace : List[str]
|
||||
The namespace path to create.
|
||||
mode : str, optional
|
||||
Creation mode - "create" (fail if exists), "exist_ok" (skip if exists),
|
||||
or "overwrite" (replace if exists). Case insensitive.
|
||||
properties : Dict[str, str], optional
|
||||
Properties to set on the namespace.
|
||||
|
||||
Returns
|
||||
-------
|
||||
CreateNamespaceResponse
|
||||
Response containing the properties of the created namespace.
|
||||
"""
|
||||
request = CreateNamespaceRequest(
|
||||
id=namespace,
|
||||
mode=_normalize_create_namespace_mode(mode),
|
||||
properties=properties,
|
||||
)
|
||||
response = self._ns.create_namespace(request)
|
||||
return CreateNamespaceResponse(
|
||||
properties=response.properties if hasattr(response, "properties") else None
|
||||
)
|
||||
request = CreateNamespaceRequest(id=namespace)
|
||||
self._ns.create_namespace(request)
|
||||
|
||||
async def drop_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
mode: Optional[str] = None,
|
||||
behavior: Optional[str] = None,
|
||||
) -> DropNamespaceResponse:
|
||||
async def drop_namespace(self, namespace: List[str]) -> None:
|
||||
"""
|
||||
Drop a namespace.
|
||||
|
||||
@@ -996,87 +815,9 @@ class AsyncLanceNamespaceDBConnection:
|
||||
----------
|
||||
namespace : List[str]
|
||||
The namespace path to drop.
|
||||
mode : str, optional
|
||||
Whether to skip if not exists ("SKIP") or fail ("FAIL"). Case insensitive.
|
||||
behavior : str, optional
|
||||
Whether to restrict drop if not empty ("RESTRICT") or cascade ("CASCADE").
|
||||
Case insensitive.
|
||||
|
||||
Returns
|
||||
-------
|
||||
DropNamespaceResponse
|
||||
Response containing properties and transaction_id if applicable.
|
||||
"""
|
||||
request = DropNamespaceRequest(
|
||||
id=namespace,
|
||||
mode=_normalize_drop_namespace_mode(mode),
|
||||
behavior=_normalize_drop_namespace_behavior(behavior),
|
||||
)
|
||||
response = self._ns.drop_namespace(request)
|
||||
return DropNamespaceResponse(
|
||||
properties=(
|
||||
response.properties if hasattr(response, "properties") else None
|
||||
),
|
||||
transaction_id=(
|
||||
response.transaction_id if hasattr(response, "transaction_id") else None
|
||||
),
|
||||
)
|
||||
|
||||
async def describe_namespace(
|
||||
self, namespace: List[str]
|
||||
) -> DescribeNamespaceResponse:
|
||||
"""
|
||||
Describe a namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace : List[str]
|
||||
The namespace identifier to describe.
|
||||
|
||||
Returns
|
||||
-------
|
||||
DescribeNamespaceResponse
|
||||
Response containing the namespace properties.
|
||||
"""
|
||||
request = DescribeNamespaceRequest(id=namespace)
|
||||
response = self._ns.describe_namespace(request)
|
||||
return DescribeNamespaceResponse(
|
||||
properties=response.properties if hasattr(response, "properties") else None
|
||||
)
|
||||
|
||||
async def list_tables(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListTablesResponse:
|
||||
"""
|
||||
List all tables in this database with pagination support.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace : List[str], optional
|
||||
The namespace to list tables in.
|
||||
None or empty list represents root namespace.
|
||||
page_token : str, optional
|
||||
Token for pagination. Use the token from a previous response
|
||||
to get the next page of results.
|
||||
limit : int, optional
|
||||
The maximum number of results to return.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ListTablesResponse
|
||||
Response containing table names and optional page_token for pagination.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
request = ListTablesRequest(id=namespace, page_token=page_token, limit=limit)
|
||||
response = self._ns.list_tables(request)
|
||||
return ListTablesResponse(
|
||||
tables=response.tables if response.tables else [],
|
||||
page_token=response.page_token,
|
||||
)
|
||||
request = DropNamespaceRequest(id=namespace)
|
||||
self._ns.drop_namespace(request)
|
||||
|
||||
|
||||
def connect_namespace(
|
||||
|
||||
@@ -1,27 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

"""Utility functions for namespace operations."""

from typing import Optional


def _normalize_create_namespace_mode(mode: Optional[str]) -> Optional[str]:
"""Normalize create namespace mode to lowercase (API expects lowercase)."""
if mode is None:
return None
return mode.lower()


def _normalize_drop_namespace_mode(mode: Optional[str]) -> Optional[str]:
"""Normalize drop namespace mode to uppercase (API expects uppercase)."""
if mode is None:
return None
return mode.upper()


def _normalize_drop_namespace_behavior(behavior: Optional[str]) -> Optional[str]:
"""Normalize drop namespace behavior to uppercase (API expects uppercase)."""
if behavior is None:
return None
return behavior.upper()
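The helpers above only normalize string case before values are sent to the namespace client. A quick illustration of the intended behavior, using the import path shown earlier in this diff:

from lancedb.namespace_utils import (
    _normalize_create_namespace_mode,
    _normalize_drop_namespace_mode,
    _normalize_drop_namespace_behavior,
)

# Create modes are lowercased; drop modes and behaviors are uppercased.
assert _normalize_create_namespace_mode("Exist_OK") == "exist_ok"
assert _normalize_drop_namespace_mode("skip") == "SKIP"
assert _normalize_drop_namespace_behavior("cascade") == "CASCADE"
assert _normalize_drop_namespace_behavior(None) is None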
@@ -2429,8 +2429,9 @@ class AsyncQueryBase(object):
>>> from lancedb import connect_async
>>> async def doctest_example():
...     conn = await connect_async("./.lancedb")
...     table = await conn.create_table("my_table", [{"vector": [99.0, 99.0]}])
...     plan = await table.query().nearest_to([1.0, 2.0]).explain_plan(True)
...     table = await conn.create_table("my_table", [{"vector": [99, 99]}])
...     query = [100, 100]
...     plan = await table.query().nearest_to([1, 2]).explain_plan(True)
...     print(plan)
>>> asyncio.run(doctest_example())  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
ProjectionExec: expr=[vector@0 as vector, _distance@2 as _distance]

@@ -2439,7 +2440,6 @@ class AsyncQueryBase(object):
SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST, _rowid@1 ASC NULLS LAST], preserve_partitioning=[false]
KNNVectorDistance: metric=l2
LanceRead: uri=..., projection=[vector], ...
<BLANKLINE>

Parameters
----------

@@ -3141,9 +3141,10 @@ class AsyncHybridQuery(AsyncStandardQuery, AsyncVectorQueryBase):
>>> from lancedb.index import FTS
>>> async def doctest_example():
...     conn = await connect_async("./.lancedb")
...     table = await conn.create_table("my_table", [{"vector": [99.0, 99.0], "text": "hello world"}])
...     table = await conn.create_table("my_table", [{"vector": [99, 99], "text": "hello world"}])
...     await table.create_index("text", config=FTS(with_position=False))
...     plan = await table.query().nearest_to([1.0, 2.0]).nearest_to_text("hello").explain_plan(True)
...     query = [100, 100]
...     plan = await table.query().nearest_to([1, 2]).nearest_to_text("hello").explain_plan(True)
...     print(plan)
>>> asyncio.run(doctest_example())  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Vector Search Plan:

@@ -3417,8 +3418,9 @@ class BaseQueryBuilder(object):
>>> from lancedb import connect_async
>>> async def doctest_example():
...     conn = await connect_async("./.lancedb")
...     table = await conn.create_table("my_table", [{"vector": [99.0, 99.0]}])
...     plan = await table.query().nearest_to([1.0, 2.0]).explain_plan(True)
...     table = await conn.create_table("my_table", [{"vector": [99, 99]}])
...     query = [100, 100]
...     plan = await table.query().nearest_to([1, 2]).explain_plan(True)
...     print(plan)
>>> asyncio.run(doctest_example())  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
ProjectionExec: expr=[vector@0 as vector, _distance@2 as _distance]

@@ -3427,7 +3429,6 @@ class BaseQueryBuilder(object):
SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST, _rowid@1 ASC NULLS LAST], preserve_partitioning=[false]
KNNVectorDistance: metric=l2
LanceRead: uri=..., projection=[vector], ...
<BLANKLINE>

Parameters
----------
@@ -23,13 +23,6 @@ import pyarrow as pa
|
||||
from ..common import DATA
|
||||
from ..db import DBConnection, LOOP
|
||||
from ..embeddings import EmbeddingFunctionConfig
|
||||
from lance_namespace import (
|
||||
CreateNamespaceResponse,
|
||||
DescribeNamespaceResponse,
|
||||
DropNamespaceResponse,
|
||||
ListNamespacesResponse,
|
||||
ListTablesResponse,
|
||||
)
|
||||
from ..pydantic import LanceModel
|
||||
from ..table import Table
|
||||
from ..util import validate_table_name
|
||||
@@ -113,8 +106,8 @@ class RemoteDBConnection(DBConnection):
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListNamespacesResponse:
|
||||
limit: int = 10,
|
||||
) -> Iterable[str]:
|
||||
"""List immediate child namespace names in the given namespace.
|
||||
|
||||
Parameters
|
||||
@@ -123,15 +116,14 @@ class RemoteDBConnection(DBConnection):
|
||||
The parent namespace to list namespaces in.
|
||||
None or empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
Token for pagination. Use the token from a previous response
|
||||
to get the next page of results.
|
||||
limit: int, optional
|
||||
The maximum number of results to return.
|
||||
The token to use for pagination. If not present, start from the beginning.
|
||||
limit: int, default 10
|
||||
The size of the page to return.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ListNamespacesResponse
|
||||
Response containing namespace names and optional page_token for pagination.
|
||||
Iterable of str
|
||||
List of immediate child namespace names
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
@@ -142,111 +134,26 @@ class RemoteDBConnection(DBConnection):
|
||||
)
|
||||
|
||||
@override
|
||||
def create_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
mode: Optional[str] = None,
|
||||
properties: Optional[Dict[str, str]] = None,
|
||||
) -> CreateNamespaceResponse:
|
||||
def create_namespace(self, namespace: List[str]) -> None:
|
||||
"""Create a new namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
The namespace identifier to create.
|
||||
mode: str, optional
|
||||
Creation mode - "create" (fail if exists), "exist_ok" (skip if exists),
|
||||
or "overwrite" (replace if exists). Case insensitive.
|
||||
properties: Dict[str, str], optional
|
||||
Properties to set on the namespace.
|
||||
|
||||
Returns
|
||||
-------
|
||||
CreateNamespaceResponse
|
||||
Response containing the properties of the created namespace.
|
||||
"""
|
||||
return LOOP.run(
|
||||
self._conn.create_namespace(
|
||||
namespace=namespace, mode=mode, properties=properties
|
||||
)
|
||||
)
|
||||
LOOP.run(self._conn.create_namespace(namespace=namespace))
|
||||
|
||||
@override
|
||||
def drop_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
mode: Optional[str] = None,
|
||||
behavior: Optional[str] = None,
|
||||
) -> DropNamespaceResponse:
|
||||
def drop_namespace(self, namespace: List[str]) -> None:
|
||||
"""Drop a namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
The namespace identifier to drop.
|
||||
mode: str, optional
|
||||
Whether to skip if not exists ("SKIP") or fail ("FAIL"). Case insensitive.
|
||||
behavior: str, optional
|
||||
Whether to restrict drop if not empty ("RESTRICT") or cascade ("CASCADE").
|
||||
Case insensitive.
|
||||
|
||||
Returns
|
||||
-------
|
||||
DropNamespaceResponse
|
||||
Response containing properties and transaction_id if applicable.
|
||||
"""
|
||||
return LOOP.run(
|
||||
self._conn.drop_namespace(namespace=namespace, mode=mode, behavior=behavior)
|
||||
)
|
||||
|
||||
@override
|
||||
def describe_namespace(self, namespace: List[str]) -> DescribeNamespaceResponse:
|
||||
"""Describe a namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
The namespace identifier to describe.
|
||||
|
||||
Returns
|
||||
-------
|
||||
DescribeNamespaceResponse
|
||||
Response containing the namespace properties.
|
||||
"""
|
||||
return LOOP.run(self._conn.describe_namespace(namespace=namespace))
|
||||
|
||||
@override
|
||||
def list_tables(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListTablesResponse:
|
||||
"""List all tables in this database with pagination support.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
The namespace to list tables in.
|
||||
None or empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
Token for pagination. Use the token from a previous response
|
||||
to get the next page of results.
|
||||
limit: int, optional
|
||||
The maximum number of results to return.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ListTablesResponse
|
||||
Response containing table names and optional page_token for pagination.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
return LOOP.run(
|
||||
self._conn.list_tables(
|
||||
namespace=namespace, page_token=page_token, limit=limit
|
||||
)
|
||||
)
|
||||
return LOOP.run(self._conn.drop_namespace(namespace=namespace))
|
||||
|
||||
@override
|
||||
def table_names(
|
||||
@@ -258,9 +165,6 @@ class RemoteDBConnection(DBConnection):
|
||||
) -> Iterable[str]:
|
||||
"""List the names of all tables in the database.
|
||||
|
||||
.. deprecated::
|
||||
Use :meth:`list_tables` instead, which provides proper pagination support.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], default []
|
||||
@@ -275,13 +179,6 @@ class RemoteDBConnection(DBConnection):
|
||||
-------
|
||||
An iterator of table names.
|
||||
"""
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
"table_names() is deprecated, use list_tables() instead",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
return LOOP.run(
|
||||
|
||||
@@ -18,7 +18,7 @@ from lancedb._lancedb import (
UpdateResult,
)
from lancedb.embeddings.base import EmbeddingFunctionConfig
from lancedb.index import FTS, BTree, Bitmap, HnswSq, IvfFlat, IvfPq, IvfSq, LabelList
from lancedb.index import FTS, BTree, Bitmap, HnswSq, IvfFlat, IvfPq, LabelList
from lancedb.remote.db import LOOP
import pyarrow as pa

@@ -265,8 +265,6 @@ class RemoteTable(Table):
num_sub_vectors=num_sub_vectors,
num_bits=num_bits,
)
elif index_type == "IVF_SQ":
config = IvfSq(distance_type=metric, num_partitions=num_partitions)
elif index_type == "IVF_HNSW_PQ":
raise ValueError(
"IVF_HNSW_PQ is not supported on LanceDB cloud."

@@ -279,7 +277,7 @@
else:
raise ValueError(
f"Unknown vector index type: {index_type}. Valid options are"
" 'IVF_FLAT', 'IVF_SQ', 'IVF_PQ', 'IVF_HNSW_PQ', 'IVF_HNSW_SQ'"
" 'IVF_FLAT', 'IVF_PQ', 'IVF_HNSW_PQ', 'IVF_HNSW_SQ'"
)

LOOP.run(
@@ -44,18 +44,7 @@ import numpy as np

from .common import DATA, VEC, VECTOR_COLUMN_NAME
from .embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry
from .index import (
BTree,
IvfFlat,
IvfPq,
IvfSq,
Bitmap,
IvfRq,
LabelList,
HnswPq,
HnswSq,
FTS,
)
from .index import BTree, IvfFlat, IvfPq, Bitmap, IvfRq, LabelList, HnswPq, HnswSq, FTS
from .merge import LanceMergeInsertBuilder
from .pydantic import LanceModel, model_to_dict
from .query import (

@@ -1029,7 +1018,7 @@ class Table(ABC):
...     .when_not_matched_insert_all() \\
...     .execute(new_data)
>>> res
MergeResult(version=2, num_updated_rows=2, num_inserted_rows=1, num_deleted_rows=0, num_attempts=1)
MergeResult(version=2, num_updated_rows=2, num_inserted_rows=1, num_deleted_rows=0)
>>> # The order of new rows is non-deterministic since we use
>>> # a hash-join as part of this operation and so we sort here
>>> table.to_arrow().sort_by("a").to_pandas()

@@ -2065,7 +2054,7 @@ class LanceTable(Table):
index_cache_size: Optional[int] = None,
num_bits: int = 8,
index_type: Literal[
"IVF_FLAT", "IVF_SQ", "IVF_PQ", "IVF_RQ", "IVF_HNSW_SQ", "IVF_HNSW_PQ"
"IVF_FLAT", "IVF_PQ", "IVF_RQ", "IVF_HNSW_SQ", "IVF_HNSW_PQ"
] = "IVF_PQ",
max_iterations: int = 50,
sample_rate: int = 256,

@@ -2103,14 +2092,6 @@ class LanceTable(Table):
sample_rate=sample_rate,
target_partition_size=target_partition_size,
)
elif index_type == "IVF_SQ":
config = IvfSq(
distance_type=metric,
num_partitions=num_partitions,
max_iterations=max_iterations,
sample_rate=sample_rate,
target_partition_size=target_partition_size,
)
elif index_type == "IVF_PQ":
config = IvfPq(
distance_type=metric,

@@ -3475,22 +3456,11 @@ class AsyncTable:
if config is not None:
if not isinstance(
config,
(
IvfFlat,
IvfSq,
IvfPq,
IvfRq,
HnswPq,
HnswSq,
BTree,
Bitmap,
LabelList,
FTS,
),
(IvfFlat, IvfPq, IvfRq, HnswPq, HnswSq, BTree, Bitmap, LabelList, FTS),
):
raise TypeError(
"config must be an instance of IvfSq, IvfPq, IvfRq, HnswPq, HnswSq,"
" BTree, Bitmap, LabelList, or FTS, but got " + str(type(config))
"config must be an instance of IvfPq, IvfRq, HnswPq, HnswSq, BTree,"
" Bitmap, LabelList, or FTS, but got " + str(type(config))
)
try:
await self._inner.create_index(

@@ -3664,7 +3634,7 @@ class AsyncTable:
...     .when_not_matched_insert_all() \\
...     .execute(new_data)
>>> res
MergeResult(version=2, num_updated_rows=2, num_inserted_rows=1, num_deleted_rows=0, num_attempts=1)
MergeResult(version=2, num_updated_rows=2, num_inserted_rows=1, num_deleted_rows=0)
>>> # The order of new rows is non-deterministic since we use
>>> # a hash-join as part of this operation and so we sort here
>>> table.to_arrow().sort_by("a").to_pandas()
@@ -18,20 +18,12 @@ AddMode = Literal["append", "overwrite"]
CreateMode = Literal["create", "overwrite"]

# Index type literals
VectorIndexType = Literal[
"IVF_FLAT",
"IVF_SQ",
"IVF_PQ",
"IVF_HNSW_SQ",
"IVF_HNSW_PQ",
"IVF_RQ",
]
VectorIndexType = Literal["IVF_FLAT", "IVF_PQ", "IVF_HNSW_SQ", "IVF_HNSW_PQ", "IVF_RQ"]
ScalarIndexType = Literal["BTREE", "BITMAP", "LABEL_LIST"]
IndexType = Literal[
"IVF_PQ",
"IVF_HNSW_PQ",
"IVF_HNSW_SQ",
"IVF_SQ",
"FTS",
"BTREE",
"BITMAP",
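The Literal aliases above are what type checkers use to validate index_type arguments. A tiny, self-contained sketch of the same check performed at runtime; the helper function is hypothetical, and only the literal values come from this file:

from typing import Literal, get_args

VectorIndexType = Literal[
    "IVF_FLAT", "IVF_SQ", "IVF_PQ", "IVF_HNSW_SQ", "IVF_HNSW_PQ", "IVF_RQ"
]

def validate_index_type(value: str) -> None:
    # get_args() yields the allowed literal values declared above.
    if value not in get_args(VectorIndexType):
        raise ValueError(f"Unknown vector index type: {value}")

validate_index_type("IVF_SQ")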
@@ -441,150 +441,6 @@ async def test_create_table_v2_manifest_paths_async(tmp_path):
assert re.match(r"\d{20}\.manifest", manifest)


@pytest.mark.asyncio
async def test_create_table_stable_row_ids_via_storage_options(tmp_path):
"""Test stable_row_ids via storage_options at connect time."""
import lance

# Connect with stable row IDs enabled as default for new tables
db_with = await lancedb.connect_async(
tmp_path, storage_options={"new_table_enable_stable_row_ids": "true"}
)
# Connect without stable row IDs (default)
db_without = await lancedb.connect_async(
tmp_path, storage_options={"new_table_enable_stable_row_ids": "false"}
)

# Create table using connection with stable row IDs enabled
await db_with.create_table(
"with_stable_via_opts",
data=[{"id": i} for i in range(10)],
)
lance_ds_with = lance.dataset(tmp_path / "with_stable_via_opts.lance")
fragments_with = lance_ds_with.get_fragments()
assert len(fragments_with) > 0
assert fragments_with[0].metadata.row_id_meta is not None

# Create table using connection without stable row IDs
await db_without.create_table(
"without_stable_via_opts",
data=[{"id": i} for i in range(10)],
)
lance_ds_without = lance.dataset(tmp_path / "without_stable_via_opts.lance")
fragments_without = lance_ds_without.get_fragments()
assert len(fragments_without) > 0
assert fragments_without[0].metadata.row_id_meta is None


def test_create_table_stable_row_ids_via_storage_options_sync(tmp_path):
"""Test that enable_stable_row_ids can be set via storage_options (sync API)."""
# Connect with stable row IDs enabled as default for new tables
db_with = lancedb.connect(
tmp_path, storage_options={"new_table_enable_stable_row_ids": "true"}
)
# Connect without stable row IDs (default)
db_without = lancedb.connect(
tmp_path, storage_options={"new_table_enable_stable_row_ids": "false"}
)

# Create table using connection with stable row IDs enabled
tbl_with = db_with.create_table(
"with_stable_sync",
data=[{"id": i} for i in range(10)],
)
lance_ds_with = tbl_with.to_lance()
fragments_with = lance_ds_with.get_fragments()
assert len(fragments_with) > 0
assert fragments_with[0].metadata.row_id_meta is not None

# Create table using connection without stable row IDs
tbl_without = db_without.create_table(
"without_stable_sync",
data=[{"id": i} for i in range(10)],
)
lance_ds_without = tbl_without.to_lance()
fragments_without = lance_ds_without.get_fragments()
assert len(fragments_without) > 0
assert fragments_without[0].metadata.row_id_meta is None


@pytest.mark.asyncio
async def test_create_table_stable_row_ids_table_level_override(tmp_path):
"""Test that stable_row_ids can be enabled/disabled at create_table level."""
import lance

# Connect without any stable row ID setting
db_default = await lancedb.connect_async(tmp_path)

# Connect with stable row IDs enabled at connection level
db_with_stable = await lancedb.connect_async(
tmp_path, storage_options={"new_table_enable_stable_row_ids": "true"}
)

# Case 1: No connection setting, enable at table level
await db_default.create_table(
"table_level_enabled",
data=[{"id": i} for i in range(10)],
storage_options={"new_table_enable_stable_row_ids": "true"},
)
lance_ds = lance.dataset(tmp_path / "table_level_enabled.lance")
fragments = lance_ds.get_fragments()
assert len(fragments) > 0
assert fragments[0].metadata.row_id_meta is not None, (
"Table should have stable row IDs when enabled at table level"
)

# Case 2: Connection has stable row IDs, override with false at table level
await db_with_stable.create_table(
"table_level_disabled",
data=[{"id": i} for i in range(10)],
storage_options={"new_table_enable_stable_row_ids": "false"},
)
lance_ds = lance.dataset(tmp_path / "table_level_disabled.lance")
fragments = lance_ds.get_fragments()
assert len(fragments) > 0
assert fragments[0].metadata.row_id_meta is None, (
"Table should NOT have stable row IDs when disabled at table level"
)


def test_create_table_stable_row_ids_table_level_override_sync(tmp_path):
"""Test that stable_row_ids can be enabled/disabled at create_table level (sync)."""
|
||||
# Connect without any stable row ID setting
|
||||
db_default = lancedb.connect(tmp_path)
|
||||
|
||||
# Connect with stable row IDs enabled at connection level
|
||||
db_with_stable = lancedb.connect(
|
||||
tmp_path, storage_options={"new_table_enable_stable_row_ids": "true"}
|
||||
)
|
||||
|
||||
# Case 1: No connection setting, enable at table level
|
||||
tbl = db_default.create_table(
|
||||
"table_level_enabled_sync",
|
||||
data=[{"id": i} for i in range(10)],
|
||||
storage_options={"new_table_enable_stable_row_ids": "true"},
|
||||
)
|
||||
lance_ds = tbl.to_lance()
|
||||
fragments = lance_ds.get_fragments()
|
||||
assert len(fragments) > 0
|
||||
assert fragments[0].metadata.row_id_meta is not None, (
|
||||
"Table should have stable row IDs when enabled at table level"
|
||||
)
|
||||
|
||||
# Case 2: Connection has stable row IDs, override with false at table level
|
||||
tbl = db_with_stable.create_table(
|
||||
"table_level_disabled_sync",
|
||||
data=[{"id": i} for i in range(10)],
|
||||
storage_options={"new_table_enable_stable_row_ids": "false"},
|
||||
)
|
||||
lance_ds = tbl.to_lance()
|
||||
fragments = lance_ds.get_fragments()
|
||||
assert len(fragments) > 0
|
||||
assert fragments[0].metadata.row_id_meta is None, (
|
||||
"Table should NOT have stable row IDs when disabled at table level"
|
||||
)
|
||||
|
||||
|
||||
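Condensed from the tests above: the connection-level default is set through `storage_options` at connect time, and a per-table `storage_options` entry overrides it (the directory and table names are placeholders):

```python
import lancedb

db = lancedb.connect(
    "./example_db", storage_options={"new_table_enable_stable_row_ids": "true"}
)

# Inherits the connection default, so this table gets stable row IDs.
events = db.create_table("events", data=[{"id": i} for i in range(10)])

# The table-level option wins over the connection-level default.
logs = db.create_table(
    "logs",
    data=[{"id": i} for i in range(10)],
    storage_options={"new_table_enable_stable_row_ids": "false"},
)
```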
def test_open_table_sync(tmp_db: lancedb.DBConnection):
|
||||
tmp_db.create_table("test", data=[{"id": 0}])
|
||||
assert tmp_db.open_table("test").count_rows() == 1
|
||||
@@ -892,7 +748,7 @@ def test_local_namespace_operations(tmp_path):
|
||||
db = lancedb.connect(tmp_path)
|
||||
|
||||
# Test list_namespaces returns empty list for root namespace
|
||||
namespaces = db.list_namespaces().namespaces
|
||||
namespaces = list(db.list_namespaces())
|
||||
assert namespaces == []
|
||||
|
||||
# Test list_namespaces with non-empty namespace raises NotImplementedError
|
||||
@@ -900,7 +756,7 @@ def test_local_namespace_operations(tmp_path):
|
||||
NotImplementedError,
|
||||
match="Namespace operations are not supported for listing database",
|
||||
):
|
||||
db.list_namespaces(namespace=["test"])
|
||||
list(db.list_namespaces(namespace=["test"]))
|
||||
|
||||
|
||||
def test_local_create_namespace_not_supported(tmp_path):
|
||||
|
||||
@@ -12,9 +12,6 @@ from lancedb.index import (
|
||||
BTree,
|
||||
IvfFlat,
|
||||
IvfPq,
|
||||
IvfSq,
|
||||
IvfHnswPq,
|
||||
IvfHnswSq,
|
||||
IvfRq,
|
||||
Bitmap,
|
||||
LabelList,
|
||||
@@ -232,35 +229,6 @@ async def test_create_hnswsq_index(some_table: AsyncTable):
|
||||
assert len(indices) == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_create_hnswsq_alias_index(some_table: AsyncTable):
|
||||
await some_table.create_index("vector", config=IvfHnswSq(num_partitions=5))
|
||||
indices = await some_table.list_indices()
|
||||
assert len(indices) == 1
|
||||
assert indices[0].index_type in {"HnswSq", "IvfHnswSq"}
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_create_hnswpq_alias_index(some_table: AsyncTable):
|
||||
await some_table.create_index("vector", config=IvfHnswPq(num_partitions=5))
|
||||
indices = await some_table.list_indices()
|
||||
assert len(indices) == 1
|
||||
assert indices[0].index_type in {"HnswPq", "IvfHnswPq"}
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_create_ivfsq_index(some_table: AsyncTable):
|
||||
await some_table.create_index("vector", config=IvfSq(num_partitions=10))
|
||||
indices = await some_table.list_indices()
|
||||
assert len(indices) == 1
|
||||
assert indices[0].index_type == "IvfSq"
|
||||
stats = await some_table.index_stats(indices[0].name)
|
||||
assert stats.index_type == "IVF_SQ"
|
||||
assert stats.distance_type == "l2"
|
||||
assert stats.num_indexed_rows == await some_table.count_rows()
|
||||
assert stats.num_unindexed_rows == 0
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_create_index_with_binary_vectors(binary_table: AsyncTable):
|
||||
await binary_table.create_index(
|
||||
|
||||
@@ -279,13 +279,13 @@ class TestNamespaceConnection:
|
||||
db = lancedb.connect_namespace("dir", {"root": self.temp_dir})
|
||||
|
||||
# Initially no namespaces
|
||||
assert len(db.list_namespaces().namespaces) == 0
|
||||
assert len(list(db.list_namespaces())) == 0
|
||||
|
||||
# Create a namespace
|
||||
db.create_namespace(["test_namespace"])
|
||||
|
||||
# Verify namespace exists
|
||||
namespaces = db.list_namespaces().namespaces
|
||||
namespaces = list(db.list_namespaces())
|
||||
assert "test_namespace" in namespaces
|
||||
assert len(namespaces) == 1
|
||||
|
||||
@@ -322,7 +322,7 @@ class TestNamespaceConnection:
|
||||
db.drop_namespace(["test_namespace"])
|
||||
|
||||
# Verify namespace no longer exists
|
||||
namespaces = db.list_namespaces().namespaces
|
||||
namespaces = list(db.list_namespaces())
|
||||
assert len(namespaces) == 0
|
||||
|
||||
def test_namespace_with_tables_cannot_be_dropped(self):
|
||||
@@ -570,13 +570,13 @@ class TestAsyncNamespaceConnection:
|
||||
|
||||
# Initially no namespaces
|
||||
namespaces = await db.list_namespaces()
|
||||
assert len(namespaces.namespaces) == 0
|
||||
assert len(list(namespaces)) == 0
|
||||
|
||||
# Create a namespace
|
||||
await db.create_namespace(["test_namespace"])
|
||||
|
||||
# Verify namespace exists
|
||||
namespaces = (await db.list_namespaces()).namespaces
|
||||
namespaces = list(await db.list_namespaces())
|
||||
assert "test_namespace" in namespaces
|
||||
assert len(namespaces) == 1
|
||||
|
||||
@@ -608,7 +608,7 @@ class TestAsyncNamespaceConnection:
|
||||
await db.drop_namespace(["test_namespace"])
|
||||
|
||||
# Verify namespace no longer exists
|
||||
namespaces = (await db.list_namespaces()).namespaces
|
||||
namespaces = list(await db.list_namespaces())
|
||||
assert len(namespaces) == 0
|
||||
|
||||
async def test_drop_all_tables_async(self):
|
||||
|
||||
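A hedged sketch of the namespace lifecycle exercised by the tests above, using the local "dir" implementation (the root path and namespace name are placeholders). The two sides of this diff disagree on the return shape of `list_namespaces`, so the sketch normalizes both:

```python
import lancedb

db = lancedb.connect_namespace("dir", {"root": "/tmp/lancedb_ns_demo"})

db.create_namespace(["analytics"])

# One side of this diff returns a response object with a .namespaces field,
# the other returns an iterable of names; normalize to a plain list.
result = db.list_namespaces()
names = list(result.namespaces) if hasattr(result, "namespaces") else list(result)
assert "analytics" in names

db.drop_namespace(["analytics"])
```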
@@ -10,9 +10,8 @@ use lancedb::{
|
||||
};
|
||||
use pyo3::{
|
||||
exceptions::{PyRuntimeError, PyValueError},
|
||||
pyclass, pyfunction, pymethods,
|
||||
types::{PyDict, PyDictMethods},
|
||||
Bound, FromPyObject, Py, PyAny, PyObject, PyRef, PyResult, Python,
|
||||
pyclass, pyfunction, pymethods, Bound, FromPyObject, Py, PyAny, PyObject, PyRef, PyResult,
|
||||
Python,
|
||||
};
|
||||
use pyo3_async_runtimes::tokio::future_into_py;
|
||||
|
||||
@@ -293,155 +292,40 @@ impl Connection {
|
||||
limit: Option<u32>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
let py = self_.py();
|
||||
future_into_py(py, async move {
|
||||
use lance_namespace::models::ListNamespacesRequest;
|
||||
future_into_py(self_.py(), async move {
|
||||
use lancedb::database::ListNamespacesRequest;
|
||||
let request = ListNamespacesRequest {
|
||||
id: if namespace.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(namespace)
|
||||
},
|
||||
namespace,
|
||||
page_token,
|
||||
limit: limit.map(|l| l as i32),
|
||||
limit,
|
||||
};
|
||||
let response = inner.list_namespaces(request).await.infer_error()?;
|
||||
Python::with_gil(|py| -> PyResult<Py<PyDict>> {
|
||||
let dict = PyDict::new(py);
|
||||
dict.set_item("namespaces", response.namespaces)?;
|
||||
dict.set_item("page_token", response.page_token)?;
|
||||
Ok(dict.unbind())
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (namespace, mode=None, properties=None))]
|
||||
pub fn create_namespace(
|
||||
self_: PyRef<'_, Self>,
|
||||
namespace: Vec<String>,
|
||||
mode: Option<String>,
|
||||
properties: Option<std::collections::HashMap<String, String>>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
let py = self_.py();
|
||||
future_into_py(py, async move {
|
||||
use lance_namespace::models::{create_namespace_request, CreateNamespaceRequest};
|
||||
let mode_enum = mode.and_then(|m| match m.to_lowercase().as_str() {
|
||||
"create" => Some(create_namespace_request::Mode::Create),
|
||||
"exist_ok" => Some(create_namespace_request::Mode::ExistOk),
|
||||
"overwrite" => Some(create_namespace_request::Mode::Overwrite),
|
||||
_ => None,
|
||||
});
|
||||
let request = CreateNamespaceRequest {
|
||||
id: if namespace.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(namespace)
|
||||
},
|
||||
mode: mode_enum,
|
||||
properties,
|
||||
};
|
||||
let response = inner.create_namespace(request).await.infer_error()?;
|
||||
Python::with_gil(|py| -> PyResult<Py<PyDict>> {
|
||||
let dict = PyDict::new(py);
|
||||
dict.set_item("properties", response.properties)?;
|
||||
Ok(dict.unbind())
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (namespace, mode=None, behavior=None))]
|
||||
pub fn drop_namespace(
|
||||
self_: PyRef<'_, Self>,
|
||||
namespace: Vec<String>,
|
||||
mode: Option<String>,
|
||||
behavior: Option<String>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
let py = self_.py();
|
||||
future_into_py(py, async move {
|
||||
use lance_namespace::models::{drop_namespace_request, DropNamespaceRequest};
|
||||
let mode_enum = mode.and_then(|m| match m.to_uppercase().as_str() {
|
||||
"SKIP" => Some(drop_namespace_request::Mode::Skip),
|
||||
"FAIL" => Some(drop_namespace_request::Mode::Fail),
|
||||
_ => None,
|
||||
});
|
||||
let behavior_enum = behavior.and_then(|b| match b.to_uppercase().as_str() {
|
||||
"RESTRICT" => Some(drop_namespace_request::Behavior::Restrict),
|
||||
"CASCADE" => Some(drop_namespace_request::Behavior::Cascade),
|
||||
_ => None,
|
||||
});
|
||||
let request = DropNamespaceRequest {
|
||||
id: if namespace.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(namespace)
|
||||
},
|
||||
mode: mode_enum,
|
||||
behavior: behavior_enum,
|
||||
};
|
||||
let response = inner.drop_namespace(request).await.infer_error()?;
|
||||
Python::with_gil(|py| -> PyResult<Py<PyDict>> {
|
||||
let dict = PyDict::new(py);
|
||||
dict.set_item("properties", response.properties)?;
|
||||
dict.set_item("transaction_id", response.transaction_id)?;
|
||||
Ok(dict.unbind())
|
||||
})
|
||||
inner.list_namespaces(request).await.infer_error()
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (namespace,))]
|
||||
pub fn describe_namespace(
|
||||
pub fn create_namespace(
|
||||
self_: PyRef<'_, Self>,
|
||||
namespace: Vec<String>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
let py = self_.py();
|
||||
future_into_py(py, async move {
|
||||
use lance_namespace::models::DescribeNamespaceRequest;
|
||||
let request = DescribeNamespaceRequest {
|
||||
id: if namespace.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(namespace)
|
||||
},
|
||||
};
|
||||
let response = inner.describe_namespace(request).await.infer_error()?;
|
||||
Python::with_gil(|py| -> PyResult<Py<PyDict>> {
|
||||
let dict = PyDict::new(py);
|
||||
dict.set_item("properties", response.properties)?;
|
||||
Ok(dict.unbind())
|
||||
})
|
||||
future_into_py(self_.py(), async move {
|
||||
use lancedb::database::CreateNamespaceRequest;
|
||||
let request = CreateNamespaceRequest { namespace };
|
||||
inner.create_namespace(request).await.infer_error()
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (namespace=vec![], page_token=None, limit=None))]
|
||||
pub fn list_tables(
|
||||
#[pyo3(signature = (namespace,))]
|
||||
pub fn drop_namespace(
|
||||
self_: PyRef<'_, Self>,
|
||||
namespace: Vec<String>,
|
||||
page_token: Option<String>,
|
||||
limit: Option<u32>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
let py = self_.py();
|
||||
future_into_py(py, async move {
|
||||
use lance_namespace::models::ListTablesRequest;
|
||||
let request = ListTablesRequest {
|
||||
id: if namespace.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(namespace)
|
||||
},
|
||||
page_token,
|
||||
limit: limit.map(|l| l as i32),
|
||||
};
|
||||
let response = inner.list_tables(request).await.infer_error()?;
|
||||
Python::with_gil(|py| -> PyResult<Py<PyDict>> {
|
||||
let dict = PyDict::new(py);
|
||||
dict.set_item("tables", response.tables)?;
|
||||
dict.set_item("page_token", response.page_token)?;
|
||||
Ok(dict.unbind())
|
||||
})
|
||||
future_into_py(self_.py(), async move {
|
||||
use lancedb::database::DropNamespaceRequest;
|
||||
let request = DropNamespaceRequest { namespace };
|
||||
inner.drop_namespace(request).await.infer_error()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
||||
|
||||
use lancedb::index::vector::{IvfFlatIndexBuilder, IvfRqIndexBuilder, IvfSqIndexBuilder};
|
||||
use lancedb::index::vector::{IvfFlatIndexBuilder, IvfRqIndexBuilder};
|
||||
use lancedb::index::{
|
||||
scalar::{BTreeIndexBuilder, FtsIndexBuilder},
|
||||
vector::{IvfHnswPqIndexBuilder, IvfHnswSqIndexBuilder, IvfPqIndexBuilder},
|
||||
@@ -87,21 +87,6 @@ pub fn extract_index_params(source: &Option<Bound<'_, PyAny>>) -> PyResult<Lance
|
||||
}
|
||||
Ok(LanceDbIndex::IvfPq(ivf_pq_builder))
|
||||
},
|
||||
"IvfSq" => {
|
||||
let params = source.extract::<IvfSqParams>()?;
|
||||
let distance_type = parse_distance_type(params.distance_type)?;
|
||||
let mut ivf_sq_builder = IvfSqIndexBuilder::default()
|
||||
.distance_type(distance_type)
|
||||
.max_iterations(params.max_iterations)
|
||||
.sample_rate(params.sample_rate);
|
||||
if let Some(num_partitions) = params.num_partitions {
|
||||
ivf_sq_builder = ivf_sq_builder.num_partitions(num_partitions);
|
||||
}
|
||||
if let Some(target_partition_size) = params.target_partition_size {
|
||||
ivf_sq_builder = ivf_sq_builder.target_partition_size(target_partition_size);
|
||||
}
|
||||
Ok(LanceDbIndex::IvfSq(ivf_sq_builder))
|
||||
},
|
||||
"IvfRq" => {
|
||||
let params = source.extract::<IvfRqParams>()?;
|
||||
let distance_type = parse_distance_type(params.distance_type)?;
|
||||
@@ -157,7 +142,7 @@ pub fn extract_index_params(source: &Option<Bound<'_, PyAny>>) -> PyResult<Lance
|
||||
Ok(LanceDbIndex::IvfHnswSq(hnsw_sq_builder))
|
||||
},
|
||||
not_supported => Err(PyValueError::new_err(format!(
|
||||
"Invalid index type '{}'. Must be one of BTree, Bitmap, LabelList, FTS, IvfPq, IvfSq, IvfHnswPq, or IvfHnswSq",
|
||||
"Invalid index type '{}'. Must be one of BTree, Bitmap, LabelList, FTS, IvfPq, IvfHnswPq, or IvfHnswSq",
|
||||
not_supported
|
||||
))),
|
||||
}
|
||||
@@ -201,15 +186,6 @@ struct IvfPqParams {
|
||||
target_partition_size: Option<u32>,
|
||||
}
|
||||
|
||||
#[derive(FromPyObject)]
|
||||
struct IvfSqParams {
|
||||
distance_type: String,
|
||||
num_partitions: Option<u32>,
|
||||
max_iterations: u32,
|
||||
sample_rate: u32,
|
||||
target_partition_size: Option<u32>,
|
||||
}
|
||||
|
||||
#[derive(FromPyObject)]
|
||||
struct IvfRqParams {
|
||||
distance_type: String,
|
||||
|
||||
@@ -690,7 +690,7 @@ impl FTSQuery {
|
||||
}
|
||||
|
||||
pub fn get_query(&self) -> String {
|
||||
self.fts_query.query.query().clone()
|
||||
self.fts_query.query.query().to_owned()
|
||||
}
|
||||
|
||||
pub fn to_query_request(&self) -> PyQueryRequest {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lancedb"
|
||||
version = "0.23.0-beta.0"
|
||||
version = "0.22.4-beta.2"
|
||||
edition.workspace = true
|
||||
description = "LanceDB: A serverless, low-latency vector database for AI applications"
|
||||
license.workspace = true
|
||||
@@ -105,12 +105,12 @@ test-log = "0.2"
|
||||
|
||||
[features]
|
||||
default = ["aws", "gcs", "azure", "dynamodb", "oss"]
|
||||
aws = ["lance/aws", "lance-io/aws", "lance-namespace-impls/dir-aws"]
|
||||
oss = ["lance/oss", "lance-io/oss", "lance-namespace-impls/dir-oss"]
|
||||
gcs = ["lance/gcp", "lance-io/gcp", "lance-namespace-impls/dir-gcp"]
|
||||
azure = ["lance/azure", "lance-io/azure", "lance-namespace-impls/dir-azure"]
|
||||
aws = ["lance/aws", "lance-io/aws"]
|
||||
oss = ["lance/oss", "lance-io/oss"]
|
||||
gcs = ["lance/gcp", "lance-io/gcp"]
|
||||
azure = ["lance/azure", "lance-io/azure"]
|
||||
dynamodb = ["lance/dynamodb", "aws"]
|
||||
remote = ["dep:reqwest", "dep:http", "lance-namespace-impls/rest"]
|
||||
remote = ["dep:reqwest", "dep:http"]
|
||||
fp16kernels = ["lance-linalg/fp16kernels"]
|
||||
s3-test = []
|
||||
bedrock = ["dep:aws-sdk-bedrockruntime"]
|
||||
|
||||
@@ -9,11 +9,6 @@ use std::sync::Arc;
|
||||
use arrow_array::RecordBatchReader;
|
||||
use arrow_schema::{Field, SchemaRef};
|
||||
use lance::dataset::ReadParams;
|
||||
use lance_namespace::models::{
|
||||
CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
|
||||
DescribeNamespaceResponse, DropNamespaceRequest, DropNamespaceResponse, ListNamespacesRequest,
|
||||
ListNamespacesResponse, ListTablesRequest, ListTablesResponse,
|
||||
};
|
||||
#[cfg(feature = "aws")]
|
||||
use object_store::aws::AwsCredential;
|
||||
|
||||
@@ -22,8 +17,9 @@ use crate::database::listing::{
|
||||
ListingDatabase, OPT_NEW_TABLE_STORAGE_VERSION, OPT_NEW_TABLE_V2_MANIFEST_PATHS,
|
||||
};
|
||||
use crate::database::{
|
||||
CloneTableRequest, CreateTableData, CreateTableMode, CreateTableRequest, Database,
|
||||
DatabaseOptions, OpenTableRequest, ReadConsistency, TableNamesRequest,
|
||||
CloneTableRequest, CreateNamespaceRequest, CreateTableData, CreateTableMode,
|
||||
CreateTableRequest, Database, DatabaseOptions, DropNamespaceRequest, ListNamespacesRequest,
|
||||
OpenTableRequest, ReadConsistency, TableNamesRequest,
|
||||
};
|
||||
use crate::embeddings::{
|
||||
EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry, MemoryRegistry, WithEmbeddings,
|
||||
@@ -78,7 +74,6 @@ impl TableNamesBuilder {
|
||||
}
|
||||
|
||||
/// Execute the table names operation
|
||||
#[allow(deprecated)]
|
||||
pub async fn execute(self) -> Result<Vec<String>> {
|
||||
self.parent.clone().table_names(self.request).await
|
||||
}
|
||||
@@ -413,7 +408,6 @@ impl OpenTableBuilder {
|
||||
index_cache_size: None,
|
||||
lance_read_params: None,
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
},
|
||||
embedding_registry,
|
||||
}
|
||||
@@ -773,42 +767,20 @@ impl Connection {
|
||||
}
|
||||
|
||||
/// List immediate child namespace names in the given namespace
|
||||
pub async fn list_namespaces(
|
||||
&self,
|
||||
request: ListNamespacesRequest,
|
||||
) -> Result<ListNamespacesResponse> {
|
||||
pub async fn list_namespaces(&self, request: ListNamespacesRequest) -> Result<Vec<String>> {
|
||||
self.internal.list_namespaces(request).await
|
||||
}
|
||||
|
||||
/// Create a new namespace
|
||||
pub async fn create_namespace(
|
||||
&self,
|
||||
request: CreateNamespaceRequest,
|
||||
) -> Result<CreateNamespaceResponse> {
|
||||
pub async fn create_namespace(&self, request: CreateNamespaceRequest) -> Result<()> {
|
||||
self.internal.create_namespace(request).await
|
||||
}
|
||||
|
||||
/// Drop a namespace
|
||||
pub async fn drop_namespace(
|
||||
&self,
|
||||
request: DropNamespaceRequest,
|
||||
) -> Result<DropNamespaceResponse> {
|
||||
pub async fn drop_namespace(&self, request: DropNamespaceRequest) -> Result<()> {
|
||||
self.internal.drop_namespace(request).await
|
||||
}
|
||||
|
||||
/// Describe a namespace
|
||||
pub async fn describe_namespace(
|
||||
&self,
|
||||
request: DescribeNamespaceRequest,
|
||||
) -> Result<DescribeNamespaceResponse> {
|
||||
self.internal.describe_namespace(request).await
|
||||
}
|
||||
|
||||
/// List tables with pagination support
|
||||
pub async fn list_tables(&self, request: ListTablesRequest) -> Result<ListTablesResponse> {
|
||||
self.internal.list_tables(request).await
|
||||
}
|
||||
|
||||
/// Get the in-memory embedding registry.
|
||||
/// It's important to note that the embedding registry is not persisted across connections.
|
||||
/// So if a table contains embeddings, you will need to make sure that you are using a connection that has the same embedding functions registered
|
||||
@@ -1114,7 +1086,6 @@ pub struct ConnectNamespaceBuilder {
|
||||
read_consistency_interval: Option<std::time::Duration>,
|
||||
embedding_registry: Option<Arc<dyn EmbeddingRegistry>>,
|
||||
session: Option<Arc<lance::session::Session>>,
|
||||
server_side_query_enabled: bool,
|
||||
}
|
||||
|
||||
impl ConnectNamespaceBuilder {
|
||||
@@ -1126,7 +1097,6 @@ impl ConnectNamespaceBuilder {
|
||||
read_consistency_interval: None,
|
||||
embedding_registry: None,
|
||||
session: None,
|
||||
server_side_query_enabled: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1181,18 +1151,6 @@ impl ConnectNamespaceBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Enable server-side query execution.
|
||||
///
|
||||
/// When enabled, queries will be executed on the namespace server instead of
|
||||
/// locally. This can improve performance by reducing data transfer and
|
||||
/// leveraging server-side compute resources.
|
||||
///
|
||||
/// Default is `false` (queries executed locally).
|
||||
pub fn server_side_query(mut self, enabled: bool) -> Self {
|
||||
self.server_side_query_enabled = enabled;
|
||||
self
|
||||
}
|
||||
|
||||
/// Execute the connection
|
||||
pub async fn execute(self) -> Result<Connection> {
|
||||
use crate::database::namespace::LanceNamespaceDatabase;
|
||||
@@ -1204,7 +1162,6 @@ impl ConnectNamespaceBuilder {
|
||||
self.storage_options,
|
||||
self.read_consistency_interval,
|
||||
self.session,
|
||||
self.server_side_query_enabled,
|
||||
)
|
||||
.await?,
|
||||
);
|
||||
|
||||
@@ -24,12 +24,6 @@ use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
|
||||
use futures::stream;
|
||||
use lance::dataset::ReadParams;
|
||||
use lance_datafusion::utils::StreamingWriteSource;
|
||||
use lance_namespace::models::{
|
||||
CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
|
||||
DescribeNamespaceResponse, DropNamespaceRequest, DropNamespaceResponse, ListNamespacesRequest,
|
||||
ListNamespacesResponse, ListTablesRequest, ListTablesResponse,
|
||||
};
|
||||
use lance_namespace::LanceNamespace;
|
||||
|
||||
use crate::arrow::{SendableRecordBatchStream, SendableRecordBatchStreamExt};
|
||||
use crate::error::Result;
|
||||
@@ -42,7 +36,32 @@ pub trait DatabaseOptions {
|
||||
fn serialize_into_map(&self, map: &mut HashMap<String, String>);
|
||||
}
|
||||
|
||||
/// A request to list names of tables in the database (deprecated, use ListTablesRequest)
|
||||
/// A request to list namespaces in the database
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ListNamespacesRequest {
|
||||
/// The parent namespace to list namespaces in. Empty list represents root namespace.
|
||||
pub namespace: Vec<String>,
|
||||
/// If present, only return names that come lexicographically after the supplied value.
|
||||
pub page_token: Option<String>,
|
||||
/// The maximum number of namespace names to return
|
||||
pub limit: Option<u32>,
|
||||
}
|
||||
|
||||
/// A request to create a namespace
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct CreateNamespaceRequest {
|
||||
/// The namespace identifier to create
|
||||
pub namespace: Vec<String>,
|
||||
}
|
||||
|
||||
/// A request to drop a namespace
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct DropNamespaceRequest {
|
||||
/// The namespace identifier to drop
|
||||
pub namespace: Vec<String>,
|
||||
}
|
||||
|
||||
/// A request to list names of tables in the database
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct TableNamesRequest {
|
||||
/// The namespace to list tables in. Empty list represents root namespace.
|
||||
@@ -58,7 +77,7 @@ pub struct TableNamesRequest {
|
||||
}
|
||||
|
||||
/// A request to open a table
|
||||
#[derive(Clone)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct OpenTableRequest {
|
||||
pub name: String,
|
||||
/// The namespace to open the table from. Empty list represents root namespace.
|
||||
@@ -68,22 +87,6 @@ pub struct OpenTableRequest {
|
||||
/// Optional custom location for the table. If not provided, the database will
|
||||
/// derive a location based on its URI and the table name.
|
||||
pub location: Option<String>,
|
||||
/// Optional namespace client for server-side query execution.
|
||||
/// When set, queries will be executed on the namespace server instead of locally.
|
||||
pub namespace_client: Option<Arc<dyn LanceNamespace>>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for OpenTableRequest {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("OpenTableRequest")
|
||||
.field("name", &self.name)
|
||||
.field("namespace", &self.namespace)
|
||||
.field("index_cache_size", &self.index_cache_size)
|
||||
.field("lance_read_params", &self.lance_read_params)
|
||||
.field("location", &self.location)
|
||||
.field("namespace_client", &self.namespace_client)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
pub type TableBuilderCallback = Box<dyn FnOnce(OpenTableRequest) -> OpenTableRequest + Send>;
|
||||
@@ -167,9 +170,6 @@ pub struct CreateTableRequest {
|
||||
/// Optional custom location for the table. If not provided, the database will
|
||||
/// derive a location based on its URI and the table name.
|
||||
pub location: Option<String>,
|
||||
/// Optional namespace client for server-side query execution.
|
||||
/// When set, queries will be executed on the namespace server instead of locally.
|
||||
pub namespace_client: Option<Arc<dyn LanceNamespace>>,
|
||||
}
|
||||
|
||||
impl CreateTableRequest {
|
||||
@@ -181,7 +181,6 @@ impl CreateTableRequest {
|
||||
mode: CreateTableMode::default(),
|
||||
write_options: WriteOptions::default(),
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -248,30 +247,13 @@ pub trait Database:
|
||||
/// Get the read consistency of the database
|
||||
async fn read_consistency(&self) -> Result<ReadConsistency>;
|
||||
/// List immediate child namespace names in the given namespace
|
||||
async fn list_namespaces(
|
||||
&self,
|
||||
request: ListNamespacesRequest,
|
||||
) -> Result<ListNamespacesResponse>;
|
||||
async fn list_namespaces(&self, request: ListNamespacesRequest) -> Result<Vec<String>>;
|
||||
/// Create a new namespace
|
||||
async fn create_namespace(
|
||||
&self,
|
||||
request: CreateNamespaceRequest,
|
||||
) -> Result<CreateNamespaceResponse>;
|
||||
async fn create_namespace(&self, request: CreateNamespaceRequest) -> Result<()>;
|
||||
/// Drop a namespace
|
||||
async fn drop_namespace(&self, request: DropNamespaceRequest) -> Result<DropNamespaceResponse>;
|
||||
/// Describe a namespace (get its properties)
|
||||
async fn describe_namespace(
|
||||
&self,
|
||||
request: DescribeNamespaceRequest,
|
||||
) -> Result<DescribeNamespaceResponse>;
|
||||
async fn drop_namespace(&self, request: DropNamespaceRequest) -> Result<()>;
|
||||
/// List the names of tables in the database
|
||||
///
|
||||
/// # Deprecated
|
||||
/// Use `list_tables` instead for pagination support
|
||||
#[deprecated(note = "Use list_tables instead")]
|
||||
async fn table_names(&self, request: TableNamesRequest) -> Result<Vec<String>>;
|
||||
/// List tables in the database with pagination support
|
||||
async fn list_tables(&self, request: ListTablesRequest) -> Result<ListTablesResponse>;
|
||||
/// Create a table in the database
|
||||
async fn create_table(&self, request: CreateTableRequest) -> Result<Arc<dyn BaseTable>>;
|
||||
/// Clone a table in the database.
|
||||
|
||||
@@ -24,15 +24,10 @@ use crate::io::object_store::MirroringObjectStoreWrapper;
|
||||
use crate::table::NativeTable;
|
||||
use crate::utils::validate_table_name;
|
||||
|
||||
use lance_namespace::models::{
|
||||
CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
|
||||
DescribeNamespaceResponse, DropNamespaceRequest, DropNamespaceResponse, ListNamespacesRequest,
|
||||
ListNamespacesResponse, ListTablesRequest, ListTablesResponse,
|
||||
};
|
||||
|
||||
use super::{
|
||||
BaseTable, CloneTableRequest, CreateTableMode, CreateTableRequest, Database, DatabaseOptions,
|
||||
OpenTableRequest, TableNamesRequest,
|
||||
BaseTable, CloneTableRequest, CreateNamespaceRequest, CreateTableMode, CreateTableRequest,
|
||||
Database, DatabaseOptions, DropNamespaceRequest, ListNamespacesRequest, OpenTableRequest,
|
||||
TableNamesRequest,
|
||||
};
|
||||
|
||||
/// File extension to indicate a lance table
|
||||
@@ -40,7 +35,6 @@ pub const LANCE_FILE_EXTENSION: &str = "lance";
|
||||
|
||||
pub const OPT_NEW_TABLE_STORAGE_VERSION: &str = "new_table_data_storage_version";
|
||||
pub const OPT_NEW_TABLE_V2_MANIFEST_PATHS: &str = "new_table_enable_v2_manifest_paths";
|
||||
pub const OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS: &str = "new_table_enable_stable_row_ids";
|
||||
|
||||
/// Controls how new tables should be created
|
||||
#[derive(Clone, Debug, Default)]
|
||||
@@ -54,12 +48,6 @@ pub struct NewTableConfig {
|
||||
/// V2 manifest paths are more efficient than V1 manifest paths but are not
|
||||
/// supported by old clients.
|
||||
pub enable_v2_manifest_paths: Option<bool>,
|
||||
/// Whether to enable stable row IDs for new tables
|
||||
///
|
||||
/// When enabled, row IDs remain stable after compaction, update, delete,
|
||||
/// and merges. This is useful for materialized views and other use cases
|
||||
/// that need to track source rows across these operations.
|
||||
pub enable_stable_row_ids: Option<bool>,
|
||||
}
|
||||
|
||||
/// Options specific to the listing database
|
||||
@@ -99,14 +87,6 @@ impl ListingDatabaseOptions {
|
||||
})
|
||||
})
|
||||
.transpose()?,
|
||||
enable_stable_row_ids: map
|
||||
.get(OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS)
|
||||
.map(|s| {
|
||||
s.parse::<bool>().map_err(|_| Error::InvalidInput {
|
||||
message: format!("enable_stable_row_ids must be a boolean, received {}", s),
|
||||
})
|
||||
})
|
||||
.transpose()?,
|
||||
};
|
||||
// We just assume that any options that are not new table config options are storage options
|
||||
let storage_options = map
|
||||
@@ -114,7 +94,6 @@ impl ListingDatabaseOptions {
|
||||
.filter(|(key, _)| {
|
||||
key.as_str() != OPT_NEW_TABLE_STORAGE_VERSION
|
||||
&& key.as_str() != OPT_NEW_TABLE_V2_MANIFEST_PATHS
|
||||
&& key.as_str() != OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS
|
||||
})
|
||||
.map(|(key, value)| (key.clone(), value.clone()))
|
||||
.collect();
|
||||
@@ -139,12 +118,6 @@ impl DatabaseOptions for ListingDatabaseOptions {
|
||||
enable_v2_manifest_paths.to_string(),
|
||||
);
|
||||
}
|
||||
if let Some(enable_stable_row_ids) = self.new_table_config.enable_stable_row_ids {
|
||||
map.insert(
|
||||
OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
|
||||
enable_stable_row_ids.to_string(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -502,7 +475,7 @@ impl ListingDatabase {
|
||||
// this error is not lance::Error::DatasetNotFound, as the method
|
||||
// `remove_dir_all` may be used to remove something that is not a dataset
|
||||
lance::Error::NotFound { .. } => Error::TableNotFound {
|
||||
name: name.clone(),
|
||||
name: name.to_owned(),
|
||||
source: Box::new(err),
|
||||
},
|
||||
_ => Error::from(err),
|
||||
@@ -524,7 +497,7 @@ impl ListingDatabase {
|
||||
fn extract_storage_overrides(
|
||||
&self,
|
||||
request: &CreateTableRequest,
|
||||
) -> Result<(Option<LanceFileVersion>, Option<bool>, Option<bool>)> {
|
||||
) -> Result<(Option<LanceFileVersion>, Option<bool>)> {
|
||||
let storage_options = request
|
||||
.write_options
|
||||
.lance_write_params
|
||||
@@ -545,19 +518,7 @@ impl ListingDatabase {
|
||||
message: "enable_v2_manifest_paths must be a boolean".to_string(),
|
||||
})?;
|
||||
|
||||
let stable_row_ids_override = storage_options
|
||||
.and_then(|opts| opts.get(OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS))
|
||||
.map(|s| s.parse::<bool>())
|
||||
.transpose()
|
||||
.map_err(|_| Error::InvalidInput {
|
||||
message: "enable_stable_row_ids must be a boolean".to_string(),
|
||||
})?;
|
||||
|
||||
Ok((
|
||||
storage_version_override,
|
||||
v2_manifest_override,
|
||||
stable_row_ids_override,
|
||||
))
|
||||
Ok((storage_version_override, v2_manifest_override))
|
||||
}
|
||||
|
||||
/// Prepare write parameters for table creation
|
||||
@@ -566,7 +527,6 @@ impl ListingDatabase {
|
||||
request: &CreateTableRequest,
|
||||
storage_version_override: Option<LanceFileVersion>,
|
||||
v2_manifest_override: Option<bool>,
|
||||
stable_row_ids_override: Option<bool>,
|
||||
) -> lance::dataset::WriteParams {
|
||||
let mut write_params = request
|
||||
.write_options
|
||||
@@ -611,13 +571,6 @@ impl ListingDatabase {
|
||||
write_params.enable_v2_manifest_paths = enable_v2_manifest_paths;
|
||||
}
|
||||
|
||||
// Apply enable_stable_row_ids: table-level override takes precedence over connection config
|
||||
if let Some(enable_stable_row_ids) =
|
||||
stable_row_ids_override.or(self.new_table_config.enable_stable_row_ids)
|
||||
{
|
||||
write_params.enable_stable_row_ids = enable_stable_row_ids;
|
||||
}
|
||||
|
||||
if matches!(&request.mode, CreateTableMode::Overwrite) {
|
||||
write_params.mode = WriteMode::Overwrite;
|
||||
}
|
||||
@@ -646,7 +599,6 @@ impl ListingDatabase {
|
||||
index_cache_size: None,
|
||||
lance_read_params: None,
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
};
|
||||
let req = (callback)(req);
|
||||
let table = self.open_table(req).await?;
|
||||
@@ -668,20 +620,14 @@ impl ListingDatabase {
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Database for ListingDatabase {
|
||||
async fn list_namespaces(
|
||||
&self,
|
||||
request: ListNamespacesRequest,
|
||||
) -> Result<ListNamespacesResponse> {
|
||||
if request.id.as_ref().map(|v| !v.is_empty()).unwrap_or(false) {
|
||||
async fn list_namespaces(&self, request: ListNamespacesRequest) -> Result<Vec<String>> {
|
||||
if !request.namespace.is_empty() {
|
||||
return Err(Error::NotSupported {
|
||||
message: "Namespace operations are not supported for listing database".into(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(ListNamespacesResponse {
|
||||
namespaces: Vec::new(),
|
||||
page_token: None,
|
||||
})
|
||||
Ok(Vec::new())
|
||||
}
|
||||
|
||||
fn uri(&self) -> &str {
|
||||
@@ -700,28 +646,13 @@ impl Database for ListingDatabase {
|
||||
}
|
||||
}
|
||||
|
||||
async fn create_namespace(
|
||||
&self,
|
||||
_request: CreateNamespaceRequest,
|
||||
) -> Result<CreateNamespaceResponse> {
|
||||
async fn create_namespace(&self, _request: CreateNamespaceRequest) -> Result<()> {
|
||||
Err(Error::NotSupported {
|
||||
message: "Namespace operations are not supported for listing database".into(),
|
||||
})
|
||||
}
|
||||
|
||||
async fn drop_namespace(
|
||||
&self,
|
||||
_request: DropNamespaceRequest,
|
||||
) -> Result<DropNamespaceResponse> {
|
||||
Err(Error::NotSupported {
|
||||
message: "Namespace operations are not supported for listing database".into(),
|
||||
})
|
||||
}
|
||||
|
||||
async fn describe_namespace(
|
||||
&self,
|
||||
_request: DescribeNamespaceRequest,
|
||||
) -> Result<DescribeNamespaceResponse> {
|
||||
async fn drop_namespace(&self, _request: DropNamespaceRequest) -> Result<()> {
|
||||
Err(Error::NotSupported {
|
||||
message: "Namespace operations are not supported for listing database".into(),
|
||||
})
|
||||
@@ -762,57 +693,6 @@ impl Database for ListingDatabase {
|
||||
Ok(f)
|
||||
}
|
||||
|
||||
async fn list_tables(&self, request: ListTablesRequest) -> Result<ListTablesResponse> {
|
||||
if request.id.as_ref().map(|v| !v.is_empty()).unwrap_or(false) {
|
||||
return Err(Error::NotSupported {
|
||||
message: "Namespace parameter is not supported for listing database. Only root namespace is supported.".into(),
|
||||
});
|
||||
}
|
||||
let mut f = self
|
||||
.object_store
|
||||
.read_dir(self.base_path.clone())
|
||||
.await?
|
||||
.iter()
|
||||
.map(Path::new)
|
||||
.filter(|path| {
|
||||
let is_lance = path
|
||||
.extension()
|
||||
.and_then(|e| e.to_str())
|
||||
.map(|e| e == LANCE_EXTENSION);
|
||||
is_lance.unwrap_or(false)
|
||||
})
|
||||
.filter_map(|p| p.file_stem().and_then(|s| s.to_str().map(String::from)))
|
||||
.collect::<Vec<String>>();
|
||||
f.sort();
|
||||
|
||||
// Handle pagination with page_token
|
||||
if let Some(ref page_token) = request.page_token {
|
||||
let index = f
|
||||
.iter()
|
||||
.position(|name| name.as_str() > page_token.as_str())
|
||||
.unwrap_or(f.len());
|
||||
f.drain(0..index);
|
||||
}
|
||||
|
||||
// Determine if there's a next page
|
||||
let next_page_token = if let Some(limit) = request.limit {
|
||||
if f.len() > limit as usize {
|
||||
let token = f[limit as usize].clone();
|
||||
f.truncate(limit as usize);
|
||||
Some(token)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(ListTablesResponse {
|
||||
tables: f,
|
||||
page_token: next_page_token,
|
||||
})
|
||||
}
|
||||
|
||||
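For reference, a language-neutral sketch (written in Python) of the page-token logic implemented in `list_tables` above: sort the names, keep only those lexicographically after the supplied token, and when a limit is given return the first name that did not fit as the next token. This is illustrative only, not a LanceDB API:

```python
from typing import Optional


def paginate(names: list[str], page_token: Optional[str], limit: Optional[int]):
    names = sorted(names)
    if page_token is not None:
        # Keep only names strictly greater than the token.
        names = [n for n in names if n > page_token]
    next_token = None
    if limit is not None and len(names) > limit:
        # The token is the first name that did not fit on this page.
        next_token = names[limit]
        names = names[:limit]
    return names, next_token


page, token = paginate(["a", "b", "c", "d"], None, 2)  # (["a", "b"], "c")
```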
async fn create_table(&self, request: CreateTableRequest) -> Result<Arc<dyn BaseTable>> {
|
||||
// When namespace is not empty, location must be provided
|
||||
if !request.namespace.is_empty() && request.location.is_none() {
|
||||
@@ -826,15 +706,11 @@ impl Database for ListingDatabase {
|
||||
.clone()
|
||||
.unwrap_or_else(|| self.table_uri(&request.name).unwrap());
|
||||
|
||||
let (storage_version_override, v2_manifest_override, stable_row_ids_override) =
|
||||
let (storage_version_override, v2_manifest_override) =
|
||||
self.extract_storage_overrides(&request)?;
|
||||
|
||||
let write_params = self.prepare_write_params(
|
||||
&request,
|
||||
storage_version_override,
|
||||
v2_manifest_override,
|
||||
stable_row_ids_override,
|
||||
);
|
||||
let write_params =
|
||||
self.prepare_write_params(&request, storage_version_override, v2_manifest_override);
|
||||
|
||||
let data_schema = request.data.arrow_schema();
|
||||
|
||||
@@ -846,7 +722,6 @@ impl Database for ListingDatabase {
|
||||
self.store_wrapper.clone(),
|
||||
Some(write_params),
|
||||
self.read_consistency_interval,
|
||||
request.namespace_client,
|
||||
)
|
||||
.await
|
||||
{
|
||||
@@ -918,7 +793,6 @@ impl Database for ListingDatabase {
|
||||
self.store_wrapper.clone(),
|
||||
None,
|
||||
self.read_consistency_interval,
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -990,7 +864,6 @@ impl Database for ListingDatabase {
|
||||
self.store_wrapper.clone(),
|
||||
Some(read_params),
|
||||
self.read_consistency_interval,
|
||||
request.namespace_client,
|
||||
)
|
||||
.await?,
|
||||
);
|
||||
@@ -1028,7 +901,6 @@ impl Database for ListingDatabase {
|
||||
self.drop_tables(vec![name.to_string()]).await
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
async fn drop_all_tables(&self, namespace: &[String]) -> Result<()> {
|
||||
// Check if namespace parameter is provided
|
||||
if !namespace.is_empty() {
|
||||
@@ -1049,7 +921,7 @@ impl Database for ListingDatabase {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::connection::ConnectRequest;
|
||||
use crate::database::{CreateTableData, CreateTableMode, CreateTableRequest, WriteOptions};
|
||||
use crate::database::{CreateTableData, CreateTableMode, CreateTableRequest};
|
||||
use crate::table::{Table, TableDefinition};
|
||||
use arrow_array::{Int32Array, RecordBatch, StringArray};
|
||||
use arrow_schema::{DataType, Field, Schema};
|
||||
@@ -1093,7 +965,6 @@ mod tests {
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1115,7 +986,6 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
// Verify both tables exist
|
||||
#[allow(deprecated)]
|
||||
let table_names = db.table_names(TableNamesRequest::default()).await.unwrap();
|
||||
assert!(table_names.contains(&"source_table".to_string()));
|
||||
assert!(table_names.contains(&"cloned_table".to_string()));
|
||||
@@ -1159,7 +1029,6 @@ mod tests {
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1218,7 +1087,6 @@ mod tests {
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1254,7 +1122,6 @@ mod tests {
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1294,7 +1161,6 @@ mod tests {
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1334,7 +1200,6 @@ mod tests {
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1389,7 +1254,6 @@ mod tests {
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1447,7 +1311,6 @@ mod tests {
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1533,7 +1396,6 @@ mod tests {
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1620,7 +1482,6 @@ mod tests {
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1714,7 +1575,6 @@ mod tests {
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1761,270 +1621,4 @@ mod tests {
|
||||
// Cloned table should have all 8 rows from the latest version
|
||||
assert_eq!(cloned_table.count_rows(None).await.unwrap(), 8);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_create_table_with_stable_row_ids_connection_level() {
|
||||
let tempdir = tempdir().unwrap();
|
||||
let uri = tempdir.path().to_str().unwrap();
|
||||
|
||||
// Create database with stable row IDs enabled at connection level
|
||||
let mut options = HashMap::new();
|
||||
options.insert(
|
||||
OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
|
||||
"true".to_string(),
|
||||
);
|
||||
|
||||
let request = ConnectRequest {
|
||||
uri: uri.to_string(),
|
||||
#[cfg(feature = "remote")]
|
||||
client_config: Default::default(),
|
||||
options,
|
||||
read_consistency_interval: None,
|
||||
session: None,
|
||||
};
|
||||
|
||||
let db = ListingDatabase::connect_with_options(&request)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Verify the config was parsed correctly
|
||||
assert_eq!(db.new_table_config.enable_stable_row_ids, Some(true));
|
||||
|
||||
// Create a table - it should inherit the stable row IDs setting
|
||||
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
|
||||
|
||||
let batch = RecordBatch::try_new(
|
||||
schema.clone(),
|
||||
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let reader = Box::new(arrow_array::RecordBatchIterator::new(
|
||||
vec![Ok(batch)],
|
||||
schema.clone(),
|
||||
));
|
||||
|
||||
let table = db
|
||||
.create_table(CreateTableRequest {
|
||||
name: "test_stable".to_string(),
|
||||
namespace: vec![],
|
||||
data: CreateTableData::Data(reader),
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Verify table was created successfully
|
||||
assert_eq!(table.count_rows(None).await.unwrap(), 3);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_create_table_with_stable_row_ids_table_level() {
|
||||
let (_tempdir, db) = setup_database().await;
|
||||
|
||||
// Verify connection has no stable row IDs config
|
||||
assert_eq!(db.new_table_config.enable_stable_row_ids, None);
|
||||
|
||||
// Create a table with stable row IDs enabled at table level via storage_options
|
||||
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
|
||||
|
||||
let batch = RecordBatch::try_new(
|
||||
schema.clone(),
|
||||
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let reader = Box::new(arrow_array::RecordBatchIterator::new(
|
||||
vec![Ok(batch)],
|
||||
schema.clone(),
|
||||
));
|
||||
|
||||
let mut storage_options = HashMap::new();
|
||||
storage_options.insert(
|
||||
OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
|
||||
"true".to_string(),
|
||||
);
|
||||
|
||||
let write_options = WriteOptions {
|
||||
lance_write_params: Some(lance::dataset::WriteParams {
|
||||
store_params: Some(lance::io::ObjectStoreParams {
|
||||
storage_options: Some(storage_options),
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
}),
|
||||
};
|
||||
|
||||
let table = db
|
||||
.create_table(CreateTableRequest {
|
||||
name: "test_stable_table_level".to_string(),
|
||||
namespace: vec![],
|
||||
data: CreateTableData::Data(reader),
|
||||
mode: CreateTableMode::Create,
|
||||
write_options,
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Verify table was created successfully
|
||||
assert_eq!(table.count_rows(None).await.unwrap(), 3);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_create_table_stable_row_ids_table_overrides_connection() {
|
||||
let tempdir = tempdir().unwrap();
|
||||
let uri = tempdir.path().to_str().unwrap();
|
||||
|
||||
// Create database with stable row IDs enabled at connection level
|
||||
let mut options = HashMap::new();
|
||||
options.insert(
|
||||
OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
|
||||
"true".to_string(),
|
||||
);
|
||||
|
||||
let request = ConnectRequest {
|
||||
uri: uri.to_string(),
|
||||
#[cfg(feature = "remote")]
|
||||
client_config: Default::default(),
|
||||
options,
|
||||
read_consistency_interval: None,
|
||||
session: None,
|
||||
};
|
||||
|
||||
let db = ListingDatabase::connect_with_options(&request)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(db.new_table_config.enable_stable_row_ids, Some(true));
|
||||
|
||||
// Create table with stable row IDs disabled at table level (overrides connection)
|
||||
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
|
||||
|
||||
let batch = RecordBatch::try_new(
|
||||
schema.clone(),
|
||||
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let reader = Box::new(arrow_array::RecordBatchIterator::new(
|
||||
vec![Ok(batch)],
|
||||
schema.clone(),
|
||||
));
|
||||
|
||||
let mut storage_options = HashMap::new();
|
||||
storage_options.insert(
|
||||
OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
|
||||
"false".to_string(),
|
||||
);
|
||||
|
||||
let write_options = WriteOptions {
|
||||
lance_write_params: Some(lance::dataset::WriteParams {
|
||||
store_params: Some(lance::io::ObjectStoreParams {
|
||||
storage_options: Some(storage_options),
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
}),
|
||||
};
|
||||
|
||||
let table = db
|
||||
.create_table(CreateTableRequest {
|
||||
name: "test_override".to_string(),
|
||||
namespace: vec![],
|
||||
data: CreateTableData::Data(reader),
|
||||
mode: CreateTableMode::Create,
|
||||
write_options,
|
||||
location: None,
|
||||
namespace_client: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Verify table was created successfully
|
||||
assert_eq!(table.count_rows(None).await.unwrap(), 3);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_stable_row_ids_invalid_value() {
|
||||
let tempdir = tempdir().unwrap();
|
||||
let uri = tempdir.path().to_str().unwrap();
|
||||
|
||||
// Try to create database with invalid stable row IDs value
|
||||
let mut options = HashMap::new();
|
||||
options.insert(
|
||||
OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
|
||||
"not_a_boolean".to_string(),
|
||||
);
|
||||
|
||||
let request = ConnectRequest {
|
||||
uri: uri.to_string(),
|
||||
#[cfg(feature = "remote")]
|
||||
client_config: Default::default(),
|
||||
options,
|
||||
read_consistency_interval: None,
|
||||
session: None,
|
||||
};
|
||||
|
||||
let result = ListingDatabase::connect_with_options(&request).await;
|
||||
|
||||
assert!(result.is_err());
|
||||
assert!(matches!(
|
||||
result.unwrap_err(),
|
||||
Error::InvalidInput { message } if message.contains("enable_stable_row_ids must be a boolean")
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stable_row_ids_config_serialization() {
|
||||
// Test that ListingDatabaseOptions correctly serializes stable_row_ids
|
||||
let mut options = HashMap::new();
|
||||
options.insert(
|
||||
OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
|
||||
"true".to_string(),
|
||||
);
|
||||
|
||||
// Parse the options
|
||||
let db_options = ListingDatabaseOptions::parse_from_map(&options).unwrap();
|
||||
assert_eq!(
|
||||
db_options.new_table_config.enable_stable_row_ids,
|
||||
Some(true)
|
||||
);
|
||||
|
||||
// Serialize back to map
|
||||
        let mut serialized = HashMap::new();
        db_options.serialize_into_map(&mut serialized);

        assert_eq!(
            serialized.get(OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS),
            Some(&"true".to_string())
        );
    }

    #[test]
    fn test_stable_row_ids_config_parse_false() {
        let mut options = HashMap::new();
        options.insert(
            OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
            "false".to_string(),
        );

        let db_options = ListingDatabaseOptions::parse_from_map(&options).unwrap();
        assert_eq!(
            db_options.new_table_config.enable_stable_row_ids,
            Some(false)
        );
    }

    #[test]
    fn test_stable_row_ids_config_not_set() {
        let options = HashMap::new();

        let db_options = ListingDatabaseOptions::parse_from_map(&options).unwrap();
        assert_eq!(db_options.new_table_config.enable_stable_row_ids, None);
    }
}

@@ -10,10 +10,8 @@ use async_trait::async_trait;
use lance_io::object_store::{LanceNamespaceStorageOptionsProvider, StorageOptionsProvider};
use lance_namespace::{
    models::{
        CreateEmptyTableRequest, CreateNamespaceRequest, CreateNamespaceResponse,
        DescribeNamespaceRequest, DescribeNamespaceResponse, DescribeTableRequest,
        DropNamespaceRequest, DropNamespaceResponse, DropTableRequest, ListNamespacesRequest,
        ListNamespacesResponse, ListTablesRequest, ListTablesResponse,
        CreateEmptyTableRequest, CreateNamespaceRequest, DescribeTableRequest,
        DropNamespaceRequest, DropTableRequest, ListNamespacesRequest, ListTablesRequest,
    },
    LanceNamespace,
};

@@ -24,8 +22,11 @@ use crate::database::ReadConsistency;
use crate::error::{Error, Result};

use super::{
    listing::ListingDatabase, BaseTable, CloneTableRequest, CreateTableMode,
    CreateTableRequest as DbCreateTableRequest, Database, OpenTableRequest, TableNamesRequest,
    listing::ListingDatabase, BaseTable, CloneTableRequest,
    CreateNamespaceRequest as DbCreateNamespaceRequest, CreateTableMode,
    CreateTableRequest as DbCreateTableRequest, Database,
    DropNamespaceRequest as DbDropNamespaceRequest,
    ListNamespacesRequest as DbListNamespacesRequest, OpenTableRequest, TableNamesRequest,
};

/// A database implementation that uses lance-namespace for table management

@@ -39,8 +40,6 @@ pub struct LanceNamespaceDatabase {
    session: Option<Arc<lance::session::Session>>,
    // database URI
    uri: String,
    // Whether to enable server-side query execution
    server_side_query_enabled: bool,
}

impl LanceNamespaceDatabase {

@@ -50,7 +49,6 @@ impl LanceNamespaceDatabase {
        storage_options: HashMap<String, String>,
        read_consistency_interval: Option<std::time::Duration>,
        session: Option<Arc<lance::session::Session>>,
        server_side_query_enabled: bool,
    ) -> Result<Self> {
        let mut builder = ConnectBuilder::new(ns_impl);
        for (key, value) in ns_properties.clone() {

@@ -69,7 +67,6 @@ impl LanceNamespaceDatabase {
            read_consistency_interval,
            session,
            uri: format!("namespace://{}", ns_impl),
            server_side_query_enabled,
        })
    }
}

@@ -79,7 +76,6 @@ impl std::fmt::Debug for LanceNamespaceDatabase {
        f.debug_struct("LanceNamespaceDatabase")
            .field("storage_options", &self.storage_options)
            .field("read_consistency_interval", &self.read_consistency_interval)
            .field("server_side_query_enabled", &self.server_side_query_enabled)
            .finish()
    }
}

@@ -153,47 +149,92 @@ impl Database for LanceNamespaceDatabase {
        }
    }

    async fn list_namespaces(
        &self,
        request: ListNamespacesRequest,
    ) -> Result<ListNamespacesResponse> {
        Ok(self.namespace.list_namespaces(request).await?)
    async fn list_namespaces(&self, request: DbListNamespacesRequest) -> Result<Vec<String>> {
        let ns_request = ListNamespacesRequest {
            id: if request.namespace.is_empty() {
                None
            } else {
                Some(request.namespace)
            },
            page_token: request.page_token,
            limit: request.limit.map(|l| l as i32),
        };

        let response = self
            .namespace
            .list_namespaces(ns_request)
            .await
            .map_err(|e| Error::Runtime {
                message: format!("Failed to list namespaces: {}", e),
            })?;

        Ok(response.namespaces)
    }

    async fn create_namespace(
        &self,
        request: CreateNamespaceRequest,
    ) -> Result<CreateNamespaceResponse> {
        Ok(self.namespace.create_namespace(request).await?)
    async fn create_namespace(&self, request: DbCreateNamespaceRequest) -> Result<()> {
        let ns_request = CreateNamespaceRequest {
            id: if request.namespace.is_empty() {
                None
            } else {
                Some(request.namespace)
            },
            mode: None,
            properties: None,
        };

        self.namespace
            .create_namespace(ns_request)
            .await
            .map_err(|e| Error::Runtime {
                message: format!("Failed to create namespace: {}", e),
            })?;

        Ok(())
    }

    async fn drop_namespace(&self, request: DropNamespaceRequest) -> Result<DropNamespaceResponse> {
        Ok(self.namespace.drop_namespace(request).await?)
    }
    async fn drop_namespace(&self, request: DbDropNamespaceRequest) -> Result<()> {
        let ns_request = DropNamespaceRequest {
            id: if request.namespace.is_empty() {
                None
            } else {
                Some(request.namespace)
            },
            mode: None,
            behavior: None,
        };

    async fn describe_namespace(
        &self,
        request: DescribeNamespaceRequest,
    ) -> Result<DescribeNamespaceResponse> {
        Ok(self.namespace.describe_namespace(request).await?)
        self.namespace
            .drop_namespace(ns_request)
            .await
            .map_err(|e| Error::Runtime {
                message: format!("Failed to drop namespace: {}", e),
            })?;

        Ok(())
    }

    async fn table_names(&self, request: TableNamesRequest) -> Result<Vec<String>> {
        let ns_request = ListTablesRequest {
            id: Some(request.namespace),
            id: if request.namespace.is_empty() {
                None
            } else {
                Some(request.namespace)
            },
            page_token: request.start_after,
            limit: request.limit.map(|l| l as i32),
        };

        let response = self.namespace.list_tables(ns_request).await?;
        let response =
            self.namespace
                .list_tables(ns_request)
                .await
                .map_err(|e| Error::Runtime {
                    message: format!("Failed to list tables: {}", e),
                })?;

        Ok(response.tables)
    }

    async fn list_tables(&self, request: ListTablesRequest) -> Result<ListTablesResponse> {
        Ok(self.namespace.list_tables(request).await?)
    }

    async fn create_table(&self, request: DbCreateTableRequest) -> Result<Arc<dyn BaseTable>> {
        // Extract user-provided storage options from request
        let user_storage_options = request

@@ -249,10 +290,6 @@ impl Database for LanceNamespaceDatabase {
            )
            .await?;

        let namespace_client = self
            .server_side_query_enabled
            .then(|| self.namespace.clone());

        return listing_db
            .open_table(OpenTableRequest {
                name: request.name.clone(),

@@ -260,7 +297,6 @@ impl Database for LanceNamespaceDatabase {
                index_cache_size: None,
                lance_read_params: None,
                location: Some(location),
                namespace_client,
            })
            .await;
        }

@@ -297,16 +333,12 @@ impl Database for LanceNamespaceDatabase {
        let listing_db = self
            .create_listing_database(
                &location,
                table_id.clone(),
                table_id,
                user_storage_options,
                create_empty_response.storage_options.as_ref(),
            )
            .await?;

        let namespace_client = self
            .server_side_query_enabled
            .then(|| self.namespace.clone());

        let create_request = DbCreateTableRequest {
            name: request.name,
            namespace: request.namespace,

@@ -314,9 +346,7 @@ impl Database for LanceNamespaceDatabase {
            mode: request.mode,
            write_options: request.write_options,
            location: Some(location),
            namespace_client,
        };

        listing_db.create_table(create_request).await
    }

@@ -350,25 +380,19 @@ impl Database for LanceNamespaceDatabase {
        let listing_db = self
            .create_listing_database(
                &location,
                table_id.clone(),
                table_id,
                user_storage_options,
                response.storage_options.as_ref(),
            )
            .await?;

        let namespace_client = self
            .server_side_query_enabled
            .then(|| self.namespace.clone());

        let open_request = OpenTableRequest {
            name: request.name.clone(),
            namespace: request.namespace.clone(),
            index_cache_size: request.index_cache_size,
            lance_read_params: request.lance_read_params,
            location: Some(location),
            namespace_client,
        };

        listing_db.open_table(open_request).await
    }

@@ -405,7 +429,6 @@ impl Database for LanceNamespaceDatabase {
        Ok(())
    }

    #[allow(deprecated)]
    async fn drop_all_tables(&self, namespace: &[String]) -> Result<()> {
        let tables = self
            .table_names(TableNamesRequest {

@@ -432,6 +455,7 @@ impl Database for LanceNamespaceDatabase {
mod tests {
    use super::*;
    use crate::connect_namespace;
    use crate::database::CreateNamespaceRequest;
    use crate::query::ExecutableQuery;
    use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator, StringArray};
    use arrow_schema::{DataType, Field, Schema};

@@ -544,9 +568,7 @@ mod tests {

        // Create a child namespace first
        conn.create_namespace(CreateNamespaceRequest {
            id: Some(vec!["test_ns".into()]),
            mode: None,
            properties: None,
            namespace: vec!["test_ns".into()],
        })
        .await
        .expect("Failed to create namespace");

@@ -605,9 +627,7 @@ mod tests {

        // Create a child namespace first
        conn.create_namespace(CreateNamespaceRequest {
            id: Some(vec!["test_ns".into()]),
            mode: None,
            properties: None,
            namespace: vec!["test_ns".into()],
        })
        .await
        .expect("Failed to create namespace");

@@ -669,9 +689,7 @@ mod tests {

        // Create a child namespace first
        conn.create_namespace(CreateNamespaceRequest {
            id: Some(vec!["test_ns".into()]),
            mode: None,
            properties: None,
            namespace: vec!["test_ns".into()],
        })
        .await
        .expect("Failed to create namespace");

@@ -753,9 +771,7 @@ mod tests {

        // Create a child namespace first
        conn.create_namespace(CreateNamespaceRequest {
            id: Some(vec!["test_ns".into()]),
            mode: None,
            properties: None,
            namespace: vec!["test_ns".into()],
        })
        .await
        .expect("Failed to create namespace");

@@ -809,9 +825,7 @@ mod tests {

        // Create a child namespace first
        conn.create_namespace(CreateNamespaceRequest {
            id: Some(vec!["test_ns".into()]),
            mode: None,
            properties: None,
            namespace: vec!["test_ns".into()],
        })
        .await
        .expect("Failed to create namespace");

@@ -890,9 +904,7 @@ mod tests {

        // Create a child namespace first
        conn.create_namespace(CreateNamespaceRequest {
            id: Some(vec!["test_ns".into()]),
            mode: None,
            properties: None,
            namespace: vec!["test_ns".into()],
        })
        .await
        .expect("Failed to create namespace");

@@ -924,9 +936,7 @@ mod tests {

        // Create a child namespace first
        conn.create_namespace(CreateNamespaceRequest {
            id: Some(vec!["test_ns".into()]),
            mode: None,
            properties: None,
            namespace: vec!["test_ns".into()],
        })
        .await
        .expect("Failed to create namespace");

@@ -967,46 +977,4 @@ mod tests {
        let open_result = conn.open_table("drop_test").execute().await;
        assert!(open_result.is_err());
    }

    #[tokio::test]
    async fn test_table_names_at_root() {
        // Test that table_names at root (empty namespace) works correctly
        // This is a regression test for a bug where empty namespace was converted to None
        let tmp_dir = tempdir().unwrap();
        let root_path = tmp_dir.path().to_str().unwrap().to_string();

        let mut properties = HashMap::new();
        properties.insert("root".to_string(), root_path);

        let conn = connect_namespace("dir", properties)
            .execute()
            .await
            .expect("Failed to connect to namespace");

        // Create multiple tables at root namespace
        let test_data1 = create_test_data();
        let _table1 = conn
            .create_table("table1", test_data1)
            .execute()
            .await
            .expect("Failed to create table1 at root");

        let test_data2 = create_test_data();
        let _table2 = conn
            .create_table("table2", test_data2)
            .execute()
            .await
            .expect("Failed to create table2 at root");

        // List tables at root using table_names (empty namespace means root)
        let table_names = conn
            .table_names()
            .execute()
            .await
            .expect("Failed to list tables at root");

        assert!(table_names.contains(&"table1".to_string()));
        assert!(table_names.contains(&"table2".to_string()));
        assert_eq!(table_names.len(), 2);
    }
}

@@ -13,7 +7,7 @@ use crate::{table::BaseTable, DistanceType, Error, Result};

use self::{
    scalar::{BTreeIndexBuilder, BitmapIndexBuilder, LabelListIndexBuilder},
    vector::{IvfHnswPqIndexBuilder, IvfHnswSqIndexBuilder, IvfPqIndexBuilder, IvfSqIndexBuilder},
    vector::{IvfHnswPqIndexBuilder, IvfHnswSqIndexBuilder, IvfPqIndexBuilder},
};

pub mod scalar;

@@ -54,9 +54,6 @@ pub enum Index {
    /// IVF index with Product Quantization
    IvfPq(IvfPqIndexBuilder),

    /// IVF index with Scalar Quantization
    IvfSq(IvfSqIndexBuilder),

    /// IVF index with RabitQ Quantization
    IvfRq(IvfRqIndexBuilder),

@@ -280,8 +277,6 @@ pub enum IndexType {
    // Vector
    #[serde(alias = "IVF_FLAT")]
    IvfFlat,
    #[serde(alias = "IVF_SQ")]
    IvfSq,
    #[serde(alias = "IVF_PQ")]
    IvfPq,
    #[serde(alias = "IVF_RQ")]

@@ -306,7 +301,6 @@ impl std::fmt::Display for IndexType {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            Self::IvfFlat => write!(f, "IVF_FLAT"),
            Self::IvfSq => write!(f, "IVF_SQ"),
            Self::IvfPq => write!(f, "IVF_PQ"),
            Self::IvfRq => write!(f, "IVF_RQ"),
            Self::IvfHnswPq => write!(f, "IVF_HNSW_PQ"),

@@ -329,7 +323,6 @@ impl std::str::FromStr for IndexType {
            "LABEL_LIST" | "LABELLIST" => Ok(Self::LabelList),
            "FTS" | "INVERTED" => Ok(Self::FTS),
            "IVF_FLAT" => Ok(Self::IvfFlat),
            "IVF_SQ" => Ok(Self::IvfSq),
            "IVF_PQ" => Ok(Self::IvfPq),
            "IVF_RQ" => Ok(Self::IvfRq),
            "IVF_HNSW_PQ" => Ok(Self::IvfHnswPq),

@@ -209,38 +209,6 @@ impl IvfFlatIndexBuilder {
    impl_ivf_params_setter!();
}

/// Builder for an IVF SQ index.
///
/// This index compresses vectors using scalar quantization and groups them into IVF partitions.
/// It offers a balance between search performance and storage footprint.
#[derive(Debug, Clone)]
pub struct IvfSqIndexBuilder {
    pub(crate) distance_type: DistanceType,

    // IVF
    pub(crate) num_partitions: Option<u32>,
    pub(crate) sample_rate: u32,
    pub(crate) max_iterations: u32,
    pub(crate) target_partition_size: Option<u32>,
}

impl Default for IvfSqIndexBuilder {
    fn default() -> Self {
        Self {
            distance_type: DistanceType::L2,
            num_partitions: None,
            sample_rate: 256,
            max_iterations: 50,
            target_partition_size: None,
        }
    }
}

impl IvfSqIndexBuilder {
    impl_distance_type_setter!();
    impl_ivf_params_setter!();
}

/// Builder for an IVF PQ index.
///
/// This index stores a compressed (quantized) copy of every vector. These vectors
Some files were not shown because too many files have changed in this diff.
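For readers skimming the diff, the snippet below condenses the usage pattern exercised by `test_table_names_at_root` above: connect through the `dir` namespace implementation with a `root` property, create tables without an explicit namespace, and list them at the root (empty) namespace. It is an illustrative sketch, not part of the change; the directory path is made up, and `create_test_data` stands in for the test helper, assuming `create_table` accepts a boxed Arrow `RecordBatchReader` as in typical lancedb usage.

// Illustrative sketch only: mirrors the calls made in the tests shown in the
// diff. The path and the data helper are placeholders, not exported API.
use std::collections::HashMap;
use std::sync::Arc;

use arrow_array::{
    ArrayRef, Int32Array, RecordBatch, RecordBatchIterator, RecordBatchReader, StringArray,
};
use arrow_schema::{DataType, Field, Schema};

// Build a tiny two-column batch and wrap it in a reader, the assumed input
// shape for `create_table`.
fn create_test_data() -> Box<dyn RecordBatchReader + Send> {
    let schema = Arc::new(Schema::new(vec![
        Field::new("id", DataType::Int32, false),
        Field::new("name", DataType::Utf8, false),
    ]));
    let batch = RecordBatch::try_new(
        schema.clone(),
        vec![
            Arc::new(Int32Array::from(vec![1, 2, 3])) as ArrayRef,
            Arc::new(StringArray::from(vec!["a", "b", "c"])) as ArrayRef,
        ],
    )
    .unwrap();
    Box::new(RecordBatchIterator::new(vec![Ok(batch)], schema))
}

async fn list_root_tables_example() -> lancedb::Result<Vec<String>> {
    // The "root" property points the `dir` namespace implementation at a
    // local directory (placeholder path here).
    let mut properties = HashMap::new();
    properties.insert("root".to_string(), "/tmp/lancedb-ns-example".to_string());

    let conn = lancedb::connect_namespace("dir", properties)
        .execute()
        .await?;

    // Tables created without an explicit namespace land at the root namespace.
    conn.create_table("table1", create_test_data())
        .execute()
        .await?;

    // An empty namespace means "list tables at the root".
    conn.table_names().execute().await
}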