mirror of https://github.com/lancedb/lancedb.git
synced 2025-12-24 13:59:58 +00:00

Compare commits: 25 commits (rmeng/patc… ... release-0.…)
| Author | SHA1 | Date |
|---|---|---|
|  | ba3ad35b87 |  |
|  | 28a0fea1d0 |  |
|  | b0e6c20be2 |  |
|  | d9965476c5 |  |
|  | 2f4b70ecfe |  |
|  | 1ad1c0820d |  |
|  | db712b0f99 |  |
|  | fd1a5ce788 |  |
|  | def087fc85 |  |
|  | 43f920182a |  |
|  | 718963d1fb |  |
|  | e4dac751e7 |  |
|  | aae02953eb |  |
|  | 1d9f76bdda |  |
|  | affdfc4d48 |  |
|  | 41b77f5e25 |  |
|  | eb8b3b8c54 |  |
|  | f69c3e0595 |  |
|  | 8511edaaab |  |
|  | 657aba3c05 |  |
|  | 2e197ef387 |  |
|  | 4f512af024 |  |
|  | 5349e8b1db |  |
|  | 5e01810438 |  |
|  | 6eaaee59f8 |  |
22  .bumpversion.cfg  (deleted)
@@ -1,22 +0,0 @@
[bumpversion]
current_version = 0.4.20
commit = True
message = Bump version: {current_version} → {new_version}
tag = True
tag_name = v{new_version}

[bumpversion:file:node/package.json]

[bumpversion:file:nodejs/package.json]

[bumpversion:file:nodejs/npm/darwin-x64/package.json]

[bumpversion:file:nodejs/npm/darwin-arm64/package.json]

[bumpversion:file:nodejs/npm/linux-x64-gnu/package.json]

[bumpversion:file:nodejs/npm/linux-arm64-gnu/package.json]

[bumpversion:file:rust/ffi/node/Cargo.toml]

[bumpversion:file:rust/lancedb/Cargo.toml]
57  .bumpversion.toml  (new file)
@@ -0,0 +1,57 @@
[tool.bumpversion]
current_version = "0.5.0"
parse = """(?x)
    (?P<major>0|[1-9]\\d*)\\.
    (?P<minor>0|[1-9]\\d*)\\.
    (?P<patch>0|[1-9]\\d*)
    (?:-(?P<pre_l>[a-zA-Z-]+)\\.(?P<pre_n>0|[1-9]\\d*))?
"""
serialize = [
    "{major}.{minor}.{patch}-{pre_l}.{pre_n}",
    "{major}.{minor}.{patch}",
]
search = "{current_version}"
replace = "{new_version}"
regex = false
ignore_missing_version = false
ignore_missing_files = false
tag = true
sign_tags = false
tag_name = "v{new_version}"
tag_message = "Bump version: {current_version} → {new_version}"
allow_dirty = true
commit = true
message = "Bump version: {current_version} → {new_version}"
commit_args = ""

[tool.bumpversion.parts.pre_l]
values = ["beta", "final"]
optional_value = "final"

[[tool.bumpversion.files]]
filename = "node/package.json"
search = "\"version\": \"{current_version}\","
replace = "\"version\": \"{new_version}\","

[[tool.bumpversion.files]]
filename = "nodejs/package.json"
search = "\"version\": \"{current_version}\","
replace = "\"version\": \"{new_version}\","

# nodejs binary packages
[[tool.bumpversion.files]]
glob = "nodejs/npm/*/package.json"
search = "\"version\": \"{current_version}\","
replace = "\"version\": \"{new_version}\","

# Cargo files
# ------------
[[tool.bumpversion.files]]
filename = "rust/ffi/node/Cargo.toml"
search = "\nversion = \"{current_version}\""
replace = "\nversion = \"{new_version}\""

[[tool.bumpversion.files]]
filename = "rust/lancedb/Cargo.toml"
search = "\nversion = \"{current_version}\""
replace = "\nversion = \"{new_version}\""
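A quick aside on the config above: the `parse`/`serialize` pair is what lets `bump-my-version` move between stable and `-beta.N` versions. A minimal sketch of that round-trip in Python (the pattern is copied from the TOML after unescaping; the sample version strings are invented, and this is not part of the commit):

```python
import re

# Same verbose pattern as in [tool.bumpversion] above (TOML "\\d" is "\d" once unescaped).
PARSE = re.compile(
    r"""(?x)
    (?P<major>0|[1-9]\d*)\.
    (?P<minor>0|[1-9]\d*)\.
    (?P<patch>0|[1-9]\d*)
    (?:-(?P<pre_l>[a-zA-Z-]+)\.(?P<pre_n>0|[1-9]\d*))?
    """
)

for version in ["0.5.0", "0.5.1-beta.3"]:  # sample inputs, not from the commit
    parts = PARSE.match(version).groupdict()
    # serialize: the first format whose fields are all present wins, so
    # pre-releases use "{major}.{minor}.{patch}-{pre_l}.{pre_n}".
    if parts["pre_l"] is not None:
        out = "{major}.{minor}.{patch}-{pre_l}.{pre_n}".format(**parts)
    else:
        out = "{major}.{minor}.{patch}".format(**parts)
    assert out == version  # round-trips cleanly
    print(parts)
```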
25  .github/release.yml  (deleted)
@@ -1,25 +0,0 @@
# TODO: create separate templates for Python and other releases.
changelog:
  exclude:
    labels:
      - ci
      - chore
  categories:
    - title: Breaking Changes 🛠
      labels:
        - breaking-change
    - title: New Features 🎉
      labels:
        - enhancement
    - title: Bug Fixes 🐛
      labels:
        - bug
    - title: Documentation 📚
      labels:
        - documentation
    - title: Performance Improvements 🚀
      labels:
        - performance
    - title: Other Changes
      labels:
        - "*"
41  .github/release_notes.json  (new file)
@@ -0,0 +1,41 @@
{
  "ignore_labels": ["chore"],
  "pr_template": "- ${{TITLE}} by @${{AUTHOR}} in ${{URL}}",
  "categories": [
    {
      "title": "## 🏆 Highlights",
      "labels": ["highlight"]
    },
    {
      "title": "## 🛠 Breaking Changes",
      "labels": ["breaking-change"]
    },
    {
      "title": "## ⚠️ Deprecations ",
      "labels": ["deprecation"]
    },
    {
      "title": "## 🎉 New Features",
      "labels": ["enhancement"]
    },
    {
      "title": "## 🐛 Bug Fixes",
      "labels": ["bug"]
    },
    {
      "title": "## 📚 Documentation",
      "labels": ["documentation"]
    },
    {
      "title": "## 🚀 Performance Improvements",
      "labels": ["performance"]
    },
    {
      "title": "## Other Changes"
    },
    {
      "title": "## 🔧 Build and CI",
      "labels": ["ci"]
    }
  ]
}
8  .github/workflows/cargo-publish.yml
@@ -1,8 +1,12 @@
name: Cargo Publish

on:
  release:
    types: [ published ]
  push:
    tags-ignore:
      # We don't publish pre-releases for Rust. Crates.io is just a source
      # distribution, so we don't need to publish pre-releases.
      - 'v*-beta*'
      - '*-v*' # for example, python-vX.Y.Z

env:
  # This env var is used by Swatinem/rust-cache@v2 for the cache
85  .github/workflows/java.yml  (new file)
@@ -0,0 +1,85 @@
name: Build and Run Java JNI Tests
on:
  push:
    branches:
      - main
  pull_request:
    paths:
      - java/**
      - rust/**
      - .github/workflows/java.yml
env:
  # This env var is used by Swatinem/rust-cache@v2 for the cache
  # key, so we set it to make sure it is always consistent.
  CARGO_TERM_COLOR: always
  # Disable full debug symbol generation to speed up CI build and keep memory down
  # "1" means line tables only, which is useful for panic tracebacks.
  RUSTFLAGS: "-C debuginfo=1"
  RUST_BACKTRACE: "1"
  # according to: https://matklad.github.io/2021/09/04/fast-rust-builds.html
  # CI builds are faster with incremental disabled.
  CARGO_INCREMENTAL: "0"
  CARGO_BUILD_JOBS: "1"
jobs:
  linux-build:
    runs-on: ubuntu-22.04
    name: ubuntu-22.04 + Java 11 & 17
    defaults:
      run:
        working-directory: ./java
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - uses: Swatinem/rust-cache@v2
        with:
          workspaces: java/core/lancedb-jni
      - name: Run cargo fmt
        run: cargo fmt --check
        working-directory: ./java/core/lancedb-jni
      - name: Install dependencies
        run: |
          sudo apt update
          sudo apt install -y protobuf-compiler libssl-dev
      - name: Install Java 17
        uses: actions/setup-java@v4
        with:
          distribution: temurin
          java-version: 17
          cache: "maven"
      - run: echo "JAVA_17=$JAVA_HOME" >> $GITHUB_ENV
      - name: Install Java 11
        uses: actions/setup-java@v4
        with:
          distribution: temurin
          java-version: 11
          cache: "maven"
      - name: Java Style Check
        run: mvn checkstyle:check
      # Disable because of issues in lancedb rust core code
      # - name: Rust Clippy
      #   working-directory: java/core/lancedb-jni
      #   run: cargo clippy --all-targets -- -D warnings
      - name: Running tests with Java 11
        run: mvn clean test
      - name: Running tests with Java 17
        run: |
          export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS \
            -XX:+IgnoreUnrecognizedVMOptions \
            --add-opens=java.base/java.lang=ALL-UNNAMED \
            --add-opens=java.base/java.lang.invoke=ALL-UNNAMED \
            --add-opens=java.base/java.lang.reflect=ALL-UNNAMED \
            --add-opens=java.base/java.io=ALL-UNNAMED \
            --add-opens=java.base/java.net=ALL-UNNAMED \
            --add-opens=java.base/java.nio=ALL-UNNAMED \
            --add-opens=java.base/java.util=ALL-UNNAMED \
            --add-opens=java.base/java.util.concurrent=ALL-UNNAMED \
            --add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED \
            --add-opens=java.base/jdk.internal.ref=ALL-UNNAMED \
            --add-opens=java.base/sun.nio.ch=ALL-UNNAMED \
            --add-opens=java.base/sun.nio.cs=ALL-UNNAMED \
            --add-opens=java.base/sun.security.action=ALL-UNNAMED \
            --add-opens=java.base/sun.util.calendar=ALL-UNNAMED \
            --add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED \
            -Djdk.reflect.useDirectMethodHandle=false \
            -Dio.netty.tryReflectionSetAccessible=true"
          JAVA_HOME=$JAVA_17 mvn clean test
86  .github/workflows/make-release-commit.yml
@@ -1,37 +1,62 @@
name: Create release commit

# This workflow increments versions, tags the version, and pushes it.
# When a tag is pushed, another workflow is triggered that creates a GH release
# and uploads the binaries. This workflow is only for creating the tag.

# This script will enforce that a minor version is incremented if there are any
# breaking changes since the last minor increment. However, it isn't able to
# differentiate between breaking changes in Node versus Python. If you wish to
# bypass this check, you can manually increment the version and push the tag.
on:
  workflow_dispatch:
    inputs:
      dry_run:
        description: 'Dry run (create the local commit/tags but do not push it)'
        required: true
        default: "false"
        type: choice
        options:
          - "true"
          - "false"
      part:
        default: false
        type: boolean
      type:
        description: 'What kind of release is this?'
        required: true
        default: 'patch'
        default: 'preview'
        type: choice
        options:
          - patch
          - minor
          - major
          - preview
          - stable
      python:
        description: 'Make a Python release'
        required: true
        default: true
        type: boolean
      other:
        description: 'Make a Node/Rust release'
        required: true
        default: true
        type: boolean
      bump-minor:
        description: 'Bump minor version'
        required: true
        default: false
        type: boolean

jobs:
  bump-version:
  make-release:
    # Creates tag and GH release. The GH release will trigger the build and release jobs.
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Check out main
        uses: actions/checkout@v4
      - name: Output Inputs
        run: echo "${{ toJSON(github.event.inputs) }}"
      - uses: actions/checkout@v4
        with:
          ref: main
          persist-credentials: false
          fetch-depth: 0
          lfs: true
          # It's important we use our token here, as the default token will NOT
          # trigger any workflows watching for new tags. See:
          # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
          token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
      - name: Set git configs for bumpversion
        shell: bash
        run: |
@@ -41,19 +66,34 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - name: Bump version, create tag and commit
      - name: Bump Python version
        if: ${{ inputs.python }}
        working-directory: python
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          pip install bump2version
          bumpversion --verbose ${{ inputs.part }}
      - name: Push new version and tag
        if: ${{ inputs.dry_run }} == "false"
          # Need to get the commit before bumping the version, so we can
          # determine if there are breaking changes in the next step as well.
          echo "COMMIT_BEFORE_BUMP=$(git rev-parse HEAD)" >> $GITHUB_ENV

          pip install bump-my-version PyGithub packaging
          bash ../ci/bump_version.sh ${{ inputs.type }} ${{ inputs.bump-minor }} python-v
      - name: Bump Node/Rust version
        if: ${{ inputs.other }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          pip install bump-my-version PyGithub packaging
          bash ci/bump_version.sh ${{ inputs.type }} ${{ inputs.bump-minor }} v $COMMIT_BEFORE_BUMP
      - name: Push new version tag
        if: ${{ !inputs.dry_run }}
        uses: ad-m/github-push-action@master
        with:
          # Need to use PAT here too to trigger next workflow. See comment above.
          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
          branch: main
          branch: ${{ github.ref }}
          tags: true
      - uses: ./.github/workflows/update_package_lock
        if: ${{ inputs.dry_run }} == "false"
        with:
          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
          github_token: ${{ secrets.GITHUB_TOKEN }}
99  .github/workflows/npm-publish.yml
@@ -1,8 +1,9 @@
name: NPM Publish

on:
  release:
    types: [published]
  push:
    tags:
      - 'v*'

jobs:
  node:
@@ -274,9 +275,15 @@ jobs:
        env:
          NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
        run: |
          # Tag beta as "preview" instead of default "latest". See lancedb
          # npm publish step for more info.
          if [[ $GITHUB_REF =~ refs/tags/v(.*)-beta.* ]]; then
            PUBLISH_ARGS="--tag preview"
          fi

          mv */*.tgz .
          for filename in *.tgz; do
            npm publish $filename
            npm publish $PUBLISH_ARGS $filename
          done

  release-nodejs:
@@ -316,11 +323,23 @@ jobs:
      - name: Publish to NPM
        env:
          NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
        run: npm publish --access public
        # By default, things are published to the latest tag. This is what is
        # installed by default if the user does not specify a version. This is
        # good for stable releases, but for pre-releases, we want to publish to
        # the "preview" tag so they can install with `npm install lancedb@preview`.
        # See: https://medium.com/@mbostock/prereleases-and-npm-e778fc5e2420
        run: |
          if [[ $GITHUB_REF =~ refs/tags/v(.*)-beta.* ]]; then
            npm publish --access public --tag preview
          else
            npm publish --access public
          fi

  update-package-lock:
    needs: [release]
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Checkout
        uses: actions/checkout@v4
@@ -331,11 +350,13 @@ jobs:
          lfs: true
      - uses: ./.github/workflows/update_package_lock
        with:
          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
          github_token: ${{ secrets.GITHUB_TOKEN }}

  update-package-lock-nodejs:
    needs: [release-nodejs]
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Checkout
        uses: actions/checkout@v4
@@ -346,4 +367,70 @@ jobs:
          lfs: true
      - uses: ./.github/workflows/update_package_lock_nodejs
        with:
          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
          github_token: ${{ secrets.GITHUB_TOKEN }}

  gh-release:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          lfs: true
      - name: Extract version
        id: extract_version
        env:
          GITHUB_REF: ${{ github.ref }}
        run: |
          set -e
          echo "Extracting tag and version from $GITHUB_REF"
          if [[ $GITHUB_REF =~ refs/tags/v(.*) ]]; then
            VERSION=${BASH_REMATCH[1]}
            TAG=v$VERSION
            echo "tag=$TAG" >> $GITHUB_OUTPUT
            echo "version=$VERSION" >> $GITHUB_OUTPUT
          else
            echo "Failed to extract version from $GITHUB_REF"
            exit 1
          fi
          echo "Extracted version $VERSION from $GITHUB_REF"
          if [[ $VERSION =~ beta ]]; then
            echo "This is a beta release"

            # Get last release (that is not this one)
            FROM_TAG=$(git tag --sort='version:refname' \
              | grep ^v \
              | grep -vF "$TAG" \
              | python ci/semver_sort.py v \
              | tail -n 1)
          else
            echo "This is a stable release"
            # Get last stable tag (ignore betas)
            FROM_TAG=$(git tag --sort='version:refname' \
              | grep ^v \
              | grep -vF "$TAG" \
              | grep -v beta \
              | python ci/semver_sort.py v \
              | tail -n 1)
          fi
          echo "Found from tag $FROM_TAG"
          echo "from_tag=$FROM_TAG" >> $GITHUB_OUTPUT
      - name: Create Release Notes
        id: release_notes
        uses: mikepenz/release-changelog-builder-action@v4
        with:
          configuration: .github/release_notes.json
          toTag: ${{ steps.extract_version.outputs.tag }}
          fromTag: ${{ steps.extract_version.outputs.from_tag }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Create GH release
        uses: softprops/action-gh-release@v2
        with:
          prerelease: ${{ contains(github.ref, 'beta') }}
          tag_name: ${{ steps.extract_version.outputs.tag }}
          token: ${{ secrets.GITHUB_TOKEN }}
          generate_release_notes: false
          name: Node/Rust LanceDB v${{ steps.extract_version.outputs.version }}
          body: ${{ steps.release_notes.outputs.changelog }}
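A side note on the tag handling above: the same `refs/tags/v…` matching drives both the npm dist-tag choice and the GH release step. A small Python stand-in for those bash `=~` checks (the tag names here are invented for illustration):

```python
import re

# Hypothetical refs; the patterns mirror the unanchored bash checks
# `[[ $GITHUB_REF =~ refs/tags/v(.*) ]]` and `[[ $VERSION =~ beta ]]` above.
for ref in ["refs/tags/v0.5.0", "refs/tags/v0.5.1-beta.2", "refs/tags/python-v0.6.0"]:
    m = re.search(r"refs/tags/v(.*)", ref)
    if not m:  # python-v* tags never match the literal "refs/tags/v" prefix
        print(f"{ref}: not a Node/Rust version tag")
        continue
    version = m.group(1)
    channel = "preview (npm dist-tag)" if "beta" in version else "latest"
    print(f"{ref}: version={version}, publishes to {channel}")
```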
107  .github/workflows/pypi-publish.yml
@@ -1,18 +1,16 @@
name: PyPI Publish

on:
  release:
    types: [published]
  push:
    tags:
      - 'python-v*'

jobs:
  linux:
    # Only runs on tags that match the python-make-release action
    if: startsWith(github.ref, 'refs/tags/python-v')
    name: Python ${{ matrix.config.platform }} manylinux${{ matrix.config.manylinux }}
    timeout-minutes: 60
    strategy:
      matrix:
        python-minor-version: ["8"]
        config:
          - platform: x86_64
            manylinux: "2_17"
@@ -34,25 +32,22 @@ jobs:
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.${{ matrix.python-minor-version }}
          python-version: 3.8
      - uses: ./.github/workflows/build_linux_wheel
        with:
          python-minor-version: ${{ matrix.python-minor-version }}
          python-minor-version: 8
          args: "--release --strip ${{ matrix.config.extra_args }}"
          arm-build: ${{ matrix.config.platform == 'aarch64' }}
          manylinux: ${{ matrix.config.manylinux }}
      - uses: ./.github/workflows/upload_wheel
        with:
          token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
          repo: "pypi"
          pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
          fury_token: ${{ secrets.FURY_TOKEN }}
  mac:
    # Only runs on tags that match the python-make-release action
    if: startsWith(github.ref, 'refs/tags/python-v')
    timeout-minutes: 60
    runs-on: ${{ matrix.config.runner }}
    strategy:
      matrix:
        python-minor-version: ["8"]
        config:
          - target: x86_64-apple-darwin
            runner: macos-13
@@ -63,7 +58,6 @@ jobs:
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.ref }}
          fetch-depth: 0
          lfs: true
      - name: Set up Python
@@ -72,38 +66,95 @@ jobs:
          python-version: 3.12
      - uses: ./.github/workflows/build_mac_wheel
        with:
          python-minor-version: ${{ matrix.python-minor-version }}
          python-minor-version: 8
          args: "--release --strip --target ${{ matrix.config.target }} --features fp16kernels"
      - uses: ./.github/workflows/upload_wheel
        with:
          python-minor-version: ${{ matrix.python-minor-version }}
          token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
          repo: "pypi"
          pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
          fury_token: ${{ secrets.FURY_TOKEN }}
  windows:
    # Only runs on tags that match the python-make-release action
    if: startsWith(github.ref, 'refs/tags/python-v')
    timeout-minutes: 60
    runs-on: windows-latest
    strategy:
      matrix:
        python-minor-version: ["8"]
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.ref }}
          fetch-depth: 0
          lfs: true
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.${{ matrix.python-minor-version }}
          python-version: 3.8
      - uses: ./.github/workflows/build_windows_wheel
        with:
          python-minor-version: ${{ matrix.python-minor-version }}
          python-minor-version: 8
          args: "--release --strip"
          vcpkg_token: ${{ secrets.VCPKG_GITHUB_PACKAGES }}
      - uses: ./.github/workflows/upload_wheel
        with:
          python-minor-version: ${{ matrix.python-minor-version }}
          token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
          repo: "pypi"
          pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
          fury_token: ${{ secrets.FURY_TOKEN }}
  gh-release:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          lfs: true
      - name: Extract version
        id: extract_version
        env:
          GITHUB_REF: ${{ github.ref }}
        run: |
          set -e
          echo "Extracting tag and version from $GITHUB_REF"
          if [[ $GITHUB_REF =~ refs/tags/python-v(.*) ]]; then
            VERSION=${BASH_REMATCH[1]}
            TAG=python-v$VERSION
            echo "tag=$TAG" >> $GITHUB_OUTPUT
            echo "version=$VERSION" >> $GITHUB_OUTPUT
          else
            echo "Failed to extract version from $GITHUB_REF"
            exit 1
          fi
          echo "Extracted version $VERSION from $GITHUB_REF"
          if [[ $VERSION =~ beta ]]; then
            echo "This is a beta release"

            # Get last release (that is not this one)
            FROM_TAG=$(git tag --sort='version:refname' \
              | grep ^python-v \
              | grep -vF "$TAG" \
              | python ci/semver_sort.py python-v \
              | tail -n 1)
          else
            echo "This is a stable release"
            # Get last stable tag (ignore betas)
            FROM_TAG=$(git tag --sort='version:refname' \
              | grep ^python-v \
              | grep -vF "$TAG" \
              | grep -v beta \
              | python ci/semver_sort.py python-v \
              | tail -n 1)
          fi
          echo "Found from tag $FROM_TAG"
          echo "from_tag=$FROM_TAG" >> $GITHUB_OUTPUT
      - name: Create Python Release Notes
        id: python_release_notes
        uses: mikepenz/release-changelog-builder-action@v4
        with:
          configuration: .github/release_notes.json
          toTag: ${{ steps.extract_version.outputs.tag }}
          fromTag: ${{ steps.extract_version.outputs.from_tag }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Create Python GH release
        uses: softprops/action-gh-release@v2
        with:
          prerelease: ${{ contains(github.ref, 'beta') }}
          tag_name: ${{ steps.extract_version.outputs.tag }}
          token: ${{ secrets.GITHUB_TOKEN }}
          generate_release_notes: false
          name: Python LanceDB v${{ steps.extract_version.outputs.version }}
          body: ${{ steps.python_release_notes.outputs.changelog }}
56  .github/workflows/python-make-release-commit.yml  (deleted)
@@ -1,56 +0,0 @@
name: Python - Create release commit

on:
  workflow_dispatch:
    inputs:
      dry_run:
        description: 'Dry run (create the local commit/tags but do not push it)'
        required: true
        default: "false"
        type: choice
        options:
          - "true"
          - "false"
      part:
        description: 'What kind of release is this?'
        required: true
        default: 'patch'
        type: choice
        options:
          - patch
          - minor
          - major

jobs:
  bump-version:
    runs-on: ubuntu-latest
    steps:
      - name: Check out main
        uses: actions/checkout@v4
        with:
          ref: main
          persist-credentials: false
          fetch-depth: 0
          lfs: true
      - name: Set git configs for bumpversion
        shell: bash
        run: |
          git config user.name 'Lance Release'
          git config user.email 'lance-dev@lancedb.com'
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - name: Bump version, create tag and commit
        working-directory: python
        run: |
          pip install bump2version
          bumpversion --verbose ${{ inputs.part }}
      - name: Push new version and tag
        if: ${{ inputs.dry_run }} == "false"
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
          branch: main
          tags: true
2  .github/workflows/python.yml
@@ -75,7 +75,7 @@ jobs:
    timeout-minutes: 30
    strategy:
      matrix:
        python-minor-version: ["8", "11"]
        python-minor-version: ["9", "11"]
    runs-on: "ubuntu-22.04"
    defaults:
      run:
4  .github/workflows/rust.yml
@@ -74,11 +74,11 @@ jobs:
        run: |
          sudo apt update
          sudo apt install -y protobuf-compiler libssl-dev
      - name: Build
        run: cargo build --all-features
      - name: Start S3 integration test environment
        working-directory: .
        run: docker compose up --detach --wait
      - name: Build
        run: cargo build --all-features
      - name: Run tests
        run: cargo test --all-features
      - name: Run examples
53  .github/workflows/upload_wheel/action.yml
@@ -2,28 +2,43 @@ name: upload-wheel

description: "Upload wheels to Pypi"
inputs:
  os:
    required: true
    description: "ubuntu-22.04 or macos-13"
  repo:
    required: false
    description: "pypi or testpypi"
    default: "pypi"
  token:
  pypi_token:
    required: true
    description: "release token for the repo"
  fury_token:
    required: true
    description: "release token for the fury repo"

runs:
  using: "composite"
  steps:
    - name: Install dependencies
      shell: bash
      run: |
        python -m pip install --upgrade pip
        pip install twine
    - name: Publish wheel
      env:
        TWINE_USERNAME: __token__
        TWINE_PASSWORD: ${{ inputs.token }}
      shell: bash
      run: twine upload --repository ${{ inputs.repo }} target/wheels/lancedb-*.whl
    - name: Install dependencies
      shell: bash
      run: |
        python -m pip install --upgrade pip
        pip install twine
    - name: Choose repo
      shell: bash
      id: choose_repo
      run: |
        if [[ ${{ github.ref }} == *beta* ]]; then
          echo "repo=fury" >> $GITHUB_OUTPUT
        else
          echo "repo=pypi" >> $GITHUB_OUTPUT
        fi
    - name: Publish to PyPI
      shell: bash
      env:
        FURY_TOKEN: ${{ inputs.fury_token }}
        PYPI_TOKEN: ${{ inputs.pypi_token }}
      run: |
        if [ ${{ steps.choose_repo.outputs.repo }} == "fury" ]; then
          WHEEL=$(ls target/wheels/lancedb-*.whl 2> /dev/null | head -n 1)
          echo "Uploading $WHEEL to Fury"
          curl -f -F package=@$WHEEL https://$FURY_TOKEN@push.fury.io/lancedb/
        else
          twine upload --repository ${{ steps.choose_repo.outputs.repo }} \
            --username __token__ \
            --password $PYPI_TOKEN \
            target/wheels/lancedb-*.whl
        fi
10  Cargo.toml
@@ -1,5 +1,5 @@
[workspace]
members = ["rust/ffi/node", "rust/lancedb", "nodejs", "python"]
members = ["rust/ffi/node", "rust/lancedb", "nodejs", "python", "java/core/lancedb-jni"]
# Python package needs to be built by maturin.
exclude = ["python"]
resolver = "2"
@@ -14,10 +14,10 @@ keywords = ["lancedb", "lance", "database", "vector", "search"]
categories = ["database-implementations"]

[workspace.dependencies]
lance = { "version" = "=0.10.18", "features" = ["dynamodb"] }
lance-index = { "version" = "=0.10.18" }
lance-linalg = { "version" = "=0.10.18" }
lance-testing = { "version" = "=0.10.18" }
lance = { "version" = "=0.11.0", "features" = ["dynamodb"] }
lance-index = { "version" = "=0.11.0" }
lance-linalg = { "version" = "=0.11.0" }
lance-testing = { "version" = "=0.11.0" }
# Note that this one does not include pyarrow
arrow = { version = "51.0", optional = false }
arrow-array = "51.0"
51  ci/bump_version.sh  (new file)
@@ -0,0 +1,51 @@
set -e

RELEASE_TYPE=${1:-"stable"}
BUMP_MINOR=${2:-false}
TAG_PREFIX=${3:-"v"} # Such as "python-v"
HEAD_SHA=${4:-$(git rev-parse HEAD)}

readonly SELF_DIR=$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

PREV_TAG=$(git tag --sort='version:refname' | grep ^$TAG_PREFIX | python $SELF_DIR/semver_sort.py $TAG_PREFIX | tail -n 1)
echo "Found previous tag $PREV_TAG"

# Initially, we don't want to tag if we are doing stable, because we will bump
# again later. See comment at end for why.
if [[ "$RELEASE_TYPE" == 'stable' ]]; then
  BUMP_ARGS="--no-tag"
fi

# If last is stable and not bumping minor
if [[ $PREV_TAG != *beta* ]]; then
  if [[ "$BUMP_MINOR" != "false" ]]; then
    # X.Y.Z -> X.(Y+1).0-beta.0
    bump-my-version bump -vv $BUMP_ARGS minor
  else
    # X.Y.Z -> X.Y.(Z+1)-beta.0
    bump-my-version bump -vv $BUMP_ARGS patch
  fi
else
  if [[ "$BUMP_MINOR" != "false" ]]; then
    # X.Y.Z-beta.N -> X.(Y+1).0-beta.0
    bump-my-version bump -vv $BUMP_ARGS minor
  else
    # X.Y.Z-beta.N -> X.Y.Z-beta.(N+1)
    bump-my-version bump -vv $BUMP_ARGS pre_n
  fi
fi

# The above bump will always bump to a pre-release version. If we are releasing
# a stable version, bump the pre-release level ("pre_l") to make it stable.
if [[ $RELEASE_TYPE == 'stable' ]]; then
  # X.Y.Z-beta.N -> X.Y.Z
  bump-my-version bump -vv pre_l
fi

# Validate that we have incremented version appropriately for breaking changes
NEW_TAG=$(git describe --tags --exact-match HEAD)
NEW_VERSION=$(echo $NEW_TAG | sed "s/^$TAG_PREFIX//")
LAST_STABLE_RELEASE=$(git tag --sort='version:refname' | grep ^$TAG_PREFIX | grep -v beta | grep -vF "$NEW_TAG" | python $SELF_DIR/semver_sort.py $TAG_PREFIX | tail -n 1)
LAST_STABLE_VERSION=$(echo $LAST_STABLE_RELEASE | sed "s/^$TAG_PREFIX//")

python $SELF_DIR/check_breaking_changes.py $LAST_STABLE_RELEASE $HEAD_SHA $LAST_STABLE_VERSION $NEW_VERSION
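For readability, here are the net version transitions the branches above produce, sketched in Python. This table is derived from the comments in the script, not from running `bump-my-version`, and the concrete version numbers are illustrative:

```python
# Expected net transitions for ci/bump_version.sh (illustrative, not run in CI).
transitions = {
    # (previous tag stable?, bump minor?, release type): (before, after)
    (True,  False, "preview"): ("0.5.0",        "0.5.1-beta.0"),  # patch bump
    (True,  True,  "preview"): ("0.5.0",        "0.6.0-beta.0"),  # minor bump
    (False, False, "preview"): ("0.5.1-beta.0", "0.5.1-beta.1"),  # pre_n bump
    (False, True,  "preview"): ("0.5.1-beta.0", "0.6.0-beta.0"),  # minor bump
    # "stable" does the same first bump, then drops the pre-release via pre_l,
    # so the net effect is finalizing the version:
    (False, False, "stable"):  ("0.5.1-beta.1", "0.5.1"),
}
for (prev_stable, bump_minor, kind), (before, after) in transitions.items():
    print(f"{kind:7} prev_stable={prev_stable!s:5} bump_minor={bump_minor!s:5}: {before} -> {after}")
```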
35  ci/check_breaking_changes.py  (new file)
@@ -0,0 +1,35 @@
"""
Check whether there are any breaking changes in the PRs between the base and head commits.
If there are, assert that we have incremented the minor version.
"""
import argparse
import os
from packaging.version import parse

from github import Github

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("base")
    parser.add_argument("head")
    parser.add_argument("last_stable_version")
    parser.add_argument("current_version")
    args = parser.parse_args()

    repo = Github(os.environ["GITHUB_TOKEN"]).get_repo(os.environ["GITHUB_REPOSITORY"])
    commits = repo.compare(args.base, args.head).commits
    prs = (pr for commit in commits for pr in commit.get_pulls())

    for pr in prs:
        if any(label.name == "breaking-change" for label in pr.labels):
            print(f"Breaking change in PR: {pr.html_url}")
            break
    else:
        print("No breaking changes found.")
        exit(0)

    last_stable_version = parse(args.last_stable_version)
    current_version = parse(args.current_version)
    if current_version.minor <= last_stable_version.minor:
        print("Minor version is not greater than the last stable version.")
        exit(1)
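One subtlety worth noting: `packaging.version.parse` understands the `-beta.N` suffix, so the minor-version comparison above also works when the new release is a pre-release. An illustrative check (sample versions invented, not from the commit):

```python
from packaging.version import parse

# ci/check_breaking_changes.py only compares .minor, so a breaking change
# released as 0.6.0-beta.0 after stable 0.5.3 passes the check:
assert parse("0.6.0-beta.0").minor > parse("0.5.3").minor
# ...while a patch-level beta after the same stable release would fail it:
assert not (parse("0.5.4-beta.0").minor > parse("0.5.3").minor)
```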
35  ci/semver_sort.py  (new file)
@@ -0,0 +1,35 @@
"""
Takes a list of semver strings and sorts them in ascending order.
"""

import sys
from packaging.version import parse, InvalidVersion

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("prefix", default="v")
    args = parser.parse_args()

    # Read the input from stdin
    lines = sys.stdin.readlines()

    # Parse the versions
    versions = []
    for line in lines:
        line = line.strip()
        try:
            version_str = line.removeprefix(args.prefix)
            version = parse(version_str)
        except InvalidVersion:
            # There are old tags that don't follow the semver format
            print(f"Invalid version: {line}", file=sys.stderr)
            continue
        versions.append((line, version))

    # Sort the versions
    versions.sort(key=lambda x: x[1])

    # Print the sorted versions as original strings
    for line, _ in versions:
        print(line)
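Why this script exists at all: `git tag --sort='version:refname'` does not, by default, sort pre-release tags before their corresponding final release, while PEP 440 ordering does. A sketch of the ordering the release workflows rely on (tag names invented for illustration):

```python
from packaging.version import parse

tags = ["v0.5.0-beta.2", "v0.5.0", "v0.4.9"]
# PEP 440 orders pre-releases *before* the matching final release,
# so v0.5.0 is correctly picked as the latest tag:
ordered = sorted(tags, key=lambda t: parse(t.removeprefix("v")))
assert ordered == ["v0.4.9", "v0.5.0-beta.2", "v0.5.0"]
print(ordered[-1])  # v0.5.0
```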
@@ -44,6 +44,36 @@

!!! info "Please also make sure you're using the same version of Arrow as in the [lancedb crate](https://github.com/lancedb/lancedb/blob/main/Cargo.toml)"

### Preview releases

Stable releases are created about every 2 weeks. For the latest features and bug
fixes, you can install the preview release. These releases receive the same
level of testing as stable releases, but are not guaranteed to be available for
more than 6 months after they are released. Once your application is stable, we
recommend switching to stable releases.

=== "Python"

    ```shell
    pip install --pre --extra-index-url https://pypi.fury.io/lancedb/ lancedb
    ```

=== "Typescript"

    ```shell
    npm install vectordb@preview
    ```

=== "Rust"

    We don't push preview releases to crates.io, but you can reference the tag
    in GitHub within your Cargo dependencies:

    ```toml
    [dependencies]
    lancedb = { git = "https://github.com/lancedb/lancedb.git", tag = "vX.Y.Z-beta.N" }
    ```

## Connect to a database

=== "Python"
27  java/core/lancedb-jni/Cargo.toml  (new file)
@@ -0,0 +1,27 @@
[package]
name = "lancedb-jni"
description = "JNI bindings for LanceDB"
# TODO modify lancedb/Cargo.toml for version and dependencies
version = "0.4.18"
edition.workspace = true
repository.workspace = true
readme.workspace = true
license.workspace = true
keywords.workspace = true
categories.workspace = true
publish = false

[lib]
crate-type = ["cdylib"]

[dependencies]
lancedb = { path = "../../../rust/lancedb" }
lance = { workspace = true }
arrow = { workspace = true, features = ["ffi"] }
arrow-schema.workspace = true
tokio = "1.23"
jni = "0.21.1"
snafu.workspace = true
lazy_static.workspace = true
serde = { version = "^1" }
serde_json = { version = "1" }
130  java/core/lancedb-jni/src/connection.rs  (new file)
@@ -0,0 +1,130 @@
use crate::ffi::JNIEnvExt;
use crate::traits::IntoJava;
use crate::{Error, RT};
use jni::objects::{JObject, JString, JValue};
use jni::JNIEnv;
pub const NATIVE_CONNECTION: &str = "nativeConnectionHandle";
use crate::Result;
use lancedb::connection::{connect, Connection};

#[derive(Clone)]
pub struct BlockingConnection {
    pub(crate) inner: Connection,
}

impl BlockingConnection {
    pub fn create(dataset_uri: &str) -> Result<Self> {
        let inner = RT.block_on(connect(dataset_uri).execute())?;
        Ok(Self { inner })
    }

    pub fn table_names(
        &self,
        start_after: Option<String>,
        limit: Option<i32>,
    ) -> Result<Vec<String>> {
        let mut op = self.inner.table_names();
        if let Some(start_after) = start_after {
            op = op.start_after(start_after);
        }
        if let Some(limit) = limit {
            op = op.limit(limit as u32);
        }
        Ok(RT.block_on(op.execute())?)
    }
}

impl IntoJava for BlockingConnection {
    fn into_java<'a>(self, env: &mut JNIEnv<'a>) -> JObject<'a> {
        attach_native_connection(env, self)
    }
}

fn attach_native_connection<'local>(
    env: &mut JNIEnv<'local>,
    connection: BlockingConnection,
) -> JObject<'local> {
    let j_connection = create_java_connection_object(env);
    // This block sets a native Rust object (Connection) as a field in the Java object (j_Connection).
    // Caution: This creates a potential for memory leaks. The Rust object (Connection) is not
    // automatically garbage-collected by Java, and its memory will not be freed unless
    // explicitly handled.
    //
    // To prevent memory leaks, ensure the following:
    // 1. The Java object (`j_Connection`) should implement the `java.io.Closeable` interface.
    // 2. Users of this Java object should be instructed to always use it within a try-with-resources
    //    statement (or manually call the `close()` method) to ensure that `self.close()` is invoked.
    match unsafe { env.set_rust_field(&j_connection, NATIVE_CONNECTION, connection) } {
        Ok(_) => j_connection,
        Err(err) => {
            env.throw_new(
                "java/lang/RuntimeException",
                format!("Failed to set native handle for Connection: {}", err),
            )
            .expect("Error throwing exception");
            JObject::null()
        }
    }
}

fn create_java_connection_object<'a>(env: &mut JNIEnv<'a>) -> JObject<'a> {
    env.new_object("com/lancedb/lancedb/Connection", "()V", &[])
        .expect("Failed to create Java Lance Connection instance")
}

#[no_mangle]
pub extern "system" fn Java_com_lancedb_lancedb_Connection_releaseNativeConnection(
    mut env: JNIEnv,
    j_connection: JObject,
) {
    let _: BlockingConnection = unsafe {
        env.take_rust_field(j_connection, NATIVE_CONNECTION)
            .expect("Failed to take native Connection handle")
    };
}

#[no_mangle]
pub extern "system" fn Java_com_lancedb_lancedb_Connection_connect<'local>(
    mut env: JNIEnv<'local>,
    _obj: JObject,
    dataset_uri_object: JString,
) -> JObject<'local> {
    let dataset_uri: String = ok_or_throw!(env, env.get_string(&dataset_uri_object)).into();
    let blocking_connection = ok_or_throw!(env, BlockingConnection::create(&dataset_uri));
    blocking_connection.into_java(&mut env)
}

#[no_mangle]
pub extern "system" fn Java_com_lancedb_lancedb_Connection_tableNames<'local>(
    mut env: JNIEnv<'local>,
    j_connection: JObject,
    start_after_obj: JObject, // Optional<String>
    limit_obj: JObject,       // Optional<Integer>
) -> JObject<'local> {
    ok_or_throw!(
        env,
        inner_table_names(&mut env, j_connection, start_after_obj, limit_obj)
    )
}

fn inner_table_names<'local>(
    env: &mut JNIEnv<'local>,
    j_connection: JObject,
    start_after_obj: JObject, // Optional<String>
    limit_obj: JObject,       // Optional<Integer>
) -> Result<JObject<'local>> {
    let start_after = env.get_string_opt(&start_after_obj)?;
    let limit = env.get_int_opt(&limit_obj)?;
    let conn =
        unsafe { env.get_rust_field::<_, _, BlockingConnection>(j_connection, NATIVE_CONNECTION) }?;
    let table_names = conn.table_names(start_after, limit)?;
    drop(conn);
    let j_names = env.new_object("java/util/ArrayList", "()V", &[])?;
    for item in table_names {
        let jstr_item = env.new_string(item)?;
        let item_jobj = JObject::from(jstr_item);
        let item_gen = JValue::Object(&item_jobj);
        env.call_method(&j_names, "add", "(Ljava/lang/Object;)Z", &[item_gen])?;
    }
    Ok(j_names)
}
225  java/core/lancedb-jni/src/error.rs  (new file)
@@ -0,0 +1,225 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::str::Utf8Error;

use arrow_schema::ArrowError;
use jni::errors::Error as JniError;
use serde_json::Error as JsonError;
use snafu::{Location, Snafu};

type BoxedError = Box<dyn std::error::Error + Send + Sync + 'static>;

/// Java Exception types
pub enum JavaException {
    IllegalArgumentException,
    IOException,
    RuntimeException,
}

impl JavaException {
    pub fn as_str(&self) -> &str {
        match self {
            Self::IllegalArgumentException => "java/lang/IllegalArgumentException",
            Self::IOException => "java/io/IOException",
            Self::RuntimeException => "java/lang/RuntimeException",
        }
    }
}
/// TODO(lu) change to lancedb-jni
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    #[snafu(display("JNI error: {message}, {location}"))]
    Jni { message: String, location: Location },
    #[snafu(display("Invalid argument: {message}, {location}"))]
    InvalidArgument { message: String, location: Location },
    #[snafu(display("IO error: {source}, {location}"))]
    IO {
        source: BoxedError,
        location: Location,
    },
    #[snafu(display("Arrow error: {message}, {location}"))]
    Arrow { message: String, location: Location },
    #[snafu(display("Index error: {message}, {location}"))]
    Index { message: String, location: Location },
    #[snafu(display("JSON error: {message}, {location}"))]
    JSON { message: String, location: Location },
    #[snafu(display("Dataset at path {path} was not found, {location}"))]
    DatasetNotFound { path: String, location: Location },
    #[snafu(display("Dataset already exists: {uri}, {location}"))]
    DatasetAlreadyExists { uri: String, location: Location },
    #[snafu(display("Table '{name}' already exists"))]
    TableAlreadyExists { name: String },
    #[snafu(display("Table '{name}' was not found"))]
    TableNotFound { name: String },
    #[snafu(display("Invalid table name '{name}': {reason}"))]
    InvalidTableName { name: String, reason: String },
    #[snafu(display("Embedding function '{name}' was not found: {reason}, {location}"))]
    EmbeddingFunctionNotFound {
        name: String,
        reason: String,
        location: Location,
    },
    #[snafu(display("Other Lance error: {message}, {location}"))]
    OtherLance { message: String, location: Location },
    #[snafu(display("Other LanceDB error: {message}, {location}"))]
    OtherLanceDB { message: String, location: Location },
}

impl Error {
    /// Throw as Java Exception
    pub fn throw(&self, env: &mut jni::JNIEnv) {
        match self {
            Self::InvalidArgument { .. }
            | Self::DatasetNotFound { .. }
            | Self::DatasetAlreadyExists { .. }
            | Self::TableAlreadyExists { .. }
            | Self::TableNotFound { .. }
            | Self::InvalidTableName { .. }
            | Self::EmbeddingFunctionNotFound { .. } => {
                self.throw_as(env, JavaException::IllegalArgumentException)
            }
            Self::IO { .. } | Self::Index { .. } => self.throw_as(env, JavaException::IOException),
            Self::Arrow { .. }
            | Self::JSON { .. }
            | Self::OtherLance { .. }
            | Self::OtherLanceDB { .. }
            | Self::Jni { .. } => self.throw_as(env, JavaException::RuntimeException),
        }
    }

    /// Throw as a concrete Java Exception
    pub fn throw_as(&self, env: &mut jni::JNIEnv, exception: JavaException) {
        let message = &format!(
            "Error when throwing Java exception: {}:{}",
            exception.as_str(),
            self
        );
        env.throw_new(exception.as_str(), self.to_string())
            .expect(message);
    }
}

pub type Result<T> = std::result::Result<T, Error>;

trait ToSnafuLocation {
    fn to_snafu_location(&'static self) -> snafu::Location;
}

impl ToSnafuLocation for std::panic::Location<'static> {
    fn to_snafu_location(&'static self) -> snafu::Location {
        snafu::Location::new(self.file(), self.line(), self.column())
    }
}

impl From<JniError> for Error {
    #[track_caller]
    fn from(source: JniError) -> Self {
        Self::Jni {
            message: source.to_string(),
            location: std::panic::Location::caller().to_snafu_location(),
        }
    }
}

impl From<Utf8Error> for Error {
    #[track_caller]
    fn from(source: Utf8Error) -> Self {
        Self::InvalidArgument {
            message: source.to_string(),
            location: std::panic::Location::caller().to_snafu_location(),
        }
    }
}

impl From<ArrowError> for Error {
    #[track_caller]
    fn from(source: ArrowError) -> Self {
        Self::Arrow {
            message: source.to_string(),
            location: std::panic::Location::caller().to_snafu_location(),
        }
    }
}

impl From<JsonError> for Error {
    #[track_caller]
    fn from(source: JsonError) -> Self {
        Self::JSON {
            message: source.to_string(),
            location: std::panic::Location::caller().to_snafu_location(),
        }
    }
}

impl From<lance::Error> for Error {
    #[track_caller]
    fn from(source: lance::Error) -> Self {
        match source {
            lance::Error::DatasetNotFound {
                path,
                source: _,
                location,
            } => Self::DatasetNotFound { path, location },
            lance::Error::DatasetAlreadyExists { uri, location } => {
                Self::DatasetAlreadyExists { uri, location }
            }
            lance::Error::IO { source, location } => Self::IO { source, location },
            lance::Error::Arrow { message, location } => Self::Arrow { message, location },
            lance::Error::Index { message, location } => Self::Index { message, location },
            lance::Error::InvalidInput { source, location } => Self::InvalidArgument {
                message: source.to_string(),
                location,
            },
            _ => Self::OtherLance {
                message: source.to_string(),
                location: std::panic::Location::caller().to_snafu_location(),
            },
        }
    }
}

impl From<lancedb::Error> for Error {
    #[track_caller]
    fn from(source: lancedb::Error) -> Self {
        match source {
            lancedb::Error::InvalidTableName { name, reason } => {
                Self::InvalidTableName { name, reason }
            }
            lancedb::Error::InvalidInput { message } => Self::InvalidArgument {
                message,
                location: std::panic::Location::caller().to_snafu_location(),
            },
            lancedb::Error::TableNotFound { name } => Self::TableNotFound { name },
            lancedb::Error::TableAlreadyExists { name } => Self::TableAlreadyExists { name },
            lancedb::Error::EmbeddingFunctionNotFound { name, reason } => {
                Self::EmbeddingFunctionNotFound {
                    name,
                    reason,
                    location: std::panic::Location::caller().to_snafu_location(),
                }
            }
            lancedb::Error::Arrow { source } => Self::Arrow {
                message: source.to_string(),
                location: std::panic::Location::caller().to_snafu_location(),
            },
            lancedb::Error::Lance { source } => Self::from(source),
            _ => Self::OtherLanceDB {
                message: source.to_string(),
                location: std::panic::Location::caller().to_snafu_location(),
            },
        }
    }
}
204
java/core/lancedb-jni/src/ffi.rs
Normal file
204
java/core/lancedb-jni/src/ffi.rs
Normal file
@@ -0,0 +1,204 @@
|
||||
// Copyright 2024 Lance Developers.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use core::slice;
|
||||
|
||||
use jni::objects::{JByteBuffer, JObjectArray, JString};
|
||||
use jni::sys::jobjectArray;
|
||||
use jni::{objects::JObject, JNIEnv};
|
||||
|
||||
use crate::error::{Error, Result};
|
||||
|
||||
/// TODO(lu) import from lance-jni without duplicate
|
||||
/// Extend JNIEnv with helper functions.
|
||||
pub trait JNIEnvExt {
|
||||
/// Get integers from Java List<Integer> object.
|
||||
fn get_integers(&mut self, obj: &JObject) -> Result<Vec<i32>>;
|
||||
|
||||
/// Get strings from Java List<String> object.
|
||||
fn get_strings(&mut self, obj: &JObject) -> Result<Vec<String>>;
|
||||
|
||||
/// Get strings from Java String[] object.
|
||||
/// Note that get Option<Vec<String>> from Java Optional<String[]> just doesn't work.
|
||||
#[allow(unused)]
|
||||
fn get_strings_array(&mut self, obj: jobjectArray) -> Result<Vec<String>>;
|
||||
|
||||
/// Get Option<String> from Java Optional<String>.
|
||||
fn get_string_opt(&mut self, obj: &JObject) -> Result<Option<String>>;
|
||||
|
||||
/// Get Option<Vec<String>> from Java Optional<List<String>>.
|
||||
#[allow(unused)]
|
||||
fn get_strings_opt(&mut self, obj: &JObject) -> Result<Option<Vec<String>>>;
|
||||
|
||||
/// Get Option<i32> from Java Optional<Integer>.
|
||||
fn get_int_opt(&mut self, obj: &JObject) -> Result<Option<i32>>;
|
||||
|
||||
/// Get Option<Vec<i32>> from Java Optional<List<Integer>>.
|
||||
fn get_ints_opt(&mut self, obj: &JObject) -> Result<Option<Vec<i32>>>;
|
||||
|
||||
/// Get Option<i64> from Java Optional<Long>.
|
||||
#[allow(unused)]
|
||||
fn get_long_opt(&mut self, obj: &JObject) -> Result<Option<i64>>;
|
||||
|
||||
/// Get Option<u64> from Java Optional<Long>.
|
||||
#[allow(unused)]
|
||||
fn get_u64_opt(&mut self, obj: &JObject) -> Result<Option<u64>>;
|
||||
|
||||
/// Get Option<&[u8]> from Java Optional<ByteBuffer>.
|
||||
#[allow(unused)]
|
||||
fn get_bytes_opt(&mut self, obj: &JObject) -> Result<Option<&[u8]>>;
|
||||
|
||||
fn get_optional<T, F>(&mut self, obj: &JObject, f: F) -> Result<Option<T>>
|
||||
where
|
||||
F: FnOnce(&mut JNIEnv, &JObject) -> Result<T>;
|
||||
}
|
||||
|
||||
impl JNIEnvExt for JNIEnv<'_> {
|
||||
fn get_integers(&mut self, obj: &JObject) -> Result<Vec<i32>> {
|
||||
let list = self.get_list(obj)?;
|
||||
let mut iter = list.iter(self)?;
|
||||
let mut results = Vec::with_capacity(list.size(self)? as usize);
|
||||
        while let Some(elem) = iter.next(self)? {
            let int_obj = self.call_method(elem, "intValue", "()I", &[])?;
            let int_value = int_obj.i()?;
            results.push(int_value);
        }
        Ok(results)
    }

    fn get_strings(&mut self, obj: &JObject) -> Result<Vec<String>> {
        let list = self.get_list(obj)?;
        let mut iter = list.iter(self)?;
        let mut results = Vec::with_capacity(list.size(self)? as usize);
        while let Some(elem) = iter.next(self)? {
            let jstr = JString::from(elem);
            let val = self.get_string(&jstr)?;
            results.push(val.to_str()?.to_string());
        }
        Ok(results)
    }

    fn get_strings_array(&mut self, obj: jobjectArray) -> Result<Vec<String>> {
        let jobject_array = unsafe { JObjectArray::from_raw(obj) };
        let array_len = self.get_array_length(&jobject_array)?;
        let mut res: Vec<String> = Vec::new();
        for i in 0..array_len {
            let item: JString = self.get_object_array_element(&jobject_array, i)?.into();
            res.push(self.get_string(&item)?.into());
        }
        Ok(res)
    }

    fn get_string_opt(&mut self, obj: &JObject) -> Result<Option<String>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_string_obj = java_obj_gen.l()?;
            let jstr = JString::from(java_string_obj);
            let val = env.get_string(&jstr)?;
            Ok(val.to_str()?.to_string())
        })
    }

    fn get_strings_opt(&mut self, obj: &JObject) -> Result<Option<Vec<String>>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_list_obj = java_obj_gen.l()?;
            env.get_strings(&java_list_obj)
        })
    }

    fn get_int_opt(&mut self, obj: &JObject) -> Result<Option<i32>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_int_obj = java_obj_gen.l()?;
            let int_obj = env.call_method(java_int_obj, "intValue", "()I", &[])?;
            let int_value = int_obj.i()?;
            Ok(int_value)
        })
    }

    fn get_ints_opt(&mut self, obj: &JObject) -> Result<Option<Vec<i32>>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_list_obj = java_obj_gen.l()?;
            env.get_integers(&java_list_obj)
        })
    }

    fn get_long_opt(&mut self, obj: &JObject) -> Result<Option<i64>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_long_obj = java_obj_gen.l()?;
            let long_obj = env.call_method(java_long_obj, "longValue", "()J", &[])?;
            let long_value = long_obj.j()?;
            Ok(long_value)
        })
    }

    fn get_u64_opt(&mut self, obj: &JObject) -> Result<Option<u64>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_long_obj = java_obj_gen.l()?;
            let long_obj = env.call_method(java_long_obj, "longValue", "()J", &[])?;
            let long_value = long_obj.j()?;
            Ok(long_value as u64)
        })
    }

    fn get_bytes_opt(&mut self, obj: &JObject) -> Result<Option<&[u8]>> {
        self.get_optional(obj, |env, inner_obj| {
            let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
            let java_byte_buffer_obj = java_obj_gen.l()?;
            let j_byte_buffer = JByteBuffer::from(java_byte_buffer_obj);
            let raw_data = env.get_direct_buffer_address(&j_byte_buffer)?;
            let capacity = env.get_direct_buffer_capacity(&j_byte_buffer)?;
            let data = unsafe { slice::from_raw_parts(raw_data, capacity) };
            Ok(data)
        })
    }

    fn get_optional<T, F>(&mut self, obj: &JObject, f: F) -> Result<Option<T>>
    where
        F: FnOnce(&mut JNIEnv, &JObject) -> Result<T>,
    {
        if obj.is_null() {
            return Ok(None);
        }
        let is_empty = self.call_method(obj, "isEmpty", "()Z", &[])?;
        if is_empty.z()? {
            // TODO(lu): put get java object into here cuz can only get java Object
            Ok(None)
        } else {
            f(self, obj).map(Some)
        }
    }
}

#[no_mangle]
pub extern "system" fn Java_com_lancedb_lance_test_JniTestHelper_parseInts(
    mut env: JNIEnv,
    _obj: JObject,
    list_obj: JObject, // List<Integer>
) {
    ok_or_throw_without_return!(env, env.get_integers(&list_obj));
}

#[no_mangle]
pub extern "system" fn Java_com_lancedb_lance_test_JniTestHelper_parseIntsOpt(
    mut env: JNIEnv,
    _obj: JObject,
    list_obj: JObject, // Optional<List<Integer>>
) {
    ok_or_throw_without_return!(env, env.get_ints_opt(&list_obj));
}
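The two exported symbols above follow the JNI naming convention `Java_<package>_<class>_<method>`, so they bind to native methods on a test helper class `com.lancedb.lance.test.JniTestHelper`. That class is not shown in this diff; the following is a minimal reconstruction from the symbol names and the commented parameter types (the `static` modifier is an assumption):

package com.lancedb.lance.test;

import java.util.List;
import java.util.Optional;

// Hypothetical Java counterpart to the JNI exports above; not part of this diff.
public class JniTestHelper {
  // Binds to Java_com_lancedb_lance_test_JniTestHelper_parseInts.
  public static native void parseInts(List<Integer> intsList);

  // Binds to Java_com_lancedb_lance_test_JniTestHelper_parseIntsOpt.
  public static native void parseIntsOpt(Optional<List<Integer>> intsListOpt);
}

On the Rust side, `get_ints_opt` maps both a null reference and `Optional.empty()` to `None`, so either call shape should return without throwing.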
68
java/core/lancedb-jni/src/lib.rs
Normal file
@@ -0,0 +1,68 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use lazy_static::lazy_static;

// TODO import from lance-jni without duplicate
#[macro_export]
macro_rules! ok_or_throw {
    ($env:expr, $result:expr) => {
        match $result {
            Ok(value) => value,
            Err(err) => {
                Error::from(err).throw(&mut $env);
                return JObject::null();
            }
        }
    };
}

macro_rules! ok_or_throw_without_return {
    ($env:expr, $result:expr) => {
        match $result {
            Ok(value) => value,
            Err(err) => {
                Error::from(err).throw(&mut $env);
                return;
            }
        }
    };
}

#[macro_export]
macro_rules! ok_or_throw_with_return {
    ($env:expr, $result:expr, $ret:expr) => {
        match $result {
            Ok(value) => value,
            Err(err) => {
                Error::from(err).throw(&mut $env);
                return $ret;
            }
        }
    };
}

mod connection;
pub mod error;
mod ffi;
mod traits;

pub use error::{Error, Result};

lazy_static! {
    static ref RT: tokio::runtime::Runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .expect("Failed to create tokio runtime");
}
122
java/core/lancedb-jni/src/traits.rs
Normal file
@@ -0,0 +1,122 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use jni::objects::{JMap, JObject, JString, JValue};
use jni::JNIEnv;

use crate::Result;

pub trait FromJObject<T> {
    fn extract(&self) -> Result<T>;
}

/// Convert a Rust type into a Java Object.
pub trait IntoJava {
    fn into_java<'a>(self, env: &mut JNIEnv<'a>) -> JObject<'a>;
}

impl FromJObject<i32> for JObject<'_> {
    fn extract(&self) -> Result<i32> {
        Ok(JValue::from(self).i()?)
    }
}

impl FromJObject<i64> for JObject<'_> {
    fn extract(&self) -> Result<i64> {
        Ok(JValue::from(self).j()?)
    }
}

impl FromJObject<f32> for JObject<'_> {
    fn extract(&self) -> Result<f32> {
        Ok(JValue::from(self).f()?)
    }
}

impl FromJObject<f64> for JObject<'_> {
    fn extract(&self) -> Result<f64> {
        Ok(JValue::from(self).d()?)
    }
}

pub trait FromJString {
    fn extract(&self, env: &mut JNIEnv) -> Result<String>;
}

impl FromJString for JString<'_> {
    fn extract(&self, env: &mut JNIEnv) -> Result<String> {
        Ok(env.get_string(self)?.into())
    }
}

pub trait JMapExt {
    #[allow(dead_code)]
    fn get_string(&self, env: &mut JNIEnv, key: &str) -> Result<Option<String>>;

    #[allow(dead_code)]
    fn get_i32(&self, env: &mut JNIEnv, key: &str) -> Result<Option<i32>>;

    #[allow(dead_code)]
    fn get_i64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<i64>>;

    #[allow(dead_code)]
    fn get_f32(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f32>>;

    #[allow(dead_code)]
    fn get_f64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f64>>;
}

fn get_map_value<T>(env: &mut JNIEnv, map: &JMap, key: &str) -> Result<Option<T>>
where
    for<'a> JObject<'a>: FromJObject<T>,
{
    let key_obj: JObject = env.new_string(key)?.into();
    if let Some(value) = map.get(env, &key_obj)? {
        if value.is_null() {
            Ok(None)
        } else {
            Ok(Some(value.extract()?))
        }
    } else {
        Ok(None)
    }
}

impl JMapExt for JMap<'_, '_, '_> {
    fn get_string(&self, env: &mut JNIEnv, key: &str) -> Result<Option<String>> {
        let key_obj: JObject = env.new_string(key)?.into();
        if let Some(value) = self.get(env, &key_obj)? {
            let value_str: JString = value.into();
            Ok(Some(value_str.extract(env)?))
        } else {
            Ok(None)
        }
    }

    fn get_i32(&self, env: &mut JNIEnv, key: &str) -> Result<Option<i32>> {
        get_map_value(env, self, key)
    }

    fn get_i64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<i64>> {
        get_map_value(env, self, key)
    }

    fn get_f32(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f32>> {
        get_map_value(env, self, key)
    }

    fn get_f64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f64>> {
        get_map_value(env, self, key)
    }
}
92
java/core/pom.xml
Normal file
@@ -0,0 +1,92 @@
<?xml version="1.0" encoding="UTF-8"?>

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <parent>
    <groupId>com.lancedb</groupId>
    <artifactId>lancedb-parent</artifactId>
    <version>0.0.3-SNAPSHOT</version>
    <relativePath>../pom.xml</relativePath>
  </parent>

  <artifactId>lancedb-core</artifactId>
  <name>LanceDB Core</name>
  <packaging>jar</packaging>

  <dependencies>
    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-vector</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-memory-netty</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-c-data</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.arrow</groupId>
      <artifactId>arrow-dataset</artifactId>
    </dependency>
    <dependency>
      <groupId>org.json</groupId>
      <artifactId>json</artifactId>
    </dependency>
    <dependency>
      <groupId>org.questdb</groupId>
      <artifactId>jar-jni</artifactId>
    </dependency>
    <dependency>
      <groupId>org.junit.jupiter</groupId>
      <artifactId>junit-jupiter</artifactId>
      <scope>test</scope>
    </dependency>
  </dependencies>

  <profiles>
    <profile>
      <id>build-jni</id>
      <activation>
        <activeByDefault>true</activeByDefault>
      </activation>
      <build>
        <plugins>
          <plugin>
            <groupId>org.questdb</groupId>
            <artifactId>rust-maven-plugin</artifactId>
            <version>1.1.1</version>
            <executions>
              <execution>
                <id>lancedb-jni</id>
                <goals>
                  <goal>build</goal>
                </goals>
                <configuration>
                  <path>lancedb-jni</path>
                  <!--<release>true</release>-->
                  <!-- Copy native libraries to target/classes for runtime access -->
                  <copyTo>${project.build.directory}/classes/nativelib</copyTo>
                  <copyWithPlatformDir>true</copyWithPlatformDir>
                </configuration>
              </execution>
              <execution>
                <id>lancedb-jni-test</id>
                <goals>
                  <goal>test</goal>
                </goals>
                <configuration>
                  <path>lancedb-jni</path>
                  <release>false</release>
                  <verbosity>-v</verbosity>
                </configuration>
              </execution>
            </executions>
          </plugin>
        </plugins>
      </build>
    </profile>
  </profiles>
</project>
120
java/core/src/main/java/com/lancedb/lancedb/Connection.java
Normal file
@@ -0,0 +1,120 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.lancedb.lancedb;

import io.questdb.jar.jni.JarJniLoader;
import java.io.Closeable;
import java.util.List;
import java.util.Optional;

/**
 * Represents a LanceDB database.
 */
public class Connection implements Closeable {
  static {
    JarJniLoader.loadLib(Connection.class, "/nativelib", "lancedb_jni");
  }

  private long nativeConnectionHandle;

  /**
   * Connect to a LanceDB instance.
   */
  public static native Connection connect(String uri);

  /**
   * Get the names of all tables in the database. The names are sorted in
   * ascending order.
   *
   * @return the table names
   */
  public List<String> tableNames() {
    return tableNames(Optional.empty(), Optional.empty());
  }

  /**
   * Get the names of filtered tables in the database. The names are sorted in
   * ascending order.
   *
   * @param limit The number of results to return.
   * @return the table names
   */
  public List<String> tableNames(int limit) {
    return tableNames(Optional.empty(), Optional.of(limit));
  }

  /**
   * Get the names of filtered tables in the database. The names are sorted in
   * ascending order.
   *
   * @param startAfter If present, only return names that come lexicographically after the supplied
   *                   value. This can be combined with limit to implement pagination
   *                   by setting this to the last table name from the previous page.
   * @return the table names
   */
  public List<String> tableNames(String startAfter) {
    return tableNames(Optional.of(startAfter), Optional.empty());
  }

  /**
   * Get the names of filtered tables in the database. The names are sorted in
   * ascending order.
   *
   * @param startAfter If present, only return names that come lexicographically after the supplied
   *                   value. This can be combined with limit to implement pagination
   *                   by setting this to the last table name from the previous page.
   * @param limit The number of results to return.
   * @return the table names
   */
  public List<String> tableNames(String startAfter, int limit) {
    return tableNames(Optional.of(startAfter), Optional.of(limit));
  }

  /**
   * Get the names of filtered tables in the database. The names are sorted in
   * ascending order.
   *
   * @param startAfter If present, only return names that come lexicographically after the supplied
   *                   value. This can be combined with limit to implement pagination
   *                   by setting this to the last table name from the previous page.
   * @param limit The number of results to return.
   * @return the table names
   */
  public native List<String> tableNames(
      Optional<String> startAfter, Optional<Integer> limit);

  /**
   * Closes this connection and releases any system resources associated with it.
   * If the connection is already closed, then invoking this method has no effect.
   */
  @Override
  public void close() {
    if (nativeConnectionHandle != 0) {
      releaseNativeConnection(nativeConnectionHandle);
      nativeConnectionHandle = 0;
    }
  }

  /**
   * Native method to release the Lance connection resources associated with the
   * given handle.
   *
   * @param handle The native handle to the connection resource.
   */
  private native void releaseNativeConnection(long handle);

  private Connection() {}
}
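As the javadoc above notes, `startAfter` combined with `limit` implements cursor-style pagination: the last name of one page becomes the cursor for the next. A minimal sketch of paging through every table with this API (the database path is a placeholder):

import java.util.List;

public class ListAllTables {
  public static void main(String[] args) {
    try (Connection conn = Connection.connect("/tmp/example_db")) {
      final int pageSize = 100;
      List<String> page = conn.tableNames(pageSize);
      while (!page.isEmpty()) {
        page.forEach(System.out::println);
        // Use the last name of this page as the cursor for the next page.
        page = conn.tableNames(page.get(page.size() - 1), pageSize);
      }
    }
  }
}

The loop terminates on the first empty page, which matches the test below where paging past the last table name returns an empty list.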
135
java/core/src/test/java/com/lancedb/lancedb/ConnectionTest.java
Normal file
@@ -0,0 +1,135 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.lancedb.lancedb;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.nio.file.Path;
import java.util.List;
import java.net.URL;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

public class ConnectionTest {
  private static final String[] TABLE_NAMES = {
    "dataset_version",
    "new_empty_dataset",
    "test",
    "write_stream"
  };

  @TempDir
  static Path tempDir; // Temporary directory for the tests
  private static URL lanceDbURL;

  @BeforeAll
  static void setUp() {
    ClassLoader classLoader = ConnectionTest.class.getClassLoader();
    lanceDbURL = classLoader.getResource("example_db");
  }

  @Test
  void emptyDB() {
    String databaseUri = tempDir.resolve("emptyDB").toString();
    try (Connection conn = Connection.connect(databaseUri)) {
      List<String> tableNames = conn.tableNames();
      assertTrue(tableNames.isEmpty());
    }
  }

  @Test
  void tableNames() {
    try (Connection conn = Connection.connect(lanceDbURL.toString())) {
      List<String> tableNames = conn.tableNames();
      assertEquals(4, tableNames.size());
      for (int i = 0; i < TABLE_NAMES.length; i++) {
        assertEquals(TABLE_NAMES[i], tableNames.get(i));
      }
    }
  }

  @Test
  void tableNamesStartAfter() {
    try (Connection conn = Connection.connect(lanceDbURL.toString())) {
      assertTableNamesStartAfter(conn, TABLE_NAMES[0], 3, TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
      assertTableNamesStartAfter(conn, TABLE_NAMES[1], 2, TABLE_NAMES[2], TABLE_NAMES[3]);
      assertTableNamesStartAfter(conn, TABLE_NAMES[2], 1, TABLE_NAMES[3]);
      assertTableNamesStartAfter(conn, TABLE_NAMES[3], 0);
      assertTableNamesStartAfter(conn, "a_dataset", 4, TABLE_NAMES[0], TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
      assertTableNamesStartAfter(conn, "o_dataset", 2, TABLE_NAMES[2], TABLE_NAMES[3]);
      assertTableNamesStartAfter(conn, "v_dataset", 1, TABLE_NAMES[3]);
      assertTableNamesStartAfter(conn, "z_dataset", 0);
    }
  }

  private void assertTableNamesStartAfter(Connection conn, String startAfter, int expectedSize, String... expectedNames) {
    List<String> tableNames = conn.tableNames(startAfter);
    assertEquals(expectedSize, tableNames.size());
    for (int i = 0; i < expectedNames.length; i++) {
      assertEquals(expectedNames[i], tableNames.get(i));
    }
  }

  @Test
  void tableNamesLimit() {
    try (Connection conn = Connection.connect(lanceDbURL.toString())) {
      for (int i = 0; i <= TABLE_NAMES.length; i++) {
        List<String> tableNames = conn.tableNames(i);
        assertEquals(i, tableNames.size());
        for (int j = 0; j < i; j++) {
          assertEquals(TABLE_NAMES[j], tableNames.get(j));
        }
      }
    }
  }

  @Test
  void tableNamesStartAfterLimit() {
    try (Connection conn = Connection.connect(lanceDbURL.toString())) {
      List<String> tableNames = conn.tableNames(TABLE_NAMES[0], 2);
      assertEquals(2, tableNames.size());
      assertEquals(TABLE_NAMES[1], tableNames.get(0));
      assertEquals(TABLE_NAMES[2], tableNames.get(1));
      tableNames = conn.tableNames(TABLE_NAMES[1], 1);
      assertEquals(1, tableNames.size());
      assertEquals(TABLE_NAMES[2], tableNames.get(0));
      tableNames = conn.tableNames(TABLE_NAMES[2], 2);
      assertEquals(1, tableNames.size());
      assertEquals(TABLE_NAMES[3], tableNames.get(0));
      tableNames = conn.tableNames(TABLE_NAMES[3], 2);
      assertEquals(0, tableNames.size());
      tableNames = conn.tableNames(TABLE_NAMES[0], 0);
      assertEquals(0, tableNames.size());

      // Limit larger than the number of remaining tables
      tableNames = conn.tableNames(TABLE_NAMES[0], 10);
      assertEquals(3, tableNames.size());
      assertEquals(TABLE_NAMES[1], tableNames.get(0));
      assertEquals(TABLE_NAMES[2], tableNames.get(1));
      assertEquals(TABLE_NAMES[3], tableNames.get(2));

      // Start after a value not in the list
      tableNames = conn.tableNames("non_existent_table", 2);
      assertEquals(2, tableNames.size());
      assertEquals(TABLE_NAMES[2], tableNames.get(0));
      assertEquals(TABLE_NAMES[3], tableNames.get(1));

      // Start after the last table with a limit
      tableNames = conn.tableNames(TABLE_NAMES[3], 1);
      assertEquals(0, tableNames.size());
    }
  }
}
Binary file not shown.
@@ -0,0 +1 @@
$d51afd07-e3cd-4c76-9b9b-787e13fd55b0<62>=id <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>*int3208name <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>*string08
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1 @@
$15648e72-076f-4ef1-8b90-10d305b95b3b<33>=id <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>*int3208name <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>*string08
Binary file not shown.
Binary file not shown.
@@ -0,0 +1 @@
$a3689caf-4f6b-4afc-a3c7-97af75661843<34>oitem <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>*string8price <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>*double80vector <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>*fixed_size_list:float:28
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
219
java/pom.xml
Normal file
@@ -0,0 +1,219 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>com.lancedb</groupId>
  <artifactId>lancedb-parent</artifactId>
  <version>0.0.3-SNAPSHOT</version>
  <packaging>pom</packaging>

  <name>Lance Parent</name>
  <description>LanceDB Java API</description>
  <url>http://lancedb.com/</url>

  <developers>
    <developer>
      <name>Lance DB Dev Group</name>
      <email>dev@lancedb.com</email>
    </developer>
  </developers>
  <licenses>
    <license>
      <name>The Apache Software License, Version 2.0</name>
      <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
    </license>
  </licenses>

  <scm>
    <developerConnection>scm:git:git@github.com:lancedb/lancedb.git</developerConnection>
    <tag>HEAD</tag>
    <url>scm:git:git@github.com:lancedb/lancedb.git</url>
  </scm>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <maven.compiler.source>11</maven.compiler.source>
    <maven.compiler.target>11</maven.compiler.target>
    <arrow.version>15.0.0</arrow.version>
  </properties>

  <modules>
    <module>core</module>
  </modules>

  <dependencyManagement>
    <dependencies>
      <dependency>
        <groupId>org.apache.arrow</groupId>
        <artifactId>arrow-vector</artifactId>
        <version>${arrow.version}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.arrow</groupId>
        <artifactId>arrow-memory-netty</artifactId>
        <version>${arrow.version}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.arrow</groupId>
        <artifactId>arrow-c-data</artifactId>
        <version>${arrow.version}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.arrow</groupId>
        <artifactId>arrow-dataset</artifactId>
        <version>${arrow.version}</version>
      </dependency>
      <dependency>
        <groupId>org.questdb</groupId>
        <artifactId>jar-jni</artifactId>
        <version>1.1.1</version>
      </dependency>
      <dependency>
        <groupId>org.junit.jupiter</groupId>
        <artifactId>junit-jupiter</artifactId>
        <version>5.10.1</version>
      </dependency>
      <dependency>
        <groupId>org.json</groupId>
        <artifactId>json</artifactId>
        <version>20210307</version>
      </dependency>
    </dependencies>
  </dependencyManagement>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-source-plugin</artifactId>
        <version>2.2.1</version>
        <executions>
          <execution>
            <id>attach-sources</id>
            <goals>
              <goal>jar-no-fork</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-javadoc-plugin</artifactId>
        <version>2.9.1</version>
        <executions>
          <execution>
            <id>attach-javadocs</id>
            <goals>
              <goal>jar</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-checkstyle-plugin</artifactId>
        <version>3.3.1</version>
        <configuration>
          <configLocation>google_checks.xml</configLocation>
          <consoleOutput>true</consoleOutput>
          <failsOnError>true</failsOnError>
          <violationSeverity>warning</violationSeverity>
          <linkXRef>false</linkXRef>
        </configuration>
        <executions>
          <execution>
            <id>validate</id>
            <phase>validate</phase>
            <goals>
              <goal>check</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
    </plugins>
    <pluginManagement>
      <plugins>
        <plugin>
          <artifactId>maven-clean-plugin</artifactId>
          <version>3.1.0</version>
        </plugin>
        <plugin>
          <artifactId>maven-resources-plugin</artifactId>
          <version>3.0.2</version>
        </plugin>
        <plugin>
          <artifactId>maven-compiler-plugin</artifactId>
          <version>3.8.1</version>
          <configuration>
            <compilerArgs>
              <arg>-h</arg>
              <arg>target/headers</arg>
            </compilerArgs>
          </configuration>
        </plugin>
        <plugin>
          <artifactId>maven-surefire-plugin</artifactId>
          <version>3.2.5</version>
          <configuration>
            <argLine>--add-opens=java.base/java.nio=ALL-UNNAMED</argLine>
            <forkNode implementation="org.apache.maven.plugin.surefire.extensions.SurefireForkNodeFactory" />
            <useSystemClassLoader>false</useSystemClassLoader>
          </configuration>
        </plugin>
        <plugin>
          <artifactId>maven-jar-plugin</artifactId>
          <version>3.0.2</version>
        </plugin>
        <plugin>
          <artifactId>maven-install-plugin</artifactId>
          <version>2.5.2</version>
        </plugin>
      </plugins>
    </pluginManagement>
  </build>

  <profiles>
    <profile>
      <id>deploy-to-ossrh</id>
      <build>
        <plugins>
          <plugin>
            <groupId>org.sonatype.central</groupId>
            <artifactId>central-publishing-maven-plugin</artifactId>
            <version>0.4.0</version>
            <extensions>true</extensions>
            <configuration>
              <publishingServerId>ossrh</publishingServerId>
              <tokenAuth>true</tokenAuth>
            </configuration>
          </plugin>
          <plugin>
            <groupId>org.sonatype.plugins</groupId>
            <artifactId>nexus-staging-maven-plugin</artifactId>
            <version>1.6.13</version>
            <extensions>true</extensions>
            <configuration>
              <serverId>ossrh</serverId>
              <nexusUrl>https://s01.oss.sonatype.org/</nexusUrl>
              <autoReleaseAfterClose>true</autoReleaseAfterClose>
            </configuration>
          </plugin>
          <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-gpg-plugin</artifactId>
            <version>1.5</version>
            <executions>
              <execution>
                <id>sign-artifacts</id>
                <phase>verify</phase>
                <goals>
                  <goal>sign</goal>
                </goals>
              </execution>
            </executions>
          </plugin>
        </plugins>
      </build>
    </profile>
  </profiles>
</project>
4
node/package-lock.json
generated
@@ -1,12 +1,12 @@
{
  "name": "vectordb",
  "version": "0.4.20",
  "version": "0.5.0",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "vectordb",
      "version": "0.4.20",
      "version": "0.5.0",
      "cpu": [
        "x64",
        "arm64"
@@ -1,6 +1,6 @@
{
  "name": "vectordb",
  "version": "0.4.20",
  "version": "0.5.0",
  "description": " Serverless, low-latency vector database for AI applications",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
@@ -624,8 +624,6 @@ function validateSchemaEmbeddings(
  }

  if (missingEmbeddingFields.length > 0 && embeddings === undefined) {
    console.log({ missingEmbeddingFields, embeddings });

    throw new Error(
      `Table has embeddings: "${missingEmbeddingFields
        .map((f) => f.name)
@@ -633,5 +631,5 @@ function validateSchemaEmbeddings(
    );
  }

  return new Schema(fields);
  return new Schema(fields, schema.metadata);
}
@@ -419,3 +419,31 @@ describe("when dealing with versioning", () => {
    );
  });
});

describe("when optimizing a dataset", () => {
  let tmpDir: tmp.DirResult;
  let table: Table;
  beforeEach(async () => {
    tmpDir = tmp.dirSync({ unsafeCleanup: true });
    const con = await connect(tmpDir.name);
    table = await con.createTable("vectors", [{ id: 1 }]);
    await table.add([{ id: 2 }]);
  });
  afterEach(() => {
    tmpDir.removeCallback();
  });

  it("compacts files", async () => {
    const stats = await table.optimize();
    expect(stats.compaction.filesAdded).toBe(1);
    expect(stats.compaction.filesRemoved).toBe(2);
    expect(stats.compaction.fragmentsAdded).toBe(1);
    expect(stats.compaction.fragmentsRemoved).toBe(2);
  });

  it("cleanups old versions", async () => {
    const stats = await table.optimize({ cleanupOlderThan: new Date() });
    expect(stats.prune.bytesRemoved).toBeGreaterThan(0);
    expect(stats.prune.oldVersionsRemoved).toBe(3);
  });
});

@@ -677,8 +677,6 @@ function validateSchemaEmbeddings(
  }

  if (missingEmbeddingFields.length > 0 && embeddings === undefined) {
    console.log({ missingEmbeddingFields, embeddings });

    throw new Error(
      `Table has embeddings: "${missingEmbeddingFields
        .map((f) => f.name)
@@ -686,5 +684,5 @@ function validateSchemaEmbeddings(
    );
  }

  return new Schema(fields);
  return new Schema(fields, schema.metadata);
}
@@ -19,6 +19,7 @@ import {
  AddColumnsSql,
  ColumnAlteration,
  IndexConfig,
  OptimizeStats,
  Table as _NativeTable,
} from "./native";
import { Query, VectorQuery } from "./query";
@@ -50,6 +51,23 @@ export interface UpdateOptions {
  where: string;
}

export interface OptimizeOptions {
  /**
   * If set then all versions older than the given date
   * will be removed. The current version will never be removed.
   * The default is 7 days.
   * @example
   * // Delete all versions older than 1 day
   * const olderThan = new Date();
   * olderThan.setDate(olderThan.getDate() - 1);
   * tbl.optimize({ cleanupOlderThan: olderThan });
   *
   * // Delete all versions except the current version
   * tbl.optimize({ cleanupOlderThan: new Date() });
   */
  cleanupOlderThan: Date;
}

/**
 * A Table is a collection of Records in a LanceDB Database.
 *
@@ -352,6 +370,48 @@ export class Table {
    await this.inner.restore();
  }

  /**
   * Optimize the on-disk data and indices for better performance.
   *
   * Modeled after ``VACUUM`` in PostgreSQL.
   *
   * Optimization covers three operations:
   *
   * - Compaction: Merges small files into larger ones
   * - Prune: Removes old versions of the dataset
   * - Index: Optimizes the indices, adding new data to existing indices
   *
   * Experimental API
   * ----------------
   *
   * The optimization process is undergoing active development and may change.
   * Our goal with these changes is to improve the performance of optimization and
   * reduce the complexity.
   *
   * That being said, it is essential today to run optimize if you want the best
   * performance. It should be stable and safe to use in production, but it is our
   * hope that the API may be simplified (or not even need to be called) in the
   * future.
   *
   * The frequency with which an application should call optimize depends on the
   * frequency of data modifications. If data is frequently added, deleted, or
   * updated then optimize should be run frequently. A good rule of thumb is to
   * run optimize if you have added or modified 100,000 or more records or run
   * more than 20 data modification operations.
   */
  async optimize(options?: Partial<OptimizeOptions>): Promise<OptimizeStats> {
    let cleanupOlderThanMs;
    if (
      options?.cleanupOlderThan !== undefined &&
      options?.cleanupOlderThan !== null
    ) {
      cleanupOlderThanMs =
        new Date().getTime() - options.cleanupOlderThan.getTime();
    }
    return await this.inner.optimize(cleanupOlderThanMs);
  }

  /** List all indices that have been created with {@link Table.createIndex} */
  async listIndices(): Promise<IndexConfig[]> {
    return await this.inner.listIndices();
@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-arm64",
  "version": "0.4.20",
  "version": "0.5.0",
  "os": ["darwin"],
  "cpu": ["arm64"],
  "main": "lancedb.darwin-arm64.node",
@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-x64",
  "version": "0.4.20",
  "version": "0.5.0",
  "os": ["darwin"],
  "cpu": ["x64"],
  "main": "lancedb.darwin-x64.node",
@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-arm64-gnu",
  "version": "0.4.20",
  "version": "0.5.0",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "lancedb.linux-arm64-gnu.node",
@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-x64-gnu",
  "version": "0.4.20",
  "version": "0.5.0",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "lancedb.linux-x64-gnu.node",
@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-win32-x64-msvc",
  "version": "0.4.14",
  "version": "0.5.0",
  "os": ["win32"],
  "cpu": ["x64"],
  "main": "lancedb.win32-x64-msvc.node",
@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb",
  "version": "0.4.20",
  "version": "0.5.0",
  "main": "./dist/index.js",
  "types": "./dist/index.d.ts",
  "napi": {
@@ -15,8 +15,8 @@
use arrow_ipc::writer::FileWriter;
use lancedb::ipc::ipc_file_to_batches;
use lancedb::table::{
    AddDataMode, ColumnAlteration as LanceColumnAlteration, NewColumnTransform,
    Table as LanceDbTable,
    AddDataMode, ColumnAlteration as LanceColumnAlteration, Duration, NewColumnTransform,
    OptimizeAction, OptimizeOptions, Table as LanceDbTable,
};
use napi::bindgen_prelude::*;
use napi_derive::napi;
@@ -263,6 +263,60 @@ impl Table {
        self.inner_ref()?.restore().await.default_error()
    }

    #[napi]
    pub async fn optimize(&self, older_than_ms: Option<i64>) -> napi::Result<OptimizeStats> {
        let inner = self.inner_ref()?;

        let older_than = if let Some(ms) = older_than_ms {
            if ms == i64::MIN {
                return Err(napi::Error::from_reason(format!(
                    "older_than_ms cannot be {}",
                    i64::MIN,
                )));
            }
            Duration::try_milliseconds(ms)
        } else {
            None
        };

        let compaction_stats = inner
            .optimize(OptimizeAction::Compact {
                options: lancedb::table::CompactionOptions::default(),
                remap_options: None,
            })
            .await
            .default_error()?
            .compaction
            .unwrap();
        let prune_stats = inner
            .optimize(OptimizeAction::Prune {
                older_than,
                delete_unverified: None,
            })
            .await
            .default_error()?
            .prune
            .unwrap();
        inner
            .optimize(lancedb::table::OptimizeAction::Index(
                OptimizeOptions::default(),
            ))
            .await
            .default_error()?;
        Ok(OptimizeStats {
            compaction: CompactionStats {
                files_added: compaction_stats.files_added as i64,
                files_removed: compaction_stats.files_removed as i64,
                fragments_added: compaction_stats.fragments_added as i64,
                fragments_removed: compaction_stats.fragments_removed as i64,
            },
            prune: RemovalStats {
                bytes_removed: prune_stats.bytes_removed as i64,
                old_versions_removed: prune_stats.old_versions as i64,
            },
        })
    }

    #[napi]
    pub async fn list_indices(&self) -> napi::Result<Vec<IndexConfig>> {
        Ok(self
@@ -298,6 +352,40 @@ impl From<lancedb::index::IndexConfig> for IndexConfig {
    }
}

/// Statistics about a compaction operation.
#[napi(object)]
#[derive(Clone, Debug)]
pub struct CompactionStats {
    /// The number of fragments removed
    pub fragments_removed: i64,
    /// The number of new, compacted fragments added
    pub fragments_added: i64,
    /// The number of data files removed
    pub files_removed: i64,
    /// The number of new, compacted data files added
    pub files_added: i64,
}

/// Statistics about a cleanup operation
#[napi(object)]
#[derive(Clone, Debug)]
pub struct RemovalStats {
    /// The number of bytes removed
    pub bytes_removed: i64,
    /// The number of old versions removed
    pub old_versions_removed: i64,
}

/// Statistics about an optimize operation
#[napi(object)]
#[derive(Clone, Debug)]
pub struct OptimizeStats {
    /// Statistics about the compaction operation
    pub compaction: CompactionStats,
    /// Statistics about the removal operation
    pub prune: RemovalStats,
}

/// A definition of a column alteration. The alteration changes the column at
/// `path` to have the new name `name`, to be nullable if `nullable` is true,
/// and to have the data type `data_type`. At least one of `rename` or `nullable`
@@ -1,8 +0,0 @@
[bumpversion]
current_version = 0.6.13
commit = True
message = [python] Bump version: {current_version} → {new_version}
tag = True
tag_name = python-v{new_version}

[bumpversion:file:pyproject.toml]
34
python/.bumpversion.toml
Normal file
@@ -0,0 +1,34 @@
[tool.bumpversion]
current_version = "0.8.0"
parse = """(?x)
(?P<major>0|[1-9]\\d*)\\.
(?P<minor>0|[1-9]\\d*)\\.
(?P<patch>0|[1-9]\\d*)
(?:-(?P<pre_l>[a-zA-Z-]+)\\.(?P<pre_n>0|[1-9]\\d*))?
"""
serialize = [
    "{major}.{minor}.{patch}-{pre_l}.{pre_n}",
    "{major}.{minor}.{patch}",
]
search = "{current_version}"
replace = "{new_version}"
regex = false
ignore_missing_version = false
ignore_missing_files = false
tag = true
sign_tags = false
tag_name = "python-v{new_version}"
tag_message = "Bump version: {current_version} → {new_version}"
allow_dirty = true
commit = true
message = "Bump version: {current_version} → {new_version}"
commit_args = ""

[tool.bumpversion.parts.pre_l]
values = ["beta", "final"]
optional_value = "final"

[[tool.bumpversion.files]]
filename = "Cargo.toml"
search = "\nversion = \"{current_version}\""
replace = "\nversion = \"{new_version}\""
@@ -1,6 +1,6 @@
[package]
name = "lancedb-python"
version = "0.4.10"
version = "0.8.0"
edition.workspace = true
description = "Python bindings for LanceDB"
license.workspace = true
@@ -1,16 +1,16 @@
[project]
name = "lancedb"
version = "0.6.13"
# version in Cargo.toml
dependencies = [
    "deprecation",
    "pylance==0.10.12",
    "pylance==0.11.0",
    "ratelimiter~=1.0",
    "requests>=2.31.0",
    "retry>=0.9.2",
    "tqdm>=4.27.0",
    "pydantic>=1.10",
    "attrs>=21.3.0",
    "semver",
    "packaging",
    "cachetools",
    "overrides>=0.7",
]
@@ -86,3 +86,17 @@ class VectorQuery:
    def refine_factor(self, refine_factor: int): ...
    def nprobes(self, nprobes: int): ...
    def bypass_vector_index(self): ...

class CompactionStats:
    fragments_removed: int
    fragments_added: int
    files_removed: int
    files_added: int

class RemovalStats:
    bytes_removed: int
    old_versions_removed: int

class OptimizeStats:
    compaction: CompactionStats
    prune: RemovalStats
@@ -74,7 +74,7 @@ class BedRockText(TextEmbeddingFunction):
    profile_name: Union[str, None] = None
    role_session_name: str = "lancedb-embeddings"

    if PYDANTIC_VERSION < (2, 0):  # Pydantic 1.x compat
    if PYDANTIC_VERSION.major < 2:  # Pydantic 1.x compat

        class Config:
            keep_untouched = (cached_property,)
@@ -90,7 +90,7 @@ class GeminiText(TextEmbeddingFunction):
    query_task_type: str = "retrieval_query"
    source_task_type: str = "retrieval_document"

    if PYDANTIC_VERSION < (2, 0):  # Pydantic 1.x compat
    if PYDANTIC_VERSION.major < 2:  # Pydantic 1.x compat

        class Config:
            keep_untouched = (cached_property,)
@@ -40,7 +40,7 @@ class ImageBindEmbeddings(EmbeddingFunction):
    device: str = "cpu"
    normalize: bool = False

    if PYDANTIC_VERSION < (2, 0):  # Pydantic 1.x compat
    if PYDANTIC_VERSION.major < 2:  # Pydantic 1.x compat

        class Config:
            keep_untouched = (cached_property,)
@@ -54,7 +54,7 @@ class TransformersEmbeddingFunction(EmbeddingFunction):
        self._tokenizer = transformers.AutoTokenizer.from_pretrained(self.name)
        self._model = transformers.AutoModel.from_pretrained(self.name)

    if PYDANTIC_VERSION < (2, 0):  # Pydantic 1.x compat
    if PYDANTIC_VERSION.major < 2:  # Pydantic 1.x compat

        class Config:
            keep_untouched = (cached_property,)
@@ -35,13 +35,13 @@ from typing import (
import numpy as np
import pyarrow as pa
import pydantic
import semver
from packaging.version import Version

PYDANTIC_VERSION = semver.parse_version_info(pydantic.__version__)
PYDANTIC_VERSION = Version(pydantic.__version__)
try:
    from pydantic_core import CoreSchema, core_schema
except ImportError:
    if PYDANTIC_VERSION >= (2,):
    if PYDANTIC_VERSION.major >= 2:
        raise

if TYPE_CHECKING:
@@ -144,7 +144,7 @@ def Vector(
                raise TypeError("A list of numbers or numpy.ndarray is needed")
            return cls(v)

        if PYDANTIC_VERSION < (2, 0):
        if PYDANTIC_VERSION.major < 2:

            @classmethod
            def __modify_schema__(cls, field_schema: Dict[str, Any]):
@@ -1,5 +1,5 @@
import os
import semver
from packaging.version import Version
from functools import cached_property
from typing import Union

@@ -44,9 +44,8 @@ class CohereReranker(Reranker):
    def _client(self):
        cohere = attempt_import_or_raise("cohere")
        # ensure version is at least 0.5.0
        if (
            hasattr(cohere, "__version__")
            and semver.compare(cohere.__version__, "5.0.0") < 0
        if hasattr(cohere, "__version__") and Version(cohere.__version__) < Version(
            "0.5.0"
        ):
            raise ValueError(
                f"cohere version must be at least 0.5.0, found {cohere.__version__}"
@@ -58,7 +58,7 @@ if TYPE_CHECKING:
    import PIL
    from lance.dataset import CleanupStats, ReaderLike

    from ._lancedb import Table as LanceDBTable
    from ._lancedb import Table as LanceDBTable, OptimizeStats
    from .db import LanceDBConnection
    from .index import BTree, IndexConfig, IvfPq
@@ -2377,6 +2377,49 @@ class AsyncTable:
        """
        await self._inner.restore()

    async def optimize(
        self, *, cleanup_older_than: Optional[timedelta] = None
    ) -> OptimizeStats:
        """
        Optimize the on-disk data and indices for better performance.

        Modeled after ``VACUUM`` in PostgreSQL.

        Optimization covers three operations:

        * Compaction: Merges small files into larger ones
        * Prune: Removes old versions of the dataset
        * Index: Optimizes the indices, adding new data to existing indices

        Parameters
        ----------
        cleanup_older_than: timedelta, optional, default 7 days
            All files belonging to versions older than this will be removed. Set
            to 0 days to remove all versions except the latest. The latest version
            is never removed.

        Experimental API
        ----------------

        The optimization process is undergoing active development and may change.
        Our goal with these changes is to improve the performance of optimization and
        reduce the complexity.

        That being said, it is essential today to run optimize if you want the best
        performance. It should be stable and safe to use in production, but it is our
        hope that the API may be simplified (or not even need to be called) in the
        future.

        The frequency with which an application should call optimize depends on the
        frequency of data modifications. If data is frequently added, deleted, or
        updated then optimize should be run frequently. A good rule of thumb is to
        run optimize if you have added or modified 100,000 or more records or run
        more than 20 data modification operations.
        """
        if cleanup_older_than is not None:
            cleanup_older_than = round(cleanup_older_than.total_seconds() * 1000)
        return await self._inner.optimize(cleanup_older_than)

    async def list_indices(self) -> IndexConfig:
        """
        List all indices that have been created with Self::create_index
@@ -178,7 +178,7 @@ def test_fixed_size_list_field():
        li: List[int]

    data = TestModel(vec=list(range(16)), li=[1, 2, 3])
    if PYDANTIC_VERSION >= (2,):
    if PYDANTIC_VERSION.major >= 2:
        assert json.loads(data.model_dump_json()) == {
            "vec": list(range(16)),
            "li": [1, 2, 3],
@@ -197,7 +197,7 @@ def test_fixed_size_list_field():
        ]
    )

    if PYDANTIC_VERSION >= (2,):
    if PYDANTIC_VERSION.major >= 2:
        json_schema = TestModel.model_json_schema()
    else:
        json_schema = TestModel.schema()
@@ -1025,3 +1025,29 @@ async def test_time_travel(db_async: AsyncConnection):
    # Can't use restore if not checked out
    with pytest.raises(ValueError, match="checkout before running restore"):
        await table.restore()


@pytest.mark.asyncio
async def test_optimize(db_async: AsyncConnection):
    table = await db_async.create_table(
        "test",
        data=[{"x": [1]}],
    )
    await table.add(
        data=[
            {"x": [2]},
        ],
    )
    stats = await table.optimize()
    assert stats.compaction.files_removed == 2
    assert stats.compaction.files_added == 1
    assert stats.compaction.fragments_added == 1
    assert stats.compaction.fragments_removed == 2
    assert stats.prune.bytes_removed == 0
    assert stats.prune.old_versions_removed == 0

    stats = await table.optimize(cleanup_older_than=timedelta(seconds=0))
    assert stats.prune.bytes_removed > 0
    assert stats.prune.old_versions_removed == 3

    assert await table.query().to_arrow() == pa.table({"x": [[1], [2]]})
@@ -2,7 +2,9 @@ use arrow::{
    ffi_stream::ArrowArrayStreamReader,
    pyarrow::{FromPyArrow, ToPyArrow},
};
use lancedb::table::{AddDataMode, Table as LanceDbTable};
use lancedb::table::{
    AddDataMode, Duration, OptimizeAction, OptimizeOptions, Table as LanceDbTable,
};
use pyo3::{
    exceptions::{PyRuntimeError, PyValueError},
    pyclass, pymethods,
@@ -17,6 +19,40 @@ use crate::{
    query::Query,
};

/// Statistics about a compaction operation.
#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct CompactionStats {
    /// The number of fragments removed
    pub fragments_removed: u64,
    /// The number of new, compacted fragments added
    pub fragments_added: u64,
    /// The number of data files removed
    pub files_removed: u64,
    /// The number of new, compacted data files added
    pub files_added: u64,
}

/// Statistics about a cleanup operation
#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct RemovalStats {
    /// The number of bytes removed
    pub bytes_removed: u64,
    /// The number of old versions removed
    pub old_versions_removed: u64,
}

/// Statistics about an optimize operation
#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct OptimizeStats {
    /// Statistics about the compaction operation
    pub compaction: CompactionStats,
    /// Statistics about the removal operation
    pub prune: RemovalStats,
}

#[pyclass]
pub struct Table {
    // We keep a copy of the name to use if the inner table is dropped
@@ -191,4 +227,58 @@ impl Table {
    pub fn query(&self) -> Query {
        Query::new(self.inner_ref().unwrap().query())
    }

    pub fn optimize(self_: PyRef<'_, Self>, cleanup_since_ms: Option<u64>) -> PyResult<&PyAny> {
        let inner = self_.inner_ref()?.clone();
        let older_than = if let Some(ms) = cleanup_since_ms {
            if ms > i64::MAX as u64 {
                return Err(PyValueError::new_err(format!(
                    "cleanup_since_ms must not exceed {}",
                    i64::MAX
                )));
            }
            Duration::try_milliseconds(ms as i64)
        } else {
            None
        };
        future_into_py(self_.py(), async move {
            let compaction_stats = inner
                .optimize(OptimizeAction::Compact {
                    options: lancedb::table::CompactionOptions::default(),
                    remap_options: None,
                })
                .await
                .infer_error()?
                .compaction
                .unwrap();
            let prune_stats = inner
                .optimize(OptimizeAction::Prune {
                    older_than,
                    delete_unverified: None,
                })
                .await
                .infer_error()?
                .prune
                .unwrap();
            inner
                .optimize(lancedb::table::OptimizeAction::Index(
                    OptimizeOptions::default(),
                ))
                .await
                .infer_error()?;
            Ok(OptimizeStats {
                compaction: CompactionStats {
                    files_added: compaction_stats.files_added as u64,
                    files_removed: compaction_stats.files_removed as u64,
                    fragments_added: compaction_stats.fragments_added as u64,
                    fragments_removed: compaction_stats.fragments_removed as u64,
                },
                prune: RemovalStats {
                    bytes_removed: prune_stats.bytes_removed,
                    old_versions_removed: prune_stats.old_versions,
                },
            })
        })
    }
}
@@ -8,6 +8,51 @@ The Python package is versioned and released separately from the Rust and Node.j
ones. For Rust and Node.js, the release process is shared between `lancedb` and
`vectordb` for now.

## Preview releases

LanceDB has full releases about every 2 weeks, but in between we make frequent
preview releases. These are released as `0.x.y.betaN` versions. They receive the
same level of testing as normal releases and let you get access to the latest
features. However, we do not guarantee that preview releases will be available
more than 6 months after they are released. We may delete the preview releases
from the packaging index after a while. Once your application is stable, we
recommend switching to full releases, which will never be removed from package
indexes.

## Making releases

The release process uses a handful of GitHub actions to automate the process.

```text
┌─────────────────────┐
│Create Release Commit│
└─┬───────────────────┘
  │                       ┌────────────┐  ┌──►Python GH Release
  ├──►(tag) python-vX.Y.Z ───►│PyPI Publish├─┤
  │                       └────────────┘  └──►Python Wheels
  │
  │                 ┌───────────┐
  └──►(tag) vX.Y.Z ───┬──────►│NPM Publish├──┬──►Rust/Node GH Release
                      │       └───────────┘  │
                      │                      └──►NPM Packages
                      │       ┌─────────────┐
                      └──────►│Cargo Publish├───►Cargo Release
                              └─────────────┘
```

To start a release, trigger a `Create Release Commit` action from
[the workflows page](https://github.com/lancedb/lancedb/actions/workflows/make-release-commit.yml)
(Click on "Run workflow").

* **For a preview release**, leave the default parameters.
* **For a stable release**, set the `release_type` input to `stable`.

> [!IMPORTANT]
> If there was a breaking change since the last stable release, and we haven't
> done so yet, we should increment the minor version. The CI will detect if this
> is needed and fail the `Create Release Commit` job. To fix, select the
> "bump minor version" option.

## Breaking changes

We try to avoid breaking changes, but sometimes they are necessary. When there
@@ -21,12 +66,10 @@ body of the PR. A CI job will add a `breaking-change` label to the PR, which is
what will ultimately be used by CI to determine if the minor version should be
incremented.

A CI job will validate that if a `breaking-change` label is added, the minor
version is incremented in the `Cargo.toml` and `pyproject.toml` files. The only
exception is if it has already been incremented since the last stable release.

**It is the responsibility of the PR author to increment the minor version when
appropriate.**
> [!IMPORTANT]
> Reviewers should check that PRs with breaking changes receive the `breaking-change`
> label. If a PR is missing the label, please add it, even after it was merged.
> This label is used in the release process.

Some things that are considered breaking changes:
@@ -1,6 +1,6 @@
[package]
name = "lancedb-node"
version = "0.4.20"
version = "0.5.0"
description = "Serverless, low-latency vector database for AI applications"
license.workspace = true
edition.workspace = true
@@ -19,10 +19,12 @@ use snafu::Snafu;
|
||||
|
||||
#[derive(Debug, Snafu)]
|
||||
pub enum Error {
|
||||
#[allow(dead_code)]
|
||||
#[snafu(display("column '{name}' is missing"))]
|
||||
MissingColumn { name: String },
|
||||
#[snafu(display("{name}: {message}"))]
|
||||
OutOfRange { name: String, message: String },
|
||||
#[allow(dead_code)]
|
||||
#[snafu(display("{index_type} is not a valid index type"))]
|
||||
InvalidIndexType { index_type: String },
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ use neon::prelude::*;
|
||||
pub trait JsObjectExt {
|
||||
fn get_opt_u32(&self, cx: &mut FunctionContext, key: &str) -> Result<Option<u32>>;
|
||||
fn get_usize(&self, cx: &mut FunctionContext, key: &str) -> Result<usize>;
|
||||
#[allow(dead_code)]
|
||||
fn get_opt_usize(&self, cx: &mut FunctionContext, key: &str) -> Result<Option<usize>>;
|
||||
}
|
||||
|
||||
|
||||
@@ -324,7 +324,7 @@ impl JsTable {
|
||||
rt.spawn(async move {
|
||||
let stats = table
|
||||
.optimize(OptimizeAction::Prune {
|
||||
older_than,
|
||||
older_than: Some(older_than),
|
||||
delete_unverified,
|
||||
})
|
||||
.await;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lancedb"
|
||||
version = "0.4.20"
|
||||
version = "0.5.0"
|
||||
edition.workspace = true
|
||||
description = "LanceDB: A serverless, low-latency vector database for AI applications"
|
||||
license.workspace = true
|
||||
@@ -38,10 +38,11 @@ url.workspace = true
|
||||
regex.workspace = true
|
||||
serde = { version = "^1" }
|
||||
serde_json = { version = "1" }
|
||||
serde_with = { version = "3.8.1" }
|
||||
# For remote feature
|
||||
reqwest = { version = "0.11.24", features = ["gzip", "json"], optional = true }
|
||||
polars-arrow = { version = ">=0.37", optional = true }
|
||||
polars = { version = ">=0.37", optional = true}
|
||||
polars-arrow = { version = ">=0.37,<0.40.0", optional = true }
|
||||
polars = { version = ">=0.37,<0.40.0", optional = true}
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.5.0"
|
||||
@@ -49,9 +50,12 @@ rand = { version = "0.8.3", features = ["small_rng"] }
|
||||
uuid = { version = "1.7.0", features = ["v4"] }
|
||||
walkdir = "2"
|
||||
# For s3 integration tests (dev deps aren't allowed to be optional atm)
|
||||
aws-sdk-s3 = { version = "1.0" }
|
||||
aws-sdk-kms = { version = "1.0" }
|
||||
# We pin these because the content-length check breaks with localstack
|
||||
# https://github.com/smithy-lang/smithy-rs/releases/tag/release-2024-05-21
|
||||
aws-sdk-s3 = { version = "=1.23.0" }
|
||||
aws-sdk-kms = { version = "=1.21.0" }
|
||||
aws-config = { version = "1.0" }
|
||||
aws-smithy-runtime = { version = "=1.3.0" }
|
||||
|
||||
[features]
|
||||
default = []
|
||||
|
||||
@@ -195,7 +195,7 @@ impl<T: IntoArrow> CreateTableBuilder<true, T> {
|
||||
.embedding_registry()
|
||||
.get(&definition.embedding_name)
|
||||
.ok_or_else(|| Error::EmbeddingFunctionNotFound {
|
||||
name: definition.embedding_name.to_string(),
|
||||
name: definition.embedding_name.clone(),
|
||||
reason: "No embedding function found in the connection's embedding_registry"
|
||||
.to_string(),
|
||||
})?;
|
||||
|
||||
@@ -155,7 +155,7 @@ impl<R: RecordBatchReader> MaybeEmbedded<R> {
|
||||
}
|
||||
None => {
|
||||
return Err(Error::EmbeddingFunctionNotFound {
|
||||
name: embedding_def.embedding_name.to_string(),
|
||||
name: embedding_def.embedding_name.clone(),
|
||||
reason: format!(
|
||||
"Table was defined with an embedding column `{}` but no embedding function was found with that name within the registry.",
|
||||
embedding_def.embedding_name
|
||||
|
||||
@@ -14,9 +14,15 @@

use std::sync::Arc;

use serde::Deserialize;
use serde_with::skip_serializing_none;

use crate::{table::TableInternal, Result};

use self::{scalar::BTreeIndexBuilder, vector::IvfPqIndexBuilder};
use self::{
    scalar::BTreeIndexBuilder,
    vector::{IvfHnswPqIndexBuilder, IvfHnswSqIndexBuilder, IvfPqIndexBuilder},
};

pub mod scalar;
pub mod vector;
@@ -25,6 +31,8 @@ pub enum Index {
    Auto,
    BTree(BTreeIndexBuilder),
    IvfPq(IvfPqIndexBuilder),
    IvfHnswPq(IvfHnswPqIndexBuilder),
    IvfHnswSq(IvfHnswSqIndexBuilder),
}

/// Builder for the create_index operation
@@ -65,6 +73,8 @@ impl IndexBuilder {
#[derive(Debug, Clone, PartialEq)]
pub enum IndexType {
    IvfPq,
    IvfHnswPq,
    IvfHnswSq,
    BTree,
}

@@ -78,3 +88,19 @@ pub struct IndexConfig {
    /// be more columns to represent composite indices.
    pub columns: Vec<String>,
}

#[skip_serializing_none]
#[derive(Debug, Deserialize)]
pub struct IndexMetadata {
    pub metric_type: Option<String>,
    pub index_type: Option<String>,
}

#[skip_serializing_none]
#[derive(Debug, Deserialize)]
pub struct IndexStatistics {
    pub num_indexed_rows: usize,
    pub num_unindexed_rows: usize,
    pub index_type: Option<String>,
    pub indices: Vec<IndexMetadata>,
}

|
||||
//! values
|
||||
use std::cmp::max;
|
||||
|
||||
use serde::Deserialize;
|
||||
|
||||
use lance::table::format::{Index, Manifest};
|
||||
|
||||
use crate::DistanceType;
|
||||
@@ -46,18 +44,118 @@ impl VectorIndex {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct VectorIndexMetadata {
|
||||
pub metric_type: String,
|
||||
pub index_type: String,
|
||||
macro_rules! impl_distance_type_setter {
|
||||
() => {
|
||||
/// [DistanceType] to use to build the index.
|
||||
///
|
||||
/// Default value is [DistanceType::L2].
|
||||
///
|
||||
/// This is used when training the index to calculate the IVF partitions (vectors are
|
||||
/// grouped in partitions with similar vectors according to this distance type) and to
|
||||
/// calculate a subvector's code during quantization.
|
||||
///
|
||||
/// The metric type used to train an index MUST match the metric type used to search the
|
||||
/// index. Failure to do so will yield inaccurate results.
|
||||
pub fn distance_type(mut self, distance_type: DistanceType) -> Self {
|
||||
self.distance_type = distance_type;
|
||||
self
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct VectorIndexStatistics {
|
||||
pub num_indexed_rows: usize,
|
||||
pub num_unindexed_rows: usize,
|
||||
pub index_type: String,
|
||||
pub indices: Vec<VectorIndexMetadata>,
|
||||
macro_rules! impl_ivf_params_setter {
|
||||
() => {
|
||||
/// The number of IVF partitions to create.
|
||||
///
|
||||
/// This value should generally scale with the number of rows in the dataset. By default
|
||||
/// the number of partitions is the square root of the number of rows.
|
||||
///
|
||||
/// If this value is too large then the first part of the search (picking the right partition)
|
||||
/// will be slow. If this value is too small then the second part of the search (searching
|
||||
/// within a partition) will be slow.
|
||||
pub fn num_partitions(mut self, num_partitions: u32) -> Self {
|
||||
self.num_partitions = Some(num_partitions);
|
||||
self
|
||||
}
|
||||
|
||||
/// The rate used to calculate the number of training vectors for kmeans.
|
||||
///
|
||||
/// When an IVF index is trained, we need to calculate partitions. These are groups
|
||||
/// of vectors that are similar to each other. To do this we use an algorithm called kmeans.
|
||||
///
|
||||
/// Running kmeans on a large dataset can be slow. To speed this up we run kmeans on a
|
||||
/// random sample of the data. This parameter controls the size of the sample. The total
|
||||
/// number of vectors used to train the index is `sample_rate * num_partitions`.
|
||||
///
|
||||
/// Increasing this value might improve the quality of the index but in most cases the
|
||||
/// default should be sufficient.
|
||||
///
|
||||
/// The default value is 256.
|
||||
pub fn sample_rate(mut self, sample_rate: u32) -> Self {
|
||||
self.sample_rate = sample_rate;
|
||||
self
|
||||
}
|
||||
|
||||
/// Max iterations to train kmeans.
|
||||
///
|
||||
/// When training an IVF index we use kmeans to calculate the partitions. This parameter
|
||||
/// controls how many iterations of kmeans to run.
|
||||
///
|
||||
/// Increasing this might improve the quality of the index but in most cases the parameter
|
||||
/// is unused because kmeans will converge with fewer iterations. The parameter is only
|
||||
/// used in cases where kmeans does not appear to converge. In those cases it is unlikely
|
||||
/// that setting this larger will lead to the index converging anyways.
|
||||
///
|
||||
/// The default value is 50.
|
||||
pub fn max_iterations(mut self, max_iterations: u32) -> Self {
|
||||
self.max_iterations = max_iterations;
|
||||
self
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! impl_pq_params_setter {
|
||||
() => {
|
||||
/// Number of sub-vectors of PQ.
|
||||
///
|
||||
/// This value controls how much the vector is compressed during the quantization step.
|
||||
/// The more sub vectors there are the less the vector is compressed. The default is
|
||||
/// the dimension of the vector divided by 16. If the dimension is not evenly divisible
|
||||
/// by 16 we use the dimension divded by 8.
|
||||
///
|
||||
/// The above two cases are highly preferred. Having 8 or 16 values per subvector allows
|
||||
/// us to use efficient SIMD instructions.
|
||||
///
|
||||
/// If the dimension is not visible by 8 then we use 1 subvector. This is not ideal and
|
||||
/// will likely result in poor performance.
|
||||
pub fn num_sub_vectors(mut self, num_sub_vectors: u32) -> Self {
|
||||
self.num_sub_vectors = Some(num_sub_vectors);
|
||||
self
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! impl_hnsw_params_setter {
|
||||
() => {
|
||||
/// The number of neighbors to select for each vector in the HNSW graph.
|
||||
/// This value controls the tradeoff between search speed and accuracy.
|
||||
/// The higher the value the more accurate the search but the slower it will be.
|
||||
/// The default value is 20.
|
||||
pub fn num_edges(mut self, m: u32) -> Self {
|
||||
self.m = m;
|
||||
self
|
||||
}
|
||||
|
||||
/// The number of candidates to evaluate during the construction of the HNSW graph.
|
||||
/// This value controls the tradeoff between build speed and accuracy.
|
||||
/// The higher the value the more accurate the build but the slower it will be.
|
||||
/// This value should be set to a value that is not less than `ef` in the search phase.
|
||||
/// The default value is 300.
|
||||
pub fn ef_construction(mut self, ef_construction: u32) -> Self {
|
||||
self.ef_construction = ef_construction;
|
||||
self
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Builder for an IVF PQ index.
|
||||
@@ -83,10 +181,14 @@ pub struct VectorIndexStatistics {
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IvfPqIndexBuilder {
|
||||
pub(crate) distance_type: DistanceType,
|
||||
|
||||
// IVF
|
||||
pub(crate) num_partitions: Option<u32>,
|
||||
pub(crate) num_sub_vectors: Option<u32>,
|
||||
pub(crate) sample_rate: u32,
|
||||
pub(crate) max_iterations: u32,
|
||||
|
||||
// PQ
|
||||
pub(crate) num_sub_vectors: Option<u32>,
|
||||
}
|
||||
|
||||
impl Default for IvfPqIndexBuilder {
|
||||
@@ -102,84 +204,9 @@ impl Default for IvfPqIndexBuilder {
|
||||
}
|
||||
|
||||
impl IvfPqIndexBuilder {
|
||||
/// [DistanceType] to use to build the index.
|
||||
///
|
||||
/// Default value is [DistanceType::L2].
|
||||
///
|
||||
/// This is used when training the index to calculate the IVF partitions (vectors are
|
||||
/// grouped in partitions with similar vectors according to this distance type) and to
|
||||
/// calculate a subvector's code during quantization.
|
||||
///
|
||||
/// The metric type used to train an index MUST match the metric type used to search the
|
||||
/// index. Failure to do so will yield inaccurate results.
|
||||
pub fn distance_type(mut self, distance_type: DistanceType) -> Self {
|
||||
self.distance_type = distance_type;
|
||||
self
|
||||
}
|
||||
|
||||
/// The number of IVF partitions to create.
|
||||
///
|
||||
/// This value should generally scale with the number of rows in the dataset. By default
|
||||
/// the number of partitions is the square root of the number of rows.
|
||||
///
|
||||
/// If this value is too large then the first part of the search (picking the right partition)
|
||||
/// will be slow. If this value is too small then the second part of the search (searching
|
||||
/// within a partition) will be slow.
|
||||
pub fn num_partitions(mut self, num_partitions: u32) -> Self {
|
||||
self.num_partitions = Some(num_partitions);
|
||||
self
|
||||
}
|
||||
|
||||
/// Number of sub-vectors of PQ.
|
||||
///
|
||||
/// This value controls how much the vector is compressed during the quantization step.
|
||||
/// The more sub vectors there are the less the vector is compressed. The default is
|
||||
/// the dimension of the vector divided by 16. If the dimension is not evenly divisible
|
||||
/// by 16 we use the dimension divded by 8.
|
||||
///
|
||||
/// The above two cases are highly preferred. Having 8 or 16 values per subvector allows
|
||||
/// us to use efficient SIMD instructions.
|
||||
///
|
||||
/// If the dimension is not visible by 8 then we use 1 subvector. This is not ideal and
|
||||
/// will likely result in poor performance.
|
||||
pub fn num_sub_vectors(mut self, num_sub_vectors: u32) -> Self {
|
||||
self.num_sub_vectors = Some(num_sub_vectors);
|
||||
self
|
||||
}
|
||||
|
||||
/// The rate used to calculate the number of training vectors for kmeans.
|
||||
///
|
||||
/// When an IVF PQ index is trained, we need to calculate partitions. These are groups
|
||||
/// of vectors that are similar to each other. To do this we use an algorithm called kmeans.
|
||||
///
|
||||
/// Running kmeans on a large dataset can be slow. To speed this up we run kmeans on a
|
||||
/// random sample of the data. This parameter controls the size of the sample. The total
|
||||
/// number of vectors used to train the index is `sample_rate * num_partitions`.
|
||||
///
|
||||
/// Increasing this value might improve the quality of the index but in most cases the
|
||||
/// default should be sufficient.
|
||||
///
|
||||
/// The default value is 256.
|
||||
pub fn sample_rate(mut self, sample_rate: u32) -> Self {
|
||||
self.sample_rate = sample_rate;
|
||||
self
|
||||
}
|
||||
|
||||
/// Max iterations to train kmeans.
|
||||
///
|
||||
/// When training an IVF PQ index we use kmeans to calculate the partitions. This parameter
|
||||
/// controls how many iterations of kmeans to run.
|
||||
///
|
||||
/// Increasing this might improve the quality of the index but in most cases the parameter
|
||||
/// is unused because kmeans will converge with fewer iterations. The parameter is only
|
||||
/// used in cases where kmeans does not appear to converge. In those cases it is unlikely
|
||||
/// that setting this larger will lead to the index converging anyways.
|
||||
///
|
||||
/// The default value is 50.
|
||||
pub fn max_iterations(mut self, max_iterations: u32) -> Self {
|
||||
self.max_iterations = max_iterations;
|
||||
self
|
||||
}
|
||||
impl_distance_type_setter!();
|
||||
impl_ivf_params_setter!();
|
||||
impl_pq_params_setter!();
|
||||
}
|
||||
|
||||
pub(crate) fn suggested_num_partitions(rows: usize) -> u32 {
|
||||
@@ -201,3 +228,91 @@ pub(crate) fn suggested_num_sub_vectors(dim: u32) -> u32 {
|
||||
1
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for an IVF HNSW PQ index.
|
||||
///
|
||||
/// This index is a combination of IVF and HNSW.
|
||||
/// The IVF part is the same as the IVF PQ index.
|
||||
/// For each IVF partition, this builds a HNSW graph, the graph is used to
|
||||
/// quickly find the closest vectors to a query vector.
|
||||
///
|
||||
/// The PQ (product quantizer) is used to compress the vectors as the same as IVF PQ.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IvfHnswPqIndexBuilder {
|
||||
// IVF
|
||||
pub(crate) distance_type: DistanceType,
|
||||
pub(crate) num_partitions: Option<u32>,
|
||||
pub(crate) sample_rate: u32,
|
||||
pub(crate) max_iterations: u32,
|
||||
|
||||
// HNSW
|
||||
pub(crate) m: u32,
|
||||
pub(crate) ef_construction: u32,
|
||||
|
||||
// PQ
|
||||
pub(crate) num_sub_vectors: Option<u32>,
|
||||
}
|
||||
|
||||
impl Default for IvfHnswPqIndexBuilder {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
distance_type: DistanceType::L2,
|
||||
num_partitions: None,
|
||||
num_sub_vectors: None,
|
||||
sample_rate: 256,
|
||||
max_iterations: 50,
|
||||
m: 20,
|
||||
ef_construction: 300,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl IvfHnswPqIndexBuilder {
|
||||
impl_distance_type_setter!();
|
||||
impl_ivf_params_setter!();
|
||||
impl_hnsw_params_setter!();
|
||||
impl_pq_params_setter!();
|
||||
}
|
||||
|
||||
/// Builder for an IVF_HNSW_SQ index.
|
||||
///
|
||||
/// This index is a combination of IVF and HNSW.
|
||||
/// The IVF part is the same as the IVF PQ index.
|
||||
/// For each IVF partition, this builds a HNSW graph, the graph is used to
|
||||
/// quickly find the closest vectors to a query vector.
|
||||
///
|
||||
/// The SQ (scalar quantizer) is used to compress the vectors,
|
||||
/// each vector is mapped to a 8-bit integer vector, 4x compression ratio for float32 vector.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IvfHnswSqIndexBuilder {
|
||||
// IVF
|
||||
pub(crate) distance_type: DistanceType,
|
||||
pub(crate) num_partitions: Option<u32>,
|
||||
pub(crate) sample_rate: u32,
|
||||
pub(crate) max_iterations: u32,
|
||||
|
||||
// HNSW
|
||||
pub(crate) m: u32,
|
||||
pub(crate) ef_construction: u32,
|
||||
// SQ
|
||||
// TODO add num_bits for SQ after it supports another num_bits besides 8
|
||||
}
|
||||
|
||||
impl Default for IvfHnswSqIndexBuilder {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
distance_type: DistanceType::L2,
|
||||
num_partitions: None,
|
||||
sample_rate: 256,
|
||||
max_iterations: 50,
|
||||
m: 20,
|
||||
ef_construction: 300,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl IvfHnswSqIndexBuilder {
|
||||
impl_distance_type_setter!();
|
||||
impl_ivf_params_setter!();
|
||||
impl_hnsw_params_setter!();
|
||||
}
|
||||
|
||||
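Because the setters are generated by one set of macros, every builder is configured with the same chaining style. A sketch with arbitrary parameter values, assuming the builders are reachable at the crate's public `lancedb::index::vector` path:

```rust
use lancedb::index::vector::{IvfHnswSqIndexBuilder, IvfPqIndexBuilder};
use lancedb::DistanceType;

fn main() {
    // IVF PQ: distance type plus the IVF and PQ knobs.
    let _ivf_pq = IvfPqIndexBuilder::default()
        .distance_type(DistanceType::Cosine)
        .num_partitions(256)
        .num_sub_vectors(16);

    // IVF HNSW SQ: the same IVF setters plus the HNSW graph parameters.
    let _ivf_hnsw_sq = IvfHnswSqIndexBuilder::default()
        .num_edges(20)
        .ef_construction(300);
}
```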
@@ -238,6 +238,9 @@ pub enum DistanceType {
    /// distance has a range of (-∞, ∞). If the vectors are normalized (i.e. their
    /// L2 norm is 1), then dot distance is equivalent to the cosine distance.
    Dot,
    /// Hamming distance. Hamming distance is a distance metric that measures
    /// the number of positions at which the corresponding elements are different.
    Hamming,
}

impl From<DistanceType> for LanceDistanceType {
@@ -246,6 +249,7 @@ impl From<DistanceType> for LanceDistanceType {
            DistanceType::L2 => Self::L2,
            DistanceType::Cosine => Self::Cosine,
            DistanceType::Dot => Self::Dot,
            DistanceType::Hamming => Self::Hamming,
        }
    }
}
@@ -256,6 +260,7 @@ impl From<LanceDistanceType> for DistanceType {
            LanceDistanceType::L2 => Self::L2,
            LanceDistanceType::Cosine => Self::Cosine,
            LanceDistanceType::Dot => Self::Dot,
            LanceDistanceType::Hamming => Self::Hamming,
        }
    }
}

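For intuition on the new `Hamming` variant: the metric is just a count of differing positions. A standalone illustration (not part of this diff):

```rust
// Counts positions where corresponding elements differ, which is what the
// `Hamming` doc comment above describes.
fn hamming(a: &[u8], b: &[u8]) -> usize {
    assert_eq!(a.len(), b.len());
    a.iter().zip(b).filter(|(x, y)| x != y).count()
}

fn main() {
    assert_eq!(hamming(&[1, 0, 1, 1], &[1, 1, 1, 0]), 2);
}
```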
@@ -23,12 +23,9 @@ use arrow::datatypes::Float32Type;
use arrow_array::{RecordBatchIterator, RecordBatchReader};
use arrow_schema::{DataType, Field, Schema, SchemaRef};
use async_trait::async_trait;
use chrono::Duration;
use lance::dataset::builder::DatasetBuilder;
use lance::dataset::cleanup::RemovalStats;
use lance::dataset::optimize::{
    compact_files, CompactionMetrics, CompactionOptions, IndexRemapperOptions,
};
use lance::dataset::optimize::{compact_files, CompactionMetrics, IndexRemapperOptions};
use lance::dataset::scanner::{DatasetRecordBatchStream, Scanner};
pub use lance::dataset::ColumnAlteration;
pub use lance::dataset::NewColumnTransform;
@@ -38,8 +35,12 @@ use lance::dataset::{
};
use lance::dataset::{MergeInsertBuilder as LanceMergeInsertBuilder, WhenNotMatchedBySource};
use lance::io::WrappingObjectStore;
use lance_index::vector::hnsw::builder::HnswBuildParams;
use lance_index::vector::ivf::IvfBuildParams;
use lance_index::vector::pq::PQBuildParams;
use lance_index::vector::sq::builder::SQBuildParams;
use lance_index::DatasetIndexExt;
use lance_index::IndexType;
use lance_index::{optimize::OptimizeOptions, DatasetIndexExt};
use log::info;
use serde::{Deserialize, Serialize};
use snafu::whatever;
@@ -48,8 +49,11 @@ use crate::arrow::IntoArrow;
use crate::connection::NoData;
use crate::embeddings::{EmbeddingDefinition, EmbeddingRegistry, MaybeEmbedded, MemoryRegistry};
use crate::error::{Error, Result};
use crate::index::vector::{IvfPqIndexBuilder, VectorIndex, VectorIndexStatistics};
use crate::index::vector::{
    IvfHnswPqIndexBuilder, IvfHnswSqIndexBuilder, IvfPqIndexBuilder, VectorIndex,
};
use crate::index::IndexConfig;
use crate::index::IndexStatistics;
use crate::index::{
    vector::{suggested_num_partitions, suggested_num_sub_vectors},
    Index, IndexBuilder,
@@ -65,6 +69,10 @@ use self::merge::MergeInsertBuilder;
pub(crate) mod dataset;
pub mod merge;

pub use chrono::Duration;
pub use lance::dataset::optimize::CompactionOptions;
pub use lance_index::optimize::OptimizeOptions;

/// Defines the type of column
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ColumnKind {

@@ -145,22 +153,58 @@ impl TableDefinition {
///
/// By default, it optimizes everything, as [`OptimizeAction::All`].
pub enum OptimizeAction {
    /// Run optimization on every, with default options.
    /// Run all optimizations with default values
    All,
    /// Compact files in the dataset
    /// Compacts files in the dataset
    ///
    /// LanceDb uses a readonly filesystem for performance and safe concurrency. Every time
    /// new data is added it will be added into new files. Small files
    /// can hurt both read and write performance. Compaction will merge small files
    /// into larger ones.
    ///
    /// All operations that modify data (add, delete, update, merge insert, etc.) will create
    /// new files. If these operations are run frequently then compaction should run frequently.
    ///
    /// If these operations are never run (search only) then compaction is not necessary.
    Compact {
        options: CompactionOptions,
        remap_options: Option<Arc<dyn IndexRemapperOptions>>,
    },
    /// Prune old versions of datasets.
    /// Prune old versions of datasets
    ///
    /// Every change in LanceDb is additive. When data is removed from a dataset a new version is
    /// created that doesn't contain the removed data. However, the old version, which does contain
    /// the removed data, is left in place. This is necessary for consistency and concurrency and
    /// also enables time travel functionality like the ability to checkout an older version of the
    /// dataset to undo changes.
    ///
    /// Over time, these old versions can consume a lot of disk space. The prune operation will
    /// remove versions of the dataset that are older than a certain age. This will free up the
    /// space used by that old data.
    ///
    /// Once a version is pruned it can no longer be checked out.
    Prune {
        /// The duration of time to keep versions of the dataset.
        older_than: Duration,
        older_than: Option<Duration>,
        /// Because they may be part of an in-progress transaction, files newer than 7 days old are not deleted by default.
        /// If you are sure that there are no in-progress transactions, then you can set this to `true` to delete all files older than `older_than`.
        delete_unverified: Option<bool>,
    },
    /// Optimize index.
    /// Optimize the indices
    ///
    /// This operation optimizes all indices in the table. When new data is added to LanceDb
    /// it is not added to the indices. However, it can still turn up in searches because the search
    /// function will scan both the indexed data and the unindexed data in parallel. Over time, the
    /// unindexed data can become large enough that the search performance is slow. This operation
    /// will add the unindexed data to the indices without rerunning the full index creation process.
    ///
    /// Optimizing an index is faster than re-training the index but it does not typically adjust the
    /// underlying model relied upon by the index. This can eventually lead to poor search accuracy
    /// and so users may still want to occasionally retrain the index after adding a large amount of
    /// data.
    ///
    /// For example, when using IVF, an index will create clusters. Optimizing an index assigns unindexed
    /// data to the existing clusters, but it does not move the clusters or create new clusters.
    Index(OptimizeOptions),
}

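With `older_than` now an `Option<Duration>`, callers can either take the default retention window or override it. A sketch of both spellings, assuming `table` is an open `lancedb::Table`; note that `chrono::Duration::try_days` itself returns an `Option`, so it slots in directly:

```rust
use lancedb::table::{Duration, OptimizeAction};
use lancedb::Table;

async fn prune(table: &Table) -> lancedb::Result<()> {
    // None keeps the default window (7 days, per the unwrap_or further
    // down in this diff).
    table
        .optimize(OptimizeAction::Prune {
            older_than: None,
            delete_unverified: None,
        })
        .await?;

    // Or prune more aggressively, keeping only the last day of versions.
    table
        .optimize(OptimizeAction::Prune {
            older_than: Duration::try_days(1),
            delete_unverified: None,
        })
        .await?;
    Ok(())
}
```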
@@ -312,6 +356,7 @@ impl UpdateBuilder {

#[async_trait]
pub(crate) trait TableInternal: std::fmt::Display + std::fmt::Debug + Send + Sync {
    #[allow(dead_code)]
    fn as_any(&self) -> &dyn std::any::Any;
    /// Cast as [`NativeTable`], or return None if it is not a [`NativeTable`].
    fn as_native(&self) -> Option<&NativeTable>;
@@ -751,10 +796,30 @@ impl Table {

    /// Optimize the on-disk data and indices for better performance.
    ///
    /// Modeled after ``VACUUM`` in PostgreSQL.
    ///
    /// Optimization is discussed in more detail in the [OptimizeAction] documentation
    /// and covers three operations:
    ///
    /// * Compaction: Merges small files into larger ones
    /// * Prune: Removes old versions of the dataset
    /// * Index: Optimizes the indices, adding new data to existing indices
    ///
    /// <section class="warning">Experimental API</section>
    ///
    /// Modeled after ``VACUUM`` in PostgreSQL.
    /// Not all implementations support explicit optimization.
    /// The optimization process is undergoing active development and may change.
    /// Our goal with these changes is to improve the performance of optimization and
    /// reduce the complexity.
    ///
    /// That being said, it is essential today to run optimize if you want the best
    /// performance. It should be stable and safe to use in production, but it is our
    /// hope that the API may be simplified (or not even need to be called) in the future.
    ///
    /// The frequency with which an application should call optimize is based on the frequency of
    /// data modifications. If data is frequently added, deleted, or updated then
    /// optimize should be run frequently. A good rule of thumb is to run optimize if
    /// you have added or modified 100,000 or more records or run more than 20 data
    /// modification operations.
    pub async fn optimize(&self, action: OptimizeAction) -> Result<OptimizeStats> {
        self.inner.optimize(action).await
    }

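A sketch of the rule of thumb above: after a burst of modifications, run everything with defaults (same assumptions as the previous sketch):

```rust
use lancedb::table::OptimizeAction;
use lancedb::Table;

async fn optimize_all(table: &Table) -> lancedb::Result<()> {
    // Compacts small files, prunes old versions, and folds unindexed rows
    // into the existing indices, returning per-step statistics.
    let _stats = table.optimize(OptimizeAction::All).await?;
    Ok(())
}
```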
@@ -1154,7 +1219,7 @@ impl NativeTable {

    pub async fn get_index_type(&self, index_uuid: &str) -> Result<Option<String>> {
        match self.load_index_stats(index_uuid).await? {
            Some(stats) => Ok(Some(stats.index_type)),
            Some(stats) => Ok(Some(stats.index_type.unwrap_or_default())),
            None => Ok(None),
        }
    }
@@ -1165,7 +1230,7 @@ impl NativeTable {
                stats
                    .indices
                    .iter()
                    .map(|i| i.metric_type.clone())
                    .filter_map(|i| i.metric_type.clone())
                    .collect(),
            )),
            None => Ok(None),
@@ -1181,7 +1246,7 @@ impl NativeTable {
            .collect())
    }

    async fn load_index_stats(&self, index_uuid: &str) -> Result<Option<VectorIndexStatistics>> {
    async fn load_index_stats(&self, index_uuid: &str) -> Result<Option<IndexStatistics>> {
        let index = self
            .load_indices()
            .await?
@@ -1192,7 +1257,7 @@ impl NativeTable {
        }
        let dataset = self.dataset.get().await?;
        let index_stats = dataset.index_statistics(&index.unwrap().index_name).await?;
        let index_stats: VectorIndexStatistics = whatever!(
        let index_stats: IndexStatistics = whatever!(
            serde_json::from_str(&index_stats),
            "error deserializing index statistics {index_stats}",
        );
@@ -1238,7 +1303,6 @@ impl NativeTable {
            num_partitions as usize,
            /*num_bits=*/ 8,
            num_sub_vectors as usize,
            false,
            index.distance_type.into(),
            index.max_iterations as usize,
        );

@@ -1254,6 +1318,120 @@ impl NativeTable {
        Ok(())
    }

    async fn create_ivf_hnsw_pq_index(
        &self,
        index: IvfHnswPqIndexBuilder,
        field: &Field,
        replace: bool,
    ) -> Result<()> {
        if !Self::supported_vector_data_type(field.data_type()) {
            return Err(Error::InvalidInput {
                message: format!(
                    "An IVF HNSW PQ index cannot be created on the column `{}` which has data type {}",
                    field.name(),
                    field.data_type()
                ),
            });
        }

        let num_partitions = if let Some(n) = index.num_partitions {
            n
        } else {
            suggested_num_partitions(self.count_rows(None).await?)
        };
        let num_sub_vectors: u32 = if let Some(n) = index.num_sub_vectors {
            n
        } else {
            match field.data_type() {
                arrow_schema::DataType::FixedSizeList(_, n) => {
                    Ok::<u32, Error>(suggested_num_sub_vectors(*n as u32))
                }
                _ => Err(Error::Schema {
                    message: format!("Column '{}' is not a FixedSizeList", field.name()),
                }),
            }?
        };

        let mut dataset = self.dataset.get_mut().await?;
        let mut ivf_params = IvfBuildParams::new(num_partitions as usize);
        ivf_params.sample_rate = index.sample_rate as usize;
        ivf_params.max_iters = index.max_iterations as usize;
        let hnsw_params = HnswBuildParams::default()
            .num_edges(index.m as usize)
            .ef_construction(index.ef_construction as usize);
        let pq_params = PQBuildParams {
            num_sub_vectors: num_sub_vectors as usize,
            ..Default::default()
        };
        let lance_idx_params = lance::index::vector::VectorIndexParams::with_ivf_hnsw_pq_params(
            index.distance_type.into(),
            ivf_params,
            hnsw_params,
            pq_params,
        );
        dataset
            .create_index(
                &[field.name()],
                IndexType::Vector,
                None,
                &lance_idx_params,
                replace,
            )
            .await?;
        Ok(())
    }

    async fn create_ivf_hnsw_sq_index(
        &self,
        index: IvfHnswSqIndexBuilder,
        field: &Field,
        replace: bool,
    ) -> Result<()> {
        if !Self::supported_vector_data_type(field.data_type()) {
            return Err(Error::InvalidInput {
                message: format!(
                    "An IVF HNSW SQ index cannot be created on the column `{}` which has data type {}",
                    field.name(),
                    field.data_type()
                ),
            });
        }

        let num_partitions = if let Some(n) = index.num_partitions {
            n
        } else {
            suggested_num_partitions(self.count_rows(None).await?)
        };

        let mut dataset = self.dataset.get_mut().await?;
        let mut ivf_params = IvfBuildParams::new(num_partitions as usize);
        ivf_params.sample_rate = index.sample_rate as usize;
        ivf_params.max_iters = index.max_iterations as usize;
        let hnsw_params = HnswBuildParams::default()
            .num_edges(index.m as usize)
            .ef_construction(index.ef_construction as usize);
        let sq_params = SQBuildParams {
            sample_rate: index.sample_rate as usize,
            ..Default::default()
        };
        let lance_idx_params = lance::index::vector::VectorIndexParams::with_ivf_hnsw_sq_params(
            index.distance_type.into(),
            ivf_params,
            hnsw_params,
            sq_params,
        );
        dataset
            .create_index(
                &[field.name()],
                IndexType::Vector,
                None,
                &lance_idx_params,
                replace,
            )
            .await?;
        Ok(())
    }

    async fn create_auto_index(&self, field: &Field, opts: IndexBuilder) -> Result<()> {
        if Self::supported_vector_data_type(field.data_type()) {
            self.create_ivf_pq_index(IvfPqIndexBuilder::default(), field, opts.replace)

@@ -1497,6 +1675,14 @@ impl TableInternal for NativeTable {
            Index::Auto => self.create_auto_index(field, opts).await,
            Index::BTree(_) => self.create_btree_index(field, opts).await,
            Index::IvfPq(ivf_pq) => self.create_ivf_pq_index(ivf_pq, field, opts.replace).await,
            Index::IvfHnswPq(ivf_hnsw_pq) => {
                self.create_ivf_hnsw_pq_index(ivf_hnsw_pq, field, opts.replace)
                    .await
            }
            Index::IvfHnswSq(ivf_hnsw_sq) => {
                self.create_ivf_hnsw_sq_index(ivf_hnsw_sq, field, opts.replace)
                    .await
            }
        }
    }

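The new arms are reached through the same public `create_index` entry point as IVF PQ; a sketch mirroring the tests further down, with the column name and builder values arbitrary:

```rust
use lancedb::index::{vector::IvfHnswPqIndexBuilder, Index};
use lancedb::Table;

async fn build_hnsw_pq(table: &Table) -> lancedb::Result<()> {
    // Dispatches to create_ivf_hnsw_pq_index through the match above.
    table
        .create_index(
            &["embeddings"],
            Index::IvfHnswPq(IvfHnswPqIndexBuilder::default().num_edges(20)),
        )
        .execute()
        .await?;
    Ok(())
}
```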
@@ -1592,7 +1778,7 @@ impl TableInternal for NativeTable {
            .compaction;
        stats.prune = self
            .optimize(OptimizeAction::Prune {
                older_than: Duration::try_days(7).unwrap(),
                older_than: None,
                delete_unverified: None,
            })
            .await?
@@ -1611,8 +1797,11 @@ impl TableInternal for NativeTable {
                delete_unverified,
            } => {
                stats.prune = Some(
                    self.cleanup_old_versions(older_than, delete_unverified)
                        .await?,
                    self.cleanup_old_versions(
                        older_than.unwrap_or(Duration::try_days(7).expect("valid delta")),
                        delete_unverified,
                    )
                    .await?,
                );
            }
            OptimizeAction::Index(options) => {

@@ -2335,6 +2524,217 @@ mod tests {
        assert_eq!(table.count_rows(None).await.unwrap(), 512);
        assert_eq!(table.name(), "test");

        let indices = table.as_native().unwrap().load_indices().await.unwrap();
        let index_uuid = &indices[0].index_uuid;
        assert_eq!(
            table
                .as_native()
                .unwrap()
                .count_indexed_rows(index_uuid)
                .await
                .unwrap(),
            Some(512)
        );
        assert_eq!(
            table
                .as_native()
                .unwrap()
                .count_unindexed_rows(index_uuid)
                .await
                .unwrap(),
            Some(0)
        );
        assert_eq!(
            table
                .as_native()
                .unwrap()
                .get_index_type(index_uuid)
                .await
                .unwrap()
                .map(|index_type| index_type.to_string()),
            Some("IVF".to_string())
        );
        assert_eq!(
            table
                .as_native()
                .unwrap()
                .get_distance_type(index_uuid)
                .await
                .unwrap(),
            Some(crate::DistanceType::L2.to_string())
        );
    }

    #[tokio::test]
    async fn test_create_index_ivf_hnsw_sq() {
        use arrow_array::RecordBatch;
        use arrow_schema::{DataType, Field, Schema as ArrowSchema};
        use rand;
        use std::iter::repeat_with;

        use arrow_array::Float32Array;

        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();
        let conn = connect(uri).execute().await.unwrap();

        let dimension = 16;
        let schema = Arc::new(ArrowSchema::new(vec![Field::new(
            "embeddings",
            DataType::FixedSizeList(
                Arc::new(Field::new("item", DataType::Float32, true)),
                dimension,
            ),
            false,
        )]));

        let mut rng = rand::thread_rng();
        let float_arr = Float32Array::from(
            repeat_with(|| rng.gen::<f32>())
                .take(512 * dimension as usize)
                .collect::<Vec<f32>>(),
        );

        let vectors = Arc::new(create_fixed_size_list(float_arr, dimension).unwrap());
        let batches = RecordBatchIterator::new(
            vec![RecordBatch::try_new(schema.clone(), vec![vectors.clone()]).unwrap()]
                .into_iter()
                .map(Ok),
            schema,
        );

        let table = conn.create_table("test", batches).execute().await.unwrap();

        assert_eq!(
            table
                .as_native()
                .unwrap()
                .count_indexed_rows("my_index")
                .await
                .unwrap(),
            None
        );
        assert_eq!(
            table
                .as_native()
                .unwrap()
                .count_unindexed_rows("my_index")
                .await
                .unwrap(),
            None
        );

        let index = IvfHnswSqIndexBuilder::default();
        table
            .create_index(&["embeddings"], Index::IvfHnswSq(index))
            .execute()
            .await
            .unwrap();

        let index_configs = table.list_indices().await.unwrap();
        assert_eq!(index_configs.len(), 1);
        let index = index_configs.into_iter().next().unwrap();
        assert_eq!(index.index_type, crate::index::IndexType::IvfPq);
        assert_eq!(index.columns, vec!["embeddings".to_string()]);
        assert_eq!(table.count_rows(None).await.unwrap(), 512);
        assert_eq!(table.name(), "test");

        let indices = table.as_native().unwrap().load_indices().await.unwrap();
        let index_uuid = &indices[0].index_uuid;
        assert_eq!(
            table
                .as_native()
                .unwrap()
                .count_indexed_rows(index_uuid)
                .await
                .unwrap(),
            Some(512)
        );
        assert_eq!(
            table
                .as_native()
                .unwrap()
                .count_unindexed_rows(index_uuid)
                .await
                .unwrap(),
            Some(0)
        );
    }

    #[tokio::test]
    async fn test_create_index_ivf_hnsw_pq() {
        use arrow_array::RecordBatch;
        use arrow_schema::{DataType, Field, Schema as ArrowSchema};
        use rand;
        use std::iter::repeat_with;

        use arrow_array::Float32Array;

        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();
        let conn = connect(uri).execute().await.unwrap();

        let dimension = 16;
        let schema = Arc::new(ArrowSchema::new(vec![Field::new(
            "embeddings",
            DataType::FixedSizeList(
                Arc::new(Field::new("item", DataType::Float32, true)),
                dimension,
            ),
            false,
        )]));

        let mut rng = rand::thread_rng();
        let float_arr = Float32Array::from(
            repeat_with(|| rng.gen::<f32>())
                .take(512 * dimension as usize)
                .collect::<Vec<f32>>(),
        );

        let vectors = Arc::new(create_fixed_size_list(float_arr, dimension).unwrap());
        let batches = RecordBatchIterator::new(
            vec![RecordBatch::try_new(schema.clone(), vec![vectors.clone()]).unwrap()]
                .into_iter()
                .map(Ok),
            schema,
        );

        let table = conn.create_table("test", batches).execute().await.unwrap();

        assert_eq!(
            table
                .as_native()
                .unwrap()
                .count_indexed_rows("my_index")
                .await
                .unwrap(),
            None
        );
        assert_eq!(
            table
                .as_native()
                .unwrap()
                .count_unindexed_rows("my_index")
                .await
                .unwrap(),
            None
        );

        let index = IvfHnswPqIndexBuilder::default();
        table
            .create_index(&["embeddings"], Index::IvfHnswPq(index))
            .execute()
            .await
            .unwrap();

        let index_configs = table.list_indices().await.unwrap();
        assert_eq!(index_configs.len(), 1);
        let index = index_configs.into_iter().next().unwrap();
        assert_eq!(index.index_type, crate::index::IndexType::IvfPq);
        assert_eq!(index.columns, vec!["embeddings".to_string()]);
        assert_eq!(table.count_rows(None).await.unwrap(), 512);
        assert_eq!(table.name(), "test");

        let indices = table.as_native().unwrap().load_indices().await.unwrap();
        let index_uuid = &indices[0].index_uuid;
        assert_eq!(
@@ -2428,6 +2828,27 @@ mod tests {
        let index = index_configs.into_iter().next().unwrap();
        assert_eq!(index.index_type, crate::index::IndexType::BTree);
        assert_eq!(index.columns, vec!["i".to_string()]);

        let indices = table.as_native().unwrap().load_indices().await.unwrap();
        let index_uuid = &indices[0].index_uuid;
        assert_eq!(
            table
                .as_native()
                .unwrap()
                .count_indexed_rows(index_uuid)
                .await
                .unwrap(),
            Some(1)
        );
        assert_eq!(
            table
                .as_native()
                .unwrap()
                .count_unindexed_rows(index_uuid)
                .await
                .unwrap(),
            Some(0)
        );
    }

    #[tokio::test]