Compare commits

1 commit

Author  SHA1        Message  Date
rmeng   24526bda4c  patch    2024-05-15 13:44:27 -04:00
177 changed files with 8907 additions and 21187 deletions

.bumpversion.cfg Normal file

@@ -0,0 +1,22 @@
[bumpversion]
current_version = 0.4.20
commit = True
message = Bump version: {current_version} → {new_version}
tag = True
tag_name = v{new_version}
[bumpversion:file:node/package.json]
[bumpversion:file:nodejs/package.json]
[bumpversion:file:nodejs/npm/darwin-x64/package.json]
[bumpversion:file:nodejs/npm/darwin-arm64/package.json]
[bumpversion:file:nodejs/npm/linux-x64-gnu/package.json]
[bumpversion:file:nodejs/npm/linux-arm64-gnu/package.json]
[bumpversion:file:rust/ffi/node/Cargo.toml]
[bumpversion:file:rust/lancedb/Cargo.toml]
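Note: the config above drives bump2version. As a hedged sketch (illustrative values, not the tool's implementation), a plain `patch` bump from the `current_version` above would produce:

```python
# Sketch of what `bumpversion patch` does with the .bumpversion.cfg above
# (illustrative only; bump2version itself performs the file rewrites).
current_version = "0.4.20"
major, minor, patch = (int(part) for part in current_version.split("."))
new_version = f"{major}.{minor}.{patch + 1}"                  # "0.4.21"
message = f"Bump version: {current_version} → {new_version}"  # commit message
tag_name = f"v{new_version}"                                  # tag "v0.4.21"
print(message, tag_name)
```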


@@ -1,57 +0,0 @@
[tool.bumpversion]
current_version = "0.6.0"
parse = """(?x)
(?P<major>0|[1-9]\\d*)\\.
(?P<minor>0|[1-9]\\d*)\\.
(?P<patch>0|[1-9]\\d*)
(?:-(?P<pre_l>[a-zA-Z-]+)\\.(?P<pre_n>0|[1-9]\\d*))?
"""
serialize = [
"{major}.{minor}.{patch}-{pre_l}.{pre_n}",
"{major}.{minor}.{patch}",
]
search = "{current_version}"
replace = "{new_version}"
regex = false
ignore_missing_version = false
ignore_missing_files = false
tag = true
sign_tags = false
tag_name = "v{new_version}"
tag_message = "Bump version: {current_version} → {new_version}"
allow_dirty = true
commit = true
message = "Bump version: {current_version} → {new_version}"
commit_args = ""
[tool.bumpversion.parts.pre_l]
values = ["beta", "final"]
optional_value = "final"
[[tool.bumpversion.files]]
filename = "node/package.json"
search = "\"version\": \"{current_version}\","
replace = "\"version\": \"{new_version}\","
[[tool.bumpversion.files]]
filename = "nodejs/package.json"
search = "\"version\": \"{current_version}\","
replace = "\"version\": \"{new_version}\","
# nodejs binary packages
[[tool.bumpversion.files]]
glob = "nodejs/npm/*/package.json"
search = "\"version\": \"{current_version}\","
replace = "\"version\": \"{new_version}\","
# Cargo files
# ------------
[[tool.bumpversion.files]]
filename = "rust/ffi/node/Cargo.toml"
search = "\nversion = \"{current_version}\""
replace = "\nversion = \"{new_version}\""
[[tool.bumpversion.files]]
filename = "rust/lancedb/Cargo.toml"
search = "\nversion = \"{current_version}\""
replace = "\nversion = \"{new_version}\""

.github/release.yml vendored Normal file

@@ -0,0 +1,25 @@
# TODO: create separate templates for Python and other releases.
changelog:
  exclude:
    labels:
      - ci
      - chore
  categories:
    - title: Breaking Changes 🛠
      labels:
        - breaking-change
    - title: New Features 🎉
      labels:
        - enhancement
    - title: Bug Fixes 🐛
      labels:
        - bug
    - title: Documentation 📚
      labels:
        - documentation
    - title: Performance Improvements 🚀
      labels:
        - performance
    - title: Other Changes
      labels:
        - "*"


@@ -1,41 +0,0 @@
{
  "ignore_labels": ["chore"],
  "pr_template": "- ${{TITLE}} by @${{AUTHOR}} in ${{URL}}",
  "categories": [
    {
      "title": "## 🏆 Highlights",
      "labels": ["highlight"]
    },
    {
      "title": "## 🛠 Breaking Changes",
      "labels": ["breaking-change"]
    },
    {
      "title": "## ⚠️ Deprecations ",
      "labels": ["deprecation"]
    },
    {
      "title": "## 🎉 New Features",
      "labels": ["enhancement"]
    },
    {
      "title": "## 🐛 Bug Fixes",
      "labels": ["bug"]
    },
    {
      "title": "## 📚 Documentation",
      "labels": ["documentation"]
    },
    {
      "title": "## 🚀 Performance Improvements",
      "labels": ["performance"]
    },
    {
      "title": "## Other Changes"
    },
    {
      "title": "## 🔧 Build and CI",
      "labels": ["ci"]
    }
  ]
}


@@ -46,7 +46,6 @@ runs:
     with:
       command: build
       working-directory: python
-      docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
       target: aarch64-unknown-linux-gnu
       manylinux: ${{ inputs.manylinux }}
       args: ${{ inputs.args }}


@@ -21,6 +21,5 @@ runs:
     with:
       command: build
       args: ${{ inputs.args }}
-      docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
       working-directory: python
       interpreter: 3.${{ inputs.python-minor-version }}


@@ -26,7 +26,6 @@ runs:
     with:
       command: build
       args: ${{ inputs.args }}
-      docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
       working-directory: python
   - uses: actions/upload-artifact@v3
     with:


@@ -1,12 +1,8 @@
 name: Cargo Publish
 on:
-  push:
-    tags-ignore:
-      # We don't publish pre-releases for Rust. Crates.io is just a source
-      # distribution, so we don't need to publish pre-releases.
-      - 'v*-beta*'
-      - '*-v*' # for example, python-vX.Y.Z
+  release:
+    types: [ published ]
 env:
   # This env var is used by Swatinem/rust-cache@v2 for the cache


@@ -24,7 +24,7 @@ env:
 jobs:
   test-python:
     name: Test doc python code
-    runs-on: "warp-ubuntu-latest-x64-4x"
+    runs-on: "buildjet-8vcpu-ubuntu-2204"
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -56,7 +56,7 @@ jobs:
           for d in *; do cd "$d"; echo "$d".py; python "$d".py; cd ..; done
   test-node:
     name: Test doc nodejs code
-    runs-on: "warp-ubuntu-latest-x64-4x"
+    runs-on: "buildjet-8vcpu-ubuntu-2204"
     timeout-minutes: 60
     strategy:
       fail-fast: false


@@ -1,85 +0,0 @@
name: Build and Run Java JNI Tests
on:
  push:
    branches:
      - main
  pull_request:
    paths:
      - java/**
      - rust/**
      - .github/workflows/java.yml
env:
  # This env var is used by Swatinem/rust-cache@v2 for the cache
  # key, so we set it to make sure it is always consistent.
  CARGO_TERM_COLOR: always
  # Disable full debug symbol generation to speed up CI build and keep memory down
  # "1" means line tables only, which is useful for panic tracebacks.
  RUSTFLAGS: "-C debuginfo=1"
  RUST_BACKTRACE: "1"
  # according to: https://matklad.github.io/2021/09/04/fast-rust-builds.html
  # CI builds are faster with incremental disabled.
  CARGO_INCREMENTAL: "0"
  CARGO_BUILD_JOBS: "1"
jobs:
  linux-build:
    runs-on: ubuntu-22.04
    name: ubuntu-22.04 + Java 11 & 17
    defaults:
      run:
        working-directory: ./java
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - uses: Swatinem/rust-cache@v2
        with:
          workspaces: java/core/lancedb-jni
      - name: Run cargo fmt
        run: cargo fmt --check
        working-directory: ./java/core/lancedb-jni
      - name: Install dependencies
        run: |
          sudo apt update
          sudo apt install -y protobuf-compiler libssl-dev
      - name: Install Java 17
        uses: actions/setup-java@v4
        with:
          distribution: temurin
          java-version: 17
          cache: "maven"
      - run: echo "JAVA_17=$JAVA_HOME" >> $GITHUB_ENV
      - name: Install Java 11
        uses: actions/setup-java@v4
        with:
          distribution: temurin
          java-version: 11
          cache: "maven"
      - name: Java Style Check
        run: mvn checkstyle:check
      # Disable because of issues in lancedb rust core code
      # - name: Rust Clippy
      #   working-directory: java/core/lancedb-jni
      #   run: cargo clippy --all-targets -- -D warnings
      - name: Running tests with Java 11
        run: mvn clean test
      - name: Running tests with Java 17
        run: |
          export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS \
            -XX:+IgnoreUnrecognizedVMOptions \
            --add-opens=java.base/java.lang=ALL-UNNAMED \
            --add-opens=java.base/java.lang.invoke=ALL-UNNAMED \
            --add-opens=java.base/java.lang.reflect=ALL-UNNAMED \
            --add-opens=java.base/java.io=ALL-UNNAMED \
            --add-opens=java.base/java.net=ALL-UNNAMED \
            --add-opens=java.base/java.nio=ALL-UNNAMED \
            --add-opens=java.base/java.util=ALL-UNNAMED \
            --add-opens=java.base/java.util.concurrent=ALL-UNNAMED \
            --add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED \
            --add-opens=java.base/jdk.internal.ref=ALL-UNNAMED \
            --add-opens=java.base/sun.nio.ch=ALL-UNNAMED \
            --add-opens=java.base/sun.nio.cs=ALL-UNNAMED \
            --add-opens=java.base/sun.security.action=ALL-UNNAMED \
            --add-opens=java.base/sun.util.calendar=ALL-UNNAMED \
            --add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED \
            -Djdk.reflect.useDirectMethodHandle=false \
            -Dio.netty.tryReflectionSetAccessible=true"
          JAVA_HOME=$JAVA_17 mvn clean test


@@ -1,62 +1,37 @@
 name: Create release commit
-# This workflow increments versions, tags the version, and pushes it.
-# When a tag is pushed, another workflow is triggered that creates a GH release
-# and uploads the binaries. This workflow is only for creating the tag.
-# This script will enforce that a minor version is incremented if there are any
-# breaking changes since the last minor increment. However, it isn't able to
-# differentiate between breaking changes in Node versus Python. If you wish to
-# bypass this check, you can manually increment the version and push the tag.
 on:
   workflow_dispatch:
     inputs:
       dry_run:
         description: 'Dry run (create the local commit/tags but do not push it)'
         required: true
-        default: false
-        type: boolean
-      type:
-        description: 'What kind of release is this?'
-        required: true
-        default: 'preview'
+        default: "false"
         type: choice
         options:
-          - preview
-          - stable
-      python:
-        description: 'Make a Python release'
+          - "true"
+          - "false"
+      part:
+        description: 'What kind of release is this?'
         required: true
-        default: true
-        type: boolean
-      other:
-        description: 'Make a Node/Rust release'
-        required: true
-        default: true
-        type: boolean
-      bump-minor:
-        description: 'Bump minor version'
-        required: true
-        default: false
-        type: boolean
+        default: 'patch'
+        type: choice
+        options:
+          - patch
+          - minor
+          - major
 jobs:
-  make-release:
-    # Creates tag and GH release. The GH release will trigger the build and release jobs.
+  bump-version:
     runs-on: ubuntu-latest
-    permissions:
-      contents: write
     steps:
-      - name: Output Inputs
-        run: echo "${{ toJSON(github.event.inputs) }}"
-      - uses: actions/checkout@v4
+      - name: Check out main
+        uses: actions/checkout@v4
         with:
+          ref: main
+          persist-credentials: false
           fetch-depth: 0
           lfs: true
-          # It's important we use our token here, as the default token will NOT
-          # trigger any workflows watching for new tags. See:
-          # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
-          token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
       - name: Set git configs for bumpversion
         shell: bash
         run: |
@@ -66,34 +41,19 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: "3.11"
-      - name: Bump Python version
-        if: ${{ inputs.python }}
-        working-directory: python
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Bump version, create tag and commit
         run: |
-          # Need to get the commit before bumping the version, so we can
-          # determine if there are breaking changes in the next step as well.
-          echo "COMMIT_BEFORE_BUMP=$(git rev-parse HEAD)" >> $GITHUB_ENV
-          pip install bump-my-version PyGithub packaging
-          bash ../ci/bump_version.sh ${{ inputs.type }} ${{ inputs.bump-minor }} python-v
-      - name: Bump Node/Rust version
-        if: ${{ inputs.other }}
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          pip install bump-my-version PyGithub packaging
-          bash ci/bump_version.sh ${{ inputs.type }} ${{ inputs.bump-minor }} v $COMMIT_BEFORE_BUMP
-      - name: Push new version tag
-        if: ${{ !inputs.dry_run }}
+          pip install bump2version
+          bumpversion --verbose ${{ inputs.part }}
+      - name: Push new version and tag
+        if: ${{ inputs.dry_run }} == "false"
         uses: ad-m/github-push-action@master
         with:
-          # Need to use PAT here too to trigger next workflow. See comment above.
           github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
-          branch: ${{ github.ref }}
+          branch: main
           tags: true
       - uses: ./.github/workflows/update_package_lock
-        if: ${{ !inputs.dry_run && inputs.other }}
+        if: ${{ inputs.dry_run }} == "false"
         with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
+          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}


@@ -1,9 +1,8 @@
 name: NPM Publish
 on:
-  push:
-    tags:
-      - "v*"
+  release:
+    types: [published]
 jobs:
   node:
@@ -111,11 +110,12 @@ jobs:
             runner: ubuntu-latest
           - arch: aarch64
             # For successful fat LTO builds, we need a large runner to avoid OOM errors.
-            runner: warp-ubuntu-latest-arm64-4x
+            runner: buildjet-16vcpu-ubuntu-2204-arm
     steps:
       - name: Checkout
         uses: actions/checkout@v4
-      # To avoid OOM errors on ARM, we create a swap file.
+      # Buildjet aarch64 runners have only 1.5 GB RAM per core, vs 3.5 GB per core for
+      # x86_64 runners. To avoid OOM errors on ARM, we create a swap file.
       - name: Configure aarch64 build
         if: ${{ matrix.config.arch == 'aarch64' }}
         run: |
@@ -274,15 +274,9 @@ jobs:
         env:
           NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
         run: |
-          # Tag beta as "preview" instead of default "latest". See lancedb
-          # npm publish step for more info.
-          if [[ $GITHUB_REF =~ refs/tags/v(.*)-beta.* ]]; then
-            PUBLISH_ARGS="--tag preview"
-          fi
           mv */*.tgz .
           for filename in *.tgz; do
-            npm publish $PUBLISH_ARGS $filename
+            npm publish $filename
           done
   release-nodejs:
@@ -322,23 +316,11 @@ jobs:
       - name: Publish to NPM
         env:
           NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
-        # By default, things are published to the latest tag. This is what is
-        # installed by default if the user does not specify a version. This is
-        # good for stable releases, but for pre-releases, we want to publish to
-        # the "preview" tag so they can install with `npm install lancedb@preview`.
-        # See: https://medium.com/@mbostock/prereleases-and-npm-e778fc5e2420
-        run: |
-          if [[ $GITHUB_REF =~ refs/tags/v(.*)-beta.* ]]; then
-            npm publish --access public --tag preview
-          else
-            npm publish --access public
-          fi
+        run: npm publish --access public
   update-package-lock:
     needs: [release]
     runs-on: ubuntu-latest
-    permissions:
-      contents: write
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -349,13 +331,11 @@ jobs:
           lfs: true
       - uses: ./.github/workflows/update_package_lock
         with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
+          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
   update-package-lock-nodejs:
     needs: [release-nodejs]
     runs-on: ubuntu-latest
-    permissions:
-      contents: write
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -366,70 +346,4 @@ jobs:
           lfs: true
       - uses: ./.github/workflows/update_package_lock_nodejs
         with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
+          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
-  gh-release:
-    runs-on: ubuntu-latest
-    permissions:
-      contents: write
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          lfs: true
-      - name: Extract version
-        id: extract_version
-        env:
-          GITHUB_REF: ${{ github.ref }}
-        run: |
-          set -e
-          echo "Extracting tag and version from $GITHUB_REF"
-          if [[ $GITHUB_REF =~ refs/tags/v(.*) ]]; then
-            VERSION=${BASH_REMATCH[1]}
-            TAG=v$VERSION
-            echo "tag=$TAG" >> $GITHUB_OUTPUT
-            echo "version=$VERSION" >> $GITHUB_OUTPUT
-          else
-            echo "Failed to extract version from $GITHUB_REF"
-            exit 1
-          fi
-          echo "Extracted version $VERSION from $GITHUB_REF"
-          if [[ $VERSION =~ beta ]]; then
-            echo "This is a beta release"
-            # Get last release (that is not this one)
-            FROM_TAG=$(git tag --sort='version:refname' \
-              | grep ^v \
-              | grep -vF "$TAG" \
-              | python ci/semver_sort.py v \
-              | tail -n 1)
-          else
-            echo "This is a stable release"
-            # Get last stable tag (ignore betas)
-            FROM_TAG=$(git tag --sort='version:refname' \
-              | grep ^v \
-              | grep -vF "$TAG" \
-              | grep -v beta \
-              | python ci/semver_sort.py v \
-              | tail -n 1)
-          fi
-          echo "Found from tag $FROM_TAG"
-          echo "from_tag=$FROM_TAG" >> $GITHUB_OUTPUT
-      - name: Create Release Notes
-        id: release_notes
-        uses: mikepenz/release-changelog-builder-action@v4
-        with:
-          configuration: .github/release_notes.json
-          toTag: ${{ steps.extract_version.outputs.tag }}
-          fromTag: ${{ steps.extract_version.outputs.from_tag }}
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Create GH release
-        uses: softprops/action-gh-release@v2
-        with:
-          prerelease: ${{ contains('beta', github.ref) }}
-          tag_name: ${{ steps.extract_version.outputs.tag }}
-          token: ${{ secrets.GITHUB_TOKEN }}
-          generate_release_notes: false
-          name: Node/Rust LanceDB v${{ steps.extract_version.outputs.version }}
-          body: ${{ steps.release_notes.outputs.changelog }}


@@ -1,16 +1,18 @@
 name: PyPI Publish
 on:
-  push:
-    tags:
-      - 'python-v*'
+  release:
+    types: [published]
 jobs:
   linux:
+    # Only runs on tags that matches the python-make-release action
+    if: startsWith(github.ref, 'refs/tags/python-v')
     name: Python ${{ matrix.config.platform }} manylinux${{ matrix.config.manylinux }}
     timeout-minutes: 60
     strategy:
       matrix:
+        python-minor-version: ["8"]
         config:
           - platform: x86_64
             manylinux: "2_17"
@@ -32,22 +34,25 @@
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
-          python-version: 3.8
+          python-version: 3.${{ matrix.python-minor-version }}
       - uses: ./.github/workflows/build_linux_wheel
         with:
-          python-minor-version: 8
+          python-minor-version: ${{ matrix.python-minor-version }}
           args: "--release --strip ${{ matrix.config.extra_args }}"
           arm-build: ${{ matrix.config.platform == 'aarch64' }}
           manylinux: ${{ matrix.config.manylinux }}
       - uses: ./.github/workflows/upload_wheel
         with:
-          pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
-          fury_token: ${{ secrets.FURY_TOKEN }}
+          token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
+          repo: "pypi"
   mac:
+    # Only runs on tags that matches the python-make-release action
+    if: startsWith(github.ref, 'refs/tags/python-v')
     timeout-minutes: 60
     runs-on: ${{ matrix.config.runner }}
     strategy:
       matrix:
+        python-minor-version: ["8"]
         config:
           - target: x86_64-apple-darwin
             runner: macos-13
@@ -58,6 +63,7 @@
     steps:
       - uses: actions/checkout@v4
         with:
+          ref: ${{ inputs.ref }}
          fetch-depth: 0
          lfs: true
       - name: Set up Python
@@ -66,95 +72,38 @@
           python-version: 3.12
       - uses: ./.github/workflows/build_mac_wheel
         with:
-          python-minor-version: 8
+          python-minor-version: ${{ matrix.python-minor-version }}
           args: "--release --strip --target ${{ matrix.config.target }} --features fp16kernels"
       - uses: ./.github/workflows/upload_wheel
         with:
-          pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
-          fury_token: ${{ secrets.FURY_TOKEN }}
+          python-minor-version: ${{ matrix.python-minor-version }}
+          token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
+          repo: "pypi"
   windows:
+    # Only runs on tags that matches the python-make-release action
+    if: startsWith(github.ref, 'refs/tags/python-v')
     timeout-minutes: 60
     runs-on: windows-latest
+    strategy:
+      matrix:
+        python-minor-version: ["8"]
     steps:
       - uses: actions/checkout@v4
         with:
+          ref: ${{ inputs.ref }}
           fetch-depth: 0
           lfs: true
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
-          python-version: 3.8
+          python-version: 3.${{ matrix.python-minor-version }}
       - uses: ./.github/workflows/build_windows_wheel
         with:
-          python-minor-version: 8
+          python-minor-version: ${{ matrix.python-minor-version }}
           args: "--release --strip"
           vcpkg_token: ${{ secrets.VCPKG_GITHUB_PACKAGES }}
       - uses: ./.github/workflows/upload_wheel
         with:
-          pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
+          python-minor-version: ${{ matrix.python-minor-version }}
+          token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
+          repo: "pypi"
-  gh-release:
-    runs-on: ubuntu-latest
-    permissions:
-      contents: write
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          lfs: true
-      - name: Extract version
-        id: extract_version
-        env:
-          GITHUB_REF: ${{ github.ref }}
-        run: |
-          set -e
-          echo "Extracting tag and version from $GITHUB_REF"
-          if [[ $GITHUB_REF =~ refs/tags/python-v(.*) ]]; then
-            VERSION=${BASH_REMATCH[1]}
-            TAG=python-v$VERSION
-            echo "tag=$TAG" >> $GITHUB_OUTPUT
-            echo "version=$VERSION" >> $GITHUB_OUTPUT
-          else
-            echo "Failed to extract version from $GITHUB_REF"
-            exit 1
-          fi
-          echo "Extracted version $VERSION from $GITHUB_REF"
-          if [[ $VERSION =~ beta ]]; then
-            echo "This is a beta release"
-            # Get last release (that is not this one)
-            FROM_TAG=$(git tag --sort='version:refname' \
-              | grep ^python-v \
-              | grep -vF "$TAG" \
-              | python ci/semver_sort.py python-v \
-              | tail -n 1)
-          else
-            echo "This is a stable release"
-            # Get last stable tag (ignore betas)
-            FROM_TAG=$(git tag --sort='version:refname' \
-              | grep ^python-v \
-              | grep -vF "$TAG" \
-              | grep -v beta \
-              | python ci/semver_sort.py python-v \
-              | tail -n 1)
-          fi
-          echo "Found from tag $FROM_TAG"
-          echo "from_tag=$FROM_TAG" >> $GITHUB_OUTPUT
-      - name: Create Python Release Notes
-        id: python_release_notes
-        uses: mikepenz/release-changelog-builder-action@v4
-        with:
-          configuration: .github/release_notes.json
-          toTag: ${{ steps.extract_version.outputs.tag }}
-          fromTag: ${{ steps.extract_version.outputs.from_tag }}
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Create Python GH release
-        uses: softprops/action-gh-release@v2
-        with:
-          prerelease: ${{ contains('beta', github.ref) }}
-          tag_name: ${{ steps.extract_version.outputs.tag }}
-          token: ${{ secrets.GITHUB_TOKEN }}
-          generate_release_notes: false
-          name: Python LanceDB v${{ steps.extract_version.outputs.version }}
-          body: ${{ steps.python_release_notes.outputs.changelog }}


@@ -0,0 +1,56 @@
name: Python - Create release commit
on:
  workflow_dispatch:
    inputs:
      dry_run:
        description: 'Dry run (create the local commit/tags but do not push it)'
        required: true
        default: "false"
        type: choice
        options:
          - "true"
          - "false"
      part:
        description: 'What kind of release is this?'
        required: true
        default: 'patch'
        type: choice
        options:
          - patch
          - minor
          - major
jobs:
  bump-version:
    runs-on: ubuntu-latest
    steps:
      - name: Check out main
        uses: actions/checkout@v4
        with:
          ref: main
          persist-credentials: false
          fetch-depth: 0
          lfs: true
      - name: Set git configs for bumpversion
        shell: bash
        run: |
          git config user.name 'Lance Release'
          git config user.email 'lance-dev@lancedb.com'
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - name: Bump version, create tag and commit
        working-directory: python
        run: |
          pip install bump2version
          bumpversion --verbose ${{ inputs.part }}
      - name: Push new version and tag
        if: ${{ inputs.dry_run }} == "false"
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
          branch: main
          tags: true


@@ -65,7 +65,7 @@ jobs:
       workspaces: python
     - name: Install
       run: |
-        pip install --extra-index-url https://pypi.fury.io/lancedb/ -e .[tests,dev,embeddings]
+        pip install -e .[tests,dev,embeddings]
         pip install tantivy
         pip install mlx
     - name: Doctest
@@ -75,7 +75,7 @@
     timeout-minutes: 30
     strategy:
       matrix:
-        python-minor-version: ["9", "11"]
+        python-minor-version: ["8", "11"]
     runs-on: "ubuntu-22.04"
     defaults:
       run:
@@ -189,7 +189,7 @@
       - name: Install lancedb
         run: |
           pip install "pydantic<2"
-          pip install --extra-index-url https://pypi.fury.io/lancedb/ -e .[tests]
+          pip install -e .[tests]
           pip install tantivy
       - name: Run tests
         run: pytest -m "not slow and not s3_test" -x -v --durations=30 python/tests


@@ -15,7 +15,7 @@ runs:
     - name: Install lancedb
       shell: bash
       run: |
-        pip3 install --extra-index-url https://pypi.fury.io/lancedb/ $(ls target/wheels/lancedb-*.whl)[tests,dev]
+        pip3 install $(ls target/wheels/lancedb-*.whl)[tests,dev]
     - name: Setup localstack for integration tests
       if: ${{ inputs.integration == 'true' }}
       shell: bash


@@ -74,11 +74,11 @@ jobs:
         run: |
           sudo apt update
           sudo apt install -y protobuf-compiler libssl-dev
-      - name: Build
-        run: cargo build --all-features
       - name: Start S3 integration test environment
         working-directory: .
         run: docker compose up --detach --wait
+      - name: Build
+        run: cargo build --all-features
       - name: Run tests
         run: cargo test --all-features
       - name: Run examples


@@ -2,43 +2,28 @@ name: upload-wheel
description: "Upload wheels to Pypi" description: "Upload wheels to Pypi"
inputs: inputs:
pypi_token: os:
required: true
description: "ubuntu-22.04 or macos-13"
repo:
required: false
description: "pypi or testpypi"
default: "pypi"
token:
required: true required: true
description: "release token for the repo" description: "release token for the repo"
fury_token:
required: true
description: "release token for the fury repo"
runs: runs:
using: "composite" using: "composite"
steps: steps:
- name: Install dependencies - name: Install dependencies
shell: bash shell: bash
run: | run: |
python -m pip install --upgrade pip python -m pip install --upgrade pip
pip install twine pip install twine
- name: Choose repo - name: Publish wheel
shell: bash env:
id: choose_repo TWINE_USERNAME: __token__
run: | TWINE_PASSWORD: ${{ inputs.token }}
if [ ${{ github.ref }} == "*beta*" ]; then shell: bash
echo "repo=fury" >> $GITHUB_OUTPUT run: twine upload --repository ${{ inputs.repo }} target/wheels/lancedb-*.whl
else
echo "repo=pypi" >> $GITHUB_OUTPUT
fi
- name: Publish to PyPI
shell: bash
env:
FURY_TOKEN: ${{ inputs.fury_token }}
PYPI_TOKEN: ${{ inputs.pypi_token }}
run: |
if [ ${{ steps.choose_repo.outputs.repo }} == "fury" ]; then
WHEEL=$(ls target/wheels/lancedb-*.whl 2> /dev/null | head -n 1)
echo "Uploading $WHEEL to Fury"
curl -f -F package=@$WHEEL https://$FURY_TOKEN@push.fury.io/lancedb/
else
twine upload --repository ${{ steps.choose_repo.outputs.repo }} \
--username __token__ \
--password $PYPI_TOKEN \
target/wheels/lancedb-*.whl
fi


@@ -14,7 +14,7 @@ repos:
     hooks:
       - id: local-biome-check
         name: biome check
-        entry: npx @biomejs/biome@1.8.3 check --config-path nodejs/biome.json nodejs/
+        entry: npx biome check
         language: system
         types: [text]
         files: "nodejs/.*"


@@ -1,11 +1,5 @@
 [workspace]
-members = [
-    "rust/ffi/node",
-    "rust/lancedb",
-    "nodejs",
-    "python",
-    "java/core/lancedb-jni",
-]
+members = ["rust/ffi/node", "rust/lancedb", "nodejs", "python"]
 # Python package needs to be built by maturin.
 exclude = ["python"]
 resolver = "2"
@@ -20,18 +14,10 @@ keywords = ["lancedb", "lance", "database", "vector", "search"]
 categories = ["database-implementations"]
 [workspace.dependencies]
-# lance = { "version" = "=0.14.0", "features" = ["dynamodb"] }
-# lance-index = { "version" = "=0.14.0" }
-# lance-linalg = { "version" = "=0.14.0" }
-# lance-testing = { "version" = "=0.14.0" }
-# lance-datafusion = { "version" = "=0.14.0" }
-lance = { path = "../lance/rust/lance", "features" = ["dynamodb"] }
-lance-index = { path = "../lance/rust/lance-index" }
-lance-linalg = { path = "../lance/rust/lance-linalg" }
-lance-testing = { path = "../lance/rust/lance-testing" }
-lance-datafusion = { path = "../lance/rust/lance-datafusion" }
+lance = { "version" = "=0.10.18", "features" = ["dynamodb"] }
+lance-index = { "version" = "=0.10.18" }
+lance-linalg = { "version" = "=0.10.18" }
+lance-testing = { "version" = "=0.10.18" }
 # Note that this one does not include pyarrow
 arrow = { version = "51.0", optional = false }
 arrow-array = "51.0"
@@ -43,7 +29,6 @@ arrow-arith = "51.0"
 arrow-cast = "51.0"
 async-trait = "0"
 chrono = "0.4.35"
-datafusion-physical-plan = "37.1"
 half = { "version" = "=2.4.1", default-features = false, features = [
     "num-traits",
 ] }


@@ -83,5 +83,5 @@ result = table.search([100, 100]).limit(2).to_pandas()
 ```
 ## Blogs, Tutorials & Videos
-* 📈 <a href="https://blog.lancedb.com/benchmarking-random-access-in-lance/">2000x better performance with Lance over Parquet</a>
+* 📈 <a href="https://blog.eto.ai/benchmarking-random-access-in-lance-ed690757a826">2000x better performance with Lance over Parquet</a>
 * 🤖 <a href="https://github.com/lancedb/lancedb/blob/main/docs/src/notebooks/youtube_transcript_search.ipynb">Build a question and answer bot with LanceDB</a>


@@ -1,51 +0,0 @@
set -e

RELEASE_TYPE=${1:-"stable"}
BUMP_MINOR=${2:-false}
TAG_PREFIX=${3:-"v"} # Such as "python-v"
HEAD_SHA=${4:-$(git rev-parse HEAD)}

readonly SELF_DIR=$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

PREV_TAG=$(git tag --sort='version:refname' | grep ^$TAG_PREFIX | python $SELF_DIR/semver_sort.py $TAG_PREFIX | tail -n 1)
echo "Found previous tag $PREV_TAG"

# Initially, we don't want to tag if we are doing stable, because we will bump
# again later. See comment at end for why.
if [[ "$RELEASE_TYPE" == 'stable' ]]; then
  BUMP_ARGS="--no-tag"
fi

# If last is stable and not bumping minor
if [[ $PREV_TAG != *beta* ]]; then
  if [[ "$BUMP_MINOR" != "false" ]]; then
    # X.Y.Z -> X.(Y+1).0-beta.0
    bump-my-version bump -vv $BUMP_ARGS minor
  else
    # X.Y.Z -> X.Y.(Z+1)-beta.0
    bump-my-version bump -vv $BUMP_ARGS patch
  fi
else
  if [[ "$BUMP_MINOR" != "false" ]]; then
    # X.Y.Z-beta.N -> X.(Y+1).0-beta.0
    bump-my-version bump -vv $BUMP_ARGS minor
  else
    # X.Y.Z-beta.N -> X.Y.Z-beta.(N+1)
    bump-my-version bump -vv $BUMP_ARGS pre_n
  fi
fi

# The above bump will always bump to a pre-release version. If we are releasing
# a stable version, bump the pre-release level ("pre_l") to make it stable.
if [[ $RELEASE_TYPE == 'stable' ]]; then
  # X.Y.Z-beta.N -> X.Y.Z
  bump-my-version bump -vv pre_l
fi

# Validate that we have incremented version appropriately for breaking changes
NEW_TAG=$(git describe --tags --exact-match HEAD)
NEW_VERSION=$(echo $NEW_TAG | sed "s/^$TAG_PREFIX//")
LAST_STABLE_RELEASE=$(git tag --sort='version:refname' | grep ^$TAG_PREFIX | grep -v beta | grep -vF "$NEW_TAG" | python $SELF_DIR/semver_sort.py $TAG_PREFIX | tail -n 1)
LAST_STABLE_VERSION=$(echo $LAST_STABLE_RELEASE | sed "s/^$TAG_PREFIX//")
python $SELF_DIR/check_breaking_changes.py $LAST_STABLE_RELEASE $HEAD_SHA $LAST_STABLE_VERSION $NEW_VERSION
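For clarity, a hedged Python sketch of the version transitions this script implements (it mirrors the inline comments above; `packaging` is already a dependency of the sibling scripts, and the version strings are illustrative):

```python
# Illustrative sketch of the bump logic above, not the project's code:
# maps (previous version, release type, bump-minor flag) to the next version.
from packaging.version import Version

def next_version(prev: str, release_type: str, bump_minor: bool) -> str:
    v = Version(prev)
    major, minor, patch = v.release
    if v.pre is None:  # last release was stable, e.g. 0.4.20
        minor, patch = (minor + 1, 0) if bump_minor else (minor, patch + 1)
        pre_n = 0
    else:              # last release was a beta, e.g. 0.4.21-beta.2
        if bump_minor:
            minor, patch, pre_n = minor + 1, 0, 0
        else:
            pre_n = v.pre[1] + 1
    base = f"{major}.{minor}.{patch}"
    # A "stable" release drops the pre-release suffix (the final pre_l bump).
    return base if release_type == "stable" else f"{base}-beta.{pre_n}"

assert next_version("0.4.20", "preview", False) == "0.4.21-beta.0"
assert next_version("0.4.21-beta.2", "preview", False) == "0.4.21-beta.3"
assert next_version("0.4.21-beta.2", "stable", False) == "0.4.21"
```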


@@ -1,35 +0,0 @@
"""
Check whether there are any breaking changes in the PRs between the base and head commits.
If there are, assert that we have incremented the minor version.
"""
import argparse
import os
from packaging.version import parse
from github import Github
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("base")
parser.add_argument("head")
parser.add_argument("last_stable_version")
parser.add_argument("current_version")
args = parser.parse_args()
repo = Github(os.environ["GITHUB_TOKEN"]).get_repo(os.environ["GITHUB_REPOSITORY"])
commits = repo.compare(args.base, args.head).commits
prs = (pr for commit in commits for pr in commit.get_pulls())
for pr in prs:
if any(label.name == "breaking-change" for label in pr.labels):
print(f"Breaking change in PR: {pr.html_url}")
break
else:
print("No breaking changes found.")
exit(0)
last_stable_version = parse(args.last_stable_version)
current_version = parse(args.current_version)
if current_version.minor <= last_stable_version.minor:
print("Minor version is not greater than the last stable version.")
exit(1)
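The `for`/`else` in this script is easy to misread: the `else` block runs only when the loop finishes without hitting `break`. A small self-contained sketch of the same control flow, with stubbed data instead of the GitHub API:

```python
# Stubbed illustration of the control flow above: succeed early when
# no PR carries the "breaking-change" label.
prs = [{"labels": ["bug"]}, {"labels": ["enhancement"]}]
for pr in prs:
    if "breaking-change" in pr["labels"]:
        print("Breaking change found; the minor version must be bumped.")
        break
else:
    print("No breaking changes found.")  # runs because the loop never broke
```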


@@ -1,35 +0,0 @@
"""
Takes a list of semver strings and sorts them in ascending order.
"""
import sys
from packaging.version import parse, InvalidVersion
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("prefix", default="v")
args = parser.parse_args()
# Read the input from stdin
lines = sys.stdin.readlines()
# Parse the versions
versions = []
for line in lines:
line = line.strip()
try:
version_str = line.removeprefix(args.prefix)
version = parse(version_str)
except InvalidVersion:
# There are old tags that don't follow the semver format
print(f"Invalid version: {line}", file=sys.stderr)
continue
versions.append((line, version))
# Sort the versions
versions.sort(key=lambda x: x[1])
# Print the sorted versions as original strings
for line, _ in versions:
print(line)
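A hedged example of how this script is used by the workflows above (to find the previous release tag). The tag names are made up, and it assumes the script lives at `ci/semver_sort.py` relative to the working directory:

```python
# Illustrative only: emulate `git tag | python ci/semver_sort.py v | tail -n 1`.
import subprocess

tags = "v0.4.19\nv0.4.20-beta.1\nv0.4.20\nnot-a-tag\n"
out = subprocess.run(
    ["python", "ci/semver_sort.py", "v"],
    input=tags, capture_output=True, text=True,
)
# "not-a-tag" is reported on stderr and skipped; betas sort before finals.
print(out.stdout.splitlines()[-1])  # latest tag by semver order: v0.4.20
```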


@@ -57,8 +57,6 @@ plugins:
     - https://arrow.apache.org/docs/objects.inv
     - https://pandas.pydata.org/docs/objects.inv
   - mkdocs-jupyter
-  - render_swagger:
-      allow_arbitrary_locations : true
 markdown_extensions:
   - admonition
@@ -108,9 +106,6 @@ nav:
   - Versioning & Reproducibility: notebooks/reproducibility.ipynb
   - Configuring Storage: guides/storage.md
   - Sync -> Async Migration Guide: migration.md
-  - Tuning retrieval performance:
-      - Choosing right query type: guides/tuning_retrievers/1_query_types.md
-      - Reranking: guides/tuning_retrievers/2_reranking.md
   - 🧬 Managing embeddings:
       - Overview: embeddings/index.md
       - Embedding functions: embeddings/embedding_functions.md
@@ -125,11 +120,8 @@ nav:
   - DuckDB: python/duckdb.md
   - LangChain:
       - LangChain 🔗: integrations/langchain.md
-      - LangChain demo: notebooks/langchain_demo.ipynb
       - LangChain JS/TS 🔗: https://js.langchain.com/docs/integrations/vectorstores/lancedb
-  - LlamaIndex 🦙:
-      - LlamaIndex docs: integrations/llamaIndex.md
-      - LlamaIndex demo: notebooks/llamaIndex_demo.ipynb
+  - LlamaIndex 🦙: https://docs.llamaindex.ai/en/stable/examples/vector_stores/LanceDBIndexDemo/
   - Pydantic: python/pydantic.md
   - Voxel51: integrations/voxel51.md
   - PromptTools: integrations/prompttools.md
@@ -160,8 +152,7 @@ nav:
   - Overview: cloud/index.md
   - API reference:
-      - 👾 JavaScript: javascript/modules.md
+      - 👾 JavaScript: javascript/saas-modules.md
       - 🐍 Python: python/saas-python.md
-  - REST API: cloud/rest.md
   - Quick start: basic.md
   - Concepts:
@@ -190,9 +181,6 @@ nav:
   - Versioning & Reproducibility: notebooks/reproducibility.ipynb
   - Configuring Storage: guides/storage.md
   - Sync -> Async Migration Guide: migration.md
-  - Tuning retrieval performance:
-      - Choosing right query type: guides/tuning_retrievers/1_query_types.md
-      - Reranking: guides/tuning_retrievers/2_reranking.md
   - Managing Embeddings:
       - Overview: embeddings/index.md
       - Embedding functions: embeddings/embedding_functions.md
@@ -205,9 +193,9 @@ nav:
   - Pandas and PyArrow: python/pandas_and_pyarrow.md
   - Polars: python/polars_arrow.md
   - DuckDB: python/duckdb.md
-  - LangChain 🦜️🔗↗: integrations/langchain.md
+  - LangChain 🦜️🔗↗: https://python.langchain.com/docs/integrations/vectorstores/lancedb
   - LangChain.js 🦜️🔗↗: https://js.langchain.com/docs/integrations/vectorstores/lancedb
-  - LlamaIndex 🦙↗: integrations/llamaIndex.md
+  - LlamaIndex 🦙↗: https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html
   - Pydantic: python/pydantic.md
   - Voxel51: integrations/voxel51.md
   - PromptTools: integrations/prompttools.md
@@ -231,8 +219,7 @@ nav:
   - Overview: cloud/index.md
   - API reference:
       - 🐍 Python: python/saas-python.md
-      - 👾 JavaScript: javascript/modules.md
+      - 👾 JavaScript: javascript/saas-modules.md
-  - REST API: cloud/rest.md
 extra_css:
   - styles/global.css


@@ -1,479 +0,0 @@
openapi: 3.1.0
info:
  version: 1.0.0
  title: LanceDB Cloud API
  description: |
    LanceDB Cloud API is a RESTful API that allows users to access and modify data stored in LanceDB Cloud.
    Table actions are considered temporary resource creations and all use POST method.
  contact:
    name: LanceDB support
    url: https://lancedb.com
    email: contact@lancedb.com
servers:
  - url: https://{db}.{region}.api.lancedb.com
    description: LanceDB Cloud REST endpoint.
    variables:
      db:
        default: ""
        description: the name of DB
      region:
        default: "us-east-1"
        description: the service region of the DB
security:
  - key_auth: []
components:
  securitySchemes:
    key_auth:
      name: x-api-key
      type: apiKey
      in: header
  parameters:
    table_name:
      name: name
      in: path
      description: name of the table
      required: true
      schema:
        type: string
  responses:
    invalid_request:
      description: Invalid request
      content:
        text/plain:
          schema:
            type: string
    not_found:
      description: Not found
      content:
        text/plain:
          schema:
            type: string
    unauthorized:
      description: Unauthorized
      content:
        text/plain:
          schema:
            type: string
  requestBodies:
    arrow_stream_buffer:
      description: Arrow IPC stream buffer
      required: true
      content:
        application/vnd.apache.arrow.stream:
          schema:
            type: string
            format: binary
paths:
  /v1/table/:
    get:
      description: List tables, optionally, with pagination.
      tags:
        - Tables
      summary: List Tables
      operationId: listTables
      parameters:
        - name: limit
          in: query
          description: Limits the number of items to return.
          schema:
            type: integer
        - name: page_token
          in: query
          description: Specifies the starting position of the next query
          schema:
            type: string
      responses:
        "200":
          description: Successfully returned a list of tables in the DB
          content:
            application/json:
              schema:
                type: object
                properties:
                  tables:
                    type: array
                    items:
                      type: string
                  page_token:
                    type: string
        "400":
          $ref: "#/components/responses/invalid_request"
        "401":
          $ref: "#/components/responses/unauthorized"
        "404":
          $ref: "#/components/responses/not_found"
  /v1/table/{name}/create/:
    post:
      description: Create a new table
      summary: Create a new table
      operationId: createTable
      tags:
        - Tables
      parameters:
        - $ref: "#/components/parameters/table_name"
      requestBody:
        $ref: "#/components/requestBodies/arrow_stream_buffer"
      responses:
        "200":
          description: Table successfully created
        "400":
          $ref: "#/components/responses/invalid_request"
        "401":
          $ref: "#/components/responses/unauthorized"
        "404":
          $ref: "#/components/responses/not_found"
  /v1/table/{name}/query/:
    post:
      description: Vector Query
      url: https://{db-uri}.{aws-region}.api.lancedb.com/v1/table/{name}/query/
      tags:
        - Data
      summary: Vector Query
      parameters:
        - $ref: "#/components/parameters/table_name"
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                vector:
                  type: FixedSizeList
                  description: |
                    The targeted vector to search for. Required.
                vector_column:
                  type: string
                  description: |
                    The column to query, it can be inferred from the schema if there is only one vector column.
                prefilter:
                  type: boolean
                  description: |
                    Whether to prefilter the data. Optional.
                k:
                  type: integer
                  description: |
                    The number of search results to return. Default is 10.
                distance_type:
                  type: string
                  description: |
                    The distance metric to use for search. L2, Cosine, Dot and Hamming are supported. Default is L2.
                bypass_vector_index:
                  type: boolean
                  description: |
                    Whether to bypass vector index. Optional.
                filter:
                  type: string
                  description: |
                    A filter expression that specifies the rows to query. Optional.
                columns:
                  type: array
                  items:
                    type: string
                  description: |
                    The columns to return. Optional.
                nprobe:
                  type: integer
                  description: |
                    The number of probes to use for search. Optional.
                refine_factor:
                  type: integer
                  description: |
                    The refine factor to use for search. Optional.
      responses:
        "200":
          description: top k results if query is successfully executed
          content:
            application/json:
              schema:
                type: object
                properties:
                  results:
                    type: array
                    items:
                      type: object
                      properties:
                        id:
                          type: integer
                        selected_col_1_to_return:
                          type: col_1_type
                        selected_col_n_to_return:
                          type: col_n_type
                        _distance:
                          type: float
        "400":
          $ref: "#/components/responses/invalid_request"
        "401":
          $ref: "#/components/responses/unauthorized"
        "404":
          $ref: "#/components/responses/not_found"
  /v1/table/{name}/insert/:
    post:
      description: Insert new data to the Table.
      tags:
        - Data
      operationId: insertData
      summary: Insert new data.
      parameters:
        - $ref: "#/components/parameters/table_name"
      requestBody:
        $ref: "#/components/requestBodies/arrow_stream_buffer"
      responses:
        "200":
          description: Insert successful
        "400":
          $ref: "#/components/responses/invalid_request"
        "401":
          $ref: "#/components/responses/unauthorized"
        "404":
          $ref: "#/components/responses/not_found"
  /v1/table/{name}/merge_insert/:
    post:
      description: |
        Create a "merge insert" operation.
        This operation can add rows, update rows, and remove rows all in a single
        transaction. See python method `lancedb.table.Table.merge_insert` for examples.
      tags:
        - Data
      summary: Merge Insert
      operationId: mergeInsert
      parameters:
        - $ref: "#/components/parameters/table_name"
        - name: on
          in: query
          description: |
            The column to use as the primary key for the merge operation.
          required: true
          schema:
            type: string
        - name: when_matched_update_all
          in: query
          description: |
            Rows that exist in both the source table (new data) and
            the target table (old data) will be updated, replacing
            the old row with the corresponding matching row.
          required: false
          schema:
            type: boolean
        - name: when_matched_update_all_filt
          in: query
          description: |
            If present then only rows that satisfy the filter expression will
            be updated
          required: false
          schema:
            type: string
        - name: when_not_matched_insert_all
          in: query
          description: |
            Rows that exist only in the source table (new data) will be
            inserted into the target table (old data).
          required: false
          schema:
            type: boolean
        - name: when_not_matched_by_source_delete
          in: query
          description: |
            Rows that exist only in the target table (old data) will be
            deleted. An optional condition (`when_not_matched_by_source_delete_filt`)
            can be provided to limit what data is deleted.
          required: false
          schema:
            type: boolean
        - name: when_not_matched_by_source_delete_filt
          in: query
          description: |
            The filter expression that specifies the rows to delete.
          required: false
          schema:
            type: string
      requestBody:
        $ref: "#/components/requestBodies/arrow_stream_buffer"
      responses:
        "200":
          description: Merge Insert successful
        "400":
          $ref: "#/components/responses/invalid_request"
        "401":
          $ref: "#/components/responses/unauthorized"
        "404":
          $ref: "#/components/responses/not_found"
  /v1/table/{name}/delete/:
    post:
      description: Delete rows from a table.
      tags:
        - Data
      summary: Delete rows from a table
      operationId: deleteData
      parameters:
        - $ref: "#/components/parameters/table_name"
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                predicate:
                  type: string
                  description: |
                    A filter expression that specifies the rows to delete.
      responses:
        "200":
          description: Delete successful
        "401":
          $ref: "#/components/responses/unauthorized"
  /v1/table/{name}/drop/:
    post:
      description: Drop a table
      tags:
        - Tables
      summary: Drop a table
      operationId: dropTable
      parameters:
        - $ref: "#/components/parameters/table_name"
      requestBody:
        $ref: "#/components/requestBodies/arrow_stream_buffer"
      responses:
        "200":
          description: Drop successful
        "401":
          $ref: "#/components/responses/unauthorized"
  /v1/table/{name}/describe/:
    post:
      description: Describe a table and return Table Information.
      tags:
        - Tables
      summary: Describe a table
      operationId: describeTable
      parameters:
        - $ref: "#/components/parameters/table_name"
      responses:
        "200":
          description: Table information
          content:
            application/json:
              schema:
                type: object
                properties:
                  table:
                    type: string
                  version:
                    type: integer
                  schema:
                    type: string
                  stats:
                    type: object
        "401":
          $ref: "#/components/responses/unauthorized"
        "404":
          $ref: "#/components/responses/not_found"
  /v1/table/{name}/index/list/:
    post:
      description: List indexes of a table
      tags:
        - Tables
      summary: List indexes of a table
      operationId: listIndexes
      parameters:
        - $ref: "#/components/parameters/table_name"
      responses:
        "200":
          description: Available list of indexes on the table.
          content:
            application/json:
              schema:
                type: object
                properties:
                  indexes:
                    type: array
                    items:
                      type: object
                      properties:
                        columns:
                          type: array
                          items:
                            type: string
                        index_name:
                          type: string
                        index_uuid:
                          type: string
        "401":
          $ref: "#/components/responses/unauthorized"
        "404":
          $ref: "#/components/responses/not_found"
  /v1/table/{name}/create_index/:
    post:
      description: Create vector index on a Table
      tags:
        - Tables
      summary: Create vector index on a Table
      operationId: createIndex
      parameters:
        - $ref: "#/components/parameters/table_name"
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                column:
                  type: string
                metric_type:
                  type: string
                  nullable: false
                  description: |
                    The metric type to use for the index. L2, Cosine, Dot are supported.
                index_type:
                  type: string
      responses:
        "200":
          description: Index successfully created
        "400":
          $ref: "#/components/responses/invalid_request"
        "401":
          $ref: "#/components/responses/unauthorized"
        "404":
          $ref: "#/components/responses/not_found"
  /v1/table/{name}/create_scalar_index/:
    post:
      description: Create a scalar index on a table
      tags:
        - Tables
      summary: Create a scalar index on a table
      operationId: createScalarIndex
      parameters:
        - $ref: "#/components/parameters/table_name"
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                column:
                  type: string
                index_type:
                  type: string
              required: false
      responses:
        "200":
          description: Scalar Index successfully created
        "400":
          $ref: "#/components/responses/invalid_request"
        "401":
          $ref: "#/components/responses/unauthorized"
        "404":
          $ref: "#/components/responses/not_found"


@@ -2,5 +2,4 @@ mkdocs==1.5.3
 mkdocs-jupyter==0.24.1
 mkdocs-material==9.5.3
 mkdocstrings[python]==0.20.0
-mkdocs-render-swagger-plugin
 pydantic


@@ -44,36 +44,6 @@
!!! info "Please also make sure you're using the same version of Arrow as in the [lancedb crate](https://github.com/lancedb/lancedb/blob/main/Cargo.toml)" !!! info "Please also make sure you're using the same version of Arrow as in the [lancedb crate](https://github.com/lancedb/lancedb/blob/main/Cargo.toml)"
### Preview releases
Stable releases are created about every 2 weeks. For the latest features and bug
fixes, you can install the preview release. These releases receive the same
level of testing as stable releases, but are not guaranteed to be available for
more than 6 months after they are released. Once your application is stable, we
recommend switching to stable releases.
=== "Python"
```shell
pip install --pre --extra-index-url https://pypi.fury.io/lancedb/ lancedb
```
=== "Typescript"
```shell
npm install vectordb@preview
```
=== "Rust"
We don't push preview releases to crates.io, but you can referent the tag
in GitHub within your Cargo dependencies:
```toml
[dependencies]
lancedb = { git = "https://github.com/lancedb/lancedb.git", tag = "vX.Y.Z-beta.N" }
```
## Connect to a database ## Connect to a database
=== "Python" === "Python"
@@ -180,9 +150,6 @@ table.
!!! info "Under the hood, LanceDB reads in the Apache Arrow data and persists it to disk using the [Lance format](https://www.github.com/lancedb/lance)." !!! info "Under the hood, LanceDB reads in the Apache Arrow data and persists it to disk using the [Lance format](https://www.github.com/lancedb/lance)."
!!! info "Automatic embedding generation with Embedding API"
When working with embedding models, it is recommended to use the LanceDB embedding API to automatically create vector representation of the data and queries in the background. See the [quickstart example](#using-the-embedding-api) or the embedding API [guide](./embeddings/)
### Create an empty table ### Create an empty table
Sometimes you may not have the data to insert into the table at creation time. Sometimes you may not have the data to insert into the table at creation time.
@@ -197,9 +164,6 @@ similar to a `CREATE TABLE` statement in SQL.
--8<-- "python/python/tests/docs/test_basic.py:create_empty_table_async" --8<-- "python/python/tests/docs/test_basic.py:create_empty_table_async"
``` ```
!!! note "You can define schema in Pydantic"
LanceDB comes with Pydantic support, which allows you to define the schema of your data using Pydantic models. This makes it easy to work with LanceDB tables and data. Learn more about all supported types in [tables guide](./guides/tables.md).
=== "Typescript" === "Typescript"
```typescript ```typescript
@@ -430,19 +394,6 @@ Use the `drop_table()` method on the database to remove a table.
}) })
``` ```
## Using the Embedding API
You can use the embedding API when working with embedding models. It automatically vectorizes the data at ingestion and query time and comes with built-in integrations with popular embedding models like Openai, Hugging Face, Sentence Transformers, CLIP and more.
=== "Python"
```python
--8<-- "python/python/tests/docs/test_embeddings_optional.py:imports"
--8<-- "python/python/tests/docs/test_embeddings_optional.py:openai_embeddings"
```
Learn about using the existing integrations and creating custom embedding functions in the [embedding API guide](./embeddings/).
## What's next ## What's next
This section covered the very basics of using LanceDB. If you're learning about vector databases for the first time, you may want to read the page on [indexing](concepts/index_ivfpq.md) to get familiar with the concepts. This section covered the very basics of using LanceDB. If you're learning about vector databases for the first time, you may want to read the page on [indexing](concepts/index_ivfpq.md) to get familiar with the concepts.


@@ -1 +0,0 @@
!!swagger ../../openapi.yml!!


@@ -193,13 +193,13 @@ from lancedb.pydantic import LanceModel, Vector
model = get_registry().get("huggingface").create(name='facebook/bart-base') model = get_registry().get("huggingface").create(name='facebook/bart-base')
class Words(LanceModel): class TextModel(LanceModel):
text: str = model.SourceField() text: str = model.SourceField()
vector: Vector(model.ndims()) = model.VectorField() vector: Vector(model.ndims()) = model.VectorField()
df = pd.DataFrame({"text": ["hi hello sayonara", "goodbye world"]}) df = pd.DataFrame({"text": ["hi hello sayonara", "goodbye world"]})
table = db.create_table("greets", schema=Words) table = db.create_table("greets", schema=Words)
table.add(df) table.add()
query = "old greeting" query = "old greeting"
actual = table.search(query).limit(1).to_pydantic(Words)[0] actual = table.search(query).limit(1).to_pydantic(Words)[0]
print(actual.text) print(actual.text)
@@ -216,7 +216,7 @@ Generate embeddings via the [ollama](https://github.com/ollama/ollama-python) py
|------------------------|----------------------------|--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------| |------------------------|----------------------------|--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------|
| `name` | `str` | `nomic-embed-text` | The name of the model. | | `name` | `str` | `nomic-embed-text` | The name of the model. |
| `host` | `str` | `http://localhost:11434` | The Ollama host to connect to. | | `host` | `str` | `http://localhost:11434` | The Ollama host to connect to. |
| `options` | `ollama.Options` or `dict` | `None` | Additional model parameters listed in the documentation for the Modelfile such as `temperature`. | | `options` | `ollama.Options` or `dict` | `None` | Additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`. |
| `keep_alive` | `float` or `str` | `"5m"` | Controls how long the model will stay loaded into memory following the request. | | `keep_alive` | `float` or `str` | `"5m"` | Controls how long the model will stay loaded into memory following the request. |
| `ollama_client_kwargs` | `dict` | `{}` | kwargs that can be past to the `ollama.Client`. | | `ollama_client_kwargs` | `dict` | `{}` | kwargs that can be past to the `ollama.Client`. |
@@ -365,68 +365,6 @@ tbl.add(df)
rs = tbl.search("hello").limit(1).to_pandas() rs = tbl.search("hello").limit(1).to_pandas()
``` ```
### Cohere Embeddings
Using cohere API requires cohere package, which can be installed using `pip install cohere`. Cohere embeddings are used to generate embeddings for text data. The embeddings can be used for various tasks like semantic search, clustering, and classification.
You also need to set the `COHERE_API_KEY` environment variable to use the Cohere API.
Supported models are:
```
* embed-english-v3.0
* embed-multilingual-v3.0
* embed-english-light-v3.0
* embed-multilingual-light-v3.0
* embed-english-v2.0
* embed-english-light-v2.0
* embed-multilingual-v2.0
```
Supported parameters (to be passed in `create` method) are:
| Parameter | Type | Default Value | Description |
|---|---|---|---|
| `name` | `str` | `"embed-english-v2.0"` | The model ID of the cohere model to use. Supported base models for Text Embeddings: embed-english-v3.0, embed-multilingual-v3.0, embed-english-light-v3.0, embed-multilingual-light-v3.0, embed-english-v2.0, embed-english-light-v2.0, embed-multilingual-v2.0 |
| `source_input_type` | `str` | `"search_document"` | The type of input data to be used for the source column. |
| `query_input_type` | `str` | `"search_query"` | The type of input data to be used for the query. |
Cohere supports the following input types:

| Input Type              | Description                                                                   |
|-------------------------|-------------------------------------------------------------------------------|
| `"search_document"`     | Used for embeddings stored in a vector database for search use-cases.        |
| `"search_query"`        | Used for embeddings of search queries run against a vector DB.               |
| `"semantic_similarity"` | Specifies the given text will be used for Semantic Textual Similarity (STS). |
| `"classification"`      | Used for embeddings passed through a text classifier.                        |
| `"clustering"`          | Used for embeddings run through a clustering algorithm.                      |
Usage Example:
```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import EmbeddingFunctionRegistry
cohere = (
    EmbeddingFunctionRegistry.get_instance()
    .get("cohere")
    .create(name="embed-multilingual-v2.0")
)
class TextModel(LanceModel):
text: str = cohere.SourceField()
vector: Vector(cohere.ndims()) = cohere.VectorField()
data = [ { "text": "hello world" },
{ "text": "goodbye world" }]
db = lancedb.connect("~/.lancedb")
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
tbl.add(data)
```
### AWS Bedrock Text Embedding Functions
AWS Bedrock supports multiple base models for generating text embeddings. You need to set up AWS credentials to use this embedding function.
You can do so using `awscli`, and also add your session token:


@@ -2,9 +2,6 @@ Representing multi-modal data as vector embeddings is becoming a standard practi
For this purpose, LanceDB introduces an **embedding functions API** that allows you to set things up once, during the configuration stage of your project. After this, the table remembers it, effectively making the embedding functions *disappear in the background* so you don't have to worry about manually passing callables and can instead focus on the rest of your data engineering pipeline.
!!! Note "LanceDB cloud doesn't support embedding functions yet"
LanceDB Cloud does not support embedding functions yet. You need to generate embeddings before ingesting into the table or querying.
!!! warning
Using the embedding function registry means that you don't have to explicitly generate the embeddings yourself.
However, if your embedding function changes, you'll have to re-configure your table with the new embedding function.


@@ -68,39 +68,6 @@ table.add(
]
)
query = "greetings"
actual = table.search(query).limit(1).to_pydantic(Words)[0]
print(actual.text)
```
### Jina Embeddings
LanceDB registers the JinaAI embeddings function in the registry as `jina`. You can pass any supported model name to the `create` method. By default it uses `"jina-clip-v1"`.
`jina-clip-v1` can handle both text and images; the other models support only text.
You need to pass `JINA_API_KEY` in the environment variable or pass it as `api_key` to `create` method.
```python
import os
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
os.environ['JINA_API_KEY'] = "jina_*"
db = lancedb.connect("/tmp/db")
func = get_registry().get("jina").create(name="jina-clip-v1")
class Words(LanceModel):
text: str = func.SourceField()
vector: Vector(func.ndims()) = func.VectorField()
table = db.create_table("words", schema=Words, mode="overwrite")
table.add(
[
{"text": "hello world"},
{"text": "goodbye world"}
]
)
query = "greetings"
actual = table.search(query).limit(1).to_pydantic(Words)[0]
print(actual.text)

View File

@@ -2,6 +2,7 @@
LanceDB provides support for full-text search via [Tantivy](https://github.com/quickwit-oss/tantivy) (currently Python only), allowing you to incorporate keyword-based search (based on BM25) in your retrieval solutions. Our goal is to push the FTS integration down to the Rust level in the future, so that it's available for Rust and JavaScript users as well. Follow along at [this Github issue](https://github.com/lancedb/lance/issues/1195)
A hybrid search solution combining vector and full-text search is also on the way.
## Installation
@@ -54,16 +55,6 @@ This returns the result as a list of dictionaries as follows.
!!! note
LanceDB automatically searches on the existing FTS index if the input to the search is of type `str`. If you provide a vector as input, LanceDB will search the ANN index instead.
## Tokenization
By default, the text is tokenized by splitting on punctuation and whitespace, and tokens longer than 40 characters are removed. For language-specific tokenization, provide the `tokenizer_name` argument with the two-letter language code followed by "_stem"; for English, this is "en_stem".
```python
table.create_fts_index("text", tokenizer_name="en_stem")
```
The following [languages](https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html) are currently supported.
## Index multiple columns
If you have multiple string columns to index, there's no need to combine them manually -- simply pass them all as a list to `create_fts_index`:
@@ -149,7 +140,6 @@ is treated as a phrase query.
In general, a query that's declared as a phrase query will be wrapped in double quotes during parsing, with nested double quotes replaced by single quotes.
## Configurations
By default, LanceDB configures a 1GB heap size limit for creating the index. You can


@@ -265,108 +265,6 @@ For **read-only access**, LanceDB will need a policy such as:
}
```
#### DynamoDB Commit Store for concurrent writes
By default, S3 does not support concurrent writes. Having two or more processes
writing to the same table at the same time can lead to data corruption. This is
because S3, unlike other object stores, does not have any atomic put or copy
operation.
To enable concurrent writes, you can configure LanceDB to use a DynamoDB table
as a commit store. This table will be used to coordinate writes between
different processes. To enable this feature, you must modify your connection
URI to use the `s3+ddb` scheme and add a query parameter `ddbTableName` with the
name of the table to use.
=== "Python"
```python
import lancedb
db = await lancedb.connect_async(
"s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
)
```
=== "JavaScript"
```javascript
const lancedb = require("lancedb");
const db = await lancedb.connect(
"s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
);
```
The DynamoDB table must be created with the following schema:
- Hash key: `base_uri` (string)
- Range key: `version` (number)
You can create this programmatically with:
=== "Python"
<!-- skip-test -->
```python
import boto3
dynamodb = boto3.client("dynamodb")
table = dynamodb.create_table(
TableName=table_name,
KeySchema=[
{"AttributeName": "base_uri", "KeyType": "HASH"},
{"AttributeName": "version", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "base_uri", "AttributeType": "S"},
{"AttributeName": "version", "AttributeType": "N"},
],
ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
)
```
=== "JavaScript"
<!-- skip-test -->
```javascript
import {
CreateTableCommand,
DynamoDBClient,
} from "@aws-sdk/client-dynamodb";
const dynamodb = new DynamoDBClient({
region: CONFIG.awsRegion,
credentials: {
accessKeyId: CONFIG.awsAccessKeyId,
secretAccessKey: CONFIG.awsSecretAccessKey,
},
endpoint: CONFIG.awsEndpoint,
});
const command = new CreateTableCommand({
TableName: table_name,
AttributeDefinitions: [
{
AttributeName: "base_uri",
AttributeType: "S",
},
{
AttributeName: "version",
AttributeType: "N",
},
],
KeySchema: [
{ AttributeName: "base_uri", KeyType: "HASH" },
{ AttributeName: "version", KeyType: "RANGE" },
],
ProvisionedThroughput: {
ReadCapacityUnits: 1,
WriteCapacityUnits: 1,
},
});
await dynamodb.send(command);
```
#### S3-compatible stores
LanceDB can also connect to S3-compatible stores, such as MinIO. To do so, you must specify both region and endpoint:


@@ -116,21 +116,21 @@ This guide will show how to create tables, insert data into them, and update the
### From a Polars DataFrame
LanceDB supports [Polars](https://pola.rs/), a modern, fast DataFrame library
written in Rust. Just like in Pandas, the Polars integration is enabled by PyArrow
under the hood. A deeper integration between LanceDB Tables and Polars DataFrames
is on the way.
```python
import polars as pl

data = pl.DataFrame({
    "vector": [[3.1, 4.1], [5.9, 26.5]],
    "item": ["foo", "bar"],
    "price": [10.0, 20.0]
})
table = db.create_table("pl_table", data=data)
```
### From an Arrow Table
=== "Python"
@@ -452,27 +452,6 @@ After a table has been created, you can always add more data to it using the var
tbl.add(pydantic_model_items)
```
??? "Ingesting Pydantic models with LanceDB embedding API"
When using LanceDB's embedding API, you can add Pydantic models directly to the table and LanceDB will automatically populate the `vector` field. You need to specify the default value of the `vector` field as `None` to allow LanceDB to vectorize the data automatically.
```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
db = lancedb.connect("~/tmp")
embed_fcn = get_registry().get("huggingface").create(name="BAAI/bge-small-en-v1.5")
class Schema(LanceModel):
    text: str = embed_fcn.SourceField()
    vector: Vector(embed_fcn.ndims()) = embed_fcn.VectorField(default=None)
tbl = db.create_table("my_table", schema=Schema, mode="overwrite")
models = [Schema(text="hello"), Schema(text="world")]
tbl.add(models)
```
=== "JavaScript"
@@ -657,31 +636,6 @@ The `values` parameter is used to provide the new values for the columns as lite
When rows are updated, they are moved out of the index. The row will still show up in ANN queries, but the query will not be as fast as it would be if the row was in the index. If you update a large proportion of rows, consider rebuilding the index afterwards.
## Drop a table
Use the `drop_table()` method on the database to remove a table.
=== "Python"
```python
--8<-- "python/python/tests/docs/test_basic.py:drop_table"
--8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
```
This permanently removes the table and is not recoverable, unlike deleting rows.
By default, if the table does not exist an exception is raised. To suppress this,
you can pass in `ignore_missing=True`.
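For example, with an existing connection `db` (a minimal sketch; the table name is illustrative):
```python
db.drop_table("my_table")
# Does not raise even if the table has already been removed:
db.drop_table("my_table", ignore_missing=True)
```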
=== "Javascript/Typescript"
```typescript
--8<-- "docs/src/basic_legacy.ts:drop_table"
```
This permanently removes the table and is not recoverable, unlike deleting rows.
If the table does not exist an exception is raised.
## Consistency
In LanceDB OSS, users can set the `read_consistency_interval` parameter on connections to achieve different levels of read consistency. This parameter determines how frequently the database synchronizes with the underlying storage system to check for updates made by other processes. If another process updates a table, the database will not see the changes until the next synchronization.
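For example, a connection that re-checks storage at most every five seconds — a minimal sketch; `read_consistency_interval` takes a `datetime.timedelta`, and the path is illustrative:
```python
from datetime import timedelta

import lancedb

# Sync with the underlying storage at most once every 5 seconds.
db = lancedb.connect("~/lancedb", read_consistency_interval=timedelta(seconds=5))
```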


@@ -1,128 +0,0 @@
## Improving retriever performance
VectorDBs are used as retrievers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, a retriever is a critical component of Retrieval Augmented Generation (RAG) architectures. In this section, we will discuss how to improve the performance of retrievers.
There are several ways to improve the performance of retrievers. Some of the common techniques are:
* Using different query types
* Using hybrid search
* Fine-tuning the embedding models
* Using different embedding models
Using different embedding models is something that's very specific to the use case and the data. So we will not discuss it here. In this section, we will discuss the first three techniques.
!!! note "Note"
We'll be using a simple metric called "hit-rate" for evaluating the performance of the retriever across this guide. Hit-rate is the percentage of queries for which the retriever returned the correct answer in the top-k results. For example, if the retriever returned the correct answer in the top-3 results for 70% of the queries, then the hit-rate@3 is 0.7.
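In code, the metric is just a ratio — a minimal sketch with illustrative names, not the actual benchmarking code from the repo linked below:
```python
def hit_rate_at_k(retrieved: list[list[str]], expected: list[str]) -> float:
    """Fraction of queries whose correct context appears in the top-k results."""
    hits = sum(1 for docs, answer in zip(retrieved, expected) if answer in docs)
    return hits / len(expected)

# Two queries, the correct context retrieved for one of them -> hit-rate 0.5
assert hit_rate_at_k([["ctx_a", "ctx_b"], ["ctx_c"]], ["ctx_b", "ctx_d"]) == 0.5
```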
## The dataset
We'll be using a QA dataset generated from a Llama2 review paper. The dataset contains 221 query, context, and answer triplets. The queries and answers were generated using GPT-4 based on a given context. The full script used to generate the dataset can be found in this [repo](https://github.com/lancedb/ragged). It can be downloaded from [here](https://github.com/AyushExel/assets/blob/main/data_qa.csv)
### Using different query types
Let's set up the embeddings and the dataset first. We'll use LanceDB's `huggingface` embeddings integration for this guide.
```python
import lancedb
import pandas as pd
from lancedb.embeddings import get_registry
from lancedb.pydantic import Vector, LanceModel

db = lancedb.connect("~/lancedb/query_types")
df = pd.read_csv("data_qa.csv")
embed_fcn = get_registry().get("huggingface").create(name="BAAI/bge-small-en-v1.5")

class Schema(LanceModel):
    context: str = embed_fcn.SourceField()
    vector: Vector(embed_fcn.ndims()) = embed_fcn.VectorField()

table = db.create_table("qa", schema=Schema)
table.add(df[["context"]].to_dict(orient="records"))
queries = df["query"].tolist()
```
Now that we have the dataset and embeddings table set up, here's how you can run different query types on the dataset.
* <b> Vector Search: </b>
```python
table.search(queries[0], query_type="vector").limit(5).to_pandas()
```
By default, LanceDB uses the vector search query type and, when using the embedding API, automatically converts the input query to a vector before searching. So the following statement is equivalent to the one above.
```python
table.search(queries[0]).limit(5).to_pandas()
```
Vector or semantic search is useful when you want to find documents that are similar to the query in terms of meaning.
---
* <b> Full-text Search: </b>
FTS requires creating an index on the column you want to search on. `replace=True` will replace the existing index if it exists.
Once the index is created, you can search using the `fts` query type.
```python
table.create_fts_index("context", replace=True)
table.search(queries[0], query_type="fts").limit(5).to_pandas()
```
Full-text search is useful when you want to find documents that contain the query terms.
---
* <b> Hybrid Search: </b>
Hybrid search is a combination of vector and full-text search. Here's how you can run a hybrid search query on the dataset.
```python
table.search(queries[0], query_type="hybrid").limit(5).to_pandas()
```
Hybrid search requires a reranker to combine and rank the results from vector and full-text search. We'll cover reranking as a concept in the next section.
Hybrid search is useful when you want to combine the benefits of both vector and full-text search.
!!! note "Note"
By default, it uses `LinearCombinationReranker` that combines the scores from vector and full-text search using a weighted linear combination. It is the simplest reranker implementation available in LanceDB. You can also use other rerankers like `CrossEncoderReranker` or `CohereReranker` for reranking the results.
Learn more about rerankers [here](https://lancedb.github.io/lancedb/reranking/)
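For example, you can construct the default reranker explicitly and pass it to a hybrid query — a minimal sketch using the table and queries from above; `weight` balances the two score sources:
```python
from lancedb.rerankers import LinearCombinationReranker

reranker = LinearCombinationReranker(weight=0.7)  # weights the two score sources
table.search(queries[0], query_type="hybrid").rerank(reranker=reranker).limit(5).to_pandas()
```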
### Hit rate evaluation results
Now that we have seen how to run different query types on the dataset, let's evaluate the hit-rate of each query type on the dataset.
For brevity, the entire evaluation script is not shown here. You can find the complete evaluation and benchmarking utility scripts [here](https://github.com/lancedb/ragged).
Here are the hit-rate results for the dataset:
| Query Type | Hit-rate@5 |
| --- | --- |
| Vector Search | 0.640 |
| Full-text Search | 0.595 |
| Hybrid Search (w/ LinearCombinationReranker) | 0.645 |
**Choosing a query type** is very specific to the use case and the data. This synthetic dataset was generated to be semantically challenging, i.e., the queries don't have many keywords in common with the context, so vector search performs better than full-text search. In real-world scenarios, however, full-text search might perform better than vector search. Hybrid search is a good choice when you want to combine the benefits of both.
### Evaluation results on other datasets
The hit-rate results can vary based on the dataset and the query type. Here are the hit-rate results for the other datasets using the same embedding function.
* <b> SQuAD Dataset: </b>
| Query Type | Hit-rate@5 |
| --- | --- |
| Vector Search | 0.822 |
| Full-text Search | 0.835 |
| Hybrid Search (w/ LinearCombinationReranker) | 0.8874 |
* <b> Uber10K sec filing Dataset: </b>
| Query Type | Hit-rate@5 |
| --- | --- |
| Vector Search | 0.608 |
| Full-text Search | 0.82 |
| Hybrid Search (w/ LinearCombinationReranker) | 0.80 |
In these standard datasets, FTS performs much better than vector search because the queries share many keywords with the context. So, in general, choosing the query type is very specific to the use case and the data.


@@ -1,78 +0,0 @@
Continuing from the previous example, we can now rerank the results using more complex rerankers.
## Reranking search results
You can rerank any search results using a reranker. The syntax for reranking is as follows:
```python
from lancedb.rerankers import LinearCombinationReranker
reranker = LinearCombinationReranker()
table.search(queries[0], query_type="hybrid").rerank(reranker=reranker).limit(5).to_pandas()
```
Based on the `query_type`, the `rerank()` function can accept other arguments as well. For example, hybrid search accepts a `normalize` param to determine the score normalization method.
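For instance — a sketch using the table and reranker from above; treat the option values as indicative and check the reranking docs for the exact set:
```python
table.search(queries[0], query_type="hybrid").rerank(
    reranker=reranker, normalize="score"  # assumed options: "score" or "rank"
).limit(5).to_pandas()
```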
!!! note "Note"
LanceDB provides a `Reranker` base class that can be extended to implement custom rerankers. Each reranker must implement the `rerank_hybrid` method. `rerank_vector` and `rerank_fts` methods are optional. For example, the `LinearCombinationReranker` only implements the `rerank_hybrid` method and so it can only be used for reranking hybrid search results.
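To make that concrete, here is a minimal custom-reranker sketch. It assumes the `rerank_hybrid` signature used by LanceDB's built-in rerankers (the query plus two pyarrow result tables) and assumes result-column names (`_distance`, `score`, `text`); check the `Reranker` source for the exact contract before relying on it:
```python
import pandas as pd
import pyarrow as pa

from lancedb.rerankers import Reranker


def _minmax(s: pd.Series) -> pd.Series:
    # Min-max normalize a score column to [0, 1]; constant columns map to 0.
    rng = s.max() - s.min()
    return (s - s.min()) / rng if rng else s * 0.0


class AverageScoreReranker(Reranker):
    """Toy example: min-max normalize both score columns, merge, and sort."""

    def rerank_hybrid(self, query: str, vector_results: pa.Table, fts_results: pa.Table) -> pa.Table:
        vec, fts = vector_results.to_pandas(), fts_results.to_pandas()
        # Lower distance is better, so invert it into a similarity-like score.
        vec["_relevance_score"] = 1.0 - _minmax(vec["_distance"])
        fts["_relevance_score"] = _minmax(fts["score"])
        merged = (
            pd.concat([vec, fts], ignore_index=True)
            .sort_values("_relevance_score", ascending=False)
            .drop_duplicates(subset="text")  # assumes a "text" column to dedupe on
        )
        return pa.Table.from_pandas(merged.reset_index(drop=True))
```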
## Choosing a Reranker
There are many rerankers available in LanceDB like `CrossEncoderReranker`, `CohereReranker`, and `ColBERT`. The choice of reranker depends on the dataset and the application. You can even implement your own custom reranker by extending the `Reranker` class. For more details about each available reranker and a performance comparison, refer to the [rerankers](https://lancedb.github.io/lancedb/reranking/) documentation.
In this example, we'll use the `CohereReranker` to rerank the search results. It requires `cohere` to be installed and `COHERE_API_KEY` to be set in the environment. To get your API key, sign up on [Cohere](https://cohere.ai/).
```python
from lancedb.rerankers import CohereReranker
# use Cohere reranker v3
reranker = CohereReranker(model_name="rerank-english-v3.0") # default model is "rerank-english-v2.0"
```
### Reranking search results
Now we can rerank all query type results using the `CohereReranker`:
```python
# rerank hybrid search results
table.search(queries[0], query_type="hybrid").rerank(reranker=reranker).limit(5).to_pandas()
# rerank vector search results
table.search(queries[0], query_type="vector").rerank(reranker=reranker).limit(5).to_pandas()
# rerank fts search results
table.search(queries[0], query_type="fts").rerank(reranker=reranker).limit(5).to_pandas()
```
Each reranker can accept additional arguments. For example, `CohereReranker` accepts `top_k` and `batch_size` params to control the number of documents to rerank and the batch size for reranking respectively. Similarly, a custom reranker can accept any number of arguments based on the implementation. For example, a reranker can accept a `filter` that implements some custom logic to filter out documents before reranking.
## Results
Let us take a look at the same datasets from the previous sections, using the same embedding table but with Cohere reranker applied to all query types.
!!! note "Note"
When reranking fts or vector search results, the search results are over-fetched by a factor of 2 and then reranked. From the reranked set, `top_k` (5 in this case) results are taken. This is done because reranking will have no effect on the hit-rate if we only fetch the `top_k` results.
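A sketch of that procedure for a single query type, using the table and reranker from earlier:
```python
k = 5
# Fetch 2x k candidates, rerank them, then keep only the top k.
reranked = (
    table.search(queries[0], query_type="vector")
    .rerank(reranker=reranker)
    .limit(k * 2)
    .to_pandas()
    .head(k)
)
```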
### Synthetic LLama2 paper dataset
| Query Type | Hit-rate@5 |
| --- | --- |
| Vector | 0.640 |
| FTS | 0.595 |
| Reranked vector | 0.677 |
| Reranked fts | 0.672 |
| Hybrid | 0.759 |
### SQuAD Dataset
### Uber10K sec filing Dataset
| Query Type | Hit-rate@5 |
| --- | --- |
| Vector | 0.608 |
| FTS | 0.824 |
| Reranked vector | 0.671 |
| Reranked fts | 0.843 |
| Hybrid | 0.849 |


@@ -5,9 +5,7 @@ Hybrid Search is a broad (often misused) term. It can mean anything from combini
## The challenge of (re)ranking search results
Once you have a group of the most relevant search results from multiple search sources, you'd likely standardize the score and rank them accordingly. This process can also be seen as another independent step: reranking.
There are two approaches for reranking search results from multiple sources.
* <b>Score-based</b>: Calculate final relevance scores based on a weighted linear combination of individual search algorithm scores. Example: a weighted linear combination of semantic search and keyword-based search results (see the sketch below).
* <b>Relevance-based</b>: Discards the existing scores and calculates the relevance of each search result-query pair. Example: cross-encoder models.
Even though there are many strategies for reranking search results, none works for all cases. Moreover, evaluating them is itself a challenge. Also, reranking can be dataset- and application-specific, so it's hard to generalize.
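As a sketch, the score-based approach boils down to a one-line combination of scores that are assumed to be normalized already:
```python
def fuse_scores(vector_score: float, fts_score: float, weight: float = 0.7) -> float:
    """Weighted linear combination; assumes both scores are normalized to [0, 1]."""
    return weight * vector_score + (1.0 - weight) * fts_score
```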


@@ -2,7 +2,7 @@
![Illustration](../assets/langchain.png)
## Quick Start
You can load your document data using langchain's loaders; for this example we are using `TextLoader` and `OpenAIEmbeddings` as the embedding model.
```python
import os
from langchain.document_loaders import TextLoader
@@ -38,8 +38,6 @@ The exhaustive list of parameters for `LanceDB` vector store are :
- `api_key`: (Optional) API key to use for LanceDB cloud database. Defaults to `None`.
- `region`: (Optional) Region to use for LanceDB cloud database. Only for LanceDB Cloud, defaults to `None`.
- `mode`: (Optional) Mode to use for adding data to the table. Defaults to `'overwrite'`.
- `reranker`: (Optional) The reranker to use for LanceDB.
- `relevance_score_fn`: (Optional[Callable[[float], float]]) Langchain relevance score function to be used. Defaults to `None`.
```python
db_url = "db://lang_test"  # url of db you created
@@ -56,14 +54,12 @@ vector_store = LanceDB(
```
### Methods
To add texts and store respective embeddings automatically:
##### add_texts()
- `texts`: `Iterable` of strings to add to the vectorstore.
- `metadatas`: Optional `list[dict()]` of metadatas associated with the texts.
- `ids`: Optional `list` of ids to associate with the texts.
- `kwargs`: `Any`
This method adds texts and stores respective embeddings automatically.
```python
vector_store.add_texts(texts=['test_123'], metadatas=[{'source': 'wiki'}])
@@ -78,6 +74,7 @@ pd_df.to_csv("docsearch.csv", index=False)
# you can also create a new vector store object using an older connection object:
vector_store = LanceDB(connection=tbl, embedding=embeddings)
```
For index creation, make sure your table has enough data in it. An ANN index is usually not needed for datasets of ~100K vectors. For large-scale (>1M) or higher-dimension vectors, it is beneficial to create an ANN index.
##### create_index()
- `col_name`: `Optional[str] = None`
- `vector_col`: `Optional[str] = None`
@@ -85,8 +82,6 @@ vector_store = LanceDB(connection=tbl, embedding=embeddings)
- `num_sub_vectors`: `Optional[int] = 96`
- `index_cache_size`: `Optional[int] = None`
This method creates an index for the vector store. For index creation, make sure your table has enough data in it. An ANN index is usually not needed for datasets of ~100K vectors. For large-scale (>1M) or higher-dimension vectors, it is beneficial to create an ANN index.
```python
# for creating vector index
vector_store.create_index(vector_col='vector', metric='cosine')
@@ -94,108 +89,4 @@ vector_store.create_index(vector_col='vector', metric = 'cosine')
# for creating scalar index (for non-vector columns)
vector_store.create_index(col_name='text')
```
##### similarity_search()
- `query`: `str`
- `k`: `Optional[int] = None`
- `filter`: `Optional[Dict[str, str]] = None`
- `fts`: `Optional[bool] = False`
- `name`: `Optional[str] = None`
- `kwargs`: `Any`
Returns documents most similar to the query, without relevance scores.
```python
docs = docsearch.similarity_search(query)
print(docs[0].page_content)
```
##### similarity_search_by_vector()
- `embedding`: `List[float]`
- `k`: `Optional[int] = None`
- `filter`: `Optional[Dict[str, str]] = None`
- `name`: `Optional[str] = None`
- `kwargs`: `Any`
Returns documents most similar to the query vector.
```python
docs = docsearch.similarity_search_by_vector(embeddings.embed_query(query))
print(docs[0].page_content)
```
##### similarity_search_with_score()
- `query`: `str`
- `k`: `Optional[int] = None`
- `filter`: `Optional[Dict[str, str]] = None`
- `kwargs`: `Any`
Returns documents most similar to the query string, with relevance scores. It gets called by the base class's `similarity_search_with_relevance_scores`, which selects the relevance score based on our `_select_relevance_score_fn`.
```python
docs = docsearch.similarity_search_with_relevance_scores(query)
print("relevance score - ", docs[0][1])
print("text- ", docs[0][0].page_content[:1000])
```
##### similarity_search_by_vector_with_relevance_scores()
- `embedding`: `List[float]`
- `k`: `Optional[int] = None`
- `filter`: `Optional[Dict[str, str]] = None`
- `name`: `Optional[str] = None`
- `kwargs`: `Any`
Returns documents most similar to the query vector, with relevance scores.
```python
docs = docsearch.similarity_search_by_vector_with_relevance_scores(query_embedding)
print("relevance score - ", docs[0][1])
print("text- ", docs[0][0].page_content[:1000])
```
##### max_marginal_relevance_search()
- `query`: `str`
- `k`: `Optional[int] = None`
- `fetch_k`: Number of documents to fetch to pass to the MMR algorithm. `Optional[int] = None`
- `lambda_mult`: Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. `float = 0.5`
- `filter`: `Optional[Dict[str, str]] = None`
- `kwargs`: `Any`
Returns docs selected using maximal marginal relevance (MMR).
Maximal marginal relevance optimizes for similarity to the query AND diversity among the selected documents.
Similarly, the `max_marginal_relevance_search_by_vector()` function returns docs most similar to an embedding using MMR; instead of a string query, you pass the embedding to be searched for.
```python
result = docsearch.max_marginal_relevance_search(
query="text"
)
result_texts = [doc.page_content for doc in result]
print(result_texts)
# search by vector:
result = docsearch.max_marginal_relevance_search_by_vector(
embeddings.embed_query("text")
)
result_texts = [doc.page_content for doc in result]
print(result_texts)
```
##### add_images()
- `uris` : File path to the image. `List[str]`.
- `metadatas` : Optional list of metadatas. `(Optional[List[dict]], optional)`
- `ids` : Optional list of IDs. `(Optional[List[str]], optional)`
Adds images by automatically creating their embeddings and adding them to the vectorstore.
```python
vec_store.add_images(uris=image_uris)
# here image_uris are local fs paths to the images.
```


@@ -1,142 +0,0 @@
# Llama-Index
![Illustration](../assets/llama-index.jpg)
## Quick start
You need to install the integration via `pip install llama-index-vector-stores-lancedb` in order to use it.
You can run the script below to try it out:
```python
import logging
import sys
# Uncomment to see debug logs
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, Document, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.lancedb import LanceDBVectorStore
import textwrap
import openai
openai.api_key = "sk-..."
documents = SimpleDirectoryReader("./data/your-data-dir/").load_data()
print("Document ID:", documents[0].doc_id, "Document Hash:", documents[0].hash)
## For LanceDB cloud :
# vector_store = LanceDBVectorStore(
# uri="db://db_name", # your remote DB URI
# api_key="sk_..", # lancedb cloud api key
# region="your-region" # the region you configured
# ...
# )
vector_store = LanceDBVectorStore(
uri="./lancedb", mode="overwrite", query_type="vector"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
lance_filter = "metadata.file_name = 'paul_graham_essay.txt' "
retriever = index.as_retriever(vector_store_kwargs={"where": lance_filter})
response = retriever.retrieve("What did the author do growing up?")
```
Check out the complete example here: [LlamaIndex demo](../notebooks/LlamaIndex_example.ipynb)
### Filtering
For metadata filtering, you can use a Lance SQL-like string filter as demonstrated in the example above. Additionally, you can also filter using the `MetadataFilters` class from LlamaIndex:
```python
from llama_index.core.vector_stores import (
MetadataFilters,
FilterOperator,
FilterCondition,
MetadataFilter,
)
query_filters = MetadataFilters(
filters=[
MetadataFilter(
key="creation_date", operator=FilterOperator.EQ, value="2024-05-23"
),
MetadataFilter(
key="file_size", value=75040, operator=FilterOperator.GT
),
],
condition=FilterCondition.AND,
)
```
### Hybrid Search
For complete documentation, refer [here](https://lancedb.github.io/lancedb/hybrid_search/hybrid_search/). This example uses the `colbert` reranker. Make sure to install necessary dependencies for the reranker you choose.
```python
from lancedb.rerankers import ColbertReranker
reranker = ColbertReranker()
vector_store._add_reranker(reranker)
query_engine = index.as_query_engine(
filters=query_filters,
vector_store_kwargs={
"query_type": "hybrid",
}
)
response = query_engine.query("How much did Viaweb charge per month?")
```
In the above snippet, you can change/specify query_type again when creating the engine/retriever.
## API reference
The exhaustive list of parameters for the `LanceDBVectorStore` vector store is:
- `connection`: Optional, `lancedb.db.LanceDBConnection` connection object to use. If not provided, a new connection will be created.
- `uri`: Optional[str], the uri of your database. Defaults to `"/tmp/lancedb"`.
- `table_name` : Optional[str], Name of your table in the database. Defaults to `"vectors"`.
- `table`: Optional[Any], `lancedb.db.LanceTable` object to be passed. Defaults to `None`.
- `vector_column_name`: Optional[Any], Column name to use for vectors in the table. Defaults to `'vector'`.
- `doc_id_key`: Optional[str], Column name to use for document IDs in the table. Defaults to `'doc_id'`.
- `text_key`: Optional[str], Column name to use for text in the table. Defaults to `'text'`.
- `api_key`: Optional[str], API key to use for LanceDB cloud database. Defaults to `None`.
- `region`: Optional[str], Region to use for LanceDB cloud database. Only for LanceDB Cloud; defaults to `None`.
- `nprobes`: Optional[int], The number of probes to use. Only applicable if an ANN index is created on the table; otherwise it is ignored. Defaults to `20`.
- `refine_factor`: Optional[int], Refine the results by reading extra elements and re-ranking them in memory. Defaults to `None`.
- `reranker`: Optional[Any], The reranker to use for LanceDB. Defaults to `None`.
- `overfetch_factor`: Optional[int], The factor by which to fetch more results. Defaults to `1`.
- `mode`: Optional[str], The mode to use for LanceDB. Defaults to `"overwrite"`.
- `query_type`: Optional[str], The type of query to use for LanceDB. Defaults to `"vector"`.
### Methods
- __from_table(cls, table: lancedb.db.LanceTable) -> `LanceDBVectorStore`__ : (class method) Creates instance from lancedb table.
- **_add_reranker(self, reranker: lancedb.rerankers.Reranker) -> `None`** : Add a reranker to an existing vector store.
- Usage :
```python
from lancedb.rerankers import ColbertReranker
reranker = ColbertReranker()
vector_store._add_reranker(reranker)
```
- **_table_exists(self, tbl_name: `Optional[str]` = `None`) -> `bool`** : Returns `True` if `tbl_name` exists in database.
- __create_index(self, scalar: `Optional[bool]` = False, col_name: `Optional[str]` = None, num_partitions: `Optional[int]` = 256, num_sub_vectors: `Optional[int]` = 96, index_cache_size: `Optional[int]` = None, metric: `Optional[str]` = "L2") -> `None`__ : Creates a scalar (for non-vector columns) or a vector index on a table. Make sure your vector column has enough data before creating an index on it.
- __add(self, nodes: `List[BaseNode]`, **add_kwargs: `Any`) -> `List[str]`__ : Adds nodes to the table.
- **delete(self, ref_doc_id: `str`) -> `None`** : Deletes nodes with the given `ref_doc_id`.
- **delete_nodes(self, node_ids: `List[str]`) -> `None`** : Deletes nodes with the given `node_ids`.
- __query(self, query: `VectorStoreQuery`, **kwargs: `Any`) -> `VectorStoreQueryResult`__ : Queries the index (`VectorStoreIndex`) for the top-k most similar nodes. Accepts a LlamaIndex `VectorStoreQuery` object.


@@ -1,538 +0,0 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "2db56c9b",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/vector_stores/LanceDBIndexDemo.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "db0855d0",
"metadata": {},
"source": [
"# LanceDB Vector Store\n",
"In this notebook we are going to show how to use [LanceDB](https://www.lancedb.com) to perform vector searches in LlamaIndex"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "f44170b2",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6c84199c",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index llama-index-vector-stores-lancedb"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1a90ce34",
"metadata": {},
"outputs": [],
"source": [
"%pip install lancedb==0.6.13 #Only required if the above cell installs an older version of lancedb (pypi package may not be released yet)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "39c62671",
"metadata": {},
"outputs": [],
"source": [
"# Refresh vector store URI if restarting or re-using the same notebook\n",
"! rm -rf ./lancedb"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "59b54276",
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"import sys\n",
"\n",
"# Uncomment to see debug logs\n",
"# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n",
"# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))\n",
"\n",
"\n",
"from llama_index.core import SimpleDirectoryReader, Document, StorageContext\n",
"from llama_index.core import VectorStoreIndex\n",
"from llama_index.vector_stores.lancedb import LanceDBVectorStore\n",
"import textwrap"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "26c71b6d",
"metadata": {},
"source": [
"### Setup OpenAI\n",
"The first step is to configure the openai key. It will be used to created embeddings for the documents loaded into the index"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "67b86621",
"metadata": {},
"outputs": [],
"source": [
"import openai\n",
"\n",
"openai.api_key = \"sk-\""
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "073f0a68",
"metadata": {},
"source": [
"Download Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "eef1b911",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--2024-06-11 16:42:37-- https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt\n",
"Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.109.133, 185.199.110.133, 185.199.108.133, ...\n",
"Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.109.133|:443... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 75042 (73K) [text/plain]\n",
"Saving to: data/paul_graham/paul_graham_essay.txt\n",
"\n",
"data/paul_graham/pa 100%[===================>] 73.28K --.-KB/s in 0.02s \n",
"\n",
"2024-06-11 16:42:37 (3.97 MB/s) - data/paul_graham/paul_graham_essay.txt saved [75042/75042]\n",
"\n"
]
}
],
"source": [
"!mkdir -p 'data/paul_graham/'\n",
"!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "f7010b1d-d1bb-4f08-9309-a328bb4ea396",
"metadata": {},
"source": [
"### Loading documents\n",
"Load the documents stored in the `data/paul_graham/` using the SimpleDirectoryReader"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c154dd4b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Document ID: cac1ba78-5007-4cf8-89ba-280264790115 Document Hash: fe2d4d3ef3a860780f6c2599808caa587c8be6516fe0ba4ca53cf117044ba953\n"
]
}
],
"source": [
"documents = SimpleDirectoryReader(\"./data/paul_graham/\").load_data()\n",
"print(\"Document ID:\", documents[0].doc_id, \"Document Hash:\", documents[0].hash)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "c0232fd1",
"metadata": {},
"source": [
"### Create the index\n",
"Here we create an index backed by LanceDB using the documents loaded previously. LanceDBVectorStore takes a few arguments.\n",
"- uri (str, required): Location where LanceDB will store its files.\n",
"- table_name (str, optional): The table name where the embeddings will be stored. Defaults to \"vectors\".\n",
"- nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20.\n",
"- refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None\n",
"\n",
"- More details can be found at [LanceDB docs](https://lancedb.github.io/lancedb/ann_indexes)"
]
},
{
"cell_type": "markdown",
"id": "1f2e20ef",
"metadata": {},
"source": [
"##### For LanceDB cloud :\n",
"```python\n",
"vector_store = LanceDBVectorStore( \n",
" uri=\"db://db_name\", # your remote DB URI\n",
" api_key=\"sk_..\", # lancedb cloud api key\n",
" region=\"your-region\" # the region you configured\n",
" ...\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8731da62",
"metadata": {},
"outputs": [],
"source": [
"vector_store = LanceDBVectorStore(\n",
" uri=\"./lancedb\", mode=\"overwrite\", query_type=\"hybrid\"\n",
")\n",
"storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
"\n",
"index = VectorStoreIndex.from_documents(\n",
" documents, storage_context=storage_context\n",
")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "8ee4473a-094f-4d0a-a825-e1213db07240",
"metadata": {},
"source": [
"### Query the index\n",
"We can now ask questions using our index. We can use filtering via `MetadataFilters` or use native lance `where` clause."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5eb6419b",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core.vector_stores import (\n",
" MetadataFilters,\n",
" FilterOperator,\n",
" FilterCondition,\n",
" MetadataFilter,\n",
")\n",
"\n",
"from datetime import datetime\n",
"\n",
"\n",
"query_filters = MetadataFilters(\n",
" filters=[\n",
" MetadataFilter(\n",
" key=\"creation_date\",\n",
" operator=FilterOperator.EQ,\n",
" value=datetime.now().strftime(\"%Y-%m-%d\"),\n",
" ),\n",
" MetadataFilter(\n",
" key=\"file_size\", value=75040, operator=FilterOperator.GT\n",
" ),\n",
" ],\n",
" condition=FilterCondition.AND,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "ee201930",
"metadata": {},
"source": [
"### Hybrid Search\n",
"\n",
"LanceDB offers hybrid search with reranking capabilities. For complete documentation, refer [here](https://lancedb.github.io/lancedb/hybrid_search/hybrid_search/).\n",
"\n",
"This example uses the `colbert` reranker. The following cell installs the necessary dependencies for `colbert`. If you choose a different reranker, make sure to adjust the dependencies accordingly."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e12d1454",
"metadata": {},
"outputs": [],
"source": [
"! pip install -U torch transformers tantivy@git+https://github.com/quickwit-oss/tantivy-py#164adc87e1a033117001cf70e38c82a53014d985"
]
},
{
"cell_type": "markdown",
"id": "c742cb07",
"metadata": {},
"source": [
"if you want to add a reranker at vector store initialization, you can pass it in the arguments like below :\n",
"```\n",
"from lancedb.rerankers import ColbertReranker\n",
"reranker = ColbertReranker()\n",
"vector_store = LanceDBVectorStore(uri=\"./lancedb\", reranker=reranker, mode=\"overwrite\")\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "27ea047b",
"metadata": {},
"outputs": [],
"source": [
"import lancedb"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8414517f",
"metadata": {},
"outputs": [],
"source": [
"from lancedb.rerankers import ColbertReranker\n",
"\n",
"reranker = ColbertReranker()\n",
"vector_store._add_reranker(reranker)\n",
"\n",
"query_engine = index.as_query_engine(\n",
" filters=query_filters,\n",
" # vector_store_kwargs={\n",
" # \"query_type\": \"fts\",\n",
" # },\n",
")\n",
"\n",
"response = query_engine.query(\"How much did Viaweb charge per month?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dc6ccb7a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Viaweb charged $100 a month for a small store and $300 a month for a big one.\n",
"metadata - {'65ed5f07-5b8a-4143-a939-e8764884828e': {'file_path': '/Users/raghavdixit/Desktop/open_source/llama_index_lance/docs/docs/examples/vector_stores/data/paul_graham/paul_graham_essay.txt', 'file_name': 'paul_graham_essay.txt', 'file_type': 'text/plain', 'file_size': 75042, 'creation_date': '2024-06-11', 'last_modified_date': '2024-06-11'}, 'be231827-20b8-4988-ac75-94fa79b3c22e': {'file_path': '/Users/raghavdixit/Desktop/open_source/llama_index_lance/docs/docs/examples/vector_stores/data/paul_graham/paul_graham_essay.txt', 'file_name': 'paul_graham_essay.txt', 'file_type': 'text/plain', 'file_size': 75042, 'creation_date': '2024-06-11', 'last_modified_date': '2024-06-11'}}\n"
]
}
],
"source": [
"print(response)\n",
"print(\"metadata -\", response.metadata)"
]
},
{
"cell_type": "markdown",
"id": "0c1c6c73",
"metadata": {},
"source": [
"##### lance filters(SQL like) directly via the `where` clause :"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a2bcc07",
"metadata": {},
"outputs": [],
"source": [
"lance_filter = \"metadata.file_name = 'paul_graham_essay.txt' \"\n",
"retriever = index.as_retriever(vector_store_kwargs={\"where\": lance_filter})\n",
"response = retriever.retrieve(\"What did the author do growing up?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7ac47cf9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"What I Worked On\n",
"\n",
"February 2021\n",
"\n",
"Before college the two main things I worked on, outside of school, were writing and programming. I didn't write essays. I wrote what beginning writers were supposed to write then, and probably still are: short stories. My stories were awful. They had hardly any plot, just characters with strong feelings, which I imagined made them deep.\n",
"\n",
"The first programs I tried writing were on the IBM 1401 that our school district used for what was then called \"data processing.\" This was in 9th grade, so I was 13 or 14. The school district's 1401 happened to be in the basement of our junior high school, and my friend Rich Draves and I got permission to use it. It was like a mini Bond villain's lair down there, with all these alien-looking machines — CPU, disk drives, printer, card reader — sitting up on a raised floor under bright fluorescent lights.\n",
"\n",
"The language we used was an early version of Fortran. You had to type programs on punch cards, then stack them in the card reader and press a button to load the program into memory and run it. The result would ordinarily be to print something on the spectacularly loud printer.\n",
"\n",
"I was puzzled by the 1401. I couldn't figure out what to do with it. And in retrospect there's not much I could have done with it. The only form of input to programs was data stored on punched cards, and I didn't have any data stored on punched cards. The only other option was to do things that didn't rely on any input, like calculate approximations of pi, but I didn't know enough math to do anything interesting of that type. So I'm not surprised I can't remember any programs I wrote, because they can't have done much. My clearest memory is of the moment I learned it was possible for programs not to terminate, when one of mine didn't. On a machine without time-sharing, this was a social as well as a technical error, as the data center manager's expression made clear.\n",
"\n",
"With microcomputers, everything changed. Now you could have a computer sitting right in front of you, on a desk, that could respond to your keystrokes as it was running instead of just churning through a stack of punch cards and then stopping. [1]\n",
"\n",
"The first of my friends to get a microcomputer built it himself. It was sold as a kit by Heathkit. I remember vividly how impressed and envious I felt watching him sitting in front of it, typing programs right into the computer.\n",
"\n",
"Computers were expensive in those days and it took me years of nagging before I convinced my father to buy one, a TRS-80, in about 1980. The gold standard then was the Apple II, but a TRS-80 was good enough. This was when I really started programming. I wrote simple games, a program to predict how high my model rockets would fly, and a word processor that my father used to write at least one book. There was only room in memory for about 2 pages of text, so he'd write 2 pages at a time and then print them out, but it was a lot better than a typewriter.\n",
"\n",
"Though I liked programming, I didn't plan to study it in college. In college I was going to study philosophy, which sounded much more powerful. It seemed, to my naive high school self, to be the study of the ultimate truths, compared to which the things studied in other fields would be mere domain knowledge. What I discovered when I got to college was that the other fields took up so much of the space of ideas that there wasn't much left for these supposed ultimate truths. All that seemed left for philosophy were edge cases that people in other fields felt could safely be ignored.\n",
"\n",
"I couldn't have put this into words when I was 18. All I knew at the time was that I kept taking philosophy courses and they kept being boring. So I decided to switch to AI.\n",
"\n",
"AI was in the air in the mid 1980s, but there were two things especially that made me want to work on it: a novel by Heinlein called The Moon is a Harsh Mistress, which featured an intelligent computer called Mike, and a PBS documentary that showed Terry Winograd using SHRDLU. I haven't tried rereading The Moon is a Harsh Mistress, so I don't know how well it has aged, but when I read it I was drawn entirely into its world.\n",
"metadata - {'file_path': '/Users/raghavdixit/Desktop/open_source/llama_index_lance/docs/docs/examples/vector_stores/data/paul_graham/paul_graham_essay.txt', 'file_name': 'paul_graham_essay.txt', 'file_type': 'text/plain', 'file_size': 75042, 'creation_date': '2024-06-11', 'last_modified_date': '2024-06-11'}\n"
]
}
],
"source": [
"print(response[0].get_content())\n",
"print(\"metadata -\", response[0].metadata)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "6afc84ac",
"metadata": {},
"source": [
"### Appending data\n",
"You can also add data to an existing index"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "759a532e",
"metadata": {},
"outputs": [],
"source": [
"nodes = [node.node for node in response]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "069fc099",
"metadata": {},
"outputs": [],
"source": [
"del index\n",
"\n",
"index = VectorStoreIndex.from_documents(\n",
" [Document(text=\"The sky is purple in Portland, Maine\")],\n",
" uri=\"/tmp/new_dataset\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a64ed441",
"metadata": {},
"outputs": [],
"source": [
"index.insert_nodes(nodes)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b5cffcfe",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Portland, Maine\n"
]
}
],
"source": [
"query_engine = index.as_query_engine()\n",
"response = query_engine.query(\"Where is the sky purple?\")\n",
"print(textwrap.fill(str(response), 100))"
]
},
{
"cell_type": "markdown",
"id": "ec548a02",
"metadata": {},
"source": [
"You can also create an index from an existing table"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dc99404d",
"metadata": {},
"outputs": [],
"source": [
"del index\n",
"\n",
"vec_store = LanceDBVectorStore.from_table(vector_store._table)\n",
"index = VectorStoreIndex.from_vector_store(vec_store)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b2e8cca",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The author started Viaweb and Aspra.\n"
]
}
],
"source": [
"query_engine = index.as_query_engine()\n",
"response = query_engine.query(\"What companies did the author start?\")\n",
"print(textwrap.fill(str(response), 100))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}


@@ -1,566 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "683953b3",
"metadata": {},
"source": [
"# LanceDB\n",
"\n",
">[LanceDB](https://lancedb.com/) is an open-source database for vector-search built with persistent storage, which greatly simplifies retrieval, filtering and management of embeddings. Fully open source.\n",
"\n",
"This notebook shows how to use functionality related to the `LanceDB` vector database based on the Lance data format."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b1051ba9",
"metadata": {},
"outputs": [],
"source": [
"! pip install tantivy"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "88ac92c0",
"metadata": {},
"outputs": [],
"source": [
"! pip install -U langchain-openai langchain-community"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5a1c84d6-a10f-428c-95cd-46d3a1702e07",
"metadata": {},
"outputs": [],
"source": [
"! pip install lancedb"
]
},
{
"cell_type": "markdown",
"id": "99134dd1-b91e-486f-8d90-534248e43b9d",
"metadata": {},
"source": [
"We want to use OpenAIEmbeddings so we have to get the OpenAI API Key. "
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "a0361f5c-e6f4-45f4-b829-11680cf03cec",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "d114ed78",
"metadata": {},
"outputs": [],
"source": [
"! rm -rf /tmp/lancedb"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "a3c3999a",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import TextLoader\n",
"from langchain_community.vectorstores import LanceDB\n",
"from langchain_openai import OpenAIEmbeddings\n",
"from langchain_text_splitters import CharacterTextSplitter\n",
"\n",
"loader = TextLoader(\"../../how_to/state_of_the_union.txt\")\n",
"documents = loader.load()\n",
"\n",
"documents = CharacterTextSplitter().split_documents(documents)\n",
"embeddings = OpenAIEmbeddings()"
]
},
{
"cell_type": "markdown",
"id": "e9517bb0",
"metadata": {},
"source": [
"##### For LanceDB cloud, you can invoke the vector store as follows :\n",
"\n",
"\n",
"```python\n",
"db_url = \"db://lang_test\" # url of db you created\n",
"api_key = \"xxxxx\" # your API key\n",
"region=\"us-east-1-dev\" # your selected region\n",
"\n",
"vector_store = LanceDB(\n",
" uri=db_url,\n",
" api_key=api_key,\n",
" region=region,\n",
" embedding=embeddings,\n",
" table_name='langchain_test'\n",
" )\n",
"```\n",
"\n",
"You can also add `region`, `api_key`, `uri` to `from_documents()` classmethod\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "6e104aee",
"metadata": {},
"outputs": [],
"source": [
"from lancedb.rerankers import LinearCombinationReranker\n",
"\n",
"reranker = LinearCombinationReranker(weight=0.3)\n",
"\n",
"docsearch = LanceDB.from_documents(documents, embeddings, reranker=reranker)\n",
"query = \"What did the president say about Ketanji Brown Jackson\""
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "259c7988",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"relevance score - 0.7066475030191711\n",
"text- They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n",
"\n",
"Officer Mora was 27 years old. \n",
"\n",
"Officer Rivera was 22. \n",
"\n",
"Both Dominican Americans whod grown up on the same streets they later chose to patrol as police officers. \n",
"\n",
"I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \n",
"\n",
"Ive worked on these issues a long time. \n",
"\n",
"I know what works: Investing in crime prevention and community police officers wholl walk the beat, wholl know the neighborhood, and who can restore trust and safety. \n",
"\n",
"So lets not abandon our streets. Or choose between safety and equal justice. \n",
"\n",
"Lets come together to protect our communities, restore trust, and hold law enforcement accountable. \n",
"\n",
"Thats why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. \n",
"\n",
"Thats why the American Rescue \n"
]
}
],
"source": [
"docs = docsearch.similarity_search_with_relevance_scores(query)\n",
"print(\"relevance score - \", docs[0][1])\n",
"print(\"text- \", docs[0][0].page_content[:1000])"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "9fa29dae",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"distance - 0.30000001192092896\n",
"text- My administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free. \n",
"\n",
"Our troops in Iraq and Afghanistan faced many dangers. \n",
"\n",
"One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. \n",
"\n",
"When they came home, many of the worlds fittest and best trained warriors were never the same. \n",
"\n",
"Headaches. Numbness. Dizziness. \n",
"\n",
"A cancer that would put them in a flag-draped coffin. \n",
"\n",
"I know. \n",
"\n",
"One of those soldiers was my son Major Beau Biden. \n",
"\n",
"We dont know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. \n",
"\n",
"But Im committed to finding out everything we can. \n",
"\n",
"Committed to military families like Danielle Robinson from Ohio. \n",
"\n",
"The widow of Sergeant First Class Heath Robinson. \n",
"\n",
"He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. \n",
"\n",
"Stationed near Baghdad, just ya\n"
]
}
],
"source": [
"docs = docsearch.similarity_search_with_score(query=\"Headaches\", query_type=\"hybrid\")\n",
"print(\"distance - \", docs[0][1])\n",
"print(\"text- \", docs[0][0].page_content[:1000])"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "e70ad201",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"reranker : <lancedb.rerankers.linear_combination.LinearCombinationReranker object at 0x107ef1130>\n"
]
}
],
"source": [
"print(\"reranker : \", docsearch._reranker)"
]
},
{
"cell_type": "markdown",
"id": "f5e1cdfd",
"metadata": {},
"source": [
"Additionaly, to explore the table you can load it into a df or save it in a csv file: \n",
"```python\n",
"tbl = docsearch.get_table()\n",
"print(\"tbl:\", tbl)\n",
"pd_df = tbl.to_pandas()\n",
"# pd_df.to_csv(\"docsearch.csv\", index=False)\n",
"\n",
"# you can also create a new vector store object using an older connection object:\n",
"vector_store = LanceDB(connection=tbl, embedding=embeddings)\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "9c608226",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"metadata : {'source': '../../how_to/state_of_the_union.txt'}\n",
"\n",
"SQL filtering :\n",
"\n",
"They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n",
"\n",
"Officer Mora was 27 years old. \n",
"\n",
"Officer Rivera was 22. \n",
"\n",
"Both Dominican Americans whod grown up on the same streets they later chose to patrol as police officers. \n",
"\n",
"I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \n",
"\n",
"Ive worked on these issues a long time. \n",
"\n",
"I know what works: Investing in crime prevention and community police officers wholl walk the beat, wholl know the neighborhood, and who can restore trust and safety. \n",
"\n",
"So lets not abandon our streets. Or choose between safety and equal justice. \n",
"\n",
"Lets come together to protect our communities, restore trust, and hold law enforcement accountable. \n",
"\n",
"Thats why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers. \n",
"\n",
"Thats why the American Rescue Plan provided $350 Billion that cities, states, and counties can use to hire more police and invest in proven strategies like community violence interruption—trusted messengers breaking the cycle of violence and trauma and giving young people hope. \n",
"\n",
"We should all agree: The answer is not to Defund the police. The answer is to FUND the police with the resources and training they need to protect our communities. \n",
"\n",
"I ask Democrats and Republicans alike: Pass my budget and keep our neighborhoods safe. \n",
"\n",
"And I will keep doing everything in my power to crack down on gun trafficking and ghost guns you can buy online and make at home—they have no serial numbers and cant be traced. \n",
"\n",
"And I ask Congress to pass proven measures to reduce gun violence. Pass universal background checks. Why should anyone on a terrorist list be able to purchase a weapon? \n",
"\n",
"Ban assault weapons and high-capacity magazines. \n",
"\n",
"Repeal the liability shield that makes gun manufacturers the only industry in America that cant be sued. \n",
"\n",
"These laws dont infringe on the Second Amendment. They save lives. \n",
"\n",
"The most fundamental right in America is the right to vote and to have it counted. And its under assault. \n",
"\n",
"In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n",
"\n",
"We cannot let this happen. \n",
"\n",
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while youre at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, Id like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nations top legal minds, who will continue Justice Breyers legacy of excellence. \n",
"\n",
"A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since shes been nominated, shes received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n",
"\n",
"And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n",
"\n",
"We can do both. At our border, weve installed new technology like cutting-edge scanners to better detect drug smuggling. \n",
"\n",
"Weve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \n",
"\n",
"Were putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster.\n"
]
}
],
"source": [
"docs = docsearch.similarity_search(\n",
" query=query, filter={\"metadata.source\": \"../../how_to/state_of_the_union.txt\"}\n",
")\n",
"\n",
"print(\"metadata :\", docs[0].metadata)\n",
"\n",
"# or you can directly supply SQL string filters :\n",
"\n",
"print(\"\\nSQL filtering :\\n\")\n",
"docs = docsearch.similarity_search(query=query, filter=\"text LIKE '%Officer Rivera%'\")\n",
"print(docs[0].page_content)"
]
},
{
"cell_type": "markdown",
"id": "9a173c94",
"metadata": {},
"source": [
"## Adding images "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "05f669d7",
"metadata": {},
"outputs": [],
"source": [
"! pip install -U langchain-experimental"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3ed69810",
"metadata": {},
"outputs": [],
"source": [
"! pip install open_clip_torch torch"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "2cacb5ee",
"metadata": {},
"outputs": [],
"source": [
"! rm -rf '/tmp/multimmodal_lance'"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "b3456e2c",
"metadata": {},
"outputs": [],
"source": [
"from langchain_experimental.open_clip import OpenCLIPEmbeddings"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "3848eba2",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"import requests\n",
"\n",
"# List of image URLs to download\n",
"image_urls = [\n",
" \"https://github.com/raghavdixit99/assets/assets/34462078/abf47cc4-d979-4aaa-83be-53a2115bf318\",\n",
" \"https://github.com/raghavdixit99/assets/assets/34462078/93be928e-522b-4e37-889d-d4efd54b2112\",\n",
"]\n",
"\n",
"texts = [\"bird\", \"dragon\"]\n",
"\n",
"# Directory to save images\n",
"dir_name = \"./photos/\"\n",
"\n",
"# Create directory if it doesn't exist\n",
"os.makedirs(dir_name, exist_ok=True)\n",
"\n",
"image_uris = []\n",
"# Download and save each image\n",
"for i, url in enumerate(image_urls, start=1):\n",
" response = requests.get(url)\n",
" path = os.path.join(dir_name, f\"image{i}.jpg\")\n",
" image_uris.append(path)\n",
" with open(path, \"wb\") as f:\n",
" f.write(response.content)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "3d62c2a0",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.vectorstores import LanceDB\n",
"\n",
"vec_store = LanceDB(\n",
" table_name=\"multimodal_test\",\n",
" embedding=OpenCLIPEmbeddings(),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "ebbb4881",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['b673620b-01f0-42ca-a92e-d033bb92c0a6',\n",
" '99c3a5b0-b577-417a-8177-92f4a655dbfb']"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"vec_store.add_images(uris=image_uris)"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "3c29dea3",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['f7adde5d-a4a3-402b-9e73-088b230722c3',\n",
" 'cbed59da-0aec-4bff-8820-9e59d81a2140']"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"vec_store.add_texts(texts)"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "8b2f25ce",
"metadata": {},
"outputs": [],
"source": [
"img_embed = vec_store._embedding.embed_query(\"bird\")"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "87a24079",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='bird', metadata={'id': 'f7adde5d-a4a3-402b-9e73-088b230722c3'})"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"vec_store.similarity_search_by_vector(img_embed)[0]"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "78557867",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"LanceTable(connection=LanceDBConnection(/tmp/lancedb), name=\"multimodal_test\")"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"vec_store._table"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1,78 +0,0 @@
# Jina Reranker
This re-ranker uses the [Jina](https://jina.ai/reranker/) API to rerank the search results. You can use this re-ranker by passing `JinaReranker()` to the `rerank()` method. Note that you'll either need to set the `JINA_API_KEY` environment variable or pass the `api_key` argument to use this re-ranker.
!!! note
    Supported Query Types: Hybrid, Vector, FTS
```python
import os
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import JinaReranker
os.environ['JINA_API_KEY'] = "jina_*"
embedder = get_registry().get("jina").create()
db = lancedb.connect("~/.lancedb")
class Schema(LanceModel):
text: str = embedder.SourceField()
vector: Vector(embedder.ndims()) = embedder.VectorField()
data = [
{"text": "hello world"},
{"text": "goodbye world"}
]
tbl = db.create_table("test", schema=Schema, mode="overwrite")
tbl.add(data)
reranker = JinaReranker(api_key="key")
# Run vector search with a reranker
result = tbl.search("hello").rerank(reranker=reranker).to_list()
# Run FTS search with a reranker
result = tbl.search("hello", query_type="fts").rerank(reranker=reranker).to_list()
# Run hybrid search with a reranker
tbl.create_fts_index("text", replace=True)
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
```
## Accepted Arguments
| Argument | Type | Default | Description |
| --- | --- | --- | --- |
| `model_name` | `str` | `"jina-reranker-v2-base-multilingual"` | The name of the reranker model to use. You can find the list of available models at https://jina.ai/reranker/ |
| `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
| `top_n` | `int` | `None` | The number of results to return. If None, will return all results. |
| `api_key` | `str` | `None` | The API key for the Jina API. If not provided, the `JINA_API_KEY` environment variable is used. |
| `return_score` | `str` | `"relevance"` | The type of score to return: either `"relevance"` or `"all"`. If `"relevance"`, returns only the `_relevance_score` column. If `"all"`, returns the vector (`_distance`) and/or FTS (`score`) scores along with the relevance score, depending on the query type. |
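For instance, the defaults can be overridden when constructing the reranker (a minimal sketch; the API key and the `top_n` value are placeholders):
```python
reranker = JinaReranker(
    model_name="jina-reranker-v2-base-multilingual",  # the default model
    column="text",  # column fed to the cross encoder
    top_n=5,  # keep only the 5 best results
    api_key="jina_*",  # placeholder; or set the JINA_API_KEY environment variable
)
```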
## Supported Scores for each query type
You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:
### Hybrid Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ❌ Not Supported | Would return the vector (`_distance`) and FTS (`score`) scores along with the hybrid search score (`_relevance_score`) |
### Vector Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the vector score (`_distance`) along with the relevance score (`_relevance_score`) |
### FTS Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the FTS score (`score`) along with the relevance score (`_relevance_score`) |
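For example, to get both score columns back from a vector search (a sketch reusing the `tbl` and API key from the example above):
```python
# "all" keeps the vector distance alongside the reranker's relevance score
reranker = JinaReranker(api_key="key", return_score="all")
result = tbl.search("hello").rerank(reranker=reranker).to_list()
# each result row now carries `_distance` as well as `_relevance_score`
```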

View File

@@ -7,7 +7,8 @@ excluded_globs = [
     "../src/fts.md",
     "../src/embedding.md",
     "../src/examples/*.md",
-    "../src/integrations/*.md",
+    "../src/integrations/voxel51.md",
+    "../src/integrations/langchain.md",
     "../src/guides/tables.md",
     "../src/python/duckdb.md",
     "../src/embeddings/*.md",
@@ -16,7 +17,6 @@ excluded_globs = [
     "../src/basic.md",
     "../src/hybrid_search/hybrid_search.md",
     "../src/reranking/*.md",
-    "../src/guides/tuning_retrievers/*.md",
 ]
 python_prefix = "py"

View File

@@ -1,27 +0,0 @@
[package]
name = "lancedb-jni"
description = "JNI bindings for LanceDB"
# TODO modify lancedb/Cargo.toml for version and dependencies
version = "0.4.18"
edition.workspace = true
repository.workspace = true
readme.workspace = true
license.workspace = true
keywords.workspace = true
categories.workspace = true
publish = false
[lib]
crate-type = ["cdylib"]
[dependencies]
lancedb = { path = "../../../rust/lancedb" }
lance = { workspace = true }
arrow = { workspace = true, features = ["ffi"] }
arrow-schema.workspace = true
tokio = "1.23"
jni = "0.21.1"
snafu.workspace = true
lazy_static.workspace = true
serde = { version = "^1" }
serde_json = { version = "1" }

View File

@@ -1,130 +0,0 @@
use crate::ffi::JNIEnvExt;
use crate::traits::IntoJava;
use crate::{Error, RT};
use jni::objects::{JObject, JString, JValue};
use jni::JNIEnv;
pub const NATIVE_CONNECTION: &str = "nativeConnectionHandle";
use crate::Result;
use lancedb::connection::{connect, Connection};
#[derive(Clone)]
pub struct BlockingConnection {
pub(crate) inner: Connection,
}
impl BlockingConnection {
pub fn create(dataset_uri: &str) -> Result<Self> {
let inner = RT.block_on(connect(dataset_uri).execute())?;
Ok(Self { inner })
}
pub fn table_names(
&self,
start_after: Option<String>,
limit: Option<i32>,
) -> Result<Vec<String>> {
let mut op = self.inner.table_names();
if let Some(start_after) = start_after {
op = op.start_after(start_after);
}
if let Some(limit) = limit {
op = op.limit(limit as u32);
}
Ok(RT.block_on(op.execute())?)
}
}
impl IntoJava for BlockingConnection {
fn into_java<'a>(self, env: &mut JNIEnv<'a>) -> JObject<'a> {
attach_native_connection(env, self)
}
}
fn attach_native_connection<'local>(
env: &mut JNIEnv<'local>,
connection: BlockingConnection,
) -> JObject<'local> {
let j_connection = create_java_connection_object(env);
// This block sets a native Rust object (Connection) as a field in the Java object (j_connection).
// Caution: This creates a potential for memory leaks. The Rust object (Connection) is not
// automatically garbage-collected by Java, and its memory will not be freed unless
// explicitly handled.
//
// To prevent memory leaks, ensure the following:
// 1. The Java object (`j_connection`) should implement the `java.io.Closeable` interface.
// 2. Users of this Java object should be instructed to always use it within a try-with-resources
// statement (or manually call the `close()` method) to ensure that `self.close()` is invoked.
match unsafe { env.set_rust_field(&j_connection, NATIVE_CONNECTION, connection) } {
Ok(_) => j_connection,
Err(err) => {
env.throw_new(
"java/lang/RuntimeException",
format!("Failed to set native handle for Connection: {}", err),
)
.expect("Error throwing exception");
JObject::null()
}
}
}
fn create_java_connection_object<'a>(env: &mut JNIEnv<'a>) -> JObject<'a> {
env.new_object("com/lancedb/lancedb/Connection", "()V", &[])
.expect("Failed to create Java Lance Connection instance")
}
#[no_mangle]
pub extern "system" fn Java_com_lancedb_lancedb_Connection_releaseNativeConnection(
mut env: JNIEnv,
j_connection: JObject,
) {
let _: BlockingConnection = unsafe {
env.take_rust_field(j_connection, NATIVE_CONNECTION)
.expect("Failed to take native Connection handle")
};
}
#[no_mangle]
pub extern "system" fn Java_com_lancedb_lancedb_Connection_connect<'local>(
mut env: JNIEnv<'local>,
_obj: JObject,
dataset_uri_object: JString,
) -> JObject<'local> {
let dataset_uri: String = ok_or_throw!(env, env.get_string(&dataset_uri_object)).into();
let blocking_connection = ok_or_throw!(env, BlockingConnection::create(&dataset_uri));
blocking_connection.into_java(&mut env)
}
#[no_mangle]
pub extern "system" fn Java_com_lancedb_lancedb_Connection_tableNames<'local>(
mut env: JNIEnv<'local>,
j_connection: JObject,
start_after_obj: JObject, // Optional<String>
limit_obj: JObject, // Optional<Integer>
) -> JObject<'local> {
ok_or_throw!(
env,
inner_table_names(&mut env, j_connection, start_after_obj, limit_obj)
)
}
fn inner_table_names<'local>(
env: &mut JNIEnv<'local>,
j_connection: JObject,
start_after_obj: JObject, // Optional<String>
limit_obj: JObject, // Optional<Integer>
) -> Result<JObject<'local>> {
let start_after = env.get_string_opt(&start_after_obj)?;
let limit = env.get_int_opt(&limit_obj)?;
let conn =
unsafe { env.get_rust_field::<_, _, BlockingConnection>(j_connection, NATIVE_CONNECTION) }?;
let table_names = conn.table_names(start_after, limit)?;
drop(conn);
let j_names = env.new_object("java/util/ArrayList", "()V", &[])?;
for item in table_names {
let jstr_item = env.new_string(item)?;
let item_jobj = JObject::from(jstr_item);
let item_gen = JValue::Object(&item_jobj);
env.call_method(&j_names, "add", "(Ljava/lang/Object;)Z", &[item_gen])?;
}
Ok(j_names)
}

View File

@@ -1,225 +0,0 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::str::Utf8Error;
use arrow_schema::ArrowError;
use jni::errors::Error as JniError;
use serde_json::Error as JsonError;
use snafu::{Location, Snafu};
type BoxedError = Box<dyn std::error::Error + Send + Sync + 'static>;
/// Java Exception types
pub enum JavaException {
IllegalArgumentException,
IOException,
RuntimeException,
}
impl JavaException {
pub fn as_str(&self) -> &str {
match self {
Self::IllegalArgumentException => "java/lang/IllegalArgumentException",
Self::IOException => "java/io/IOException",
Self::RuntimeException => "java/lang/RuntimeException",
}
}
}
/// TODO(lu) change to lancedb-jni
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("JNI error: {message}, {location}"))]
Jni { message: String, location: Location },
#[snafu(display("Invalid argument: {message}, {location}"))]
InvalidArgument { message: String, location: Location },
#[snafu(display("IO error: {source}, {location}"))]
IO {
source: BoxedError,
location: Location,
},
#[snafu(display("Arrow error: {message}, {location}"))]
Arrow { message: String, location: Location },
#[snafu(display("Index error: {message}, {location}"))]
Index { message: String, location: Location },
#[snafu(display("JSON error: {message}, {location}"))]
JSON { message: String, location: Location },
#[snafu(display("Dataset at path {path} was not found, {location}"))]
DatasetNotFound { path: String, location: Location },
#[snafu(display("Dataset already exists: {uri}, {location}"))]
DatasetAlreadyExists { uri: String, location: Location },
#[snafu(display("Table '{name}' already exists"))]
TableAlreadyExists { name: String },
#[snafu(display("Table '{name}' was not found"))]
TableNotFound { name: String },
#[snafu(display("Invalid table name '{name}': {reason}"))]
InvalidTableName { name: String, reason: String },
#[snafu(display("Embedding function '{name}' was not found: {reason}, {location}"))]
EmbeddingFunctionNotFound {
name: String,
reason: String,
location: Location,
},
#[snafu(display("Other Lance error: {message}, {location}"))]
OtherLance { message: String, location: Location },
#[snafu(display("Other LanceDB error: {message}, {location}"))]
OtherLanceDB { message: String, location: Location },
}
impl Error {
/// Throw as Java Exception
pub fn throw(&self, env: &mut jni::JNIEnv) {
match self {
Self::InvalidArgument { .. }
| Self::DatasetNotFound { .. }
| Self::DatasetAlreadyExists { .. }
| Self::TableAlreadyExists { .. }
| Self::TableNotFound { .. }
| Self::InvalidTableName { .. }
| Self::EmbeddingFunctionNotFound { .. } => {
self.throw_as(env, JavaException::IllegalArgumentException)
}
Self::IO { .. } | Self::Index { .. } => self.throw_as(env, JavaException::IOException),
Self::Arrow { .. }
| Self::JSON { .. }
| Self::OtherLance { .. }
| Self::OtherLanceDB { .. }
| Self::Jni { .. } => self.throw_as(env, JavaException::RuntimeException),
}
}
/// Throw as a concrete Java Exception
pub fn throw_as(&self, env: &mut jni::JNIEnv, exception: JavaException) {
let message = &format!(
"Error when throwing Java exception: {}:{}",
exception.as_str(),
self
);
env.throw_new(exception.as_str(), self.to_string())
.expect(message);
}
}
pub type Result<T> = std::result::Result<T, Error>;
trait ToSnafuLocation {
fn to_snafu_location(&'static self) -> snafu::Location;
}
impl ToSnafuLocation for std::panic::Location<'static> {
fn to_snafu_location(&'static self) -> snafu::Location {
snafu::Location::new(self.file(), self.line(), self.column())
}
}
impl From<JniError> for Error {
#[track_caller]
fn from(source: JniError) -> Self {
Self::Jni {
message: source.to_string(),
location: std::panic::Location::caller().to_snafu_location(),
}
}
}
impl From<Utf8Error> for Error {
#[track_caller]
fn from(source: Utf8Error) -> Self {
Self::InvalidArgument {
message: source.to_string(),
location: std::panic::Location::caller().to_snafu_location(),
}
}
}
impl From<ArrowError> for Error {
#[track_caller]
fn from(source: ArrowError) -> Self {
Self::Arrow {
message: source.to_string(),
location: std::panic::Location::caller().to_snafu_location(),
}
}
}
impl From<JsonError> for Error {
#[track_caller]
fn from(source: JsonError) -> Self {
Self::JSON {
message: source.to_string(),
location: std::panic::Location::caller().to_snafu_location(),
}
}
}
impl From<lance::Error> for Error {
#[track_caller]
fn from(source: lance::Error) -> Self {
match source {
lance::Error::DatasetNotFound {
path,
source: _,
location,
} => Self::DatasetNotFound { path, location },
lance::Error::DatasetAlreadyExists { uri, location } => {
Self::DatasetAlreadyExists { uri, location }
}
lance::Error::IO { source, location } => Self::IO { source, location },
lance::Error::Arrow { message, location } => Self::Arrow { message, location },
lance::Error::Index { message, location } => Self::Index { message, location },
lance::Error::InvalidInput { source, location } => Self::InvalidArgument {
message: source.to_string(),
location,
},
_ => Self::OtherLance {
message: source.to_string(),
location: std::panic::Location::caller().to_snafu_location(),
},
}
}
}
impl From<lancedb::Error> for Error {
#[track_caller]
fn from(source: lancedb::Error) -> Self {
match source {
lancedb::Error::InvalidTableName { name, reason } => {
Self::InvalidTableName { name, reason }
}
lancedb::Error::InvalidInput { message } => Self::InvalidArgument {
message,
location: std::panic::Location::caller().to_snafu_location(),
},
lancedb::Error::TableNotFound { name } => Self::TableNotFound { name },
lancedb::Error::TableAlreadyExists { name } => Self::TableAlreadyExists { name },
lancedb::Error::EmbeddingFunctionNotFound { name, reason } => {
Self::EmbeddingFunctionNotFound {
name,
reason,
location: std::panic::Location::caller().to_snafu_location(),
}
}
lancedb::Error::Arrow { source } => Self::Arrow {
message: source.to_string(),
location: std::panic::Location::caller().to_snafu_location(),
},
lancedb::Error::Lance { source } => Self::from(source),
_ => Self::OtherLanceDB {
message: source.to_string(),
location: std::panic::Location::caller().to_snafu_location(),
},
}
}
}

View File

@@ -1,204 +0,0 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::slice;
use jni::objects::{JByteBuffer, JObjectArray, JString};
use jni::sys::jobjectArray;
use jni::{objects::JObject, JNIEnv};
use crate::error::{Error, Result};
/// TODO(lu) import from lance-jni without duplicate
/// Extend JNIEnv with helper functions.
pub trait JNIEnvExt {
/// Get integers from Java List<Integer> object.
fn get_integers(&mut self, obj: &JObject) -> Result<Vec<i32>>;
/// Get strings from Java List<String> object.
fn get_strings(&mut self, obj: &JObject) -> Result<Vec<String>>;
/// Get strings from Java String[] object.
/// Note that getting Option&lt;Vec&lt;String&gt;&gt; from Java Optional&lt;String[]&gt; just doesn't work.
#[allow(unused)]
fn get_strings_array(&mut self, obj: jobjectArray) -> Result<Vec<String>>;
/// Get Option<String> from Java Optional<String>.
fn get_string_opt(&mut self, obj: &JObject) -> Result<Option<String>>;
/// Get Option<Vec<String>> from Java Optional<List<String>>.
#[allow(unused)]
fn get_strings_opt(&mut self, obj: &JObject) -> Result<Option<Vec<String>>>;
/// Get Option<i32> from Java Optional<Integer>.
fn get_int_opt(&mut self, obj: &JObject) -> Result<Option<i32>>;
/// Get Option<Vec<i32>> from Java Optional<List<Integer>>.
fn get_ints_opt(&mut self, obj: &JObject) -> Result<Option<Vec<i32>>>;
/// Get Option<i64> from Java Optional<Long>.
#[allow(unused)]
fn get_long_opt(&mut self, obj: &JObject) -> Result<Option<i64>>;
/// Get Option<u64> from Java Optional<Long>.
#[allow(unused)]
fn get_u64_opt(&mut self, obj: &JObject) -> Result<Option<u64>>;
/// Get Option<&[u8]> from Java Optional<ByteBuffer>.
#[allow(unused)]
fn get_bytes_opt(&mut self, obj: &JObject) -> Result<Option<&[u8]>>;
fn get_optional<T, F>(&mut self, obj: &JObject, f: F) -> Result<Option<T>>
where
F: FnOnce(&mut JNIEnv, &JObject) -> Result<T>;
}
impl JNIEnvExt for JNIEnv<'_> {
fn get_integers(&mut self, obj: &JObject) -> Result<Vec<i32>> {
let list = self.get_list(obj)?;
let mut iter = list.iter(self)?;
let mut results = Vec::with_capacity(list.size(self)? as usize);
while let Some(elem) = iter.next(self)? {
let int_obj = self.call_method(elem, "intValue", "()I", &[])?;
let int_value = int_obj.i()?;
results.push(int_value);
}
Ok(results)
}
fn get_strings(&mut self, obj: &JObject) -> Result<Vec<String>> {
let list = self.get_list(obj)?;
let mut iter = list.iter(self)?;
let mut results = Vec::with_capacity(list.size(self)? as usize);
while let Some(elem) = iter.next(self)? {
let jstr = JString::from(elem);
let val = self.get_string(&jstr)?;
results.push(val.to_str()?.to_string())
}
Ok(results)
}
fn get_strings_array(&mut self, obj: jobjectArray) -> Result<Vec<String>> {
let jobject_array = unsafe { JObjectArray::from_raw(obj) };
let array_len = self.get_array_length(&jobject_array)?;
let mut res: Vec<String> = Vec::new();
for i in 0..array_len {
let item: JString = self.get_object_array_element(&jobject_array, i)?.into();
res.push(self.get_string(&item)?.into());
}
Ok(res)
}
fn get_string_opt(&mut self, obj: &JObject) -> Result<Option<String>> {
self.get_optional(obj, |env, inner_obj| {
let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
let java_string_obj = java_obj_gen.l()?;
let jstr = JString::from(java_string_obj);
let val = env.get_string(&jstr)?;
Ok(val.to_str()?.to_string())
})
}
fn get_strings_opt(&mut self, obj: &JObject) -> Result<Option<Vec<String>>> {
self.get_optional(obj, |env, inner_obj| {
let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
let java_list_obj = java_obj_gen.l()?;
env.get_strings(&java_list_obj)
})
}
fn get_int_opt(&mut self, obj: &JObject) -> Result<Option<i32>> {
self.get_optional(obj, |env, inner_obj| {
let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
let java_int_obj = java_obj_gen.l()?;
let int_obj = env.call_method(java_int_obj, "intValue", "()I", &[])?;
let int_value = int_obj.i()?;
Ok(int_value)
})
}
fn get_ints_opt(&mut self, obj: &JObject) -> Result<Option<Vec<i32>>> {
self.get_optional(obj, |env, inner_obj| {
let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
let java_list_obj = java_obj_gen.l()?;
env.get_integers(&java_list_obj)
})
}
fn get_long_opt(&mut self, obj: &JObject) -> Result<Option<i64>> {
self.get_optional(obj, |env, inner_obj| {
let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
let java_long_obj = java_obj_gen.l()?;
let long_obj = env.call_method(java_long_obj, "longValue", "()J", &[])?;
let long_value = long_obj.j()?;
Ok(long_value)
})
}
fn get_u64_opt(&mut self, obj: &JObject) -> Result<Option<u64>> {
self.get_optional(obj, |env, inner_obj| {
let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
let java_long_obj = java_obj_gen.l()?;
let long_obj = env.call_method(java_long_obj, "longValue", "()J", &[])?;
let long_value = long_obj.j()?;
Ok(long_value as u64)
})
}
fn get_bytes_opt(&mut self, obj: &JObject) -> Result<Option<&[u8]>> {
self.get_optional(obj, |env, inner_obj| {
let java_obj_gen = env.call_method(inner_obj, "get", "()Ljava/lang/Object;", &[])?;
let java_byte_buffer_obj = java_obj_gen.l()?;
let j_byte_buffer = JByteBuffer::from(java_byte_buffer_obj);
let raw_data = env.get_direct_buffer_address(&j_byte_buffer)?;
let capacity = env.get_direct_buffer_capacity(&j_byte_buffer)?;
let data = unsafe { slice::from_raw_parts(raw_data, capacity) };
Ok(data)
})
}
fn get_optional<T, F>(&mut self, obj: &JObject, f: F) -> Result<Option<T>>
where
F: FnOnce(&mut JNIEnv, &JObject) -> Result<T>,
{
if obj.is_null() {
return Ok(None);
}
let is_present = self.call_method(obj, "isPresent", "()Z", &[])?;
if !is_present.z()? {
// TODO(lu): move getting the Java object in here, because only a Java Object can be retrieved
Ok(None)
} else {
f(self, obj).map(Some)
}
}
}
#[no_mangle]
pub extern "system" fn Java_com_lancedb_lance_test_JniTestHelper_parseInts(
mut env: JNIEnv,
_obj: JObject,
list_obj: JObject, // List<Integer>
) {
ok_or_throw_without_return!(env, env.get_integers(&list_obj));
}
#[no_mangle]
pub extern "system" fn Java_com_lancedb_lance_test_JniTestHelper_parseIntsOpt(
mut env: JNIEnv,
_obj: JObject,
list_obj: JObject, // Optional<List<Integer>>
) {
ok_or_throw_without_return!(env, env.get_ints_opt(&list_obj));
}

View File

@@ -1,68 +0,0 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use lazy_static::lazy_static;
// TODO import from lance-jni without duplicate
#[macro_export]
macro_rules! ok_or_throw {
($env:expr, $result:expr) => {
match $result {
Ok(value) => value,
Err(err) => {
Error::from(err).throw(&mut $env);
return JObject::null();
}
}
};
}
macro_rules! ok_or_throw_without_return {
($env:expr, $result:expr) => {
match $result {
Ok(value) => value,
Err(err) => {
Error::from(err).throw(&mut $env);
return;
}
}
};
}
#[macro_export]
macro_rules! ok_or_throw_with_return {
($env:expr, $result:expr, $ret:expr) => {
match $result {
Ok(value) => value,
Err(err) => {
Error::from(err).throw(&mut $env);
return $ret;
}
}
};
}
mod connection;
pub mod error;
mod ffi;
mod traits;
pub use error::{Error, Result};
lazy_static! {
static ref RT: tokio::runtime::Runtime = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.expect("Failed to create tokio runtime");
}

View File

@@ -1,122 +0,0 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jni::objects::{JMap, JObject, JString, JValue};
use jni::JNIEnv;
use crate::Result;
pub trait FromJObject<T> {
fn extract(&self) -> Result<T>;
}
/// Convert a Rust type into a Java Object.
pub trait IntoJava {
fn into_java<'a>(self, env: &mut JNIEnv<'a>) -> JObject<'a>;
}
impl FromJObject<i32> for JObject<'_> {
fn extract(&self) -> Result<i32> {
Ok(JValue::from(self).i()?)
}
}
impl FromJObject<i64> for JObject<'_> {
fn extract(&self) -> Result<i64> {
Ok(JValue::from(self).j()?)
}
}
impl FromJObject<f32> for JObject<'_> {
fn extract(&self) -> Result<f32> {
Ok(JValue::from(self).f()?)
}
}
impl FromJObject<f64> for JObject<'_> {
fn extract(&self) -> Result<f64> {
Ok(JValue::from(self).d()?)
}
}
pub trait FromJString {
fn extract(&self, env: &mut JNIEnv) -> Result<String>;
}
impl FromJString for JString<'_> {
fn extract(&self, env: &mut JNIEnv) -> Result<String> {
Ok(env.get_string(self)?.into())
}
}
pub trait JMapExt {
#[allow(dead_code)]
fn get_string(&self, env: &mut JNIEnv, key: &str) -> Result<Option<String>>;
#[allow(dead_code)]
fn get_i32(&self, env: &mut JNIEnv, key: &str) -> Result<Option<i32>>;
#[allow(dead_code)]
fn get_i64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<i64>>;
#[allow(dead_code)]
fn get_f32(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f32>>;
#[allow(dead_code)]
fn get_f64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f64>>;
}
fn get_map_value<T>(env: &mut JNIEnv, map: &JMap, key: &str) -> Result<Option<T>>
where
for<'a> JObject<'a>: FromJObject<T>,
{
let key_obj: JObject = env.new_string(key)?.into();
if let Some(value) = map.get(env, &key_obj)? {
if value.is_null() {
Ok(None)
} else {
Ok(Some(value.extract()?))
}
} else {
Ok(None)
}
}
impl JMapExt for JMap<'_, '_, '_> {
fn get_string(&self, env: &mut JNIEnv, key: &str) -> Result<Option<String>> {
let key_obj: JObject = env.new_string(key)?.into();
if let Some(value) = self.get(env, &key_obj)? {
let value_str: JString = value.into();
Ok(Some(value_str.extract(env)?))
} else {
Ok(None)
}
}
fn get_i32(&self, env: &mut JNIEnv, key: &str) -> Result<Option<i32>> {
get_map_value(env, self, key)
}
fn get_i64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<i64>> {
get_map_value(env, self, key)
}
fn get_f32(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f32>> {
get_map_value(env, self, key)
}
fn get_f64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f64>> {
get_map_value(env, self, key)
}
}

View File

@@ -1,94 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.lancedb</groupId>
<artifactId>lancedb-parent</artifactId>
<version>0.1-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<artifactId>lancedb-core</artifactId>
<name>LanceDB Core</name>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>org.apache.arrow</groupId>
<artifactId>arrow-vector</artifactId>
</dependency>
<dependency>
<groupId>org.apache.arrow</groupId>
<artifactId>arrow-memory-netty</artifactId>
</dependency>
<dependency>
<groupId>org.apache.arrow</groupId>
<artifactId>arrow-c-data</artifactId>
</dependency>
<dependency>
<groupId>org.apache.arrow</groupId>
<artifactId>arrow-dataset</artifactId>
</dependency>
<dependency>
<groupId>org.json</groupId>
<artifactId>json</artifactId>
</dependency>
<dependency>
<groupId>org.questdb</groupId>
<artifactId>jar-jni</artifactId>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<profiles>
<profile>
<id>build-jni</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.questdb</groupId>
<artifactId>rust-maven-plugin</artifactId>
<version>1.1.1</version>
<executions>
<execution>
<id>lancedb-jni</id>
<goals>
<goal>build</goal>
</goals>
<configuration>
<path>lancedb-jni</path>
<!--<release>true</release>-->
<!-- Copy native libraries to target/classes for runtime access -->
<copyTo>${project.build.directory}/classes/nativelib</copyTo>
<copyWithPlatformDir>true</copyWithPlatformDir>
</configuration>
</execution>
<execution>
<id>lancedb-jni-test</id>
<goals>
<goal>test</goal>
</goals>
<configuration>
<path>lancedb-jni</path>
<release>false</release>
<verbosity>-v</verbosity>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>

View File

@@ -1,120 +0,0 @@
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.lancedb.lancedb;
import io.questdb.jar.jni.JarJniLoader;
import java.io.Closeable;
import java.util.List;
import java.util.Optional;
/**
* Represents a LanceDB database.
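*
* <p>Illustrative usage (a sketch; the path is a placeholder and the database is
* assumed to already contain tables). Pagination works by passing the last name
* of the previous page as {@code startAfter}:
* <pre>{@code
* try (Connection conn = Connection.connect("/tmp/lancedb")) {
*   List<String> firstPage = conn.tableNames(2);
*   List<String> nextPage = conn.tableNames(firstPage.get(firstPage.size() - 1), 2);
* }
* }</pre>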
*/
public class Connection implements Closeable {
static {
JarJniLoader.loadLib(Connection.class, "/nativelib", "lancedb_jni");
}
private long nativeConnectionHandle;
/**
* Connect to a LanceDB instance.
*/
public static native Connection connect(String uri);
/**
* Get the names of all tables in the database. The names are sorted in
* ascending order.
*
* @return the table names
*/
public List<String> tableNames() {
return tableNames(Optional.empty(), Optional.empty());
}
/**
* Get the names of filtered tables in the database. The names are sorted in
* ascending order.
*
* @param limit The number of results to return.
* @return the table names
*/
public List<String> tableNames(int limit) {
return tableNames(Optional.empty(), Optional.of(limit));
}
/**
* Get the names of filtered tables in the database. The names are sorted in
* ascending order.
*
* @param startAfter If present, only return names that come lexicographically after the supplied
* value. This can be combined with limit to implement pagination
* by setting this to the last table name from the previous page.
* @return the table names
*/
public List<String> tableNames(String startAfter) {
return tableNames(Optional.of(startAfter), Optional.empty());
}
/**
* Get the names of filtered tables in the database. The names are sorted in
* ascending order.
*
* @param startAfter If present, only return names that come lexicographically after the supplied
* value. This can be combined with limit to implement pagination
* by setting this to the last table name from the previous page.
* @param limit The number of results to return.
* @return the table names
*/
public List<String> tableNames(String startAfter, int limit) {
return tableNames(Optional.of(startAfter), Optional.of(limit));
}
/**
* Get the names of filtered tables in the database. The names are sorted in
* ascending order.
*
* @param startAfter If present, only return names that come lexicographically after the supplied
* value. This can be combined with limit to implement pagination
* by setting this to the last table name from the previous page.
* @param limit The number of results to return.
* @return the table names
*/
public native List<String> tableNames(
Optional<String> startAfter, Optional<Integer> limit);
/**
* Closes this connection and releases any system resources associated with it. If
* the connection is
* already closed, then invoking this method has no effect.
*/
@Override
public void close() {
if (nativeConnectionHandle != 0) {
releaseNativeConnection(nativeConnectionHandle);
nativeConnectionHandle = 0;
}
}
/**
* Native method to release the Lance connection resources associated with the
* given handle.
*
* @param handle The native handle to the connection resource.
*/
private native void releaseNativeConnection(long handle);
private Connection() {}
}

View File

@@ -1,135 +0,0 @@
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.lancedb.lancedb;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.nio.file.Path;
import java.util.List;
import java.net.URL;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
public class ConnectionTest {
private static final String[] TABLE_NAMES = {
"dataset_version",
"new_empty_dataset",
"test",
"write_stream"
};
@TempDir
static Path tempDir; // Temporary directory for the tests
private static URL lanceDbURL;
@BeforeAll
static void setUp() {
ClassLoader classLoader = ConnectionTest.class.getClassLoader();
lanceDbURL = classLoader.getResource("example_db");
}
@Test
void emptyDB() {
String databaseUri = tempDir.resolve("emptyDB").toString();
try (Connection conn = Connection.connect(databaseUri)) {
List<String> tableNames = conn.tableNames();
assertTrue(tableNames.isEmpty());
}
}
@Test
void tableNames() {
try (Connection conn = Connection.connect(lanceDbURL.toString())) {
List<String> tableNames = conn.tableNames();
assertEquals(4, tableNames.size());
for (int i = 0; i < TABLE_NAMES.length; i++) {
assertEquals(TABLE_NAMES[i], tableNames.get(i));
}
}
}
@Test
void tableNamesStartAfter() {
try (Connection conn = Connection.connect(lanceDbURL.toString())) {
assertTableNamesStartAfter(conn, TABLE_NAMES[0], 3, TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
assertTableNamesStartAfter(conn, TABLE_NAMES[1], 2, TABLE_NAMES[2], TABLE_NAMES[3]);
assertTableNamesStartAfter(conn, TABLE_NAMES[2], 1, TABLE_NAMES[3]);
assertTableNamesStartAfter(conn, TABLE_NAMES[3], 0);
assertTableNamesStartAfter(conn, "a_dataset", 4, TABLE_NAMES[0], TABLE_NAMES[1], TABLE_NAMES[2], TABLE_NAMES[3]);
assertTableNamesStartAfter(conn, "o_dataset", 2, TABLE_NAMES[2], TABLE_NAMES[3]);
assertTableNamesStartAfter(conn, "v_dataset", 1, TABLE_NAMES[3]);
assertTableNamesStartAfter(conn, "z_dataset", 0);
}
}
private void assertTableNamesStartAfter(Connection conn, String startAfter, int expectedSize, String... expectedNames) {
List<String> tableNames = conn.tableNames(startAfter);
assertEquals(expectedSize, tableNames.size());
for (int i = 0; i < expectedNames.length; i++) {
assertEquals(expectedNames[i], tableNames.get(i));
}
}
@Test
void tableNamesLimit() {
try (Connection conn = Connection.connect(lanceDbURL.toString())) {
for (int i = 0; i <= TABLE_NAMES.length; i++) {
List<String> tableNames = conn.tableNames(i);
assertEquals(i, tableNames.size());
for (int j = 0; j < i; j++) {
assertEquals(TABLE_NAMES[j], tableNames.get(j));
}
}
}
}
@Test
void tableNamesStartAfterLimit() {
try (Connection conn = Connection.connect(lanceDbURL.toString())) {
List<String> tableNames = conn.tableNames(TABLE_NAMES[0], 2);
assertEquals(2, tableNames.size());
assertEquals(TABLE_NAMES[1], tableNames.get(0));
assertEquals(TABLE_NAMES[2], tableNames.get(1));
tableNames = conn.tableNames(TABLE_NAMES[1], 1);
assertEquals(1, tableNames.size());
assertEquals(TABLE_NAMES[2], tableNames.get(0));
tableNames = conn.tableNames(TABLE_NAMES[2], 2);
assertEquals(1, tableNames.size());
assertEquals(TABLE_NAMES[3], tableNames.get(0));
tableNames = conn.tableNames(TABLE_NAMES[3], 2);
assertEquals(0, tableNames.size());
tableNames = conn.tableNames(TABLE_NAMES[0], 0);
assertEquals(0, tableNames.size());
// Limit larger than the number of remaining tables
tableNames = conn.tableNames(TABLE_NAMES[0], 10);
assertEquals(3, tableNames.size());
assertEquals(TABLE_NAMES[1], tableNames.get(0));
assertEquals(TABLE_NAMES[2], tableNames.get(1));
assertEquals(TABLE_NAMES[3], tableNames.get(2));
// Start after a value not in the list
tableNames = conn.tableNames("non_existent_table", 2);
assertEquals(2, tableNames.size());
assertEquals(TABLE_NAMES[2], tableNames.get(0));
assertEquals(TABLE_NAMES[3], tableNames.get(1));
// Start after the last table with a limit
tableNames = conn.tableNames(TABLE_NAMES[3], 1);
assertEquals(0, tableNames.size());
}
}
}

View File

Binary file not shown.

View File

Binary file not shown.

View File

Binary file not shown.

View File

@@ -1,129 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.lancedb</groupId>
<artifactId>lancedb-parent</artifactId>
<version>0.1-SNAPSHOT</version>
<packaging>pom</packaging>
<name>Lance Parent</name>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.source>11</maven.compiler.source>
<maven.compiler.target>11</maven.compiler.target>
<arrow.version>15.0.0</arrow.version>
</properties>
<modules>
<module>core</module>
</modules>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.apache.arrow</groupId>
<artifactId>arrow-vector</artifactId>
<version>${arrow.version}</version>
</dependency>
<dependency>
<groupId>org.apache.arrow</groupId>
<artifactId>arrow-memory-netty</artifactId>
<version>${arrow.version}</version>
</dependency>
<dependency>
<groupId>org.apache.arrow</groupId>
<artifactId>arrow-c-data</artifactId>
<version>${arrow.version}</version>
</dependency>
<dependency>
<groupId>org.apache.arrow</groupId>
<artifactId>arrow-dataset</artifactId>
<version>${arrow.version}</version>
</dependency>
<dependency>
<groupId>org.questdb</groupId>
<artifactId>jar-jni</artifactId>
<version>1.1.1</version>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter</artifactId>
<version>5.10.1</version>
</dependency>
<dependency>
<groupId>org.json</groupId>
<artifactId>json</artifactId>
<version>20210307</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>3.3.1</version>
<configuration>
<configLocation>google_checks.xml</configLocation>
<consoleOutput>true</consoleOutput>
<failsOnError>true</failsOnError>
<violationSeverity>warning</violationSeverity>
<linkXRef>false</linkXRef>
</configuration>
<executions>
<execution>
<id>validate</id>
<phase>validate</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
<pluginManagement>
<plugins>
<plugin>
<artifactId>maven-clean-plugin</artifactId>
<version>3.1.0</version>
</plugin>
<plugin>
<artifactId>maven-resources-plugin</artifactId>
<version>3.0.2</version>
</plugin>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.8.1</version>
<configuration>
<compilerArgs>
<arg>-h</arg>
<arg>target/headers</arg>
</compilerArgs>
</configuration>
</plugin>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<version>3.2.5</version>
<configuration>
<argLine>--add-opens=java.base/java.nio=ALL-UNNAMED</argLine>
<forkNode implementation="org.apache.maven.plugin.surefire.extensions.SurefireForkNodeFactory"/>
<useSystemClassLoader>false</useSystemClassLoader>
</configuration>
</plugin>
<plugin>
<artifactId>maven-jar-plugin</artifactId>
<version>3.0.2</version>
</plugin>
<plugin>
<artifactId>maven-install-plugin</artifactId>
<version>2.5.2</version>
</plugin>
</plugins>
</pluginManagement>
</build>
</project>

View File

@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.6.0",
+  "version": "0.4.20",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.6.0",
+      "version": "0.4.20",
       "cpu": [
         "x64",
         "arm64"

View File

@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.6.0",
+  "version": "0.4.20",
   "description": " Serverless, low-latency vector database for AI applications",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "scripts": {
     "tsc": "tsc -b",
-    "build": "npm run tsc && cargo-cp-artifact --artifact cdylib lancedb_node index.node -- cargo build -p lancedb-node --message-format=json",
+    "build": "npm run tsc && cargo-cp-artifact --artifact cdylib lancedb-node index.node -- cargo build --message-format=json",
     "build-release": "npm run build -- --release",
     "test": "npm run tsc && mocha -recursive dist/test",
     "integration-test": "npm run tsc && mocha -recursive dist/integration_test",

View File

@@ -624,6 +624,8 @@ function validateSchemaEmbeddings(
   }
   if (missingEmbeddingFields.length > 0 && embeddings === undefined) {
+    console.log({ missingEmbeddingFields, embeddings });
+
     throw new Error(
       `Table has embeddings: "${missingEmbeddingFields
         .map((f) => f.name)
@@ -631,5 +633,5 @@ function validateSchemaEmbeddings(
     );
   }
-  return new Schema(fields, schema.metadata);
+  return new Schema(fields);
 }
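
A note for reviewers: beyond the debug logging, the second hunk changes behaviour. `new Schema(fields)` drops the metadata map that `new Schema(fields, schema.metadata)` preserved. A minimal sketch of the difference, assuming only the apache-arrow `Schema` API (the field and metadata values below are illustrative):

```ts
import { Field, Schema, Utf8 } from "apache-arrow";

// Hypothetical fields/metadata for illustration only.
const fields = [new Field("text", new Utf8(), true)];
const metadata = new Map([["embedding_functions", "[]"]]);

// 0.6.0 returned `new Schema(fields, schema.metadata)`: metadata survives.
const withMeta = new Schema(fields, metadata);
// 0.4.20 returns `new Schema(fields)`: schema-level metadata is dropped.
const withoutMeta = new Schema(fields);

console.log(withMeta.metadata.size); // 1
console.log(withoutMeta.metadata.size); // 0
```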

View File

@@ -695,26 +695,15 @@ export interface MergeInsertArgs {
   whenNotMatchedBySourceDelete?: string | boolean
 }

-export enum IndexStatus {
-  Pending = "pending",
-  Indexing = "indexing",
-  Done = "done",
-  Failed = "failed"
-}
-
 export interface VectorIndex {
   columns: string[]
   name: string
   uuid: string
-  status: IndexStatus
 }

 export interface IndexStats {
   numIndexedRows: number | null
   numUnindexedRows: number | null
-  indexType: string | null
-  distanceType: string | null
-  completedAt: string | null
 }

 /**
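
For reference, the index-metadata surface after this revert reduces to the fields kept above. A sketch (not library source; the helper below is hypothetical) of what callers can still rely on:

```ts
// Shapes after the revert, copied from the hunk above.
interface VectorIndex {
  columns: string[];
  name: string;
  uuid: string;
}

interface IndexStats {
  numIndexedRows: number | null;
  numUnindexedRows: number | null;
}

// Hypothetical helper, for illustration only: status, indexType, distanceType
// and completedAt are no longer part of the surface.
function describeIndex(index: VectorIndex, stats: IndexStats): string {
  const indexed = stats.numIndexedRows ?? 0;
  const unindexed = stats.numUnindexedRows ?? 0;
  return `${index.name} [${index.columns.join(", ")}]: ${indexed} indexed, ${unindexed} unindexed`;
}
```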

View File

@@ -509,8 +509,7 @@ export class RemoteTable<T = number[]> implements Table<T> {
     return (await results.body()).indexes?.map((index: any) => ({
       columns: index.columns,
       name: index.index_name,
-      uuid: index.index_uuid,
-      status: index.status
+      uuid: index.index_uuid
     }))
   }

@@ -521,10 +520,7 @@ export class RemoteTable<T = number[]> implements Table<T> {
     const body = await results.body()
     return {
       numIndexedRows: body?.num_indexed_rows,
-      numUnindexedRows: body?.num_unindexed_rows,
-      indexType: body?.index_type,
-      distanceType: body?.distance_type,
-      completedAt: body?.completed_at
+      numUnindexedRows: body?.num_unindexed_rows
     }
   }

View File

@@ -15,11 +15,11 @@ crate-type = ["cdylib"]
 arrow-ipc.workspace = true
 futures.workspace = true
 lancedb = { path = "../rust/lancedb" }
-napi = { version = "2.16.8", default-features = false, features = [
-  "napi9",
+napi = { version = "2.15", default-features = false, features = [
+  "napi7",
   "async",
 ] }
-napi-derive = "2.16.4"
+napi-derive = "2"

 # Prevent dynamic linking of lzma, which comes from datafusion
 lzma-sys = { version = "*", features = ["static"] }

View File

@@ -31,7 +31,6 @@ import {
   Schema,
   Struct,
   type Table,
-  Type,
   Utf8,
   tableFromIPC,
 } from "apache-arrow";
@@ -52,12 +51,7 @@ import {
   makeArrowTable,
   makeEmptyTable,
 } from "../lancedb/arrow";
-import {
-  EmbeddingFunction,
-  FieldOptions,
-  FunctionOptions,
-} from "../lancedb/embedding/embedding_function";
-import { EmbeddingFunctionConfig } from "../lancedb/embedding/registry";
+import { type EmbeddingFunction } from "../lancedb/embedding/embedding_function";

 // biome-ignore lint/suspicious/noExplicitAny: skip
 function sampleRecords(): Array<Record<string, any>> {
@@ -286,46 +280,23 @@ describe("The function makeArrowTable", function () {
   });
 });

-class DummyEmbedding extends EmbeddingFunction<string> {
-  toJSON(): Partial<FunctionOptions> {
-    return {};
-  }
-  async computeSourceEmbeddings(data: string[]): Promise<number[][]> {
-    return data.map(() => [0.0, 0.0]);
-  }
-  ndims(): number {
-    return 2;
-  }
-  embeddingDataType() {
-    return new Float16();
-  }
-}
-
-class DummyEmbeddingWithNoDimension extends EmbeddingFunction<string> {
-  toJSON(): Partial<FunctionOptions> {
-    return {};
-  }
-  embeddingDataType(): Float {
-    return new Float16();
-  }
-  async computeSourceEmbeddings(data: string[]): Promise<number[][]> {
-    return data.map(() => [0.0, 0.0]);
-  }
-}
-
-const dummyEmbeddingConfig: EmbeddingFunctionConfig = {
-  sourceColumn: "string",
-  function: new DummyEmbedding(),
-};
-
-const dummyEmbeddingConfigWithNoDimension: EmbeddingFunctionConfig = {
-  sourceColumn: "string",
-  function: new DummyEmbeddingWithNoDimension(),
-};
+class DummyEmbedding implements EmbeddingFunction<string> {
+  public readonly sourceColumn = "string";
+  public readonly embeddingDimension = 2;
+  public readonly embeddingDataType = new Float16();
+
+  async embed(data: string[]): Promise<number[][]> {
+    return data.map(() => [0.0, 0.0]);
+  }
+}
+
+class DummyEmbeddingWithNoDimension implements EmbeddingFunction<string> {
+  public readonly sourceColumn = "string";
+
+  async embed(data: string[]): Promise<number[][]> {
+    return data.map(() => [0.0, 0.0]);
+  }
+}

 describe("convertToTable", function () {
   it("will infer data types correctly", async function () {
@@ -360,7 +331,7 @@ describe("convertToTable", function () {

   it("will apply embeddings", async function () {
     const records = sampleRecords();
-    const table = await convertToTable(records, dummyEmbeddingConfig);
+    const table = await convertToTable(records, new DummyEmbedding());
     expect(DataType.isFixedSizeList(table.getChild("vector")?.type)).toBe(true);
     expect(table.getChild("vector")?.type.children[0].type.toString()).toEqual(
       new Float16().toString(),
@@ -369,7 +340,7 @@ describe("convertToTable", function () {

   it("will fail if missing the embedding source column", async function () {
     await expect(
-      convertToTable([{ id: 1 }], dummyEmbeddingConfig),
+      convertToTable([{ id: 1 }], new DummyEmbedding()),
     ).rejects.toThrow("'string' was not present");
   });
@@ -380,7 +351,7 @@ describe("convertToTable", function () {
     const table = makeEmptyTable(schema);

     // If the embedding specifies the dimension we are fine
-    await fromTableToBuffer(table, dummyEmbeddingConfig);
+    await fromTableToBuffer(table, new DummyEmbedding());

     // We can also supply a schema and should be ok
     const schemaWithEmbedding = new Schema([
@@ -393,13 +364,13 @@ describe("convertToTable", function () {
     ]);
     await fromTableToBuffer(
       table,
-      dummyEmbeddingConfigWithNoDimension,
+      new DummyEmbeddingWithNoDimension(),
       schemaWithEmbedding,
     );

     // Otherwise we will get an error
     await expect(
-      fromTableToBuffer(table, dummyEmbeddingConfigWithNoDimension),
+      fromTableToBuffer(table, new DummyEmbeddingWithNoDimension()),
     ).rejects.toThrow("does not specify `embeddingDimension`");
   });
@@ -412,7 +383,7 @@ describe("convertToTable", function () {
         false,
       ),
     ]);
-    const table = await convertToTable([], dummyEmbeddingConfig, { schema });
+    const table = await convertToTable([], new DummyEmbedding(), { schema });
     expect(DataType.isFixedSizeList(table.getChild("vector")?.type)).toBe(true);
     expect(table.getChild("vector")?.type.children[0].type.toString()).toEqual(
       new Float16().toString(),
@@ -422,17 +393,16 @@ describe("convertToTable", function () {
   it("will complain if embeddings present but schema missing embedding column", async function () {
     const schema = new Schema([new Field("string", new Utf8(), false)]);
     await expect(
-      convertToTable([], dummyEmbeddingConfig, { schema }),
+      convertToTable([], new DummyEmbedding(), { schema }),
     ).rejects.toThrow("column vector was missing");
   });

   it("will provide a nice error if run twice", async function () {
     const records = sampleRecords();
-    const table = await convertToTable(records, dummyEmbeddingConfig);
+    const table = await convertToTable(records, new DummyEmbedding());
     // fromTableToBuffer will try and apply the embeddings again
     await expect(
-      fromTableToBuffer(table, dummyEmbeddingConfig),
+      fromTableToBuffer(table, new DummyEmbedding()),
     ).rejects.toThrow("already existed");
   });
 });
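
Taken together, these hunks revert the embedding API from registry-managed classes back to plain function objects. A minimal self-contained sketch of the restored shape, inferred from the tests above (the interface body is an assumption pieced together from this patch, not the library source):

```ts
import { Float16, type Float } from "apache-arrow";

// Assumed shape of the restored EmbeddingFunction contract: sourceColumn and
// embed() come from the tests above; destColumn and the optional fields come
// from the arrow.ts hunks later in this patch.
interface EmbeddingFunction<T> {
  sourceColumn: string;
  destColumn?: string;
  embeddingDimension?: number;
  embeddingDataType?: Float;
  embed(data: T[]): Promise<number[][]>;
}

class TwoDimEmbedding implements EmbeddingFunction<string> {
  readonly sourceColumn = "string";
  readonly embeddingDimension = 2;
  readonly embeddingDataType = new Float16();

  async embed(data: string[]): Promise<number[][]> {
    // Constant vectors, mirroring the DummyEmbedding fixture above.
    return data.map(() => [0.0, 0.0]);
  }
}

// Usage then mirrors the tests: `convertToTable(records, new TwoDimEmbedding())`.
```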

View File

@@ -12,9 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-import { Field, Float64, Schema } from "apache-arrow";
 import * as tmp from "tmp";
-import { Connection, Table, connect } from "../lancedb";
+
+import { Connection, connect } from "../lancedb";

 describe("when connecting", () => {
   let tmpDir: tmp.DirResult;
@@ -57,18 +57,6 @@ describe("given a connection", () => {
     expect(db.isOpen()).toBe(false);
     await expect(db.tableNames()).rejects.toThrow("Connection is closed");
   });

-  it("should be able to create a table from an object arg `createTable(options)`, or args `createTable(name, data, options)`", async () => {
-    let tbl = await db.createTable("test", [{ id: 1 }, { id: 2 }]);
-    await expect(tbl.countRows()).resolves.toBe(2);
-
-    tbl = await db.createTable({
-      name: "test",
-      data: [{ id: 3 }],
-      mode: "overwrite",
-    });
-    await expect(tbl.countRows()).resolves.toBe(1);
-  });
-
   it("should fail if creating table twice, unless overwrite is true", async () => {
     let tbl = await db.createTable("test", [{ id: 1 }, { id: 2 }]);
@@ -99,39 +87,4 @@ describe("given a connection", () => {
     tables = await db.tableNames({ startAfter: "a" });
     expect(tables).toEqual(["b", "c"]);
   });

-  it("should create tables in v2 mode", async () => {
-    const db = await connect(tmpDir.name);
-    const data = [...Array(10000).keys()].map((i) => ({ id: i }));
-
-    // Create in v1 mode
-    let table = await db.createTable("test", data);
-    const isV2 = async (table: Table) => {
-      const data = await table.query().toArrow({ maxBatchLength: 100000 });
-      console.log(data.batches.length);
-      return data.batches.length < 5;
-    };
-    await expect(isV2(table)).resolves.toBe(false);
-
-    // Create in v2 mode
-    table = await db.createTable("test_v2", data, { useLegacyFormat: false });
-    await expect(isV2(table)).resolves.toBe(true);
-    await table.add(data);
-    await expect(isV2(table)).resolves.toBe(true);
-
-    // Create empty in v2 mode
-    const schema = new Schema([new Field("id", new Float64(), true)]);
-    table = await db.createEmptyTable("test_v2_empty", schema, {
-      useLegacyFormat: false,
-    });
-    await table.add(data);
-    await expect(isV2(table)).resolves.toBe(true);
-  });
 });
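
With the object-argument and v2-mode tests removed, only the positional `createTable(name, data)` form remains exercised here. A small sketch of that remaining call shape (import path as used in the test above; the directory argument is illustrative):

```ts
import { connect } from "../lancedb";

// Sketch of the call shape the remaining tests use.
async function createDemo(dir: string) {
  const db = await connect(dir);
  const tbl = await db.createTable("test", [{ id: 1 }, { id: 2 }]);
  await tbl.countRows(); // 2
}
```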

View File

@@ -1,314 +0,0 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import * as tmp from "tmp";
import { connect } from "../lancedb";
import {
Field,
FixedSizeList,
Float,
Float16,
Float32,
Float64,
Schema,
Utf8,
} from "../lancedb/arrow";
import { EmbeddingFunction, LanceSchema } from "../lancedb/embedding";
import { getRegistry, register } from "../lancedb/embedding/registry";
describe("embedding functions", () => {
let tmpDir: tmp.DirResult;
beforeEach(() => {
tmpDir = tmp.dirSync({ unsafeCleanup: true });
});
afterEach(() => {
tmpDir.removeCallback();
getRegistry().reset();
});
it("should be able to create a table with an embedding function", async () => {
class MockEmbeddingFunction extends EmbeddingFunction<string> {
toJSON(): object {
return {};
}
ndims() {
return 3;
}
embeddingDataType(): Float {
return new Float32();
}
async computeQueryEmbeddings(_data: string) {
return [1, 2, 3];
}
async computeSourceEmbeddings(data: string[]) {
return Array.from({ length: data.length }).fill([
1, 2, 3,
]) as number[][];
}
}
const func = new MockEmbeddingFunction();
const db = await connect(tmpDir.name);
const table = await db.createTable(
"test",
[
{ id: 1, text: "hello" },
{ id: 2, text: "world" },
],
{
embeddingFunction: {
function: func,
sourceColumn: "text",
},
},
);
// biome-ignore lint/suspicious/noExplicitAny: test
const arr = (await table.query().toArray()) as any;
expect(arr[0].vector).toBeDefined();
// we round trip through JSON to make sure the vector properly gets converted to an array
// otherwise it'll be a TypedArray or Vector
const vector0 = JSON.parse(JSON.stringify(arr[0].vector));
expect(vector0).toEqual([1, 2, 3]);
});
it("should be able to create an empty table with an embedding function", async () => {
@register()
class MockEmbeddingFunction extends EmbeddingFunction<string> {
toJSON(): object {
return {};
}
ndims() {
return 3;
}
embeddingDataType(): Float {
return new Float32();
}
async computeQueryEmbeddings(_data: string) {
return [1, 2, 3];
}
async computeSourceEmbeddings(data: string[]) {
return Array.from({ length: data.length }).fill([
1, 2, 3,
]) as number[][];
}
}
const schema = new Schema([
new Field("text", new Utf8(), true),
new Field(
"vector",
new FixedSizeList(3, new Field("item", new Float32(), true)),
true,
),
]);
const func = new MockEmbeddingFunction();
const db = await connect(tmpDir.name);
const table = await db.createEmptyTable("test", schema, {
embeddingFunction: {
function: func,
sourceColumn: "text",
},
});
const outSchema = await table.schema();
expect(outSchema.metadata.get("embedding_functions")).toBeDefined();
await table.add([{ text: "hello world" }]);
// biome-ignore lint/suspicious/noExplicitAny: test
const arr = (await table.query().toArray()) as any;
expect(arr[0].vector).toBeDefined();
// we round trip through JSON to make sure the vector properly gets converted to an array
// otherwise it'll be a TypedArray or Vector
const vector0 = JSON.parse(JSON.stringify(arr[0].vector));
expect(vector0).toEqual([1, 2, 3]);
});
it("should error when appending to a table with an unregistered embedding function", async () => {
@register("mock")
class MockEmbeddingFunction extends EmbeddingFunction<string> {
toJSON(): object {
return {};
}
ndims() {
return 3;
}
embeddingDataType(): Float {
return new Float32();
}
async computeQueryEmbeddings(_data: string) {
return [1, 2, 3];
}
async computeSourceEmbeddings(data: string[]) {
return Array.from({ length: data.length }).fill([
1, 2, 3,
]) as number[][];
}
}
const func = getRegistry().get<MockEmbeddingFunction>("mock")!.create();
const schema = LanceSchema({
id: new Float64(),
text: func.sourceField(new Utf8()),
vector: func.vectorField(),
});
const db = await connect(tmpDir.name);
await db.createTable(
"test",
[
{ id: 1, text: "hello" },
{ id: 2, text: "world" },
],
{
schema,
},
);
getRegistry().reset();
const db2 = await connect(tmpDir.name);
const tbl = await db2.openTable("test");
expect(tbl.add([{ id: 3, text: "hello" }])).rejects.toThrow(
`Function "mock" not found in registry`,
);
});
test.each([new Float16(), new Float32(), new Float64()])(
"should be able to provide manual embeddings with multiple float datatype",
async (floatType) => {
class MockEmbeddingFunction extends EmbeddingFunction<string> {
toJSON(): object {
return {};
}
ndims() {
return 3;
}
embeddingDataType(): Float {
return floatType;
}
async computeQueryEmbeddings(_data: string) {
return [1, 2, 3];
}
async computeSourceEmbeddings(data: string[]) {
return Array.from({ length: data.length }).fill([
1, 2, 3,
]) as number[][];
}
}
const data = [{ text: "hello" }, { text: "hello world" }];
const schema = new Schema([
new Field("vector", new FixedSizeList(3, new Field("item", floatType))),
new Field("text", new Utf8()),
]);
const func = new MockEmbeddingFunction();
const name = "test";
const db = await connect(tmpDir.name);
const table = await db.createTable(name, data, {
schema,
embeddingFunction: {
sourceColumn: "text",
function: func,
},
});
const res = await table.query().toArray();
expect([...res[0].vector]).toEqual([1, 2, 3]);
},
);
test.each([new Float16(), new Float32(), new Float64()])(
"should be able to provide auto embeddings with multiple float datatypes",
async (floatType) => {
@register("test1")
class MockEmbeddingFunctionWithoutNDims extends EmbeddingFunction<string> {
toJSON(): object {
return {};
}
embeddingDataType(): Float {
return floatType;
}
async computeQueryEmbeddings(_data: string) {
return [1, 2, 3];
}
async computeSourceEmbeddings(data: string[]) {
return Array.from({ length: data.length }).fill([
1, 2, 3,
]) as number[][];
}
}
@register("test")
class MockEmbeddingFunction extends EmbeddingFunction<string> {
toJSON(): object {
return {};
}
ndims() {
return 3;
}
embeddingDataType(): Float {
return floatType;
}
async computeQueryEmbeddings(_data: string) {
return [1, 2, 3];
}
async computeSourceEmbeddings(data: string[]) {
return Array.from({ length: data.length }).fill([
1, 2, 3,
]) as number[][];
}
}
const func = getRegistry().get<MockEmbeddingFunction>("test")!.create();
const func2 = getRegistry()
.get<MockEmbeddingFunctionWithoutNDims>("test1")!
.create();
const schema = LanceSchema({
text: func.sourceField(new Utf8()),
vector: func.vectorField(floatType),
});
const schema2 = LanceSchema({
text: func2.sourceField(new Utf8()),
vector: func2.vectorField({ datatype: floatType, dims: 3 }),
});
const schema3 = LanceSchema({
text: func2.sourceField(new Utf8()),
vector: func.vectorField({
datatype: new FixedSizeList(3, new Field("item", floatType, true)),
dims: 3,
}),
});
const expectedSchema = new Schema([
new Field("text", new Utf8(), true),
new Field(
"vector",
new FixedSizeList(3, new Field("item", floatType, true)),
true,
),
]);
const stringSchema = JSON.stringify(schema, null, 2);
const stringSchema2 = JSON.stringify(schema2, null, 2);
const stringSchema3 = JSON.stringify(schema3, null, 2);
const stringExpectedSchema = JSON.stringify(expectedSchema, null, 2);
expect(stringSchema).toEqual(stringExpectedSchema);
expect(stringSchema2).toEqual(stringExpectedSchema);
expect(stringSchema3).toEqual(stringExpectedSchema);
},
);
});
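
The deleted file above exercised the decorator-based embedding registry that this patch removes. For readers skimming the revert, a condensed sketch of that pattern, using only names that appear in the deleted code:

```ts
import { Float32, Utf8, type Float } from "apache-arrow";
import { EmbeddingFunction, LanceSchema } from "../lancedb/embedding";
import { getRegistry, register } from "../lancedb/embedding/registry";

// The decorator registers the class under an alias...
@register("mock")
class MockEmbeddingFunction extends EmbeddingFunction<string> {
  toJSON(): object {
    return {};
  }
  ndims() {
    return 3;
  }
  embeddingDataType(): Float {
    return new Float32();
  }
  async computeSourceEmbeddings(data: string[]) {
    return data.map(() => [1, 2, 3]);
  }
}

// ...so instances can later be recreated by alias and bound into a schema,
// which is how appends re-resolved the function (until this removal).
const func = getRegistry().get<MockEmbeddingFunction>("mock")!.create();
const schema = LanceSchema({
  text: func.sourceField(new Utf8()),
  vector: func.vectorField(),
});
```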

View File

@@ -1,170 +0,0 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import * as arrow from "apache-arrow";
import * as arrowOld from "apache-arrow-old";
import * as tmp from "tmp";
import { connect } from "../lancedb";
import { EmbeddingFunction, LanceSchema } from "../lancedb/embedding";
import { getRegistry, register } from "../lancedb/embedding/registry";
describe.each([arrow, arrowOld])("LanceSchema", (arrow) => {
test("should preserve input order", async () => {
const schema = LanceSchema({
id: new arrow.Int32(),
text: new arrow.Utf8(),
vector: new arrow.Float32(),
});
expect(schema.fields.map((x) => x.name)).toEqual(["id", "text", "vector"]);
});
});
describe("Registry", () => {
let tmpDir: tmp.DirResult;
beforeEach(() => {
tmpDir = tmp.dirSync({ unsafeCleanup: true });
});
afterEach(() => {
tmpDir.removeCallback();
getRegistry().reset();
});
it("should register a new item to the registry", async () => {
@register("mock-embedding")
class MockEmbeddingFunction extends EmbeddingFunction<string> {
toJSON(): object {
return {
someText: "hello",
};
}
constructor() {
super();
}
ndims() {
return 3;
}
embeddingDataType(): arrow.Float {
return new arrow.Float32();
}
async computeSourceEmbeddings(data: string[]) {
return data.map(() => [1, 2, 3]);
}
}
const func = getRegistry()
.get<MockEmbeddingFunction>("mock-embedding")!
.create();
const schema = LanceSchema({
id: new arrow.Int32(),
text: func.sourceField(new arrow.Utf8()),
vector: func.vectorField(),
});
const db = await connect(tmpDir.name);
const table = await db.createTable(
"test",
[
{ id: 1, text: "hello" },
{ id: 2, text: "world" },
],
{ schema },
);
const expected = [
[1, 2, 3],
[1, 2, 3],
];
const actual = await table.query().toArrow();
const vectors = actual
.getChild("vector")
?.toArray()
.map((x: unknown) => {
if (x instanceof arrow.Vector) {
return [...x];
} else {
return x;
}
});
expect(vectors).toEqual(expected);
});
test("should error if registering with the same name", async () => {
class MockEmbeddingFunction extends EmbeddingFunction<string> {
toJSON(): object {
return {
someText: "hello",
};
}
constructor() {
super();
}
ndims() {
return 3;
}
embeddingDataType(): arrow.Float {
return new arrow.Float32();
}
async computeSourceEmbeddings(data: string[]) {
return data.map(() => [1, 2, 3]);
}
}
register("mock-embedding")(MockEmbeddingFunction);
expect(() => register("mock-embedding")(MockEmbeddingFunction)).toThrow(
'Embedding function with alias "mock-embedding" already exists',
);
});
test("schema should contain correct metadata", async () => {
class MockEmbeddingFunction extends EmbeddingFunction<string> {
toJSON(): object {
return {
someText: "hello",
};
}
constructor() {
super();
}
ndims() {
return 3;
}
embeddingDataType(): arrow.Float {
return new arrow.Float32();
}
async computeSourceEmbeddings(data: string[]) {
return data.map(() => [1, 2, 3]);
}
}
const func = new MockEmbeddingFunction();
const schema = LanceSchema({
id: new arrow.Int32(),
text: func.sourceField(new arrow.Utf8()),
vector: func.vectorField(),
});
const expectedMetadata = new Map<string, string>([
[
"embedding_functions",
JSON.stringify([
{
sourceColumn: "text",
vectorColumn: "vector",
name: "MockEmbeddingFunction",
model: { someText: "hello" },
},
]),
],
]);
expect(schema.metadata).toEqual(expectedMetadata);
});
});

View File

@@ -14,11 +14,6 @@

 /* eslint-disable @typescript-eslint/naming-convention */

-import {
-  CreateTableCommand,
-  DeleteTableCommand,
-  DynamoDBClient,
-} from "@aws-sdk/client-dynamodb";
 import {
   CreateKeyCommand,
   KMSClient,
@@ -43,7 +38,6 @@ const CONFIG = {
   awsAccessKeyId: "ACCESSKEY",
   awsSecretAccessKey: "SECRETKEY",
   awsEndpoint: "http://127.0.0.1:4566",
-  dynamodbEndpoint: "http://127.0.0.1:4566",
   awsRegion: "us-east-1",
 };
@@ -72,6 +66,7 @@ class S3Bucket {
     } catch {
       // It's fine if the bucket doesn't exist
     }
+    // biome-ignore lint/style/useNamingConvention: we dont control s3's api
     await client.send(new CreateBucketCommand({ Bucket: name }));
     return new S3Bucket(name);
   }
@@ -84,27 +79,32 @@ class S3Bucket {
   static async deleteBucket(client: S3Client, name: string) {
     // Must delete all objects before we can delete the bucket
     const objects = await client.send(
+      // biome-ignore lint/style/useNamingConvention: we dont control s3's api
       new ListObjectsV2Command({ Bucket: name }),
     );
     if (objects.Contents) {
       for (const object of objects.Contents) {
         await client.send(
+          // biome-ignore lint/style/useNamingConvention: we dont control s3's api
           new DeleteObjectCommand({ Bucket: name, Key: object.Key }),
         );
       }
     }
+    // biome-ignore lint/style/useNamingConvention: we dont control s3's api
     await client.send(new DeleteBucketCommand({ Bucket: name }));
   }

   public async assertAllEncrypted(path: string, keyId: string) {
     const client = S3Bucket.s3Client();
     const objects = await client.send(
+      // biome-ignore lint/style/useNamingConvention: we dont control s3's api
       new ListObjectsV2Command({ Bucket: this.name, Prefix: path }),
     );
     if (objects.Contents) {
       for (const object of objects.Contents) {
         const metadata = await client.send(
+          // biome-ignore lint/style/useNamingConvention: we dont control s3's api
           new HeadObjectCommand({ Bucket: this.name, Key: object.Key }),
         );
         expect(metadata.ServerSideEncryption).toBe("aws:kms");
@@ -143,6 +143,7 @@ class KmsKey {
   public async delete() {
     const client = KmsKey.kmsClient();
+    // biome-ignore lint/style/useNamingConvention: we dont control s3's api
     await client.send(new ScheduleKeyDeletionCommand({ KeyId: this.keyId }));
   }
 }
@@ -223,91 +224,3 @@ maybeDescribe("storage_options", () => {
     await bucket.assertAllEncrypted("test/table2.lance", kmsKey.keyId);
   });
 });
-
-class DynamoDBCommitTable {
-  name: string;
-  constructor(name: string) {
-    this.name = name;
-  }
-
-  static dynamoClient() {
-    return new DynamoDBClient({
-      region: CONFIG.awsRegion,
-      credentials: {
-        accessKeyId: CONFIG.awsAccessKeyId,
-        secretAccessKey: CONFIG.awsSecretAccessKey,
-      },
-      endpoint: CONFIG.awsEndpoint,
-    });
-  }
-
-  public static async create(name: string): Promise<DynamoDBCommitTable> {
-    const client = DynamoDBCommitTable.dynamoClient();
-    const command = new CreateTableCommand({
-      TableName: name,
-      AttributeDefinitions: [
-        {
-          AttributeName: "base_uri",
-          AttributeType: "S",
-        },
-        {
-          AttributeName: "version",
-          AttributeType: "N",
-        },
-      ],
-      KeySchema: [
-        { AttributeName: "base_uri", KeyType: "HASH" },
-        { AttributeName: "version", KeyType: "RANGE" },
-      ],
-      ProvisionedThroughput: {
-        ReadCapacityUnits: 1,
-        WriteCapacityUnits: 1,
-      },
-    });
-    await client.send(command);
-    return new DynamoDBCommitTable(name);
-  }
-
-  public async delete() {
-    const client = DynamoDBCommitTable.dynamoClient();
-    await client.send(new DeleteTableCommand({ TableName: this.name }));
-  }
-}
-
-maybeDescribe("DynamoDB Lock", () => {
-  let bucket: S3Bucket;
-  let commitTable: DynamoDBCommitTable;
-  beforeAll(async () => {
-    bucket = await S3Bucket.create("lancedb2");
-    commitTable = await DynamoDBCommitTable.create("commitTable");
-  });
-  afterAll(async () => {
-    await commitTable.delete();
-    await bucket.delete();
-  });
-
-  it("can be used to configure a DynamoDB table for commit log", async () => {
-    const uri = `s3+ddb://${bucket.name}/test?ddbTableName=${commitTable.name}`;
-    const db = await connect(uri, {
-      storageOptions: CONFIG,
-      readConsistencyInterval: 0,
-    });
-    const table = await db.createTable("test", [{ a: 1, b: 2 }]);
-
-    // 5 concurrent appends
-    const futs = Array.from({ length: 5 }, async () => {
-      // Open a table so each append has a separate table reference. Otherwise
-      // they will share the same table reference and the internal ReadWriteLock
-      // will prevent any real concurrency.
-      const table = await db.openTable("test");
-      await table.add([{ a: 2, b: 3 }]);
-    });
-    await Promise.all(futs);

-    const rowCount = await table.countRows();
-    expect(rowCount).toBe(6);
-  });
-});
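
The deleted "DynamoDB Lock" test documented the `s3+ddb://` commit-log scheme. A condensed sketch of the connection it exercised (bucket and table names here are illustrative; the options mirror the deleted CONFIG):

```ts
import { connect } from "../lancedb";

// Condensed from the deleted test: the DynamoDB table named by ddbTableName
// acts as the commit log for the S3-backed dataset, enabling safe concurrent
// appends.
async function openWithDynamoCommitLog(bucket: string, commitTable: string) {
  const uri = `s3+ddb://${bucket}/test?ddbTableName=${commitTable}`;
  return await connect(uri, {
    storageOptions: {
      awsRegion: "us-east-1", // the deleted CONFIG also set credentials/endpoint
    },
    readConsistencyInterval: 0,
  });
}
```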

View File

@@ -16,12 +16,7 @@ import * as fs from "fs";
 import * as path from "path";
 import * as tmp from "tmp";

-import * as arrow from "apache-arrow";
-import * as arrowOld from "apache-arrow-old";
-
-import { Table, connect } from "../lancedb";
 import {
-  Table as ArrowTable,
   Field,
   FixedSizeList,
   Float32,
@@ -29,22 +24,15 @@ import {
   Int32,
   Int64,
   Schema,
-  makeArrowTable,
-} from "../lancedb/arrow";
-import { EmbeddingFunction, LanceSchema, register } from "../lancedb/embedding";
+} from "apache-arrow";
+import { Table, connect } from "../lancedb";
+import { makeArrowTable } from "../lancedb/arrow";
 import { Index } from "../lancedb/indices";

-// biome-ignore lint/suspicious/noExplicitAny: <explanation>
-describe.each([arrow, arrowOld])("Given a table", (arrow: any) => {
+describe("Given a table", () => {
   let tmpDir: tmp.DirResult;
   let table: Table;
-  const schema:
-    | import("apache-arrow").Schema
-    | import("apache-arrow-old").Schema = new arrow.Schema([
-    new arrow.Field("id", new arrow.Float64(), true),
-  ]);
+  const schema = new Schema([new Field("id", new Float64(), true)]);

   beforeEach(async () => {
     tmpDir = tmp.dirSync({ unsafeCleanup: true });
     const conn = await connect(tmpDir.name);
@@ -95,177 +83,6 @@ describe.each([arrow, arrowOld])("Given a table", (arrow: any) => {
     expect(await table.countRows("id == 7")).toBe(1);
     expect(await table.countRows("id == 10")).toBe(1);
   });

-  // https://github.com/lancedb/lancedb/issues/1293
-  test.each([new arrow.Float16(), new arrow.Float32(), new arrow.Float64()])(
-    "can create empty table with non default float type: %s",
-    async (floatType) => {
-      const db = await connect(tmpDir.name);
-      const data = [
-        { text: "hello", vector: Array(512).fill(1.0) },
-        { text: "hello world", vector: Array(512).fill(1.0) },
-      ];
-      const f64Schema = new arrow.Schema([
-        new arrow.Field("text", new arrow.Utf8(), true),
-        new arrow.Field(
-          "vector",
-          new arrow.FixedSizeList(512, new arrow.Field("item", floatType)),
-          true,
-        ),
-      ]);
-      const f64Table = await db.createEmptyTable("f64", f64Schema, {
-        mode: "overwrite",
-      });
-      try {
-        await f64Table.add(data);
-        const res = await f64Table.query().toArray();
-        expect(res.length).toBe(2);
-      } catch (e) {
-        expect(e).toBeUndefined();
-      }
-    },
-  );
-
-  it("should return the table as an instance of an arrow table", async () => {
-    const arrowTbl = await table.toArrow();
-    expect(arrowTbl).toBeInstanceOf(ArrowTable);
-  });
-});
-
-describe("merge insert", () => {
-  let tmpDir: tmp.DirResult;
-  let table: Table;
-  beforeEach(async () => {
-    tmpDir = tmp.dirSync({ unsafeCleanup: true });
-    const conn = await connect(tmpDir.name);
-    table = await conn.createTable("some_table", [
-      { a: 1, b: "a" },
-      { a: 2, b: "b" },
-      { a: 3, b: "c" },
-    ]);
-  });
-  afterEach(() => tmpDir.removeCallback());
-
-  test("upsert", async () => {
-    const newData = [
-      { a: 2, b: "x" },
-      { a: 3, b: "y" },
-      { a: 4, b: "z" },
-    ];
-    await table
-      .mergeInsert("a")
-      .whenMatchedUpdateAll()
-      .whenNotMatchedInsertAll()
-      .execute(newData);
-    const expected = [
-      { a: 1, b: "a" },
-      { a: 2, b: "x" },
-      { a: 3, b: "y" },
-      { a: 4, b: "z" },
-    ];
-    expect(
-      JSON.parse(JSON.stringify((await table.toArrow()).toArray())),
-    ).toEqual(expected);
-  });
-  test("conditional update", async () => {
-    const newData = [
-      { a: 2, b: "x" },
-      { a: 3, b: "y" },
-      { a: 4, b: "z" },
-    ];
-    await table
-      .mergeInsert("a")
-      .whenMatchedUpdateAll({ where: "target.b = 'b'" })
-      .execute(newData);
-    const expected = [
-      { a: 1, b: "a" },
-      { a: 2, b: "x" },
-      { a: 3, b: "c" },
-    ];
-    // round trip to arrow and back to json to avoid comparing arrow objects to js object
-    // biome-ignore lint/suspicious/noExplicitAny: test
-    let res: any[] = JSON.parse(
-      JSON.stringify((await table.toArrow()).toArray()),
-    );
-    res = res.sort((a, b) => a.a - b.a);
-    expect(res).toEqual(expected);
-  });
-
-  test("insert if not exists", async () => {
-    const newData = [
-      { a: 2, b: "x" },
-      { a: 3, b: "y" },
-      { a: 4, b: "z" },
-    ];
-    await table.mergeInsert("a").whenNotMatchedInsertAll().execute(newData);
-    const expected = [
-      { a: 1, b: "a" },
-      { a: 2, b: "b" },
-      { a: 3, b: "c" },
-      { a: 4, b: "z" },
-    ];
-    // biome-ignore lint/suspicious/noExplicitAny: <explanation>
-    let res: any[] = JSON.parse(
-      JSON.stringify((await table.toArrow()).toArray()),
-    );
-    res = res.sort((a, b) => a.a - b.a);
-    expect(res).toEqual(expected);
-  });
-  test("replace range", async () => {
-    const newData = [
-      { a: 2, b: "x" },
-      { a: 4, b: "z" },
-    ];
-    await table
-      .mergeInsert("a")
-      .whenMatchedUpdateAll()
-      .whenNotMatchedInsertAll()
-      .whenNotMatchedBySourceDelete({ where: "a > 2" })
-      .execute(newData);
-    const expected = [
-      { a: 1, b: "a" },
-      { a: 2, b: "x" },
-      { a: 4, b: "z" },
-    ];
-    // biome-ignore lint/suspicious/noExplicitAny: <explanation>
-    let res: any[] = JSON.parse(
-      JSON.stringify((await table.toArrow()).toArray()),
-    );
-    res = res.sort((a, b) => a.a - b.a);
-    expect(res).toEqual(expected);
-  });
-  test("replace range no condition", async () => {
-    const newData = [
-      { a: 2, b: "x" },
-      { a: 4, b: "z" },
-    ];
-    await table
-      .mergeInsert("a")
-      .whenMatchedUpdateAll()
-      .whenNotMatchedInsertAll()
-      .whenNotMatchedBySourceDelete()
-      .execute(newData);
-    const expected = [
-      { a: 2, b: "x" },
-      { a: 4, b: "z" },
-    ];
-    // biome-ignore lint/suspicious/noExplicitAny: test
-    let res: any[] = JSON.parse(
-      JSON.stringify((await table.toArrow()).toArray()),
-    );
-    res = res.sort((a, b) => a.a - b.a);
-    expect(res).toEqual(expected);
-  });
 });

 describe("When creating an index", () => {
@@ -307,7 +124,6 @@ describe("When creating an index", () => {
     const indices = await tbl.listIndices();
     expect(indices.length).toBe(1);
     expect(indices[0]).toEqual({
-      name: "vec_idx",
       indexType: "IvfPq",
       columns: ["vec"],
     });
@@ -317,7 +133,7 @@ describe("When creating an index", () => {
       .query()
       .limit(2)
       .nearestTo(queryVec)
-      .distanceType("dot")
+      .distanceType("DoT")
       .toArrow();

     expect(rst.numRows).toBe(2);
@@ -364,24 +180,6 @@ describe("When creating an index", () => {
     for await (const r of tbl.query().where("id > 1").select(["id"])) {
       expect(r.numRows).toBe(298);
     }
-
-    // should also work with 'filter' alias
-    for await (const r of tbl.query().filter("id > 1").select(["id"])) {
-      expect(r.numRows).toBe(298);
-    }
-  });
-
-  test("should be able to get index stats", async () => {
-    await tbl.createIndex("id");
-    const stats = await tbl.indexStats("id_idx");
-    expect(stats).toBeDefined();
-    expect(stats?.numIndexedRows).toEqual(300);
-    expect(stats?.numUnindexedRows).toEqual(0);
-  });
-
-  test("when getting stats on non-existent index", async () => {
-    const stats = await tbl.indexStats("some non-existent index");
-    expect(stats).toBeUndefined();
   });

   // TODO: Move this test to the query API test (making sure we can reject queries
@@ -621,151 +419,3 @@ describe("when dealing with versioning", () => {
     );
   });
 });
-
-describe("when optimizing a dataset", () => {
-  let tmpDir: tmp.DirResult;
-  let table: Table;
-  beforeEach(async () => {
-    tmpDir = tmp.dirSync({ unsafeCleanup: true });
-    const con = await connect(tmpDir.name);
-    table = await con.createTable("vectors", [{ id: 1 }]);
-    await table.add([{ id: 2 }]);
-  });
-  afterEach(() => {
-    tmpDir.removeCallback();
-  });
-
-  it("compacts files", async () => {
-    const stats = await table.optimize();
-    expect(stats.compaction.filesAdded).toBe(1);
-    expect(stats.compaction.filesRemoved).toBe(2);
-    expect(stats.compaction.fragmentsAdded).toBe(1);
-    expect(stats.compaction.fragmentsRemoved).toBe(2);
-  });
-  it("cleanups old versions", async () => {
-    const stats = await table.optimize({ cleanupOlderThan: new Date() });
-    expect(stats.prune.bytesRemoved).toBeGreaterThan(0);
-    expect(stats.prune.oldVersionsRemoved).toBe(3);
-  });
-});
-
-describe("table.search", () => {
-  let tmpDir: tmp.DirResult;
-  beforeEach(() => {
-    tmpDir = tmp.dirSync({ unsafeCleanup: true });
-  });
-  afterEach(() => tmpDir.removeCallback());
-
-  test("can search using a string", async () => {
-    @register()
-    class MockEmbeddingFunction extends EmbeddingFunction<string> {
-      toJSON(): object {
-        return {};
-      }
-      ndims() {
-        return 1;
-      }
-      embeddingDataType(): arrow.Float {
-        return new Float32();
-      }
-      // Hardcoded embeddings for the sake of testing
-      async computeQueryEmbeddings(_data: string) {
-        switch (_data) {
-          case "greetings":
-            return [0.1];
-          case "farewell":
-            return [0.2];
-          default:
-            return null as never;
-        }
-      }
-      // Hardcoded embeddings for the sake of testing
-      async computeSourceEmbeddings(data: string[]) {
-        return data.map((s) => {
-          switch (s) {
-            case "hello world":
-              return [0.1];
-            case "goodbye world":
-              return [0.2];
-            default:
-              return null as never;
-          }
-        });
-      }
-    }
-    const func = new MockEmbeddingFunction();
-    const schema = LanceSchema({
-      text: func.sourceField(new arrow.Utf8()),
-      vector: func.vectorField(),
-    });
-    const db = await connect(tmpDir.name);
-    const data = [{ text: "hello world" }, { text: "goodbye world" }];
-    const table = await db.createTable("test", data, { schema });
-
-    const results = await table.search("greetings").toArray();
-    expect(results[0].text).toBe(data[0].text);
-
-    const results2 = await table.search("farewell").toArray();
-    expect(results2[0].text).toBe(data[1].text);
-  });
-
-  test("rejects if no embedding function provided", async () => {
-    const db = await connect(tmpDir.name);
-    const data = [
-      { text: "hello world", vector: [0.1, 0.2, 0.3] },
-      { text: "goodbye world", vector: [0.4, 0.5, 0.6] },
-    ];
-    const table = await db.createTable("test", data);
-    expect(table.search("hello").toArray()).rejects.toThrow(
-      "No embedding functions are defined in the table",
-    );
-  });
-
-  test.each([
-    [0.4, 0.5, 0.599], // number[]
-    Float32Array.of(0.4, 0.5, 0.599), // Float32Array
-    Float64Array.of(0.4, 0.5, 0.599), // Float64Array
-  ])("can search using vectorlike datatypes", async (vectorlike) => {
-    const db = await connect(tmpDir.name);
-    const data = [
-      { text: "hello world", vector: [0.1, 0.2, 0.3] },
-      { text: "goodbye world", vector: [0.4, 0.5, 0.6] },
-    ];
-    const table = await db.createTable("test", data);
-    // biome-ignore lint/suspicious/noExplicitAny: test
-    const results: any[] = await table.search(vectorlike).toArray();
-
-    expect(results.length).toBe(2);
-    expect(results[0].text).toBe(data[1].text);
-  });
-});
-
-describe("when calling explainPlan", () => {
-  let tmpDir: tmp.DirResult;
-  let table: Table;
-  let queryVec: number[];
-  beforeEach(async () => {
-    tmpDir = tmp.dirSync({ unsafeCleanup: true });
-    const con = await connect(tmpDir.name);
-    table = await con.createTable("vectors", [{ id: 1, vector: [0.1, 0.2] }]);
-  });
-  afterEach(() => {
-    tmpDir.removeCallback();
-  });
-
-  it("retrieves query plan", async () => {
-    queryVec = Array(2)
-      .fill(1)
-      .map(() => Math.random());
-    const plan = await table.query().nearestTo(queryVec).explainPlan(true);
-
-    expect(plan).toMatch("KNN");
-  });
-});
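
Among the removed suites, the merge-insert tests are the densest; a condensed sketch of the upsert chain they encoded (every method name below appears verbatim in the deleted code):

```ts
import { type Table } from "../lancedb";

// Condensed from the removed "merge insert" suite: the upsert call chain.
async function upsert(table: Table): Promise<void> {
  await table
    .mergeInsert("a") // join incoming rows to existing rows on column "a"
    .whenMatchedUpdateAll() // matched rows take the incoming values
    .whenNotMatchedInsertAll() // unmatched incoming rows are inserted
    .execute([
      { a: 2, b: "x" },
      { a: 4, b: "z" },
    ]);
}
```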

View File

@@ -1,5 +1,5 @@
 {
-  "$schema": "https://biomejs.dev/schemas/1.8.3/schema.json",
+  "$schema": "https://biomejs.dev/schemas/1.7.3/schema.json",
   "organizeImports": {
     "enabled": true
   },
@@ -48,7 +48,7 @@
         "noUnsafeFinally": "error",
         "noUnsafeOptionalChaining": "error",
         "noUnusedLabels": "error",
-        "noUnusedVariables": "warn",
+        "noUnusedVariables": "error",
         "useIsNan": "error",
         "useValidForDirection": "error",
         "useYield": "error"
@@ -77,7 +77,7 @@
         "noDuplicateObjectKeys": "error",
         "noDuplicateParameters": "error",
         "noEmptyBlockStatements": "error",
-        "noExplicitAny": "warn",
+        "noExplicitAny": "error",
         "noExtraNonNullAssertion": "error",
         "noFallthroughSwitchClause": "error",
         "noFunctionAssign": "error",
@@ -101,23 +101,7 @@
   },
   "overrides": [
     {
-      "include": ["__test__/s3_integration.test.ts"],
-      "linter": {
-        "rules": {
-          "style": {
-            "useNamingConvention": "off"
-          }
-        }
-      }
-    },
-    {
-      "include": [
-        "**/*.ts",
-        "**/*.tsx",
-        "**/*.mts",
-        "**/*.cts",
-        "__test__/*.test.ts"
-      ],
+      "include": ["**/*.ts", "**/*.tsx", "**/*.mts", "**/*.cts"],
       "linter": {
         "rules": {
           "correctness": {

View File

@@ -15,186 +15,29 @@
import { import {
Table as ArrowTable, Table as ArrowTable,
Binary, Binary,
BufferType,
DataType, DataType,
Field, Field,
FixedSizeBinary,
FixedSizeList, FixedSizeList,
Float, type Float,
Float32, Float32,
Int,
LargeBinary,
List, List,
Null,
RecordBatch, RecordBatch,
RecordBatchFileWriter, RecordBatchFileWriter,
RecordBatchStreamWriter, RecordBatchStreamWriter,
Schema, Schema,
Struct, Struct,
Utf8, Utf8,
Vector, type Vector,
makeBuilder, makeBuilder,
makeData, makeData,
type makeTable, type makeTable,
vectorFromArray, vectorFromArray,
} from "apache-arrow"; } from "apache-arrow";
import { Buffers } from "apache-arrow/data";
import { type EmbeddingFunction } from "./embedding/embedding_function"; import { type EmbeddingFunction } from "./embedding/embedding_function";
import { EmbeddingFunctionConfig, getRegistry } from "./embedding/registry"; import { sanitizeSchema } from "./sanitize";
import {
sanitizeField,
sanitizeSchema,
sanitizeTable,
sanitizeType,
} from "./sanitize";
export * from "apache-arrow";
export type SchemaLike =
| Schema
| {
fields: FieldLike[];
metadata: Map<string, string>;
get names(): unknown[];
};
export type FieldLike =
| Field
| {
type: string;
name: string;
nullable?: boolean;
metadata?: Map<string, string>;
};
export type DataLike =
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
| import("apache-arrow").Data<Struct<any>>
| {
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
type: any;
length: number;
offset: number;
stride: number;
nullable: boolean;
children: DataLike[];
get nullCount(): number;
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
values: Buffers<any>[BufferType.DATA];
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
typeIds: Buffers<any>[BufferType.TYPE];
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
nullBitmap: Buffers<any>[BufferType.VALIDITY];
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
valueOffsets: Buffers<any>[BufferType.OFFSET];
};
export type RecordBatchLike =
| RecordBatch
| {
schema: SchemaLike;
data: DataLike;
};
export type TableLike =
| ArrowTable
| { schema: SchemaLike; batches: RecordBatchLike[] };
export type IntoVector =
| Float32Array
| Float64Array
| number[]
| Promise<Float32Array | Float64Array | number[]>;
export function isArrowTable(value: object): value is TableLike {
if (value instanceof ArrowTable) return true;
return "schema" in value && "batches" in value;
}
export function isDataType(value: unknown): value is DataType {
return (
value instanceof DataType ||
DataType.isNull(value) ||
DataType.isInt(value) ||
DataType.isFloat(value) ||
DataType.isBinary(value) ||
DataType.isLargeBinary(value) ||
DataType.isUtf8(value) ||
DataType.isLargeUtf8(value) ||
DataType.isBool(value) ||
DataType.isDecimal(value) ||
DataType.isDate(value) ||
DataType.isTime(value) ||
DataType.isTimestamp(value) ||
DataType.isInterval(value) ||
DataType.isDuration(value) ||
DataType.isList(value) ||
DataType.isStruct(value) ||
DataType.isUnion(value) ||
DataType.isFixedSizeBinary(value) ||
DataType.isFixedSizeList(value) ||
DataType.isMap(value) ||
DataType.isDictionary(value)
);
}
export function isNull(value: unknown): value is Null {
return value instanceof Null || DataType.isNull(value);
}
export function isInt(value: unknown): value is Int {
return value instanceof Int || DataType.isInt(value);
}
export function isFloat(value: unknown): value is Float {
return value instanceof Float || DataType.isFloat(value);
}
export function isBinary(value: unknown): value is Binary {
return value instanceof Binary || DataType.isBinary(value);
}
export function isLargeBinary(value: unknown): value is LargeBinary {
return value instanceof LargeBinary || DataType.isLargeBinary(value);
}
export function isUtf8(value: unknown): value is Utf8 {
return value instanceof Utf8 || DataType.isUtf8(value);
}
export function isLargeUtf8(value: unknown): value is Utf8 {
return value instanceof Utf8 || DataType.isLargeUtf8(value);
}
export function isBool(value: unknown): value is Utf8 {
return value instanceof Utf8 || DataType.isBool(value);
}
export function isDecimal(value: unknown): value is Utf8 {
return value instanceof Utf8 || DataType.isDecimal(value);
}
export function isDate(value: unknown): value is Utf8 {
return value instanceof Utf8 || DataType.isDate(value);
}
export function isTime(value: unknown): value is Utf8 {
return value instanceof Utf8 || DataType.isTime(value);
}
export function isTimestamp(value: unknown): value is Utf8 {
return value instanceof Utf8 || DataType.isTimestamp(value);
}
export function isInterval(value: unknown): value is Utf8 {
return value instanceof Utf8 || DataType.isInterval(value);
}
export function isDuration(value: unknown): value is Utf8 {
return value instanceof Utf8 || DataType.isDuration(value);
}
export function isList(value: unknown): value is List {
return value instanceof List || DataType.isList(value);
}
export function isStruct(value: unknown): value is Struct {
return value instanceof Struct || DataType.isStruct(value);
}
export function isUnion(value: unknown): value is Struct {
return value instanceof Struct || DataType.isUnion(value);
}
export function isFixedSizeBinary(value: unknown): value is FixedSizeBinary {
return value instanceof FixedSizeBinary || DataType.isFixedSizeBinary(value);
}
export function isFixedSizeList(value: unknown): value is FixedSizeList {
return value instanceof FixedSizeList || DataType.isFixedSizeList(value);
}
/** Data type accepted by NodeJS SDK */ /** Data type accepted by NodeJS SDK */
export type Data = Record<string, unknown>[] | TableLike; export type Data = Record<string, unknown>[] | ArrowTable;
/* /*
* Options to control how a column should be converted to a vector array * Options to control how a column should be converted to a vector array
@@ -221,7 +64,7 @@ export class MakeArrowTableOptions {
* The schema must be specified if there are no records (e.g. to make * The schema must be specified if there are no records (e.g. to make
* an empty table) * an empty table)
*/ */
schema?: SchemaLike; schema?: Schema;
/* /*
* Mapping from vector column name to expected type * Mapping from vector column name to expected type
@@ -243,7 +86,6 @@ export class MakeArrowTableOptions {
vector: new VectorColumnOptions(), vector: new VectorColumnOptions(),
}; };
embeddings?: EmbeddingFunction<unknown>; embeddings?: EmbeddingFunction<unknown>;
embeddingFunction?: EmbeddingFunctionConfig;
/** /**
* If true then string columns will be encoded with dictionary encoding * If true then string columns will be encoded with dictionary encoding
@@ -356,7 +198,6 @@ export class MakeArrowTableOptions {
export function makeArrowTable( export function makeArrowTable(
data: Array<Record<string, unknown>>, data: Array<Record<string, unknown>>,
options?: Partial<MakeArrowTableOptions>, options?: Partial<MakeArrowTableOptions>,
metadata?: Map<string, string>,
): ArrowTable { ): ArrowTable {
if ( if (
data.length === 0 && data.length === 0 &&
@@ -368,11 +209,7 @@ export function makeArrowTable(
const opt = new MakeArrowTableOptions(options !== undefined ? options : {}); const opt = new MakeArrowTableOptions(options !== undefined ? options : {});
if (opt.schema !== undefined && opt.schema !== null) { if (opt.schema !== undefined && opt.schema !== null) {
opt.schema = sanitizeSchema(opt.schema); opt.schema = sanitizeSchema(opt.schema);
opt.schema = validateSchemaEmbeddings( opt.schema = validateSchemaEmbeddings(opt.schema, data, opt.embeddings);
opt.schema as Schema,
data,
options?.embeddingFunction,
);
} }
const columns: Record<string, Vector> = {}; const columns: Record<string, Vector> = {};
// TODO: sample dataset to find missing columns // TODO: sample dataset to find missing columns
@@ -453,41 +290,20 @@ export function makeArrowTable(
// `new ArrowTable(schema, batches)` which does not do any schema inference // `new ArrowTable(schema, batches)` which does not do any schema inference
const firstTable = new ArrowTable(columns); const firstTable = new ArrowTable(columns);
const batchesFixed = firstTable.batches.map( const batchesFixed = firstTable.batches.map(
(batch) => new RecordBatch(opt.schema as Schema, batch.data), // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
(batch) => new RecordBatch(opt.schema!, batch.data),
); );
let schema: Schema; return new ArrowTable(opt.schema, batchesFixed);
if (metadata !== undefined) { } else {
let schemaMetadata = opt.schema.metadata; return new ArrowTable(columns);
if (schemaMetadata.size === 0) {
schemaMetadata = metadata;
} else {
for (const [key, entry] of schemaMetadata.entries()) {
schemaMetadata.set(key, entry);
}
}
schema = new Schema(opt.schema.fields as Field[], schemaMetadata);
} else {
schema = opt.schema as Schema;
}
return new ArrowTable(schema, batchesFixed);
} }
const tbl = new ArrowTable(columns);
if (metadata !== undefined) {
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
(<any>tbl.schema).metadata = metadata;
}
return tbl;
} }
/** /**
* Create an empty Arrow table with the provided schema * Create an empty Arrow table with the provided schema
*/ */
export function makeEmptyTable( export function makeEmptyTable(schema: Schema): ArrowTable {
schema: SchemaLike, return makeArrowTable([], { schema });
metadata?: Map<string, string>,
): ArrowTable {
return makeArrowTable([], { schema }, metadata);
} }
/** /**
@@ -559,79 +375,19 @@ function makeVector(
} }
} }
/** Helper function to apply embeddings from metadata to an input table */
async function applyEmbeddingsFromMetadata(
table: ArrowTable,
schema: Schema,
): Promise<ArrowTable> {
const registry = getRegistry();
const functions = registry.parseFunctions(schema.metadata);
const columns = Object.fromEntries(
table.schema.fields.map((field) => [
field.name,
table.getChild(field.name)!,
]),
);
for (const functionEntry of functions.values()) {
const sourceColumn = columns[functionEntry.sourceColumn];
const destColumn = functionEntry.vectorColumn ?? "vector";
if (sourceColumn === undefined) {
throw new Error(
`Cannot apply embedding function because the source column '${functionEntry.sourceColumn}' was not present in the data`,
);
}
if (columns[destColumn] !== undefined) {
throw new Error(
`Attempt to apply embeddings to table failed because column ${destColumn} already existed`,
);
}
if (table.batches.length > 1) {
throw new Error(
"Internal error: `makeArrowTable` unexpectedly created a table with more than one batch",
);
}
const values = sourceColumn.toArray();
const vectors =
await functionEntry.function.computeSourceEmbeddings(values);
if (vectors.length !== values.length) {
throw new Error(
"Embedding function did not return an embedding for each input element",
);
}
let destType: DataType;
const dtype = schema.fields.find((f) => f.name === destColumn)!.type;
if (isFixedSizeList(dtype)) {
destType = sanitizeType(dtype);
} else {
throw new Error(
"Expected FixedSizeList as datatype for vector field, instead got: " +
dtype,
);
}
const vector = makeVector(vectors, destType);
columns[destColumn] = vector;
}
const newTable = new ArrowTable(columns);
return alignTable(newTable, schema);
}
/** Helper function to apply embeddings to an input table */
async function applyEmbeddings<T>(
  table: ArrowTable,
- embeddings?: EmbeddingFunctionConfig,
- schema?: SchemaLike,
+ embeddings?: EmbeddingFunction<T>,
+ schema?: Schema,
): Promise<ArrowTable> {
+ if (embeddings == null) {
+   return table;
+ }
  if (schema !== undefined && schema !== null) {
    schema = sanitizeSchema(schema);
  }
- if (schema?.metadata.has("embedding_functions")) {
-   return applyEmbeddingsFromMetadata(table, schema! as Schema);
- } else if (embeddings == null || embeddings === undefined) {
-   return table;
- }
  // Convert from ArrowTable to Record<String, Vector>
  const colEntries = [...Array(table.numCols).keys()].map((_, idx) => {
@@ -643,9 +399,8 @@ async function applyEmbeddings<T>(
  const newColumns = Object.fromEntries(colEntries);
  const sourceColumn = newColumns[embeddings.sourceColumn];
- const destColumn = embeddings.vectorColumn ?? "vector";
- const innerDestType =
-   embeddings.function.embeddingDataType() ?? new Float32();
+ const destColumn = embeddings.destColumn ?? "vector";
+ const innerDestType = embeddings.embeddingDataType ?? new Float32();
  if (sourceColumn === undefined) {
    throw new Error(
      `Cannot apply embedding function because the source column '${embeddings.sourceColumn}' was not present in the data`,
@@ -659,9 +414,11 @@ async function applyEmbeddings<T>(
    // if we call convertToTable with 0 records and a schema that includes the embedding
    return table;
  }
- const dimensions = embeddings.function.ndims();
- if (dimensions !== undefined) {
-   const destType = newVectorType(dimensions, innerDestType);
+ if (embeddings.embeddingDimension !== undefined) {
+   const destType = newVectorType(
+     embeddings.embeddingDimension,
+     innerDestType,
+   );
    newColumns[destColumn] = makeVector([], destType);
  } else if (schema != null) {
    const destField = schema.fields.find((f) => f.name === destColumn);
@@ -689,9 +446,7 @@ async function applyEmbeddings<T>(
    );
  }
  const values = sourceColumn.toArray();
- const vectors = await embeddings.function.computeSourceEmbeddings(
-   values as T[],
- );
+ const vectors = await embeddings.embed(values as T[]);
  if (vectors.length !== values.length) {
    throw new Error(
      "Embedding function did not return an embedding for each input element",
@@ -708,7 +463,7 @@ async function applyEmbeddings<T>(
      `When using embedding functions and specifying a schema the schema should include the embedding column but the column ${destColumn} was missing`,
    );
  }
- return alignTable(newTable, schema as Schema);
+ return alignTable(newTable, schema);
  }
  return newTable;
}
@@ -731,9 +486,9 @@ async function applyEmbeddings<T>(
 * embedding columns. If no schema is provided then embedding columns will
 * be placed at the end of the table, after all of the input columns.
 */
-export async function convertToTable(
+export async function convertToTable<T>(
  data: Array<Record<string, unknown>>,
- embeddings?: EmbeddingFunctionConfig,
+ embeddings?: EmbeddingFunction<T>,
  makeTableOptions?: Partial<MakeArrowTableOptions>,
): Promise<ArrowTable> {
  const table = makeArrowTable(data, makeTableOptions);
@@ -741,13 +496,13 @@ export async function convertToTable(
}
/** Creates the Arrow Type for a Vector column with dimension `dim` */
-export function newVectorType<T extends Float>(
+function newVectorType<T extends Float>(
  dim: number,
  innerType: T,
): FixedSizeList<T> {
  // in Lance we always default to have the elements nullable, so we need to set it to true
  // otherwise we often get schema mismatches because the stored data always has schema with nullable elements
- const children = new Field("item", <T>sanitizeType(innerType), true);
+ const children = new Field<T>("item", innerType, true);
  return new FixedSizeList(dim, children);
}
@@ -758,9 +513,9 @@ export function newVectorType<T extends Float>(
 *
 * `schema` is required if data is empty
 */
-export async function fromRecordsToBuffer(
+export async function fromRecordsToBuffer<T>(
  data: Array<Record<string, unknown>>,
- embeddings?: EmbeddingFunctionConfig,
+ embeddings?: EmbeddingFunction<T>,
  schema?: Schema,
): Promise<Buffer> {
  if (schema !== undefined && schema !== null) {
@@ -778,9 +533,9 @@ export async function fromRecordsToBuffer(
 *
 * `schema` is required if data is empty
 */
-export async function fromRecordsToStreamBuffer(
+export async function fromRecordsToStreamBuffer<T>(
  data: Array<Record<string, unknown>>,
- embeddings?: EmbeddingFunctionConfig,
+ embeddings?: EmbeddingFunction<T>,
  schema?: Schema,
): Promise<Buffer> {
  if (schema !== undefined && schema !== null) {
@@ -799,10 +554,10 @@ export async function fromRecordsToStreamBuffer(
 *
 * `schema` is required if the table is empty
 */
-export async function fromTableToBuffer(
+export async function fromTableToBuffer<T>(
  table: ArrowTable,
- embeddings?: EmbeddingFunctionConfig,
- schema?: SchemaLike,
+ embeddings?: EmbeddingFunction<T>,
+ schema?: Schema,
): Promise<Buffer> {
  if (schema !== undefined && schema !== null) {
    schema = sanitizeSchema(schema);
@@ -820,19 +575,19 @@ export async function fromTableToBuffer(
 *
 * `schema` is required if the table is empty
 */
-export async function fromDataToBuffer(
+export async function fromDataToBuffer<T>(
  data: Data,
- embeddings?: EmbeddingFunctionConfig,
+ embeddings?: EmbeddingFunction<T>,
  schema?: Schema,
): Promise<Buffer> {
  if (schema !== undefined && schema !== null) {
    schema = sanitizeSchema(schema);
  }
- if (isArrowTable(data)) {
-   return fromTableToBuffer(sanitizeTable(data), embeddings, schema);
+ if (data instanceof ArrowTable) {
+   return fromTableToBuffer(data, embeddings, schema);
  } else {
-   const table = await convertToTable(data, embeddings, { schema });
-   return fromTableToBuffer(table);
+   const table = await convertToTable(data);
+   return fromTableToBuffer(table, embeddings, schema);
  }
}
@@ -844,10 +599,10 @@ export async function fromDataToBuffer(
 *
 * `schema` is required if the table is empty
 */
-export async function fromTableToStreamBuffer(
+export async function fromTableToStreamBuffer<T>(
  table: ArrowTable,
- embeddings?: EmbeddingFunctionConfig,
- schema?: SchemaLike,
+ embeddings?: EmbeddingFunction<T>,
+ schema?: Schema,
): Promise<Buffer> {
  const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema);
  const writer = RecordBatchStreamWriter.writeAll(tableWithEmbeddings);
@@ -899,7 +654,7 @@ export function createEmptyTable(schema: Schema): ArrowTable {
function validateSchemaEmbeddings(
  schema: Schema,
  data: Array<Record<string, unknown>>,
- embeddings: EmbeddingFunctionConfig | undefined,
+ embeddings: EmbeddingFunction<unknown> | undefined,
) {
  const fields = [];
  const missingEmbeddingFields = [];
@@ -909,24 +664,10 @@ function validateSchemaEmbeddings(
  // if it does not, we add it to the list of missing embedding fields
  // Finally, we check if those missing embedding fields are `this._embeddings`
  // if they are not, we throw an error
- for (let field of schema.fields) {
-   if (isFixedSizeList(field.type)) {
-     field = sanitizeField(field);
+ for (const field of schema.fields) {
+   if (field.type instanceof FixedSizeList) {
      if (data.length !== 0 && data?.[0]?.[field.name] === undefined) {
-       if (schema.metadata.has("embedding_functions")) {
-         const embeddings = JSON.parse(
-           schema.metadata.get("embedding_functions")!,
-         );
-         if (
-           // biome-ignore lint/suspicious/noExplicitAny: we don't know the type of `f`
-           embeddings.find((f: any) => f["vectorColumn"] === field.name) ===
-             undefined
-         ) {
-           missingEmbeddingFields.push(field);
-         }
-       } else {
-         missingEmbeddingFields.push(field);
-       }
+       missingEmbeddingFields.push(field);
      } else {
        fields.push(field);
      }
@@ -936,6 +677,8 @@ function validateSchemaEmbeddings(
  }
  if (missingEmbeddingFields.length > 0 && embeddings === undefined) {
+   console.log({ missingEmbeddingFields, embeddings });
    throw new Error(
      `Table has embeddings: "${missingEmbeddingFields
        .map((f) => f.name)
@@ -943,5 +686,5 @@ function validateSchemaEmbeddings(
    );
  }
- return new Schema(fields, schema.metadata);
+ return new Schema(fields);
}
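To make the two embeddings signatures in this file's diff concrete, a hedged sketch of calling fromDataToBuffer under each API (func is assumed to be an EmbeddingFunction instance; the inline embed is a stand-in):

const data = [{ text: "hello" }];
// Left-hand side: embeddings arrive as an EmbeddingFunctionConfig.
await fromDataToBuffer(data, { sourceColumn: "text", function: func });
// Right-hand side: the EmbeddingFunction<T> itself names its source column.
await fromDataToBuffer(data, {
  sourceColumn: "text",
  embed: async (d: string[]) => d.map(() => [0.0, 0.0]),
});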


@@ -12,11 +12,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.
- import { Data, Schema, SchemaLike, TableLike } from "./arrow";
- import { fromTableToBuffer, makeEmptyTable } from "./arrow";
- import { EmbeddingFunctionConfig, getRegistry } from "./embedding/registry";
- import { Connection as LanceDbConnection } from "./native";
- import { LocalTable, Table } from "./table";
+ import { Table as ArrowTable, Schema } from "apache-arrow";
+ import { fromTableToBuffer, makeArrowTable, makeEmptyTable } from "./arrow";
+ import { ConnectionOptions, Connection as LanceDbConnection } from "./native";
+ import { Table } from "./table";
/**
* Connect to a LanceDB instance at the given URI.
*
* Accepted formats:
*
* - `/path/to/database` - local database
* - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage
* - `db://host:port` - remote database (LanceDB cloud)
* @param {string} uri - The uri of the database. If the database uri starts
* with `db://` then it connects to a remote database.
* @see {@link ConnectionOptions} for more details on the URI format.
*/
export async function connect(
uri: string,
opts?: Partial<ConnectionOptions>,
): Promise<Connection> {
opts = opts ?? {};
opts.storageOptions = cleanseStorageOptions(opts.storageOptions);
const nativeConn = await LanceDbConnection.new(uri, opts);
return new Connection(nativeConn);
}
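For orientation, a minimal usage sketch of this connect function (the path and output are illustrative):

import { connect } from "@lancedb/lancedb";

const db = await connect("/tmp/sample-lancedb");
console.log(await db.tableNames()); // [] for a fresh database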
export interface CreateTableOptions {
  /**
@@ -44,14 +65,6 @@ export interface CreateTableOptions {
   * The available options are described at https://lancedb.github.io/lancedb/guides/storage/
   */
  storageOptions?: Record<string, string>;
-  /**
-   * If true then data files will be written with the legacy format
-   *
-   * The default is true while the new format is in beta
-   */
-  useLegacyFormat?: boolean;
-  schema?: SchemaLike;
-  embeddingFunction?: EmbeddingFunctionConfig;
}
export interface OpenTableOptions {
@@ -90,6 +103,7 @@ export interface TableNamesOptions {
  /** An optional limit to the number of results to return. */
  limit?: number;
}
/**
 * A LanceDB Connection that allows you to open tables and create new ones.
 *
@@ -108,15 +122,17 @@ export interface TableNamesOptions {
 * Any created tables are independent and will continue to work even if
 * the underlying connection has been closed.
 */
-export abstract class Connection {
-  [Symbol.for("nodejs.util.inspect.custom")](): string {
-    return this.display();
+export class Connection {
+  readonly inner: LanceDbConnection;
+  constructor(inner: LanceDbConnection) {
+    this.inner = inner;
  }
-  /**
-   * Return true if the connection has not been closed
-   */
-  abstract isOpen(): boolean;
+  /** Return true if the connection has not been closed */
+  isOpen(): boolean {
+    return this.inner.isOpen();
+  }
  /**
   * Close the connection, releasing any underlying resources.
@@ -125,12 +141,14 @@ export abstract class Connection {
   *
   * Any attempt to use the connection after it is closed will result in an error.
   */
-  abstract close(): void;
+  close(): void {
+    this.inner.close();
+  }
-  /**
-   * Return a brief description of the connection
-   */
-  abstract display(): string;
+  /** Return a brief description of the connection */
+  display(): string {
+    return this.inner.display();
+  }
  /**
   * List all the table names in this database.
@@ -138,86 +156,15 @@ export abstract class Connection {
   * Tables will be returned in lexicographical order.
   * @param {Partial<TableNamesOptions>} options - options to control the
   * paging / start point
-   *
   */
-  abstract tableNames(options?: Partial<TableNamesOptions>): Promise<string[]>;
+  async tableNames(options?: Partial<TableNamesOptions>): Promise<string[]> {
+    return this.inner.tableNames(options?.startAfter, options?.limit);
+  }
  /**
   * Open a table in the database.
   * @param {string} name - The name of the table
   */
-  abstract openTable(
-    name: string,
-    options?: Partial<OpenTableOptions>,
-  ): Promise<Table>;
/**
* Creates a new Table and initialize it with new data.
* @param {object} options - The options object.
* @param {string} options.name - The name of the table.
* @param {Data} options.data - Non-empty Array of Records to be inserted into the table
*
*/
abstract createTable(
options: {
name: string;
data: Data;
} & Partial<CreateTableOptions>,
): Promise<Table>;
/**
* Creates a new Table and initialize it with new data.
* @param {string} name - The name of the table.
* @param {Record<string, unknown>[] | TableLike} data - Non-empty Array of Records
* to be inserted into the table
*/
abstract createTable(
name: string,
data: Record<string, unknown>[] | TableLike,
options?: Partial<CreateTableOptions>,
): Promise<Table>;
/**
* Creates a new empty Table
* @param {string} name - The name of the table.
* @param {Schema} schema - The schema of the table
*/
abstract createEmptyTable(
name: string,
schema: import("./arrow").SchemaLike,
options?: Partial<CreateTableOptions>,
): Promise<Table>;
/**
* Drop an existing table.
* @param {string} name The name of the table to drop.
*/
abstract dropTable(name: string): Promise<void>;
}
export class LocalConnection extends Connection {
readonly inner: LanceDbConnection;
constructor(inner: LanceDbConnection) {
super();
this.inner = inner;
}
isOpen(): boolean {
return this.inner.isOpen();
}
close(): void {
this.inner.close();
}
display(): string {
return this.inner.display();
}
async tableNames(options?: Partial<TableNamesOptions>): Promise<string[]> {
return this.inner.tableNames(options?.startAfter, options?.limit);
}
  async openTable(
    name: string,
    options?: Partial<OpenTableOptions>,
@@ -227,39 +174,18 @@ export class LocalConnection extends Connection {
      cleanseStorageOptions(options?.storageOptions),
      options?.indexCacheSize,
    );
-    return new LocalTable(innerTable);
+    return new Table(innerTable);
  }
/**
* Creates a new Table and initialize it with new data.
* @param {string} name - The name of the table.
* @param {Record<string, unknown>[] | ArrowTable} data - Non-empty Array of Records
* to be inserted into the table
*/
  async createTable(
nameOrOptions:
| string
| ({ name: string; data: Data } & Partial<CreateTableOptions>),
data?: Record<string, unknown>[] | TableLike,
options?: Partial<CreateTableOptions>,
): Promise<Table> {
if (typeof nameOrOptions !== "string" && "name" in nameOrOptions) {
const { name, data, ...options } = nameOrOptions;
return this.createTable(name, data, options);
}
if (data === undefined) {
throw new Error("data is required");
}
const { buf, mode } = await Table.parseTableData(data, options);
const innerTable = await this.inner.createTable(
nameOrOptions,
buf,
mode,
cleanseStorageOptions(options?.storageOptions),
options?.useLegacyFormat,
);
return new LocalTable(innerTable);
}
-  async createEmptyTable(
    name: string,
-    schema: import("./arrow").SchemaLike,
+    data: Record<string, unknown>[] | ArrowTable,
    options?: Partial<CreateTableOptions>,
  ): Promise<Table> {
    let mode: string = options?.mode ?? "create";
@@ -268,25 +194,55 @@ export class LocalConnection extends Connection {
    if (mode === "create" && existOk) {
      mode = "exist_ok";
    }
-    let metadata: Map<string, string> | undefined = undefined;
-    if (options?.embeddingFunction !== undefined) {
-      const embeddingFunction = options.embeddingFunction;
-      const registry = getRegistry();
-      metadata = registry.getTableMetadata([embeddingFunction]);
+    let table: ArrowTable;
+    if (data instanceof ArrowTable) {
+      table = data;
+    } else {
+      table = makeArrowTable(data);
+    }
+    const buf = await fromTableToBuffer(table);
+    const innerTable = await this.inner.createTable(
+      name,
+      buf,
+      mode,
+      cleanseStorageOptions(options?.storageOptions),
+    );
+    return new Table(innerTable);
+  }
+  /**
+   * Creates a new empty Table
+   * @param {string} name - The name of the table.
+   * @param {Schema} schema - The schema of the table
+   */
+  async createEmptyTable(
+    name: string,
+    schema: Schema,
+    options?: Partial<CreateTableOptions>,
+  ): Promise<Table> {
+    let mode: string = options?.mode ?? "create";
+    const existOk = options?.existOk ?? false;
+    if (mode === "create" && existOk) {
+      mode = "exist_ok";
    }
-    const table = makeEmptyTable(schema, metadata);
+    const table = makeEmptyTable(schema);
    const buf = await fromTableToBuffer(table);
    const innerTable = await this.inner.createEmptyTable(
      name,
      buf,
      mode,
      cleanseStorageOptions(options?.storageOptions),
-      options?.useLegacyFormat,
    );
-    return new LocalTable(innerTable);
+    return new Table(innerTable);
  }
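A short sketch of the two creation paths above (table name and rows are illustrative):

const db = await connect("/tmp/sample-lancedb");
// createTable infers an Arrow schema from the records via makeArrowTable.
const tbl = await db.createTable("vectors", [
  { id: 1, vector: [0.1, 0.2] },
  { id: 2, vector: [0.3, 0.4] },
]);
// createEmptyTable instead takes an explicit Arrow Schema:
// const empty = await db.createEmptyTable("empty", schema);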
/**
* Drop an existing table.
* @param {string} name The name of the table to drop.
*/
  async dropTable(name: string): Promise<void> {
    return this.inner.dropTable(name);
  }
@@ -295,7 +251,7 @@ export class LocalConnection extends Connection {
/**
 * Takes storage options and makes all the keys snake case.
 */
-export function cleanseStorageOptions(
+function cleanseStorageOptions(
  options?: Record<string, string>,
): Record<string, string> | undefined {
  if (options === undefined) {


@@ -1,4 +1,4 @@
-// Copyright 2024 Lance Developers.
+// Copyright 2023 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,183 +12,67 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-import "reflect-metadata";
+import { type Float } from "apache-arrow";
import {
DataType,
Field,
FixedSizeList,
Float,
Float32,
type IntoVector,
isDataType,
isFixedSizeList,
isFloat,
newVectorType,
} from "../arrow";
import { sanitizeType } from "../sanitize";
/**
* Options for a given embedding function
*/
export interface FunctionOptions {
// biome-ignore lint/suspicious/noExplicitAny: options can be anything
[key: string]: any;
}
export interface EmbeddingFunctionConstructor<
T extends EmbeddingFunction = EmbeddingFunction,
> {
new (modelOptions?: T["TOptions"]): T;
}
/**
 * An embedding function that automatically creates vector representation for a given column.
 */
-export abstract class EmbeddingFunction<
-  // biome-ignore lint/suspicious/noExplicitAny: we don't know what the implementor will do
-  T = any,
-  M extends FunctionOptions = FunctionOptions,
-> {
-  /**
-   * @ignore
-   * This is only used for associating the options type with the class for type checking
-   */
-  // biome-ignore lint/style/useNamingConvention: we want to keep the name as it is
-  readonly TOptions!: M;
-  /**
-   * Convert the embedding function to a JSON object
-   * It is used to serialize the embedding function to the schema
-   * It's important that any object returned by this method contains all the necessary
-   * information to recreate the embedding function
-   *
-   * It should return the same object that was passed to the constructor
-   * If it does not, the embedding function will not be able to be recreated, or could be recreated incorrectly
-   *
-   * @example
-   * ```ts
-   * class MyEmbeddingFunction extends EmbeddingFunction {
-   *   constructor(options: {model: string, timeout: number}) {
-   *     super();
-   *     this.model = options.model;
-   *     this.timeout = options.timeout;
-   *   }
-   *   toJSON() {
-   *     return {
-   *       model: this.model,
-   *       timeout: this.timeout,
-   *     };
-   *   }
-   * ```
-   */
-  abstract toJSON(): Partial<M>;
-  /**
-   * sourceField is used in combination with `LanceSchema` to provide a declarative data model
-   *
-   * @param optionsOrDatatype - The options for the field or the datatype
-   *
-   * @see {@link lancedb.LanceSchema}
-   */
-  sourceField(
-    optionsOrDatatype: Partial<FieldOptions> | DataType,
-  ): [DataType, Map<string, EmbeddingFunction>] {
-    let datatype = isDataType(optionsOrDatatype)
-      ? optionsOrDatatype
-      : optionsOrDatatype?.datatype;
-    if (!datatype) {
-      throw new Error("Datatype is required");
-    }
-    datatype = sanitizeType(datatype);
-    const metadata = new Map<string, EmbeddingFunction>();
-    metadata.set("source_column_for", this);
-    return [datatype, metadata];
-  }
-  /**
-   * vectorField is used in combination with `LanceSchema` to provide a declarative data model
-   *
-   * @param options - The options for the field
-   *
-   * @see {@link lancedb.LanceSchema}
-   */
-  vectorField(
-    optionsOrDatatype?: Partial<FieldOptions> | DataType,
-  ): [DataType, Map<string, EmbeddingFunction>] {
-    let dtype: DataType | undefined;
-    let vectorType: DataType;
-    let dims: number | undefined = this.ndims();
-    // `func.vectorField(new Float32())`
-    if (isDataType(optionsOrDatatype)) {
-      dtype = optionsOrDatatype;
-    } else {
-      // `func.vectorField({
-      //  datatype: new Float32(),
-      //  dims: 10
-      // })`
-      dims = dims ?? optionsOrDatatype?.dims;
-      dtype = optionsOrDatatype?.datatype;
-    }
-    if (dtype !== undefined) {
-      // `func.vectorField(new FixedSizeList(dims, new Field("item", new Float32(), true)))`
-      // or `func.vectorField({datatype: new FixedSizeList(dims, new Field("item", new Float32(), true))})`
-      if (isFixedSizeList(dtype)) {
-        vectorType = dtype;
-        // `func.vectorField(new Float32())`
-        // or `func.vectorField({datatype: new Float32()})`
-      } else if (isFloat(dtype)) {
-        // No `ndims` impl and no `{dims: n}` provided;
-        if (dims === undefined) {
-          throw new Error("ndims is required for vector field");
-        }
-        vectorType = newVectorType(dims, dtype);
-      } else {
-        throw new Error(
-          "Expected FixedSizeList or Float as datatype for vector field",
-        );
-      }
-    } else {
-      if (dims === undefined) {
-        throw new Error("ndims is required for vector field");
-      }
-      vectorType = new FixedSizeList(
-        dims,
-        new Field("item", new Float32(), true),
-      );
-    }
-    const metadata = new Map<string, EmbeddingFunction>();
-    metadata.set("vector_column_for", this);
-    return [vectorType, metadata];
-  }
-  /** The number of dimensions of the embeddings */
-  ndims(): number | undefined {
-    return undefined;
-  }
-  /** The datatype of the embeddings */
-  abstract embeddingDataType(): Float;
-  /**
-   * Creates a vector representation for the given values.
-   */
-  abstract computeSourceEmbeddings(
-    data: T[],
-  ): Promise<number[][] | Float32Array[] | Float64Array[]>;
-  /**
-   * Compute the embeddings for a single query
-   */
-  async computeQueryEmbeddings(data: T): Promise<Awaited<IntoVector>> {
-    return this.computeSourceEmbeddings([data]).then(
-      (embeddings) => embeddings[0],
-    );
-  }
-}
-export interface FieldOptions<T extends DataType = DataType> {
-  datatype: T;
-  dims?: number;
-}
+export interface EmbeddingFunction<T> {
+  /**
+   * The name of the column that will be used as input for the Embedding Function.
+   */
+  sourceColumn: string;
+  /**
+   * The data type of the embedding
+   *
+   * The embedding function should return `number`. This will be converted into
+   * an Arrow float array. By default this will be Float32 but this property can
+   * be used to control the conversion.
+   */
+  embeddingDataType?: Float;
+  /**
+   * The dimension of the embedding
+   *
+   * This is optional, normally this can be determined by looking at the results of
+   * `embed`. If this is not specified, and there is an attempt to apply the embedding
+   * to an empty table, then that process will fail.
+   */
+  embeddingDimension?: number;
+  /**
+   * The name of the column that will contain the embedding
+   *
+   * By default this is "vector"
+   */
+  destColumn?: string;
+  /**
+   * Should the source column be excluded from the resulting table
+   *
+   * By default the source column is included. Set this to true and
+   * only the embedding will be stored.
+   */
+  excludeSource?: boolean;
+  /**
+   * Creates a vector representation for the given values.
+   */
+  embed: (data: T[]) => Promise<number[][]>;
+}
+/** Test if the input seems to be an embedding function */
+export function isEmbeddingFunction<T>(
+  value: unknown,
+): value is EmbeddingFunction<T> {
+  if (typeof value !== "object" || value === null) {
+    return false;
+  }
+  if (!("sourceColumn" in value) || !("embed" in value)) {
+    return false;
+  }
+  return (
+    typeof value.sourceColumn === "string" && typeof value.embed === "function"
+  );
+}
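To show the shape the new interface expects, a toy implementation plus the type guard (the constant vector is purely illustrative):

const toyFunc: EmbeddingFunction<string> = {
  sourceColumn: "text",
  embeddingDimension: 2,
  // Map every input string to a fixed 2-dimensional vector.
  embed: async (data: string[]) => data.map(() => [0.0, 1.0]),
};
console.log(isEmbeddingFunction(toyFunc)); // true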


@@ -1,113 +1,2 @@
-// Copyright 2023 Lance Developers.
-//
+export { EmbeddingFunction, isEmbeddingFunction } from "./embedding_function";
+export { OpenAIEmbeddingFunction } from "./openai";
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import { DataType, Field, Schema } from "../arrow";
import { isDataType } from "../arrow";
import { sanitizeType } from "../sanitize";
import { EmbeddingFunction } from "./embedding_function";
import { EmbeddingFunctionConfig, getRegistry } from "./registry";
export { EmbeddingFunction } from "./embedding_function";
// We need to explicitly export '*' so that the `register` decorator actually registers the class.
export * from "./openai";
export * from "./registry";
/**
* Create a schema with embedding functions.
*
* @param fields
* @returns Schema
* @example
* ```ts
* class MyEmbeddingFunction extends EmbeddingFunction {
* // ...
* }
* const func = new MyEmbeddingFunction();
* const schema = LanceSchema({
* id: new Int32(),
* text: func.sourceField(new Utf8()),
* vector: func.vectorField(),
* // optional: specify the datatype and/or dimensions
* vector2: func.vectorField({ datatype: new Float32(), dims: 3}),
* });
*
* const table = await db.createTable("my_table", data, { schema });
* ```
*/
export function LanceSchema(
fields: Record<string, [object, Map<string, EmbeddingFunction>] | object>,
): Schema {
const arrowFields: Field[] = [];
const embeddingFunctions = new Map<
EmbeddingFunction,
Partial<EmbeddingFunctionConfig>
>();
Object.entries(fields).forEach(([key, value]) => {
if (isDataType(value)) {
arrowFields.push(new Field(key, sanitizeType(value), true));
} else {
const [dtype, metadata] = value as [
object,
Map<string, EmbeddingFunction>,
];
arrowFields.push(new Field(key, sanitizeType(dtype), true));
parseEmbeddingFunctions(embeddingFunctions, key, metadata);
}
});
const registry = getRegistry();
const metadata = registry.getTableMetadata(
Array.from(embeddingFunctions.values()) as EmbeddingFunctionConfig[],
);
const schema = new Schema(arrowFields, metadata);
return schema;
}
function parseEmbeddingFunctions(
embeddingFunctions: Map<EmbeddingFunction, Partial<EmbeddingFunctionConfig>>,
key: string,
metadata: Map<string, EmbeddingFunction>,
): void {
if (metadata.has("source_column_for")) {
const embedFunction = metadata.get("source_column_for")!;
const current = embeddingFunctions.get(embedFunction);
if (current !== undefined) {
embeddingFunctions.set(embedFunction, {
...current,
sourceColumn: key,
});
} else {
embeddingFunctions.set(embedFunction, {
sourceColumn: key,
function: embedFunction,
});
}
} else if (metadata.has("vector_column_for")) {
const embedFunction = metadata.get("vector_column_for")!;
const current = embeddingFunctions.get(embedFunction);
if (current !== undefined) {
embeddingFunctions.set(embedFunction, {
...current,
vectorColumn: key,
});
} else {
embeddingFunctions.set(embedFunction, {
vectorColumn: key,
function: embedFunction,
});
}
}
}

View File

@@ -13,36 +13,17 @@
// limitations under the License.
import type OpenAI from "openai";
-import { type EmbeddingCreateParams } from "openai/resources";
-import { Float, Float32 } from "../arrow";
-import { EmbeddingFunction } from "./embedding_function";
-import { register } from "./registry";
+import { type EmbeddingFunction } from "./embedding_function";
-export type OpenAIOptions = {
-  apiKey: string;
-  model: EmbeddingCreateParams["model"];
-};
-@register("openai")
-export class OpenAIEmbeddingFunction extends EmbeddingFunction<
-  string,
-  Partial<OpenAIOptions>
-> {
-  #openai: OpenAI;
-  #modelName: OpenAIOptions["model"];
+export class OpenAIEmbeddingFunction implements EmbeddingFunction<string> {
+  private readonly _openai: OpenAI;
+  private readonly _modelName: string;
  constructor(
-    options: Partial<OpenAIOptions> = {
-      model: "text-embedding-ada-002",
-    },
+    sourceColumn: string,
+    openAIKey: string,
+    modelName: string = "text-embedding-ada-002",
  ) {
-    super();
-    const openAIKey = options?.apiKey ?? process.env.OPENAI_API_KEY;
-    if (!openAIKey) {
-      throw new Error("OpenAI API key is required");
-    }
-    const modelName = options?.model ?? "text-embedding-ada-002";
    /**
     * @type {import("openai").default}
     */
@@ -55,40 +36,18 @@ export class OpenAIEmbeddingFunction extends EmbeddingFunction<
      throw new Error("please install openai@^4.24.1 using npm install openai");
    }
+    this.sourceColumn = sourceColumn;
    const configuration = {
      apiKey: openAIKey,
    };
-    this.#openai = new Openai(configuration);
-    this.#modelName = modelName;
+    this._openai = new Openai(configuration);
+    this._modelName = modelName;
  }
-  toJSON() {
-    return {
-      model: this.#modelName,
-    };
-  }
-  ndims(): number {
-    switch (this.#modelName) {
-      case "text-embedding-ada-002":
-        return 1536;
-      case "text-embedding-3-large":
-        return 3072;
-      case "text-embedding-3-small":
-        return 1536;
-      default:
-        throw new Error(`Unknown model: ${this.#modelName}`);
-    }
-  }
-  embeddingDataType(): Float {
-    return new Float32();
-  }
-  async computeSourceEmbeddings(data: string[]): Promise<number[][]> {
-    const response = await this.#openai.embeddings.create({
-      model: this.#modelName,
+  async embed(data: string[]): Promise<number[][]> {
+    const response = await this._openai.embeddings.create({
+      model: this._modelName,
      input: data,
    });
@@ -99,15 +58,5 @@ export class OpenAIEmbeddingFunction extends EmbeddingFunction<
    return embeddings;
  }
-  async computeQueryEmbeddings(data: string): Promise<number[]> {
-    if (typeof data !== "string") {
-      throw new Error("Data must be a string");
-    }
-    const response = await this.#openai.embeddings.create({
-      model: this.#modelName,
-      input: data,
-    });
-    return response.data[0].embedding;
-  }
+  sourceColumn: string;
}
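A usage sketch of the constructor-based class on the right-hand side (the key handling is illustrative):

const func = new OpenAIEmbeddingFunction("text", process.env.OPENAI_API_KEY ?? "");
const vectors = await func.embed(["hello world"]);
console.log(vectors.length); // one embedding per input string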


@@ -1,188 +0,0 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {
type EmbeddingFunction,
type EmbeddingFunctionConstructor,
} from "./embedding_function";
import "reflect-metadata";
import { OpenAIEmbeddingFunction } from "./openai";
interface EmbeddingFunctionCreate<T extends EmbeddingFunction> {
create(options?: T["TOptions"]): T;
}
/**
* This is a singleton class used to register embedding functions
* and fetch them by name. It also handles serializing and deserializing.
* You can implement your own embedding function by subclassing EmbeddingFunction
* or TextEmbeddingFunction and registering it with the registry
*/
export class EmbeddingFunctionRegistry {
#functions = new Map<string, EmbeddingFunctionConstructor>();
/**
* Register an embedding function
* @param name The name of the function
* @param func The function to register
* @throws Error if the function is already registered
*/
register<
T extends EmbeddingFunctionConstructor = EmbeddingFunctionConstructor,
>(
this: EmbeddingFunctionRegistry,
alias?: string,
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
): (ctor: T) => any {
const self = this;
return function (ctor: T) {
if (!alias) {
alias = ctor.name;
}
if (self.#functions.has(alias)) {
throw new Error(
`Embedding function with alias "${alias}" already exists`,
);
}
self.#functions.set(alias, ctor);
Reflect.defineMetadata("lancedb::embedding::name", alias, ctor);
return ctor;
};
}
/**
* Fetch an embedding function by name
* @param name The name of the function
*/
get<T extends EmbeddingFunction<unknown>, Name extends string = "">(
name: Name extends "openai" ? "openai" : string,
//This makes it so that you can use string constants as "types", or use an explicitly supplied type
// ex:
// `registry.get("openai") -> EmbeddingFunctionCreate<OpenAIEmbeddingFunction>`
// `registry.get<MyCustomEmbeddingFunction>("my_func") -> EmbeddingFunctionCreate<MyCustomEmbeddingFunction> | undefined`
//
// the reason this is important is that we always know our built in functions are defined so the user isnt forced to do a non null/undefined
// ```ts
// const openai: OpenAIEmbeddingFunction = registry.get("openai").create()
// ```
): Name extends "openai"
? EmbeddingFunctionCreate<OpenAIEmbeddingFunction>
: EmbeddingFunctionCreate<T> | undefined {
type Output = Name extends "openai"
? EmbeddingFunctionCreate<OpenAIEmbeddingFunction>
: EmbeddingFunctionCreate<T> | undefined;
const factory = this.#functions.get(name);
if (!factory) {
return undefined as Output;
}
return {
create: function (options?: T["TOptions"]) {
return new factory(options);
},
} as Output;
}
/**
* reset the registry to the initial state
*/
reset(this: EmbeddingFunctionRegistry) {
this.#functions.clear();
}
/**
* @ignore
*/
parseFunctions(
this: EmbeddingFunctionRegistry,
metadata: Map<string, string>,
): Map<string, EmbeddingFunctionConfig> {
if (!metadata.has("embedding_functions")) {
return new Map();
} else {
type FunctionConfig = {
name: string;
sourceColumn: string;
vectorColumn: string;
model: EmbeddingFunction["TOptions"];
};
const functions = <FunctionConfig[]>(
JSON.parse(metadata.get("embedding_functions")!)
);
return new Map(
functions.map((f) => {
const fn = this.get(f.name);
if (!fn) {
throw new Error(`Function "${f.name}" not found in registry`);
}
return [
f.name,
{
sourceColumn: f.sourceColumn,
vectorColumn: f.vectorColumn,
function: this.get(f.name)!.create(f.model),
},
];
}),
);
}
}
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
functionToMetadata(conf: EmbeddingFunctionConfig): Record<string, any> {
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
const metadata: Record<string, any> = {};
const name = Reflect.getMetadata(
"lancedb::embedding::name",
conf.function.constructor,
);
metadata["sourceColumn"] = conf.sourceColumn;
metadata["vectorColumn"] = conf.vectorColumn ?? "vector";
metadata["name"] = name ?? conf.function.constructor.name;
metadata["model"] = conf.function.toJSON();
return metadata;
}
getTableMetadata(functions: EmbeddingFunctionConfig[]): Map<string, string> {
const metadata = new Map<string, string>();
const jsonData = functions.map((conf) => this.functionToMetadata(conf));
metadata.set("embedding_functions", JSON.stringify(jsonData));
return metadata;
}
}
const _REGISTRY = new EmbeddingFunctionRegistry();
export function register(name?: string) {
return _REGISTRY.register(name);
}
/**
* Utility function to get the global instance of the registry
* @returns `EmbeddingFunctionRegistry` The global instance of the registry
* @example
* ```ts
* const registry = getRegistry();
* const openai = registry.get("openai").create();
 * ```
 */
export function getRegistry(): EmbeddingFunctionRegistry {
return _REGISTRY;
}
export interface EmbeddingFunctionConfig {
sourceColumn: string;
vectorColumn?: string;
function: EmbeddingFunction;
}
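For context, a short sketch of how this removed registry was used together with LanceSchema (assuming the OpenAI function registered under "openai" as above):

const registry = getRegistry();
const openai = registry.get("openai").create({ model: "text-embedding-ada-002" });
const schema = LanceSchema({
  text: openai.sourceField(new Utf8()),
  vector: openai.vectorField(),
});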


@@ -12,43 +12,25 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import {
Connection,
LocalConnection,
cleanseStorageOptions,
} from "./connection";
import {
ConnectionOptions,
Connection as LanceDbConnection,
} from "./native.js";
import { RemoteConnection, RemoteConnectionOptions } from "./remote";
export {
  WriteOptions,
  WriteMode,
  AddColumnsSql,
  ColumnAlteration,
  ConnectionOptions,
-  IndexStatistics,
-  IndexMetadata,
-  IndexConfig,
} from "./native.js";
export {
  makeArrowTable,
  MakeArrowTableOptions,
  Data,
  VectorColumnOptions,
} from "./arrow";
export {
+  connect,
  Connection,
  CreateTableOptions,
  TableNamesOptions,
} from "./connection";
export {
  ExecutableQuery,
  Query,
@@ -56,87 +38,6 @@ export {
  VectorQuery,
  RecordBatchIterator,
} from "./query";
export { Index, IndexOptions, IvfPqOptions } from "./indices";
-export { Table, AddDataOptions, IndexConfig, UpdateOptions } from "./table";
+export { Table, AddDataOptions, UpdateOptions } from "./table";
export * as embedding from "./embedding";
/**
* Connect to a LanceDB instance at the given URI.
*
* Accepted formats:
*
* - `/path/to/database` - local database
* - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage
* - `db://host:port` - remote database (LanceDB cloud)
* @param {string} uri - The uri of the database. If the database uri starts
* with `db://` then it connects to a remote database.
* @see {@link ConnectionOptions} for more details on the URI format.
* @example
* ```ts
* const conn = await connect("/path/to/database");
* ```
* @example
* ```ts
* const conn = await connect(
* "s3://bucket/path/to/database",
* {storageOptions: {timeout: "60s"}
* });
* ```
*/
export async function connect(
uri: string,
opts?: Partial<ConnectionOptions | RemoteConnectionOptions>,
): Promise<Connection>;
/**
* Connect to a LanceDB instance at the given URI.
*
* Accepted formats:
*
* - `/path/to/database` - local database
* - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage
* - `db://host:port` - remote database (LanceDB cloud)
* @param options - The options to use when connecting to the database
* @see {@link ConnectionOptions} for more details on the URI format.
* @example
* ```ts
* const conn = await connect({
* uri: "/path/to/database",
* storageOptions: {timeout: "60s"}
* });
* ```
*/
export async function connect(
opts: Partial<RemoteConnectionOptions | ConnectionOptions> & { uri: string },
): Promise<Connection>;
export async function connect(
uriOrOptions:
| string
| (Partial<RemoteConnectionOptions | ConnectionOptions> & { uri: string }),
opts: Partial<ConnectionOptions | RemoteConnectionOptions> = {},
): Promise<Connection> {
let uri: string | undefined;
if (typeof uriOrOptions !== "string") {
const { uri: uri_, ...options } = uriOrOptions;
uri = uri_;
opts = options;
} else {
uri = uriOrOptions;
}
if (!uri) {
throw new Error("uri is required");
}
if (uri?.startsWith("db://")) {
return new RemoteConnection(uri, opts as RemoteConnectionOptions);
}
opts = (opts as ConnectionOptions) ?? {};
(<ConnectionOptions>opts).storageOptions = cleanseStorageOptions(
(<ConnectionOptions>opts).storageOptions,
);
const nativeConn = await LanceDbConnection.new(uri, opts);
return new LocalConnection(nativeConn);
}

View File

@@ -1,70 +0,0 @@
import { Data, fromDataToBuffer } from "./arrow";
import { NativeMergeInsertBuilder } from "./native";
/** A builder used to create and run a merge insert operation */
export class MergeInsertBuilder {
#native: NativeMergeInsertBuilder;
/** Construct a MergeInsertBuilder. __Internal use only.__ */
constructor(native: NativeMergeInsertBuilder) {
this.#native = native;
}
/**
* Rows that exist in both the source table (new data) and
* the target table (old data) will be updated, replacing
* the old row with the corresponding matching row.
*
* If there are multiple matches then the behavior is undefined.
* Currently this causes multiple copies of the row to be created
* but that behavior is subject to change.
*
* An optional condition may be specified. If it is, then only
* matched rows that satisfy the condition will be updated. Any
* rows that do not satisfy the condition will be left as they
* are. Failing to satisfy the condition does not cause a
* "matched row" to become a "not matched" row.
*
* The condition should be an SQL string. Use the prefix
* `target.` to refer to rows in the target table (old data)
* and the prefix `source.` to refer to rows in the source
* table (new data).
*
* For example, "target.last_update < source.last_update"
*/
whenMatchedUpdateAll(options?: { where: string }): MergeInsertBuilder {
return new MergeInsertBuilder(
this.#native.whenMatchedUpdateAll(options?.where),
);
}
/**
* Rows that exist only in the source table (new data) should
* be inserted into the target table.
*/
whenNotMatchedInsertAll(): MergeInsertBuilder {
return new MergeInsertBuilder(this.#native.whenNotMatchedInsertAll());
}
/**
* Rows that exist only in the target table (old data) will be
* deleted. An optional condition can be provided to limit what
* data is deleted.
*
* @param options.where - An optional condition to limit what data is deleted
*/
whenNotMatchedBySourceDelete(options?: {
where: string;
}): MergeInsertBuilder {
return new MergeInsertBuilder(
this.#native.whenNotMatchedBySourceDelete(options?.where),
);
}
/**
* Executes the merge insert operation
*
* Nothing is returned but the `Table` is updated
*/
async execute(data: Data): Promise<void> {
const buffer = await fromDataToBuffer(data);
await this.#native.execute(buffer);
}
}
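A usage sketch of this builder (the mergeInsert("id") entry point on Table is assumed here; it is not part of this file):

// Upsert: update rows whose `id` matches, insert the rest.
await tbl
  .mergeInsert("id")
  .whenMatchedUpdateAll({ where: "target.last_update < source.last_update" })
  .whenNotMatchedInsertAll()
  .execute(newData);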


@@ -12,12 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-import {
-  Table as ArrowTable,
-  type IntoVector,
-  RecordBatch,
-  tableFromIPC,
-} from "./arrow";
+import { Table as ArrowTable, RecordBatch, tableFromIPC } from "apache-arrow";
import { type IvfPqOptions } from "./indices";
import {
  RecordBatchIterator as NativeBatchIterator,
@@ -55,60 +50,16 @@ export class RecordBatchIterator implements AsyncIterator<RecordBatch> {
}
/* eslint-enable */
-class RecordBatchIterable<
+/** Common methods supported by all query types */
+export class QueryBase<
  NativeQueryType extends NativeQuery | NativeVectorQuery,
+  QueryType,
> implements AsyncIterable<RecordBatch>
{
-  private inner: NativeQueryType;
-  private options?: QueryExecutionOptions;
-  constructor(inner: NativeQueryType, options?: QueryExecutionOptions) {
-    this.inner = inner;
-    this.options = options;
-  }
-  // biome-ignore lint/suspicious/noExplicitAny: skip
-  [Symbol.asyncIterator](): AsyncIterator<RecordBatch<any>, any, undefined> {
-    return new RecordBatchIterator(
-      this.inner.execute(this.options?.maxBatchLength),
-    );
-  }
-}
-/**
- * Options that control the behavior of a particular query execution
- */
-export interface QueryExecutionOptions {
-  /**
-   * The maximum number of rows to return in a single batch
-   *
-   * Batches may have fewer rows if the underlying data is stored
-   * in smaller chunks.
-   */
-  maxBatchLength?: number;
-}
-/** Common methods supported by all query types */
-export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
-  implements AsyncIterable<RecordBatch>
-{
-  protected constructor(
-    protected inner: NativeQueryType | Promise<NativeQueryType>,
-  ) {
+  protected constructor(protected inner: NativeQueryType) {
    // intentionally empty
  }
-  // call a function on the inner (either a promise or the actual object)
-  protected doCall(fn: (inner: NativeQueryType) => void) {
-    if (this.inner instanceof Promise) {
-      this.inner = this.inner.then((inner) => {
-        fn(inner);
-        return inner;
-      });
-    } else {
-      fn(this.inner);
-    }
-  }
  /**
   * A filter statement to be applied to this query.
   *
@@ -121,17 +72,9 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
   * Filtering performance can often be improved by creating a scalar index
   * on the filter column(s).
   */
-  where(predicate: string): this {
-    this.doCall((inner: NativeQueryType) => inner.onlyIf(predicate));
-    return this;
-  }
-  /**
-   * A filter statement to be applied to this query.
-   * @alias where
-   * @deprecated Use `where` instead
-   */
-  filter(predicate: string): this {
-    return this.where(predicate);
+  where(predicate: string): QueryType {
+    this.inner.onlyIf(predicate);
+    return this as unknown as QueryType;
  }
  /**
@@ -165,12 +108,9 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
   * object insertion order is easy to get wrong and `Map` is more foolproof.
   */
  select(
-    columns: string[] | Map<string, string> | Record<string, string> | string,
-  ): this {
+    columns: string[] | Map<string, string> | Record<string, string>,
+  ): QueryType {
    let columnTuples: [string, string][];
-    if (typeof columns === "string") {
-      columns = [columns];
-    }
    if (Array.isArray(columns)) {
      columnTuples = columns.map((c) => [c, c]);
    } else if (columns instanceof Map) {
@@ -178,10 +118,8 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
    } else {
      columnTuples = Object.entries(columns);
    }
-    this.doCall((inner: NativeQueryType) => {
-      inner.select(columnTuples);
-    });
-    return this;
+    this.inner.select(columnTuples);
+    return this as unknown as QueryType;
  }
  /**
@@ -190,19 +128,13 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
   * By default, a plain search has no limit. If this method is not
   * called then every valid row from the table will be returned.
   */
-  limit(limit: number): this {
-    this.doCall((inner: NativeQueryType) => inner.limit(limit));
-    return this;
+  limit(limit: number): QueryType {
+    this.inner.limit(limit);
+    return this as unknown as QueryType;
  }
-  protected nativeExecute(
-    options?: Partial<QueryExecutionOptions>,
-  ): Promise<NativeBatchIterator> {
-    if (this.inner instanceof Promise) {
-      return this.inner.then((inner) => inner.execute(options?.maxBatchLength));
-    } else {
-      return this.inner.execute(options?.maxBatchLength);
-    }
+  protected nativeExecute(): Promise<NativeBatchIterator> {
+    return this.inner.execute();
  }
  /**
@@ -216,10 +148,8 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
   * single query)
   *
   */
-  protected execute(
-    options?: Partial<QueryExecutionOptions>,
-  ): RecordBatchIterator {
-    return new RecordBatchIterator(this.nativeExecute(options));
+  protected execute(): RecordBatchIterator {
+    return new RecordBatchIterator(this.nativeExecute());
  }
  // biome-ignore lint/suspicious/noExplicitAny: skip
@@ -229,44 +159,20 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
  }
  /** Collect the results as an Arrow @see {@link ArrowTable}. */
-  async toArrow(options?: Partial<QueryExecutionOptions>): Promise<ArrowTable> {
+  async toArrow(): Promise<ArrowTable> {
    const batches = [];
-    let inner;
-    if (this.inner instanceof Promise) {
-      inner = await this.inner;
-    } else {
-      inner = this.inner;
-    }
-    for await (const batch of new RecordBatchIterable(inner, options)) {
+    for await (const batch of this) {
      batches.push(batch);
    }
    return new ArrowTable(batches);
  }
  /** Collect the results as an array of objects. */
-  // biome-ignore lint/suspicious/noExplicitAny: arrow.toArrow() returns any[]
-  async toArray(options?: Partial<QueryExecutionOptions>): Promise<any[]> {
-    const tbl = await this.toArrow(options);
-    return tbl.toArray();
-  }
+  async toArray(): Promise<unknown[]> {
+    const tbl = await this.toArrow();
+    // eslint-disable-next-line @typescript-eslint/no-unsafe-return
+    return tbl.toArray();
+  }
/**
* Generates an explanation of the query execution plan.
*
* @example
* import * as lancedb from "@lancedb/lancedb"
* const db = await lancedb.connect("./.lancedb");
* const table = await db.createTable("my_table", [
* { vector: [1.1, 0.9], id: "1" },
* ]);
* const plan = await table.query().nearestTo([0.5, 0.2]).explainPlan();
*
* @param verbose - If true, provides a more detailed explanation. Defaults to false.
* @returns A Promise that resolves to a string containing the query execution plan explanation.
*/
async explainPlan(verbose = false): Promise<string> {
return await this.inner.explainPlan(verbose);
}
}
/**
@@ -281,8 +187,8 @@ export interface ExecutableQuery {}
 *
 * This builder can be reused to execute the query many times.
 */
-export class VectorQuery extends QueryBase<NativeVectorQuery> {
-  constructor(inner: NativeVectorQuery | Promise<NativeVectorQuery>) {
+export class VectorQuery extends QueryBase<NativeVectorQuery, VectorQuery> {
+  constructor(inner: NativeVectorQuery) {
    super(inner);
  }
@@ -309,8 +215,7 @@ export class VectorQuery extends QueryBase<NativeVectorQuery>
   * you the desired recall.
   */
  nprobes(nprobes: number): VectorQuery {
-    super.doCall((inner) => inner.nprobes(nprobes));
+    this.inner.nprobes(nprobes);
    return this;
  }
@@ -324,7 +229,7 @@ export class VectorQuery extends QueryBase<NativeVectorQuery>
   * whose data type is a fixed-size-list of floats.
   */
  column(column: string): VectorQuery {
-    super.doCall((inner) => inner.column(column));
+    this.inner.column(column);
    return this;
  }
@@ -342,10 +247,8 @@ export class VectorQuery extends QueryBase<NativeVectorQuery>
   *
   * By default "l2" is used.
   */
-  distanceType(
-    distanceType: Required<IvfPqOptions>["distanceType"],
-  ): VectorQuery {
-    super.doCall((inner) => inner.distanceType(distanceType));
+  distanceType(distanceType: string): VectorQuery {
+    this.inner.distanceType(distanceType);
    return this;
  }
@@ -379,7 +282,7 @@ export class VectorQuery extends QueryBase<NativeVectorQuery>
   * distance between the query vector and the actual uncompressed vector.
   */
  refineFactor(refineFactor: number): VectorQuery {
-    super.doCall((inner) => inner.refineFactor(refineFactor));
+    this.inner.refineFactor(refineFactor);
    return this;
  }
@@ -404,7 +307,7 @@ export class VectorQuery extends QueryBase<NativeVectorQuery>
   * factor can often help restore some of the results lost by post filtering.
   */
  postfilter(): VectorQuery {
-    super.doCall((inner) => inner.postfilter());
+    this.inner.postfilter();
    return this;
  }
@@ -418,13 +321,13 @@ export class VectorQuery extends QueryBase<NativeVectorQuery>
   * calculate your recall to select an appropriate value for nprobes.
   */
  bypassVectorIndex(): VectorQuery {
-    super.doCall((inner) => inner.bypassVectorIndex());
+    this.inner.bypassVectorIndex();
    return this;
  }
}
/** A builder for LanceDB queries. */
-export class Query extends QueryBase<NativeQuery> {
+export class Query extends QueryBase<NativeQuery, Query> {
  constructor(tbl: NativeTable) {
    super(tbl.query());
  }
@@ -466,38 +369,9 @@ export class Query extends QueryBase<NativeQuery> {
 * Vector searches always have a `limit`. If `limit` has not been called then
 * a default `limit` of 10 will be used. @see {@link Query#limit}
 */
-  nearestTo(vector: IntoVector): VectorQuery {
-    if (this.inner instanceof Promise) {
-      const nativeQuery = this.inner.then(async (inner) => {
-        if (vector instanceof Promise) {
-          const arr = await vector.then((v) => Float32Array.from(v));
-          return inner.nearestTo(arr);
-        } else {
-          return inner.nearestTo(Float32Array.from(vector));
-        }
-      });
-      return new VectorQuery(nativeQuery);
-    }
-    if (vector instanceof Promise) {
-      const res = (async () => {
-        try {
-          const v = await vector;
-          const arr = Float32Array.from(v);
-          //
-          // biome-ignore lint/suspicious/noExplicitAny: we need to get the `inner`, but js has no package scoping
-          const value: any = this.nearestTo(arr);
-          const inner = value.inner as
-            | NativeVectorQuery
-            | Promise<NativeVectorQuery>;
-          return inner;
-        } catch (e) {
-          return Promise.reject(e);
-        }
-      })();
-      return new VectorQuery(res);
-    } else {
-      const vectorQuery = this.inner.nearestTo(Float32Array.from(vector));
-      return new VectorQuery(vectorQuery);
-    }
+  nearestTo(vector: unknown): VectorQuery {
+    // biome-ignore lint/suspicious/noExplicitAny: skip
+    const vectorQuery = this.inner.nearestTo(Float32Array.from(vector as any));
+    return new VectorQuery(vectorQuery);
  }
}
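A usage sketch of the query builder on the right-hand side (table and vector values are illustrative):

const results = await tbl
  .query()
  .where("id > 1")
  .nearestTo([0.5, 0.2])
  .limit(5)
  .toArray();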


@@ -1,221 +0,0 @@
// Copyright 2023 LanceDB Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import axios, {
AxiosError,
type AxiosResponse,
type ResponseType,
} from "axios";
import { Table as ArrowTable } from "../arrow";
import { tableFromIPC } from "../arrow";
import { VectorQuery } from "../query";
export class RestfulLanceDBClient {
#dbName: string;
#region: string;
#apiKey: string;
#hostOverride?: string;
#closed: boolean = false;
#connectionTimeout: number = 12 * 1000; // 12 seconds
#readTimeout: number = 30 * 1000; // 30 seconds
#session?: import("axios").AxiosInstance;
constructor(
dbName: string,
apiKey: string,
region: string,
hostOverride?: string,
connectionTimeout?: number,
readTimeout?: number,
) {
this.#dbName = dbName;
this.#apiKey = apiKey;
this.#region = region;
this.#hostOverride = hostOverride ?? this.#hostOverride;
this.#connectionTimeout = connectionTimeout ?? this.#connectionTimeout;
this.#readTimeout = readTimeout ?? this.#readTimeout;
}
get session(): import("axios").AxiosInstance {
  // Lazily create and cache the axios instance so every request
  // reuses the same connection settings.
  if (this.#session === undefined) {
    this.#session = axios.create({
      baseURL: this.url,
      headers: {
        // biome-ignore lint: external API
        Authorization: `Bearer ${this.#apiKey}`,
      },
      transformResponse: decodeErrorData,
      timeout: this.#connectionTimeout,
    });
  }
  return this.#session;
}
get url(): string {
return (
this.#hostOverride ??
`https://${this.#dbName}.${this.#region}.api.lancedb.com`
);
}
get headers(): { [key: string]: string } {
const headers: { [key: string]: string } = {
"x-api-key": this.#apiKey,
"x-request-id": "na",
};
if (this.#region === "local") {
headers["Host"] = `${this.#dbName}.${this.#region}.api.lancedb.com`;
}
if (this.#hostOverride) {
headers["x-lancedb-database"] = this.#dbName;
}
return headers;
}
isOpen(): boolean {
return !this.#closed;
}
private checkNotClosed(): void {
if (this.#closed) {
throw new Error("Connection is closed");
}
}
close(): void {
this.#session = undefined;
this.#closed = true;
}
// biome-ignore lint/suspicious/noExplicitAny: api request params
async get(uri: string, params?: Record<string, any>): Promise<any> {
this.checkNotClosed();
uri = new URL(uri, this.url).toString();
let response;
try {
response = await this.session.get(uri, {
headers: this.headers,
params,
});
} catch (e) {
if (e instanceof AxiosError && e.response !== undefined) {
  // Surface HTTP errors through checkStatus; rethrow transport errors.
  response = e.response;
} else {
  throw e;
}
}
RestfulLanceDBClient.checkStatus(response!);
return response!.data;
}
// biome-ignore lint/suspicious/noExplicitAny: api response
async post(uri: string, body?: any): Promise<any>;
async post(
uri: string,
// biome-ignore lint/suspicious/noExplicitAny: api request
body: any,
additional: {
config?: { responseType: "arraybuffer" };
headers?: Record<string, string>;
params?: Record<string, string>;
},
): Promise<Buffer>;
async post(
uri: string,
// biome-ignore lint/suspicious/noExplicitAny: api request
body?: any,
additional?: {
config?: { responseType: ResponseType };
headers?: Record<string, string>;
params?: Record<string, string>;
},
// biome-ignore lint/suspicious/noExplicitAny: api response
): Promise<any> {
this.checkNotClosed();
uri = new URL(uri, this.url).toString();
additional = Object.assign(
{ config: { responseType: "json" } },
additional,
);
const headers = { ...this.headers, ...additional.headers };
if (!headers["Content-Type"]) {
headers["Content-Type"] = "application/json";
}
let response;
try {
response = await this.session.post(uri, body, {
headers,
responseType: additional!.config!.responseType,
// Pass a plain object; axios does not serialize Map params.
params: additional.params ?? {},
});
} catch (e) {
if (e instanceof AxiosError && e.response !== undefined) {
  // Surface HTTP errors through checkStatus; rethrow transport errors.
  response = e.response;
} else {
  throw e;
}
}
RestfulLanceDBClient.checkStatus(response!);
if (additional!.config!.responseType === "arraybuffer") {
return response!.data;
} else {
return JSON.parse(response!.data);
}
}
async listTables(limit = 10, pageToken = ""): Promise<string[]> {
const json = await this.get("/v1/table", { limit, pageToken });
return json.tables;
}
async query(tableName: string, query: VectorQuery): Promise<ArrowTable> {
const tbl = await this.post(`/v1/table/${tableName}/query`, query, {
config: {
responseType: "arraybuffer",
},
});
return tableFromIPC(tbl);
}
static checkStatus(response: AxiosResponse): void {
if (response.status === 404) {
throw new Error(`Not found: ${response.data}`);
} else if (response.status >= 400 && response.status < 500) {
throw new Error(
`Bad Request: ${response.status}, error: ${response.data}`,
);
} else if (response.status >= 500 && response.status < 600) {
throw new Error(
`Internal Server Error: ${response.status}, error: ${response.data}`,
);
} else if (response.status !== 200) {
throw new Error(
`Unknown Error: ${response.status}, error: ${response.data}`,
);
}
}
}
function decodeErrorData(data: unknown) {
if (Buffer.isBuffer(data)) {
const decoded = data.toString("utf-8");
return decoded;
}
return data;
}
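
For reference, a minimal sketch of how this now-removed client was typically driven; the database name, API key, and region below are placeholders, not values from this repository:

async function remoteExample(): Promise<void> {
  // Placeholder credentials; a real deployment supplies its own.
  const client = new RestfulLanceDBClient("my-db", "sk-placeholder", "us-east-1");
  // GET /v1/table returns up to `limit` table names per page.
  const tables = await client.listTables(10);
  console.log(tables);
  client.close();
}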

Some files were not shown because too many files have changed in this diff.