Compare commits

1 Commit

Author      SHA1        Message                                                              Date
Andrew Yao  ea1f96dab0  build(python): Add project.dynamic = ["version"] to pyproject.toml  2024-12-24 22:27:54 -08:00

365 changed files with 5968 additions and 25507 deletions

View File

@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.18.0"
+current_version = "0.14.1"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.

View File

@@ -52,7 +52,12 @@ runs:
       args: ${{ inputs.args }}
       before-script-linux: |
         set -e
-        yum install -y openssl-devel clang \
-          && curl -L https://github.com/protocolbuffers/protobuf/releases/download/v24.4/protoc-24.4-linux-aarch_64.zip > /tmp/protoc.zip \
+        apt install -y unzip
+        if [ $(uname -m) = "x86_64" ]; then
+          PROTOC_ARCH="x86_64"
+        else
+          PROTOC_ARCH="aarch_64"
+        fi
+        curl -L https://github.com/protocolbuffers/protobuf/releases/download/v24.4/protoc-24.4-linux-$PROTOC_ARCH.zip > /tmp/protoc.zip \
           && unzip /tmp/protoc.zip -d /usr/local \
           && rm /tmp/protoc.zip
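
For readers skimming the shell above: the new before-script picks the protoc archive by machine architecture. A minimal Python sketch of the same selection logic (illustrative only, not part of the repository; `protoc_download_url` is a hypothetical helper):

```python
# Sketch of the arch-detection logic in the before-script above (assumed
# mapping: uname -m "x86_64" -> "x86_64", everything else -> "aarch_64").
import platform

def protoc_download_url(version: str = "24.4") -> str:
    arch = "x86_64" if platform.machine() == "x86_64" else "aarch_64"
    return (
        "https://github.com/protocolbuffers/protobuf/releases/download/"
        f"v{version}/protoc-{version}-linux-{arch}.zip"
    )

print(protoc_download_url())
```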

View File

@@ -20,7 +20,7 @@ runs:
     uses: PyO3/maturin-action@v1
     with:
       command: build
+      # TODO: pass through interpreter
       args: ${{ inputs.args }}
       docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
       working-directory: python
-      interpreter: 3.${{ inputs.python-minor-version }}

View File

@@ -28,7 +28,7 @@ runs:
       args: ${{ inputs.args }}
       docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
       working-directory: python
-  - uses: actions/upload-artifact@v4
+  - uses: actions/upload-artifact@v3
     with:
      name: windows-wheels
      path: python\target\wheels

View File

@@ -1,31 +0,0 @@
name: Check license headers
on:
  push:
    branches:
      - main
  pull_request:
    paths:
      - rust/**
      - python/**
      - nodejs/**
      - java/**
      - .github/workflows/license-header-check.yml
jobs:
  check-licenses:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v4
      - name: Install license-header-checker
        working-directory: /tmp
        run: |
          curl -s https://raw.githubusercontent.com/lluissm/license-header-checker/master/install.sh | bash
          mv /tmp/bin/license-header-checker /usr/local/bin/
      - name: Check license headers (rust)
        run: license-header-checker -a -v ./rust/license_header.txt ./ rs && [[ -z `git status -s` ]]
      - name: Check license headers (python)
        run: license-header-checker -a -v ./python/license_header.txt python py && [[ -z `git status -s` ]]
      - name: Check license headers (typescript)
        run: license-header-checker -a -v ./nodejs/license_header.txt nodejs ts && [[ -z `git status -s` ]]
      - name: Check license headers (java)
        run: license-header-checker -a -v ./nodejs/license_header.txt java java && [[ -z `git status -s` ]]
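
The deleted workflow above relies on lluissm/license-header-checker. A rough Python sketch of the check it performs (a hypothetical simplification, assuming one header file and one file extension per language; not the real tool):

```python
# Hypothetical, simplified version of a license-header check: verify every
# source file with the given extension starts with the expected header text.
from pathlib import Path

def files_missing_header(root: str, header_path: str, ext: str) -> list[Path]:
    header = Path(header_path).read_text()
    return [
        path for path in Path(root).rglob(f"*.{ext}")
        if not path.read_text(errors="ignore").startswith(header)
    ]

# Usage mirroring the rust step above (illustrative):
# missing = files_missing_header("rust", "rust/license_header.txt", "rs")
```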

View File

@@ -43,7 +43,7 @@ on:
 jobs:
   make-release:
     # Creates tag and GH release. The GH release will trigger the build and release jobs.
-    runs-on: ubuntu-24.04
+    runs-on: ubuntu-latest
     permissions:
       contents: write
     steps:
@@ -57,14 +57,15 @@ jobs:
           # trigger any workflows watching for new tags. See:
           # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
           token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
-      - name: Validate Lance dependency is at stable version
-        if: ${{ inputs.type == 'stable' }}
-        run: python ci/validate_stable_lance.py
       - name: Set git configs for bumpversion
         shell: bash
         run: |
           git config user.name 'Lance Release'
           git config user.email 'lance-dev@lancedb.com'
+      - name: Set up Python 3.11
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
       - name: Bump Python version
         if: ${{ inputs.python }}
         working-directory: python

View File

@@ -106,18 +106,6 @@ jobs:
           python ci/mock_openai.py &
           cd nodejs/examples
           npm test
-      - name: Check docs
-        run: |
-          # We run this as part of the job because the binary needs to be built
-          # first to export the types of the native code.
-          set -e
-          npm ci
-          npm run docs
-          if ! git diff --exit-code; then
-            echo "Docs need to be updated"
-            echo "Run 'npm run docs', fix any warnings, and commit the changes."
-            exit 1
-          fi
   macos:
     timeout-minutes: 30
     runs-on: "macos-14"

View File

@@ -334,50 +334,51 @@ jobs:
           path: |
             node/dist/lancedb-vectordb-win32*.tgz
-  node-windows-arm64:
-    name: vectordb ${{ matrix.config.arch }}-pc-windows-msvc
-    # if: startsWith(github.ref, 'refs/tags/v')
-    runs-on: ubuntu-latest
-    container: alpine:edge
-    strategy:
-      fail-fast: false
-      matrix:
-        config:
-          # - arch: x86_64
-          - arch: aarch64
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Install dependencies
-        run: |
-          apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
-          curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
-          echo "source $HOME/.cargo/env" >> saved_env
-          echo "export CC=clang" >> saved_env
-          echo "export AR=llvm-ar" >> saved_env
-          source "$HOME/.cargo/env"
-          rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
-          (mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
-          echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
-          echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
-      - name: Configure x86_64 build
-        if: ${{ matrix.config.arch == 'x86_64' }}
-        run: |
-          echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
-      - name: Configure aarch64 build
-        if: ${{ matrix.config.arch == 'aarch64' }}
-        run: |
-          echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
-      - name: Build Windows Artifacts
-        run: |
-          source ./saved_env
-          bash ci/manylinux_node/build_vectordb.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-pc-windows-msvc
-      - name: Upload Windows Artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: node-native-windows-${{ matrix.config.arch }}
-          path: |
-            node/dist/lancedb-vectordb-win32*.tgz
+  # TODO: https://github.com/lancedb/lancedb/issues/1975
+  # node-windows-arm64:
+  #   name: vectordb ${{ matrix.config.arch }}-pc-windows-msvc
+  #   # if: startsWith(github.ref, 'refs/tags/v')
+  #   runs-on: ubuntu-latest
+  #   container: alpine:edge
+  #   strategy:
+  #     fail-fast: false
+  #     matrix:
+  #       config:
+  #         # - arch: x86_64
+  #         - arch: aarch64
+  #   steps:
+  #     - name: Checkout
+  #       uses: actions/checkout@v4
+  #     - name: Install dependencies
+  #       run: |
+  #         apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
+  #         curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
+  #         echo "source $HOME/.cargo/env" >> saved_env
+  #         echo "export CC=clang" >> saved_env
+  #         echo "export AR=llvm-ar" >> saved_env
+  #         source "$HOME/.cargo/env"
+  #         rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
+  #         (mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
+  #         echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
+  #         echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
+  #     - name: Configure x86_64 build
+  #       if: ${{ matrix.config.arch == 'x86_64' }}
+  #       run: |
+  #         echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
+  #     - name: Configure aarch64 build
+  #       if: ${{ matrix.config.arch == 'aarch64' }}
+  #       run: |
+  #         echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
+  #     - name: Build Windows Artifacts
+  #       run: |
+  #         source ./saved_env
+  #         bash ci/manylinux_node/build_vectordb.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-pc-windows-msvc
+  #     - name: Upload Windows Artifacts
+  #       uses: actions/upload-artifact@v4
+  #       with:
+  #         name: node-native-windows-${{ matrix.config.arch }}
+  #         path: |
+  #           node/dist/lancedb-vectordb-win32*.tgz
   nodejs-windows:
     name: lancedb ${{ matrix.target }}
@@ -413,57 +414,58 @@ jobs:
           path: |
             nodejs/dist/*.node
-  nodejs-windows-arm64:
-    name: lancedb ${{ matrix.config.arch }}-pc-windows-msvc
-    # Only runs on tags that matches the make-release action
-    # if: startsWith(github.ref, 'refs/tags/v')
-    runs-on: ubuntu-latest
-    container: alpine:edge
-    strategy:
-      fail-fast: false
-      matrix:
-        config:
-          # - arch: x86_64
-          - arch: aarch64
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Install dependencies
-        run: |
-          apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
-          curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
-          echo "source $HOME/.cargo/env" >> saved_env
-          echo "export CC=clang" >> saved_env
-          echo "export AR=llvm-ar" >> saved_env
-          source "$HOME/.cargo/env"
-          rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
-          (mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
-          echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
-          echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
-          printf '#!/bin/sh\ncargo "$@"' > $HOME/.cargo/bin/cargo-xwin
-          chmod u+x $HOME/.cargo/bin/cargo-xwin
-      - name: Configure x86_64 build
-        if: ${{ matrix.config.arch == 'x86_64' }}
-        run: |
-          echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
-      - name: Configure aarch64 build
-        if: ${{ matrix.config.arch == 'aarch64' }}
-        run: |
-          echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
-      - name: Build Windows Artifacts
-        run: |
-          source ./saved_env
-          bash ci/manylinux_node/build_lancedb.sh ${{ matrix.config.arch }}
-      - name: Upload Windows Artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: nodejs-native-windows-${{ matrix.config.arch }}
-          path: |
-            nodejs/dist/*.node
+  # TODO: https://github.com/lancedb/lancedb/issues/1975
+  # nodejs-windows-arm64:
+  #   name: lancedb ${{ matrix.config.arch }}-pc-windows-msvc
+  #   # Only runs on tags that matches the make-release action
+  #   # if: startsWith(github.ref, 'refs/tags/v')
+  #   runs-on: ubuntu-latest
+  #   container: alpine:edge
+  #   strategy:
+  #     fail-fast: false
+  #     matrix:
+  #       config:
+  #         # - arch: x86_64
+  #         - arch: aarch64
+  #   steps:
+  #     - name: Checkout
+  #       uses: actions/checkout@v4
+  #     - name: Install dependencies
+  #       run: |
+  #         apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
+  #         curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
+  #         echo "source $HOME/.cargo/env" >> saved_env
+  #         echo "export CC=clang" >> saved_env
+  #         echo "export AR=llvm-ar" >> saved_env
+  #         source "$HOME/.cargo/env"
+  #         rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
+  #         (mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
+  #         echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
+  #         echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
+  #         printf '#!/bin/sh\ncargo "$@"' > $HOME/.cargo/bin/cargo-xwin
+  #         chmod u+x $HOME/.cargo/bin/cargo-xwin
+  #     - name: Configure x86_64 build
+  #       if: ${{ matrix.config.arch == 'x86_64' }}
+  #       run: |
+  #         echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
+  #     - name: Configure aarch64 build
+  #       if: ${{ matrix.config.arch == 'aarch64' }}
+  #       run: |
+  #         echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
+  #     - name: Build Windows Artifacts
+  #       run: |
+  #         source ./saved_env
+  #         bash ci/manylinux_node/build_lancedb.sh ${{ matrix.config.arch }}
+  #     - name: Upload Windows Artifacts
+  #       uses: actions/upload-artifact@v4
+  #       with:
+  #         name: nodejs-native-windows-${{ matrix.config.arch }}
+  #         path: |
+  #           nodejs/dist/*.node
   release:
     name: vectordb NPM Publish
-    needs: [node, node-macos, node-linux-gnu, node-linux-musl, node-windows, node-windows-arm64]
+    needs: [node, node-macos, node-linux-gnu, node-linux-musl, node-windows]
     runs-on: ubuntu-latest
     # Only runs on tags that matches the make-release action
     if: startsWith(github.ref, 'refs/tags/v')
@@ -503,7 +505,7 @@ jobs:
   release-nodejs:
     name: lancedb NPM Publish
-    needs: [nodejs-macos, nodejs-linux-gnu, nodejs-linux-musl, nodejs-windows, nodejs-windows-arm64]
+    needs: [nodejs-macos, nodejs-linux-gnu, nodejs-linux-musl, nodejs-windows]
     runs-on: ubuntu-latest
     # Only runs on tags that matches the make-release action
     if: startsWith(github.ref, 'refs/tags/v')

View File

@@ -15,21 +15,15 @@ jobs:
           - platform: x86_64
             manylinux: "2_17"
            extra_args: ""
-            runner: ubuntu-22.04
          - platform: x86_64
            manylinux: "2_28"
            extra_args: "--features fp16kernels"
-            runner: ubuntu-22.04
          - platform: aarch64
-            manylinux: "2_17"
+            manylinux: "2_24"
            extra_args: ""
-            # For successful fat LTO builds, we need a large runner to avoid OOM errors.
-            runner: ubuntu-2404-8x-arm64
-          - platform: aarch64
-            manylinux: "2_28"
-            extra_args: "--features fp16kernels"
-            runner: ubuntu-2404-8x-arm64
-    runs-on: ${{ matrix.config.runner }}
+            # We don't build fp16 kernels for aarch64, because it uses
+            # cross compilation image, which doesn't have a new enough compiler.
+    runs-on: "ubuntu-22.04"
     steps:
       - uses: actions/checkout@v4
         with:

View File

@@ -30,17 +30,16 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v5
         with:
-          python-version: "3.12"
+          python-version: "3.11"
       - name: Install ruff
         run: |
-          pip install ruff==0.9.9
+          pip install ruff==0.5.4
       - name: Format check
         run: ruff format --check .
       - name: Lint
         run: ruff check .
-  type-check:
-    name: "Type Check"
+  doctest:
+    name: "Doctest"
     timeout-minutes: 30
     runs-on: "ubuntu-22.04"
     defaults:
@@ -55,36 +54,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v5
         with:
-          python-version: "3.12"
-      - name: Install protobuf compiler
-        run: |
-          sudo apt update
-          sudo apt install -y protobuf-compiler
-          pip install toml
-      - name: Install dependencies
-        run: |
-          python ../ci/parse_requirements.py pyproject.toml --extras dev,tests,embeddings > requirements.txt
-          pip install -r requirements.txt
-      - name: Run pyright
-        run: pyright
-  doctest:
-    name: "Doctest"
-    timeout-minutes: 30
-    runs-on: "ubuntu-24.04"
-    defaults:
-      run:
-        shell: bash
-        working-directory: python
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          lfs: true
-      - name: Set up Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: "3.12"
+          python-version: "3.11"
           cache: "pip"
       - name: Install protobuf
         run: |
@@ -105,8 +75,8 @@ jobs:
     timeout-minutes: 30
     strategy:
       matrix:
-        python-minor-version: ["9", "12"]
-    runs-on: "ubuntu-24.04"
+        python-minor-version: ["9", "11"]
+    runs-on: "ubuntu-22.04"
     defaults:
       run:
         shell: bash
@@ -157,7 +127,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v5
         with:
-          python-version: "3.12"
+          python-version: "3.11"
       - uses: Swatinem/rust-cache@v2
         with:
           workspaces: python
@@ -187,7 +157,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v5
         with:
-          python-version: "3.12"
+          python-version: "3.11"
       - uses: Swatinem/rust-cache@v2
         with:
           workspaces: python
@@ -198,7 +168,7 @@ jobs:
         run: rm -rf target/wheels
   pydantic1x:
     timeout-minutes: 30
-    runs-on: "ubuntu-24.04"
+    runs-on: "ubuntu-22.04"
     defaults:
       run:
         shell: bash

View File

@@ -22,7 +22,6 @@ env:
   # "1" means line tables only, which is useful for panic tracebacks.
   RUSTFLAGS: "-C debuginfo=1"
   RUST_BACKTRACE: "1"
-  CARGO_INCREMENTAL: 0
 jobs:
   lint:
@@ -52,33 +51,6 @@ jobs:
       - name: Run clippy
         run: cargo clippy --workspace --tests --all-features -- -D warnings
-  build-no-lock:
-    runs-on: ubuntu-24.04
-    timeout-minutes: 30
-    env:
-      # Need up-to-date compilers for kernels
-      CC: clang
-      CXX: clang++
-    steps:
-      - uses: actions/checkout@v4
-      # Building without a lock file often requires the latest Rust version since downstream
-      # dependencies may have updated their minimum Rust version.
-      - uses: actions-rust-lang/setup-rust-toolchain@v1
-        with:
-          toolchain: "stable"
-      # Remove cargo.lock to force a fresh build
-      - name: Remove Cargo.lock
-        run: rm -f Cargo.lock
-      - uses: rui314/setup-mold@v1
-      - uses: Swatinem/rust-cache@v2
-      - name: Install dependencies
-        run: |
-          sudo apt update
-          sudo apt install -y protobuf-compiler libssl-dev
-      - name: Build all
-        run: |
-          cargo build --benches --all-features --tests
   linux:
     timeout-minutes: 30
     # To build all features, we need more disk space than is available
@@ -103,11 +75,8 @@ jobs:
           workspaces: rust
       - name: Install dependencies
         run: |
-          # This shaves 2 minutes off this step in CI. This doesn't seem to be
-          # necessary in standard runners, but it is in the 4x runners.
-          sudo rm /var/lib/man-db/auto-update
+          sudo apt update
           sudo apt install -y protobuf-compiler libssl-dev
-      - uses: rui314/setup-mold@v1
       - name: Make Swap
         run: |
           sudo fallocate -l 16G /swapfile
@@ -118,11 +87,11 @@ jobs:
           working-directory: .
           run: docker compose up --detach --wait
       - name: Build
-        run: cargo build --all-features --tests --locked --examples
+        run: cargo build --all-features
       - name: Run tests
-        run: cargo test --all-features --locked
+        run: cargo test --all-features
       - name: Run examples
-        run: cargo run --example simple --locked
+        run: cargo run --example simple
   macos:
     timeout-minutes: 30
@@ -146,14 +115,11 @@ jobs:
           workspaces: rust
       - name: Install dependencies
         run: brew install protobuf
+      - name: Build
+        run: cargo build --all-features
       - name: Run tests
-        run: |
-          # Don't run the s3 integration tests since docker isn't available
-          # on this image.
-          ALL_FEATURES=`cargo metadata --format-version=1 --no-deps \
-            | jq -r '.packages[] | .features | keys | .[]' \
-            | grep -v s3-test | sort | uniq | paste -s -d "," -`
-          cargo test --features $ALL_FEATURES --locked
+        # Run with everything except the integration tests.
+        run: cargo test --features remote,fp16kernels
   windows:
     runs-on: windows-2022
@@ -174,40 +140,8 @@ jobs:
       - name: Run tests
         run: |
           $env:VCPKG_ROOT = $env:VCPKG_INSTALLATION_ROOT
-          cargo test --features remote --locked
-  windows-arm64-cross:
-    # We cross compile in Node releases, so we want to make sure
-    # this can run successfully.
-    runs-on: ubuntu-latest
-    container: alpine:edge
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Install dependencies (part 1)
-        run: |
-          set -e
-          apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
-      - name: Install rust
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-        with:
-          target: aarch64-pc-windows-msvc
-      - name: Install dependencies (part 2)
-        run: |
-          set -e
-          mkdir -p sysroot
-          cd sysroot
-          sh ../ci/sysroot-aarch64-pc-windows-msvc.sh
-      - name: Check
-        env:
-          CC: clang
-          AR: llvm-ar
-          C_INCLUDE_PATH: /usr/aarch64-pc-windows-msvc/usr/include
-          CARGO_BUILD_TARGET: aarch64-pc-windows-msvc
-          RUSTFLAGS: -Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib
-        run: |
-          source $HOME/.cargo/env
-          cargo check --features remote --locked
+          cargo build
+          cargo test
   windows-arm64:
     runs-on: windows-4x-arm
@@ -266,7 +200,7 @@ jobs:
       - name: Install Rust
         run: |
           Invoke-WebRequest https://win.rustup.rs/x86_64 -OutFile rustup-init.exe
-          .\rustup-init.exe -y --default-host aarch64-pc-windows-msvc --default-toolchain 1.83.0
+          .\rustup-init.exe -y --default-host aarch64-pc-windows-msvc
         shell: powershell
       - name: Add Rust to PATH
         run: |
@@ -302,7 +236,8 @@ jobs:
       - name: Run tests
         run: |
           $env:VCPKG_ROOT = $env:VCPKG_INSTALLATION_ROOT
-          cargo test --target aarch64-pc-windows-msvc --features remote --locked
+          cargo build --target aarch64-pc-windows-msvc
+          cargo test --target aarch64-pc-windows-msvc
   msrv:
     # Check the minimum supported Rust version

.gitignore (vendored)
View File

@@ -9,6 +9,7 @@ venv
 .vscode
 .zed
 rust/target
+rust/Cargo.lock
 site
@@ -41,3 +42,5 @@ dist
 target
 **/sccache.log
+Cargo.lock

View File

@@ -1,27 +1,21 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v3.2.0
     hooks:
       - id: check-yaml
       - id: end-of-file-fixer
       - id: trailing-whitespace
   - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
-    rev: v0.9.9
+    rev: v0.2.2
     hooks:
       - id: ruff
-  # - repo: https://github.com/RobertCraigie/pyright-python
-  #   rev: v1.1.395
-  #   hooks:
-  #     - id: pyright
-  #       args: ["--project", "python"]
-  #       additional_dependencies: [pyarrow-stubs]
   - repo: local
     hooks:
       - id: local-biome-check
         name: biome check
         entry: npx @biomejs/biome@1.8.3 check --config-path nodejs/biome.json nodejs/
         language: system
         types: [text]
         files: "nodejs/.*"
         exclude: nodejs/lancedb/native.d.ts|nodejs/dist/.*|nodejs/examples/.*

View File

@@ -1,78 +0,0 @@
# Contributing to LanceDB
LanceDB is an open-source project and we welcome contributions from the community.
This document outlines the process for contributing to LanceDB.
## Reporting Issues
If you encounter a bug or have a feature request, please open an issue on the
[GitHub issue tracker](https://github.com/lancedb/lancedb).
## Picking an issue
We track issues on the GitHub issue tracker. If you are looking for something to
work on, check the [good first issue](https://github.com/lancedb/lancedb/contribute) label. These issues are typically the best described and have the smallest scope.
If there's an issue you are interested in working on, please leave a comment on the issue. This will help us avoid duplicate work. Additionally, if you have questions about the issue, please ask them in the issue comments. We are happy to provide guidance on how to approach the issue.
## Configuring Git
First, fork the repository on GitHub, then clone your fork:
```bash
git clone https://github.com/<username>/lancedb.git
cd lancedb
```
Then add the main repository as a remote:
```bash
git remote add upstream https://github.com/lancedb/lancedb.git
git fetch upstream
```
## Setting up your development environment
We have development environments for Python, Typescript, and Java. Each environment has its own setup instructions.
* [Python](python/CONTRIBUTING.md)
* [Typescript](nodejs/CONTRIBUTING.md)
<!-- TODO: add Java contributing guide -->
* [Documentation](docs/README.md)
## Best practices for pull requests
For the best chance of having your pull request accepted, please follow these guidelines:
1. Unit test all bug fixes and new features. Your code will not be merged if it
doesn't have tests.
1. If you change the public API, update the documentation in the `docs` directory.
1. Aim to minimize the number of changes in each pull request. Keep to solving
one problem at a time, when possible.
1. Before marking a pull request ready-for-review, do a self review of your code.
Is it clear why you are making the changes? Are the changes easy to understand?
1. Use [conventional commit messages](https://www.conventionalcommits.org/en/) as pull request titles. Examples:
* New feature: `feat: adding foo API`
* Bug fix: `fix: issue with foo API`
* Documentation change: `docs: adding foo API documentation`
1. If your pull request is a work in progress, leave the pull request as a draft.
We will assume the pull request is ready for review when it is opened.
1. When writing tests, test the error cases. Make sure they have understandable
error messages.
## Project structure
The core library is written in Rust. The Python, Typescript, and Java libraries
are wrappers around the Rust library.
* `src/lancedb`: Rust library source code
* `python`: Python package source code
* `nodejs`: Typescript package source code
* `node`: **Deprecated** Typescript package source code
* `java`: Java package source code
* `docs`: Documentation source code
## Release process
For information on the release process, see: [release_process.md](release_process.md)

Cargo.lock (generated)

File diff suppressed because it is too large.

View File

@@ -21,52 +21,41 @@ categories = ["database-implementations"]
 rust-version = "1.78.0"
 [workspace.dependencies]
-lance = { "version" = "=0.24.1", "features" = ["dynamodb"] }
-lance-io = { version = "=0.24.1" }
-lance-index = { version = "=0.24.1" }
-lance-linalg = { version = "=0.24.1" }
-lance-table = { version = "=0.24.1" }
-lance-testing = { version = "=0.24.1" }
-lance-datafusion = { version = "=0.24.1" }
-lance-encoding = { version = "=0.24.1" }
+lance = { "version" = "=0.21.0", "features" = [
+    "dynamodb",
+], git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
+lance-io = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
+lance-index = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
+lance-linalg = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
+lance-table = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
+lance-testing = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
+lance-datafusion = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
+lance-encoding = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
 # Note that this one does not include pyarrow
-arrow = { version = "54.1", optional = false }
-arrow-array = "54.1"
-arrow-data = "54.1"
-arrow-ipc = "54.1"
-arrow-ord = "54.1"
-arrow-schema = "54.1"
-arrow-arith = "54.1"
-arrow-cast = "54.1"
+arrow = { version = "53.2", optional = false }
+arrow-array = "53.2"
+arrow-data = "53.2"
+arrow-ipc = "53.2"
+arrow-ord = "53.2"
+arrow-schema = "53.2"
+arrow-arith = "53.2"
+arrow-cast = "53.2"
 async-trait = "0"
-datafusion = { version = "45.0", default-features = false }
-datafusion-catalog = "45.0"
-datafusion-common = { version = "45.0", default-features = false }
-datafusion-execution = "45.0"
-datafusion-expr = "45.0"
-datafusion-physical-plan = "45.0"
-env_logger = "0.11"
+chrono = "0.4.35"
+datafusion-common = "42.0"
+datafusion-physical-plan = "42.0"
+env_logger = "0.10"
 half = { "version" = "=2.4.1", default-features = false, features = [
     "num-traits",
 ] }
 futures = "0"
 log = "0.4"
-moka = { version = "0.12", features = ["future"] }
-object_store = "0.11.0"
+moka = { version = "0.11", features = ["future"] }
+object_store = "0.10.2"
 pin-project = "1.0.7"
-snafu = "0.8"
+snafu = "0.7.4"
 url = "2"
 num-traits = "0.2"
 rand = "0.8"
 regex = "1.10"
 lazy_static = "1"
-semver = "1.0.25"
-# Temporary pins to work around downstream issues
-# https://github.com/apache/arrow-rs/commit/2fddf85afcd20110ce783ed5b4cdeb82293da30b
-chrono = "=0.4.39"
-# https://github.com/RustCrypto/formats/issues/1684
-base64ct = "=1.6.0"
-# Workaround for: https://github.com/eira-fransham/crunchy/issues/13
-crunchy = "=0.2.2"

View File

@@ -1,41 +0,0 @@
import argparse

import toml


def parse_dependencies(pyproject_path, extras=None):
    with open(pyproject_path, "r") as file:
        pyproject = toml.load(file)

    dependencies = pyproject.get("project", {}).get("dependencies", [])
    for dependency in dependencies:
        print(dependency)

    optional_dependencies = pyproject.get("project", {}).get(
        "optional-dependencies", {}
    )
    if extras:
        for extra in extras.split(","):
            for dep in optional_dependencies.get(extra, []):
                print(dep)


def main():
    parser = argparse.ArgumentParser(
        description="Generate requirements.txt from pyproject.toml"
    )
    parser.add_argument("path", type=str, help="Path to pyproject.toml")
    parser.add_argument(
        "--extras",
        type=str,
        help="Comma-separated list of extras to include",
        default="",
    )
    args = parser.parse_args()
    parse_dependencies(args.path, args.extras)


if __name__ == "__main__":
    main()
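
To illustrate what the deleted script extracts, here is a self-contained sketch using the stdlib `tomllib` (the script itself uses the third-party `toml` package; the sample pyproject content below is invented):

```python
import tomllib  # stdlib since Python 3.11

SAMPLE_PYPROJECT = """
[project]
dependencies = ["pylance==0.21.0", "tqdm"]

[project.optional-dependencies]
tests = ["pytest"]
"""

pyproject = tomllib.loads(SAMPLE_PYPROJECT)
# Base dependencies are printed one per line, requirements.txt style.
for dep in pyproject["project"]["dependencies"]:
    print(dep)
# Passing --extras tests would also emit the optional "tests" group.
for dep in pyproject["project"]["optional-dependencies"]["tests"]:
    print(dep)
```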

View File

@@ -53,7 +53,7 @@ curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-42
 curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/149578fb3b621cdb61ee1813b9b3e791/463ad1b0783ebda908fd6c16a4abfe93.cab
 curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/5c986c4f393c6b09d5aec3b539e9fb4a/5a22e5cde814b041749fb271547f4dd5.cab
-# dbghelp.lib fwpuclnt.lib arm64rt.lib
+# fwpuclnt.lib arm64rt.lib
 curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/7a332420d812f7c1d41da865ae5a7c52/windows%20sdk%20desktop%20libs%20arm64-x86_en-us.msi
 curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/19de98ed4a79938d0045d19c047936b3/3e2f7be479e3679d700ce0782e4cc318.cab
@@ -98,7 +98,7 @@ find /usr/aarch64-pc-windows-msvc/usr/include -type f -exec sed -i -E 's/(#inclu
 # reason: https://developercommunity.visualstudio.com/t/libucrtlibstreamobj-error-lnk2001-unresolved-exter/1544787#T-ND1599818
 # I don't understand the 'correct' fix for this, arm64rt.lib is supposed to be the workaround
-(cd 'program files/windows kits/10/lib/10.0.26100.0/um/arm64' && cp advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib runtimeobject.lib dbghelp.lib fwpuclnt.lib arm64rt.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)
+(cd 'program files/windows kits/10/lib/10.0.26100.0/um/arm64' && cp advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib runtimeobject.lib fwpuclnt.lib arm64rt.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)
 (cd 'contents/vc/tools/msvc/14.16.27023/lib/arm64' && cp libcmt.lib libvcruntime.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)

View File

@@ -1,34 +0,0 @@
import tomllib

found_preview_lance = False

with open("Cargo.toml", "rb") as f:
    cargo_data = tomllib.load(f)

for name, dep in cargo_data["workspace"]["dependencies"].items():
    if name == "lance" or name.startswith("lance-"):
        if isinstance(dep, str):
            version = dep
        elif isinstance(dep, dict):
            # Version doesn't have the beta tag in it, so we instead look
            # at the git tag.
            version = dep.get('tag', dep.get('version'))
        else:
            raise ValueError("Unexpected type for dependency: " + str(dep))
        if "beta" in version:
            found_preview_lance = True
            print(f"Dependency '{name}' is a preview version: {version}")

with open("python/pyproject.toml", "rb") as f:
    py_proj_data = tomllib.load(f)

for dep in py_proj_data["project"]["dependencies"]:
    if dep.startswith("pylance"):
        if "b" in dep:
            found_preview_lance = True
            print(f"Dependency '{dep}' is a preview version")
        break  # Only one pylance dependency

if found_preview_lance:
    raise ValueError("Found preview version of Lance in dependencies")
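
The check reduces to a substring test on the resolved version or git tag; for instance (example values only, mirroring the `v0.21.0-beta.5` tag visible in the Cargo.toml diff earlier):

```python
# Example of the beta detection above (versions are illustrative).
for version in ["0.21.0", "v0.21.0-beta.5"]:
    status = "preview" if "beta" in version else "stable"
    print(f"{version}: {status}")
```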

View File

@@ -9,81 +9,36 @@ unreleased features.
 ## Building the docs
 ### Setup
-1. Install LanceDB Python. See setup in [Python contributing guide](../python/CONTRIBUTING.md).
-   Run `make develop` to install the Python package.
-2. Install documentation dependencies. From LanceDB repo root: `pip install -r docs/requirements.txt`
-### Preview the docs
+1. Install LanceDB. From LanceDB repo root: `pip install -e python`
+2. Install dependencies. From LanceDB repo root: `pip install -r docs/requirements.txt`
+3. Make sure you have node and npm setup
+4. Make sure protobuf and libssl are installed
+### Building node module and create markdown files
+See [Javascript docs README](./src/javascript/README.md)
+### Build docs
+From LanceDB repo root:
+Run: `PYTHONPATH=. mkdocs build -f docs/mkdocs.yml`
+If successful, you should see a `docs/site` directory that you can verify locally.
+### Run local server
+You can run a local server to test the docs prior to deployment by navigating to the `docs` directory and running the following command:
-```shell
+```bash
 cd docs
 mkdocs serve
 ```
-If you want to just generate the HTML files:
-```shell
-PYTHONPATH=. mkdocs build -f docs/mkdocs.yml
-```
-If successful, you should see a `docs/site` directory that you can verify locally.
-## Adding examples
-To make sure examples are correct, we put examples in test files so they can be
-run as part of our test suites.
-You can see the tests are at:
-* Python: `python/python/tests/docs`
-* Typescript: `nodejs/examples/`
-### Checking python examples
-```shell
-cd python
-pytest -vv python/tests/docs
-```
-### Checking typescript examples
-The `@lancedb/lancedb` package must be built before running the tests:
-```shell
-pushd nodejs
-npm ci
+### Run doctest for typescript example
+```bash
+cd lancedb/docs
+npm i
 npm run build
-popd
-```
-Then you can run the examples by going to the `nodejs/examples` directory and
-running the tests like a normal npm package:
-```shell
-pushd nodejs/examples
-npm ci
-npm test
-popd
-```
-## API documentation
-### Python
-The Python API documentation is organized based on the file `docs/src/python/python.md`.
-We manually add entries there so we can control the organization of the reference page.
-**However, this means any new types must be manually added to the file.** No additional
-steps are needed to generate the API documentation.
-### Typescript
-The typescript API documentation is generated from the typescript source code using [typedoc](https://typedoc.org/).
-When new APIs are added, you must manually re-run the typedoc command to update the API documentation.
-The new files should be checked into the repository.
-```shell
-pushd nodejs
-npm run docs
-popd
+npm run all
 ```

View File

@@ -4,9 +4,6 @@ repo_url: https://github.com/lancedb/lancedb
 edit_uri: https://github.com/lancedb/lancedb/tree/main/docs/src
 repo_name: lancedb/lancedb
 docs_dir: src
-watch:
-  - src
-  - ../python/python
 theme:
   name: "material"
@@ -66,7 +63,6 @@ plugins:
         - https://arrow.apache.org/docs/objects.inv
         - https://pandas.pydata.org/docs/objects.inv
         - https://lancedb.github.io/lance/objects.inv
-        - https://docs.pydantic.dev/latest/objects.inv
   - mkdocs-jupyter
   - render_swagger:
       allow_arbitrary_locations: true
@@ -109,8 +105,8 @@ nav:
   - 📚 Concepts:
     - Vector search: concepts/vector_search.md
     - Indexing:
-        - IVFPQ: concepts/index_ivfpq.md
-        - HNSW: concepts/index_hnsw.md
+      - IVFPQ: concepts/index_ivfpq.md
+      - HNSW: concepts/index_hnsw.md
     - Storage: concepts/storage.md
     - Data management: concepts/data_management.md
   - 🔨 Guides:
@@ -134,8 +130,8 @@ nav:
     - Adaptive RAG: rag/adaptive_rag.md
     - SFR RAG: rag/sfr_rag.md
    - Advanced Techniques:
-        - HyDE: rag/advanced_techniques/hyde.md
-        - FLARE: rag/advanced_techniques/flare.md
+      - HyDE: rag/advanced_techniques/hyde.md
+      - FLARE: rag/advanced_techniques/flare.md
   - Reranking:
     - Quickstart: reranking/index.md
     - Cohere Reranker: reranking/cohere.md
@@ -150,9 +146,7 @@ nav:
     - Building Custom Rerankers: reranking/custom_reranker.md
     - Example: notebooks/lancedb_reranking.ipynb
   - Filtering: sql.md
-  - Versioning & Reproducibility:
-    - sync API: notebooks/reproducibility.ipynb
-    - async API: notebooks/reproducibility_async.ipynb
+  - Versioning & Reproducibility: notebooks/reproducibility.ipynb
   - Configuring Storage: guides/storage.md
   - Migration Guide: migration.md
   - Tuning retrieval performance:
@@ -182,7 +176,6 @@ nav:
     - Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md
     - Jina Embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md
   - User-defined embedding functions: embeddings/custom_embedding_function.md
-  - Variables and secrets: embeddings/variables_and_secrets.md
   - "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
   - "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
 - 🔌 Integrations:
@@ -245,8 +238,8 @@ nav:
   - Concepts:
     - Vector search: concepts/vector_search.md
     - Indexing:
-        - IVFPQ: concepts/index_ivfpq.md
-        - HNSW: concepts/index_hnsw.md
+      - IVFPQ: concepts/index_ivfpq.md
+      - HNSW: concepts/index_hnsw.md
     - Storage: concepts/storage.md
     - Data management: concepts/data_management.md
   - Guides:
@@ -270,8 +263,8 @@ nav:
     - Adaptive RAG: rag/adaptive_rag.md
     - SFR RAG: rag/sfr_rag.md
    - Advanced Techniques:
-        - HyDE: rag/advanced_techniques/hyde.md
-        - FLARE: rag/advanced_techniques/flare.md
+      - HyDE: rag/advanced_techniques/hyde.md
+      - FLARE: rag/advanced_techniques/flare.md
   - Reranking:
     - Quickstart: reranking/index.md
     - Cohere Reranker: reranking/cohere.md
@@ -285,9 +278,7 @@ nav:
     - Building Custom Rerankers: reranking/custom_reranker.md
     - Example: notebooks/lancedb_reranking.ipynb
   - Filtering: sql.md
-  - Versioning & Reproducibility:
-    - sync API: notebooks/reproducibility.ipynb
-    - async API: notebooks/reproducibility_async.ipynb
+  - Versioning & Reproducibility: notebooks/reproducibility.ipynb
   - Configuring Storage: guides/storage.md
   - Migration Guide: migration.md
   - Tuning retrieval performance:
@@ -316,7 +307,6 @@ nav:
     - Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md
     - Jina Embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md
   - User-defined embedding functions: embeddings/custom_embedding_function.md
-  - Variables and secrets: embeddings/variables_and_secrets.md
   - "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
   - "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
 - Integrations:
@@ -355,8 +345,8 @@ nav:
   - 🦀 Rust:
     - Overview: examples/examples_rust.md
   - Studies:
-      - studies/overview.md
-      - ↗Improve retrievers with hybrid search and reranking: https://blog.lancedb.com/hybrid-search-and-reranking-report/
+    - studies/overview.md
+    - ↗Improve retrievers with hybrid search and reranking: https://blog.lancedb.com/hybrid-search-and-reranking-report/
   - API reference:
     - Overview: api_reference.md
     - Python: python/python.md
@@ -377,7 +367,6 @@ extra_css:
 extra_javascript:
   - "extra_js/init_ask_ai_widget.js"
-  - "extra_js/reo.js"
 extra:
   analytics:

View File

@@ -38,13 +38,6 @@ components:
         required: true
         schema:
           type: string
-    index_name:
-      name: index_name
-      in: path
-      description: name of the index
-      required: true
-      schema:
-        type: string
   responses:
     invalid_request:
       description: Invalid request
@@ -492,22 +485,3 @@ paths:
           $ref: "#/components/responses/unauthorized"
         "404":
           $ref: "#/components/responses/not_found"
-  /v1/table/{name}/index/{index_name}/drop/:
-    post:
-      description: Drop an index from the table
-      tags:
-        - Tables
-      summary: Drop an index from the table
-      operationId: dropIndex
-      parameters:
-        - $ref: "#/components/parameters/table_name"
-        - $ref: "#/components/parameters/index_name"
-      responses:
-        "200":
-          description: Index successfully dropped
-        "400":
-          $ref: "#/components/responses/invalid_request"
-        "401":
-          $ref: "#/components/responses/unauthorized"
-        "404":
-          $ref: "#/components/responses/not_found"

View File

@@ -18,24 +18,25 @@ See the [indexing](concepts/index_ivfpq.md) concepts guide for more information
 Lance supports `IVF_PQ` index type by default.
 === "Python"
-    === "Sync API"
-        Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method.
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
-        --8<-- "python/python/tests/docs/test_guide_index.py:import-numpy"
-        --8<-- "python/python/tests/docs/test_guide_index.py:create_ann_index"
-        ```
-    === "Async API"
-        Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method.
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
-        --8<-- "python/python/tests/docs/test_guide_index.py:import-numpy"
-        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-ivfpq"
-        --8<-- "python/python/tests/docs/test_guide_index.py:create_ann_index_async"
-        ```
+    Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method.
+    ```python
+    import lancedb
+    import numpy as np
+    uri = "data/sample-lancedb"
+    db = lancedb.connect(uri)
+
+    # Create 10,000 sample vectors
+    data = [{"vector": row, "item": f"item {i}"}
+            for i, row in enumerate(np.random.random((10_000, 1536)).astype('float32'))]
+
+    # Add the vectors to a table
+    tbl = db.create_table("my_vectors", data=data)
+
+    # Create and train the index - you need to have enough data in the table for an effective training step
+    tbl.create_index(num_partitions=256, num_sub_vectors=96)
+    ```
 === "TypeScript"
@@ -126,9 +127,7 @@ You can specify the GPU device to train IVF partitions via
         accelerator="mps"
     )
     ```
-    !!! note
-        GPU based indexing is not yet supported with our asynchronous client.
 Troubleshooting:
 If you see `AssertionError: Torch not compiled with CUDA enabled`, you need to [install
@@ -153,16 +152,14 @@ There are a couple of parameters that can be used to fine-tune the search:
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:vector_search"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async"
-        ```
+    ```python
+    tbl.search(np.random.random((1536))) \
+        .limit(2) \
+        .nprobes(20) \
+        .refine_factor(10) \
+        .to_pandas()
+    ```
 ```text
 vector item _distance
@@ -199,16 +196,10 @@ The search will return the data requested in addition to the distance of each it
 You can further filter the elements returned by a search using a where clause.
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_filter"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async_with_filter"
-        ```
+    ```python
+    tbl.search(np.random.random((1536))).where("item != 'item 1141'").to_pandas()
+    ```
 === "TypeScript"
@@ -230,16 +221,10 @@ You can select the columns returned by the query using a select clause.
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_select"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async_with_select"
-        ```
+    ```python
+    tbl.search(np.random.random((1536))).select(["vector"]).to_pandas()
+    ```
 ```text
 vector _distance
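
One note on the IVF-PQ parameters used in the indexing example above: `num_sub_vectors` must divide the vector dimension evenly, since product quantization splits each vector into equal-sized chunks. A quick sanity check (a sketch; numbers taken from the example):

```python
# 1536-dim vectors, 256 IVF partitions, 96 PQ sub-vectors (from the example).
dim, num_partitions, num_sub_vectors = 1536, 256, 96
assert dim % num_sub_vectors == 0, "num_sub_vectors must divide the dimension"
print(f"each PQ sub-vector spans {dim // num_sub_vectors} dimensions")
```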

View File

@@ -3,7 +3,6 @@ import * as vectordb from "vectordb";
 // --8<-- [end:import]
 (async () => {
-  console.log("ann_indexes.ts: start");
   // --8<-- [start:ingest]
   const db = await vectordb.connect("data/sample-lancedb");
@@ -50,5 +49,5 @@ import * as vectordb from "vectordb";
     .execute();
   // --8<-- [end:search3]
-  console.log("ann_indexes.ts: done");
+  console.log("Ann indexes: done");
 })();

Binary file not shown (image removed; previous size 10 KiB).

View File

@@ -133,22 +133,13 @@ recommend switching to stable releases.
 ## Connect to a database
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:imports"
-        --8<-- "python/python/tests/docs/test_basic.py:set_uri"
-        --8<-- "python/python/tests/docs/test_basic.py:connect"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:imports"
-        --8<-- "python/python/tests/docs/test_basic.py:set_uri"
-        --8<-- "python/python/tests/docs/test_basic.py:connect_async"
-        ```
+    ```python
+    --8<-- "python/python/tests/docs/test_basic.py:imports"
+    --8<-- "python/python/tests/docs/test_basic.py:connect"
+    --8<-- "python/python/tests/docs/test_basic.py:connect_async"
+    ```
=== "Typescript[^1]" === "Typescript[^1]"
@@ -192,33 +183,21 @@ table.
 === "Python"
+    ```python
+    --8<-- "python/python/tests/docs/test_basic.py:create_table"
+    --8<-- "python/python/tests/docs/test_basic.py:create_table_async"
+    ```
     If the table already exists, LanceDB will raise an error by default.
     If you want to overwrite the table, you can pass in `mode="overwrite"`
     to the `create_table` method.
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:create_table"
-        ```
-        You can also pass in a pandas DataFrame directly:
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:create_table_pandas"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:create_table_async"
-        ```
-        You can also pass in a pandas DataFrame directly:
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:create_table_async_pandas"
-        ```
+    You can also pass in a pandas DataFrame directly:
+    ```python
+    --8<-- "python/python/tests/docs/test_basic.py:create_table_pandas"
+    --8<-- "python/python/tests/docs/test_basic.py:create_table_async_pandas"
+    ```
=== "Typescript[^1]" === "Typescript[^1]"
@@ -268,16 +247,10 @@ similar to a `CREATE TABLE` statement in SQL.
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:create_empty_table"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:create_empty_table_async"
-        ```
+    ```python
+    --8<-- "python/python/tests/docs/test_basic.py:create_empty_table"
+    --8<-- "python/python/tests/docs/test_basic.py:create_empty_table_async"
+    ```
 !!! note "You can define schema in Pydantic"
     LanceDB comes with Pydantic support, which allows you to define the schema of your data using Pydantic models. This makes it easy to work with LanceDB tables and data. Learn more about all supported types in [tables guide](./guides/tables.md).
@@ -308,16 +281,10 @@ Once created, you can open a table as follows:
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:open_table"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:open_table_async"
-        ```
+    ```python
+    --8<-- "python/python/tests/docs/test_basic.py:open_table"
+    --8<-- "python/python/tests/docs/test_basic.py:open_table_async"
+    ```
 === "Typescript[^1]"
     === "@lancedb/lancedb"
@@ -343,16 +310,10 @@ If you forget the name of your table, you can always get a listing of all table
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:table_names"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:table_names_async"
-        ```
+    ```python
+    --8<-- "python/python/tests/docs/test_basic.py:table_names"
+    --8<-- "python/python/tests/docs/test_basic.py:table_names_async"
+    ```
 === "Typescript[^1]"
     === "@lancedb/lancedb"
@@ -379,16 +340,10 @@ After a table has been created, you can always add more data to it as follows:
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:add_data"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:add_data_async"
-        ```
+    ```python
+    --8<-- "python/python/tests/docs/test_basic.py:add_data"
+    --8<-- "python/python/tests/docs/test_basic.py:add_data_async"
+    ```
 === "Typescript[^1]"
     === "@lancedb/lancedb"
@@ -415,16 +370,10 @@ Once you've embedded the query, you can find its nearest neighbors as follows:
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:vector_search"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:vector_search_async"
-        ```
+    ```python
+    --8<-- "python/python/tests/docs/test_basic.py:vector_search"
+    --8<-- "python/python/tests/docs/test_basic.py:vector_search_async"
+    ```
This returns a pandas DataFrame with the results. This returns a pandas DataFrame with the results.
@@ -463,16 +412,10 @@ LanceDB allows you to create an ANN index on a table as follows:
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:create_index"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:create_index_async"
-        ```
+    ```py
+    --8<-- "python/python/tests/docs/test_basic.py:create_index"
+    --8<-- "python/python/tests/docs/test_basic.py:create_index_async"
+    ```
 === "Typescript[^1]"
     === "@lancedb/lancedb"
@@ -508,16 +451,10 @@ This can delete any number of rows that match the filter.
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:delete_rows"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:delete_rows_async"
-        ```
+    ```python
+    --8<-- "python/python/tests/docs/test_basic.py:delete_rows"
+    --8<-- "python/python/tests/docs/test_basic.py:delete_rows_async"
+    ```
 === "Typescript[^1]"
@@ -546,10 +483,7 @@ simple or complex as needed. To see what expressions are supported, see the
 === "Python"
-    === "Sync API"
-        Read more: [lancedb.table.Table.delete][]
-    === "Async API"
-        Read more: [lancedb.table.AsyncTable.delete][]
+    Read more: [lancedb.table.Table.delete][]
 === "Typescript[^1]"
@@ -571,16 +505,10 @@ Use the `drop_table()` method on the database to remove a table.
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:drop_table"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
-        ```
+    ```python
+    --8<-- "python/python/tests/docs/test_basic.py:drop_table"
+    --8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
+    ```
 This permanently removes the table and is not recoverable, unlike deleting rows.
 By default, if the table does not exist an exception is raised. To suppress this,
@@ -615,17 +543,10 @@ You can use the embedding API when working with embedding models. It automatical
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_embeddings_optional.py:imports"
-        --8<-- "python/python/tests/docs/test_embeddings_optional.py:openai_embeddings"
-        ```
-    === "Async API"
-        Coming soon to the async API.
-        https://github.com/lancedb/lancedb/issues/1938
+    ```python
+    --8<-- "python/python/tests/docs/test_embeddings_optional.py:imports"
+    --8<-- "python/python/tests/docs/test_embeddings_optional.py:openai_embeddings"
+    ```
 === "Typescript[^1]"
@@ -107,6 +107,7 @@ const example = async () => {
   // --8<-- [start:search]
   const query = await tbl.search([100, 100]).limit(2).execute();
   // --8<-- [end:search]
+  console.log(query);
   // --8<-- [start:delete]
   await tbl.delete('item = "fizz"');
@@ -118,9 +119,8 @@ const example = async () => {
 };
 async function main() {
-  console.log("basic_legacy.ts: start");
   await example();
-  console.log("basic_legacy.ts: done");
+  console.log("Basic example: done");
 }
 main();
@@ -7,7 +7,7 @@ Approximate Nearest Neighbor (ANN) search is a method for finding data points ne
 There are three main types of ANN search algorithms:
 * **Tree-based search algorithms**: Use a tree structure to organize and store data points.
-* **Hash-based search algorithms**: Use a specialized geometric hash table to store and manage data points. These algorithms typically focus on theoretical guarantees, and don't usually perform as well as the other approaches in practice. *
+* **Hash-based search algorithms**: Use a specialized geometric hash table to store and manage data points. These algorithms typically focus on theoretical guarantees, and don't usually perform as well as the other approaches in practice.
 * **Graph-based search algorithms**: Use a graph structure to store data points, which can be a bit complex.
 HNSW is a graph-based algorithm. All graph-based search algorithms rely on the idea of a k-nearest neighbor (or k-approximate nearest neighbor) graph, which we outline below.
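To make the k-nearest-neighbor graph idea concrete, a small illustrative sketch follows; it builds the graph by brute force, which is *not* what HNSW or LanceDB actually does, but it shows the structure the paragraph describes:

```python
import numpy as np

def knn_graph(points: np.ndarray, k: int) -> dict:
    """Map each point index to the indices of its k nearest neighbors."""
    diff = points[:, None, :] - points[None, :, :]
    dists = (diff ** 2).sum(-1)      # pairwise squared distances
    np.fill_diagonal(dists, np.inf)  # a point is not its own neighbor
    return {i: list(np.argsort(dists[i])[:k]) for i in range(len(points))}

points = np.random.rand(100, 2)
graph = knn_graph(points, k=4)
print(graph[0])  # indices of the 4 nearest neighbors of point 0
```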
@@ -55,14 +55,6 @@ Let's implement `SentenceTransformerEmbeddings` class. All you need to do is imp
 This is a stripped down version of our implementation of `SentenceTransformerEmbeddings` that removes certain optimizations and default settings.
-!!! danger "Use sensitive keys to prevent leaking secrets"
-    To prevent leaking secrets, such as API keys, you should add any sensitive
-    parameters of an embedding function to the output of the
-    [sensitive_keys()][lancedb.embeddings.base.EmbeddingFunction.sensitive_keys] /
-    [getSensitiveKeys()](../../js/namespaces/embedding/classes/EmbeddingFunction/#getsensitivekeys)
-    method. This prevents users from accidentally instantiating the embedding
-    function with hard-coded secrets.
 Now you can use this embedding function to create your table schema, and that's it! You can then ingest data and run queries without manually vectorizing the inputs.
 === "Python"
@@ -1,53 +0,0 @@
# Variables and Secrets
Most embedding configuration options are saved in the table's metadata. However,
this isn't always appropriate. For example, API keys should never be stored in the
metadata. Additionally, other configuration options might be best set at runtime,
such as the `device` configuration that controls whether to use GPU or CPU for
inference. If you hardcoded this to GPU, you wouldn't be able to run the code on
a server without one.
To handle these cases, you can set variables on the embedding registry and
reference them in the embedding configuration. These variables will be available
during the runtime of your program, but not saved in the table's metadata. When
the table is loaded from a different process, the variables must be set again.
To set a variable, use the `set_var()` / `setVar()` method on the embedding registry.
To reference a variable, use the syntax `$env:VARIABLE_NAME`. If there is a default
value, you can use the syntax `$env:VARIABLE_NAME:DEFAULT_VALUE`.
## Using variables to set secrets
Sensitive configuration, such as API keys, must either be set as environment
variables or as variables on the embedding registry. If you pass in a hardcoded
value, LanceDB will raise an error. Instead, if you want to set an API key via
configuration, use a variable:
=== "Python"
```python
--8<-- "python/python/tests/docs/test_embeddings_optional.py:register_secret"
```
=== "Typescript"
```typescript
--8<-- "nodejs/examples/embedding.test.ts:register_secret"
```
## Using variables to set the device parameter
Many embedding functions that run locally have a `device` parameter that controls
whether to use GPU or CPU for inference. Because not all computers have a GPU,
it's helpful to be able to set the `device` parameter at runtime, rather than
have it hard coded in the embedding configuration. To make it work even if the
variable isn't set, you could provide a default value of `cpu` in the embedding
configuration.
Some embedding libraries even have a method to detect which devices are available,
which could be used to dynamically set the device at runtime. For example, in Python
you can check if a CUDA GPU is available using `torch.cuda.is_available()`.
```python
--8<-- "python/python/tests/docs/test_embeddings_optional.py:register_device"
```
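A hedged sketch of the pattern this removed page described, assuming the OpenAI embedding function's `api_key` parameter:

```python
import lancedb
from lancedb.embeddings import get_registry

registry = get_registry()
# Lives only in this process; never written to the table's metadata
registry.set_var("openai_api_key", "sk-placeholder")

embed_fcn = registry.get("openai").create(
    name="text-embedding-3-small",
    api_key="$env:openai_api_key",  # resolved from the registry at runtime
)
```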
@@ -1 +0,0 @@
-!function(){var e,t,n;e="9627b71b382d201",t=function(){Reo.init({clientID:"9627b71b382d201"})},(n=document.createElement("script")).src="https://static.reo.dev/"+e+"/reo.js",n.defer=!0,n.onload=t,document.head.appendChild(n)}();
@@ -10,20 +10,28 @@ LanceDB provides support for full-text search via Lance, allowing you to incorpo
 Consider that we have a LanceDB table named `my_table`, whose string column `text` we want to index and query via keyword search. The FTS index must be created before you can search via keywords.
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_search.py:import-lancedb"
-        --8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
-        --8<-- "python/python/tests/docs/test_search.py:basic_fts"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_search.py:import-lancedb"
-        --8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
-        --8<-- "python/python/tests/docs/test_search.py:basic_fts_async"
-        ```
+    ```python
+    import lancedb
+
+    uri = "data/sample-lancedb"
+    db = lancedb.connect(uri)
+
+    table = db.create_table(
+        "my_table",
+        data=[
+            {"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"},
+            {"vector": [5.9, 26.5], "text": "There are several kittens playing"},
+        ],
+    )
+
+    # passing `use_tantivy=False` to use lance FTS index
+    # `use_tantivy=True` by default
+    table.create_fts_index("text", use_tantivy=False)
+    table.search("puppy").limit(10).select(["text"]).to_list()
+    # [{'text': 'Frodo was a happy puppy', '_score': 0.6931471824645996}]
+    # ...
+    ```
 === "TypeScript"
@@ -42,7 +50,7 @@ Consider that we have a LanceDB table named `my_table`, whose string column `tex
   });
   await tbl
-    .search("puppy", "fts")
+    .search("puppy", queryType="fts")
   .select(["text"])
   .limit(10)
   .toArray();
@@ -85,32 +93,22 @@ By default the text is tokenized by splitting on punctuation and whitespaces, an
 Stemming is useful for improving search results by reducing words to their root form, e.g. "running" to "run". LanceDB supports stemming for multiple languages; you can specify the tokenizer name to enable stemming by the pattern `tokenizer_name="{language_code}_stem"`, e.g. `en_stem` for English.
 For example, to enable stemming for English:
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_search.py:fts_config_stem"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_search.py:fts_config_stem_async"
-    ```
+```python
+table.create_fts_index("text", use_tantivy=True, tokenizer_name="en_stem")
+```
 The following [languages](https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html) are currently supported.
 The tokenizer is customizable: you can specify how the tokenizer splits the text, how it filters out words, and so on.
 For example, for a language with accents, you can configure the tokenizer to use `ascii_folding` to remove accents, e.g. 'é' to 'e':
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_search.py:fts_config_folding"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_search.py:fts_config_folding_async"
-    ```
+```python
+table.create_fts_index("text",
+                       use_tantivy=False,
+                       language="French",
+                       stem=True,
+                       ascii_folding=True)
+```
 ## Filtering
@@ -121,16 +119,9 @@ This can be invoked via the familiar `where` syntax.
 With pre-filtering:
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_search.py:fts_prefiltering"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_search.py:fts_prefiltering_async"
-        ```
+    ```python
+    table.search("puppy").limit(10).where("meta='foo'", prefilter=True).to_list()
+    ```
 === "TypeScript"
@@ -160,16 +151,9 @@ With pre-filtering:
 With post-filtering:
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_search.py:fts_postfiltering"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_search.py:fts_postfiltering_async"
-        ```
+    ```python
+    table.search("puppy").limit(10).where("meta='foo'", prefilter=False).to_list()
+    ```
 === "TypeScript"
@@ -207,16 +191,9 @@ or a **terms** search query like `old man sea`. For more details on the terms
 query syntax, see Tantivy's [query parser rules](https://docs.rs/tantivy/latest/tantivy/query/struct.QueryParser.html).
 To search for a phrase, the index must be created with `with_position=True`:
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_search.py:fts_with_position"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_search.py:fts_with_position_async"
-    ```
+```python
+table.create_fts_index("text", use_tantivy=False, with_position=True)
+```
 This will allow you to search for phrases, but it will also significantly increase the index size and indexing time.
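As a hedged illustration (reusing the `my_table` example from earlier on this page): once positions are indexed, a phrase query is typically expressed by double-quoting the phrase inside the query string:

```python
table.create_fts_index("text", use_tantivy=False, with_position=True, replace=True)

# The inner double quotes request an exact phrase match
results = table.search('"happy puppy"').limit(10).to_list()
```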
@@ -228,16 +205,10 @@ This can make the query more efficient, especially when the table is large and t
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_search.py:fts_incremental_index"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_search.py:fts_incremental_index_async"
-        ```
+    ```python
+    table.add([{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"}])
+    table.optimize()
+    ```
 === "TypeScript"
@@ -2,7 +2,7 @@
 LanceDB also provides support for full-text search via [Tantivy](https://github.com/quickwit-oss/tantivy), allowing you to incorporate keyword-based search (based on BM25) in your retrieval solutions.
-The tantivy-based FTS is only available in the Python synchronous API and does not support building indexes on object storage or incremental indexing. If you need these features, try the [native FTS](fts.md).
+The tantivy-based FTS is only available in Python and does not support building indexes on object storage or incremental indexing. If you need these features, try the [native FTS](fts.md).
 ## Installation
@@ -32,20 +32,19 @@ over scalar columns.
 ### Create a scalar index
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
-        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-btree-bitmap"
-        --8<-- "python/python/tests/docs/test_guide_index.py:basic_scalar_index"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
-        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-btree-bitmap"
-        --8<-- "python/python/tests/docs/test_guide_index.py:basic_scalar_index_async"
-        ```
+    ```python
+    import lancedb
+
+    books = [
+        {"book_id": 1, "publisher": "plenty of books", "tags": ["fantasy", "adventure"]},
+        {"book_id": 2, "publisher": "book town", "tags": ["non-fiction"]},
+        {"book_id": 3, "publisher": "oreilly", "tags": ["textbook"]}
+    ]
+
+    db = lancedb.connect("./db")
+    table = db.create_table("books", books)
+    table.create_scalar_index("book_id")  # BTree by default
+    table.create_scalar_index("publisher", index_type="BITMAP")
+    ```
 === "Typescript"
@@ -63,18 +62,12 @@ The following scan will be faster if the column `book_id` has a scalar index:
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
-        --8<-- "python/python/tests/docs/test_guide_index.py:search_with_scalar_index"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
-        --8<-- "python/python/tests/docs/test_guide_index.py:search_with_scalar_index_async"
-        ```
+    ```python
+    import lancedb
+
+    table = db.open_table("books")
+    my_df = table.search().where("book_id = 2").to_pandas()
+    ```
 === "Typescript"
@@ -95,18 +88,22 @@ Scalar indices can also speed up scans containing a vector search or full text s
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
-        --8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_scalar_index"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
-        --8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_scalar_index_async"
-        ```
+    ```python
+    import lancedb
+
+    data = [
+        {"book_id": 1, "vector": [1, 2]},
+        {"book_id": 2, "vector": [3, 4]},
+        {"book_id": 3, "vector": [5, 6]}
+    ]
+    table = db.create_table("book_with_embeddings", data)
+    (
+        table.search([1, 2])
+        .where("book_id != 3", prefilter=True)
+        .to_pandas()
+    )
+    ```
 === "Typescript"
@@ -125,16 +122,10 @@ Scalar indices can also speed up scans containing a vector search or full text s
 Updating the table data (adding, deleting, or modifying records) requires that you also update the scalar index. This can be done by calling `optimize`, which will trigger an update to the existing scalar index.
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:update_scalar_index"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_index.py:update_scalar_index_async"
-        ```
+    ```python
+    table.add([{"vector": [7, 8], "book_id": 4}])
+    table.optimize()
+    ```
 === "TypeScript"
@@ -12,50 +12,26 @@ LanceDB OSS supports object stores such as AWS S3 (and compatible stores), Azure
 === "Python"
     AWS S3:
-    === "Sync API"
-        ```python
-        import lancedb
-        db = lancedb.connect("s3://bucket/path")
-        ```
-    === "Async API"
-        ```python
-        import lancedb
-        async_db = await lancedb.connect_async("s3://bucket/path")
-        ```
+    ```python
+    import lancedb
+    db = lancedb.connect("s3://bucket/path")
+    ```
     Google Cloud Storage:
-    === "Sync API"
-        ```python
-        import lancedb
-        db = lancedb.connect("gs://bucket/path")
-        ```
-    === "Async API"
-        ```python
-        import lancedb
-        async_db = await lancedb.connect_async("gs://bucket/path")
-        ```
+    ```python
+    import lancedb
+    db = lancedb.connect("gs://bucket/path")
+    ```
     Azure Blob Storage:
     <!-- skip-test -->
-    === "Sync API"
-        ```python
-        import lancedb
-        db = lancedb.connect("az://bucket/path")
-        ```
-    <!-- skip-test -->
-    === "Async API"
-        ```python
-        import lancedb
-        async_db = await lancedb.connect_async("az://bucket/path")
-        ```
+    ```python
+    import lancedb
+    db = lancedb.connect("az://bucket/path")
+    ```
 Note that for Azure, storage credentials must be configured. See [below](#azure-blob-storage) for more details.
@@ -118,24 +94,13 @@ If you only want this to apply to one particular connection, you can pass the `s
 === "Python"
-    === "Sync API"
-        ```python
-        import lancedb
-        db = lancedb.connect(
-            "s3://bucket/path",
-            storage_options={"timeout": "60s"}
-        )
-        ```
-    === "Async API"
-        ```python
-        import lancedb
-        async_db = await lancedb.connect_async(
-            "s3://bucket/path",
-            storage_options={"timeout": "60s"}
-        )
-        ```
+    ```python
+    import lancedb
+    db = await lancedb.connect_async(
+        "s3://bucket/path",
+        storage_options={"timeout": "60s"}
+    )
+    ```
 === "TypeScript"
@@ -163,29 +128,15 @@ Getting even more specific, you can set the `timeout` for only a particular tabl
 === "Python"
     <!-- skip-test -->
-    === "Sync API"
-        ```python
-        import lancedb
-        db = lancedb.connect("s3://bucket/path")
-        table = db.create_table(
-            "table",
-            [{"a": 1, "b": 2}],
-            storage_options={"timeout": "60s"}
-        )
-        ```
-    <!-- skip-test -->
-    === "Async API"
-        ```python
-        import lancedb
-        async_db = await lancedb.connect_async("s3://bucket/path")
-        async_table = await async_db.create_table(
-            "table",
-            [{"a": 1, "b": 2}],
-            storage_options={"timeout": "60s"}
-        )
-        ```
+    ```python
+    import lancedb
+    db = await lancedb.connect_async("s3://bucket/path")
+    table = await db.create_table(
+        "table",
+        [{"a": 1, "b": 2}],
+        storage_options={"timeout": "60s"}
+    )
+    ```
 === "TypeScript"
@@ -243,32 +194,17 @@ These can be set as environment variables or passed in the `storage_options` par
 === "Python"
-    === "Sync API"
-        ```python
-        import lancedb
-        db = lancedb.connect(
-            "s3://bucket/path",
-            storage_options={
-                "aws_access_key_id": "my-access-key",
-                "aws_secret_access_key": "my-secret-key",
-                "aws_session_token": "my-session-token",
-            }
-        )
-        ```
-    === "Async API"
-        ```python
-        import lancedb
-        async_db = await lancedb.connect_async(
-            "s3://bucket/path",
-            storage_options={
-                "aws_access_key_id": "my-access-key",
-                "aws_secret_access_key": "my-secret-key",
-                "aws_session_token": "my-session-token",
-            }
-        )
-        ```
+    ```python
+    import lancedb
+    db = await lancedb.connect_async(
+        "s3://bucket/path",
+        storage_options={
+            "aws_access_key_id": "my-access-key",
+            "aws_secret_access_key": "my-secret-key",
+            "aws_session_token": "my-session-token",
+        }
+    )
+    ```
 === "TypeScript"
@@ -412,22 +348,12 @@ name of the table to use.
 === "Python"
-    === "Sync API"
-        ```python
-        import lancedb
-        db = lancedb.connect(
-            "s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
-        )
-        ```
-    === "Async API"
-        ```python
-        import lancedb
-        async_db = await lancedb.connect_async(
-            "s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
-        )
-        ```
+    ```python
+    import lancedb
+    db = await lancedb.connect_async(
+        "s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
+    )
+    ```
 === "JavaScript"
@@ -515,30 +441,16 @@ LanceDB can also connect to S3-compatible stores, such as MinIO. To do so, you m
 === "Python"
-    === "Sync API"
-        ```python
-        import lancedb
-        db = lancedb.connect(
-            "s3://bucket/path",
-            storage_options={
-                "region": "us-east-1",
-                "endpoint": "http://minio:9000",
-            }
-        )
-        ```
-    === "Async API"
-        ```python
-        import lancedb
-        async_db = await lancedb.connect_async(
-            "s3://bucket/path",
-            storage_options={
-                "region": "us-east-1",
-                "endpoint": "http://minio:9000",
-            }
-        )
-        ```
+    ```python
+    import lancedb
+    db = await lancedb.connect_async(
+        "s3://bucket/path",
+        storage_options={
+            "region": "us-east-1",
+            "endpoint": "http://minio:9000",
+        }
+    )
+    ```
 === "TypeScript"
@@ -590,30 +502,16 @@ To configure LanceDB to use an S3 Express endpoint, you must set the storage opt
 === "Python"
-    === "Sync API"
-        ```python
-        import lancedb
-        db = lancedb.connect(
-            "s3://my-bucket--use1-az4--x-s3/path",
-            storage_options={
-                "region": "us-east-1",
-                "s3_express": "true",
-            }
-        )
-        ```
-    === "Async API"
-        ```python
-        import lancedb
-        async_db = await lancedb.connect_async(
-            "s3://my-bucket--use1-az4--x-s3/path",
-            storage_options={
-                "region": "us-east-1",
-                "s3_express": "true",
-            }
-        )
-        ```
+    ```python
+    import lancedb
+    db = await lancedb.connect_async(
+        "s3://my-bucket--use1-az4--x-s3/path",
+        storage_options={
+            "region": "us-east-1",
+            "s3_express": "true",
+        }
+    )
+    ```
 === "TypeScript"
@@ -654,29 +552,15 @@ GCS credentials are configured by setting the `GOOGLE_SERVICE_ACCOUNT` environme
 === "Python"
     <!-- skip-test -->
-    === "Sync API"
-        ```python
-        import lancedb
-        db = lancedb.connect(
-            "gs://my-bucket/my-database",
-            storage_options={
-                "service_account": "path/to/service-account.json",
-            }
-        )
-        ```
-    <!-- skip-test -->
-    === "Async API"
-        ```python
-        import lancedb
-        async_db = await lancedb.connect_async(
-            "gs://my-bucket/my-database",
-            storage_options={
-                "service_account": "path/to/service-account.json",
-            }
-        )
-        ```
+    ```python
+    import lancedb
+    db = await lancedb.connect_async(
+        "gs://my-bucket/my-database",
+        storage_options={
+            "service_account": "path/to/service-account.json",
+        }
+    )
+    ```
 === "TypeScript"
@@ -728,31 +612,16 @@ Azure Blob Storage credentials can be configured by setting the `AZURE_STORAGE_A
 === "Python"
     <!-- skip-test -->
-    === "Sync API"
-        ```python
-        import lancedb
-        db = lancedb.connect(
-            "az://my-container/my-database",
-            storage_options={
-                "account_name": "some-account",
-                "account_key": "some-key",
-            }
-        )
-        ```
-    <!-- skip-test -->
-    === "Async API"
-        ```python
-        import lancedb
-        async_db = await lancedb.connect_async(
-            "az://my-container/my-database",
-            storage_options={
-                "account_name": "some-account",
-                "account_key": "some-key",
-            }
-        )
-        ```
+    ```python
+    import lancedb
+    db = await lancedb.connect_async(
+        "az://my-container/my-database",
+        storage_options={
+            "account_name": "some-account",
+            "account_key": "some-key",
+        }
+    )
+    ```
 === "TypeScript"
@@ -12,18 +12,10 @@ Initialize a LanceDB connection and create a table
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
-        --8<-- "python/python/tests/docs/test_guide_tables.py:connect"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
-        --8<-- "python/python/tests/docs/test_guide_tables.py:connect_async"
-        ```
+    ```python
+    import lancedb
+    db = lancedb.connect("./.lancedb")
+    ```
 LanceDB allows ingesting data from various sources - `dict`, `list[dict]`, `pd.DataFrame`, `pa.Table` or an `Iterator[pa.RecordBatch]`. Let's take a look at some of these.
@@ -55,16 +47,18 @@ Initialize a LanceDB connection and create a table
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async"
-        ```
+    ```python
+    import lancedb
+    db = lancedb.connect("./.lancedb")
+
+    data = [{"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7},
+            {"vector": [0.2, 1.8], "lat": 40.1, "long": -74.1}]
+
+    db.create_table("my_table", data)
+    db["my_table"].head()
+    ```
 !!! info "Note"
     If the table already exists, LanceDB will raise an error by default.
@@ -73,30 +67,16 @@ Initialize a LanceDB connection and create a table
     and the table exists, then it simply opens the existing table. The data you
     passed in will NOT be appended to the table in that case.
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_exist_ok"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_exist_ok"
-        ```
+    ```python
+    db.create_table("name", data, exist_ok=True)
+    ```
 Sometimes you want to make sure that you start fresh. If you want to
 overwrite the table, you can pass in mode="overwrite" to the createTable function.
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_overwrite"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_overwrite"
-    ```
+```python
+db.create_table("name", data, mode="overwrite")
+```
 === "Typescript[^1]"
You can create a LanceDB table in JavaScript using an array of records as follows. You can create a LanceDB table in JavaScript using an array of records as follows.
@@ -166,37 +146,34 @@ Initialize a LanceDB connection and create a table
 ### From a Pandas DataFrame
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_pandas"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_pandas"
-    ```
+```python
+import pandas as pd
+
+data = pd.DataFrame({
+    "vector": [[1.1, 1.2, 1.3, 1.4], [0.2, 1.8, 0.4, 3.6]],
+    "lat": [45.5, 40.1],
+    "long": [-122.7, -74.1]
+})
+db.create_table("my_table", data)
+
+db["my_table"].head()
+```
 !!! info "Note"
     Data is converted to Arrow before being written to disk. For maximum control over how data is saved, either provide the PyArrow schema to convert to or else provide a PyArrow Table directly.
 The **`vector`** column needs to be a [Vector](../python/pydantic.md#vector-field) (defined as [pyarrow.FixedSizeList](https://arrow.apache.org/docs/python/generated/pyarrow.list_.html)) type.
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_custom_schema"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_custom_schema"
-    ```
+```python
+custom_schema = pa.schema([
+    pa.field("vector", pa.list_(pa.float32(), 4)),
+    pa.field("lat", pa.float32()),
+    pa.field("long", pa.float32())
+])
+table = db.create_table("my_table", data, schema=custom_schema)
+```
 ### From a Polars DataFrame
@@ -205,38 +182,45 @@ written in Rust. Just like in Pandas, the Polars integration is enabled by PyArr
 under the hood. A deeper integration between LanceDB Tables and Polars DataFrames
 is on the way.
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-polars"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_polars"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-polars"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_polars"
-    ```
+```python
+import polars as pl
+
+data = pl.DataFrame({
+    "vector": [[3.1, 4.1], [5.9, 26.5]],
+    "item": ["foo", "bar"],
+    "price": [10.0, 20.0]
+})
+table = db.create_table("pl_table", data=data)
+```
 ### From an Arrow Table
 You can also create LanceDB tables directly from Arrow tables.
 LanceDB supports the float16 data type!
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
-        --8<-- "python/python/tests/docs/test_guide_tables.py:import-numpy"
-        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_arrow_table"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:import-polars"
-        --8<-- "python/python/tests/docs/test_guide_tables.py:import-numpy"
-        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_arrow_table"
-        ```
+    ```python
+    import pyarrow as pa
+    import numpy as np
+
+    dim = 16
+    total = 2
+    schema = pa.schema(
+        [
+            pa.field("vector", pa.list_(pa.float16(), dim)),
+            pa.field("text", pa.string())
+        ]
+    )
+    data = pa.Table.from_arrays(
+        [
+            pa.array([np.random.randn(dim).astype(np.float16) for _ in range(total)],
+                     pa.list_(pa.float16(), dim)),
+            pa.array(["foo", "bar"])
+        ],
+        ["vector", "text"],
+    )
+    tbl = db.create_table("f16_tbl", data, schema=schema)
+    ```
 === "Typescript[^1]"
@@ -266,22 +250,25 @@ can be configured with the vector dimensions. It is also important to note that
 LanceDB only understands subclasses of `lancedb.pydantic.LanceModel`
 (which itself derives from `pydantic.BaseModel`).
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:class-Content"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_pydantic"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:class-Content"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_pydantic"
-    ```
+```python
+from lancedb.pydantic import Vector, LanceModel
+
+class Content(LanceModel):
+    movie_id: int
+    vector: Vector(128)
+    genres: str
+    title: str
+    imdb_id: int
+
+    @property
+    def imdb_url(self) -> str:
+        return f"https://www.imdb.com/title/tt{self.imdb_id}"
+
+import pyarrow as pa
+db = lancedb.connect("~/.lancedb")
+table_name = "movielens_small"
+table = db.create_table(table_name, schema=Content)
+```
 #### Nested schemas
@@ -290,24 +277,22 @@ For example, you may want to store the document string
 and the document source name as a nested Document object:
-```python
---8<-- "python/python/tests/docs/test_guide_tables.py:import-pydantic-basemodel"
---8<-- "python/python/tests/docs/test_guide_tables.py:class-Document"
-```
+```python
+class Document(BaseModel):
+    content: str
+    source: str
+```
 This can be used as the type of a LanceDB table column:
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:class-NestedSchema"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_nested_schema"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:class-NestedSchema"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_nested_schema"
-    ```
+```python
+class NestedSchema(LanceModel):
+    id: str
+    vector: Vector(1536)
+    document: Document
+
+tbl = db.create_table("nested_table", schema=NestedSchema, mode="overwrite")
+```
 This creates a struct column called "document" that has two subfields
 called "content" and "source":
@@ -371,20 +356,29 @@ LanceDB additionally supports PyArrow's `RecordBatch` Iterators or other generat
 Here's an example using a `RecordBatch` iterator for creating tables.
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:make_batches"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_batch"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:make_batches"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_batch"
-    ```
+```python
+import pyarrow as pa
+
+def make_batches():
+    for i in range(5):
+        yield pa.RecordBatch.from_arrays(
+            [
+                pa.array([[3.1, 4.1, 5.1, 6.1], [5.9, 26.5, 4.7, 32.8]],
+                         pa.list_(pa.float32(), 4)),
+                pa.array(["foo", "bar"]),
+                pa.array([10.0, 20.0]),
+            ],
+            ["vector", "item", "price"],
+        )
+
+schema = pa.schema([
+    pa.field("vector", pa.list_(pa.float32(), 4)),
+    pa.field("item", pa.utf8()),
+    pa.field("price", pa.float32()),
+])
+db.create_table("batched_table", make_batches(), schema=schema)
+```
 You can also use iterators of other types like Pandas DataFrames or Pylists directly in the above example.
@@ -393,29 +387,15 @@ You can also use iterators of other types like Pandas DataFrame or Pylists direc
 === "Python"
     If you forget the name of your table, you can always get a listing of all table names.
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:list_tables"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:list_tables_async"
-        ```
+    ```python
+    print(db.table_names())
+    ```
     Then, you can open any existing tables.
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:open_table"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:open_table_async"
-        ```
+    ```python
+    tbl = db.open_table("my_table")
+    ```
=== "Typescript[^1]" === "Typescript[^1]"
@@ -438,41 +418,35 @@ You can create an empty table for scenarios where you want to add data to the ta
An empty table can be initialized via a PyArrow schema. An empty table can be initialized via a PyArrow schema.
=== "Sync API"
```python ```python
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb" import lancedb
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow" import pyarrow as pa
--8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table"
```
=== "Async API"
```python schema = pa.schema(
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb" [
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow" pa.field("vector", pa.list_(pa.float32(), 2)),
--8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table_async" pa.field("item", pa.string()),
``` pa.field("price", pa.float32()),
])
tbl = db.create_table("empty_table_add", schema=schema)
```
Alternatively, you can also use Pydantic to specify the schema for the empty table. Note that we do not Alternatively, you can also use Pydantic to specify the schema for the empty table. Note that we do not
directly import `pydantic` but instead use `lancedb.pydantic` which is a subclass of `pydantic.BaseModel` directly import `pydantic` but instead use `lancedb.pydantic` which is a subclass of `pydantic.BaseModel`
that has been extended to support LanceDB specific types like `Vector`. that has been extended to support LanceDB specific types like `Vector`.
=== "Sync API" ```python
import lancedb
from lancedb.pydantic import LanceModel, vector
```python class Item(LanceModel):
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb" vector: Vector(2)
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic" item: str
--8<-- "python/python/tests/docs/test_guide_tables.py:class-Item" price: float
--8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table_pydantic"
```
=== "Async API"
```python tbl = db.create_table("empty_table_add", schema=Item.to_arrow_schema())
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb" ```
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
--8<-- "python/python/tests/docs/test_guide_tables.py:class-Item"
--8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table_async_pydantic"
```
Once the empty table has been created, you can add data to it via the various methods listed in the [Adding to a table](#adding-to-a-table) section. Once the empty table has been created, you can add data to it via the various methods listed in the [Adding to a table](#adding-to-a-table) section.
@@ -499,96 +473,86 @@ After a table has been created, you can always add more data to it using the `ad
 ### Add a Pandas DataFrame
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_pandas"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_pandas"
-    ```
+```python
+df = pd.DataFrame({
+    "vector": [[1.3, 1.4], [9.5, 56.2]], "item": ["banana", "apple"], "price": [5.0, 7.0]
+})
+tbl.add(df)
+```
 ### Add a Polars DataFrame
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_polars"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_polars"
-    ```
+```python
+df = pl.DataFrame({
+    "vector": [[1.3, 1.4], [9.5, 56.2]], "item": ["banana", "apple"], "price": [5.0, 7.0]
+})
+tbl.add(df)
+```
 ### Add an Iterator
 You can also add a large dataset batch in one go using an Iterator of any supported data type.
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:make_batches_for_add"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_batch"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:make_batches_for_add"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_batch"
-    ```
+```python
+def make_batches():
+    for i in range(5):
+        yield [
+            {"vector": [3.1, 4.1], "item": "peach", "price": 6.0},
+            {"vector": [5.9, 26.5], "item": "pear", "price": 5.0}
+        ]
+
+tbl.add(make_batches())
+```
 ### Add a PyArrow table
 If you have data coming in as a PyArrow table, you can add it directly to the LanceDB table.
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_pyarrow"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_pyarrow"
-    ```
+```python
+pa_table = pa.Table.from_arrays(
+    [
+        pa.array([[9.1, 6.7], [9.9, 31.2]],
+                 pa.list_(pa.float32(), 2)),
+        pa.array(["mango", "orange"]),
+        pa.array([7.0, 4.0]),
+    ],
+    ["vector", "item", "price"],
+)
+tbl.add(pa_table)
+```
 ### Add a Pydantic Model
 Assuming that a table has been created with the correct schema as shown [above](#creating-empty-table), you can add data items that are valid Pydantic models to the table.
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_pydantic"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_pydantic"
-    ```
+```python
+pydantic_model_items = [
+    Item(vector=[8.1, 4.7], item="pineapple", price=10.0),
+    Item(vector=[6.9, 9.3], item="avocado", price=9.0)
+]
+tbl.add(pydantic_model_items)
+```
??? "Ingesting Pydantic models with LanceDB embedding API" ??? "Ingesting Pydantic models with LanceDB embedding API"
When using LanceDB's embedding API, you can add Pydantic models directly to the table. LanceDB will automatically convert the `vector` field to a vector before adding it to the table. You need to specify the default value of `vector` field as None to allow LanceDB to automatically vectorize the data. When using LanceDB's embedding API, you can add Pydantic models directly to the table. LanceDB will automatically convert the `vector` field to a vector before adding it to the table. You need to specify the default value of `vector` field as None to allow LanceDB to automatically vectorize the data.
=== "Sync API" ```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
```python db = lancedb.connect("~/tmp")
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb" embed_fcn = get_registry().get("huggingface").create(name="BAAI/bge-small-en-v1.5")
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
--8<-- "python/python/tests/docs/test_guide_tables.py:import-embeddings"
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_with_embedding"
```
=== "Async API"
```python class Schema(LanceModel):
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb" text: str = embed_fcn.SourceField()
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic" vector: Vector(embed_fcn.ndims()) = embed_fcn.VectorField(default=None)
--8<-- "python/python/tests/docs/test_guide_tables.py:import-embeddings"
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_with_embedding" tbl = db.create_table("my_table", schema=Schema, mode="overwrite")
``` models = [Schema(text="hello"), Schema(text="world")]
tbl.add(models)
```
=== "Typescript[^1]" === "Typescript[^1]"
@@ -601,79 +565,50 @@ After a table has been created, you can always add more data to it using the `ad
     )
     ```
-## Upserting into a table
-Upserting lets you insert new rows or update existing rows in a table. To upsert
-in LanceDB, use the merge insert API.
-=== "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_merge_insert.py:upsert_basic"
-        ```
-        **API Reference**: [lancedb.table.Table.merge_insert][]
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_merge_insert.py:upsert_basic_async"
-        ```
-        **API Reference**: [lancedb.table.AsyncTable.merge_insert][]
-=== "Typescript[^1]"
-    === "@lancedb/lancedb"
-        ```typescript
-        --8<-- "nodejs/examples/merge_insert.test.ts:upsert_basic"
-        ```
-        **API Reference**: [lancedb.Table.mergeInsert](../js/classes/Table.md/#mergeInsert)
-Read more in the guide on [merge insert](tables/merge_insert.md).
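For readers of this diff, a hedged sketch of the upsert pattern the removed section pointed at, assuming a toy table keyed by `id`:

```python
import lancedb

db = lancedb.connect("./.lancedb")
table = db.create_table(
    "users", [{"id": 1, "name": "Alice"}, {"id": 2, "name": "Bob"}]
)

new_users = [{"id": 2, "name": "Bobby"}, {"id": 3, "name": "Carol"}]

# Update rows whose `id` matches, insert the rest
(
    table.merge_insert("id")
    .when_matched_update_all()
    .when_not_matched_insert_all()
    .execute(new_users)
)
```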
 ## Deleting from a table
 Use the `delete()` method on tables to delete rows from a table. To choose which rows to delete, provide a filter that matches on the metadata columns. This can delete any number of rows that match the filter.
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:delete_row"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:delete_row_async"
-        ```
+    ```python
+    tbl.delete('item = "fizz"')
+    ```
 ### Deleting rows with a specific column value
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:delete_specific_row"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:delete_specific_row_async"
-    ```
+```python
+import lancedb
+
+data = [{"x": 1, "vector": [1, 2]},
+        {"x": 2, "vector": [3, 4]},
+        {"x": 3, "vector": [5, 6]}]
+db = lancedb.connect("./.lancedb")
+table = db.create_table("my_table", data)
+table.to_pandas()
+#   x      vector
+# 0 1 [1.0, 2.0]
+# 1 2 [3.0, 4.0]
+# 2 3 [5.0, 6.0]
+
+table.delete("x = 2")
+table.to_pandas()
+#   x      vector
+# 0 1 [1.0, 2.0]
+# 1 3 [5.0, 6.0]
+```
 ### Delete from a list of values
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:delete_list_values"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:delete_list_values_async"
-    ```
+```python
+to_remove = [1, 5]
+to_remove = ", ".join(str(v) for v in to_remove)
+
+table.delete(f"x IN ({to_remove})")
+table.to_pandas()
+#   x      vector
+# 0 3 [5.0, 6.0]
+```
=== "Typescript[^1]" === "Typescript[^1]"
@@ -724,20 +659,27 @@ This can be used to update zero to all rows depending on how many rows match the
=== "Python" === "Python"
API Reference: [lancedb.table.Table.update][] API Reference: [lancedb.table.Table.update][]
=== "Sync API"
```python ```python
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb" import lancedb
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas" import pandas as pd
--8<-- "python/python/tests/docs/test_guide_tables.py:update_table"
```
=== "Async API"
```python # Create a lancedb connection
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb" db = lancedb.connect("./.lancedb")
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
--8<-- "python/python/tests/docs/test_guide_tables.py:update_table_async" # Create a table from a pandas DataFrame
``` data = pd.DataFrame({"x": [1, 2, 3], "vector": [[1, 2], [3, 4], [5, 6]]})
table = db.create_table("my_table", data)
# Update the table where x = 2
table.update(where="x = 2", values={"vector": [10, 10]})
# Get the updated table as a pandas DataFrame
df = table.to_pandas()
# Print the DataFrame
print(df)
```
Output Output
```shell ```shell
@@ -792,16 +734,13 @@ This can be used to update zero to all rows depending on how many rows match the
 The `values` parameter is used to provide the new values for the columns as literal values. You can also use the `values_sql` / `valuesSql` parameter to provide SQL expressions for the new values. For example, you can use `values_sql="x + 1"` to increment the value of the `x` column by 1.
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:update_table_sql"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:update_table_sql_async"
-        ```
+    ```python
+    # Increment the value of x for every row
+    table.update(values_sql={"x": "x + 1"})
+
+    print(table.to_pandas())
+    ```
 Output
 ```shell
@@ -832,16 +771,11 @@ This can be used to update zero to all rows depending on how many rows match the
 Use the `drop_table()` method on the database to remove a table.
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:drop_table"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
-        ```
+    ```python
+    --8<-- "python/python/tests/docs/test_basic.py:drop_table"
+    --8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
+    ```
 This permanently removes the table and is not recoverable, unlike deleting rows.
 By default, if the table does not exist an exception is raised. To suppress this,
@@ -870,21 +804,14 @@ a table:
 You can add new columns to the table with the `add_columns` method. New columns
 are filled with values based on a SQL expression. For example, you can add a new
 column `y` to the table, fill it with the value of `x * 2` and set the expected
 data type for it.
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:add_columns"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:add_columns_async"
-        ```
+    ```python
+    --8<-- "python/python/tests/docs/test_basic.py:add_columns"
+    ```
 **API Reference:** [lancedb.table.Table.add_columns][]
 === "Typescript"
@@ -921,18 +848,10 @@ rewriting the column, which can be a heavy operation.
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
-        --8<-- "python/python/tests/docs/test_basic.py:alter_columns"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
-        --8<-- "python/python/tests/docs/test_basic.py:alter_columns_async"
-        ```
+    ```python
+    import pyarrow as pa
+    --8<-- "python/python/tests/docs/test_basic.py:alter_columns"
+    ```
 **API Reference:** [lancedb.table.Table.alter_columns][]
 === "Typescript"
@@ -953,16 +872,9 @@ will remove the column from the schema.
 === "Python"
-    === "Sync API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:drop_columns"
-        ```
-    === "Async API"
-        ```python
-        --8<-- "python/python/tests/docs/test_basic.py:drop_columns_async"
-        ```
+    ```python
+    --8<-- "python/python/tests/docs/test_basic.py:drop_columns"
+    ```
 **API Reference:** [lancedb.table.Table.drop_columns][]
 === "Typescript"
@@ -1013,46 +925,31 @@ There are three possible settings for `read_consistency_interval`:
 To set strong consistency, use `timedelta(0)`:
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:table_strong_consistency"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:table_async_strong_consistency"
-    ```
+```python
+from datetime import timedelta
+db = lancedb.connect("./.lancedb", read_consistency_interval=timedelta(0))
+table = db.open_table("my_table")
+```
 For eventual consistency, use a custom `timedelta`:
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:table_eventual_consistency"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
-    --8<-- "python/python/tests/docs/test_guide_tables.py:table_async_eventual_consistency"
-    ```
+```python
+from datetime import timedelta
+db = lancedb.connect("./.lancedb", read_consistency_interval=timedelta(seconds=5))
+table = db.open_table("my_table")
+```
 By default, a `Table` will never check for updates from other writers. To manually check for updates you can use `checkout_latest`:
-=== "Sync API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:table_checkout_latest"
-    ```
-=== "Async API"
-    ```python
-    --8<-- "python/python/tests/docs/test_guide_tables.py:table_async_checkout_latest"
-    ```
+```python
+db = lancedb.connect("./.lancedb")
+table = db.open_table("my_table")
+
+# (Other writes happen to my_table from another process)
+
+# Check for updates
+table.checkout_latest()
+```
=== "Typescript[^1]" === "Typescript[^1]"
@@ -1060,14 +957,14 @@ There are three possible settings for `read_consistency_interval`:
```ts ```ts
const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 0 }); const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 0 });
const tbl = await db.openTable("my_table"); const table = await db.openTable("my_table");
``` ```
For eventual consistency, specify the update interval as seconds: For eventual consistency, specify the update interval as seconds:
```ts ```ts
const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 5 }); const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 5 });
const tbl = await db.openTable("my_table"); const table = await db.openTable("my_table");
``` ```
<!-- Node doesn't yet support the version time travel: https://github.com/lancedb/lancedb/issues/1007 <!-- Node doesn't yet support the version time travel: https://github.com/lancedb/lancedb/issues/1007
@@ -1,135 +0,0 @@
The merge insert command is a flexible API that can be used to perform:
1. Upsert
2. Insert-if-not-exists
3. Replace range
It works by joining the input data with the target table on a key you provide.
Often this key is a unique row id. You can then specify what to do when
there is a match and when there is not. For example, for upsert you want
to update the row if it has a match and insert it if it doesn't, whereas
for insert-if-not-exists you only want to insert the row if it doesn't
have a match.
You can also read more in the API reference:
* Python
* Sync: [lancedb.table.Table.merge_insert][]
* Async: [lancedb.table.AsyncTable.merge_insert][]
* Typescript: [lancedb.Table.mergeInsert](../../js/classes/Table.md/#mergeinsert)
!!! tip "Use scalar indices to speed up merge insert"
The merge insert command needs to perform a join between the input data and the
target table on the `on` key you provide. This requires scanning that entire
column, which can be expensive for large tables. To speed up this operation,
you can create a scalar index on the `on` column, which will allow LanceDB to
find matches without having to scan the whole table.
Read more about scalar indices in the [Building a Scalar Index](../scalar_index.md) guide.
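For example, a minimal sketch with the sync Python API, assuming the merge key column is named `id` (hypothetical):
```python
# Index the join key so merge_insert can locate matches
# without scanning the entire column.
table.create_scalar_index("id")
```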
!!! info "Embedding Functions"
Like the create table and add APIs, the merge insert API will automatically
compute embeddings if the table has an embedding definition in its schema.
If the input data doesn't contain the source column, or the vector column
is already filled, then the embeddings won't be computed. See the
[Embedding Functions](../../embeddings/embedding_functions.md) guide for more
information.
## Upsert
Upsert updates rows if they exist and inserts them if they don't. To do this
with merge insert, enable both `when_matched_update_all()` and
`when_not_matched_insert_all()`.
=== "Python"
=== "Sync API"
```python
--8<-- "python/python/tests/docs/test_merge_insert.py:upsert_basic"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_merge_insert.py:upsert_basic_async"
```
=== "Typescript"
=== "@lancedb/lancedb"
```typescript
--8<-- "nodejs/examples/merge_insert.test.ts:upsert_basic"
```
!!! note "Providing subsets of columns"
If a column is nullable, it can be omitted from the input data, in which
case it will be treated as `null`. Columns can also be provided in any order.
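For orientation alongside the snippet includes above, here is a minimal sync-API sketch; the table and its unique `id` column are hypothetical:
```python
new_data = [
    {"id": 1, "text": "updated row"},      # id exists -> row is updated
    {"id": 100, "text": "brand new row"},  # no match  -> row is inserted
]
(
    table.merge_insert("id")
    .when_matched_update_all()
    .when_not_matched_insert_all()
    .execute(new_data)
)
```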
## Insert-if-not-exists
To avoid inserting duplicate rows, you can use the insert-if-not-exists command.
This will only insert rows that do not have a match in the target table. To do
this with merge insert, enable just `when_not_matched_insert_all()`.
=== "Python"
=== "Sync API"
```python
--8<-- "python/python/tests/docs/test_merge_insert.py:insert_if_not_exists"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_merge_insert.py:insert_if_not_exists_async"
```
=== "Typescript"
=== "@lancedb/lancedb"
```typescript
--8<-- "nodejs/examples/merge_insert.test.ts:insert_if_not_exists"
```
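As a rough sync-API sketch (same hypothetical `id`-keyed table as in the upsert sketch above):
```python
(
    table.merge_insert("id")
    .when_not_matched_insert_all()
    .execute(new_data)  # rows whose id already exists are skipped
)
```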
## Replace range
You can also replace a range of rows in the target table with the input data.
For example, if you have a table of document chunks, where each chunk has
both a `doc_id` and a `chunk_id`, you can replace all chunks for a given
`doc_id` with updated chunks. This can be tricky with a plain upsert: if
the new data has fewer chunks than before, you will end up with leftover
stale chunks. To avoid this, add another clause to delete any chunks for
the document that are not in the new data, with
`when_not_matched_by_source_delete`.
=== "Python"
=== "Sync API"
```python
--8<-- "python/python/tests/docs/test_merge_insert.py:replace_range"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_merge_insert.py:replace_range_async"
```
=== "Typescript"
=== "@lancedb/lancedb"
```typescript
--8<-- "nodejs/examples/merge_insert.test.ts:replace_range"
```
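A sync-API sketch under the assumptions of the example above (hypothetical `doc_id`/`chunk_id` columns; `merge_insert` is assumed to accept a list of join keys):
```python
new_chunks = [
    {"doc_id": 2, "chunk_id": 0, "text": "revised chunk 0"},
    {"doc_id": 2, "chunk_id": 1, "text": "revised chunk 1"},
]
(
    table.merge_insert(["doc_id", "chunk_id"])
    .when_matched_update_all()
    .when_not_matched_insert_all()
    # remove chunks of doc 2 that are absent from new_chunks
    .when_not_matched_by_source_delete("doc_id = 2")
    .execute(new_chunks)
)
```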
View File
@@ -1,8 +1,8 @@
## Improving retriever performance ## Improving retriever performance
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/> Try it yourself - <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
VectorDBs are used as retrievers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retrievers are a critical component of Retrieval Augmented Generation (RAG) architectures. In this section, we will discuss how to improve the performance of retrievers. VectorDBs are used as retreivers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retriever is a critical component of Retrieval Augmented Generation (RAG) acrhitectures. In this section, we will discuss how to improve the performance of retrievers.
There are several ways to improve the performance of retrievers. Some of the common techniques are: There are several ways to improve the performance of retrievers. Some of the common techniques are:
@@ -19,7 +19,7 @@ Using different embedding models is something that's very specific to the use ca
## The dataset ## The dataset
We'll be using a QA dataset generated from a Llama2 review paper. The dataset contains 221 query, context and answer triplets. The queries and answers are generated using GPT-4 based on a given query. The full script used to generate the dataset can be found in this [repo](https://github.com/lancedb/ragged). It can be downloaded from [here](https://github.com/AyushExel/assets/blob/main/data_qa.csv). We'll be using a QA dataset generated using a LLama2 review paper. The dataset contains 221 query, context and answer triplets. The queries and answers are generated using GPT-4 based on a given query. Full script used to generate the dataset can be found on this [repo](https://github.com/lancedb/ragged). It can be downloaded from [here](https://github.com/AyushExel/assets/blob/main/data_qa.csv)
### Using different query types ### Using different query types
Let's set up the embeddings and the dataset first. We'll use LanceDB's `huggingface` embeddings integration for this guide. Let's set up the embeddings and the dataset first. We'll use LanceDB's `huggingface` embeddings integration for this guide.
@@ -45,14 +45,14 @@ table.add(df[["context"]].to_dict(orient="records"))
queries = df["query"].tolist() queries = df["query"].tolist()
``` ```
Now that we have the dataset and embeddings table set up, here's how you can run different query types on the dataset: Now that we have the dataset and embeddings table set up, here's how you can run different query types on the dataset.
* <b> Vector Search: </b> * <b> Vector Search: </b>
```python ```python
table.search(quries[0], query_type="vector").limit(5).to_pandas() table.search(quries[0], query_type="vector").limit(5).to_pandas()
``` ```
By default, LanceDB uses the vector search query type and automatically converts the input query to a vector before searching when using the embedding API. So, the following statement is equivalent to the above statement: By default, LanceDB uses vector search query type for searching and it automatically converts the input query to a vector before searching when using embedding API. So, the following statement is equivalent to the above statement.
```python ```python
table.search(queries[0]).limit(5).to_pandas() table.search(queries[0]).limit(5).to_pandas()
@@ -77,7 +77,7 @@ Now that we have the dataset and embeddings table set up, here's how you can run
* <b> Hybrid Search: </b> * <b> Hybrid Search: </b>
Hybrid search is a combination of vector and full-text search. Here's how you can run a hybrid search query on the dataset: Hybrid search is a combination of vector and full-text search. Here's how you can run a hybrid search query on the dataset.
```python ```python
table.search(quries[0], query_type="hybrid").limit(5).to_pandas() table.search(quries[0], query_type="hybrid").limit(5).to_pandas()
``` ```
@@ -87,7 +87,7 @@ Now that we have the dataset and embeddings table set up, here's how you can run
!!! note "Note" !!! note "Note"
By default, it uses `LinearCombinationReranker` that combines the scores from vector and full-text search using a weighted linear combination. It is the simplest reranker implementation available in LanceDB. You can also use other rerankers like `CrossEncoderReranker` or `CohereReranker` for reranking the results. By default, it uses `LinearCombinationReranker` that combines the scores from vector and full-text search using a weighted linear combination. It is the simplest reranker implementation available in LanceDB. You can also use other rerankers like `CrossEncoderReranker` or `CohereReranker` for reranking the results.
Learn more about rerankers [here](https://lancedb.github.io/lancedb/reranking/). Learn more about rerankers [here](https://lancedb.github.io/lancedb/reranking/)
View File
@@ -1,6 +1,6 @@
Continuing from the previous section, we can now rerank the results using more complex rerankers. Continuing from the previous section, we can now rerank the results using more complex rerankers.
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/> Try it yourself - <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
## Reranking search results ## Reranking search results
You can rerank any search results using a reranker. The syntax for reranking is as follows: You can rerank any search results using a reranker. The syntax for reranking is as follows:
@@ -62,6 +62,9 @@ Let us take a look at the same datasets from the previous sections, using the sa
| Reranked fts | 0.672 | | Reranked fts | 0.672 |
| Hybrid | 0.759 | | Hybrid | 0.759 |
### SQuAD Dataset
### Uber10K sec filing Dataset ### Uber10K sec filing Dataset
| Query Type | Hit-rate@5 | | Query Type | Hit-rate@5 |
View File
@@ -1,5 +1,5 @@
## Finetuning the Embedding Model ## Finetuning the Embedding Model
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/embedding_tuner.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/> Try it yourself - <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/embedding_tuner.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
Another way to improve retriever performance is to fine-tune the embedding model itself. Fine-tuning the embedding model can help in learning better representations for the documents and queries in the dataset. This can be particularly useful when the dataset is very different from the pre-trained data used to train the embedding model. Another way to improve retriever performance is to fine-tune the embedding model itself. Fine-tuning the embedding model can help in learning better representations for the documents and queries in the dataset. This can be particularly useful when the dataset is very different from the pre-trained data used to train the embedding model.
@@ -16,7 +16,7 @@ validation_df.to_csv("data_val.csv", index=False)
You can use any tuning API to fine-tune embedding models. In this example, we'll utilise Llama-index as it also comes with utilities for synthetic data generation and training the model. You can use any tuning API to fine-tune embedding models. In this example, we'll utilise Llama-index as it also comes with utilities for synthetic data generation and training the model.
We parse the dataset as llama-index text nodes and generate synthetic QA pairs from each node: Then parse the dataset as llama-index text nodes and generate synthetic QA pairs from each node.
```python ```python
from llama_index.core.node_parser import SentenceSplitter from llama_index.core.node_parser import SentenceSplitter
from llama_index.readers.file import PagedCSVReader from llama_index.readers.file import PagedCSVReader
@@ -43,7 +43,7 @@ val_dataset = generate_qa_embedding_pairs(
) )
``` ```
Now we'll use `SentenceTransformersFinetuneEngine` engine to fine-tune the model. You can also use `sentence-transformers` or `transformers` library to fine-tune the model: Now we'll use `SentenceTransformersFinetuneEngine` engine to fine-tune the model. You can also use `sentence-transformers` or `transformers` library to fine-tune the model.
```python ```python
from llama_index.finetuning import SentenceTransformersFinetuneEngine from llama_index.finetuning import SentenceTransformersFinetuneEngine
@@ -57,7 +57,7 @@ finetune_engine = SentenceTransformersFinetuneEngine(
finetune_engine.finetune() finetune_engine.finetune()
embed_model = finetune_engine.get_finetuned_model() embed_model = finetune_engine.get_finetuned_model()
``` ```
This saves the fine-tuned embedding model in the `tuned_model` folder. This saves the fine tuned embedding model in `tuned_model` folder. This al
# Evaluation results # Evaluation results
In order to evaluate the retriever, you can either use this model to ingest the data into LanceDB directly or use llama-index's LanceDB integration to create a `VectorStoreIndex` and use it as a retriever. In order to eval the retriever, you can either use this model to ingest the data into LanceDB directly or llama-index's LanceDB integration to create a `VectorStoreIndex` and use it as a retriever.
View File
@@ -3,22 +3,22 @@
Hybrid Search is a broad (often misused) term. It can mean anything from combining multiple methods for searching, to applying ranking methods to better sort the results. In this blog, we use the definition of "hybrid search" to mean using a combination of keyword-based and vector search. Hybrid Search is a broad (often misused) term. It can mean anything from combining multiple methods for searching, to applying ranking methods to better sort the results. In this blog, we use the definition of "hybrid search" to mean using a combination of keyword-based and vector search.
## The challenge of (re)ranking search results ## The challenge of (re)ranking search results
Once you have a group of the most relevant search results from multiple search sources, you'd likely standardize the score and rank them accordingly. This process can also be seen as another independent step: reranking. Once you have a group of the most relevant search results from multiple search sources, you'd likely standardize the score and rank them accordingly. This process can also be seen as another independent step-reranking.
There are two approaches for reranking search results from multiple sources. There are two approaches for reranking search results from multiple sources.
* <b>Score-based</b>: Calculate final relevance scores based on a weighted linear combination of individual search algorithm scores. Example: Weighted linear combination of semantic search & keyword-based search results. * <b>Score-based</b>: Calculate final relevance scores based on a weighted linear combination of individual search algorithm scores. Example-Weighted linear combination of semantic search & keyword-based search results.
* <b>Relevance-based</b>: Discards the existing scores and calculates the relevance of each search result-query pair. Example: Cross Encoder models * <b>Relevance-based</b>: Discards the existing scores and calculates the relevance of each search result-query pair. Example-Cross Encoder models
Even though there are many strategies for reranking search results, none works for all cases. Moreover, evaluating them is itself a challenge. Also, reranking can be dataset- or application-specific, so it's hard to generalize. Even though there are many strategies for reranking search results, none works for all cases. Moreover, evaluating them itself is a challenge. Also, reranking can be dataset, application specific so it's hard to generalize.
### Example evaluation of hybrid search with Reranking ### Example evaluation of hybrid search with Reranking
Here are some evaluation numbers from an experiment comparing these rerankers on about 800 queries. It is a modified version of an evaluation script from [llama-index](https://github.com/run-llama/finetune-embedding/blob/main/evaluate.ipynb) that measures hit-rate at top-k. Here's some evaluation numbers from experiment comparing these re-rankers on about 800 queries. It is modified version of an evaluation script from [llama-index](https://github.com/run-llama/finetune-embedding/blob/main/evaluate.ipynb) that measures hit-rate at top-k.
<b> With OpenAI ada2 embedding </b> <b> With OpenAI ada2 embedding </b>
Vector Search baseline: `0.64` Vector Search baseline - `0.64`
| Reranker | Top-3 | Top-5 | Top-10 | | Reranker | Top-3 | Top-5 | Top-10 |
| --- | --- | --- | --- | | --- | --- | --- | --- |
@@ -33,7 +33,7 @@ Vector Search baseline: `0.64`
<b> With OpenAI embedding-v3-small </b> <b> With OpenAI embedding-v3-small </b>
Vector Search baseline: `0.59` Vector Search baseline - `0.59`
| Reranker | Top-3 | Top-5 | Top-10 | | Reranker | Top-3 | Top-5 | Top-10 |
| --- | --- | --- | --- | | --- | --- | --- | --- |
View File
@@ -5,46 +5,57 @@ LanceDB supports both semantic and keyword-based search (also termed full-text s
## Hybrid search in LanceDB ## Hybrid search in LanceDB
You can perform hybrid search in LanceDB by combining the results of semantic and full-text search via a reranking algorithm of your choice. LanceDB provides multiple rerankers out of the box. However, you can always write a custom reranker if your use case needs more sophisticated logic. You can perform hybrid search in LanceDB by combining the results of semantic and full-text search via a reranking algorithm of your choice. LanceDB provides multiple rerankers out of the box. However, you can always write a custom reranker if your use case needs more sophisticated logic.
=== "Sync API" ```python
import os
```python import lancedb
--8<-- "python/python/tests/docs/test_search.py:import-os" import openai
--8<-- "python/python/tests/docs/test_search.py:import-openai" from lancedb.embeddings import get_registry
--8<-- "python/python/tests/docs/test_search.py:import-lancedb" from lancedb.pydantic import LanceModel, Vector
--8<-- "python/python/tests/docs/test_search.py:import-embeddings"
--8<-- "python/python/tests/docs/test_search.py:import-pydantic"
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
--8<-- "python/python/tests/docs/test_search.py:import-openai-embeddings"
--8<-- "python/python/tests/docs/test_search.py:class-Documents"
--8<-- "python/python/tests/docs/test_search.py:basic_hybrid_search"
```
=== "Async API"
```python db = lancedb.connect("~/.lancedb")
--8<-- "python/python/tests/docs/test_search.py:import-os"
--8<-- "python/python/tests/docs/test_search.py:import-openai"
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
--8<-- "python/python/tests/docs/test_search.py:import-embeddings"
--8<-- "python/python/tests/docs/test_search.py:import-pydantic"
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
--8<-- "python/python/tests/docs/test_search.py:import-openai-embeddings"
--8<-- "python/python/tests/docs/test_search.py:class-Documents"
--8<-- "python/python/tests/docs/test_search.py:basic_hybrid_search_async"
```
# Ingest embedding function in LanceDB table
# Configuring the environment variable OPENAI_API_KEY
if "OPENAI_API_KEY" not in os.environ:
# OR set the key here as a variable
openai.api_key = "sk-..."
embeddings = get_registry().get("openai").create()
class Documents(LanceModel):
vector: Vector(embeddings.ndims()) = embeddings.VectorField()
text: str = embeddings.SourceField()
table = db.create_table("documents", schema=Documents)
data = [
{ "text": "rebel spaceships striking from a hidden base"},
{ "text": "have won their first victory against the evil Galactic Empire"},
{ "text": "during the battle rebel spies managed to steal secret plans"},
{ "text": "to the Empire's ultimate weapon the Death Star"}
]
# ingest docs with auto-vectorization
table.add(data)
# Create a fts index before the hybrid search
table.create_fts_index("text")
# hybrid search with default re-ranker
results = table.search("flower moon", query_type="hybrid").to_pandas()
```
!!! Note !!! Note
You can also pass the vector and text query manually. This is useful if you're not using the embedding API or if you're using a separate embedder service. You can also pass the vector and text query manually. This is useful if you're not using the embedding API or if you're using a separate embedder service.
### Explicitly passing the vector and text query ### Explicitly passing the vector and text query
=== "Sync API" ```python
vector_query = [0.1, 0.2, 0.3, 0.4, 0.5]
text_query = "flower moon"
results = (
    table.search(query_type="hybrid")
    .vector(vector_query)
    .text(text_query)
    .limit(5)
    .to_pandas()
)
```python ```
--8<-- "python/python/tests/docs/test_search.py:hybrid_search_pass_vector_text"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_search.py:hybrid_search_pass_vector_text_async"
```
By default, LanceDB uses `RRFReranker()`, which uses the reciprocal rank fusion score, to combine and rerank the results of semantic and full-text search. You can customize the hyperparameters as needed or write your own custom reranker. Here's how you can use any of the available rerankers: By default, LanceDB uses `RRFReranker()`, which uses the reciprocal rank fusion score, to combine and rerank the results of semantic and full-text search. You can customize the hyperparameters as needed or write your own custom reranker. Here's how you can use any of the available rerankers:
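For instance, a minimal sync-API sketch of passing an explicit reranker (the query string and limit are illustrative; `RRFReranker` is used with its default hyperparameters):
```python
from lancedb.rerankers import RRFReranker

reranker = RRFReranker()
results = (
    table.search("flower moon", query_type="hybrid")
    .rerank(reranker)
    .limit(5)
    .to_pandas()
)
```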
@@ -57,7 +68,7 @@ By default, LanceDB uses `RRFReranker()`, which uses reciprocal rank fusion scor
## Available Rerankers ## Available Rerankers
LanceDB provides a number of rerankers out of the box. You can use any of these rerankers by passing them to the `rerank()` method. LanceDB provides a number of re-rankers out of the box. You can use any of these re-rankers by passing them to the `rerank()` method.
Go to [Rerankers](../reranking/index.md) to learn more about using the available rerankers and implementing custom rerankers. Go to [Rerankers](../reranking/index.md) to learn more about using the available rerankers and implementing custom rerankers.
View File
@@ -36,8 +36,41 @@ const results = await table.vectorSearch([0.1, 0.3]).limit(20).toArray();
console.log(results); console.log(results);
``` ```
The [quickstart](https://lancedb.github.io/lancedb/basic/) contains a more complete example. The [quickstart](../basic.md) contains a more complete example.
## Development ## Development
See [CONTRIBUTING.md](_media/CONTRIBUTING.md) for information on how to contribute to LanceDB. ```sh
npm run build
npm run test
```
### Running lint / format
LanceDB uses [biome](https://biomejs.dev/) for linting and formatting. If you are using VSCode, you will need to install the official [Biome](https://marketplace.visualstudio.com/items?itemName=biomejs.biome) extension.
To manually lint your code you can run:
```sh
npm run lint
```
To automatically fix all fixable issues:
```sh
npm run lint-fix
```
If you do not have your workspace root set to the `nodejs` directory, unfortunately the extension will not work. You can still run the linting and formatting commands manually.
### Generating docs
```sh
npm run docs
cd ../docs
# Assume the virtual environment was created
# python3 -m venv venv
# pip install -r requirements.txt
. ./venv/bin/activate
mkdocs build
```
View File
@@ -1,76 +0,0 @@
# Contributing to LanceDB Typescript
This document outlines the process for contributing to LanceDB Typescript.
For general contribution guidelines, see [CONTRIBUTING.md](../CONTRIBUTING.md).
## Project layout
The Typescript package is a wrapper around the Rust library, `lancedb`. We use
the [napi-rs](https://napi.rs/) library to create the bindings between Rust and
Typescript.
* `src/`: Rust bindings source code
* `lancedb/`: Typescript package source code
* `__test__/`: Unit tests
* `examples/`: An npm package with the examples shown in the documentation
## Development environment
To set up your development environment, you will need to install the following:
1. Node.js 14 or later
2. Rust's package manager, Cargo. Use [rustup](https://rustup.rs/) to install.
3. [protoc](https://grpc.io/docs/protoc-installation/) (Protocol Buffers compiler)
Initial setup:
```shell
npm install
```
### Commit Hooks
It is **highly recommended** to install the [pre-commit](https://pre-commit.com/) hooks to ensure that your
code is formatted correctly and passes basic checks before committing:
```shell
pre-commit install
```
## Development
Most common development commands can be run using the npm scripts.
Build the package:
```shell
npm install
npm run build
```
Lint:
```shell
npm run lint
```
Format and fix lints:
```shell
npm run lint-fix
```
Run tests:
```shell
npm test
```
To run a single test:
```shell
# Single file: table.test.ts
npm test -- table.test.ts
# Single test: 'merge insert' in table.test.ts
npm test -- table.test.ts --testNamePattern=merge\ insert
```
View File
@@ -23,6 +23,18 @@ be closed when they are garbage collected.
Any created tables are independent and will continue to work even if Any created tables are independent and will continue to work even if
the underlying connection has been closed. the underlying connection has been closed.
## Constructors
### new Connection()
```ts
new Connection(): Connection
```
#### Returns
[`Connection`](Connection.md)
## Methods ## Methods
### close() ### close()
@@ -59,7 +71,7 @@ Creates a new empty Table
* **name**: `string` * **name**: `string`
The name of the table. The name of the table.
* **schema**: [`SchemaLike`](../type-aliases/SchemaLike.md) * **schema**: `SchemaLike`
The schema of the table The schema of the table
* **options?**: `Partial`&lt;[`CreateTableOptions`](../interfaces/CreateTableOptions.md)&gt; * **options?**: `Partial`&lt;[`CreateTableOptions`](../interfaces/CreateTableOptions.md)&gt;
@@ -105,7 +117,7 @@ Creates a new Table and initialize it with new data.
* **name**: `string` * **name**: `string`
The name of the table. The name of the table.
* **data**: [`TableLike`](../type-aliases/TableLike.md) \| `Record`&lt;`string`, `unknown`&gt;[] * **data**: `TableLike` \| `Record`&lt;`string`, `unknown`&gt;[]
Non-empty Array of Records Non-empty Array of Records
to be inserted into the table to be inserted into the table
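#### Example

A minimal sketch (the table name and rows are illustrative):

```ts
const table = await db.createTable("my_table", [
  { id: 1, vector: [0.1, 0.2] },
  { id: 2, vector: [1.1, 1.2] },
]);
```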
@@ -131,20 +143,6 @@ Return a brief description of the connection
*** ***
### dropAllTables()
```ts
abstract dropAllTables(): Promise<void>
```
Drop all tables in the database.
#### Returns
`Promise`&lt;`void`&gt;
***
### dropTable() ### dropTable()
```ts ```ts
@@ -191,7 +189,7 @@ Open a table in the database.
* **name**: `string` * **name**: `string`
The name of the table The name of the table
* **options?**: `Partial`&lt;[`OpenTableOptions`](../interfaces/OpenTableOptions.md)&gt; * **options?**: `Partial`&lt;`OpenTableOptions`&gt;
#### Returns #### Returns
View File
@@ -72,9 +72,11 @@ The results of a full text search are ordered by relevance measured by BM25.
You can combine filters with full text search. You can combine filters with full text search.
For now, the full text search index only supports English, and doesn't support phrase search.
#### Parameters #### Parameters
* **options?**: `Partial`&lt;[`FtsOptions`](../interfaces/FtsOptions.md)&gt; * **options?**: `Partial`&lt;`FtsOptions`&gt;
#### Returns #### Returns
@@ -96,7 +98,7 @@ the vectors.
#### Parameters #### Parameters
* **options?**: `Partial`&lt;[`HnswPqOptions`](../interfaces/HnswPqOptions.md)&gt; * **options?**: `Partial`&lt;`HnswPqOptions`&gt;
#### Returns #### Returns
@@ -118,7 +120,7 @@ the vectors.
#### Parameters #### Parameters
* **options?**: `Partial`&lt;[`HnswSqOptions`](../interfaces/HnswSqOptions.md)&gt; * **options?**: `Partial`&lt;`HnswSqOptions`&gt;
#### Returns #### Returns
View File
@@ -1,126 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / MergeInsertBuilder
# Class: MergeInsertBuilder
A builder used to create and run a merge insert operation
## Constructors
### new MergeInsertBuilder()
```ts
new MergeInsertBuilder(native, schema): MergeInsertBuilder
```
Construct a MergeInsertBuilder. __Internal use only.__
#### Parameters
* **native**: `NativeMergeInsertBuilder`
* **schema**: `Schema`&lt;`any`&gt; \| `Promise`&lt;`Schema`&lt;`any`&gt;&gt;
#### Returns
[`MergeInsertBuilder`](MergeInsertBuilder.md)
## Methods
### execute()
```ts
execute(data): Promise<void>
```
Executes the merge insert operation
Nothing is returned but the `Table` is updated
#### Parameters
* **data**: [`Data`](../type-aliases/Data.md)
#### Returns
`Promise`&lt;`void`&gt;
***
### whenMatchedUpdateAll()
```ts
whenMatchedUpdateAll(options?): MergeInsertBuilder
```
Rows that exist in both the source table (new data) and
the target table (old data) will be updated, replacing
the old row with the corresponding matching row.
If there are multiple matches then the behavior is undefined.
Currently this causes multiple copies of the row to be created
but that behavior is subject to change.
An optional condition may be specified. If it is, then only
matched rows that satisfy the condition will be updated. Any
rows that do not satisfy the condition will be left as they
are. Failing to satisfy the condition does not cause a
"matched row" to become a "not matched" row.
The condition should be an SQL string. Use the prefix
`target.` to refer to rows in the target table (old data)
and the prefix `source.` to refer to rows in the source
table (new data).
For example, "target.last_update < source.last_update"
#### Parameters
* **options?**
* **options.where?**: `string`
#### Returns
[`MergeInsertBuilder`](MergeInsertBuilder.md)
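#### Example

A minimal usage sketch, assuming a table keyed by an `id` column whose rows carry a `last_update` timestamp (`newData` is hypothetical):

```ts
await table
  .mergeInsert("id")
  .whenMatchedUpdateAll({ where: "target.last_update < source.last_update" })
  .whenNotMatchedInsertAll()
  .execute(newData);
```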
***
### whenNotMatchedBySourceDelete()
```ts
whenNotMatchedBySourceDelete(options?): MergeInsertBuilder
```
Rows that exist only in the target table (old data) will be
deleted. An optional condition can be provided to limit what
data is deleted.
#### Parameters
* **options?**
* **options.where?**: `string`
An optional condition to limit what data is deleted
#### Returns
[`MergeInsertBuilder`](MergeInsertBuilder.md)
***
### whenNotMatchedInsertAll()
```ts
whenNotMatchedInsertAll(): MergeInsertBuilder
```
Rows that exist only in the source table (new data) should
be inserted into the target table.
#### Returns
[`MergeInsertBuilder`](MergeInsertBuilder.md)
View File
@@ -8,14 +8,30 @@
A builder for LanceDB queries. A builder for LanceDB queries.
## See
[Table#query](Table.md#query), [Table#search](Table.md#search)
## Extends ## Extends
- [`QueryBase`](QueryBase.md)&lt;`NativeQuery`&gt; - [`QueryBase`](QueryBase.md)&lt;`NativeQuery`&gt;
## Constructors
### new Query()
```ts
new Query(tbl): Query
```
#### Parameters
* **tbl**: `Table`
#### Returns
[`Query`](Query.md)
#### Overrides
[`QueryBase`](QueryBase.md).[`constructor`](QueryBase.md#constructors)
## Properties ## Properties
### inner ### inner
@@ -30,6 +46,42 @@ protected inner: Query | Promise<Query>;
## Methods ## Methods
### \[asyncIterator\]()
```ts
asyncIterator: AsyncIterator<RecordBatch<any>, any, undefined>
```
#### Returns
`AsyncIterator`&lt;`RecordBatch`&lt;`any`&gt;, `any`, `undefined`&gt;
#### Inherited from
[`QueryBase`](QueryBase.md).[`[asyncIterator]`](QueryBase.md#%5Basynciterator%5D)
***
### doCall()
```ts
protected doCall(fn): void
```
#### Parameters
* **fn**
#### Returns
`void`
#### Inherited from
[`QueryBase`](QueryBase.md).[`doCall`](QueryBase.md#docall)
***
### execute() ### execute()
```ts ```ts
@@ -40,7 +92,7 @@ Execute the query and return the results as an
#### Parameters #### Parameters
* **options?**: `Partial`&lt;[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)&gt; * **options?**: `Partial`&lt;`QueryExecutionOptions`&gt;
#### Returns #### Returns
@@ -109,7 +161,7 @@ fastSearch(): this
Skip searching un-indexed data. This can make search faster, but will miss Skip searching un-indexed data. This can make search faster, but will miss
any data that is not yet indexed. any data that is not yet indexed.
Use [Table#optimize](Table.md#optimize) to index all un-indexed data. Use lancedb.Table#optimize to index all un-indexed data.
#### Returns #### Returns
@@ -137,7 +189,7 @@ A filter statement to be applied to this query.
`this` `this`
#### See #### Alias
where where
@@ -161,7 +213,7 @@ fullTextSearch(query, options?): this
* **query**: `string` * **query**: `string`
* **options?**: `Partial`&lt;[`FullTextSearchOptions`](../interfaces/FullTextSearchOptions.md)&gt; * **options?**: `Partial`&lt;`FullTextSearchOptions`&gt;
#### Returns #### Returns
@@ -198,6 +250,26 @@ called then every valid row from the table will be returned.
*** ***
### nativeExecute()
```ts
protected nativeExecute(options?): Promise<RecordBatchIterator>
```
#### Parameters
* **options?**: `Partial`&lt;`QueryExecutionOptions`&gt;
#### Returns
`Promise`&lt;`RecordBatchIterator`&gt;
#### Inherited from
[`QueryBase`](QueryBase.md).[`nativeExecute`](QueryBase.md#nativeexecute)
***
### nearestTo() ### nearestTo()
```ts ```ts
@@ -222,7 +294,7 @@ If there is more than one vector column you must use
#### Parameters #### Parameters
* **vector**: [`IntoVector`](../type-aliases/IntoVector.md) * **vector**: `IntoVector`
#### Returns #### Returns
@@ -355,7 +427,7 @@ Collect the results as an array of objects.
#### Parameters #### Parameters
* **options?**: `Partial`&lt;[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)&gt; * **options?**: `Partial`&lt;`QueryExecutionOptions`&gt;
#### Returns #### Returns
@@ -377,7 +449,7 @@ Collect the results as an Arrow
#### Parameters #### Parameters
* **options?**: `Partial`&lt;[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)&gt; * **options?**: `Partial`&lt;`QueryExecutionOptions`&gt;
#### Returns #### Returns
View File
@@ -8,11 +8,6 @@
Common methods supported by all query types Common methods supported by all query types
## See
- [Query](Query.md)
- [VectorQuery](VectorQuery.md)
## Extended by ## Extended by
- [`Query`](Query.md) - [`Query`](Query.md)
@@ -26,6 +21,22 @@ Common methods supported by all query types
- `AsyncIterable`&lt;`RecordBatch`&gt; - `AsyncIterable`&lt;`RecordBatch`&gt;
## Constructors
### new QueryBase()
```ts
protected new QueryBase<NativeQueryType>(inner): QueryBase<NativeQueryType>
```
#### Parameters
* **inner**: `NativeQueryType` \| `Promise`&lt;`NativeQueryType`&gt;
#### Returns
[`QueryBase`](QueryBase.md)&lt;`NativeQueryType`&gt;
## Properties ## Properties
### inner ### inner
@@ -36,6 +47,38 @@ protected inner: NativeQueryType | Promise<NativeQueryType>;
## Methods ## Methods
### \[asyncIterator\]()
```ts
asyncIterator: AsyncIterator<RecordBatch<any>, any, undefined>
```
#### Returns
`AsyncIterator`&lt;`RecordBatch`&lt;`any`&gt;, `any`, `undefined`&gt;
#### Implementation of
`AsyncIterable.[asyncIterator]`
***
### doCall()
```ts
protected doCall(fn): void
```
#### Parameters
* **fn**
#### Returns
`void`
***
### execute() ### execute()
```ts ```ts
@@ -46,7 +89,7 @@ Execute the query and return the results as an
#### Parameters #### Parameters
* **options?**: `Partial`&lt;[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)&gt; * **options?**: `Partial`&lt;`QueryExecutionOptions`&gt;
#### Returns #### Returns
@@ -107,7 +150,7 @@ fastSearch(): this
Skip searching un-indexed data. This can make search faster, but will miss Skip searching un-indexed data. This can make search faster, but will miss
any data that is not yet indexed. any data that is not yet indexed.
Use [Table#optimize](Table.md#optimize) to index all un-indexed data. Use lancedb.Table#optimize to index all un-indexed data.
#### Returns #### Returns
@@ -131,7 +174,7 @@ A filter statement to be applied to this query.
`this` `this`
#### See #### Alias
where where
@@ -151,7 +194,7 @@ fullTextSearch(query, options?): this
* **query**: `string` * **query**: `string`
* **options?**: `Partial`&lt;[`FullTextSearchOptions`](../interfaces/FullTextSearchOptions.md)&gt; * **options?**: `Partial`&lt;`FullTextSearchOptions`&gt;
#### Returns #### Returns
@@ -180,6 +223,22 @@ called then every valid row from the table will be returned.
*** ***
### nativeExecute()
```ts
protected nativeExecute(options?): Promise<RecordBatchIterator>
```
#### Parameters
* **options?**: `Partial`&lt;`QueryExecutionOptions`&gt;
#### Returns
`Promise`&lt;`RecordBatchIterator`&gt;
***
### offset() ### offset()
```ts ```ts
@@ -255,7 +314,7 @@ Collect the results as an array of objects.
#### Parameters #### Parameters
* **options?**: `Partial`&lt;[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)&gt; * **options?**: `Partial`&lt;`QueryExecutionOptions`&gt;
#### Returns #### Returns
@@ -273,7 +332,7 @@ Collect the results as an Arrow
#### Parameters #### Parameters
* **options?**: `Partial`&lt;[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)&gt; * **options?**: `Partial`&lt;`QueryExecutionOptions`&gt;
#### Returns #### Returns
View File
@@ -14,13 +14,21 @@ will be freed when the Table is garbage collected. To eagerly free the cache yo
can call the `close` method. Once the Table is closed, it cannot be used for any can call the `close` method. Once the Table is closed, it cannot be used for any
further operations. further operations.
Tables are created using the methods [Connection#createTable](Connection.md#createtable)
and [Connection#createEmptyTable](Connection.md#createemptytable). Existing tables are opened
using [Connection#openTable](Connection.md#opentable).
Closing a table is optional. If not closed, it will be closed when it is garbage Closing a table is optional. If not closed, it will be closed when it is garbage
collected. collected.
## Constructors
### new Table()
```ts
new Table(): Table
```
#### Returns
[`Table`](Table.md)
## Accessors ## Accessors
### name ### name
@@ -208,9 +216,6 @@ Indices on vector columns will speed up vector searches.
Indices on scalar columns will speed up filtering (in both Indices on scalar columns will speed up filtering (in both
vector and non-vector searches) vector and non-vector searches)
We currently don't support custom named indexes.
The index name will always be `${column}_idx`.
#### Parameters #### Parameters
* **column**: `string` * **column**: `string`
@@ -221,6 +226,11 @@ The index name will always be `${column}_idx`.
`Promise`&lt;`void`&gt; `Promise`&lt;`void`&gt;
#### Note
We currently don't support custom named indexes,
The index name will always be `${column}_idx`
#### Examples #### Examples
```ts ```ts
@@ -307,28 +317,6 @@ then call ``cleanup_files`` to remove the old files.
*** ***
### dropIndex()
```ts
abstract dropIndex(name): Promise<void>
```
Drop an index from the table.
#### Parameters
* **name**: `string`
The name of the index.
This does not delete the index from disk; it just removes it from the table.
To delete the index, run [Table#optimize](Table.md#optimize) after dropping the index.
Use [Table.listIndices](Table.md#listindices) to find the names of the indices.
#### Returns
`Promise`&lt;`void`&gt;
***
### indexStats() ### indexStats()
```ts ```ts
@@ -348,8 +336,6 @@ List all the stats of a specified index
The stats of the index. If the index does not exist, it will return undefined The stats of the index. If the index does not exist, it will return undefined
Use [Table.listIndices](Table.md#listindices) to find the names of the indices.
*** ***
### isOpen() ### isOpen()
@@ -390,7 +376,7 @@ List all the versions of the table
#### Returns #### Returns
`Promise`&lt;[`Version`](../interfaces/Version.md)[]&gt; `Promise`&lt;`Version`[]&gt;
*** ***
@@ -406,7 +392,7 @@ abstract mergeInsert(on): MergeInsertBuilder
#### Returns #### Returns
[`MergeInsertBuilder`](MergeInsertBuilder.md) `MergeInsertBuilder`
*** ***
@@ -450,7 +436,7 @@ Modeled after ``VACUUM`` in PostgreSQL.
#### Returns #### Returns
`Promise`&lt;[`OptimizeStats`](../interfaces/OptimizeStats.md)&gt; `Promise`&lt;`OptimizeStats`&gt;
*** ***
@@ -567,7 +553,7 @@ Get the schema of the table.
abstract search( abstract search(
query, query,
queryType?, queryType?,
ftsColumns?): Query | VectorQuery ftsColumns?): VectorQuery | Query
``` ```
Create a search query to find the nearest neighbors Create a search query to find the nearest neighbors
@@ -575,7 +561,7 @@ of the given query
#### Parameters #### Parameters
* **query**: `string` \| [`IntoVector`](../type-aliases/IntoVector.md) * **query**: `string` \| `IntoVector`
the query, a vector or string the query, a vector or string
* **queryType?**: `string` * **queryType?**: `string`
@@ -589,7 +575,7 @@ of the given query
#### Returns #### Returns
[`Query`](Query.md) \| [`VectorQuery`](VectorQuery.md) [`VectorQuery`](VectorQuery.md) \| [`Query`](Query.md)
*** ***
@@ -708,7 +694,7 @@ by `query`.
#### Parameters #### Parameters
* **vector**: [`IntoVector`](../type-aliases/IntoVector.md) * **vector**: `IntoVector`
#### Returns #### Returns
@@ -731,3 +717,38 @@ Retrieve the version of the table
#### Returns #### Returns
`Promise`&lt;`number`&gt; `Promise`&lt;`number`&gt;
***
### parseTableData()
```ts
static parseTableData(
data,
options?,
streaming?): Promise<object>
```
#### Parameters
* **data**: `TableLike` \| `Record`&lt;`string`, `unknown`&gt;[]
* **options?**: `Partial`&lt;[`CreateTableOptions`](../interfaces/CreateTableOptions.md)&gt;
* **streaming?**: `boolean` = `false`
#### Returns
`Promise`&lt;`object`&gt;
##### buf
```ts
buf: Buffer;
```
##### mode
```ts
mode: string;
```
View File
@@ -10,14 +10,30 @@ A builder used to construct a vector search
This builder can be reused to execute the query many times. This builder can be reused to execute the query many times.
## See
[Query#nearestTo](Query.md#nearestto)
## Extends ## Extends
- [`QueryBase`](QueryBase.md)&lt;`NativeVectorQuery`&gt; - [`QueryBase`](QueryBase.md)&lt;`NativeVectorQuery`&gt;
## Constructors
### new VectorQuery()
```ts
new VectorQuery(inner): VectorQuery
```
#### Parameters
* **inner**: `VectorQuery` \| `Promise`&lt;`VectorQuery`&gt;
#### Returns
[`VectorQuery`](VectorQuery.md)
#### Overrides
[`QueryBase`](QueryBase.md).[`constructor`](QueryBase.md#constructors)
## Properties ## Properties
### inner ### inner
@@ -32,6 +48,22 @@ protected inner: VectorQuery | Promise<VectorQuery>;
## Methods ## Methods
### \[asyncIterator\]()
```ts
asyncIterator: AsyncIterator<RecordBatch<any>, any, undefined>
```
#### Returns
`AsyncIterator`&lt;`RecordBatch`&lt;`any`&gt;, `any`, `undefined`&gt;
#### Inherited from
[`QueryBase`](QueryBase.md).[`[asyncIterator]`](QueryBase.md#%5Basynciterator%5D)
***
### addQueryVector() ### addQueryVector()
```ts ```ts
@@ -40,7 +72,7 @@ addQueryVector(vector): VectorQuery
#### Parameters #### Parameters
* **vector**: [`IntoVector`](../type-aliases/IntoVector.md) * **vector**: `IntoVector`
#### Returns #### Returns
@@ -96,24 +128,6 @@ whose data type is a fixed-size-list of floats.
*** ***
### distanceRange()
```ts
distanceRange(lowerBound?, upperBound?): VectorQuery
```
#### Parameters
* **lowerBound?**: `number`
* **upperBound?**: `number`
#### Returns
[`VectorQuery`](VectorQuery.md)
***
### distanceType() ### distanceType()
```ts ```ts
@@ -147,6 +161,26 @@ By default "l2" is used.
*** ***
### doCall()
```ts
protected doCall(fn): void
```
#### Parameters
* **fn**
#### Returns
`void`
#### Inherited from
[`QueryBase`](QueryBase.md).[`doCall`](QueryBase.md#docall)
***
### ef() ### ef()
```ts ```ts
@@ -181,7 +215,7 @@ Execute the query and return the results as an
#### Parameters #### Parameters
* **options?**: `Partial`&lt;[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)&gt; * **options?**: `Partial`&lt;`QueryExecutionOptions`&gt;
#### Returns #### Returns
@@ -250,7 +284,7 @@ fastSearch(): this
Skip searching un-indexed data. This can make search faster, but will miss Skip searching un-indexed data. This can make search faster, but will miss
any data that is not yet indexed. any data that is not yet indexed.
Use [Table#optimize](Table.md#optimize) to index all un-indexed data. Use lancedb.Table#optimize to index all un-indexed data.
#### Returns #### Returns
@@ -278,7 +312,7 @@ A filter statement to be applied to this query.
`this` `this`
#### See #### Alias
where where
@@ -302,7 +336,7 @@ fullTextSearch(query, options?): this
* **query**: `string` * **query**: `string`
* **options?**: `Partial`&lt;[`FullTextSearchOptions`](../interfaces/FullTextSearchOptions.md)&gt; * **options?**: `Partial`&lt;`FullTextSearchOptions`&gt;
#### Returns #### Returns
@@ -339,6 +373,26 @@ called then every valid row from the table will be returned.
*** ***
### nativeExecute()
```ts
protected nativeExecute(options?): Promise<RecordBatchIterator>
```
#### Parameters
* **options?**: `Partial`&lt;`QueryExecutionOptions`&gt;
#### Returns
`Promise`&lt;`RecordBatchIterator`&gt;
#### Inherited from
[`QueryBase`](QueryBase.md).[`nativeExecute`](QueryBase.md#nativeexecute)
***
### nprobes() ### nprobes()
```ts ```ts
@@ -474,22 +528,6 @@ distance between the query vector and the actual uncompressed vector.
*** ***
### rerank()
```ts
rerank(reranker): VectorQuery
```
#### Parameters
* **reranker**: [`Reranker`](../namespaces/rerankers/interfaces/Reranker.md)
#### Returns
[`VectorQuery`](VectorQuery.md)
***
### select() ### select()
```ts ```ts
@@ -553,7 +591,7 @@ Collect the results as an array of objects.
#### Parameters #### Parameters
* **options?**: `Partial`&lt;[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)&gt; * **options?**: `Partial`&lt;`QueryExecutionOptions`&gt;
#### Returns #### Returns
@@ -575,7 +613,7 @@ Collect the results as an Arrow
#### Parameters #### Parameters
* **options?**: `Partial`&lt;[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)&gt; * **options?**: `Partial`&lt;`QueryExecutionOptions`&gt;
#### Returns #### Returns
View File
@@ -0,0 +1,33 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / WriteMode
# Enumeration: WriteMode
Write mode for writing a table.
## Enumeration Members
### Append
```ts
Append: "Append";
```
***
### Create
```ts
Create: "Create";
```
***
### Overwrite
```ts
Overwrite: "Overwrite";
```
View File
@@ -6,10 +6,10 @@
# Function: connect() # Function: connect()
## connect(uri, options) ## connect(uri, opts)
```ts ```ts
function connect(uri, options?): Promise<Connection> function connect(uri, opts?): Promise<Connection>
``` ```
Connect to a LanceDB instance at the given URI. Connect to a LanceDB instance at the given URI.
@@ -26,8 +26,7 @@ Accepted formats:
The uri of the database. If the database uri starts The uri of the database. If the database uri starts
with `db://` then it connects to a remote database. with `db://` then it connects to a remote database.
* **options?**: `Partial`&lt;[`ConnectionOptions`](../interfaces/ConnectionOptions.md)&gt; * **opts?**: `Partial`&lt;[`ConnectionOptions`](../interfaces/ConnectionOptions.md)&gt;
The options to use when connecting to the database
### Returns ### Returns
@@ -50,10 +49,10 @@ const conn = await connect(
}); });
``` ```
## connect(options) ## connect(opts)
```ts ```ts
function connect(options): Promise<Connection> function connect(opts): Promise<Connection>
``` ```
Connect to a LanceDB instance at the given URI. Connect to a LanceDB instance at the given URI.
@@ -66,8 +65,7 @@ Accepted formats:
### Parameters ### Parameters
* **options**: `Partial`&lt;[`ConnectionOptions`](../interfaces/ConnectionOptions.md)&gt; & `object` * **opts**: `Partial`&lt;[`ConnectionOptions`](../interfaces/ConnectionOptions.md)&gt; & `object`
The options to use when connecting to the database
### Returns ### Returns
View File
@@ -22,6 +22,8 @@ when creating a table or adding data to it)
This function converts an array of Record<String, any> (row-major JS objects) This function converts an array of Record<String, any> (row-major JS objects)
to an Arrow Table (a columnar structure) to an Arrow Table (a columnar structure)
Note that it currently does not support nulls.
If a schema is provided then it will be used to determine the resulting array If a schema is provided then it will be used to determine the resulting array
types. Fields will also be reordered to fit the order defined by the schema. types. Fields will also be reordered to fit the order defined by the schema.
@@ -29,9 +31,6 @@ If a schema is not provided then the types will be inferred and the field order
will be controlled by the order of properties in the first record. If a type will be controlled by the order of properties in the first record. If a type
is inferred it will always be nullable. is inferred it will always be nullable.
If not all fields are found in the data, then a subset of the schema will be
returned.
If the input is empty then a schema must be provided to create an empty table. If the input is empty then a schema must be provided to create an empty table.
When a schema is not specified then data types will be inferred. The inference When a schema is not specified then data types will be inferred. The inference
@@ -39,7 +38,6 @@ rules are as follows:
- boolean => Bool - boolean => Bool
- number => Float64 - number => Float64
- bigint => Int64
- String => Utf8 - String => Utf8
- Buffer => Binary - Buffer => Binary
- Record<String, any> => Struct - Record<String, any> => Struct
@@ -59,7 +57,6 @@ rules are as follows:
## Example ## Example
```ts
import { fromTableToBuffer, makeArrowTable } from "../arrow"; import { fromTableToBuffer, makeArrowTable } from "../arrow";
import { Field, FixedSizeList, Float16, Float32, Int32, Schema } from "apache-arrow"; import { Field, FixedSizeList, Float16, Float32, Int32, Schema } from "apache-arrow";
@@ -81,40 +78,42 @@ The `vectorColumns` option can be used to support other vector column
names and data types. names and data types.
```ts ```ts
const schema = new Schema([ const schema = new Schema([
new Field("a", new Float64()), new Field("a", new Float64()),
new Field("b", new Float64()), new Field("b", new Float64()),
new Field( new Field(
"vector", "vector",
new FixedSizeList(3, new Field("item", new Float32())) new FixedSizeList(3, new Field("item", new Float32()))
), ),
]); ]);
const table = makeArrowTable([ const table = makeArrowTable([
{ a: 1, b: 2, vector: [1, 2, 3] }, { a: 1, b: 2, vector: [1, 2, 3] },
{ a: 4, b: 5, vector: [4, 5, 6] }, { a: 4, b: 5, vector: [4, 5, 6] },
{ a: 7, b: 8, vector: [7, 8, 9] }, { a: 7, b: 8, vector: [7, 8, 9] },
]); ]);
assert.deepEqual(table.schema, schema); assert.deepEqual(table.schema, schema);
``` ```
You can specify the vector column types and names using the options as well You can specify the vector column types and names using the options as well
```ts ```typescript
const schema = new Schema([ const schema = new Schema([
new Field('a', new Float64()), new Field('a', new Float64()),
new Field('b', new Float64()), new Field('b', new Float64()),
new Field('vec1', new FixedSizeList(3, new Field('item', new Float16()))), new Field('vec1', new FixedSizeList(3, new Field('item', new Float16()))),
new Field('vec2', new FixedSizeList(3, new Field('item', new Float16()))) new Field('vec2', new FixedSizeList(3, new Field('item', new Float16())))
]); ]);
const table = makeArrowTable([ const table = makeArrowTable([
{ a: 1, b: 2, vec1: [1, 2, 3], vec2: [2, 4, 6] }, { a: 1, b: 2, vec1: [1, 2, 3], vec2: [2, 4, 6] },
{ a: 4, b: 5, vec1: [4, 5, 6], vec2: [8, 10, 12] }, { a: 4, b: 5, vec1: [4, 5, 6], vec2: [8, 10, 12] },
{ a: 7, b: 8, vec1: [7, 8, 9], vec2: [14, 16, 18] } { a: 7, b: 8, vec1: [7, 8, 9], vec2: [14, 16, 18] }
], { ], {
vectorColumns: { vectorColumns: {
vec1: { type: new Float16() }, vec1: { type: new Float16() },
vec2: { type: new Float16() } vec2: { type: new Float16() }
} }
} }
assert.deepEqual(table.schema, schema) assert.deepEqual(table.schema, schema)
``` ```
View File
@@ -7,14 +7,16 @@
## Namespaces ## Namespaces
- [embedding](namespaces/embedding/README.md) - [embedding](namespaces/embedding/README.md)
- [rerankers](namespaces/rerankers/README.md)
## Enumerations
- [WriteMode](enumerations/WriteMode.md)
## Classes ## Classes
- [Connection](classes/Connection.md) - [Connection](classes/Connection.md)
- [Index](classes/Index.md) - [Index](classes/Index.md)
- [MakeArrowTableOptions](classes/MakeArrowTableOptions.md) - [MakeArrowTableOptions](classes/MakeArrowTableOptions.md)
- [MergeInsertBuilder](classes/MergeInsertBuilder.md)
- [Query](classes/Query.md) - [Query](classes/Query.md)
- [QueryBase](classes/QueryBase.md) - [QueryBase](classes/QueryBase.md)
- [RecordBatchIterator](classes/RecordBatchIterator.md) - [RecordBatchIterator](classes/RecordBatchIterator.md)
@@ -28,39 +30,23 @@
- [AddDataOptions](interfaces/AddDataOptions.md) - [AddDataOptions](interfaces/AddDataOptions.md)
- [ClientConfig](interfaces/ClientConfig.md) - [ClientConfig](interfaces/ClientConfig.md)
- [ColumnAlteration](interfaces/ColumnAlteration.md) - [ColumnAlteration](interfaces/ColumnAlteration.md)
- [CompactionStats](interfaces/CompactionStats.md)
- [ConnectionOptions](interfaces/ConnectionOptions.md) - [ConnectionOptions](interfaces/ConnectionOptions.md)
- [CreateTableOptions](interfaces/CreateTableOptions.md) - [CreateTableOptions](interfaces/CreateTableOptions.md)
- [ExecutableQuery](interfaces/ExecutableQuery.md) - [ExecutableQuery](interfaces/ExecutableQuery.md)
- [FtsOptions](interfaces/FtsOptions.md)
- [FullTextSearchOptions](interfaces/FullTextSearchOptions.md)
- [HnswPqOptions](interfaces/HnswPqOptions.md)
- [HnswSqOptions](interfaces/HnswSqOptions.md)
- [IndexConfig](interfaces/IndexConfig.md) - [IndexConfig](interfaces/IndexConfig.md)
- [IndexOptions](interfaces/IndexOptions.md) - [IndexOptions](interfaces/IndexOptions.md)
- [IndexStatistics](interfaces/IndexStatistics.md) - [IndexStatistics](interfaces/IndexStatistics.md)
- [IvfPqOptions](interfaces/IvfPqOptions.md) - [IvfPqOptions](interfaces/IvfPqOptions.md)
- [OpenTableOptions](interfaces/OpenTableOptions.md)
- [OptimizeOptions](interfaces/OptimizeOptions.md) - [OptimizeOptions](interfaces/OptimizeOptions.md)
- [OptimizeStats](interfaces/OptimizeStats.md)
- [QueryExecutionOptions](interfaces/QueryExecutionOptions.md)
- [RemovalStats](interfaces/RemovalStats.md)
- [RetryConfig](interfaces/RetryConfig.md) - [RetryConfig](interfaces/RetryConfig.md)
- [TableNamesOptions](interfaces/TableNamesOptions.md) - [TableNamesOptions](interfaces/TableNamesOptions.md)
- [TimeoutConfig](interfaces/TimeoutConfig.md) - [TimeoutConfig](interfaces/TimeoutConfig.md)
- [UpdateOptions](interfaces/UpdateOptions.md) - [UpdateOptions](interfaces/UpdateOptions.md)
- [Version](interfaces/Version.md) - [WriteOptions](interfaces/WriteOptions.md)
## Type Aliases ## Type Aliases
- [Data](type-aliases/Data.md) - [Data](type-aliases/Data.md)
- [DataLike](type-aliases/DataLike.md)
- [FieldLike](type-aliases/FieldLike.md)
- [IntoSql](type-aliases/IntoSql.md)
- [IntoVector](type-aliases/IntoVector.md)
- [RecordBatchLike](type-aliases/RecordBatchLike.md)
- [SchemaLike](type-aliases/SchemaLike.md)
- [TableLike](type-aliases/TableLike.md)
## Functions ## Functions
View File
@@ -8,14 +8,6 @@
## Properties ## Properties
### extraHeaders?
```ts
optional extraHeaders: Record<string, string>;
```
***
### retryConfig? ### retryConfig?
```ts ```ts
View File
@@ -1,49 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / CompactionStats
# Interface: CompactionStats
Statistics about a compaction operation.
## Properties
### filesAdded
```ts
filesAdded: number;
```
The number of new, compacted data files added
***
### filesRemoved
```ts
filesRemoved: number;
```
The number of data files removed
***
### fragmentsAdded
```ts
fragmentsAdded: number;
```
The number of new, compacted fragments added
***
### fragmentsRemoved
```ts
fragmentsRemoved: number;
```
The number of fragments removed
View File
@@ -8,7 +8,7 @@
## Properties ## Properties
### ~~dataStorageVersion?~~ ### dataStorageVersion?
```ts ```ts
optional dataStorageVersion: string; optional dataStorageVersion: string;
@@ -19,10 +19,6 @@ The version of the data storage format to use.
The default is `stable`. The default is `stable`.
Set to "legacy" to use the old format. Set to "legacy" to use the old format.
#### Deprecated
Pass `new_table_data_storage_version` to storageOptions instead.
*** ***
### embeddingFunction? ### embeddingFunction?
@@ -33,7 +29,7 @@ optional embeddingFunction: EmbeddingFunctionConfig;
*** ***
### ~~enableV2ManifestPaths?~~ ### enableV2ManifestPaths?
```ts ```ts
optional enableV2ManifestPaths: boolean; optional enableV2ManifestPaths: boolean;
@@ -45,10 +41,6 @@ turning this on will make the dataset unreadable for older versions
of LanceDB (prior to 0.10.0). To migrate an existing dataset, instead of LanceDB (prior to 0.10.0). To migrate an existing dataset, instead
use the LocalTable#migrateManifestPathsV2 method. use the LocalTable#migrateManifestPathsV2 method.
#### Deprecated
Pass `new_table_enable_v2_manifest_paths` to storageOptions instead.
*** ***
### existOk ### existOk
@@ -98,3 +90,17 @@ Options already set on the connection will be inherited by the table,
but can be overridden here. but can be overridden here.
The available options are described at https://lancedb.github.io/lancedb/guides/storage/ The available options are described at https://lancedb.github.io/lancedb/guides/storage/
***
### useLegacyFormat?
```ts
optional useLegacyFormat: boolean;
```
If true, data files will be written with the legacy format. The default is false.
Deprecated. Use the data storage version option instead.

View File

@@ -1,103 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / FtsOptions
# Interface: FtsOptions
Options to create a full text search index
## Properties
### asciiFolding?
```ts
optional asciiFolding: boolean;
```
whether to fold non-ASCII characters into their ASCII equivalents where possible (e.g. "é" becomes "e")
***
### baseTokenizer?
```ts
optional baseTokenizer: "raw" | "simple" | "whitespace";
```
The tokenizer to use when building the index.
The default is "simple".
The following tokenizers are available:
"simple" - Simple tokenizer. This tokenizer splits the text into tokens using whitespace and punctuation as a delimiter.
"whitespace" - Whitespace tokenizer. This tokenizer splits the text into tokens using whitespace as a delimiter.
"raw" - Raw tokenizer. This tokenizer does not split the text into tokens and indexes the entire text as a single token.
***
### language?
```ts
optional language: string;
```
language for stemming and stop words
this is only used when `stem` or `remove_stop_words` is true
***
### lowercase?
```ts
optional lowercase: boolean;
```
whether to lowercase tokens
***
### maxTokenLength?
```ts
optional maxTokenLength: number;
```
maximum token length
tokens longer than this length will be ignored
***
### removeStopWords?
```ts
optional removeStopWords: boolean;
```
whether to remove stop words
***
### stem?
```ts
optional stem: boolean;
```
whether to stem tokens
***
### withPosition?
```ts
optional withPosition: boolean;
```
Whether to build the index with positions.
True by default.
If set to false, the index will not store the positions of the tokens in the text,
which will make the index smaller and faster to build, but will not support phrase queries.
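To ground these options, here is a minimal sketch of building a full text search index with a custom tokenizer configuration. It assumes the `Index.fts` helper from `@lancedb/lancedb` accepts these options, and a table `"my_table"` with a `"text"` column; adjust names to your schema.
```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("data/sample-lancedb");
const tbl = await db.openTable("my_table");

// Trade phrase-query support for a smaller, faster-to-build index.
await tbl.createIndex("text", {
  config: lancedb.Index.fts({
    baseTokenizer: "simple",
    lowercase: true,
    stem: true,
    withPosition: false,
  }),
});
```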

View File

@@ -1,22 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / FullTextSearchOptions
# Interface: FullTextSearchOptions
Options that control the behavior of a full text search
## Properties
### columns?
```ts
optional columns: string | string[];
```
The columns to search
If not specified, all indexed columns will be searched.
For now, only one column can be searched.
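As a usage sketch, assuming the query builder's `fullTextSearch` method accepts these options and that the `"text"` column has an FTS index:
```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("data/sample-lancedb");
const tbl = await db.openTable("my_table");

// Search only the indexed "text" column; per the note above,
// only a single column can be searched for now.
const hits = await tbl
  .query()
  .fullTextSearch("vector database", { columns: "text" })
  .limit(10)
  .toArray();
```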

View File

@@ -1,149 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / HnswPqOptions
# Interface: HnswPqOptions
Options to create an `HNSW_PQ` index
## Properties
### distanceType?
```ts
optional distanceType: "l2" | "cosine" | "dot";
```
The distance metric used to train the index.
Default value is "l2".
The following distance types are available:
"l2" - Euclidean distance. This is a very common distance metric that
accounts for both magnitude and direction when determining the distance
between vectors. L2 distance has a range of [0, ∞).
"cosine" - Cosine distance. Cosine distance is a distance metric
calculated from the cosine similarity between two vectors. Cosine
similarity is a measure of similarity between two non-zero vectors of an
inner product space. It is defined to equal the cosine of the angle
between them. Unlike L2, the cosine distance is not affected by the
magnitude of the vectors. Cosine distance has a range of [0, 2].
"dot" - Dot product. Dot distance is the dot product of two vectors. Dot
distance has a range of (-∞, ∞). If the vectors are normalized (i.e. their
L2 norm is 1), then dot distance is equivalent to the cosine distance.
***
### efConstruction?
```ts
optional efConstruction: number;
```
The number of candidates to evaluate during the construction of the HNSW graph.
The default value is 300.
This value controls the tradeoff between build speed and accuracy.
The higher the value the more accurate the build but the slower it will be.
150 to 300 is the typical range. 100 is a minimum for good quality search
results. In most cases, there is no benefit to setting this higher than 500.
This value should be set to a value that is not less than `ef` in the search phase.
***
### m?
```ts
optional m: number;
```
The number of neighbors to select for each vector in the HNSW graph.
The default value is 20.
This value controls the tradeoff between search speed and accuracy.
The higher the value the more accurate the search but the slower it will be.
***
### maxIterations?
```ts
optional maxIterations: number;
```
Max iterations to train kmeans.
The default value is 50.
When training an IVF index we use kmeans to calculate the partitions. This parameter
controls how many iterations of kmeans to run.
Increasing this might improve the quality of the index but in most cases the parameter
is unused because kmeans will converge with fewer iterations. The parameter is only
used in cases where kmeans does not appear to converge. In those cases it is unlikely
that setting this larger will lead to the index converging anyways.
***
### numPartitions?
```ts
optional numPartitions: number;
```
The number of IVF partitions to create.
For HNSW, we recommend a small number of partitions. Setting this to 1 works
well for most tables. For very large tables, training just one HNSW graph
will require too much memory. Each partition becomes its own HNSW graph, so
setting this value higher reduces the peak memory use of training.
***
### numSubVectors?
```ts
optional numSubVectors: number;
```
Number of sub-vectors of PQ.
This value controls how much the vector is compressed during the quantization step.
The more sub vectors there are the less the vector is compressed. The default is
the dimension of the vector divided by 16. If the dimension is not evenly divisible
by 16 we use the dimension divided by 8.
The above two cases are highly preferred. Having 8 or 16 values per subvector allows
us to use efficient SIMD instructions.
If the dimension is not divisible by 8 then we use 1 subvector. This is not ideal and
will likely result in poor performance.
***
### sampleRate?
```ts
optional sampleRate: number;
```
The rate used to calculate the number of training vectors for kmeans.
Default value is 256.
When an IVF index is trained, we need to calculate partitions. These are groups
of vectors that are similar to each other. To do this we use an algorithm called kmeans.
Running kmeans on a large dataset can be slow. To speed this up we run kmeans on a
random sample of the data. This parameter controls the size of the sample. The total
number of vectors used to train the index is `sample_rate * num_partitions`.
Increasing this value might improve the quality of the index but in most cases the
default should be sufficient.
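A sketch of creating an `HNSW_PQ` index with these options; it assumes the `Index.hnswPq` helper and a 128-dimensional `vector` column (so `numSubVectors: 8` gives 16 values per sub-vector, which suits SIMD):
```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("data/sample-lancedb");
const tbl = await db.openTable("my_table");

// One partition (one HNSW graph) is fine for most tables; raise
// numPartitions only when training a single graph needs too much memory.
await tbl.createIndex("vector", {
  config: lancedb.Index.hnswPq({
    distanceType: "cosine",
    m: 20,
    efConstruction: 300,
    numPartitions: 1,
    numSubVectors: 8, // 128-dim vectors -> 16 values per sub-vector
  }),
});
```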

View File

@@ -1,128 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / HnswSqOptions
# Interface: HnswSqOptions
Options to create an `HNSW_SQ` index
## Properties
### distanceType?
```ts
optional distanceType: "l2" | "cosine" | "dot";
```
The distance metric used to train the index.
Default value is "l2".
The following distance types are available:
"l2" - Euclidean distance. This is a very common distance metric that
accounts for both magnitude and direction when determining the distance
between vectors. L2 distance has a range of [0, ∞).
"cosine" - Cosine distance. Cosine distance is a distance metric
calculated from the cosine similarity between two vectors. Cosine
similarity is a measure of similarity between two non-zero vectors of an
inner product space. It is defined to equal the cosine of the angle
between them. Unlike L2, the cosine distance is not affected by the
magnitude of the vectors. Cosine distance has a range of [0, 2].
"dot" - Dot product. Dot distance is the dot product of two vectors. Dot
distance has a range of (-∞, ∞). If the vectors are normalized (i.e. their
L2 norm is 1), then dot distance is equivalent to the cosine distance.
***
### efConstruction?
```ts
optional efConstruction: number;
```
The number of candidates to evaluate during the construction of the HNSW graph.
The default value is 300.
This value controls the tradeoff between build speed and accuracy.
The higher the value the more accurate the build but the slower it will be.
150 to 300 is the typical range. 100 is a minimum for good quality search
results. In most cases, there is no benefit to setting this higher than 500.
This value should be set to a value that is not less than `ef` in the search phase.
***
### m?
```ts
optional m: number;
```
The number of neighbors to select for each vector in the HNSW graph.
The default value is 20.
This value controls the tradeoff between search speed and accuracy.
The higher the value the more accurate the search but the slower it will be.
***
### maxIterations?
```ts
optional maxIterations: number;
```
Max iterations to train kmeans.
The default value is 50.
When training an IVF index we use kmeans to calculate the partitions. This parameter
controls how many iterations of kmeans to run.
Increasing this might improve the quality of the index but in most cases the parameter
is unused because kmeans will converge with fewer iterations. The parameter is only
used in cases where kmeans does not appear to converge. In those cases it is unlikely
that setting this larger will lead to the index converging anyways.
***
### numPartitions?
```ts
optional numPartitions: number;
```
The number of IVF partitions to create.
For HNSW, we recommend a small number of partitions. Setting this to 1 works
well for most tables. For very large tables, training just one HNSW graph
will require too much memory. Each partition becomes its own HNSW graph, so
setting this value higher reduces the peak memory use of training.
***
### sampleRate?
```ts
optional sampleRate: number;
```
The rate used to calculate the number of training vectors for kmeans.
Default value is 256.
When an IVF index is trained, we need to calculate partitions. These are groups
of vectors that are similar to each other. To do this we use an algorithm called kmeans.
Running kmeans on a large dataset can be slow. To speed this up we run kmeans on a
random sample of the data. This parameter controls the size of the sample. The total
number of vectors used to train the index is `sample_rate * num_partitions`.
Increasing this value might improve the quality of the index but in most cases the
default should be sufficient.
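The same pattern applies for `HNSW_SQ`; scalar quantization compresses each dimension independently, so there is no `numSubVectors` knob. Again a sketch, assuming the `Index.hnswSq` helper:
```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("data/sample-lancedb");
const tbl = await db.openTable("my_table");

await tbl.createIndex("vector", {
  config: lancedb.Index.hnswSq({
    distanceType: "l2",
    m: 20,
    efConstruction: 300,
    numPartitions: 1,
  }),
});
```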

View File

@@ -68,21 +68,6 @@ The default value is 50.
*** ***
### numBits?
```ts
optional numBits: number;
```
Number of bits per sub-vector.
This value controls how much each subvector is compressed. The more bits the more
accurate the index will be, but the slower the search. The default is 8 bits.
The number of bits must be 4 or 8.
***
### numPartitions? ### numPartitions?
```ts ```ts

View File

@@ -1,40 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / OpenTableOptions
# Interface: OpenTableOptions
## Properties
### indexCacheSize?
```ts
optional indexCacheSize: number;
```
Set the size of the index cache, specified as a number of entries.
The exact meaning of an "entry" will depend on the type of index:
- IVF: there is one entry for each IVF partition
- BTREE: there is one entry for the entire index
This cache applies to the entire opened table, across all indices.
Setting this value higher will increase performance on larger datasets
at the expense of more RAM.
***
### storageOptions?
```ts
optional storageOptions: Record<string, string>;
```
Configuration for object storage.
Options already set on the connection will be inherited by the table,
but can be overridden here.
The available options are described at https://lancedb.github.io/lancedb/guides/storage/
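A sketch of passing both options when opening a table; the storage option key shown is illustrative, see the storage guide linked above for the full list:
```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("s3://my-bucket/lancedb");

// A larger index cache trades RAM for query performance on big datasets.
const tbl = await db.openTable("my_table", {
  indexCacheSize: 512,
  storageOptions: { timeout: "60s" }, // illustrative key; see the storage guide
});
```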

View File

@@ -1,29 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / OptimizeStats
# Interface: OptimizeStats
Statistics about an optimize operation
## Properties
### compaction
```ts
compaction: CompactionStats;
```
Statistics about the compaction operation
***
### prune
```ts
prune: RemovalStats;
```
Statistics about the removal operation
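A sketch of where these statistics come from, assuming `Table.optimize()` returns an `OptimizeStats`:
```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("data/sample-lancedb");
const tbl = await db.openTable("my_table");

const stats = await tbl.optimize();
// CompactionStats: small fragments rewritten into larger ones.
console.log(`compacted ${stats.compaction.fragmentsRemoved} fragments into ${stats.compaction.fragmentsAdded}`);
// RemovalStats: old table versions cleaned up.
console.log(`pruned ${stats.prune.oldVersionsRemoved} old versions, freeing ${stats.prune.bytesRemoved} bytes`);
```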

View File

@@ -1,22 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / QueryExecutionOptions
# Interface: QueryExecutionOptions
Options that control the behavior of a particular query execution
## Properties
### maxBatchLength?
```ts
optional maxBatchLength: number;
```
The maximum number of rows to return in a single batch
Batches may have fewer rows if the underlying data is stored
in smaller chunks.
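A sketch of passing this when executing a query, assuming the query's `execute` method accepts `QueryExecutionOptions` and yields Arrow record batches:
```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("data/sample-lancedb");
const tbl = await db.openTable("my_table");

// Cap each streamed batch at 512 rows; batches may still be smaller
// if the underlying data is stored in smaller chunks.
for await (const batch of tbl.query().limit(10_000).execute({ maxBatchLength: 512 })) {
  console.log(batch.numRows);
}
```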

View File

@@ -1,29 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / RemovalStats
# Interface: RemovalStats
Statistics about a cleanup operation
## Properties
### bytesRemoved
```ts
bytesRemoved: number;
```
The number of bytes removed
***
### oldVersionsRemoved
```ts
oldVersionsRemoved: number;
```
The number of old versions removed

View File

@@ -1,31 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / Version
# Interface: Version
## Properties
### metadata
```ts
metadata: Record<string, string>;
```
***
### timestamp
```ts
timestamp: Date;
```
***
### version
```ts
version: number;
```
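A sketch of reading version history; the `listVersions()` accessor is an assumption here:
```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("data/sample-lancedb");
const tbl = await db.openTable("my_table");

const versions = await tbl.listVersions(); // assumed accessor returning Version[]
for (const v of versions) {
  console.log(v.version, v.timestamp.toISOString(), v.metadata);
}
```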

View File

@@ -0,0 +1,19 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / WriteOptions
# Interface: WriteOptions
Write options when creating a Table.
## Properties
### mode?
```ts
optional mode: WriteMode;
```
Write mode for writing to a table.
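A hedged sketch of how a write mode might be supplied when creating a table; the exact parameter shape (`mode` as a string vs. a `WriteMode` value) depends on the API version, so treat this as illustrative:
```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("data/sample-lancedb");

// "overwrite" replaces an existing table of the same name instead of erroring.
const tbl = await db.createTable(
  "my_table",
  [{ id: 1, vector: [0.1, 0.2] }],
  { mode: "overwrite" },
);
```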

View File

@@ -17,14 +17,6 @@
### Interfaces ### Interfaces
- [EmbeddingFunctionConfig](interfaces/EmbeddingFunctionConfig.md) - [EmbeddingFunctionConfig](interfaces/EmbeddingFunctionConfig.md)
- [EmbeddingFunctionConstructor](interfaces/EmbeddingFunctionConstructor.md)
- [EmbeddingFunctionCreate](interfaces/EmbeddingFunctionCreate.md)
- [FieldOptions](interfaces/FieldOptions.md)
- [FunctionOptions](interfaces/FunctionOptions.md)
### Type Aliases
- [CreateReturnType](type-aliases/CreateReturnType.md)
### Functions ### Functions

View File

@@ -8,23 +8,6 @@
An embedding function that automatically creates vector representation for a given column. An embedding function that automatically creates vector representation for a given column.
It's important that subclasses pass the **original** options to the super constructor
and then pass those options to `resolveVariables` to resolve any variables before
using them.
## Example
```ts
class MyEmbeddingFunction extends EmbeddingFunction {
constructor(optionsRaw: {model: string, timeout: number}) {
super(optionsRaw);
const options = this.resolveVariables(optionsRaw);
this.model = options.model;
this.timeout = options.timeout;
}
}
```
## Extended by ## Extended by
- [`TextEmbeddingFunction`](TextEmbeddingFunction.md) - [`TextEmbeddingFunction`](TextEmbeddingFunction.md)
@@ -33,7 +16,7 @@ class MyEmbeddingFunction extends EmbeddingFunction {
**T** = `any` **T** = `any`
**M** *extends* [`FunctionOptions`](../interfaces/FunctionOptions.md) = [`FunctionOptions`](../interfaces/FunctionOptions.md) **M** *extends* `FunctionOptions` = `FunctionOptions`
## Constructors ## Constructors
@@ -99,33 +82,12 @@ The datatype of the embeddings
*** ***
### getSensitiveKeys()
```ts
protected getSensitiveKeys(): string[]
```
Provide a list of keys in the function options that should be treated as
sensitive. If users pass raw values for these keys, they will be rejected.
#### Returns
`string`[]
***
### init()? ### init()?
```ts ```ts
optional init(): Promise<void> optional init(): Promise<void>
``` ```
Optionally load any resources needed for the embedding function.
This method is called after the embedding function has been initialized
but before any embeddings are computed. It is useful for loading local models
or other resources that are needed for the embedding function to work.
#### Returns #### Returns
`Promise`&lt;`void`&gt; `Promise`&lt;`void`&gt;
@@ -146,24 +108,6 @@ The number of dimensions of the embeddings
*** ***
### resolveVariables()
```ts
protected resolveVariables(config): Partial<M>
```
Apply variables to the config.
#### Parameters
* **config**: `Partial`&lt;`M`&gt;
#### Returns
`Partial`&lt;`M`&gt;
***
### sourceField() ### sourceField()
```ts ```ts
@@ -174,31 +118,53 @@ sourceField is used in combination with `LanceSchema` to provide a declarative d
#### Parameters #### Parameters
* **optionsOrDatatype**: `DataType`&lt;`Type`, `any`&gt; \| `Partial`&lt;[`FieldOptions`](../interfaces/FieldOptions.md)&lt;`DataType`&lt;`Type`, `any`&gt;&gt;&gt; * **optionsOrDatatype**: `DataType`&lt;`Type`, `any`&gt; \| `Partial`&lt;`FieldOptions`&lt;`DataType`&lt;`Type`, `any`&gt;&gt;&gt;
The options for the field or the datatype The options for the field or the datatype
#### Returns #### Returns
[`DataType`&lt;`Type`, `any`&gt;, `Map`&lt;`string`, [`EmbeddingFunction`](EmbeddingFunction.md)&lt;`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)&gt;&gt;] [`DataType`&lt;`Type`, `any`&gt;, `Map`&lt;`string`, [`EmbeddingFunction`](EmbeddingFunction.md)&lt;`any`, `FunctionOptions`&gt;&gt;]
#### See #### See
[LanceSchema](../functions/LanceSchema.md) lancedb.LanceSchema
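Putting `sourceField` together with `LanceSchema`, a sketch (assuming the registry ships an `"openai"` function and that the embedding helpers are importable from `@lancedb/lancedb/embedding`):
```ts
import { LanceSchema, getRegistry } from "@lancedb/lancedb/embedding";
import { Utf8 } from "apache-arrow";

const openai = getRegistry().get("openai")!; // assumes the function is registered
const func = await openai.create({ model: "text-embedding-3-small" });

// "text" is the source column; "vector" is filled in by the function.
const schema = LanceSchema({
  text: func.sourceField(new Utf8()),
  vector: func.vectorField(),
});
```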
*** ***
### toJSON() ### toJSON()
```ts ```ts
toJSON(): Record<string, any> abstract toJSON(): Partial<M>
``` ```
Get the original arguments to the constructor, to serialize them so they Convert the embedding function to a JSON object
can be used to recreate the embedding function later. It is used to serialize the embedding function to the schema
It's important that any object returned by this method contains all the necessary
information to recreate the embedding function.
It should return the same object that was passed to the constructor.
If it does not, the embedding function will not be able to be recreated, or could be recreated incorrectly.
#### Returns #### Returns
`Record`&lt;`string`, `any`&gt; `Partial`&lt;`M`&gt;
#### Example
```ts
class MyEmbeddingFunction extends EmbeddingFunction {
constructor(options: {model: string, timeout: number}) {
super();
this.model = options.model;
this.timeout = options.timeout;
}
toJSON() {
return {
model: this.model,
timeout: this.timeout,
};
}
}
```
*** ***
@@ -212,13 +178,12 @@ vectorField is used in combination with `LanceSchema` to provide a declarative d
#### Parameters #### Parameters
* **optionsOrDatatype?**: `DataType`&lt;`Type`, `any`&gt; \| `Partial`&lt;[`FieldOptions`](../interfaces/FieldOptions.md)&lt;`DataType`&lt;`Type`, `any`&gt;&gt;&gt; * **optionsOrDatatype?**: `DataType`&lt;`Type`, `any`&gt; \| `Partial`&lt;`FieldOptions`&lt;`DataType`&lt;`Type`, `any`&gt;&gt;&gt;
The options for the field
#### Returns #### Returns
[`DataType`&lt;`Type`, `any`&gt;, `Map`&lt;`string`, [`EmbeddingFunction`](EmbeddingFunction.md)&lt;`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)&gt;&gt;] [`DataType`&lt;`Type`, `any`&gt;, `Map`&lt;`string`, [`EmbeddingFunction`](EmbeddingFunction.md)&lt;`any`, `FunctionOptions`&gt;&gt;]
#### See #### See
[LanceSchema](../functions/LanceSchema.md) lancedb.LanceSchema

View File

@@ -51,7 +51,7 @@ Fetch an embedding function by name
#### Type Parameters #### Type Parameters
**T** *extends* [`EmbeddingFunction`](EmbeddingFunction.md)&lt;`unknown`, [`FunctionOptions`](../interfaces/FunctionOptions.md)&gt; **T** *extends* [`EmbeddingFunction`](EmbeddingFunction.md)&lt;`unknown`, `FunctionOptions`&gt;
#### Parameters #### Parameters
@@ -60,7 +60,7 @@ Fetch an embedding function by name
#### Returns #### Returns
`undefined` \| [`EmbeddingFunctionCreate`](../interfaces/EmbeddingFunctionCreate.md)&lt;`T`&gt; `undefined` \| `EmbeddingFunctionCreate`&lt;`T`&gt;
*** ***
@@ -80,28 +80,6 @@ getTableMetadata(functions): Map<string, string>
*** ***
### getVar()
```ts
getVar(name): undefined | string
```
Get a variable.
#### Parameters
* **name**: `string`
#### Returns
`undefined` \| `string`
#### See
[setVar](EmbeddingFunctionRegistry.md#setvar)
***
### length() ### length()
```ts ```ts
@@ -126,7 +104,7 @@ Register an embedding function
#### Type Parameters #### Type Parameters
**T** *extends* [`EmbeddingFunctionConstructor`](../interfaces/EmbeddingFunctionConstructor.md)&lt;[`EmbeddingFunction`](EmbeddingFunction.md)&lt;`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)&gt;&gt; = [`EmbeddingFunctionConstructor`](../interfaces/EmbeddingFunctionConstructor.md)&lt;[`EmbeddingFunction`](EmbeddingFunction.md)&lt;`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)&gt;&gt; **T** *extends* `EmbeddingFunctionConstructor`&lt;[`EmbeddingFunction`](EmbeddingFunction.md)&lt;`any`, `FunctionOptions`&gt;&gt; = `EmbeddingFunctionConstructor`&lt;[`EmbeddingFunction`](EmbeddingFunction.md)&lt;`any`, `FunctionOptions`&gt;&gt;
#### Parameters #### Parameters
@@ -167,31 +145,3 @@ reset the registry to the initial state
#### Returns #### Returns
`void` `void`
***
### setVar()
```ts
setVar(name, value): void
```
Set a variable. These can be accessed in the embedding function
configuration using the syntax `$var:variable_name`. If they are not
set, an error will be thrown letting you know which key is unset. If you
want to supply a default value, you can add an additional part in the
configuration like so: `$var:variable_name:default_value`. Default values
can be used for runtime configurations that are not sensitive, such as
whether to use a GPU for inference.
The name must not contain colons. The default value can contain colons.
#### Parameters
* **name**: `string`
* **value**: `string`
#### Returns
`void`
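A sketch of the variable mechanism described above; `openai_api_key` and the `apiKey` option name are illustrative, and the import path is an assumption:
```ts
import { getRegistry } from "@lancedb/lancedb/embedding";

const registry = getRegistry();
// Store the secret once on the registry...
registry.setVar("openai_api_key", process.env.OPENAI_API_KEY ?? "");

// ...then reference it from function options. A default can follow a
// second colon for non-sensitive settings, e.g. "$var:device:cpu".
const openai = registry.get("openai")!;
const func = await openai.create({ apiKey: "$var:openai_api_key" });
```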

View File

@@ -14,7 +14,7 @@ an abstract class for implementing embedding functions that take text as input
## Type Parameters ## Type Parameters
**M** *extends* [`FunctionOptions`](../interfaces/FunctionOptions.md) = [`FunctionOptions`](../interfaces/FunctionOptions.md) **M** *extends* `FunctionOptions` = `FunctionOptions`
## Constructors ## Constructors
@@ -114,37 +114,12 @@ abstract generateEmbeddings(texts, ...args): Promise<number[][] | Float32Array[]
*** ***
### getSensitiveKeys()
```ts
protected getSensitiveKeys(): string[]
```
Provide a list of keys in the function options that should be treated as
sensitive. If users pass raw values for these keys, they will be rejected.
#### Returns
`string`[]
#### Inherited from
[`EmbeddingFunction`](EmbeddingFunction.md).[`getSensitiveKeys`](EmbeddingFunction.md#getsensitivekeys)
***
### init()? ### init()?
```ts ```ts
optional init(): Promise<void> optional init(): Promise<void>
``` ```
Optionally load any resources needed for the embedding function.
This method is called after the embedding function has been initialized
but before any embeddings are computed. It is useful for loading local models
or other resources that are needed for the embedding function to work.
#### Returns #### Returns
`Promise`&lt;`void`&gt; `Promise`&lt;`void`&gt;
@@ -173,28 +148,6 @@ The number of dimensions of the embeddings
*** ***
### resolveVariables()
```ts
protected resolveVariables(config): Partial<M>
```
Apply variables to the config.
#### Parameters
* **config**: `Partial`&lt;`M`&gt;
#### Returns
`Partial`&lt;`M`&gt;
#### Inherited from
[`EmbeddingFunction`](EmbeddingFunction.md).[`resolveVariables`](EmbeddingFunction.md#resolvevariables)
***
### sourceField() ### sourceField()
```ts ```ts
@@ -205,11 +158,11 @@ sourceField is used in combination with `LanceSchema` to provide a declarative d
#### Returns #### Returns
[`DataType`&lt;`Type`, `any`&gt;, `Map`&lt;`string`, [`EmbeddingFunction`](EmbeddingFunction.md)&lt;`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)&gt;&gt;] [`DataType`&lt;`Type`, `any`&gt;, `Map`&lt;`string`, [`EmbeddingFunction`](EmbeddingFunction.md)&lt;`any`, `FunctionOptions`&gt;&gt;]
#### See #### See
[LanceSchema](../functions/LanceSchema.md) lancedb.LanceSchema
#### Overrides #### Overrides
@@ -220,15 +173,37 @@ sourceField is used in combination with `LanceSchema` to provide a declarative d
### toJSON() ### toJSON()
```ts ```ts
toJSON(): Record<string, any> abstract toJSON(): Partial<M>
``` ```
Get the original arguments to the constructor, to serialize them so they Convert the embedding function to a JSON object
can be used to recreate the embedding function later. It is used to serialize the embedding function to the schema
It's important that any object returned by this method contains all the necessary
information to recreate the embedding function.
It should return the same object that was passed to the constructor.
If it does not, the embedding function will not be able to be recreated, or could be recreated incorrectly.
#### Returns #### Returns
`Record`&lt;`string`, `any`&gt; `Partial`&lt;`M`&gt;
#### Example
```ts
class MyEmbeddingFunction extends EmbeddingFunction {
constructor(options: {model: string, timeout: number}) {
super();
this.model = options.model;
this.timeout = options.timeout;
}
toJSON() {
return {
model: this.model,
timeout: this.timeout,
};
}
}
```
#### Inherited from #### Inherited from
@@ -246,16 +221,15 @@ vectorField is used in combination with `LanceSchema` to provide a declarative d
#### Parameters #### Parameters
* **optionsOrDatatype?**: `DataType`&lt;`Type`, `any`&gt; \| `Partial`&lt;[`FieldOptions`](../interfaces/FieldOptions.md)&lt;`DataType`&lt;`Type`, `any`&gt;&gt;&gt; * **optionsOrDatatype?**: `DataType`&lt;`Type`, `any`&gt; \| `Partial`&lt;`FieldOptions`&lt;`DataType`&lt;`Type`, `any`&gt;&gt;&gt;
The options for the field
#### Returns #### Returns
[`DataType`&lt;`Type`, `any`&gt;, `Map`&lt;`string`, [`EmbeddingFunction`](EmbeddingFunction.md)&lt;`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)&gt;&gt;] [`DataType`&lt;`Type`, `any`&gt;, `Map`&lt;`string`, [`EmbeddingFunction`](EmbeddingFunction.md)&lt;`any`, `FunctionOptions`&gt;&gt;]
#### See #### See
[LanceSchema](../functions/LanceSchema.md) lancedb.LanceSchema
#### Inherited from #### Inherited from

View File

@@ -14,7 +14,7 @@ Create a schema with embedding functions.
## Parameters ## Parameters
* **fields**: `Record`&lt;`string`, `object` \| [`object`, `Map`&lt;`string`, [`EmbeddingFunction`](../classes/EmbeddingFunction.md)&lt;`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)&gt;&gt;]&gt; * **fields**: `Record`&lt;`string`, `object` \| [`object`, `Map`&lt;`string`, [`EmbeddingFunction`](../classes/EmbeddingFunction.md)&lt;`any`, `FunctionOptions`&gt;&gt;]&gt;
## Returns ## Returns

View File

@@ -20,7 +20,7 @@ function register(name?): (ctor) => any
### Parameters ### Parameters
* **ctor**: [`EmbeddingFunctionConstructor`](../interfaces/EmbeddingFunctionConstructor.md)&lt;[`EmbeddingFunction`](../classes/EmbeddingFunction.md)&lt;`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)&gt;&gt; * **ctor**: `EmbeddingFunctionConstructor`&lt;[`EmbeddingFunction`](../classes/EmbeddingFunction.md)&lt;`any`, `FunctionOptions`&gt;&gt;
### Returns ### Returns

View File

@@ -1,27 +0,0 @@
[**@lancedb/lancedb**](../../../README.md) • **Docs**
***
[@lancedb/lancedb](../../../globals.md) / [embedding](../README.md) / EmbeddingFunctionConstructor
# Interface: EmbeddingFunctionConstructor&lt;T&gt;
## Type Parameters
**T** *extends* [`EmbeddingFunction`](../classes/EmbeddingFunction.md) = [`EmbeddingFunction`](../classes/EmbeddingFunction.md)
## Constructors
### new EmbeddingFunctionConstructor()
```ts
new EmbeddingFunctionConstructor(modelOptions?): T
```
#### Parameters
* **modelOptions?**: `T`\[`"TOptions"`\]
#### Returns
`T`

View File

@@ -1,27 +0,0 @@
[**@lancedb/lancedb**](../../../README.md) • **Docs**
***
[@lancedb/lancedb](../../../globals.md) / [embedding](../README.md) / EmbeddingFunctionCreate
# Interface: EmbeddingFunctionCreate&lt;T&gt;
## Type Parameters
**T** *extends* [`EmbeddingFunction`](../classes/EmbeddingFunction.md)
## Methods
### create()
```ts
create(options?): CreateReturnType<T>
```
#### Parameters
* **options?**: `T`\[`"TOptions"`\]
#### Returns
[`CreateReturnType`](../type-aliases/CreateReturnType.md)&lt;`T`&gt;

View File

@@ -1,27 +0,0 @@
[**@lancedb/lancedb**](../../../README.md) • **Docs**
***
[@lancedb/lancedb](../../../globals.md) / [embedding](../README.md) / FieldOptions
# Interface: FieldOptions&lt;T&gt;
## Type Parameters
**T** *extends* `DataType` = `DataType`
## Properties
### datatype
```ts
datatype: T;
```
***
### dims?
```ts
optional dims: number;
```

View File

@@ -1,13 +0,0 @@
[**@lancedb/lancedb**](../../../README.md) • **Docs**
***
[@lancedb/lancedb](../../../globals.md) / [embedding](../README.md) / FunctionOptions
# Interface: FunctionOptions
Options for a given embedding function
## Indexable
\[`key`: `string`\]: `any`

View File

@@ -1,15 +0,0 @@
[**@lancedb/lancedb**](../../../README.md) • **Docs**
***
[@lancedb/lancedb](../../../globals.md) / [embedding](../README.md) / CreateReturnType
# Type Alias: CreateReturnType&lt;T&gt;
```ts
type CreateReturnType<T>: T extends object ? Promise<T> : T;
```
## Type Parameters
**T**

View File

@@ -1,17 +0,0 @@
[**@lancedb/lancedb**](../../README.md) • **Docs**
***
[@lancedb/lancedb](../../globals.md) / rerankers
# rerankers
## Index
### Classes
- [RRFReranker](classes/RRFReranker.md)
### Interfaces
- [Reranker](interfaces/Reranker.md)

View File

@@ -1,48 +0,0 @@
[**@lancedb/lancedb**](../../../README.md) • **Docs**
***
[@lancedb/lancedb](../../../globals.md) / [rerankers](../README.md) / RRFReranker
# Class: RRFReranker
Reranks the results using the Reciprocal Rank Fusion (RRF) algorithm.
## Methods
### rerankHybrid()
```ts
rerankHybrid(
query,
vecResults,
ftsResults): Promise<RecordBatch<any>>
```
#### Parameters
* **query**: `string`
* **vecResults**: `RecordBatch`&lt;`any`&gt;
* **ftsResults**: `RecordBatch`&lt;`any`&gt;
#### Returns
`Promise`&lt;`RecordBatch`&lt;`any`&gt;&gt;
***
### create()
```ts
static create(k): Promise<RRFReranker>
```
#### Parameters
* **k**: `number` = `60`
#### Returns
`Promise`&lt;[`RRFReranker`](RRFReranker.md)&gt;
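A sketch of plugging the reranker into a hybrid query; the import path and the `rerank` method on the query builder are assumptions here:
```ts
import * as lancedb from "@lancedb/lancedb";
import { RRFReranker } from "@lancedb/lancedb/rerankers"; // assumed import path

const db = await lancedb.connect("data/sample-lancedb");
const tbl = await db.openTable("my_table");

const reranker = await RRFReranker.create(60); // k = 60, the usual RRF constant

const results = await tbl
  .query()
  .fullTextSearch("puppy")
  .nearestTo([0.1, 0.2])
  .rerank(reranker) // assumed hook on the hybrid query builder
  .limit(10)
  .toArray();
```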

View File

@@ -1,30 +0,0 @@
[**@lancedb/lancedb**](../../../README.md) • **Docs**
***
[@lancedb/lancedb](../../../globals.md) / [rerankers](../README.md) / Reranker
# Interface: Reranker
## Methods
### rerankHybrid()
```ts
rerankHybrid(
query,
vecResults,
ftsResults): Promise<RecordBatch<any>>
```
#### Parameters
* **query**: `string`
* **vecResults**: `RecordBatch`&lt;`any`&gt;
* **ftsResults**: `RecordBatch`&lt;`any`&gt;
#### Returns
`Promise`&lt;`RecordBatch`&lt;`any`&gt;&gt;
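Implementing the interface means providing `rerankHybrid`; here is a hypothetical pass-through reranker that keeps only the vector results, just to show the shape:
```ts
import { RecordBatch } from "apache-arrow";

class VectorOnlyReranker {
  // Ignores the FTS side entirely; a real reranker would merge and
  // re-score rows from both batches.
  async rerankHybrid(
    query: string,
    vecResults: RecordBatch,
    ftsResults: RecordBatch,
  ): Promise<RecordBatch> {
    return vecResults;
  }
}
```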

View File

@@ -1,11 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / DataLike
# Type Alias: DataLike
```ts
type DataLike: Data | object;
```

View File

@@ -1,11 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / FieldLike
# Type Alias: FieldLike
```ts
type FieldLike: Field | object;
```

View File

@@ -1,19 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / IntoSql
# Type Alias: IntoSql
```ts
type IntoSql:
| string
| number
| boolean
| null
| Date
| ArrayBufferLike
| Buffer
| IntoSql[];
```

View File

@@ -1,11 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / IntoVector
# Type Alias: IntoVector
```ts
type IntoVector: Float32Array | Float64Array | number[] | Promise<Float32Array | Float64Array | number[]>;
```
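In practice this means any of the following can be handed to a vector query, a sketch:
```ts
const asArray: number[] = [0.1, 0.2, 0.3];
const asFloat32 = Float32Array.from(asArray);
const asPromise = Promise.resolve(asFloat32); // e.g. an embedding call in flight

// Any of these satisfies IntoVector, e.g.:
// tbl.query().nearestTo(asArray) / .nearestTo(asFloat32) / .nearestTo(asPromise)
```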

View File

@@ -1,11 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / RecordBatchLike
# Type Alias: RecordBatchLike
```ts
type RecordBatchLike: RecordBatch | object;
```

View File

@@ -1,11 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / SchemaLike
# Type Alias: SchemaLike
```ts
type SchemaLike: Schema | object;
```

View File

@@ -1,11 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**
***
[@lancedb/lancedb](../globals.md) / TableLike
# Type Alias: TableLike
```ts
type TableLike: ArrowTable | object;
```

View File

@@ -66,7 +66,7 @@ the size of the data.
### Embedding Functions ### Embedding Functions
The embedding API has been completely reworked, and it now more closely resembles the Python API, including the new [embedding registry](./js/classes/embedding.EmbeddingFunctionRegistry.md): The embedding API has been completely reworked, and it now more closely resembles the Python API, including the new [embedding registry](./js/classes/embedding.EmbeddingFunctionRegistry.md)
=== "vectordb (deprecated)" === "vectordb (deprecated)"

View File

@@ -1,6 +1,17 @@
#!/usr/bin/env python #!/usr/bin/env python
# #
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset hf://poloclub/diffusiondb """Dataset hf://poloclub/diffusiondb
""" """

View File

@@ -207,7 +207,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"source": [ "source": [
"## The dataset\n", "## The dataset\n",
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retriever.\n", "The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retreiver.\n",
"The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo\n" "The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo\n"
], ],
"metadata": { "metadata": {

View File

@@ -477,7 +477,7 @@
"source": [ "source": [
"## Vector Search\n", "## Vector Search\n",
"\n", "\n",
"Average latency: `3.48 ms ± 71.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`" "avg latency - `3.48 ms ± 71.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
] ]
}, },
{ {
@@ -597,7 +597,7 @@
"`LinearCombinationReranker(weight=0.7)` is used as the default reranker for reranking the hybrid search results if the reranker isn't specified explicitly.\n", "`LinearCombinationReranker(weight=0.7)` is used as the default reranker for reranking the hybrid search results if the reranker isn't specified explicitly.\n",
"The `weight` param controls the weightage provided to vector search score. The weight of `1-weight` is applied to FTS scores when reranking.\n", "The `weight` param controls the weightage provided to vector search score. The weight of `1-weight` is applied to FTS scores when reranking.\n",
"\n", "\n",
"Latency: `71 ms ± 25.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`" "Latency - `71 ms ± 25.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
] ]
}, },
{ {
@@ -675,9 +675,9 @@
}, },
"source": [ "source": [
"### Cohere Reranker\n", "### Cohere Reranker\n",
"This uses Cohere's Reranking API to re-rank the results. It accepts the reranking model name as a parameter. By default it uses the english-v3 model but you can easily switch to a multi-lingual model.\n", "This uses Cohere's Reranking API to re-rank the results. It accepts the reranking model name as a parameter. By Default it uses the english-v3 model but you can easily switch to a multi-lingual model.\n",
"\n", "\n",
"Latency: `605 ms ± 78.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`" "latency - `605 ms ± 78.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
] ]
}, },
{ {
@@ -1165,7 +1165,7 @@
}, },
"source": [ "source": [
"### ColBERT Reranker\n", "### ColBERT Reranker\n",
"Colbert Reranker is powered by ColBERT model. It runs locally using the huggingface implementation.\n", "Colber Reranker is powered by ColBERT model. It runs locally using the huggingface implementation.\n",
"\n", "\n",
"Latency - `950 ms ± 5.78 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`\n", "Latency - `950 ms ± 5.78 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`\n",
"\n", "\n",
@@ -1489,9 +1489,9 @@
}, },
"source": [ "source": [
"### Cross Encoder Reranker\n", "### Cross Encoder Reranker\n",
"Uses cross encoder models are rerankers. Uses sentence transformer implementation locally\n", "Uses cross encoder models are rerankers. Uses sentence transformer implemntation locally\n",
"\n", "\n",
"Latency: `1.38 s ± 64.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`" "Latency - `1.38 s ± 64.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
] ]
}, },
{ {
@@ -1771,10 +1771,10 @@
"source": [ "source": [
"### (Experimental) OpenAI Reranker\n", "### (Experimental) OpenAI Reranker\n",
"\n", "\n",
"This prompts a chat model to rerank results and is not a dedicated reranker model. This should be treated as experimental. You might exceed the token limit so set the search limits based on your token limit.\n", "This prompts chat model to rerank results which is not a dedicated reranker model. This should be treated as experimental. You might run out of token limit so set the search limits based on your token limit.\n",
"NOTE: It is recommended to use `gpt-4-turbo-preview` as older models might lead to bad behaviour\n", "NOTE: It is recommended to use `gpt-4-turbo-preview`, older models might lead to bad behaviour\n",
"\n", "\n",
"Latency: `Can take 10s of seconds if using GPT-4 model`" "Latency - `Can take 10s of seconds if using GPT-4 model`"
] ]
}, },
{ {
@@ -1817,7 +1817,7 @@
}, },
"source": [ "source": [
"## Use your custom Reranker\n", "## Use your custom Reranker\n",
"Hybrid search in LanceDB is designed to be very flexible. You can easily plug in your own Re-reranking logic. To do so, you simply need to implement the base Reranker class:" "Hybrid search in LanceDB is designed to be very flexible. You can easily plug in your own Re-reranking logic. To do so, you simply need to implement the base Reranker class"
] ]
}, },
{ {
@@ -1849,9 +1849,9 @@
"source": [ "source": [
"### Custom Reranker based on CohereReranker\n", "### Custom Reranker based on CohereReranker\n",
"\n", "\n",
"For the sake of simplicity let's build a custom reranker that enhances the Cohere Reranker by accepting a filter query, and accepts other CohereReranker params as kwargs.\n", "For the sake of simplicity let's build custom reranker that just enchances the Cohere Reranker by accepting a filter query, and accept other CohereReranker params as kwags.\n",
"\n", "\n",
"For this toy example let's say we want to get rid of docs that represent a table of contents or appendix, as these are semantically close to representing costs but don't represent the specific reasons why operating costs were high." "For this toy example let's say we want to get rid of docs that represent a table of contents, appendix etc. as these are semantically close of representing costs but this isn't something we are interested in because they don't represent the specific reasons why operating costs were high. They simply represent the costs."
] ]
}, },
{ {
@@ -1969,7 +1969,7 @@
"id": "b3b5464a-7252-4eab-aaac-9b0eae37496f" "id": "b3b5464a-7252-4eab-aaac-9b0eae37496f"
}, },
"source": [ "source": [
"As you can see, the document containing the table of contents no longer shows up." "As you can see the document containing the Table of contetnts of spending no longer shows up"
] ]
} }
], ],

View File

@@ -49,7 +49,7 @@
}, },
"source": [ "source": [
"## What is a retriever\n", "## What is a retriever\n",
"VectorDBs are used as retrievers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retriever is a critical component of Retrieval Augmented Generation (RAG) acrhitectures. In this section, we will discuss how to improve the performance of retrievers.\n", "VectorDBs are used as retreivers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retriever is a critical component of Retrieval Augmented Generation (RAG) acrhitectures. In this section, we will discuss how to improve the performance of retrievers.\n",
"\n", "\n",
"<img src=\"https://llmstack.ai/assets/images/rag-f517f1f834bdbb94a87765e0edd40ff2.png\" />\n", "<img src=\"https://llmstack.ai/assets/images/rag-f517f1f834bdbb94a87765e0edd40ff2.png\" />\n",
"\n", "\n",
@@ -64,7 +64,7 @@
"- Fine-tuning the embedding models\n", "- Fine-tuning the embedding models\n",
"- Using different embedding models\n", "- Using different embedding models\n",
"\n", "\n",
"Obviously, the above list is not exhaustive. There are other subtler ways that can improve retrieval performance like alternative chunking algorithms, using different distance/similarity metrics, and more. For brevity, we'll only cover high level and more impactful techniques here.\n", "Obviously, the above list is not exhaustive. There are other subtler ways that can improve retrieval performance like experimenting chunking algorithms, using different distance/similarity metrics etc. But for brevity, we'll only cover high level and more impactful techniques here.\n",
"\n" "\n"
] ]
}, },
@@ -77,7 +77,7 @@
"# LanceDB\n", "# LanceDB\n",
"- Multimodal DB for AI\n", "- Multimodal DB for AI\n",
"- Powered by an innovative & open-source in-house file format\n", "- Powered by an innovative & open-source in-house file format\n",
"- Zero setup\n", "- 0 Setup\n",
"- Scales up on disk storage\n", "- Scales up on disk storage\n",
"- Native support for vector, full-text(BM25) and hybrid search\n", "- Native support for vector, full-text(BM25) and hybrid search\n",
"\n", "\n",
@@ -92,8 +92,8 @@
}, },
"source": [ "source": [
"## The dataset\n", "## The dataset\n",
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retriever.\n", "The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retreiver.\n",
"The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo.\n" "The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo\n"
] ]
}, },
{ {
@@ -594,10 +594,10 @@
}, },
"source": [ "source": [
"## Ingestion\n", "## Ingestion\n",
"Let us now ingest the contexts in LanceDB. The steps will be:\n", "Let us now ingest the contexts in LanceDB\n",
"\n", "\n",
"- Create a schema (Pydantic or Pyarrow)\n", "- Create a schema (Pydantic or Pyarrow)\n",
"- Select an embedding model from LanceDB Embedding API (to allow automatic vectorization of data)\n", "- Select an embedding model from LanceDB Embedding API (Allows automatic vectorization of data)\n",
"- Ingest the contexts\n" "- Ingest the contexts\n"
] ]
}, },
@@ -841,7 +841,7 @@
}, },
"source": [ "source": [
"## Different Query types in LanceDB\n", "## Different Query types in LanceDB\n",
"LanceDB allows switching query types with by setting `query_type` argument, which defaults to `vector` when using Embedding API. In this example we'll use `JinaReranker` which is one of many rerankers supported by LanceDB.\n", "LanceDB allows switching query types with by setting `query_type` argument, which defaults to `vector` when using Embedding API. In this example we'll use `JinaReranker` which is one of many rerankers supported by LanceDB\n",
"\n", "\n",
"### Vector search:\n", "### Vector search:\n",
"Vector search\n", "Vector search\n",
@@ -1446,11 +1446,11 @@
"source": [ "source": [
"## Takeaways & Tradeoffs\n", "## Takeaways & Tradeoffs\n",
"\n", "\n",
"* **Rerankers significantly improve accuracy at little cost.** Using Hybrid search and/or rerankers can significantly improve retrieval performance without spending any additional time or effort on tuning embedding models, generators, or dissecting the dataset.\n", "* **Easiest method to significantly improve accuracy** Using Hybrid search and/or rerankers can significantly improve retrieval performance without spending any additional time or effort on tuning embedding models, generators, or dissecting the dataset.\n",
"\n", "\n",
"* **Reranking is an expensive operation.** Depending on the type of reranker you choose, they can incur significant latecy to query times. Although some API-based rerankers can be significantly faster.\n", "* **Reranking is an expensive operation.** Depending on the type of reranker you choose, they can incur significant latecy to query times. Although some API-based rerankers can be significantly faster.\n",
"\n", "\n",
"* **Pre-warmed GPU environments reduce latency.** When using models locally, having a warmed-up GPU environment will significantly reduce latency. This is especially useful if the application doesn't need to be strictly realtime. Pre-warming comes at the expense of GPU resources." "* When using models locally, having a warmed-up GPU environment will significantly reduce latency. This is specially useful if the application doesn't need to be strcitly realtime. The tradeoff being GPU resources."
] ]
}, },
{ {
@@ -1504,4 +1504,4 @@
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 0 "nbformat_minor": 0
} }

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -114,17 +114,14 @@
} }
], ],
"source": [ "source": [
"import pandas as pd\n", "data = [\n",
" {\"vector\": [1.1, 1.2], \"lat\": 45.5, \"long\": -122.7},\n",
" {\"vector\": [0.2, 1.8], \"lat\": 40.1, \"long\": -74.1},\n",
"]\n",
"\n", "\n",
"data = pd.DataFrame(\n", "db.create_table(\"table2\", data)\n",
" {\n", "\n",
" \"vector\": [[1.1, 1.2, 1.3, 1.4], [0.2, 1.8, 0.4, 3.6]],\n", "db[\"table2\"].head() "
" \"lat\": [45.5, 40.1],\n",
" \"long\": [-122.7, -74.1],\n",
" }\n",
")\n",
"db.create_table(\"my_table_pandas\", data)\n",
"db[\"my_table_pandas\"].head()"
] ]
}, },
{ {
@@ -167,7 +164,7 @@
"import pyarrow as pa\n", "import pyarrow as pa\n",
"\n", "\n",
"custom_schema = pa.schema([\n", "custom_schema = pa.schema([\n",
"pa.field(\"vector\", pa.list_(pa.float32(), 4)),\n", "pa.field(\"vector\", pa.list_(pa.float32(), 2)),\n",
"pa.field(\"lat\", pa.float32()),\n", "pa.field(\"lat\", pa.float32()),\n",
"pa.field(\"long\", pa.float32())\n", "pa.field(\"long\", pa.float32())\n",
"])\n", "])\n",

View File

@@ -8,55 +8,54 @@ and PyArrow. The sequence of steps in a typical workflow is shown below.
First, we need to connect to a LanceDB database. First, we need to connect to a LanceDB database.
=== "Sync API" ```py
```python import lancedb
--8<-- "python/python/tests/docs/test_python.py:import-lancedb"
--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb"
```
=== "Async API"
```python db = lancedb.connect("data/sample-lancedb")
--8<-- "python/python/tests/docs/test_python.py:import-lancedb" ```
--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb_async"
```
We can load a Pandas `DataFrame` to LanceDB directly. We can load a Pandas `DataFrame` to LanceDB directly.
=== "Sync API" ```py
import pandas as pd
```python data = pd.DataFrame({
--8<-- "python/python/tests/docs/test_python.py:import-pandas" "vector": [[3.1, 4.1], [5.9, 26.5]],
--8<-- "python/python/tests/docs/test_python.py:create_table_pandas" "item": ["foo", "bar"],
``` "price": [10.0, 20.0]
=== "Async API" })
table = db.create_table("pd_table", data=data)
```python ```
--8<-- "python/python/tests/docs/test_python.py:import-pandas"
--8<-- "python/python/tests/docs/test_python.py:create_table_pandas_async"
```
Similar to the [`pyarrow.write_dataset()`](https://arrow.apache.org/docs/python/generated/pyarrow.dataset.write_dataset.html) method, LanceDB's Similar to the [`pyarrow.write_dataset()`](https://arrow.apache.org/docs/python/generated/pyarrow.dataset.write_dataset.html) method, LanceDB's
[`db.create_table()`](python.md/#lancedb.db.DBConnection.create_table) accepts data in a variety of forms. [`db.create_table()`](python.md/#lancedb.db.DBConnection.create_table) accepts data in a variety of forms.
If you have a dataset that is larger than memory, you can create a table with `Iterator[pyarrow.RecordBatch]` to lazily load the data: If you have a dataset that is larger than memory, you can create a table with `Iterator[pyarrow.RecordBatch]` to lazily load the data:
=== "Sync API" ```py
```python from typing import Iterable
--8<-- "python/python/tests/docs/test_python.py:import-iterable" import pyarrow as pa
--8<-- "python/python/tests/docs/test_python.py:import-pyarrow"
--8<-- "python/python/tests/docs/test_python.py:make_batches"
--8<-- "python/python/tests/docs/test_python.py:create_table_iterable"
```
=== "Async API"
```python def make_batches() -> Iterable[pa.RecordBatch]:
--8<-- "python/python/tests/docs/test_python.py:import-iterable" for i in range(5):
--8<-- "python/python/tests/docs/test_python.py:import-pyarrow" yield pa.RecordBatch.from_arrays(
--8<-- "python/python/tests/docs/test_python.py:make_batches" [
--8<-- "python/python/tests/docs/test_python.py:create_table_iterable_async" pa.array([[3.1, 4.1], [5.9, 26.5]]),
``` pa.array(["foo", "bar"]),
pa.array([10.0, 20.0]),
],
["vector", "item", "price"])
schema=pa.schema([
pa.field("vector", pa.list_(pa.float32())),
pa.field("item", pa.utf8()),
pa.field("price", pa.float32()),
])
table = db.create_table("iterable_table", data=make_batches(), schema=schema)
```
You will find detailed instructions of creating a LanceDB dataset in You will find detailed instructions of creating a LanceDB dataset in
[Getting Started](../basic.md#quick-start) and [API](python.md/#lancedb.db.DBConnection.create_table) [Getting Started](../basic.md#quick-start) and [API](python.md/#lancedb.db.DBConnection.create_table)
@@ -66,16 +65,15 @@ sections.
We can now perform similarity search via the LanceDB Python API. We can now perform similarity search via the LanceDB Python API.
=== "Sync API" ```py
# Open the table previously created.
table = db.open_table("pd_table")
```python query_vector = [100, 100]
--8<-- "python/python/tests/docs/test_python.py:vector_search" # Pandas DataFrame
``` df = table.search(query_vector).limit(1).to_pandas()
=== "Async API" print(df)
```
```python
--8<-- "python/python/tests/docs/test_python.py:vector_search_async"
```
``` ```
vector item price _distance vector item price _distance
@@ -85,13 +83,16 @@ We can now perform similarity search via the LanceDB Python API.
If you have a simple filter, it's faster to provide a `where` clause to LanceDB's `search` method. If you have a simple filter, it's faster to provide a `where` clause to LanceDB's `search` method.
For more complex filters or aggregations, you can always resort to using the underlying `DataFrame` methods after performing a search. For more complex filters or aggregations, you can always resort to using the underlying `DataFrame` methods after performing a search.
=== "Sync API" ```python
```python # Apply the filter via LanceDB
--8<-- "python/python/tests/docs/test_python.py:vector_search_with_filter" results = table.search([100, 100]).where("price < 15").to_pandas()
``` assert len(results) == 1
=== "Async API" assert results["item"].iloc[0] == "foo"
```python # Apply the filter via Pandas
--8<-- "python/python/tests/docs/test_python.py:vector_search_with_filter_async" df = results = table.search([100, 100]).to_pandas()
``` results = df[df.price < 15]
assert len(results) == 1
assert results["item"].iloc[0] == "foo"
```

View File

@@ -2,57 +2,39 @@
LanceDB supports [Polars](https://github.com/pola-rs/polars), a blazingly fast DataFrame library for Python written in Rust. Just like in Pandas, the Polars integration is enabled by PyArrow under the hood. A deeper integration between Lance Tables and Polars DataFrames is in progress, but at the moment, you can read a Polars DataFrame into LanceDB and output the search results from a query to a Polars DataFrame. LanceDB supports [Polars](https://github.com/pola-rs/polars), a blazingly fast DataFrame library for Python written in Rust. Just like in Pandas, the Polars integration is enabled by PyArrow under the hood. A deeper integration between Lance Tables and Polars DataFrames is in progress, but at the moment, you can read a Polars DataFrame into LanceDB and output the search results from a query to a Polars DataFrame.
## Create & Query LanceDB Table ## Create & Query LanceDB Table
### From Polars DataFrame ### From Polars DataFrame
First, we connect to a LanceDB database. First, we connect to a LanceDB database.
=== "Sync API" ```py
import lancedb
```py
--8<-- "python/python/tests/docs/test_python.py:import-lancedb"
--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb"
```
=== "Async API"
```py
--8<-- "python/python/tests/docs/test_python.py:import-lancedb"
--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb_async"
```
db = lancedb.connect("data/polars-lancedb")
```
We can load a Polars `DataFrame` to LanceDB directly. We can load a Polars `DataFrame` to LanceDB directly.
=== "Sync API" ```py
import polars as pl
```py data = pl.DataFrame({
--8<-- "python/python/tests/docs/test_python.py:import-polars" "vector": [[3.1, 4.1], [5.9, 26.5]],
--8<-- "python/python/tests/docs/test_python.py:create_table_polars" "item": ["foo", "bar"],
``` "price": [10.0, 20.0]
})
=== "Async API" table = db.create_table("pl_table", data=data)
```
```py
--8<-- "python/python/tests/docs/test_python.py:import-polars"
--8<-- "python/python/tests/docs/test_python.py:create_table_polars_async"
```
We can now perform similarity search via the LanceDB Python API. We can now perform similarity search via the LanceDB Python API.
=== "Sync API" ```py
query = [3.0, 4.0]
```py result = table.search(query).limit(1).to_polars()
--8<-- "python/python/tests/docs/test_python.py:vector_search_polars" print(result)
``` print(type(result))
```
=== "Async API"
```py
--8<-- "python/python/tests/docs/test_python.py:vector_search_polars_async"
```
In addition to the selected columns, LanceDB also returns a vector In addition to the selected columns, LanceDB also returns a vector
and also the `_distance` column which is the distance between the query and also the `_distance` column which is the distance between the query
@@ -77,16 +59,33 @@ Note that the type of the result from a table search is a Polars DataFrame.
Alternately, we can create an empty LanceDB Table using a Pydantic schema and populate it with a Polars DataFrame. Alternately, we can create an empty LanceDB Table using a Pydantic schema and populate it with a Polars DataFrame.
```py ```py
--8<-- "python/python/tests/docs/test_python.py:import-polars" import polars as pl
--8<-- "python/python/tests/docs/test_python.py:import-lancedb-pydantic" from lancedb.pydantic import Vector, LanceModel
--8<-- "python/python/tests/docs/test_python.py:class_Item"
--8<-- "python/python/tests/docs/test_python.py:create_table_pydantic"
class Item(LanceModel):
vector: Vector(2)
item: str
price: float
data = {
"vector": [[3.1, 4.1]],
"item": "foo",
"price": 10.0,
}
table = db.create_table("test_table", schema=Item)
df = pl.DataFrame(data)
# Add Polars DataFrame to table
table.add(df)
``` ```
The table can now be queried as usual. The table can now be queried as usual.
```py ```py
--8<-- "python/python/tests/docs/test_python.py:vector_search_polars" result = table.search([3.0, 4.0]).limit(1).to_polars()
print(result)
print(type(result))
``` ```
``` ```
@@ -109,7 +108,8 @@ As you iterate on your application, you'll likely need to work with the whole ta
LanceDB tables can also be converted directly into a polars LazyFrame for further processing. LanceDB tables can also be converted directly into a polars LazyFrame for further processing.
```python ```python
--8<-- "python/python/tests/docs/test_python.py:dump_table_lazyform" ldf = table.to_polars()
print(type(ldf))
``` ```
Unlike the search result from a query, we can see that the type of the result is a LazyFrame. Unlike the search result from a query, we can see that the type of the result is a LazyFrame.
@@ -121,7 +121,7 @@ Unlike the search result from a query, we can see that the type of the result is
We can now work with the LazyFrame as we would in Polars, and collect the first result. We can now work with the LazyFrame as we would in Polars, and collect the first result.
```python ```python
--8<-- "python/python/tests/docs/test_python.py:print_table_lazyform" print(ldf.first().collect())
``` ```
``` ```
@@ -139,3 +139,4 @@ The reason it's beneficial to not convert the LanceDB Table
to a DataFrame is because the table can potentially be way larger to a DataFrame is because the table can potentially be way larger
than memory, and Polars LazyFrames allow us to work with such than memory, and Polars LazyFrames allow us to work with such
larger-than-memory datasets by not loading it into memory all at once. larger-than-memory datasets by not loading it into memory all at once.

Some files were not shown because too many files have changed in this diff