Compare commits


1 commit

Author         SHA1        Message  Date
albertlockett  3228fb9cd9  test     2024-10-08 18:28:02 -04:00
483 changed files with 12839 additions and 46188 deletions


@@ -1,5 +1,5 @@
[tool.bumpversion]
-current_version = "0.18.0"
+current_version = "0.11.0-beta.1"
parse = """(?x)
(?P<major>0|[1-9]\\d*)\\.
(?P<minor>0|[1-9]\\d*)\\.
@@ -66,47 +66,6 @@ glob = "nodejs/npm/*/package.json"
replace = "\"version\": \"{new_version}\","
search = "\"version\": \"{current_version}\","
# vectodb node binary packages
[[tool.bumpversion.files]]
glob = "node/package.json"
replace = "\"@lancedb/vectordb-darwin-arm64\": \"{new_version}\""
search = "\"@lancedb/vectordb-darwin-arm64\": \"{current_version}\""
[[tool.bumpversion.files]]
glob = "node/package.json"
replace = "\"@lancedb/vectordb-darwin-x64\": \"{new_version}\""
search = "\"@lancedb/vectordb-darwin-x64\": \"{current_version}\""
[[tool.bumpversion.files]]
glob = "node/package.json"
replace = "\"@lancedb/vectordb-linux-arm64-gnu\": \"{new_version}\""
search = "\"@lancedb/vectordb-linux-arm64-gnu\": \"{current_version}\""
[[tool.bumpversion.files]]
glob = "node/package.json"
replace = "\"@lancedb/vectordb-linux-x64-gnu\": \"{new_version}\""
search = "\"@lancedb/vectordb-linux-x64-gnu\": \"{current_version}\""
[[tool.bumpversion.files]]
glob = "node/package.json"
replace = "\"@lancedb/vectordb-linux-arm64-musl\": \"{new_version}\""
search = "\"@lancedb/vectordb-linux-arm64-musl\": \"{current_version}\""
[[tool.bumpversion.files]]
glob = "node/package.json"
replace = "\"@lancedb/vectordb-linux-x64-musl\": \"{new_version}\""
search = "\"@lancedb/vectordb-linux-x64-musl\": \"{current_version}\""
[[tool.bumpversion.files]]
glob = "node/package.json"
replace = "\"@lancedb/vectordb-win32-x64-msvc\": \"{new_version}\""
search = "\"@lancedb/vectordb-win32-x64-msvc\": \"{current_version}\""
[[tool.bumpversion.files]]
glob = "node/package.json"
replace = "\"@lancedb/vectordb-win32-arm64-msvc\": \"{new_version}\""
search = "\"@lancedb/vectordb-win32-arm64-msvc\": \"{current_version}\""
# Cargo files
# ------------
[[tool.bumpversion.files]]
@@ -118,8 +77,3 @@ search = "\nversion = \"{current_version}\""
filename = "rust/lancedb/Cargo.toml"
replace = "\nversion = \"{new_version}\""
search = "\nversion = \"{current_version}\""
[[tool.bumpversion.files]]
filename = "nodejs/Cargo.toml"
replace = "\nversion = \"{new_version}\""
search = "\nversion = \"{current_version}\""


@@ -31,9 +31,6 @@ rustflags = [
[target.x86_64-unknown-linux-gnu]
rustflags = ["-C", "target-cpu=haswell", "-C", "target-feature=+avx2,+fma,+f16c"]
[target.x86_64-unknown-linux-musl]
rustflags = ["-C", "target-cpu=haswell", "-C", "target-feature=-crt-static,+avx2,+fma,+f16c"]
[target.aarch64-apple-darwin]
rustflags = ["-C", "target-cpu=apple-m1", "-C", "target-feature=+neon,+fp16,+fhm,+dotprod"]
@@ -41,7 +38,3 @@ rustflags = ["-C", "target-cpu=apple-m1", "-C", "target-feature=+neon,+fp16,+fhm
# not found errors on systems that are missing it.
[target.x86_64-pc-windows-msvc]
rustflags = ["-Ctarget-feature=+crt-static"]
# Experimental target for Arm64 Windows
[target.aarch64-pc-windows-msvc]
rustflags = ["-Ctarget-feature=+crt-static"]


@@ -52,7 +52,12 @@ runs:
args: ${{ inputs.args }}
before-script-linux: |
set -e
-yum install -y openssl-devel clang \
-&& curl -L https://github.com/protocolbuffers/protobuf/releases/download/v24.4/protoc-24.4-linux-aarch_64.zip > /tmp/protoc.zip \
+apt install -y unzip
+if [ $(uname -m) = "x86_64" ]; then
PROTOC_ARCH="x86_64"
else
PROTOC_ARCH="aarch_64"
fi
curl -L https://github.com/protocolbuffers/protobuf/releases/download/v24.4/protoc-24.4-linux-$PROTOC_ARCH.zip > /tmp/protoc.zip \
&& unzip /tmp/protoc.zip -d /usr/local \
&& rm /tmp/protoc.zip


@@ -20,7 +20,7 @@ runs:
uses: PyO3/maturin-action@v1
with:
command: build
# TODO: pass through interpreter
args: ${{ inputs.args }}
docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
working-directory: python
interpreter: 3.${{ inputs.python-minor-version }}


@@ -28,7 +28,7 @@ runs:
args: ${{ inputs.args }}
docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
working-directory: python
-- uses: actions/upload-artifact@v4
+- uses: actions/upload-artifact@v3
with:
name: windows-wheels
path: python\target\wheels


@@ -31,7 +31,7 @@ jobs:
- name: Install dependecies needed for ubuntu
run: |
sudo apt install -y protobuf-compiler libssl-dev
rustup update && rustup default
- name: Set up Python
uses: actions/setup-python@v5
with:
@@ -41,8 +41,8 @@ jobs:
- name: Build Python
working-directory: python
run: |
-python -m pip install --extra-index-url https://pypi.fury.io/lancedb/ -e .
+python -m pip install -e .
-python -m pip install --extra-index-url https://pypi.fury.io/lancedb/ -r ../docs/requirements.txt
+python -m pip install -r ../docs/requirements.txt
- name: Set up node
uses: actions/setup-node@v3
with:
@@ -72,9 +72,9 @@ jobs:
- name: Setup Pages
uses: actions/configure-pages@v2
- name: Upload artifact
-uses: actions/upload-pages-artifact@v3
+uses: actions/upload-pages-artifact@v1
with:
path: "docs/site"
- name: Deploy to GitHub Pages
id: deployment
-uses: actions/deploy-pages@v4
+uses: actions/deploy-pages@v1


@@ -49,7 +49,7 @@ jobs:
- name: Build Python
working-directory: docs/test
run:
-python -m pip install --extra-index-url https://pypi.fury.io/lancedb/ -r requirements.txt
+python -m pip install -r requirements.txt
- name: Create test files
run: |
cd docs/test


@@ -1,31 +0,0 @@
name: Check license headers
on:
push:
branches:
- main
pull_request:
paths:
- rust/**
- python/**
- nodejs/**
- java/**
- .github/workflows/license-header-check.yml
jobs:
check-licenses:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Install license-header-checker
working-directory: /tmp
run: |
curl -s https://raw.githubusercontent.com/lluissm/license-header-checker/master/install.sh | bash
mv /tmp/bin/license-header-checker /usr/local/bin/
- name: Check license headers (rust)
run: license-header-checker -a -v ./rust/license_header.txt ./ rs && [[ -z `git status -s` ]]
- name: Check license headers (python)
run: license-header-checker -a -v ./python/license_header.txt python py && [[ -z `git status -s` ]]
- name: Check license headers (typescript)
run: license-header-checker -a -v ./nodejs/license_header.txt nodejs ts && [[ -z `git status -s` ]]
- name: Check license headers (java)
run: license-header-checker -a -v ./nodejs/license_header.txt java java && [[ -z `git status -s` ]]


@@ -43,7 +43,7 @@ on:
jobs:
make-release:
# Creates tag and GH release. The GH release will trigger the build and release jobs.
-runs-on: ubuntu-24.04
+runs-on: ubuntu-latest
permissions:
contents: write
steps:
@@ -57,14 +57,15 @@ jobs:
# trigger any workflows watching for new tags. See:
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
- name: Validate Lance dependency is at stable version
if: ${{ inputs.type == 'stable' }}
run: python ci/validate_stable_lance.py
- name: Set git configs for bumpversion
shell: bash
run: |
git config user.name 'Lance Release'
git config user.email 'lance-dev@lancedb.com'
- name: Set up Python 3.11
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Bump Python version
if: ${{ inputs.python }}
working-directory: python
@@ -96,7 +97,3 @@ jobs:
if: ${{ !inputs.dry_run && inputs.other }}
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: ./.github/workflows/update_package_lock_nodejs
if: ${{ !inputs.dry_run && inputs.other }}
with:
github_token: ${{ secrets.GITHUB_TOKEN }}


@@ -53,9 +53,6 @@ jobs:
cargo clippy --all --all-features -- -D warnings
npm ci
npm run lint-ci
- name: Lint examples
working-directory: nodejs/examples
run: npm ci && npm run lint-ci
linux:
name: Linux (NodeJS ${{ matrix.node-version }})
timeout-minutes: 30
@@ -94,30 +91,6 @@ jobs:
env:
S3_TEST: "1"
run: npm run test
- name: Setup examples
working-directory: nodejs/examples
run: npm ci
- name: Test examples
working-directory: ./
env:
OPENAI_API_KEY: test
OPENAI_BASE_URL: http://0.0.0.0:8000
run: |
python ci/mock_openai.py &
cd nodejs/examples
npm test
- name: Check docs
run: |
# We run this as part of the job because the binary needs to be built
# first to export the types of the native code.
set -e
npm ci
npm run docs
if ! git diff --exit-code; then
echo "Docs need to be updated"
echo "Run 'npm run docs', fix any warnings, and commit the changes."
exit 1
fi
macos:
timeout-minutes: 30
runs-on: "macos-14"


@@ -101,7 +101,7 @@ jobs:
path: |
nodejs/dist/*.node
-node-linux-gnu:
+node-linux:
name: vectordb (${{ matrix.config.arch}}-unknown-linux-gnu)
runs-on: ${{ matrix.config.runner }}
# Only runs on tags that matches the make-release action
@@ -133,67 +133,15 @@ jobs:
free -h
- name: Build Linux Artifacts
run: |
-bash ci/build_linux_artifacts.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-unknown-linux-gnu
+bash ci/build_linux_artifacts.sh ${{ matrix.config.arch }}
- name: Upload Linux Artifacts
uses: actions/upload-artifact@v4
with:
-name: node-native-linux-${{ matrix.config.arch }}-gnu
+name: node-native-linux-${{ matrix.config.arch }}
path: |
node/dist/lancedb-vectordb-linux*.tgz
-node-linux-musl:
+nodejs-linux:
name: vectordb (${{ matrix.config.arch}}-unknown-linux-musl)
runs-on: ubuntu-latest
container: alpine:edge
# Only runs on tags that matches the make-release action
if: startsWith(github.ref, 'refs/tags/v')
strategy:
fail-fast: false
matrix:
config:
- arch: x86_64
- arch: aarch64
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install common dependencies
run: |
apk add protobuf-dev curl clang mold grep npm bash
curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
echo "source $HOME/.cargo/env" >> saved_env
echo "export CC=clang" >> saved_env
echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=-crt-static,+avx2,+fma,+f16c -Clinker=clang -Clink-arg=-fuse-ld=mold'" >> saved_env
- name: Configure aarch64 build
if: ${{ matrix.config.arch == 'aarch64' }}
run: |
source "$HOME/.cargo/env"
rustup target add aarch64-unknown-linux-musl
crt=$(realpath $(dirname $(rustup which rustc))/../lib/rustlib/aarch64-unknown-linux-musl/lib/self-contained)
sysroot_lib=/usr/aarch64-unknown-linux-musl/usr/lib
apk_url=https://dl-cdn.alpinelinux.org/alpine/latest-stable/main/aarch64/
curl -sSf $apk_url > apk_list
for pkg in gcc libgcc musl; do curl -sSf $apk_url$(cat apk_list | grep -oP '(?<=")'$pkg'-\d.*?(?=")') | tar zxf -; done
mkdir -p $sysroot_lib
echo 'GROUP ( libgcc_s.so.1 -lgcc )' > $sysroot_lib/libgcc_s.so
cp usr/lib/libgcc_s.so.1 $sysroot_lib
cp usr/lib/gcc/aarch64-alpine-linux-musl/*/libgcc.a $sysroot_lib
cp lib/ld-musl-aarch64.so.1 $sysroot_lib/libc.so
echo '!<arch>' > $sysroot_lib/libdl.a
(cd $crt && cp crti.o crtbeginS.o crtendS.o crtn.o -t $sysroot_lib)
echo "export CARGO_BUILD_TARGET=aarch64-unknown-linux-musl" >> saved_env
echo "export RUSTFLAGS='-Ctarget-cpu=apple-m1 -Ctarget-feature=-crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=--target=aarch64-unknown-linux-musl -Clink-arg=--sysroot=/usr/aarch64-unknown-linux-musl -Clink-arg=-lc'" >> saved_env
- name: Build Linux Artifacts
run: |
source ./saved_env
bash ci/manylinux_node/build_vectordb.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-unknown-linux-musl
- name: Upload Linux Artifacts
uses: actions/upload-artifact@v4
with:
name: node-native-linux-${{ matrix.config.arch }}-musl
path: |
node/dist/lancedb-vectordb-linux*.tgz
nodejs-linux-gnu:
name: lancedb (${{ matrix.config.arch}}-unknown-linux-gnu
runs-on: ${{ matrix.config.runner }}
# Only runs on tags that matches the make-release action
@@ -230,7 +178,7 @@ jobs:
- name: Upload Linux Artifacts
uses: actions/upload-artifact@v4
with:
-name: nodejs-native-linux-${{ matrix.config.arch }}-gnu
+name: nodejs-native-linux-${{ matrix.config.arch }}
path: |
nodejs/dist/*.node
# The generic files are the same in all distros so we just pick
@@ -244,62 +192,6 @@ jobs:
nodejs/dist/*
!nodejs/dist/*.node
nodejs-linux-musl:
name: lancedb (${{ matrix.config.arch}}-unknown-linux-musl
runs-on: ubuntu-latest
container: alpine:edge
# Only runs on tags that matches the make-release action
if: startsWith(github.ref, 'refs/tags/v')
strategy:
fail-fast: false
matrix:
config:
- arch: x86_64
- arch: aarch64
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install common dependencies
run: |
apk add protobuf-dev curl clang mold grep npm bash openssl-dev openssl-libs-static
curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
echo "source $HOME/.cargo/env" >> saved_env
echo "export CC=clang" >> saved_env
echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=-crt-static,+avx2,+fma,+f16c -Clinker=clang -Clink-arg=-fuse-ld=mold'" >> saved_env
echo "export X86_64_UNKNOWN_LINUX_MUSL_OPENSSL_INCLUDE_DIR=/usr/include" >> saved_env
echo "export X86_64_UNKNOWN_LINUX_MUSL_OPENSSL_LIB_DIR=/usr/lib" >> saved_env
- name: Configure aarch64 build
if: ${{ matrix.config.arch == 'aarch64' }}
run: |
source "$HOME/.cargo/env"
rustup target add aarch64-unknown-linux-musl
crt=$(realpath $(dirname $(rustup which rustc))/../lib/rustlib/aarch64-unknown-linux-musl/lib/self-contained)
sysroot_lib=/usr/aarch64-unknown-linux-musl/usr/lib
apk_url=https://dl-cdn.alpinelinux.org/alpine/latest-stable/main/aarch64/
curl -sSf $apk_url > apk_list
for pkg in gcc libgcc musl openssl-dev openssl-libs-static; do curl -sSf $apk_url$(cat apk_list | grep -oP '(?<=")'$pkg'-\d.*?(?=")') | tar zxf -; done
mkdir -p $sysroot_lib
echo 'GROUP ( libgcc_s.so.1 -lgcc )' > $sysroot_lib/libgcc_s.so
cp usr/lib/libgcc_s.so.1 $sysroot_lib
cp usr/lib/gcc/aarch64-alpine-linux-musl/*/libgcc.a $sysroot_lib
cp lib/ld-musl-aarch64.so.1 $sysroot_lib/libc.so
echo '!<arch>' > $sysroot_lib/libdl.a
(cd $crt && cp crti.o crtbeginS.o crtendS.o crtn.o -t $sysroot_lib)
echo "export CARGO_BUILD_TARGET=aarch64-unknown-linux-musl" >> saved_env
echo "export RUSTFLAGS='-Ctarget-feature=-crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=--target=aarch64-unknown-linux-musl -Clink-arg=--sysroot=/usr/aarch64-unknown-linux-musl -Clink-arg=-lc'" >> saved_env
echo "export AARCH64_UNKNOWN_LINUX_MUSL_OPENSSL_INCLUDE_DIR=$(realpath usr/include)" >> saved_env
echo "export AARCH64_UNKNOWN_LINUX_MUSL_OPENSSL_LIB_DIR=$(realpath usr/lib)" >> saved_env
- name: Build Linux Artifacts
run: |
source ./saved_env
bash ci/manylinux_node/build_lancedb.sh ${{ matrix.config.arch }}
- name: Upload Linux Artifacts
uses: actions/upload-artifact@v4
with:
name: nodejs-native-linux-${{ matrix.config.arch }}-musl
path: |
nodejs/dist/*.node
node-windows:
name: vectordb ${{ matrix.target }}
runs-on: windows-2022
@@ -334,51 +226,6 @@ jobs:
path: |
node/dist/lancedb-vectordb-win32*.tgz
node-windows-arm64:
name: vectordb ${{ matrix.config.arch }}-pc-windows-msvc
# if: startsWith(github.ref, 'refs/tags/v')
runs-on: ubuntu-latest
container: alpine:edge
strategy:
fail-fast: false
matrix:
config:
# - arch: x86_64
- arch: aarch64
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install dependencies
run: |
apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
echo "source $HOME/.cargo/env" >> saved_env
echo "export CC=clang" >> saved_env
echo "export AR=llvm-ar" >> saved_env
source "$HOME/.cargo/env"
rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
(mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
- name: Configure x86_64 build
if: ${{ matrix.config.arch == 'x86_64' }}
run: |
echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
- name: Configure aarch64 build
if: ${{ matrix.config.arch == 'aarch64' }}
run: |
echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
- name: Build Windows Artifacts
run: |
source ./saved_env
bash ci/manylinux_node/build_vectordb.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-pc-windows-msvc
- name: Upload Windows Artifacts
uses: actions/upload-artifact@v4
with:
name: node-native-windows-${{ matrix.config.arch }}
path: |
node/dist/lancedb-vectordb-win32*.tgz
nodejs-windows:
name: lancedb ${{ matrix.target }}
runs-on: windows-2022
@@ -413,57 +260,9 @@ jobs:
path: |
nodejs/dist/*.node
nodejs-windows-arm64:
name: lancedb ${{ matrix.config.arch }}-pc-windows-msvc
# Only runs on tags that matches the make-release action
# if: startsWith(github.ref, 'refs/tags/v')
runs-on: ubuntu-latest
container: alpine:edge
strategy:
fail-fast: false
matrix:
config:
# - arch: x86_64
- arch: aarch64
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install dependencies
run: |
apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
echo "source $HOME/.cargo/env" >> saved_env
echo "export CC=clang" >> saved_env
echo "export AR=llvm-ar" >> saved_env
source "$HOME/.cargo/env"
rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
(mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
printf '#!/bin/sh\ncargo "$@"' > $HOME/.cargo/bin/cargo-xwin
chmod u+x $HOME/.cargo/bin/cargo-xwin
- name: Configure x86_64 build
if: ${{ matrix.config.arch == 'x86_64' }}
run: |
echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
- name: Configure aarch64 build
if: ${{ matrix.config.arch == 'aarch64' }}
run: |
echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
- name: Build Windows Artifacts
run: |
source ./saved_env
bash ci/manylinux_node/build_lancedb.sh ${{ matrix.config.arch }}
- name: Upload Windows Artifacts
uses: actions/upload-artifact@v4
with:
name: nodejs-native-windows-${{ matrix.config.arch }}
path: |
nodejs/dist/*.node
release:
name: vectordb NPM Publish
-needs: [node, node-macos, node-linux-gnu, node-linux-musl, node-windows, node-windows-arm64]
+needs: [node, node-macos, node-linux, node-windows]
runs-on: ubuntu-latest
# Only runs on tags that matches the make-release action
if: startsWith(github.ref, 'refs/tags/v')
@@ -481,7 +280,7 @@ jobs:
env:
NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
run: |
# Tag beta as "preview" instead of default "latest". See lancedb
# npm publish step for more info.
if [[ $GITHUB_REF =~ refs/tags/v(.*)-beta.* ]]; then
PUBLISH_ARGS="--tag preview"
@@ -503,7 +302,7 @@ jobs:
release-nodejs:
name: lancedb NPM Publish
-needs: [nodejs-macos, nodejs-linux-gnu, nodejs-linux-musl, nodejs-windows, nodejs-windows-arm64]
+needs: [nodejs-macos, nodejs-linux, nodejs-windows]
runs-on: ubuntu-latest
# Only runs on tags that matches the make-release action
if: startsWith(github.ref, 'refs/tags/v')
@@ -561,7 +360,6 @@ jobs:
SLACK_WEBHOOK_URL: ${{ secrets.ACTION_MONITORING_SLACK }}
update-package-lock:
if: startsWith(github.ref, 'refs/tags/v')
needs: [release]
runs-on: ubuntu-latest
permissions:
@@ -571,7 +369,7 @@ jobs:
uses: actions/checkout@v4
with:
ref: main
-token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
+persist-credentials: false
fetch-depth: 0
lfs: true
- uses: ./.github/workflows/update_package_lock
@@ -579,7 +377,6 @@ jobs:
github_token: ${{ secrets.GITHUB_TOKEN }}
update-package-lock-nodejs:
if: startsWith(github.ref, 'refs/tags/v')
needs: [release-nodejs]
runs-on: ubuntu-latest
permissions:
@@ -589,7 +386,7 @@ jobs:
uses: actions/checkout@v4
with:
ref: main
-token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
+persist-credentials: false
fetch-depth: 0
lfs: true
- uses: ./.github/workflows/update_package_lock_nodejs
@@ -597,7 +394,6 @@ jobs:
github_token: ${{ secrets.GITHUB_TOKEN }}
gh-release:
if: startsWith(github.ref, 'refs/tags/v')
runs-on: ubuntu-latest
permissions:
contents: write


@@ -15,21 +15,15 @@ jobs:
- platform: x86_64
manylinux: "2_17"
extra_args: ""
-runner: ubuntu-22.04
- platform: x86_64
manylinux: "2_28"
extra_args: "--features fp16kernels"
-runner: ubuntu-22.04
- platform: aarch64
-manylinux: "2_17"
+manylinux: "2_24"
extra_args: ""
-# For successful fat LTO builds, we need a large runner to avoid OOM errors.
-runner: ubuntu-2404-8x-arm64
-- platform: aarch64
-manylinux: "2_28"
-extra_args: "--features fp16kernels"
-runner: ubuntu-2404-8x-arm64
-runs-on: ${{ matrix.config.runner }}
+# We don't build fp16 kernels for aarch64, because it uses
+# cross compilation image, which doesn't have a new enough compiler.
+runs-on: "ubuntu-22.04"
steps:
- uses: actions/checkout@v4
with:
@@ -89,7 +83,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v4
with:
-python-version: 3.12
+python-version: 3.8
- uses: ./.github/workflows/build_windows_wheel
with:
python-minor-version: 8


@@ -30,17 +30,16 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
-python-version: "3.12"
+python-version: "3.11"
- name: Install ruff
run: |
-pip install ruff==0.9.9
+pip install ruff==0.5.4
- name: Format check
run: ruff format --check .
- name: Lint
run: ruff check .
-type-check:
-name: "Type Check"
+doctest:
+name: "Doctest"
timeout-minutes: 30
runs-on: "ubuntu-22.04"
defaults:
@@ -55,36 +54,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
-python-version: "3.12"
+python-version: "3.11"
- name: Install protobuf compiler
run: |
sudo apt update
sudo apt install -y protobuf-compiler
pip install toml
- name: Install dependencies
run: |
python ../ci/parse_requirements.py pyproject.toml --extras dev,tests,embeddings > requirements.txt
pip install -r requirements.txt
- name: Run pyright
run: pyright
doctest:
name: "Doctest"
timeout-minutes: 30
runs-on: "ubuntu-24.04"
defaults:
run:
shell: bash
working-directory: python
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
lfs: true
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.12"
cache: "pip" cache: "pip"
- name: Install protobuf - name: Install protobuf
run: | run: |
@@ -105,8 +75,8 @@ jobs:
timeout-minutes: 30
strategy:
matrix:
-python-minor-version: ["9", "12"]
+python-minor-version: ["9", "11"]
-runs-on: "ubuntu-24.04"
+runs-on: "ubuntu-22.04"
defaults:
run:
shell: bash
@@ -157,7 +127,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
-python-version: "3.12"
+python-version: "3.11"
- uses: Swatinem/rust-cache@v2
with:
workspaces: python
@@ -168,7 +138,7 @@
run: rm -rf target/wheels
windows:
name: "Windows: ${{ matrix.config.name }}"
-timeout-minutes: 60
+timeout-minutes: 30
strategy:
matrix:
config:
@@ -187,7 +157,7 @@
- name: Set up Python
uses: actions/setup-python@v5
with:
-python-version: "3.12"
+python-version: "3.11"
- uses: Swatinem/rust-cache@v2
with:
workspaces: python
@@ -198,7 +168,7 @@
run: rm -rf target/wheels
pydantic1x:
timeout-minutes: 30
-runs-on: "ubuntu-24.04"
+runs-on: "ubuntu-22.04"
defaults:
run:
shell: bash


@@ -22,7 +22,6 @@ env:
# "1" means line tables only, which is useful for panic tracebacks. # "1" means line tables only, which is useful for panic tracebacks.
RUSTFLAGS: "-C debuginfo=1" RUSTFLAGS: "-C debuginfo=1"
RUST_BACKTRACE: "1" RUST_BACKTRACE: "1"
CARGO_INCREMENTAL: 0
jobs:
lint:
@@ -36,49 +35,21 @@ jobs:
CC: clang-18
CXX: clang++-18
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
lfs: true
- uses: Swatinem/rust-cache@v2
with:
workspaces: rust
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y protobuf-compiler libssl-dev
- name: Run format
run: cargo fmt --all -- --check
- name: Run clippy
run: cargo clippy --workspace --tests --all-features -- -D warnings
build-no-lock:
runs-on: ubuntu-24.04
timeout-minutes: 30
env:
# Need up-to-date compilers for kernels
CC: clang
CXX: clang++
steps:
- uses: actions/checkout@v4
# Building without a lock file often requires the latest Rust version since downstream
# dependencies may have updated their minimum Rust version.
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: "stable"
# Remove cargo.lock to force a fresh build
- name: Remove Cargo.lock
run: rm -f Cargo.lock
- uses: rui314/setup-mold@v1
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y protobuf-compiler libssl-dev
- name: Build all
run: |
cargo build --benches --all-features --tests
linux:
timeout-minutes: 30
# To build all features, we need more disk space than is available
@@ -94,41 +65,37 @@ jobs:
CC: clang-18
CXX: clang++-18
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
lfs: true
- uses: Swatinem/rust-cache@v2
with:
workspaces: rust
- name: Install dependencies
run: |
-# This shaves 2 minutes off this step in CI. This doesn't seem to be
-# necessary in standard runners, but it is in the 4x runners.
-sudo rm /var/lib/man-db/auto-update
+sudo apt update
sudo apt install -y protobuf-compiler libssl-dev
-- uses: rui314/setup-mold@v1
-- name: Make Swap
-run: |
-sudo fallocate -l 16G /swapfile
-sudo chmod 600 /swapfile
-sudo mkswap /swapfile
-sudo swapon /swapfile
-- name: Start S3 integration test environment
-working-directory: .
-run: docker compose up --detach --wait
-- name: Build
-run: cargo build --all-features --tests --locked --examples
-- name: Run tests
-run: cargo test --all-features --locked
-- name: Run examples
-run: cargo run --example simple --locked
+- name: Make Swap
+run: |
+sudo fallocate -l 16G /swapfile
+sudo chmod 600 /swapfile
+sudo mkswap /swapfile
+sudo swapon /swapfile
+- name: Start S3 integration test environment
+working-directory: .
+run: docker compose up --detach --wait
+- name: Build
+run: cargo build --all-features
+- name: Run tests
+run: cargo test --all-features
+- name: Run examples
+run: cargo run --example simple
macos:
timeout-minutes: 30
strategy:
matrix:
-mac-runner: ["macos-13", "macos-14"]
+mac-runner: [ "macos-13", "macos-14" ]
runs-on: "${{ matrix.mac-runner }}"
defaults:
run:
@@ -137,8 +104,8 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
lfs: true
- name: CPU features
run: sysctl -a | grep cpu
- uses: Swatinem/rust-cache@v2
@@ -146,15 +113,11 @@
workspaces: rust
- name: Install dependencies
run: brew install protobuf
- name: Build
run: cargo build --all-features
- name: Run tests
-run: |
-# Don't run the s3 integration tests since docker isn't available
-# on this image.
-ALL_FEATURES=`cargo metadata --format-version=1 --no-deps \
-| jq -r '.packages[] | .features | keys | .[]' \
-| grep -v s3-test | sort | uniq | paste -s -d "," -`
-cargo test --features $ALL_FEATURES --locked
+# Run with everything except the integration tests.
+run: cargo test --features remote,fp16kernels
windows:
runs-on: windows-2022
steps:
@@ -174,170 +137,5 @@ jobs:
- name: Run tests
run: |
$env:VCPKG_ROOT = $env:VCPKG_INSTALLATION_ROOT
-cargo test --features remote --locked
+cargo build
+cargo test
windows-arm64-cross:
# We cross compile in Node releases, so we want to make sure
# this can run successfully.
runs-on: ubuntu-latest
container: alpine:edge
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install dependencies (part 1)
run: |
set -e
apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
- name: Install rust
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
target: aarch64-pc-windows-msvc
- name: Install dependencies (part 2)
run: |
set -e
mkdir -p sysroot
cd sysroot
sh ../ci/sysroot-aarch64-pc-windows-msvc.sh
- name: Check
env:
CC: clang
AR: llvm-ar
C_INCLUDE_PATH: /usr/aarch64-pc-windows-msvc/usr/include
CARGO_BUILD_TARGET: aarch64-pc-windows-msvc
RUSTFLAGS: -Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib
run: |
source $HOME/.cargo/env
cargo check --features remote --locked
windows-arm64:
runs-on: windows-4x-arm
steps:
- name: Install Git
run: |
Invoke-WebRequest -Uri "https://github.com/git-for-windows/git/releases/download/v2.44.0.windows.1/Git-2.44.0-64-bit.exe" -OutFile "git-installer.exe"
Start-Process -FilePath "git-installer.exe" -ArgumentList "/VERYSILENT", "/NORESTART" -Wait
shell: powershell
- name: Add Git to PATH
run: |
Add-Content $env:GITHUB_PATH "C:\Program Files\Git\bin"
$env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
shell: powershell
- name: Configure Git symlinks
run: git config --global core.symlinks true
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.13"
- name: Install Visual Studio Build Tools
run: |
Invoke-WebRequest -Uri "https://aka.ms/vs/17/release/vs_buildtools.exe" -OutFile "vs_buildtools.exe"
Start-Process -FilePath "vs_buildtools.exe" -ArgumentList "--quiet", "--wait", "--norestart", "--nocache", `
"--installPath", "C:\BuildTools", `
"--add", "Microsoft.VisualStudio.Component.VC.Tools.ARM64", `
"--add", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", `
"--add", "Microsoft.VisualStudio.Component.Windows11SDK.22621", `
"--add", "Microsoft.VisualStudio.Component.VC.ATL", `
"--add", "Microsoft.VisualStudio.Component.VC.ATLMFC", `
"--add", "Microsoft.VisualStudio.Component.VC.Llvm.Clang" -Wait
shell: powershell
- name: Add Visual Studio Build Tools to PATH
run: |
$vsPath = "C:\BuildTools\VC\Tools\MSVC"
$latestVersion = (Get-ChildItem $vsPath | Sort-Object {[version]$_.Name} -Descending)[0].Name
Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\arm64"
Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\x64"
Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\arm64"
Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\x64"
Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\Llvm\x64\bin"
# Add MSVC runtime libraries to LIB
$env:LIB = "C:\BuildTools\VC\Tools\MSVC\$latestVersion\lib\arm64;" +
"C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\um\arm64;" +
"C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\ucrt\arm64"
Add-Content $env:GITHUB_ENV "LIB=$env:LIB"
# Add INCLUDE paths
$env:INCLUDE = "C:\BuildTools\VC\Tools\MSVC\$latestVersion\include;" +
"C:\Program Files (x86)\Windows Kits\10\Include\10.0.22621.0\ucrt;" +
"C:\Program Files (x86)\Windows Kits\10\Include\10.0.22621.0\um;" +
"C:\Program Files (x86)\Windows Kits\10\Include\10.0.22621.0\shared"
Add-Content $env:GITHUB_ENV "INCLUDE=$env:INCLUDE"
shell: powershell
- name: Install Rust
run: |
Invoke-WebRequest https://win.rustup.rs/x86_64 -OutFile rustup-init.exe
.\rustup-init.exe -y --default-host aarch64-pc-windows-msvc --default-toolchain 1.83.0
shell: powershell
- name: Add Rust to PATH
run: |
Add-Content $env:GITHUB_PATH "$env:USERPROFILE\.cargo\bin"
shell: powershell
- uses: Swatinem/rust-cache@v2
with:
workspaces: rust
- name: Install 7-Zip ARM
run: |
New-Item -Path 'C:\7zip' -ItemType Directory
Invoke-WebRequest https://7-zip.org/a/7z2408-arm64.exe -OutFile C:\7zip\7z-installer.exe
Start-Process -FilePath C:\7zip\7z-installer.exe -ArgumentList '/S' -Wait
shell: powershell
- name: Add 7-Zip to PATH
run: Add-Content $env:GITHUB_PATH "C:\Program Files\7-Zip"
shell: powershell
- name: Install Protoc v21.12
working-directory: C:\
run: |
if (Test-Path 'C:\protoc') {
Write-Host "Protoc directory exists, skipping installation"
return
}
New-Item -Path 'C:\protoc' -ItemType Directory
Set-Location C:\protoc
Invoke-WebRequest https://github.com/protocolbuffers/protobuf/releases/download/v21.12/protoc-21.12-win64.zip -OutFile C:\protoc\protoc.zip
& 'C:\Program Files\7-Zip\7z.exe' x protoc.zip
shell: powershell
- name: Add Protoc to PATH
run: Add-Content $env:GITHUB_PATH "C:\protoc\bin"
shell: powershell
- name: Run tests
run: |
$env:VCPKG_ROOT = $env:VCPKG_INSTALLATION_ROOT
cargo test --target aarch64-pc-windows-msvc --features remote --locked
msrv:
# Check the minimum supported Rust version
name: MSRV Check - Rust v${{ matrix.msrv }}
runs-on: ubuntu-24.04
strategy:
matrix:
msrv: ["1.78.0"] # This should match up with rust-version in Cargo.toml
env:
# Need up-to-date compilers for kernels
CC: clang-18
CXX: clang++-18
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y protobuf-compiler libssl-dev
- name: Install ${{ matrix.msrv }}
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ matrix.msrv }}
- name: Downgrade dependencies
# These packages have newer requirements for MSRV
run: |
cargo update -p aws-sdk-bedrockruntime --precise 1.64.0
cargo update -p aws-sdk-dynamodb --precise 1.55.0
cargo update -p aws-config --precise 1.5.10
cargo update -p aws-sdk-kms --precise 1.51.0
cargo update -p aws-sdk-s3 --precise 1.65.0
cargo update -p aws-sdk-sso --precise 1.50.0
cargo update -p aws-sdk-ssooidc --precise 1.51.0
cargo update -p aws-sdk-sts --precise 1.51.0
cargo update -p home --precise 0.5.9
- name: cargo +${{ matrix.msrv }} check
run: cargo check --workspace --tests --benches --all-features


@@ -17,12 +17,11 @@ runs:
run: |
python -m pip install --upgrade pip
pip install twine
python3 -m pip install --upgrade pkginfo
- name: Choose repo
shell: bash
id: choose_repo
run: |
-if [[ ${{ github.ref }} == *beta* ]]; then
+if [ ${{ github.ref }} == "*beta*" ]; then
echo "repo=fury" >> $GITHUB_OUTPUT
else
echo "repo=pypi" >> $GITHUB_OUTPUT
@@ -33,7 +32,7 @@ runs:
FURY_TOKEN: ${{ inputs.fury_token }}
PYPI_TOKEN: ${{ inputs.pypi_token }}
run: |
-if [[ ${{ steps.choose_repo.outputs.repo }} == fury ]]; then
+if [ ${{ steps.choose_repo.outputs.repo }} == "fury" ]; then
WHEEL=$(ls target/wheels/lancedb-*.whl 2> /dev/null | head -n 1)
echo "Uploading $WHEEL to Fury"
curl -f -F package=@$WHEEL https://$FURY_TOKEN@push.fury.io/lancedb/

.gitignore (vendored, 3 changed lines)

@@ -9,6 +9,7 @@ venv
.vscode
.zed
rust/target
rust/Cargo.lock
site
@@ -41,3 +42,5 @@ dist
target
**/sccache.log
Cargo.lock


@@ -1,27 +1,21 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0
hooks:
- id: check-yaml
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
-rev: v0.9.9
+rev: v0.2.2
hooks:
- id: ruff
-# - repo: https://github.com/RobertCraigie/pyright-python
-# rev: v1.1.395
-# hooks:
-# - id: pyright
-# args: ["--project", "python"]
-# additional_dependencies: [pyarrow-stubs]
- repo: local
hooks:
- id: local-biome-check
name: biome check
entry: npx @biomejs/biome@1.8.3 check --config-path nodejs/biome.json nodejs/
language: system
types: [text]
files: "nodejs/.*"
exclude: nodejs/lancedb/native.d.ts|nodejs/dist/.*|nodejs/examples/.*


@@ -1,78 +0,0 @@
# Contributing to LanceDB
LanceDB is an open-source project and we welcome contributions from the community.
This document outlines the process for contributing to LanceDB.
## Reporting Issues
If you encounter a bug or have a feature request, please open an issue on the
[GitHub issue tracker](https://github.com/lancedb/lancedb).
## Picking an issue
We track issues on the GitHub issue tracker. If you are looking for something to
work on, check the [good first issue](https://github.com/lancedb/lancedb/contribute) label. These issues are typically the best described and have the smallest scope.
If there's an issue you are interested in working on, please leave a comment on the issue. This will help us avoid duplicate work. Additionally, if you have questions about the issue, please ask them in the issue comments. We are happy to provide guidance on how to approach the issue.
## Configuring Git
First, fork the repository on GitHub, then clone your fork:
```bash
git clone https://github.com/<username>/lancedb.git
cd lancedb
```
Then add the main repository as a remote:
```bash
git remote add upstream https://github.com/lancedb/lancedb.git
git fetch upstream
```
## Setting up your development environment
We have development environments for Python, Typescript, and Java. Each environment has its own setup instructions.
* [Python](python/CONTRIBUTING.md)
* [Typescript](nodejs/CONTRIBUTING.md)
<!-- TODO: add Java contributing guide -->
* [Documentation](docs/README.md)
## Best practices for pull requests
For the best chance of having your pull request accepted, please follow these guidelines:
1. Unit test all bug fixes and new features. Your code will not be merged if it
doesn't have tests.
1. If you change the public API, update the documentation in the `docs` directory.
1. Aim to minimize the number of changes in each pull request. Keep to solving
one problem at a time, when possible.
1. Before marking a pull request ready-for-review, do a self review of your code.
Is it clear why you are making the changes? Are the changes easy to understand?
1. Use [conventional commit messages](https://www.conventionalcommits.org/en/) as pull request titles. Examples:
* New feature: `feat: adding foo API`
* Bug fix: `fix: issue with foo API`
* Documentation change: `docs: adding foo API documentation`
1. If your pull request is a work in progress, leave the pull request as a draft.
We will assume the pull request is ready for review when it is opened.
1. When writing tests, test the error cases. Make sure they have understandable
error messages.
## Project structure
The core library is written in Rust. The Python, Typescript, and Java libraries
are wrappers around the Rust library.
* `src/lancedb`: Rust library source code
* `python`: Python package source code
* `nodejs`: Typescript package source code
* `node`: **Deprecated** Typescript package source code
* `java`: Java package source code
* `docs`: Documentation source code
## Release process
For information on the release process, see: [release_process.md](release_process.md)

Cargo.lock (generated, 8202 changed lines): file diff suppressed because it is too large.


@@ -18,55 +18,39 @@ repository = "https://github.com/lancedb/lancedb"
description = "Serverless, low-latency vector database for AI applications"
keywords = ["lancedb", "lance", "database", "vector", "search"]
categories = ["database-implementations"]
rust-version = "1.78.0"
[workspace.dependencies]
-lance = { "version" = "=0.24.1", "features" = ["dynamodb"] }
-lance-io = { version = "=0.24.1" }
-lance-index = { version = "=0.24.1" }
-lance-linalg = { version = "=0.24.1" }
-lance-table = { version = "=0.24.1" }
-lance-testing = { version = "=0.24.1" }
-lance-datafusion = { version = "=0.24.1" }
-lance-encoding = { version = "=0.24.1" }
+lance = { "version" = "=0.18.0", "features" = ["dynamodb"] }
+lance-index = { "version" = "=0.18.0" }
+lance-linalg = { "version" = "=0.18.0" }
+lance-table = { "version" = "=0.18.0" }
+lance-testing = { "version" = "=0.18.0" }
+lance-datafusion = { "version" = "=0.18.0" }
+lance-encoding = { "version" = "=0.18.0" }
# Note that this one does not include pyarrow
-arrow = { version = "54.1", optional = false }
+arrow = { version = "52.2", optional = false }
-arrow-array = "54.1"
+arrow-array = "52.2"
-arrow-data = "54.1"
+arrow-data = "52.2"
-arrow-ipc = "54.1"
+arrow-ipc = "52.2"
-arrow-ord = "54.1"
+arrow-ord = "52.2"
-arrow-schema = "54.1"
+arrow-schema = "52.2"
-arrow-arith = "54.1"
+arrow-arith = "52.2"
-arrow-cast = "54.1"
+arrow-cast = "52.2"
async-trait = "0"
-datafusion = { version = "45.0", default-features = false }
-datafusion-catalog = "45.0"
-datafusion-common = { version = "45.0", default-features = false }
-datafusion-execution = "45.0"
-datafusion-expr = "45.0"
-datafusion-physical-plan = "45.0"
-env_logger = "0.11"
+chrono = "0.4.35"
+datafusion-common = "40.0"
+datafusion-physical-plan = "40.0"
half = { "version" = "=2.4.1", default-features = false, features = [
"num-traits",
] }
futures = "0"
log = "0.4"
-moka = { version = "0.12", features = ["future"] }
+moka = { version = "0.11", features = ["future"] }
-object_store = "0.11.0"
+object_store = "0.10.2"
pin-project = "1.0.7"
-snafu = "0.8"
+snafu = "0.7.4"
url = "2"
num-traits = "0.2"
rand = "0.8"
regex = "1.10"
lazy_static = "1"
semver = "1.0.25"
# Temporary pins to work around downstream issues
# https://github.com/apache/arrow-rs/commit/2fddf85afcd20110ce783ed5b4cdeb82293da30b
chrono = "=0.4.39"
# https://github.com/RustCrypto/formats/issues/1684
base64ct = "=1.6.0"
# Workaround for: https://github.com/eira-fransham/crunchy/issues/13
crunchy = "=0.2.2"


@@ -10,7 +10,6 @@
[![Blog](https://img.shields.io/badge/Blog-12100E?style=for-the-badge&logoColor=white)](https://blog.lancedb.com/)
[![Discord](https://img.shields.io/badge/Discord-%235865F2.svg?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/zMM32dvNtd)
[![Twitter](https://img.shields.io/badge/Twitter-%231DA1F2.svg?style=for-the-badge&logo=Twitter&logoColor=white)](https://twitter.com/lancedb)
[![Gurubase](https://img.shields.io/badge/Gurubase-Ask%20LanceDB%20Guru-006BFF?style=for-the-badge)](https://gurubase.io/g/lancedb)
</p>


@@ -1,9 +1,8 @@
#!/bin/bash
set -e
ARCH=${1:-x86_64}
TARGET_TRIPLE=${2:-x86_64-unknown-linux-gnu}
# We pass down the current user so that when we later mount the local files
# into the container, the files are accessible by the current user.
pushd ci/manylinux_node
docker build \
@@ -19,4 +18,4 @@ docker run \
-v $(pwd):/io -w /io \
--memory-swap=-1 \
lancedb-node-manylinux \
-bash ci/manylinux_node/build_vectordb.sh $ARCH $TARGET_TRIPLE
+bash ci/manylinux_node/build_vectordb.sh $ARCH


@@ -3,7 +3,6 @@
# Targets supported:
# - x86_64-pc-windows-msvc
# - i686-pc-windows-msvc
# - aarch64-pc-windows-msvc
function Prebuild-Rust {
param (
@@ -32,7 +31,7 @@ function Build-NodeBinaries {
$targets = $args[0]
if (-not $targets) {
-$targets = "x86_64-pc-windows-msvc", "aarch64-pc-windows-msvc"
+$targets = "x86_64-pc-windows-msvc"
}
Write-Host "Building artifacts for targets: $targets"


@@ -3,7 +3,6 @@
# Targets supported:
# - x86_64-pc-windows-msvc
# - i686-pc-windows-msvc
# - aarch64-pc-windows-msvc
function Prebuild-Rust {
param (
@@ -32,7 +31,7 @@ function Build-NodeBinaries {
$targets = $args[0]
if (-not $targets) {
-$targets = "x86_64-pc-windows-msvc", "aarch64-pc-windows-msvc"
+$targets = "x86_64-pc-windows-msvc"
}
Write-Host "Building artifacts for targets: $targets"


@@ -11,8 +11,7 @@ fi
export OPENSSL_STATIC=1
export OPENSSL_INCLUDE_DIR=/usr/local/include/openssl
-#Alpine doesn't have .bashrc
+source $HOME/.bashrc
FILE=$HOME/.bashrc && test -f $FILE && source $FILE
cd nodejs
npm ci


@@ -2,20 +2,18 @@
# Builds the node module for manylinux. Invoked by ci/build_linux_artifacts.sh.
set -e
ARCH=${1:-x86_64}
TARGET_TRIPLE=${2:-x86_64-unknown-linux-gnu}
if [ "$ARCH" = "x86_64" ]; then if [ "$ARCH" = "x86_64" ]; then
export OPENSSL_LIB_DIR=/usr/local/lib64/ export OPENSSL_LIB_DIR=/usr/local/lib64/
else else
export OPENSSL_LIB_DIR=/usr/local/lib/ export OPENSSL_LIB_DIR=/usr/local/lib/
fi fi
export OPENSSL_STATIC=1 export OPENSSL_STATIC=1
export OPENSSL_INCLUDE_DIR=/usr/local/include/openssl export OPENSSL_INCLUDE_DIR=/usr/local/include/openssl
#Alpine doesn't have .bashrc source $HOME/.bashrc
FILE=$HOME/.bashrc && test -f $FILE && source $FILE
cd node
npm ci
npm run build-release
-npm run pack-build -- -t $TARGET_TRIPLE
+npm run pack-build


@@ -1,57 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors
"""A zero-dependency mock OpenAI embeddings API endpoint for testing purposes."""
import argparse
import json
import http.server
class MockOpenAIRequestHandler(http.server.BaseHTTPRequestHandler):
def do_POST(self):
content_length = int(self.headers["Content-Length"])
post_data = self.rfile.read(content_length)
post_data = json.loads(post_data.decode("utf-8"))
# See: https://platform.openai.com/docs/api-reference/embeddings/create
if isinstance(post_data["input"], str):
num_inputs = 1
else:
num_inputs = len(post_data["input"])
model = post_data.get("model", "text-embedding-ada-002")
data = []
for i in range(num_inputs):
data.append({
"object": "embedding",
"embedding": [0.1] * 1536,
"index": i,
})
response = {
"object": "list",
"data": data,
"model": model,
"usage": {
"prompt_tokens": 0,
"total_tokens": 0,
}
}
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response).encode("utf-8"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Mock OpenAI embeddings API endpoint")
parser.add_argument("--port", type=int, default=8000, help="Port to listen on")
args = parser.parse_args()
port = args.port
print(f"server started on port {port}. Press Ctrl-C to stop.")
print(f"To use, set OPENAI_BASE_URL=http://localhost:{port} in your environment.")
with http.server.HTTPServer(("0.0.0.0", port), MockOpenAIRequestHandler) as server:
server.serve_forever()


@@ -1,41 +0,0 @@
import argparse
import toml
def parse_dependencies(pyproject_path, extras=None):
with open(pyproject_path, "r") as file:
pyproject = toml.load(file)
dependencies = pyproject.get("project", {}).get("dependencies", [])
for dependency in dependencies:
print(dependency)
optional_dependencies = pyproject.get("project", {}).get(
"optional-dependencies", {}
)
if extras:
for extra in extras.split(","):
for dep in optional_dependencies.get(extra, []):
print(dep)
def main():
parser = argparse.ArgumentParser(
description="Generate requirements.txt from pyproject.toml"
)
parser.add_argument("path", type=str, help="Path to pyproject.toml")
parser.add_argument(
"--extras",
type=str,
help="Comma-separated list of extras to include",
default="",
)
args = parser.parse_args()
parse_dependencies(args.path, args.extras)
if __name__ == "__main__":
main()


@@ -1,105 +0,0 @@
#!/bin/sh
# https://github.com/mstorsjo/msvc-wine/blob/master/vsdownload.py
# https://github.com/mozilla/gecko-dev/blob/6027d1d91f2d3204a3992633b3ef730ff005fc64/build/vs/vs2022-car.yaml
# function dl() {
# curl -O https://download.visualstudio.microsoft.com/download/pr/$1
# }
# [[.h]]
# "id": "Win11SDK_10.0.26100"
# "version": "10.0.26100.7"
# libucrt.lib
# example: <assert.h>
# dir: ucrt/
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/2ee3a5fc6e9fc832af7295b138e93839/universal%20crt%20headers%20libraries%20and%20sources-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/b1aa09b90fe314aceb090f6ec7626624/16ab2ea2187acffa6435e334796c8c89.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/400609bb0ff5804e36dbe6dcd42a7f01/6ee7bbee8435130a869cf971694fd9e2.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/2ac327317abb865a0e3f56b2faefa918/78fa3c824c2c48bd4a49ab5969adaaf7.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/f034bc0b2680f67dccd4bfeea3d0f932/7afc7b670accd8e3cc94cfffd516f5cb.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/7ed5e12f9d50f80825a8b27838cf4c7f/96076045170fe5db6d5dcf14b6f6688e.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/764edc185a696bda9e07df8891dddbbb/a1e2a83aa8a71c48c742eeaff6e71928.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/66854bedc6dbd5ccb5dd82c8e2412231/b2f03f34ff83ec013b9e45c7cd8e8a73.cab
# example: <windows.h>
# dir: um/
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/b286efac4d83a54fc49190bddef1edc9/windows%20sdk%20for%20windows%20store%20apps%20headers-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/e0dc3811d92ab96fcb72bf63d6c08d71/766c0ffd568bbb31bf7fb6793383e24a.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/613503da4b5628768497822826aed39f/8125ee239710f33ea485965f76fae646.cab
# example: <winapifamily.h>
# dir: /shared
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/122979f0348d3a2a36b6aa1a111d5d0c/windows%20sdk%20for%20windows%20store%20apps%20headers%20onecoreuap-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/766e04beecdfccff39e91dd9eb32834a/e89e3dcbb016928c7e426238337d69eb.cab
# "id": "Microsoft.VisualC.14.16.CRT.Headers"
# "version": "14.16.27045"
# example: <vcruntime.h>
# dir: MSVC/
curl -O https://download.visualstudio.microsoft.com/download/pr/bac0afd7-cc9e-4182-8a83-9898fa20e092/87bbe41e09a2f83711e72696f49681429327eb7a4b90618c35667a6ba2e2880e/Microsoft.VisualC.14.16.CRT.Headers.vsix
# [[.lib]]
# advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib runtimeobject.lib
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/944c4153b849a1f7d0c0404a4f1c05ea/windows%20sdk%20for%20windows%20store%20apps%20libs-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/5306aed3e1a38d1e8bef5934edeb2a9b/05047a45609f311645eebcac2739fc4c.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/13c8a73a0f5a6474040b26d016a26fab/13d68b8a7b6678a368e2d13ff4027521.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/149578fb3b621cdb61ee1813b9b3e791/463ad1b0783ebda908fd6c16a4abfe93.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/5c986c4f393c6b09d5aec3b539e9fb4a/5a22e5cde814b041749fb271547f4dd5.cab
# dbghelp.lib fwpuclnt.lib arm64rt.lib
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/7a332420d812f7c1d41da865ae5a7c52/windows%20sdk%20desktop%20libs%20arm64-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/19de98ed4a79938d0045d19c047936b3/3e2f7be479e3679d700ce0782e4cc318.cab
# libcmt.lib libvcruntime.lib
curl -O https://download.visualstudio.microsoft.com/download/pr/bac0afd7-cc9e-4182-8a83-9898fa20e092/227f40682a88dc5fa0ccb9cadc9ad30af99ad1f1a75db63407587d079f60d035/Microsoft.VisualC.14.16.CRT.ARM64.Desktop.vsix
msiextract universal%20crt%20headers%20libraries%20and%20sources-x86_en-us.msi
msiextract windows%20sdk%20for%20windows%20store%20apps%20headers-x86_en-us.msi
msiextract windows%20sdk%20for%20windows%20store%20apps%20headers%20onecoreuap-x86_en-us.msi
msiextract windows%20sdk%20for%20windows%20store%20apps%20libs-x86_en-us.msi
msiextract windows%20sdk%20desktop%20libs%20arm64-x86_en-us.msi
unzip -o Microsoft.VisualC.14.16.CRT.Headers.vsix
unzip -o Microsoft.VisualC.14.16.CRT.ARM64.Desktop.vsix
mkdir -p /usr/aarch64-pc-windows-msvc/usr/include
mkdir -p /usr/aarch64-pc-windows-msvc/usr/lib
# lowercase folder/file names
echo "$(find . -regex ".*/[^/]*[A-Z][^/]*")" | xargs -I{} sh -c 'mv "$(echo "{}" | sed -E '"'"'s/(.*\/)/\L\1/'"'"')" "$(echo "{}" | tr [A-Z] [a-z])"'
# .h
(cd 'program files/windows kits/10/include/10.0.26100.0' && cp -r ucrt/* um/* shared/* -t /usr/aarch64-pc-windows-msvc/usr/include)
cp -r contents/vc/tools/msvc/14.16.27023/include/* /usr/aarch64-pc-windows-msvc/usr/include
# lowercase #include "" and #include <>
find /usr/aarch64-pc-windows-msvc/usr/include -type f -exec sed -i -E 's/(#include <[^<>]*?[A-Z][^<>]*?>)|(#include "[^"]*?[A-Z][^"]*?")/\L\1\2/' "{}" ';'
# ARM intrinsics
# original dir: MSVC/
# '__n128x4' redefined in arm_neon.h
# "arm64_neon.h" included from intrin.h
(cd /usr/lib/llvm19/lib/clang/19/include && cp arm_neon.h intrin.h -t /usr/aarch64-pc-windows-msvc/usr/include)
# .lib
# _Interlocked intrinsics
# must always link with arm64rt.lib
# reason: https://developercommunity.visualstudio.com/t/libucrtlibstreamobj-error-lnk2001-unresolved-exter/1544787#T-ND1599818
# I don't understand the 'correct' fix for this, arm64rt.lib is supposed to be the workaround
(cd 'program files/windows kits/10/lib/10.0.26100.0/um/arm64' && cp advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib runtimeobject.lib dbghelp.lib fwpuclnt.lib arm64rt.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)
(cd 'contents/vc/tools/msvc/14.16.27023/lib/arm64' && cp libcmt.lib libvcruntime.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)
cp 'program files/windows kits/10/lib/10.0.26100.0/ucrt/arm64/libucrt.lib' /usr/aarch64-pc-windows-msvc/usr/lib


@@ -1,105 +0,0 @@
#!/bin/sh
# https://github.com/mstorsjo/msvc-wine/blob/master/vsdownload.py
# https://github.com/mozilla/gecko-dev/blob/6027d1d91f2d3204a3992633b3ef730ff005fc64/build/vs/vs2022-car.yaml
# function dl() {
# curl -O https://download.visualstudio.microsoft.com/download/pr/$1
# }
# [[.h]]
# "id": "Win11SDK_10.0.26100"
# "version": "10.0.26100.7"
# libucrt.lib
# example: <assert.h>
# dir: ucrt/
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/2ee3a5fc6e9fc832af7295b138e93839/universal%20crt%20headers%20libraries%20and%20sources-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/b1aa09b90fe314aceb090f6ec7626624/16ab2ea2187acffa6435e334796c8c89.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/400609bb0ff5804e36dbe6dcd42a7f01/6ee7bbee8435130a869cf971694fd9e2.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/2ac327317abb865a0e3f56b2faefa918/78fa3c824c2c48bd4a49ab5969adaaf7.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/f034bc0b2680f67dccd4bfeea3d0f932/7afc7b670accd8e3cc94cfffd516f5cb.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/7ed5e12f9d50f80825a8b27838cf4c7f/96076045170fe5db6d5dcf14b6f6688e.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/764edc185a696bda9e07df8891dddbbb/a1e2a83aa8a71c48c742eeaff6e71928.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/66854bedc6dbd5ccb5dd82c8e2412231/b2f03f34ff83ec013b9e45c7cd8e8a73.cab
# example: <windows.h>
# dir: um/
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/b286efac4d83a54fc49190bddef1edc9/windows%20sdk%20for%20windows%20store%20apps%20headers-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/e0dc3811d92ab96fcb72bf63d6c08d71/766c0ffd568bbb31bf7fb6793383e24a.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/613503da4b5628768497822826aed39f/8125ee239710f33ea485965f76fae646.cab
# example: <winapifamily.h>
# dir: /shared
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/122979f0348d3a2a36b6aa1a111d5d0c/windows%20sdk%20for%20windows%20store%20apps%20headers%20onecoreuap-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/766e04beecdfccff39e91dd9eb32834a/e89e3dcbb016928c7e426238337d69eb.cab
# "id": "Microsoft.VisualC.14.16.CRT.Headers"
# "version": "14.16.27045"
# example: <vcruntime.h>
# dir: MSVC/
curl -O https://download.visualstudio.microsoft.com/download/pr/bac0afd7-cc9e-4182-8a83-9898fa20e092/87bbe41e09a2f83711e72696f49681429327eb7a4b90618c35667a6ba2e2880e/Microsoft.VisualC.14.16.CRT.Headers.vsix
# [[.lib]]
# advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/944c4153b849a1f7d0c0404a4f1c05ea/windows%20sdk%20for%20windows%20store%20apps%20libs-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/5306aed3e1a38d1e8bef5934edeb2a9b/05047a45609f311645eebcac2739fc4c.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/13c8a73a0f5a6474040b26d016a26fab/13d68b8a7b6678a368e2d13ff4027521.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/149578fb3b621cdb61ee1813b9b3e791/463ad1b0783ebda908fd6c16a4abfe93.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/5c986c4f393c6b09d5aec3b539e9fb4a/5a22e5cde814b041749fb271547f4dd5.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/bfc3904a0195453419ae4dfea7abd6fb/e10768bb6e9d0ea730280336b697da66.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/637f9f3be880c71f9e3ca07b4d67345c/f9b24c8280986c0683fbceca5326d806.cab
# dbghelp.lib fwpuclnt.lib
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/9f51690d5aa804b1340ce12d1ec80f89/windows%20sdk%20desktop%20libs%20x64-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/d3a7df4ca3303a698640a29e558a5e5b/58314d0646d7e1a25e97c902166c3155.cab
# libcmt.lib libvcruntime.lib
curl -O https://download.visualstudio.microsoft.com/download/pr/bac0afd7-cc9e-4182-8a83-9898fa20e092/8728f21ae09940f1f4b4ee47b4a596be2509e2a47d2f0c83bbec0ea37d69644b/Microsoft.VisualC.14.16.CRT.x64.Desktop.vsix
msiextract universal%20crt%20headers%20libraries%20and%20sources-x86_en-us.msi
msiextract windows%20sdk%20for%20windows%20store%20apps%20headers-x86_en-us.msi
msiextract windows%20sdk%20for%20windows%20store%20apps%20headers%20onecoreuap-x86_en-us.msi
msiextract windows%20sdk%20for%20windows%20store%20apps%20libs-x86_en-us.msi
msiextract windows%20sdk%20desktop%20libs%20x64-x86_en-us.msi
unzip -o Microsoft.VisualC.14.16.CRT.Headers.vsix
unzip -o Microsoft.VisualC.14.16.CRT.x64.Desktop.vsix
mkdir -p /usr/x86_64-pc-windows-msvc/usr/include
mkdir -p /usr/x86_64-pc-windows-msvc/usr/lib
# lowercase folder/file names
echo "$(find . -regex ".*/[^/]*[A-Z][^/]*")" | xargs -I{} sh -c 'mv "$(echo "{}" | sed -E '"'"'s/(.*\/)/\L\1/'"'"')" "$(echo "{}" | tr [A-Z] [a-z])"'
# .h
(cd 'program files/windows kits/10/include/10.0.26100.0' && cp -r ucrt/* um/* shared/* -t /usr/x86_64-pc-windows-msvc/usr/include)
cp -r contents/vc/tools/msvc/14.16.27023/include/* /usr/x86_64-pc-windows-msvc/usr/include
# lowercase #include "" and #include <>
find /usr/x86_64-pc-windows-msvc/usr/include -type f -exec sed -i -E 's/(#include <[^<>]*?[A-Z][^<>]*?>)|(#include "[^"]*?[A-Z][^"]*?")/\L\1\2/' "{}" ';'
# x86 intrinsics
# original dir: MSVC/
# '_mm_movemask_epi8' defined in emmintrin.h
# '__v4sf' defined in xmmintrin.h
# '__v2si' defined in mmintrin.h
# '__m128d' redefined in immintrin.h
# '__m128i' redefined in intrin.h
# '_mm_comlt_epu8' defined in ammintrin.h
(cd /usr/lib/llvm19/lib/clang/19/include && cp emmintrin.h xmmintrin.h mmintrin.h immintrin.h intrin.h ammintrin.h -t /usr/x86_64-pc-windows-msvc/usr/include)
# .lib
(cd 'program files/windows kits/10/lib/10.0.26100.0/um/x64' && cp advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib dbghelp.lib fwpuclnt.lib -t /usr/x86_64-pc-windows-msvc/usr/lib)
(cd 'contents/vc/tools/msvc/14.16.27023/lib/x64' && cp libcmt.lib libvcruntime.lib -t /usr/x86_64-pc-windows-msvc/usr/lib)
cp 'program files/windows kits/10/lib/10.0.26100.0/ucrt/x64/libucrt.lib' /usr/x86_64-pc-windows-msvc/usr/lib


@@ -1,34 +0,0 @@
import tomllib
found_preview_lance = False
with open("Cargo.toml", "rb") as f:
cargo_data = tomllib.load(f)
for name, dep in cargo_data["workspace"]["dependencies"].items():
if name == "lance" or name.startswith("lance-"):
if isinstance(dep, str):
version = dep
elif isinstance(dep, dict):
# Version doesn't have the beta tag in it, so we instead look
# at the git tag.
version = dep.get('tag', dep.get('version'))
else:
raise ValueError("Unexpected type for dependency: " + str(dep))
if "beta" in version:
found_preview_lance = True
print(f"Dependency '{name}' is a preview version: {version}")
with open("python/pyproject.toml", "rb") as f:
py_proj_data = tomllib.load(f)
for dep in py_proj_data["project"]["dependencies"]:
if dep.startswith("pylance"):
if "b" in dep:
found_preview_lance = True
print(f"Dependency '{dep}' is a preview version")
break # Only one pylance dependency
if found_preview_lance:
raise ValueError("Found preview version of Lance in dependencies")
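To make the two checks concrete, these are the kinds of version strings the script would flag; the specific versions are made up for illustration.

```python
# Illustrative values only; the real ones are read from Cargo.toml and
# python/pyproject.toml by the script above.
lance_tag = "v0.19.2-beta.1"        # contains "beta" -> flagged as preview
pylance_dep = "pylance==0.19.2b1"   # contains "b"    -> flagged as preview

assert "beta" in lance_tag
assert "b" in pylance_dep
```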


@@ -9,81 +9,36 @@ unreleased features.
## Building the docs ## Building the docs
### Setup ### Setup
1. Install LanceDB Python. See setup in [Python contributing guide](../python/CONTRIBUTING.md). 1. Install LanceDB. From LanceDB repo root: `pip install -e python`
Run `make develop` to install the Python package. 2. Install dependencies. From LanceDB repo root: `pip install -r docs/requirements.txt`
2. Install documentation dependencies. From LanceDB repo root: `pip install -r docs/requirements.txt` 3. Make sure you have node and npm setup
4. Make sure protobuf and libssl are installed
### Preview the docs ### Building node module and create markdown files
```shell See [Javascript docs README](./src/javascript/README.md)
### Build docs
From LanceDB repo root:
Run: `PYTHONPATH=. mkdocs build -f docs/mkdocs.yml`
If successful, you should see a `docs/site` directory that you can verify locally.
### Run local server
You can run a local server to test the docs prior to deployment by navigating to the `docs` directory and running the following command:
```bash
cd docs cd docs
mkdocs serve mkdocs serve
``` ```
If you want to just generate the HTML files: ### Run doctest for typescript example
```shell ```bash
PYTHONPATH=. mkdocs build -f docs/mkdocs.yml cd lancedb/docs
``` npm i
If successful, you should see a `docs/site` directory that you can verify locally.
## Adding examples
To make sure examples are correct, we put examples in test files so they can be
run as part of our test suites.
You can see the tests are at:
* Python: `python/python/tests/docs`
* Typescript: `nodejs/examples/`
### Checking python examples
```shell
cd python
pytest -vv python/tests/docs
```
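Those test files are also the source of the `--8<--` snippet includes used throughout the docs pages. A hedged sketch of what such a file looks like follows; the snippet names and the test body are illustrative, not the actual contents of `python/python/tests/docs/`.

```python
# Snippet markers delimit the regions that the docs pull in via --8<-- includes.
# --8<-- [start:import-lancedb]
import lancedb
# --8<-- [end:import-lancedb]


def test_connect(tmp_path):
    # --8<-- [start:connect]
    db = lancedb.connect(str(tmp_path))
    # --8<-- [end:connect]
    assert db.table_names() == []
```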
### Checking typescript examples
The `@lancedb/lancedb` package must be built before running the tests:
```shell
pushd nodejs
npm ci
npm run build npm run build
popd npm run all
```
Then you can run the examples by going to the `nodejs/examples` directory and
running the tests like a normal npm package:
```shell
pushd nodejs/examples
npm ci
npm test
popd
```
## API documentation
### Python
The Python API documentation is organized based on the file `docs/src/python/python.md`.
We manually add entries there so we can control the organization of the reference page.
**However, this means any new types must be manually added to the file.** No additional
steps are needed to generate the API documentation.
### Typescript
The typescript API documentation is generated from the typescript source code using [typedoc](https://typedoc.org/).
When new APIs are added, you must manually re-run the typedoc command to update the API documentation.
The new files should be checked into the repository.
```shell
pushd nodejs
npm run docs
popd
``` ```


@@ -4,9 +4,6 @@ repo_url: https://github.com/lancedb/lancedb
edit_uri: https://github.com/lancedb/lancedb/tree/main/docs/src edit_uri: https://github.com/lancedb/lancedb/tree/main/docs/src
repo_name: lancedb/lancedb repo_name: lancedb/lancedb
docs_dir: src docs_dir: src
watch:
- src
- ../python/python
theme: theme:
name: "material" name: "material"
@@ -58,15 +55,10 @@ plugins:
show_signature_annotations: true show_signature_annotations: true
show_root_heading: true show_root_heading: true
members_order: source members_order: source
docstring_section_style: list
signature_crossrefs: true
separate_signature: true
import: import:
# for cross references # for cross references
- https://arrow.apache.org/docs/objects.inv - https://arrow.apache.org/docs/objects.inv
- https://pandas.pydata.org/docs/objects.inv - https://pandas.pydata.org/docs/objects.inv
- https://lancedb.github.io/lance/objects.inv
- https://docs.pydantic.dev/latest/objects.inv
- mkdocs-jupyter - mkdocs-jupyter
- render_swagger: - render_swagger:
allow_arbitrary_locations: true allow_arbitrary_locations: true
@@ -98,9 +90,6 @@ markdown_extensions:
- pymdownx.emoji: - pymdownx.emoji:
emoji_index: !!python/name:material.extensions.emoji.twemoji emoji_index: !!python/name:material.extensions.emoji.twemoji
emoji_generator: !!python/name:material.extensions.emoji.to_svg emoji_generator: !!python/name:material.extensions.emoji.to_svg
- markdown.extensions.toc:
baselevel: 1
permalink: ""
nav: nav:
- Home: - Home:
@@ -108,17 +97,16 @@ nav:
- 🏃🏼‍♂️ Quick start: basic.md - 🏃🏼‍♂️ Quick start: basic.md
- 📚 Concepts: - 📚 Concepts:
- Vector search: concepts/vector_search.md - Vector search: concepts/vector_search.md
- Indexing: - Indexing:
- IVFPQ: concepts/index_ivfpq.md - IVFPQ: concepts/index_ivfpq.md
- HNSW: concepts/index_hnsw.md - HNSW: concepts/index_hnsw.md
- Storage: concepts/storage.md - Storage: concepts/storage.md
- Data management: concepts/data_management.md - Data management: concepts/data_management.md
- 🔨 Guides: - 🔨 Guides:
- Working with tables: guides/tables.md - Working with tables: guides/tables.md
- Building a vector index: ann_indexes.md - Building a vector index: ann_indexes.md
- Vector Search: search.md - Vector Search: search.md
- Full-text search (native): fts.md - Full-text search: fts.md
- Full-text search (tantivy-based): fts_tantivy.md
- Building a scalar index: guides/scalar_index.md - Building a scalar index: guides/scalar_index.md
- Hybrid search: - Hybrid search:
- Overview: hybrid_search/hybrid_search.md - Overview: hybrid_search/hybrid_search.md
@@ -134,8 +122,8 @@ nav:
- Adaptive RAG: rag/adaptive_rag.md - Adaptive RAG: rag/adaptive_rag.md
- SFR RAG: rag/sfr_rag.md - SFR RAG: rag/sfr_rag.md
- Advanced Techniques: - Advanced Techniques:
- HyDE: rag/advanced_techniques/hyde.md - HyDE: rag/advanced_techniques/hyde.md
- FLARE: rag/advanced_techniques/flare.md - FLARE: rag/advanced_techniques/flare.md
- Reranking: - Reranking:
- Quickstart: reranking/index.md - Quickstart: reranking/index.md
- Cohere Reranker: reranking/cohere.md - Cohere Reranker: reranking/cohere.md
@@ -146,13 +134,10 @@ nav:
- Jina Reranker: reranking/jina.md - Jina Reranker: reranking/jina.md
- OpenAI Reranker: reranking/openai.md - OpenAI Reranker: reranking/openai.md
- AnswerDotAi Rerankers: reranking/answerdotai.md - AnswerDotAi Rerankers: reranking/answerdotai.md
- Voyage AI Rerankers: reranking/voyageai.md
- Building Custom Rerankers: reranking/custom_reranker.md - Building Custom Rerankers: reranking/custom_reranker.md
- Example: notebooks/lancedb_reranking.ipynb - Example: notebooks/lancedb_reranking.ipynb
- Filtering: sql.md - Filtering: sql.md
- Versioning & Reproducibility: - Versioning & Reproducibility: notebooks/reproducibility.ipynb
- sync API: notebooks/reproducibility.ipynb
- async API: notebooks/reproducibility_async.ipynb
- Configuring Storage: guides/storage.md - Configuring Storage: guides/storage.md
- Migration Guide: migration.md - Migration Guide: migration.md
- Tuning retrieval performance: - Tuning retrieval performance:
@@ -160,10 +145,10 @@ nav:
- Reranking: guides/tuning_retrievers/2_reranking.md - Reranking: guides/tuning_retrievers/2_reranking.md
- Embedding fine-tuning: guides/tuning_retrievers/3_embed_tuning.md - Embedding fine-tuning: guides/tuning_retrievers/3_embed_tuning.md
- 🧬 Managing embeddings: - 🧬 Managing embeddings:
- Understand Embeddings: embeddings/understanding_embeddings.md - Understand Embeddings: embeddings/understanding_embeddings.md
- Get Started: embeddings/index.md - Get Started: embeddings/index.md
- Embedding functions: embeddings/embedding_functions.md - Embedding functions: embeddings/embedding_functions.md
- Available models: - Available models:
- Overview: embeddings/default_embedding_functions.md - Overview: embeddings/default_embedding_functions.md
- Text Embedding Functions: - Text Embedding Functions:
- Sentence Transformers: embeddings/available_embedding_models/text_embedding_functions/sentence_transformers.md - Sentence Transformers: embeddings/available_embedding_models/text_embedding_functions/sentence_transformers.md
@@ -176,13 +161,11 @@ nav:
- Jina Embeddings: embeddings/available_embedding_models/text_embedding_functions/jina_embedding.md - Jina Embeddings: embeddings/available_embedding_models/text_embedding_functions/jina_embedding.md
- AWS Bedrock Text Embedding Functions: embeddings/available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md - AWS Bedrock Text Embedding Functions: embeddings/available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md
- IBM watsonx.ai Embeddings: embeddings/available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md - IBM watsonx.ai Embeddings: embeddings/available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md
- Voyage AI Embeddings: embeddings/available_embedding_models/text_embedding_functions/voyageai_embedding.md
- Multimodal Embedding Functions: - Multimodal Embedding Functions:
- OpenClip embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/openclip_embedding.md - OpenClip embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/openclip_embedding.md
- Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md - Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md
- Jina Embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md - Jina Embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md
- User-defined embedding functions: embeddings/custom_embedding_function.md - User-defined embedding functions: embeddings/custom_embedding_function.md
- Variables and secrets: embeddings/variables_and_secrets.md
- "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb - "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
- "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb - "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
- 🔌 Integrations: - 🔌 Integrations:
@@ -214,7 +197,7 @@ nav:
- Evaluation: examples/python_examples/evaluations.md - Evaluation: examples/python_examples/evaluations.md
- AI Agent: examples/python_examples/aiagent.md - AI Agent: examples/python_examples/aiagent.md
- Recommender System: examples/python_examples/recommendersystem.md - Recommender System: examples/python_examples/recommendersystem.md
- Miscellaneous: - Miscellaneous:
- Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md - Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md
- Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md - Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md
- 👾 JavaScript: - 👾 JavaScript:
@@ -224,10 +207,9 @@ nav:
- TransformersJS Embedding Search: examples/transformerjs_embedding_search_nodejs.md - TransformersJS Embedding Search: examples/transformerjs_embedding_search_nodejs.md
- 🦀 Rust: - 🦀 Rust:
- Overview: examples/examples_rust.md - Overview: examples/examples_rust.md
- 📓 Studies: - Studies:
- ↗Improve retrievers with hybrid search and reranking: https://blog.lancedb.com/hybrid-search-and-reranking-report/ - ↗Improve retrievers with hybrid search and reranking: https://blog.lancedb.com/hybrid-search-and-reranking-report/
- 💭 FAQs: faq.md - 💭 FAQs: faq.md
- 🔍 Troubleshooting: troubleshooting.md
- ⚙️ API reference: - ⚙️ API reference:
- 🐍 Python: python/python.md - 🐍 Python: python/python.md
- 👾 JavaScript (vectordb): javascript/modules.md - 👾 JavaScript (vectordb): javascript/modules.md
@@ -239,22 +221,20 @@ nav:
- 🐍 Python: python/saas-python.md - 🐍 Python: python/saas-python.md
- 👾 JavaScript: javascript/modules.md - 👾 JavaScript: javascript/modules.md
- REST API: cloud/rest.md - REST API: cloud/rest.md
- FAQs: cloud/cloud_faq.md
- Quick start: basic.md - Quick start: basic.md
- Concepts: - Concepts:
- Vector search: concepts/vector_search.md - Vector search: concepts/vector_search.md
- Indexing: - Indexing:
- IVFPQ: concepts/index_ivfpq.md - IVFPQ: concepts/index_ivfpq.md
- HNSW: concepts/index_hnsw.md - HNSW: concepts/index_hnsw.md
- Storage: concepts/storage.md - Storage: concepts/storage.md
- Data management: concepts/data_management.md - Data management: concepts/data_management.md
- Guides: - Guides:
- Working with tables: guides/tables.md - Working with tables: guides/tables.md
- Building an ANN index: ann_indexes.md - Building an ANN index: ann_indexes.md
- Vector Search: search.md - Vector Search: search.md
- Full-text search (native): fts.md - Full-text search: fts.md
- Full-text search (tantivy-based): fts_tantivy.md
- Building a scalar index: guides/scalar_index.md - Building a scalar index: guides/scalar_index.md
- Hybrid search: - Hybrid search:
- Overview: hybrid_search/hybrid_search.md - Overview: hybrid_search/hybrid_search.md
@@ -270,8 +250,8 @@ nav:
- Adaptive RAG: rag/adaptive_rag.md - Adaptive RAG: rag/adaptive_rag.md
- SFR RAG: rag/sfr_rag.md - SFR RAG: rag/sfr_rag.md
- Advanced Techniques: - Advanced Techniques:
- HyDE: rag/advanced_techniques/hyde.md - HyDE: rag/advanced_techniques/hyde.md
- FLARE: rag/advanced_techniques/flare.md - FLARE: rag/advanced_techniques/flare.md
- Reranking: - Reranking:
- Quickstart: reranking/index.md - Quickstart: reranking/index.md
- Cohere Reranker: reranking/cohere.md - Cohere Reranker: reranking/cohere.md
@@ -285,9 +265,7 @@ nav:
- Building Custom Rerankers: reranking/custom_reranker.md - Building Custom Rerankers: reranking/custom_reranker.md
- Example: notebooks/lancedb_reranking.ipynb - Example: notebooks/lancedb_reranking.ipynb
- Filtering: sql.md - Filtering: sql.md
- Versioning & Reproducibility: - Versioning & Reproducibility: notebooks/reproducibility.ipynb
- sync API: notebooks/reproducibility.ipynb
- async API: notebooks/reproducibility_async.ipynb
- Configuring Storage: guides/storage.md - Configuring Storage: guides/storage.md
- Migration Guide: migration.md - Migration Guide: migration.md
- Tuning retrieval performance: - Tuning retrieval performance:
@@ -295,10 +273,10 @@ nav:
- Reranking: guides/tuning_retrievers/2_reranking.md - Reranking: guides/tuning_retrievers/2_reranking.md
- Embedding fine-tuning: guides/tuning_retrievers/3_embed_tuning.md - Embedding fine-tuning: guides/tuning_retrievers/3_embed_tuning.md
- Managing Embeddings: - Managing Embeddings:
- Understand Embeddings: embeddings/understanding_embeddings.md - Understand Embeddings: embeddings/understanding_embeddings.md
- Get Started: embeddings/index.md - Get Started: embeddings/index.md
- Embedding functions: embeddings/embedding_functions.md - Embedding functions: embeddings/embedding_functions.md
- Available models: - Available models:
- Overview: embeddings/default_embedding_functions.md - Overview: embeddings/default_embedding_functions.md
- Text Embedding Functions: - Text Embedding Functions:
- Sentence Transformers: embeddings/available_embedding_models/text_embedding_functions/sentence_transformers.md - Sentence Transformers: embeddings/available_embedding_models/text_embedding_functions/sentence_transformers.md
@@ -316,7 +294,6 @@ nav:
- Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md - Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md
- Jina Embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md - Jina Embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md
- User-defined embedding functions: embeddings/custom_embedding_function.md - User-defined embedding functions: embeddings/custom_embedding_function.md
- Variables and secrets: embeddings/variables_and_secrets.md
- "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb - "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
- "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb - "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
- Integrations: - Integrations:
@@ -344,7 +321,7 @@ nav:
- Evaluation: examples/python_examples/evaluations.md - Evaluation: examples/python_examples/evaluations.md
- AI Agent: examples/python_examples/aiagent.md - AI Agent: examples/python_examples/aiagent.md
- Recommender System: examples/python_examples/recommendersystem.md - Recommender System: examples/python_examples/recommendersystem.md
- Miscellaneous: - Miscellaneous:
- Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md - Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md
- Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md - Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md
- 👾 JavaScript: - 👾 JavaScript:
@@ -355,8 +332,8 @@ nav:
- 🦀 Rust: - 🦀 Rust:
- Overview: examples/examples_rust.md - Overview: examples/examples_rust.md
- Studies: - Studies:
- studies/overview.md - studies/overview.md
- ↗Improve retrievers with hybrid search and reranking: https://blog.lancedb.com/hybrid-search-and-reranking-report/ - ↗Improve retrievers with hybrid search and reranking: https://blog.lancedb.com/hybrid-search-and-reranking-report/
- API reference: - API reference:
- Overview: api_reference.md - Overview: api_reference.md
- Python: python/python.md - Python: python/python.md
@@ -369,7 +346,6 @@ nav:
- 🐍 Python: python/saas-python.md - 🐍 Python: python/saas-python.md
- 👾 JavaScript: javascript/modules.md - 👾 JavaScript: javascript/modules.md
- REST API: cloud/rest.md - REST API: cloud/rest.md
- FAQs: cloud/cloud_faq.md
extra_css: extra_css:
- styles/global.css - styles/global.css
@@ -377,7 +353,6 @@ extra_css:
extra_javascript: extra_javascript:
- "extra_js/init_ask_ai_widget.js" - "extra_js/init_ask_ai_widget.js"
- "extra_js/reo.js"
extra: extra:
analytics: analytics:
@@ -389,4 +364,5 @@ extra:
- icon: fontawesome/brands/x-twitter - icon: fontawesome/brands/x-twitter
link: https://twitter.com/lancedb link: https://twitter.com/lancedb
- icon: fontawesome/brands/linkedin - icon: fontawesome/brands/linkedin
link: https://www.linkedin.com/company/lancedb link: https://www.linkedin.com/company/lancedb


@@ -38,13 +38,6 @@ components:
required: true required: true
schema: schema:
type: string type: string
index_name:
name: index_name
in: path
description: name of the index
required: true
schema:
type: string
responses: responses:
invalid_request: invalid_request:
description: Invalid request description: Invalid request
@@ -492,22 +485,3 @@ paths:
$ref: "#/components/responses/unauthorized" $ref: "#/components/responses/unauthorized"
"404": "404":
$ref: "#/components/responses/not_found" $ref: "#/components/responses/not_found"
/v1/table/{name}/index/{index_name}/drop/:
post:
description: Drop an index from the table
tags:
- Tables
summary: Drop an index from the table
operationId: dropIndex
parameters:
- $ref: "#/components/parameters/table_name"
- $ref: "#/components/parameters/index_name"
responses:
"200":
description: Index successfully dropped
"400":
$ref: "#/components/responses/invalid_request"
"401":
$ref: "#/components/responses/unauthorized"
"404":
$ref: "#/components/responses/not_found"
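For illustration, the drop-index route in this hunk could be exercised with a plain HTTP client as sketched below. Only the path, method, and status code come from the spec above; the host and the `x-api-key` header are assumptions.

```python
import urllib.request

# Placeholder host and key; only the route and method are taken from the spec hunk above.
BASE_URL = "https://your-db.your-region.api.lancedb.com"
API_KEY = "sk-..."

req = urllib.request.Request(
    f"{BASE_URL}/v1/table/my_table/index/my_vector_idx/drop/",
    method="POST",
    headers={"x-api-key": API_KEY},
)
with urllib.request.urlopen(req) as resp:
    assert resp.status == 200  # "Index successfully dropped"
```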

docs/package-lock.json (generated)

@@ -19,7 +19,7 @@
}, },
"../node": { "../node": {
"name": "vectordb", "name": "vectordb",
"version": "0.12.0", "version": "0.4.6",
"cpu": [ "cpu": [
"x64", "x64",
"arm64" "arm64"
@@ -31,7 +31,9 @@
"win32" "win32"
], ],
"dependencies": { "dependencies": {
"@apache-arrow/ts": "^14.0.2",
"@neon-rs/load": "^0.0.74", "@neon-rs/load": "^0.0.74",
"apache-arrow": "^14.0.2",
"axios": "^1.4.0" "axios": "^1.4.0"
}, },
"devDependencies": { "devDependencies": {
@@ -44,7 +46,6 @@
"@types/temp": "^0.9.1", "@types/temp": "^0.9.1",
"@types/uuid": "^9.0.3", "@types/uuid": "^9.0.3",
"@typescript-eslint/eslint-plugin": "^5.59.1", "@typescript-eslint/eslint-plugin": "^5.59.1",
"apache-arrow-old": "npm:apache-arrow@13.0.0",
"cargo-cp-artifact": "^0.1", "cargo-cp-artifact": "^0.1",
"chai": "^4.3.7", "chai": "^4.3.7",
"chai-as-promised": "^7.1.1", "chai-as-promised": "^7.1.1",
@@ -61,19 +62,15 @@
"ts-node-dev": "^2.0.0", "ts-node-dev": "^2.0.0",
"typedoc": "^0.24.7", "typedoc": "^0.24.7",
"typedoc-plugin-markdown": "^3.15.3", "typedoc-plugin-markdown": "^3.15.3",
"typescript": "^5.1.0", "typescript": "*",
"uuid": "^9.0.0" "uuid": "^9.0.0"
}, },
"optionalDependencies": { "optionalDependencies": {
"@lancedb/vectordb-darwin-arm64": "0.12.0", "@lancedb/vectordb-darwin-arm64": "0.4.6",
"@lancedb/vectordb-darwin-x64": "0.12.0", "@lancedb/vectordb-darwin-x64": "0.4.6",
"@lancedb/vectordb-linux-arm64-gnu": "0.12.0", "@lancedb/vectordb-linux-arm64-gnu": "0.4.6",
"@lancedb/vectordb-linux-x64-gnu": "0.12.0", "@lancedb/vectordb-linux-x64-gnu": "0.4.6",
"@lancedb/vectordb-win32-x64-msvc": "0.12.0" "@lancedb/vectordb-win32-x64-msvc": "0.4.6"
},
"peerDependencies": {
"@apache-arrow/ts": "^14.0.2",
"apache-arrow": "^14.0.2"
} }
}, },
"../node/node_modules/apache-arrow": { "../node/node_modules/apache-arrow": {


@@ -18,24 +18,25 @@ See the [indexing](concepts/index_ivfpq.md) concepts guide for more information
Lance supports `IVF_PQ` index type by default. Lance supports `IVF_PQ` index type by default.
=== "Python" === "Python"
=== "Sync API"
Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method. Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method.
```python ```python
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb" import lancedb
--8<-- "python/python/tests/docs/test_guide_index.py:import-numpy" import numpy as np
--8<-- "python/python/tests/docs/test_guide_index.py:create_ann_index" uri = "data/sample-lancedb"
``` db = lancedb.connect(uri)
=== "Async API"
Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method.
```python # Create 10,000 sample vectors
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb" data = [{"vector": row, "item": f"item {i}"}
--8<-- "python/python/tests/docs/test_guide_index.py:import-numpy" for i, row in enumerate(np.random.random((10_000, 1536)).astype('float32'))]
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-ivfpq"
--8<-- "python/python/tests/docs/test_guide_index.py:create_ann_index_async" # Add the vectors to a table
``` tbl = db.create_table("my_vectors", data=data)
# Create and train the index - you need to have enough data in the table for an effective training step
tbl.create_index(num_partitions=256, num_sub_vectors=96)
```
=== "TypeScript" === "TypeScript"
@@ -44,9 +45,9 @@ Lance supports `IVF_PQ` index type by default.
Creating indexes is done via the [lancedb.Table.createIndex](../js/classes/Table.md/#createIndex) method. Creating indexes is done via the [lancedb.Table.createIndex](../js/classes/Table.md/#createIndex) method.
```typescript ```typescript
--8<--- "nodejs/examples/ann_indexes.test.ts:import" --8<--- "nodejs/examples/ann_indexes.ts:import"
--8<-- "nodejs/examples/ann_indexes.test.ts:ingest" --8<-- "nodejs/examples/ann_indexes.ts:ingest"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -82,7 +83,6 @@ The following IVF_PQ parameters can be specified: The following IVF_PQ parameters can be specified:
- **num_sub_vectors**: The number of sub-vectors (M) that will be created during Product Quantization (PQ). - **num_sub_vectors**: The number of sub-vectors (M) that will be created during Product Quantization (PQ).
For D dimensional vector, it will be divided into `M` subvectors with dimension `D/M`, each of which is replaced by For D dimensional vector, it will be divided into `M` subvectors with dimension `D/M`, each of which is replaced by
a single PQ code. The default is the dimension of the vector divided by 16. a single PQ code. The default is the dimension of the vector divided by 16.
- **num_bits**: The number of bits used to encode each sub-vector. Only 4 and 8 are supported. The higher the number of bits, the higher the accuracy of the index, also the slower search. The default is 8.
!!! note !!! note
@@ -126,9 +126,7 @@ You can specify the GPU device to train IVF partitions via
accelerator="mps" accelerator="mps"
) )
``` ```
!!! note
GPU based indexing is not yet supported with our asynchronous client.
Troubleshooting: Troubleshooting:
If you see `AssertionError: Torch not compiled with CUDA enabled`, you need to [install If you see `AssertionError: Torch not compiled with CUDA enabled`, you need to [install
@@ -142,27 +140,23 @@ There are a couple of parameters that can be used to fine-tune the search:
- **limit** (default: 10): The amount of results that will be returned - **limit** (default: 10): The amount of results that will be returned
- **nprobes** (default: 20): The number of probes used. A higher number makes search more accurate but also slower.<br/> - **nprobes** (default: 20): The number of probes used. A higher number makes search more accurate but also slower.<br/>
Most of the time, setting nprobes to cover 5-15% of the dataset should achieve high recall with low latency.<br/> Most of the time, setting nprobes to cover 5-10% of the dataset should achieve high recall with low latency.<br/>
- _For example_, For a dataset of 1 million vectors divided into 256 partitions, `nprobes` should be set to ~20-40. This value can be adjusted to achieve the optimal balance between search latency and search quality. <br/> e.g., for 1M vectors divided up into 256 partitions, nprobes should be set to ~20-40.<br/>
Note: nprobes is only applicable if an ANN index is present. If specified on a table without an ANN index, it is ignored.
- **refine_factor** (default: None): Refine the results by reading extra elements and re-ranking them in memory.<br/> - **refine_factor** (default: None): Refine the results by reading extra elements and re-ranking them in memory.<br/>
A higher number makes search more accurate but also slower. If you find the recall is less than ideal, try refine_factor=10 to start.<br/> A higher number makes search more accurate but also slower. If you find the recall is less than ideal, try refine_factor=10 to start.<br/>
- _For example_, For a dataset of 1 million vectors divided into 256 partitions, setting the `refine_factor` to 200 will initially retrieve the top 4,000 candidates (top k * refine_factor) from all searched partitions. These candidates are then reranked to determine the final top 20 results.<br/> e.g., for 1M vectors divided into 256 partitions, if you're looking for top 20, then refine_factor=200 reranks the whole partition.<br/>
!!! note Note: refine_factor is only applicable if an ANN index is present. If specified on a table without an ANN index, it is ignored.
Both `nprobes` and `refine_factor` are only applicable if an ANN index is present. If specified on a table without an ANN index, those parameters are ignored.
=== "Python" === "Python"
=== "Sync API"
```python ```python
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search" tbl.search(np.random.random((1536))) \
``` .limit(2) \
=== "Async API" .nprobes(20) \
.refine_factor(10) \
```python .to_pandas()
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async" ```
```
```text ```text
vector item _distance vector item _distance
@@ -175,7 +169,7 @@ There are a couple of parameters that can be used to fine-tune the search:
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```typescript ```typescript
--8<-- "nodejs/examples/ann_indexes.test.ts:search1" --8<-- "nodejs/examples/ann_indexes.ts:search1"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -199,23 +193,17 @@ The search will return the data requested in addition to the distance of each it
You can further filter the elements returned by a search using a where clause. You can further filter the elements returned by a search using a where clause.
=== "Python" === "Python"
=== "Sync API"
```python ```python
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_filter" tbl.search(np.random.random((1536))).where("item != 'item 1141'").to_pandas()
``` ```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async_with_filter"
```
=== "TypeScript" === "TypeScript"
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```typescript ```typescript
--8<-- "nodejs/examples/ann_indexes.test.ts:search2" --8<-- "nodejs/examples/ann_indexes.ts:search2"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -230,16 +218,10 @@ You can select the columns returned by the query using a select clause.
=== "Python" === "Python"
=== "Sync API" ```python
tbl.search(np.random.random((1536))).select(["vector"]).to_pandas()
```
```python
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_select"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async_with_select"
```
```text ```text
vector _distance vector _distance
@@ -253,7 +235,7 @@ You can select the columns returned by the query using a select clause.
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```typescript ```typescript
--8<-- "nodejs/examples/ann_indexes.test.ts:search3" --8<-- "nodejs/examples/ann_indexes.ts:search3"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -293,15 +275,7 @@ Product quantization can lead to approximately `16 * sizeof(float32) / 1 = 64` t
A higher number of partitions could lead to more efficient I/O during queries and better accuracy, but it takes much more time to train. A higher number of partitions could lead to more efficient I/O during queries and better accuracy, but it takes much more time to train.
On the `SIFT-1M` dataset, our benchmark shows that keeping each partition at 1K-4K rows leads to a good latency / recall trade-off. On the `SIFT-1M` dataset, our benchmark shows that keeping each partition at 1K-4K rows leads to a good latency / recall trade-off.
`num_sub_vectors` specifies how many Product Quantization (PQ) short codes to generate on each vector. The number should be a factor of the vector dimension. Because `num_sub_vectors` specifies how many Product Quantization (PQ) short codes to generate on each vector. Because
PQ is a lossy compression of the original vector, a higher `num_sub_vectors` usually results in PQ is a lossy compression of the original vector, a higher `num_sub_vectors` usually results in
less space distortion, and thus yields better accuracy. However, a higher `num_sub_vectors` also causes heavier I/O and more PQ computation, and thus, higher latency. `dimension / num_sub_vectors` should be a multiple of 8 for optimum SIMD efficiency. less space distortion, and thus yields better accuracy. However, a higher `num_sub_vectors` also causes heavier I/O and
more PQ computation, and thus, higher latency. `dimension / num_sub_vectors` should be a multiple of 8 for optimum SIMD efficiency.
!!! note
if `num_sub_vectors` is set to be greater than the vector dimension, you will see errors like `attempt to divide by zero`
### How to choose `m` and `ef_construction` for `IVF_HNSW_*` index?
`m` determines the number of connections a new node establishes with its closest neighbors upon entering the graph. Typically, `m` falls within the range of 5 to 48. Lower `m` values are suitable for low-dimensional data or scenarios where recall is less critical. Conversely, higher `m` values are beneficial for high-dimensional data or when high recall is required. In essence, a larger `m` results in a denser graph with increased connectivity, but at the expense of higher memory consumption.
`ef_construction` balances build speed and accuracy. Higher values increase accuracy but slow down the build process. A typical range is 150 to 300. For good search results, a minimum value of 100 is recommended. In most cases, setting this value above 500 offers no additional benefit. Ensure that `ef_construction` is always set to a value equal to or greater than `ef` in the search phase
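Pulling the knobs discussed on this page into one place, a condensed sketch of the synchronous flow looks like the following; the data and parameter values are illustrative, not recommendations.

```python
import lancedb
import numpy as np

db = lancedb.connect("data/sample-lancedb")
data = [
    {"vector": row, "item": f"item {i}"}
    for i, row in enumerate(np.random.random((10_000, 1536)).astype("float32"))
]
tbl = db.create_table("my_vectors", data=data)

# num_partitions controls the IVF clustering, num_sub_vectors the PQ compression.
tbl.create_index(num_partitions=256, num_sub_vectors=96)

# nprobes trades recall for latency; refine_factor re-ranks extra candidates in memory.
results = (
    tbl.search(np.random.random((1536)))
    .limit(2)
    .nprobes(20)
    .refine_factor(10)
    .to_pandas()
)
```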


@@ -3,7 +3,6 @@ import * as vectordb from "vectordb";
// --8<-- [end:import] // --8<-- [end:import]
(async () => { (async () => {
console.log("ann_indexes.ts: start");
// --8<-- [start:ingest] // --8<-- [start:ingest]
const db = await vectordb.connect("data/sample-lancedb"); const db = await vectordb.connect("data/sample-lancedb");
@@ -50,5 +49,5 @@ import * as vectordb from "vectordb";
.execute(); .execute();
// --8<-- [end:search3] // --8<-- [end:search3]
console.log("ann_indexes.ts: done"); console.log("Ann indexes: done");
})(); })();

Binary file not shown (image, 10 KiB).

@@ -133,22 +133,21 @@ recommend switching to stable releases.
## Connect to a database ## Connect to a database
=== "Python" === "Python"
=== "Sync API"
```python ```python
--8<-- "python/python/tests/docs/test_basic.py:imports" --8<-- "python/python/tests/docs/test_basic.py:imports"
--8<-- "python/python/tests/docs/test_basic.py:connect"
--8<-- "python/python/tests/docs/test_basic.py:set_uri" --8<-- "python/python/tests/docs/test_basic.py:connect_async"
--8<-- "python/python/tests/docs/test_basic.py:connect" ```
```
=== "Async API"
```python !!! note "Asynchronous Python API"
--8<-- "python/python/tests/docs/test_basic.py:imports"
--8<-- "python/python/tests/docs/test_basic.py:set_uri" The asynchronous Python API is new and has some slight differences compared
--8<-- "python/python/tests/docs/test_basic.py:connect_async" to the synchronous API. Feel free to start using the asynchronous version.
``` Once all features have migrated we will start to move the synchronous API to
use the same syntax as the asynchronous API. To help with this migration we
have created a [migration guide](migration.md) detailing the differences.
=== "Typescript[^1]" === "Typescript[^1]"
@@ -158,7 +157,7 @@ recommend switching to stable releases.
import * as lancedb from "@lancedb/lancedb"; import * as lancedb from "@lancedb/lancedb";
import * as arrow from "apache-arrow"; import * as arrow from "apache-arrow";
--8<-- "nodejs/examples/basic.test.ts:connect" --8<-- "nodejs/examples/basic.ts:connect"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -192,40 +191,28 @@ table.
=== "Python" === "Python"
```python
--8<-- "python/python/tests/docs/test_basic.py:create_table"
--8<-- "python/python/tests/docs/test_basic.py:create_table_async"
```
If the table already exists, LanceDB will raise an error by default. If the table already exists, LanceDB will raise an error by default.
If you want to overwrite the table, you can pass in `mode="overwrite"` If you want to overwrite the table, you can pass in `mode="overwrite"`
to the `create_table` method. to the `create_table` method.
=== "Sync API" You can also pass in a pandas DataFrame directly:
```python ```python
--8<-- "python/python/tests/docs/test_basic.py:create_table" --8<-- "python/python/tests/docs/test_basic.py:create_table_pandas"
``` --8<-- "python/python/tests/docs/test_basic.py:create_table_async_pandas"
```
You can also pass in a pandas DataFrame directly:
```python
--8<-- "python/python/tests/docs/test_basic.py:create_table_pandas"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_basic.py:create_table_async"
```
You can also pass in a pandas DataFrame directly:
```python
--8<-- "python/python/tests/docs/test_basic.py:create_table_async_pandas"
```
=== "Typescript[^1]" === "Typescript[^1]"
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```typescript ```typescript
--8<-- "nodejs/examples/basic.test.ts:create_table" --8<-- "nodejs/examples/basic.ts:create_table"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -268,16 +255,10 @@ similar to a `CREATE TABLE` statement in SQL.
=== "Python" === "Python"
=== "Sync API" ```python
--8<-- "python/python/tests/docs/test_basic.py:create_empty_table"
```python --8<-- "python/python/tests/docs/test_basic.py:create_empty_table_async"
--8<-- "python/python/tests/docs/test_basic.py:create_empty_table" ```
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_basic.py:create_empty_table_async"
```
!!! note "You can define schema in Pydantic" !!! note "You can define schema in Pydantic"
LanceDB comes with Pydantic support, which allows you to define the schema of your data using Pydantic models. This makes it easy to work with LanceDB tables and data. Learn more about all supported types in [tables guide](./guides/tables.md). LanceDB comes with Pydantic support, which allows you to define the schema of your data using Pydantic models. This makes it easy to work with LanceDB tables and data. Learn more about all supported types in [tables guide](./guides/tables.md).
@@ -287,7 +268,7 @@ similar to a `CREATE TABLE` statement in SQL.
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```typescript ```typescript
--8<-- "nodejs/examples/basic.test.ts:create_empty_table" --8<-- "nodejs/examples/basic.ts:create_empty_table"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -308,22 +289,16 @@ Once created, you can open a table as follows:
=== "Python" === "Python"
=== "Sync API" ```python
--8<-- "python/python/tests/docs/test_basic.py:open_table"
```python --8<-- "python/python/tests/docs/test_basic.py:open_table_async"
--8<-- "python/python/tests/docs/test_basic.py:open_table" ```
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_basic.py:open_table_async"
```
=== "Typescript[^1]" === "Typescript[^1]"
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```typescript ```typescript
--8<-- "nodejs/examples/basic.test.ts:open_table" --8<-- "nodejs/examples/basic.ts:open_table"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -343,22 +318,16 @@ If you forget the name of your table, you can always get a listing of all table
=== "Python" === "Python"
=== "Sync API" ```python
--8<-- "python/python/tests/docs/test_basic.py:table_names"
```python --8<-- "python/python/tests/docs/test_basic.py:table_names_async"
--8<-- "python/python/tests/docs/test_basic.py:table_names" ```
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_basic.py:table_names_async"
```
=== "Typescript[^1]" === "Typescript[^1]"
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```typescript ```typescript
--8<-- "nodejs/examples/basic.test.ts:table_names" --8<-- "nodejs/examples/basic.ts:table_names"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -379,22 +348,16 @@ After a table has been created, you can always add more data to it as follows:
=== "Python" === "Python"
=== "Sync API" ```python
--8<-- "python/python/tests/docs/test_basic.py:add_data"
```python --8<-- "python/python/tests/docs/test_basic.py:add_data_async"
--8<-- "python/python/tests/docs/test_basic.py:add_data" ```
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_basic.py:add_data_async"
```
=== "Typescript[^1]" === "Typescript[^1]"
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```typescript ```typescript
--8<-- "nodejs/examples/basic.test.ts:add_data" --8<-- "nodejs/examples/basic.ts:add_data"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -415,16 +378,10 @@ Once you've embedded the query, you can find its nearest neighbors as follows:
=== "Python" === "Python"
=== "Sync API" ```python
--8<-- "python/python/tests/docs/test_basic.py:vector_search"
```python --8<-- "python/python/tests/docs/test_basic.py:vector_search_async"
--8<-- "python/python/tests/docs/test_basic.py:vector_search" ```
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_basic.py:vector_search_async"
```
This returns a pandas DataFrame with the results. This returns a pandas DataFrame with the results.
@@ -432,7 +389,7 @@ Once you've embedded the query, you can find its nearest neighbors as follows:
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```typescript ```typescript
--8<-- "nodejs/examples/basic.test.ts:vector_search" --8<-- "nodejs/examples/basic.ts:vector_search"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -463,22 +420,16 @@ LanceDB allows you to create an ANN index on a table as follows:
=== "Python" === "Python"
=== "Sync API" ```py
--8<-- "python/python/tests/docs/test_basic.py:create_index"
```python --8<-- "python/python/tests/docs/test_basic.py:create_index_async"
--8<-- "python/python/tests/docs/test_basic.py:create_index" ```
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_basic.py:create_index_async"
```
=== "Typescript[^1]" === "Typescript[^1]"
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```typescript ```typescript
--8<-- "nodejs/examples/basic.test.ts:create_index" --8<-- "nodejs/examples/basic.ts:create_index"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -508,23 +459,17 @@ This can delete any number of rows that match the filter.
=== "Python" === "Python"
=== "Sync API" ```python
--8<-- "python/python/tests/docs/test_basic.py:delete_rows"
```python --8<-- "python/python/tests/docs/test_basic.py:delete_rows_async"
--8<-- "python/python/tests/docs/test_basic.py:delete_rows" ```
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_basic.py:delete_rows_async"
```
=== "Typescript[^1]" === "Typescript[^1]"
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```typescript ```typescript
--8<-- "nodejs/examples/basic.test.ts:delete_rows" --8<-- "nodejs/examples/basic.ts:delete_rows"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -546,10 +491,7 @@ simple or complex as needed. To see what expressions are supported, see the
=== "Python" === "Python"
=== "Sync API" Read more: [lancedb.table.Table.delete][]
Read more: [lancedb.table.Table.delete][]
=== "Async API"
Read more: [lancedb.table.AsyncTable.delete][]
=== "Typescript[^1]" === "Typescript[^1]"
@@ -571,16 +513,10 @@ Use the `drop_table()` method on the database to remove a table.
=== "Python" === "Python"
=== "Sync API" ```python
--8<-- "python/python/tests/docs/test_basic.py:drop_table"
```python --8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
--8<-- "python/python/tests/docs/test_basic.py:drop_table" ```
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
```
This permanently removes the table and is not recoverable, unlike deleting rows. This permanently removes the table and is not recoverable, unlike deleting rows.
By default, if the table does not exist an exception is raised. To suppress this, By default, if the table does not exist an exception is raised. To suppress this,
@@ -591,7 +527,7 @@ Use the `drop_table()` method on the database to remove a table.
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```typescript ```typescript
--8<-- "nodejs/examples/basic.test.ts:drop_table" --8<-- "nodejs/examples/basic.ts:drop_table"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -615,25 +551,18 @@ You can use the embedding API when working with embedding models. It automatical
=== "Python" === "Python"
=== "Sync API" ```python
--8<-- "python/python/tests/docs/test_embeddings_optional.py:imports"
```python --8<-- "python/python/tests/docs/test_embeddings_optional.py:openai_embeddings"
--8<-- "python/python/tests/docs/test_embeddings_optional.py:imports" ```
--8<-- "python/python/tests/docs/test_embeddings_optional.py:openai_embeddings"
```
=== "Async API"
Coming soon to the async API.
https://github.com/lancedb/lancedb/issues/1938
=== "Typescript[^1]" === "Typescript[^1]"
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```typescript ```typescript
--8<-- "nodejs/examples/embedding.test.ts:imports" --8<-- "nodejs/examples/embedding.ts:imports"
--8<-- "nodejs/examples/embedding.test.ts:openai_embeddings" --8<-- "nodejs/examples/embedding.ts:openai_embeddings"
``` ```
=== "Rust" === "Rust"

View File

@@ -107,6 +107,7 @@ const example = async () => {
// --8<-- [start:search] // --8<-- [start:search]
const query = await tbl.search([100, 100]).limit(2).execute(); const query = await tbl.search([100, 100]).limit(2).execute();
// --8<-- [end:search] // --8<-- [end:search]
console.log(query);
// --8<-- [start:delete] // --8<-- [start:delete]
await tbl.delete('item = "fizz"'); await tbl.delete('item = "fizz"');
@@ -118,9 +119,8 @@ const example = async () => {
}; };
async function main() { async function main() {
console.log("basic_legacy.ts: start");
await example(); await example();
console.log("basic_legacy.ts: done"); console.log("Basic example: done");
} }
main(); main();

View File

@@ -1,34 +0,0 @@
This section provides answers to the most common questions asked about LanceDB Cloud. By following these guidelines, you can ensure a smooth, performant experience with LanceDB Cloud.
### Should I reuse the database connection?
Yes! It is recommended to establish a single database connection and maintain it throughout your interaction with the tables within it.
LanceDB uses HTTP connections to communicate with the servers. By re-using the Connection object, you avoid the overhead of repeatedly establishing HTTP connections, significantly improving efficiency.
### Should I re-use the `Table` object?
`table = db.open_table()` should be called once and used for all subsequent table operations. If there are changes to the opened table, `table` always reflects the **latest version** of the data.
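For illustration, here is a minimal sketch of this pattern (the URI, API key, region, and table name are placeholders):
```python
import lancedb

# Connect once and keep the Connection object for the lifetime of the application.
db = lancedb.connect(
    "db://my-project",   # placeholder LanceDB Cloud URI
    api_key="sk_...",    # placeholder API key
    region="us-east-1",
)

# Open the table once and reuse it; it always reflects the latest version of the data.
table = db.open_table("my_table")
results = table.search([0.1, 0.2]).limit(5).to_list()
```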
### What should I do if I need to search for rows by `id`?
LanceDB Cloud currently does not support an ID or primary key column. We recommend adding a
user-defined ID column. To significantly improve query performance with SQL clauses, create a scalar BITMAP/BTREE index on this column.
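As a hedged sketch (the `doc_id` column name is hypothetical), you could maintain your own ID column and index it:
```python
# Ingest rows with a user-defined ID column ...
table.add([{"doc_id": 123, "vector": [0.1, 0.2], "text": "hello"}])

# ... and index it so ID lookups use the scalar index instead of a full scan.
table.create_scalar_index("doc_id", index_type="BTREE")
rows = table.search().where("doc_id = 123").limit(1).to_list()
```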
### What are the vector indexing types supported by LanceDB Cloud?
We support `IVF_PQ` and `IVF_HNSW_SQ` as the `index_type` which is passed to `create_index`. LanceDB Cloud tunes the indexing parameters automatically to achieve the best tradeoff between query latency and query quality.
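For example, a hedged sketch (the metric and column name are placeholders, and parameter names follow the OSS Python SDK):
```python
# Either supported index type can be requested via `index_type`;
# LanceDB Cloud tunes the remaining parameters automatically.
table.create_index(metric="cosine", vector_column_name="vector", index_type="IVF_PQ")
# or: index_type="IVF_HNSW_SQ"
```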
### When I add new rows to a table, do I need to manually update the index?
No! LanceDB Cloud triggers an asynchronous background job to index the new vectors.
Even though indexing is asynchronous, your vectors will still be immediately searchable. LanceDB uses brute-force search over unindexed rows, so new data is immediately available, but this temporarily increases latency. To skip the brute-force portion of the search, set the `fast_search` flag in your query to `true`.
### Do I need to reindex the whole dataset if only a small portion of the data is deleted or updated?
No! Similar to adding data to the table, LanceDB Cloud triggers an asynchronous background job to update the existing indices. Therefore, no action is needed from users and there is absolutely no
downtime expected.
### How do I know whether an index has been created?
While index creation in LanceDB Cloud is generally fast, querying immediately after a `create_index` call may result in errors. It's recommended to use `list_indices` to verify index creation before querying.
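A hedged sketch of that pattern (the exact shape of the entries returned by `list_indices()` may vary between SDK versions):
```python
import time

table.create_index(metric="cosine", index_type="IVF_PQ")

# Wait until the index is listed before issuing latency-sensitive queries.
while len(table.list_indices()) == 0:
    time.sleep(1)
```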
### Why is my query latency higher than expected?
Multiple factors can impact query latency. To reduce query latency, consider the following:
- Send pre-warm queries: send a few queries to warm up the cache before an actual user query.
- Check network latency: LanceDB Cloud is hosted in AWS `us-east-1` region. It is recommended to run queries from an EC2 instance that is in the same region.
- Create scalar indices: If you are filtering on metadata, it is recommended to create scalar indices on those columns. This will speedup searches with metadata filtering. See [here](../guides/scalar_index.md) for more details on creating a scalar index.

View File

@@ -7,7 +7,7 @@ Approximate Nearest Neighbor (ANN) search is a method for finding data points ne
There are three main types of ANN search algorithms: There are three main types of ANN search algorithms:
* **Tree-based search algorithms**: Use a tree structure to organize and store data points. * **Tree-based search algorithms**: Use a tree structure to organize and store data points.
* **Hash-based search algorithms**: Use a specialized geometric hash table to store and manage data points. These algorithms typically focus on theoretical guarantees, and don't usually perform as well as the other approaches in practice. * * **Hash-based search algorithms**: Use a specialized geometric hash table to store and manage data points. These algorithms typically focus on theoretical guarantees, and don't usually perform as well as the other approaches in practice.
* **Graph-based search algorithms**: Use a graph structure to store data points, which can be a bit complex. * **Graph-based search algorithms**: Use a graph structure to store data points, which can be a bit complex.
HNSW is a graph-based algorithm. All graph-based search algorithms rely on the idea of a k-nearest neighbor (or k-approximate nearest neighbor) graph, which we outline below. HNSW is a graph-based algorithm. All graph-based search algorithms rely on the idea of a k-nearest neighbor (or k-approximate nearest neighbor) graph, which we outline below.
@@ -57,13 +57,6 @@ Then the greedy search routine operates as follows:
## Usage ## Usage
There are three key parameters to set when constructing an HNSW index:
* `metric`: Use an `L2` euclidean distance metric. We also support `dot` and `cosine` distance.
* `m`: The number of neighbors to select for each vector in the HNSW graph.
* `ef_construction`: The number of candidates to evaluate during the construction of the HNSW graph.
We can combine the above concepts to understand how to build and query an HNSW index in LanceDB. We can combine the above concepts to understand how to build and query an HNSW index in LanceDB.
### Construct index ### Construct index

View File

@@ -58,10 +58,8 @@ In Python, the index can be created as follows:
# Make sure you have enough data in the table for an effective training step # Make sure you have enough data in the table for an effective training step
tbl.create_index(metric="L2", num_partitions=256, num_sub_vectors=96) tbl.create_index(metric="L2", num_partitions=256, num_sub_vectors=96)
``` ```
!!! note
`num_partitions`=256 and `num_sub_vectors`=96 do not work for every dataset. These values need to be adjusted for your particular dataset.
The `num_partitions` is usually chosen to target a particular number of vectors per partition. `num_sub_vectors` is typically chosen based on the desired recall and the dimensionality of the vector. See [here](../ann_indexes.md/#how-to-choose-num_partitions-and-num_sub_vectors-for-ivf_pq-index) for best practices on choosing these parameters. The `num_partitions` is usually chosen to target a particular number of vectors per partition. `num_sub_vectors` is typically chosen based on the desired recall and the dimensionality of the vector. See the [FAQs](#faq) below for best practices on choosing these parameters.
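As a rough, hedged illustration of that guidance (the per-partition target and the divisor below are assumptions, not official recommendations):
```python
num_rows = tbl.count_rows()
dim = 768  # assumed dimensionality of the vector column

# Aim for a few thousand vectors per partition, and pick num_sub_vectors
# so that it evenly divides the vector dimension.
num_partitions = max(1, num_rows // 4096)
num_sub_vectors = dim // 16

tbl.create_index(metric="L2", num_partitions=num_partitions, num_sub_vectors=num_sub_vectors)
```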
### Query the index ### Query the index

View File

@@ -6,7 +6,6 @@ LanceDB registers the OpenAI embeddings function in the registry by default, as
|---|---|---|---| |---|---|---|---|
| `name` | `str` | `"text-embedding-ada-002"` | The name of the model. | | `name` | `str` | `"text-embedding-ada-002"` | The name of the model. |
| `dim` | `int` | Model default | For OpenAI's newer text-embedding-3 models, you can specify a dimensionality smaller than the default 1536; this parameter supports that. | | `dim` | `int` | Model default | For OpenAI's newer text-embedding-3 models, you can specify a dimensionality smaller than the default 1536; this parameter supports that. |
| `use_azure` | bool | `False` | Set true to use Azure OpenAPI SDK |
```python ```python

View File

@@ -1,51 +0,0 @@
# VoyageAI Embeddings
Voyage AI provides cutting-edge embedding models and rerankers.
Using the Voyage AI API requires the voyageai package, which can be installed with `pip install voyageai`. Voyage AI embeddings generate vector representations of text data, which can be used for tasks like semantic search, clustering, and classification.
You also need to set the `VOYAGE_API_KEY` environment variable to use the VoyageAI API.
Supported models are:
- voyage-3
- voyage-3-lite
- voyage-finance-2
- voyage-multilingual-2
- voyage-law-2
- voyage-code-2
Supported parameters (to be passed in `create` method) are:
| Parameter | Type | Default Value | Description |
|---|---|--------|---------|
| `name` | `str` | `None` | The model ID of the model to use. Supported base models for Text Embeddings: voyage-3, voyage-3-lite, voyage-finance-2, voyage-multilingual-2, voyage-law-2, voyage-code-2 |
| `input_type` | `str` | `None` | Type of the input text. Default to None. Other options: query, document. |
| `truncation` | `bool` | `True` | Whether to truncate the input texts to fit within the context length. |
Usage Example:
```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import EmbeddingFunctionRegistry
voyageai = (
    EmbeddingFunctionRegistry.get_instance()
    .get("voyageai")
    .create(name="voyage-3")
)
class TextModel(LanceModel):
text: str = voyageai.SourceField()
vector: Vector(voyageai.ndims()) = voyageai.VectorField()
data = [ { "text": "hello world" },
{ "text": "goodbye world" }]
db = lancedb.connect("~/.lancedb")
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
tbl.add(data)
```

View File

@@ -47,22 +47,14 @@ Let's implement `SentenceTransformerEmbeddings` class. All you need to do is imp
=== "TypeScript" === "TypeScript"
```ts ```ts
--8<--- "nodejs/examples/custom_embedding_function.test.ts:imports" --8<--- "nodejs/examples/custom_embedding_function.ts:imports"
--8<--- "nodejs/examples/custom_embedding_function.test.ts:embedding_impl" --8<--- "nodejs/examples/custom_embedding_function.ts:embedding_impl"
``` ```
This is a stripped down version of our implementation of `SentenceTransformerEmbeddings` that removes certain optimizations and default settings. This is a stripped down version of our implementation of `SentenceTransformerEmbeddings` that removes certain optimizations and default settings.
!!! danger "Use sensitive keys to prevent leaking secrets"
To prevent leaking secrets, such as API keys, you should add any sensitive
parameters of an embedding function to the output of the
[sensitive_keys()][lancedb.embeddings.base.EmbeddingFunction.sensitive_keys] /
[getSensitiveKeys()](../../js/namespaces/embedding/classes/EmbeddingFunction/#getsensitivekeys)
method. This prevents users from accidentally instantiating the embedding
function with hard-coded secrets.
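A minimal, hypothetical sketch of what this can look like in Python (the class, registry name, and the exact `sensitive_keys()` override shown here are illustrative, not taken from the LanceDB source):
```python
from typing import List

from lancedb.embeddings import TextEmbeddingFunction, register


@register("my-embedder")  # hypothetical registry name
class MyEmbeddings(TextEmbeddingFunction):
    api_key: str = ""  # sensitive: should come from a registry variable or env var

    def sensitive_keys(self) -> List[str]:
        # Declaring "api_key" as sensitive prevents the function from being
        # instantiated with a hard-coded secret that would otherwise end up
        # in the table metadata.
        return ["api_key"]

    def ndims(self) -> int:
        return 2

    def generate_embeddings(self, texts: List[str]) -> List[List[float]]:
        # Toy embeddings purely for illustration.
        return [[float(len(t)), 0.0] for t in texts]
```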
Now you can use this embedding function to create your table schema, and that's it! You can then ingest data and run queries without manually vectorizing the inputs. Now you can use this embedding function to create your table schema, and that's it! You can then ingest data and run queries without manually vectorizing the inputs.
=== "Python" === "Python"
@@ -86,7 +78,7 @@ Now you can use this embedding function to create your table schema and that's i
=== "TypeScript" === "TypeScript"
```ts ```ts
--8<--- "nodejs/examples/custom_embedding_function.test.ts:call_custom_function" --8<--- "nodejs/examples/custom_embedding_function.ts:call_custom_function"
``` ```
!!! note !!! note

View File

@@ -53,7 +53,6 @@ These functions are registered by default to handle text embeddings.
| [**Jina Embeddings**](available_embedding_models/text_embedding_functions/jina_embedding.md "jina") | 🔗 World-class embedding models to improve your search and RAG systems. You will need **jina api key**. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/jina.png" alt="Jina Icon" width="90" height="35">](available_embedding_models/text_embedding_functions/jina_embedding.md) | | [**Jina Embeddings**](available_embedding_models/text_embedding_functions/jina_embedding.md "jina") | 🔗 World-class embedding models to improve your search and RAG systems. You will need **jina api key**. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/jina.png" alt="Jina Icon" width="90" height="35">](available_embedding_models/text_embedding_functions/jina_embedding.md) |
| [ **AWS Bedrock Functions**](available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md "bedrock-text") | ☁️ AWS Bedrock supports multiple base models for generating text embeddings. You need to setup the AWS credentials to use this embedding function. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/aws_bedrock.png" alt="AWS Bedrock Icon" width="120" height="35">](available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md) | | [ **AWS Bedrock Functions**](available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md "bedrock-text") | ☁️ AWS Bedrock supports multiple base models for generating text embeddings. You need to setup the AWS credentials to use this embedding function. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/aws_bedrock.png" alt="AWS Bedrock Icon" width="120" height="35">](available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md) |
| [**IBM Watsonx.ai**](available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md "watsonx") | 💡 Generate text embeddings using IBM's watsonx.ai platform. **Note**: watsonx.ai library is an optional dependency. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/watsonx.png" alt="Watsonx Icon" width="140" height="35">](available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md) | | [**IBM Watsonx.ai**](available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md "watsonx") | 💡 Generate text embeddings using IBM's watsonx.ai platform. **Note**: watsonx.ai library is an optional dependency. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/watsonx.png" alt="Watsonx Icon" width="140" height="35">](available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md) |
| [**VoyageAI Embeddings**](available_embedding_models/text_embedding_functions/voyageai_embedding.md "voyageai") | 🌕 Voyage AI provides cutting-edge embedding and rerankers. This will help you get started with **VoyageAI** embedding models using LanceDB. Using voyageai API requires voyageai package. Install it via `pip`. | [<img src="https://www.voyageai.com/logo.svg" alt="VoyageAI Icon" width="140" height="35">](available_embedding_models/text_embedding_functions/voyageai_embedding.md) |
@@ -67,7 +66,6 @@ These functions are registered by default to handle text embeddings.
[jina-key]: "jina" [jina-key]: "jina"
[aws-key]: "bedrock-text" [aws-key]: "bedrock-text"
[watsonx-key]: "watsonx" [watsonx-key]: "watsonx"
[voyageai-key]: "voyageai"
## Multi-modal Embedding Functions🖼 ## Multi-modal Embedding Functions🖼

View File

@@ -94,8 +94,8 @@ the embeddings at all:
=== "@lancedb/lancedb" === "@lancedb/lancedb"
```ts ```ts
--8<-- "nodejs/examples/embedding.test.ts:imports" --8<-- "nodejs/examples/embedding.ts:imports"
--8<-- "nodejs/examples/embedding.test.ts:embedding_function" --8<-- "nodejs/examples/embedding.ts:embedding_function"
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)"
@@ -150,7 +150,7 @@ need to worry about it when you query the table:
.toArray() .toArray()
``` ```
=== "vectordb (deprecated)" === "vectordb (deprecated)
```ts ```ts
const results = await table const results = await table

View File

@@ -51,8 +51,8 @@ LanceDB registers the OpenAI embeddings function in the registry as `openai`. Yo
=== "TypeScript" === "TypeScript"
```typescript ```typescript
--8<--- "nodejs/examples/embedding.test.ts:imports" --8<--- "nodejs/examples/embedding.ts:imports"
--8<--- "nodejs/examples/embedding.test.ts:openai_embeddings" --8<--- "nodejs/examples/embedding.ts:openai_embeddings"
``` ```
=== "Rust" === "Rust"
@@ -121,10 +121,12 @@ class Words(LanceModel):
vector: Vector(func.ndims()) = func.VectorField() vector: Vector(func.ndims()) = func.VectorField()
table = db.create_table("words", schema=Words) table = db.create_table("words", schema=Words)
table.add([ table.add(
{"text": "hello world"}, [
{"text": "goodbye world"} {"text": "hello world"},
]) {"text": "goodbye world"}
]
)
query = "greetings" query = "greetings"
actual = table.search(query).limit(1).to_pydantic(Words)[0] actual = table.search(query).limit(1).to_pydantic(Words)[0]

View File

@@ -1,53 +0,0 @@
# Variables and Secrets
Most embedding configuration options are saved in the table's metadata. However,
this isn't always appropriate. For example, API keys should never be stored in the
metadata. Additionally, other configuration options might be best set at runtime,
such as the `device` configuration that controls whether to use GPU or CPU for
inference. If you hardcoded this to GPU, you wouldn't be able to run the code on
a server without one.
To handle these cases, you can set variables on the embedding registry and
reference them in the embedding configuration. These variables will be available
during the runtime of your program, but not saved in the table's metadata. When
the table is loaded from a different process, the variables must be set again.
To set a variable, use the `set_var()` / `setVar()` method on the embedding registry.
To reference a variable, use the syntax `$env:VARIABLE_NAME`. If there is a default
value, you can use the syntax `$env:VARIABLE_NAME:DEFAULT_VALUE`.
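To make the mechanics concrete, here is a hedged sketch (the variable name and model are placeholders, and it assumes PyTorch is installed and that the `sentence-transformers` function accepts a `device` option):
```python
import torch  # used only to detect whether a CUDA GPU is available

from lancedb.embeddings import get_registry

registry = get_registry()
# Set at runtime; never persisted in the table's metadata.
registry.set_var("device", "cuda" if torch.cuda.is_available() else "cpu")

model = registry.get("sentence-transformers").create(
    name="BAAI/bge-small-en-v1.5",
    device="$env:device:cpu",  # falls back to "cpu" if the variable is unset
)
```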
## Using variables to set secrets
Sensitive configuration, such as API keys, must either be set as environment
variables or using variables on the embedding registry. If you pass in a hardcoded
value, LanceDB will raise an error. Instead, if you want to set an API key via
configuration, use a variable:
=== "Python"
```python
--8<-- "python/python/tests/docs/test_embeddings_optional.py:register_secret"
```
=== "Typescript"
```typescript
--8<-- "nodejs/examples/embedding.test.ts:register_secret"
```
## Using variables to set the device parameter
Many embedding functions that run locally have a `device` parameter that controls
whether to use GPU or CPU for inference. Because not all computers have a GPU,
it's helpful to be able to set the `device` parameter at runtime, rather than
have it hard coded in the embedding configuration. To make it work even if the
variable isn't set, you could provide a default value of `cpu` in the embedding
configuration.
Some embedding libraries even have a method to detect which devices are available,
which could be used to dynamically set the device at runtime. For example, in Python
you can check if a CUDA GPU is available using `torch.cuda.is_available()`.
```python
--8<-- "python/python/tests/docs/test_embeddings_optional.py:register_device"
```

View File

@@ -36,6 +36,6 @@
[aware_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/chatbot_using_Llama2_&_lanceDB/main.ipynb [aware_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/chatbot_using_Llama2_&_lanceDB/main.ipynb
[aware_ghost]: https://blog.lancedb.com/context-aware-chatbot-using-llama-2-lancedb-as-vector-database-4d771d95c755 [aware_ghost]: https://blog.lancedb.com/context-aware-chatbot-using-llama-2-lancedb-as-vector-database-4d771d95c755
[csv_github]: https://github.com/lancedb/vectordb-recipes/tree/main/examples/archived_examples/Chat_with_csv_file [csv_github]: https://github.com/lancedb/vectordb-recipes/blob/main/tutorials/Chat_with_csv_file
[csv_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/archived_examples/Chat_with_csv_file/main.ipynb [csv_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Chat_with_csv_file/main.ipynb
[csv_ghost]: https://blog.lancedb.com/p/d8c71df4-e55f-479a-819e-cde13354a6a3/ [csv_ghost]: https://blog.lancedb.com/p/d8c71df4-e55f-479a-819e-cde13354a6a3/

View File

@@ -12,7 +12,7 @@ LanceDB supports multimodal search by indexing and querying vector representatio
|:----------------|:-----------------|:-----------| |:----------------|:-----------------|:-----------|
| **Multimodal CLIP: DiffusionDB 🌐💥** | Multi-Modal Search with **CLIP** and **LanceDB** Using **DiffusionDB** Data for Combined Text and Image Understanding ! 🔓 | [![GitHub](../../assets/github.svg)][Clip_diffusionDB_github] <br>[![Open In Collab](../../assets/colab.svg)][Clip_diffusionDB_colab] <br>[![Python](../../assets/python.svg)][Clip_diffusionDB_python] <br>[![Ghost](../../assets/ghost.svg)][Clip_diffusionDB_ghost] | | **Multimodal CLIP: DiffusionDB 🌐💥** | Multi-Modal Search with **CLIP** and **LanceDB** Using **DiffusionDB** Data for Combined Text and Image Understanding ! 🔓 | [![GitHub](../../assets/github.svg)][Clip_diffusionDB_github] <br>[![Open In Collab](../../assets/colab.svg)][Clip_diffusionDB_colab] <br>[![Python](../../assets/python.svg)][Clip_diffusionDB_python] <br>[![Ghost](../../assets/ghost.svg)][Clip_diffusionDB_ghost] |
| **Multimodal CLIP: Youtube Videos 📹👀** | Search **Youtube videos** using Multimodal CLIP, finding relevant content with ease and accuracy! 🎯 | [![Github](../../assets/github.svg)][Clip_youtube_github] <br>[![Open In Collab](../../assets/colab.svg)][Clip_youtube_colab] <br> [![Python](../../assets/python.svg)][Clip_youtube_python] <br>[![Ghost](../../assets/ghost.svg)][Clip_youtube_python] | | **Multimodal CLIP: Youtube Videos 📹👀** | Search **Youtube videos** using Multimodal CLIP, finding relevant content with ease and accuracy! 🎯 | [![Github](../../assets/github.svg)][Clip_youtube_github] <br>[![Open In Collab](../../assets/colab.svg)][Clip_youtube_colab] <br> [![Python](../../assets/python.svg)][Clip_youtube_python] <br>[![Ghost](../../assets/ghost.svg)][Clip_youtube_python] |
| **Multimodal Image + Text Search 📸🔍** | Find **relevant documents** and **images** with a single query using **LanceDB's** multimodal search capabilities, to seamlessly integrate text and visuals ! 🌉 | [![GitHub](../../assets/github.svg)](https://github.com/lancedb/vectordb-recipes/tree/main/examples/archived_examples/multimodal_search) <br>[![Open In Collab](../../assets/colab.svg)](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/archived_examples/multimodal_search/main.ipynb) <br> [![Python](../../assets/python.svg)](https://github.com/lancedb/vectordb-recipes/blob/main/examples/multimodal_search/main.py)<br> [![Ghost](../../assets/ghost.svg)](https://blog.lancedb.com/multi-modal-ai-made-easy-with-lancedb-clip-5aaf8801c939/) | | **Multimodal Image + Text Search 📸🔍** | Find **relevant documents** and **images** with a single query using **LanceDB's** multimodal search capabilities, to seamlessly integrate text and visuals ! 🌉 | [![GitHub](../../assets/github.svg)](https://github.com/lancedb/vectordb-recipes/blob/main/examples/multimodal_search) <br>[![Open In Collab](../../assets/colab.svg)](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/multimodal_search/main.ipynb) <br> [![Python](../../assets/python.svg)](https://github.com/lancedb/vectordb-recipes/blob/main/examples/multimodal_search/main.py)<br> [![Ghost](../../assets/ghost.svg)](https://blog.lancedb.com/multi-modal-ai-made-easy-with-lancedb-clip-5aaf8801c939/) |
| **Cambrian-1: Vision-Centric Image Exploration 🔍👀** | Learn how **Cambrian-1** works, using an example of **Vision-Centric** exploration on images found through vector search ! Work on **Flickr-8k** dataset 🔎 | [![Kaggle](https://img.shields.io/badge/Kaggle-035a7d?style=for-the-badge&logo=kaggle&logoColor=white)](https://www.kaggle.com/code/prasantdixit/cambrian-1-vision-centric-exploration-of-images/)<br> [![Ghost](../../assets/ghost.svg)](https://blog.lancedb.com/cambrian-1-vision-centric-exploration/) | | **Cambrian-1: Vision-Centric Image Exploration 🔍👀** | Learn how **Cambrian-1** works, using an example of **Vision-Centric** exploration on images found through vector search ! Work on **Flickr-8k** dataset 🔎 | [![Kaggle](https://img.shields.io/badge/Kaggle-035a7d?style=for-the-badge&logo=kaggle&logoColor=white)](https://www.kaggle.com/code/prasantdixit/cambrian-1-vision-centric-exploration-of-images/)<br> [![Ghost](../../assets/ghost.svg)](https://blog.lancedb.com/cambrian-1-vision-centric-exploration/) |

View File

@@ -70,12 +70,12 @@ Build RAG (Retrieval-Augmented Generation) with LanceDB, a powerful solution fo
[flare_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/better-rag-FLAIR/main.ipynb [flare_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/better-rag-FLAIR/main.ipynb
[flare_ghost]: https://blog.lancedb.com/better-rag-with-active-retrieval-augmented-generation-flare-3b66646e2a9f/ [flare_ghost]: https://blog.lancedb.com/better-rag-with-active-retrieval-augmented-generation-flare-3b66646e2a9f/
[query_github]: https://github.com/lancedb/vectordb-recipes/tree/main/examples/archived_examples/QueryExpansion%26Reranker [query_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/QueryExpansion&Reranker
[query_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/archived_examples/QueryExpansion&Reranker/main.ipynb [query_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/QueryExpansion&Reranker/main.ipynb
[fusion_github]: https://github.com/lancedb/vectordb-recipes/tree/main/examples/archived_examples/RAG_Fusion [fusion_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/RAG_Fusion
[fusion_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/archived_examples/RAG_Fusion/main.ipynb [fusion_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/RAG_Fusion/main.ipynb
[agentic_github]: https://github.com/lancedb/vectordb-recipes/blob/main/tutorials/Agentic_RAG [agentic_github]: https://github.com/lancedb/vectordb-recipes/blob/main/tutorials/Agentic_RAG
[agentic_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Agentic_RAG/main.ipynb [agentic_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Agentic_RAG/main.ipynb

View File

@@ -19,8 +19,8 @@ Deliver personalized experiences with Recommender Systems. 🎁
[movie_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/movie-recommender/main.py [movie_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/movie-recommender/main.py
[genre_github]: https://github.com/lancedb/vectordb-recipes/tree/main/examples/archived_examples/movie-recommendation-with-genres [genre_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/movie-recommendation-with-genres
[genre_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/archived_examples/movie-recommendation-with-genres/movie_recommendation_with_doc2vec_and_lancedb.ipynb [genre_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/movie-recommendation-with-genres/movie_recommendation_with_doc2vec_and_lancedb.ipynb
[genre_ghost]: https://blog.lancedb.com/movie-recommendation-system-using-lancedb-and-doc2vec/ [genre_ghost]: https://blog.lancedb.com/movie-recommendation-system-using-lancedb-and-doc2vec/
[product_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/product-recommender [product_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/product-recommender
@@ -33,5 +33,5 @@ Deliver personalized experiences with Recommender Systems. 🎁
[arxiv_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/arxiv-recommender/main.py [arxiv_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/arxiv-recommender/main.py
[food_github]: https://github.com/lancedb/vectordb-recipes/tree/main/examples/archived_examples/Food_recommendation [food_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Food_recommendation
[food_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/archived_examples/Food_recommendation/main.ipynb [food_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Food_recommendation/main.ipynb

View File

@@ -37,16 +37,16 @@ LanceDB implements vector search algorithms for efficient document retrieval and
[NER_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/NER-powered-Semantic-Search/NER_powered_Semantic_Search_with_LanceDB.ipynb [NER_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/NER-powered-Semantic-Search/NER_powered_Semantic_Search_with_LanceDB.ipynb
[NER_ghost]: https://blog.lancedb.com/ner-powered-semantic-search-using-lancedb-51051dc3e493 [NER_ghost]: https://blog.lancedb.com/ner-powered-semantic-search-using-lancedb-51051dc3e493
[audio_search_github]: https://github.com/lancedb/vectordb-recipes/tree/main/examples/archived_examples/audio_search [audio_search_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/audio_search
[audio_search_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/archived_examples/audio_search/main.ipynb [audio_search_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/audio_search/main.ipynb
[audio_search_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/archived_examples/audio_search/main.py [audio_search_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/audio_search/main.py
[mls_github]: https://github.com/lancedb/vectordb-recipes/tree/main/examples/archived_examples/multi-lingual-wiki-qa [mls_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/multi-lingual-wiki-qa
[mls_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/archived_examples/multi-lingual-wiki-qa/main.ipynb [mls_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/multi-lingual-wiki-qa/main.ipynb
[mls_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/archived_examples/multi-lingual-wiki-qa/main.py [mls_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/multi-lingual-wiki-qa/main.py
[fr_github]: https://github.com/lancedb/vectordb-recipes/tree/main/examples/archived_examples/facial_recognition [fr_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/facial_recognition
[fr_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/archived_examples/facial_recognition/main.ipynb [fr_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/facial_recognition/main.ipynb
[sentiment_analysis_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Sentiment-Analysis-Analyse-Hotel-Reviews [sentiment_analysis_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Sentiment-Analysis-Analyse-Hotel-Reviews
[sentiment_analysis_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Sentiment-Analysis-Analyse-Hotel-Reviews/Sentiment_Analysis_using_LanceDB.ipynb [sentiment_analysis_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Sentiment-Analysis-Analyse-Hotel-Reviews/Sentiment_Analysis_using_LanceDB.ipynb
@@ -70,8 +70,8 @@ LanceDB implements vector search algorithms for efficient document retrieval and
[openvino_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Accelerate-Vector-Search-Applications-Using-OpenVINO/clip_text_image_search.ipynb [openvino_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Accelerate-Vector-Search-Applications-Using-OpenVINO/clip_text_image_search.ipynb
[openvino_ghost]: https://blog.lancedb.com/accelerate-vector-search-applications-using-openvino-lancedb/ [openvino_ghost]: https://blog.lancedb.com/accelerate-vector-search-applications-using-openvino-lancedb/
[zsic_github]: https://github.com/lancedb/vectordb-recipes/tree/main/examples/archived_examples/zero-shot-image-classification [zsic_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/zero-shot-image-classification
[zsic_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/archived_examples/zero-shot-image-classification/main.ipynb [zsic_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/zero-shot-image-classification/main.ipynb
[zsic_ghost]: https://blog.lancedb.com/zero-shot-image-classification-with-vector-search/ [zsic_ghost]: https://blog.lancedb.com/zero-shot-image-classification-with-vector-search/

View File

@@ -1 +0,0 @@
!function(){var e,t,n;e="9627b71b382d201",t=function(){Reo.init({clientID:"9627b71b382d201"})},(n=document.createElement("script")).src="https://static.reo.dev/"+e+"/reo.js",n.defer=!0,n.onload=t,document.head.appendChild(n)}();

View File

@@ -1,29 +1,49 @@
# Full-text search (Native FTS) # Full-text search
LanceDB provides support for full-text search via Lance, allowing you to incorporate keyword-based search (based on BM25) in your retrieval solutions. LanceDB provides support for full-text search via Lance (before via [Tantivy](https://github.com/quickwit-oss/tantivy) (Python only)), allowing you to incorporate keyword-based search (based on BM25) in your retrieval solutions.
Currently, the Lance full-text search is missing some features that exist in the Tantivy full-text search, including the query parser and tokenizer customization. Thus, in Python, Tantivy is still the default way to do full-text search, and many of the instructions below apply only to Tantivy-based indices.
## Installation (Only for Tantivy-based FTS)
!!! note !!! note
The Python SDK uses tantivy-based FTS by default; you need to pass `use_tantivy=False` to use native FTS. No need to install the tantivy dependency if using native FTS
To use full-text search, install the dependency [`tantivy-py`](https://github.com/quickwit-oss/tantivy-py):
```sh
# Say you want to use tantivy==0.20.1
pip install tantivy==0.20.1
```
## Example ## Example
Consider that we have a LanceDB table named `my_table`, whose string column `text` we want to index and query via keyword search. The FTS index must be created before you can search via keywords. Consider that we have a LanceDB table named `my_table`, whose string column `text` we want to index and query via keyword search. The FTS index must be created before you can search via keywords.
=== "Python" === "Python"
=== "Sync API"
```python ```python
--8<-- "python/python/tests/docs/test_search.py:import-lancedb" import lancedb
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
--8<-- "python/python/tests/docs/test_search.py:basic_fts"
```
=== "Async API"
```python uri = "data/sample-lancedb"
--8<-- "python/python/tests/docs/test_search.py:import-lancedb" db = lancedb.connect(uri)
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
--8<-- "python/python/tests/docs/test_search.py:basic_fts_async" table = db.create_table(
``` "my_table",
data=[
{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"},
{"vector": [5.9, 26.5], "text": "There are several kittens playing"},
],
)
# passing `use_tantivy=False` to use lance FTS index
# `use_tantivy=True` by default
table.create_fts_index("text")
table.search("puppy").limit(10).select(["text"]).to_list()
# [{'text': 'Frodo was a happy puppy', '_score': 0.6931471824645996}]
# ...
```
=== "TypeScript" === "TypeScript"
@@ -42,7 +62,7 @@ Consider that we have a LanceDB table named `my_table`, whose string column `tex
}); });
await tbl await tbl
.search("puppy", "fts") .search("puppy", queryType="fts")
.select(["text"]) .select(["text"])
.limit(10) .limit(10)
.toArray(); .toArray();
@@ -73,104 +93,58 @@ Consider that we have a LanceDB table named `my_table`, whose string column `tex
``` ```
It searches over all indexed columns by default, which is useful when there are multiple indexed columns. It searches over all indexed columns by default, which is useful when there are multiple indexed columns.
For now, this is supported only in the Tantivy-based FTS.
Pass `fts_columns="text"` if you want to specify the columns to search. Pass `fts_columns="text"` if you want to specify the columns to search, but it's not available for Tantivy-based full text search.
!!! note !!! note
LanceDB automatically searches on the existing FTS index if the input to the search is of type `str`. If you provide a vector as input, LanceDB will search the ANN index instead. LanceDB automatically searches on the existing FTS index if the input to the search is of type `str`. If you provide a vector as input, LanceDB will search the ANN index instead.
## Tokenization ## Tokenization
By default, the text is tokenized by splitting on punctuation and whitespace, filtering out words longer than 40 characters, and lowercasing all words. By default the text is tokenized by splitting on punctuation and whitespaces and then removing tokens that are longer than 40 chars. For more language specific tokenization then provide the argument tokenizer_name with the 2 letter language code followed by "_stem". So for english it would be "en_stem".
Stemming is useful for improving search results by reducing words to their root form, e.g. "running" to "run". LanceDB supports stemming for multiple languages; you can specify the tokenizer name to enable stemming with the pattern `tokenizer_name="{language_code}_stem"`, e.g. `en_stem` for English. For now, only the Tantivy-based FTS index supports specifying the tokenizer, so it's only available in Python with `use_tantivy=True`.
For example, to enable stemming for English: === "use_tantivy=True"
=== "Sync API"
```python ```python
--8<-- "python/python/tests/docs/test_search.py:fts_config_stem" table.create_fts_index("text", use_tantivy=True, tokenizer_name="en_stem")
``` ```
=== "Async API"
```python === "use_tantivy=False"
--8<-- "python/python/tests/docs/test_search.py:fts_config_stem_async"
``` [**Not supported yet**](https://github.com/lancedb/lance/issues/1195)
the following [languages](https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html) are currently supported. the following [languages](https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html) are currently supported.
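For reference, a minimal inline sketch of the stemming pattern described above (assuming the synchronous API; the column name is a placeholder):
```python
# Enable English stemming on a native (Lance) FTS index.
table.create_fts_index("text", use_tantivy=False, tokenizer_name="en_stem", replace=True)
```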
The tokenizer is customizable: you can specify how it splits the text, how it filters out words, and so on. ## Index multiple columns
For example, for languages with accents, you can configure the tokenizer to use `ascii_folding` to remove accents, e.g. 'é' to 'e': If you have multiple string columns to index, there's no need to combine them manually -- simply pass them all as a list to `create_fts_index`:
=== "Sync API"
=== "use_tantivy=True"
```python ```python
--8<-- "python/python/tests/docs/test_search.py:fts_config_folding" table.create_fts_index(["text1", "text2"])
``` ```
=== "Async API"
```python === "use_tantivy=False"
--8<-- "python/python/tests/docs/test_search.py:fts_config_folding_async"
``` [**Not supported yet**](https://github.com/lancedb/lance/issues/1195)
Note that the search API call does not change - you can search over all indexed columns at once.
## Filtering ## Filtering
LanceDB full-text search supports filtering the search results by a condition; both pre-filtering and post-filtering are supported. Currently the LanceDB full text search feature supports *post-filtering*, meaning filters are
applied on top of the full text search results. This can be invoked via the familiar
`where` syntax:
This can be invoked via the familiar `where` syntax.
With pre-filtering:
=== "Python" === "Python"
=== "Sync API" ```python
table.search("puppy").limit(10).where("meta='foo'").to_list()
```python
--8<-- "python/python/tests/docs/test_search.py:fts_prefiltering"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_search.py:fts_prefiltering_async"
```
=== "TypeScript"
```typescript
await tbl
.search("puppy")
.select(["id", "doc"])
.limit(10)
.where("meta='foo'")
.prefilter(true)
.toArray();
``` ```
=== "Rust"
```rust
table
.query()
.full_text_search(FullTextSearchQuery::new("puppy".to_owned()))
.select(lancedb::query::Select::Columns(vec!["doc".to_owned()]))
.limit(10)
.only_if("meta='foo'")
.execute()
.await?;
```
With post-filtering:
=== "Python"
=== "Sync API"
```python
--8<-- "python/python/tests/docs/test_search.py:fts_postfiltering"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_search.py:fts_postfiltering_async"
```
=== "TypeScript" === "TypeScript"
```typescript ```typescript
@@ -179,7 +153,6 @@ With post-filtering:
.select(["id", "doc"]) .select(["id", "doc"])
.limit(10) .limit(10)
.where("meta='foo'") .where("meta='foo'")
.prefilter(false)
.toArray(); .toArray();
``` ```
@@ -190,69 +163,104 @@ With post-filtering:
.query() .query()
.full_text_search(FullTextSearchQuery::new(words[0].to_owned())) .full_text_search(FullTextSearchQuery::new(words[0].to_owned()))
.select(lancedb::query::Select::Columns(vec!["doc".to_owned()])) .select(lancedb::query::Select::Columns(vec!["doc".to_owned()]))
.postfilter()
.limit(10) .limit(10)
.only_if("meta='foo'") .only_if("meta='foo'")
.execute() .execute()
.await?; .await?;
``` ```
## Sorting
!!! warning "Warn"
Sorting is available only for the Tantivy-based FTS
You can pre-sort the documents by specifying `ordering_field_names` when
creating the full-text search index. Once pre-sorted, you can then specify
`ordering_field_name` while searching to return results sorted by the given
field. For example,
```python
table.create_fts_index(["text_field"], use_tantivy=True, ordering_field_names=["sort_by_field"])
(table.search("terms", ordering_field_name="sort_by_field")
.limit(20)
.to_list())
```
!!! note
If you wish to specify an ordering field at query time, you must also
have specified it during indexing time. Otherwise at query time, an
error will be raised that looks like `ValueError: The field does not exist: xxx`
!!! note
The fields to sort on must be of typed unsigned integer, or else you will see
an error during indexing that looks like
`TypeError: argument 'value': 'float' object cannot be interpreted as an integer`.
!!! note
You can specify multiple fields for ordering at indexing time.
But at query time only one ordering field is supported.
## Phrase queries vs. terms queries ## Phrase queries vs. terms queries
!!! warning "Warn" !!! warning "Warn"
Lance-based FTS doesn't support queries using boolean operators `OR`, `AND`. Lance-based FTS doesn't support queries using boolean operators `OR`, `AND`.
For full-text search you can specify either a **phrase** query like `"the old man and the sea"`, For full-text search you can specify either a **phrase** query like `"the old man and the sea"`,
or a **terms** search query like `old man sea`. For more details on the terms or a **terms** search query like `"(Old AND Man) AND Sea"`. For more details on the terms
query syntax, see Tantivy's [query parser rules](https://docs.rs/tantivy/latest/tantivy/query/struct.QueryParser.html). query syntax, see Tantivy's [query parser rules](https://docs.rs/tantivy/latest/tantivy/query/struct.QueryParser.html).
To search for a phrase, the index must be created with `with_position=True`: !!! tip "Note"
=== "Sync API" The query parser will raise an exception on queries that are ambiguous. For example, in the query `they could have been dogs OR cats`, `OR` is capitalized so it's considered a keyword query operator. But it's ambiguous how the left part should be treated. So if you submit this search query as is, you'll get `Syntax Error: they could have been dogs OR cats`.
```python ```py
--8<-- "python/python/tests/docs/test_search.py:fts_with_position" # This raises a syntax error
``` table.search("they could have been dogs OR cats")
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_search.py:fts_with_position_async"
```
This will allow you to search for phrases, but it will also significantly increase the index size and indexing time.
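For illustration, a hedged sketch (the column name and phrase are placeholders, and the quoting convention may differ between the native and Tantivy-based parsers):
```python
# Record token positions at index time so that phrase queries are possible.
table.create_fts_index("text", use_tantivy=False, with_position=True, replace=True)

# Wrapping the phrase in double quotes (inside single quotes) requests a phrase match.
table.search('"frodo was a happy puppy"').limit(10).to_list()
```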
## Incremental indexing
LanceDB supports incremental indexing, which means you can add new records to the table without reindexing the entire table.
This can make the query more efficient, especially when the table is large and the new records are relatively small.
=== "Python"
=== "Sync API"
```python
--8<-- "python/python/tests/docs/test_search.py:fts_incremental_index"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_search.py:fts_incremental_index_async"
```
=== "TypeScript"
```typescript
await tbl.add([{ vector: [3.1, 4.1], text: "Frodo was a happy puppy" }]);
await tbl.optimize();
``` ```
=== "Rust" On the other hand, lowercasing `OR` to `or` will work, because there are no capitalized logical operators and
the query is treated as a phrase query.
```rust ```py
let more_data: Box<dyn RecordBatchReader + Send> = create_some_records()?; # This works!
tbl.add(more_data).execute().await?; table.search("they could have been dogs or cats")
tbl.optimize(OptimizeAction::All).execute().await?;
``` ```
!!! note
New data added after creating the FTS index will appear in search results while the incremental index is still in progress, but with increased latency due to a flat search on the unindexed portion. LanceDB Cloud automates this merging process, minimizing the impact on search speed. It can be cumbersome to have to remember what will cause a syntax error depending on the type of
query you want to perform. To make this simpler, when you want to perform a phrase query, you can
enforce it in one of two ways:
1. Place the double-quoted query inside single quotes. For example, `table.search('"they could have been dogs OR cats"')` is treated as
a phrase query.
1. Explicitly declare the `phrase_query()` method. This is useful when you have a phrase query that
itself contains double quotes. For example, `table.search('the cats OR dogs were not really "pets" at all').phrase_query()`
is treated as a phrase query.
In general, a query that's declared as a phrase query will be wrapped in double quotes during parsing, with nested
double quotes replaced by single quotes.
## Configurations (Only for Tantivy-based FTS)
By default, LanceDB configures a 1GB heap size limit for creating the index. You can
reduce this if running on a smaller node, or increase this for faster performance while
indexing a larger corpus.
```python
# configure a 512MB heap size
heap = 1024 * 1024 * 512
table.create_fts_index(["text1", "text2"], writer_heap_size=heap, replace=True)
```
## Current limitations
For the Tantivy-based FTS:
1. Currently we do not yet support incremental writes.
If you add data after FTS index creation, it won't be reflected
in search results until you do a full reindex.
2. We currently only support local filesystem paths for the FTS index.
This is a tantivy limitation. We've implemented an object store plugin
but there's no way in tantivy-py to specify to use it.

View File

@@ -1,160 +0,0 @@
# Full-text search (Tantivy-based FTS)
LanceDB also provides support for full-text search via [Tantivy](https://github.com/quickwit-oss/tantivy), allowing you to incorporate keyword-based search (based on BM25) in your retrieval solutions.
The Tantivy-based FTS is only available in the Python synchronous API and does not support building indexes on object storage or incremental indexing. If you need these features, try the [native FTS](fts.md).
## Installation
To use full-text search, install the dependency [`tantivy-py`](https://github.com/quickwit-oss/tantivy-py):
```sh
# Say you want to use tantivy==0.20.1
pip install tantivy==0.20.1
```
## Example
Consider that we have a LanceDB table named `my_table`, whose string column `content` we want to index and query via keyword search. The FTS index must be created before you can search via keywords.
```python
import lancedb
uri = "data/sample-lancedb"
db = lancedb.connect(uri)
table = db.create_table(
"my_table",
data=[
{"id": 1, "vector": [3.1, 4.1], "title": "happy puppy", "content": "Frodo was a happy puppy", "meta": "foo"},
{"id": 2, "vector": [5.9, 26.5], "title": "playing kittens", "content": "There are several kittens playing around the puppy", "meta": "bar"},
],
)
# passing `use_tantivy=False` to use lance FTS index
# `use_tantivy=True` by default
table.create_fts_index("content", use_tantivy=True)
table.search("puppy").limit(10).select(["content"]).to_list()
# [{'content': 'Frodo was a happy puppy', '_score': 0.6931471824645996}]
# ...
```
It searches over all indexed columns by default, which is useful when there are multiple indexed columns.
!!! note
LanceDB automatically searches on the existing FTS index if the input to the search is of type `str`. If you provide a vector as input, LanceDB will search the ANN index instead.
## Tokenization
By default, the text is tokenized by splitting on punctuation and whitespace and then removing tokens longer than 40 characters. For more language-specific tokenization, provide the `tokenizer_name` argument with the two-letter language code followed by "_stem"; for English it would be "en_stem".
```python
table.create_fts_index("content", use_tantivy=True, tokenizer_name="en_stem", replace=True)
```
the following [languages](https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html) are currently supported.
## Index multiple columns
If you have multiple string columns to index, there's no need to combine them manually -- simply pass them all as a list to `create_fts_index`:
```python
table.create_fts_index(["title", "content"], use_tantivy=True, replace=True)
```
Note that the search API call does not change - you can search over all indexed columns at once.
## Filtering
Currently the LanceDB full text search feature supports *post-filtering*, meaning filters are
applied on top of the full text search results (see [native FTS](fts.md) if you need pre-filtering). This can be invoked via the familiar
`where` syntax:
```python
table.search("puppy").limit(10).where("meta='foo'").to_list()
```
## Sorting
You can pre-sort the documents by specifying `ordering_field_names` when
creating the full-text search index. Once pre-sorted, you can then specify
`ordering_field_name` while searching to return results sorted by the given
field. For example,
```python
table.create_fts_index(["content"], use_tantivy=True, ordering_field_names=["id"], replace=True)
(table.search("puppy", ordering_field_name="id")
.limit(20)
.to_list())
```
!!! note
If you wish to specify an ordering field at query time, you must also
have specified it during indexing time. Otherwise at query time, an
error will be raised that looks like `ValueError: The field does not exist: xxx`
!!! note
The fields to sort on must be of typed unsigned integer, or else you will see
an error during indexing that looks like
`TypeError: argument 'value': 'float' object cannot be interpreted as an integer`.
!!! note
You can specify multiple fields for ordering at indexing time.
But at query time only one ordering field is supported.
## Phrase queries vs. terms queries
For full-text search you can specify either a **phrase** query like `"the old man and the sea"`,
or a **terms** search query like `"(Old AND Man) AND Sea"`. For more details on the terms
query syntax, see Tantivy's [query parser rules](https://docs.rs/tantivy/latest/tantivy/query/struct.QueryParser.html).
!!! tip "Note"
The query parser will raise an exception on queries that are ambiguous. For example, in the query `they could have been dogs OR cats`, `OR` is capitalized so it's considered a keyword query operator. But it's ambiguous how the left part should be treated. So if you submit this search query as is, you'll get `Syntax Error: they could have been dogs OR cats`.
```py
# This raises a syntax error
table.search("they could have been dogs OR cats")
```
On the other hand, lowercasing `OR` to `or` will work, because there are no capitalized logical operators and
the query is treated as a phrase query.
```py
# This works!
table.search("they could have been dogs or cats")
```
It can be cumbersome to have to remember what will cause a syntax error depending on the type of
query you want to perform. To make this simpler, when you want to perform a phrase query, you can
enforce it in one of two ways:
1. Place the double-quoted query inside single quotes. For example, `table.search('"they could have been dogs OR cats"')` is treated as
a phrase query.
1. Explicitly declare the `phrase_query()` method. This is useful when you have a phrase query that
itself contains double quotes. For example, `table.search('the cats OR dogs were not really "pets" at all').phrase_query()`
is treated as a phrase query.
In general, a query that's declared as a phrase query will be wrapped in double quotes during parsing, with nested
double quotes replaced by single quotes.
## Configurations
By default, LanceDB configures a 1GB heap size limit for creating the index. You can
reduce this if running on a smaller node, or increase this for faster performance while
indexing a larger corpus.
```python
# configure a 512MB heap size
heap = 1024 * 1024 * 512
table.create_fts_index(["title", "content"], use_tantivy=True, writer_heap_size=heap, replace=True)
```
## Current limitations
1. New data added after creating the FTS index will appear in search results, but with increased latency due to a flat search on the unindexed portion. Re-indexing with `create_fts_index` will reduce latency. LanceDB Cloud automates this merging process, minimizing the impact on search speed.
2. We currently only support local filesystem paths for the FTS index.
This is a tantivy limitation. We've implemented an object store plugin
but there's no way in tantivy-py to specify to use it.

View File

@@ -1,51 +1,38 @@
# Building a Scalar Index
Scalar indices organize data by scalar attributes (e.g. numbers, categorical values), enabling fast filtering of vector data. In vector databases, scalar indices accelerate the retrieval of scalar data associated with vectors, thus enhancing the query performance when searching for vectors that meet certain scalar criteria.
Similar to many SQL databases, LanceDB supports several types of scalar indices to accelerate search over scalar columns.
- `BTREE`: The most common type is BTREE. The index stores a copy of the column in sorted order. This sorted copy allows a binary search to be used to satisfy queries.
- `BITMAP`: this index stores a bitmap for each unique value in the column. It uses a series of bits to indicate whether a value is present in a row of a table.
- `LABEL_LIST`: a special index that can be used on `List<T>` columns to support queries with `array_contains_all` and `array_contains_any` using an underlying bitmap index. For example, a column that contains lists of tags (e.g. `["tag1", "tag2", "tag3"]`) can be indexed with a `LABEL_LIST` index.
!!! tips "How to choose the right scalar index type"
`BTREE`: This index is good for scalar columns with mostly distinct values and does best when the query is highly selective.
`BITMAP`: This index works best for low-cardinality numeric or string columns, where the number of unique values is small (i.e., less than a few thousands).
`LABEL_LIST`: This index should be used for columns containing list-type data.
| Data Type | Filter | Index Type |
| --------------------------------------------------------------- | ----------------------------------------- | ------------ |
| Numeric, String, Temporal | `<`, `=`, `>`, `in`, `between`, `is null` | `BTREE` |
| Boolean, numbers or strings with fewer than 1,000 unique values | `<`, `=`, `>`, `in`, `between`, `is null` | `BITMAP` |
| List of low cardinality of numbers or strings | `array_has_any`, `array_has_all` | `LABEL_LIST` |
### Create a scalar index
=== "Python" === "Python"
=== "Sync API" ```python
import lancedb
books = [
{"book_id": 1, "publisher": "plenty of books", "tags": ["fantasy", "adventure"]},
{"book_id": 2, "publisher": "book town", "tags": ["non-fiction"]},
{"book_id": 3, "publisher": "oreilly", "tags": ["textbook"]}
]
```python db = lancedb.connect("./db")
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb" table = db.create_table("books", books)
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-btree-bitmap" table.create_scalar_index("book_id") # BTree by default
--8<-- "python/python/tests/docs/test_guide_index.py:basic_scalar_index" table.create_scalar_index("publisher", index_type="BITMAP")
``` ```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-btree-bitmap"
--8<-- "python/python/tests/docs/test_guide_index.py:basic_scalar_index_async"
```
=== "Typescript" === "Typescript"
@@ -59,22 +46,16 @@ over scalar columns.
await tlb.create_index("publisher", { config: lancedb.Index.bitmap() }) await tlb.create_index("publisher", { config: lancedb.Index.bitmap() })
``` ```
The following scan will be faster if the column `book_id` has a scalar index: For example, the following scan will be faster if the column `my_col` has a scalar index:
=== "Python" === "Python"
=== "Sync API" ```python
import lancedb
```python table = db.open_table("books")
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb" my_df = table.search().where("book_id = 2").to_pandas()
--8<-- "python/python/tests/docs/test_guide_index.py:search_with_scalar_index" ```
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
--8<-- "python/python/tests/docs/test_guide_index.py:search_with_scalar_index_async"
```
=== "Typescript" === "Typescript"
@@ -95,18 +76,22 @@ Scalar indices can also speed up scans containing a vector search or full text s
=== "Python" === "Python"
=== "Sync API" ```python
import lancedb
```python data = [
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb" {"book_id": 1, "vector": [1, 2]},
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_scalar_index" {"book_id": 2, "vector": [3, 4]},
``` {"book_id": 3, "vector": [5, 6]}
=== "Async API" ]
table = db.create_table("book_with_embeddings", data)
```python (
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb" table.search([1, 2])
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_scalar_index_async" .where("book_id != 3", prefilter=True)
``` .to_pandas()
)
```
=== "Typescript" === "Typescript"
@@ -121,36 +106,3 @@ Scalar indices can also speed up scans containing a vector search or full text s
.limit(10)
.toArray();
```
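A rough Python sketch of the pattern above (`table` and `book_id` are the hypothetical table and column from this example):
```python
# The scalar index on book_id lets the filter prune rows before the vector search runs
results = (
    table.search([1, 2])
    .where("book_id != 3", prefilter=True)
    .to_pandas()
)
```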
### Update a scalar index
Updating the table data (adding, deleting, or modifying records) requires that you also update the scalar index. This can be done by calling `optimize`, which will trigger an update to the existing scalar index.
=== "Python"
=== "Sync API"
```python
--8<-- "python/python/tests/docs/test_guide_index.py:update_scalar_index"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_guide_index.py:update_scalar_index_async"
```
=== "TypeScript"
```typescript
await tbl.add([{ vector: [7, 8], book_id: 4 }]);
await tbl.optimize();
```
=== "Rust"
```rust
let more_data: Box<dyn RecordBatchReader + Send> = create_some_records()?;
tbl.add(more_data).execute().await?;
tbl.optimize(OptimizeAction::All).execute().await?;
```
!!! note
New data added after creating the scalar index will still appear in search results if optimize is not used, but with increased latency due to a flat search on the unindexed portion. LanceDB Cloud automates the optimize process, minimizing the impact on search speed.
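A rough equivalent in the Python sync API (assuming a hypothetical table with `vector` and `book_id` columns, mirroring the TypeScript example above):
```python
# Add new rows, then call optimize() so the scalar index covers them
table.add([{"vector": [7, 8], "book_id": 4}])
table.optimize()
```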

View File

@@ -12,52 +12,25 @@ LanceDB OSS supports object stores such as AWS S3 (and compatible stores), Azure
=== "Python" === "Python"
AWS S3: AWS S3:
=== "Sync API"
```python ```python
import lancedb import lancedb
db = lancedb.connect("s3://bucket/path") db = lancedb.connect("s3://bucket/path")
``` ```
=== "Async API"
```python
import lancedb
async_db = await lancedb.connect_async("s3://bucket/path")
```
Google Cloud Storage: Google Cloud Storage:
=== "Sync API" ```python
import lancedb
```python db = lancedb.connect("gs://bucket/path")
import lancedb ```
db = lancedb.connect("gs://bucket/path")
```
=== "Async API"
```python
import lancedb
async_db = await lancedb.connect_async("gs://bucket/path")
```
Azure Blob Storage: Azure Blob Storage:
<!-- skip-test --> ```python
=== "Sync API" import lancedb
db = lancedb.connect("az://bucket/path")
```python ```
import lancedb
db = lancedb.connect("az://bucket/path")
```
<!-- skip-test -->
=== "Async API"
```python
import lancedb
async_db = await lancedb.connect_async("az://bucket/path")
```
Note that for Azure, storage credentials must be configured. See [below](#azure-blob-storage) for more details.
=== "TypeScript" === "TypeScript"
@@ -114,28 +87,22 @@ In most cases, when running in the respective cloud and permissions are set up c
export TIMEOUT=60s export TIMEOUT=60s
``` ```
!!! note "`storage_options` availability"
The `storage_options` parameter is only available in Python *async* API and JavaScript API.
It is not yet supported in the Python synchronous API.
If you only want this to apply to one particular connection, you can pass the `storage_options` argument when opening the connection: If you only want this to apply to one particular connection, you can pass the `storage_options` argument when opening the connection:
=== "Python" === "Python"
=== "Sync API" ```python
import lancedb
```python db = await lancedb.connect_async(
import lancedb "s3://bucket/path",
db = lancedb.connect( storage_options={"timeout": "60s"}
"s3://bucket/path", )
storage_options={"timeout": "60s"} ```
)
```
=== "Async API"
```python
import lancedb
async_db = await lancedb.connect_async(
"s3://bucket/path",
storage_options={"timeout": "60s"}
)
```
=== "TypeScript" === "TypeScript"
@@ -163,29 +130,15 @@ Getting even more specific, you can set the `timeout` for only a particular tabl
=== "Python" === "Python"
<!-- skip-test --> <!-- skip-test -->
=== "Sync API" ```python
import lancedb
```python db = await lancedb.connect_async("s3://bucket/path")
import lancedb table = await db.create_table(
db = lancedb.connect("s3://bucket/path") "table",
table = db.create_table( [{"a": 1, "b": 2}],
"table", storage_options={"timeout": "60s"}
[{"a": 1, "b": 2}], )
storage_options={"timeout": "60s"} ```
)
```
<!-- skip-test -->
=== "Async API"
```python
import lancedb
async_db = await lancedb.connect_async("s3://bucket/path")
async_table = await async_db.create_table(
"table",
[{"a": 1, "b": 2}],
storage_options={"timeout": "60s"}
)
```
=== "TypeScript" === "TypeScript"
@@ -243,32 +196,17 @@ These can be set as environment variables or passed in the `storage_options` par
=== "Python" === "Python"
=== "Sync API" ```python
import lancedb
```python db = await lancedb.connect_async(
import lancedb "s3://bucket/path",
db = lancedb.connect( storage_options={
"s3://bucket/path", "aws_access_key_id": "my-access-key",
storage_options={ "aws_secret_access_key": "my-secret-key",
"aws_access_key_id": "my-access-key", "aws_session_token": "my-session-token",
"aws_secret_access_key": "my-secret-key", }
"aws_session_token": "my-session-token", )
} ```
)
```
=== "Async API"
```python
import lancedb
async_db = await lancedb.connect_async(
"s3://bucket/path",
storage_options={
"aws_access_key_id": "my-access-key",
"aws_secret_access_key": "my-secret-key",
"aws_session_token": "my-session-token",
}
)
```
=== "TypeScript" === "TypeScript"
@@ -412,22 +350,12 @@ name of the table to use.
=== "Python" === "Python"
=== "Sync API" ```python
import lancedb
```python db = await lancedb.connect_async(
import lancedb "s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
db = lancedb.connect( )
"s3+ddb://bucket/path?ddbTableName=my-dynamodb-table", ```
)
```
=== "Async API"
```python
import lancedb
async_db = await lancedb.connect_async(
"s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
)
```
=== "JavaScript" === "JavaScript"
@@ -515,30 +443,16 @@ LanceDB can also connect to S3-compatible stores, such as MinIO. To do so, you m
=== "Python" === "Python"
=== "Sync API" ```python
import lancedb
```python db = await lancedb.connect_async(
import lancedb "s3://bucket/path",
db = lancedb.connect( storage_options={
"s3://bucket/path", "region": "us-east-1",
storage_options={ "endpoint": "http://minio:9000",
"region": "us-east-1", }
"endpoint": "http://minio:9000", )
} ```
)
```
=== "Async API"
```python
import lancedb
async_db = await lancedb.connect_async(
"s3://bucket/path",
storage_options={
"region": "us-east-1",
"endpoint": "http://minio:9000",
}
)
```
=== "TypeScript" === "TypeScript"
@@ -584,36 +498,22 @@ This can also be done with the ``AWS_ENDPOINT`` and ``AWS_DEFAULT_REGION`` envir
#### S3 Express #### S3 Express
LanceDB supports [S3 Express One Zone](https://aws.amazon.com/s3/storage-classes/express-one-zone/) endpoints, but requires additional infrastructure configuration for the compute service, such as EC2 or Lambda. Please refer to [Networking requirements for S3 Express One Zone](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html). LanceDB supports [S3 Express One Zone](https://aws.amazon.com/s3/storage-classes/express-one-zone/) endpoints, but requires additional configuration. Also, S3 Express endpoints only support connecting from an EC2 instance within the same region.
To configure LanceDB to use an S3 Express endpoint, you must set the storage option `s3_express`. The bucket name in your table URI should **include the suffix**. To configure LanceDB to use an S3 Express endpoint, you must set the storage option `s3_express`. The bucket name in your table URI should **include the suffix**.
=== "Python" === "Python"
=== "Sync API" ```python
import lancedb
```python db = await lancedb.connect_async(
import lancedb "s3://my-bucket--use1-az4--x-s3/path",
db = lancedb.connect( storage_options={
"s3://my-bucket--use1-az4--x-s3/path", "region": "us-east-1",
storage_options={ "s3_express": "true",
"region": "us-east-1", }
"s3_express": "true", )
} ```
)
```
=== "Async API"
```python
import lancedb
async_db = await lancedb.connect_async(
"s3://my-bucket--use1-az4--x-s3/path",
storage_options={
"region": "us-east-1",
"s3_express": "true",
}
)
```
=== "TypeScript" === "TypeScript"
@@ -654,29 +554,15 @@ GCS credentials are configured by setting the `GOOGLE_SERVICE_ACCOUNT` environme
=== "Python" === "Python"
<!-- skip-test --> <!-- skip-test -->
=== "Sync API" ```python
import lancedb
```python db = await lancedb.connect_async(
import lancedb "gs://my-bucket/my-database",
db = lancedb.connect( storage_options={
"gs://my-bucket/my-database", "service_account": "path/to/service-account.json",
storage_options={ }
"service_account": "path/to/service-account.json", )
} ```
)
```
<!-- skip-test -->
=== "Async API"
```python
import lancedb
async_db = await lancedb.connect_async(
"gs://my-bucket/my-database",
storage_options={
"service_account": "path/to/service-account.json",
}
)
```
=== "TypeScript" === "TypeScript"
@@ -728,31 +614,16 @@ Azure Blob Storage credentials can be configured by setting the `AZURE_STORAGE_A
=== "Python" === "Python"
<!-- skip-test --> <!-- skip-test -->
=== "Sync API" ```python
import lancedb
```python db = await lancedb.connect_async(
import lancedb "az://my-container/my-database",
db = lancedb.connect( storage_options={
"az://my-container/my-database", account_name: "some-account",
storage_options={ account_key: "some-key",
account_name: "some-account", }
account_key: "some-key", )
} ```
)
```
<!-- skip-test -->
=== "Async API"
```python
import lancedb
async_db = await lancedb.connect_async(
"az://my-container/my-database",
storage_options={
account_name: "some-account",
account_key: "some-key",
}
)
```
=== "TypeScript" === "TypeScript"

File diff suppressed because it is too large Load Diff

View File

@@ -1,135 +0,0 @@
The merge insert command is a flexible API that can be used to perform:
1. Upsert
2. Insert-if-not-exists
3. Replace range
It works by joining the input data with the target table on a key you provide.
Often this key is a unique row id key. You can then specify what to do when
there is a match and when there is not a match. For example, for upsert you want
to update if the row has a match and insert if the row doesn't have a match.
Whereas for insert-if-not-exists you only want to insert if the row doesn't have
a match.
You can also read more in the API reference:
* Python
* Sync: [lancedb.table.Table.merge_insert][]
* Async: [lancedb.table.AsyncTable.merge_insert][]
* Typescript: [lancedb.Table.mergeInsert](../../js/classes/Table.md/#mergeinsert)
!!! tip "Use scalar indices to speed up merge insert"
The merge insert command needs to perform a join between the input data and the
target table on the `on` key you provide. This requires scanning that entire
column, which can be expensive for large tables. To speed up this operation,
you can create a scalar index on the `on` column, which will allow LanceDB to
find matches without having to scan the whole table.
Read more about scalar indices in the [Building a Scalar Index](../scalar_index.md)
guide.
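For example (a minimal sketch; `id` stands in for whatever `on` key you use):
```python
# A scalar index on the join key speeds up the merge insert match lookup
table.create_scalar_index("id")
```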
!!! info "Embedding Functions"
Like the create table and add APIs, the merge insert API will automatically
compute embeddings if the table has an embedding definition in its schema.
If the input data doesn't contain the source column, or the vector column
is already filled, then the embeddings won't be computed. See the
[Embedding Functions](../../embeddings/embedding_functions.md) guide for more
information.
## Upsert
Upsert updates rows if they exist and inserts them if they don't. To do this
with merge insert, enable both `when_matched_update_all()` and
`when_not_matched_insert_all()`.
=== "Python"
=== "Sync API"
```python
--8<-- "python/python/tests/docs/test_merge_insert.py:upsert_basic"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_merge_insert.py:upsert_basic_async"
```
=== "Typescript"
=== "@lancedb/lancedb"
```typescript
--8<-- "nodejs/examples/merge_insert.test.ts:upsert_basic"
```
!!! note "Providing subsets of columns"
If a column is nullable, it can be omitted from input data and it will be
considered `null`. Columns can also be provided in any order.
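As a minimal sketch of the sync Python API described above (the `id` key and the example rows are hypothetical):
```python
new_users = [
    {"id": 1, "name": "Alice"},  # matches an existing row, so it is updated
    {"id": 3, "name": "Carol"},  # no match, so it is inserted
]

(
    table.merge_insert("id")
    .when_matched_update_all()
    .when_not_matched_insert_all()
    .execute(new_users)
)
```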
## Insert-if-not-exists
To avoid inserting duplicate rows, you can use the insert-if-not-exists command.
This will only insert rows that do not have a match in the target table. To do
this with merge insert, enable just `when_not_matched_insert_all()`.
=== "Python"
=== "Sync API"
```python
--8<-- "python/python/tests/docs/test_merge_insert.py:insert_if_not_exists"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_merge_insert.py:insert_if_not_exists_async"
```
=== "Typescript"
=== "@lancedb/lancedb"
```typescript
--8<-- "nodejs/examples/merge_insert.test.ts:insert_if_not_exists"
```
## Replace range
You can also replace a range of rows in the target table with the input data.
For example, if you have a table of document chunks, where each chunk has
both a `doc_id` and a `chunk_id`, you can replace all chunks for a given
`doc_id` with updated chunks. This can be tricky with a plain upsert: if the
new data has fewer chunks than before, stale chunks are left behind. To avoid
this, add another clause to delete any chunks for
the document that are not in the new data, with
`when_not_matched_by_source_delete`.
=== "Python"
=== "Sync API"
```python
--8<-- "python/python/tests/docs/test_merge_insert.py:replace_range"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_merge_insert.py:replace_range_async"
```
=== "Typescript"
=== "@lancedb/lancedb"
```typescript
--8<-- "nodejs/examples/merge_insert.test.ts:replace_range"
```
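A rough Python sketch of the same idea (the `doc_id`/`chunk_id` schema and the filter are hypothetical):
```python
new_chunks = [
    {"doc_id": 2, "chunk_id": 0, "text": "updated chunk 0"},
    {"doc_id": 2, "chunk_id": 1, "text": "updated chunk 1"},
]

(
    table.merge_insert(["doc_id", "chunk_id"])
    .when_matched_update_all()
    .when_not_matched_insert_all()
    .when_not_matched_by_source_delete("doc_id = 2")
    .execute(new_chunks)
)
```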

View File

@@ -1,8 +1,8 @@
## Improving retriever performance
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
VectorDBs are used as retrievers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retrievers are a critical component of Retrieval Augmented Generation (RAG) architectures. In this section, we will discuss how to improve the performance of retrievers.
There are several ways to improve the performance of retrievers. Some of the common techniques are:
@@ -19,7 +19,7 @@ Using different embedding models is something that's very specific to the use ca
## The dataset
We'll be using a QA dataset generated from a Llama2 review paper. The dataset contains 221 query, context and answer triplets. The queries and answers are generated using GPT-4 based on a given query. The full script used to generate the dataset can be found in this [repo](https://github.com/lancedb/ragged). It can be downloaded from [here](https://github.com/AyushExel/assets/blob/main/data_qa.csv).
### Using different query types
Let's set up the embeddings and the dataset first. We'll use LanceDB's `huggingface` embeddings integration for this guide.
@@ -45,14 +45,14 @@ table.add(df[["context"]].to_dict(orient="records"))
queries = df["query"].tolist() queries = df["query"].tolist()
``` ```
Now that we have the dataset and embeddings table set up, here's how you can run different query types on the dataset: Now that we have the dataset and embeddings table set up, here's how you can run different query types on the dataset.
* <b> Vector Search: </b> * <b> Vector Search: </b>
```python ```python
table.search(quries[0], query_type="vector").limit(5).to_pandas() table.search(quries[0], query_type="vector").limit(5).to_pandas()
``` ```
By default, LanceDB uses vector search query type for searching and it automatically converts the input query to a vector before searching when using embedding API. So, the following statement is equivalent to the above statement: By default, LanceDB uses vector search query type for searching and it automatically converts the input query to a vector before searching when using embedding API. So, the following statement is equivalent to the above statement.
```python ```python
table.search(quries[0]).limit(5).to_pandas() table.search(quries[0]).limit(5).to_pandas()
@@ -77,7 +77,7 @@ Now that we have the dataset and embeddings table set up, here's how you can run
* <b> Hybrid Search: </b>
Hybrid search is a combination of vector and full-text search. Here's how you can run a hybrid search query on the dataset:
```python
table.search(queries[0], query_type="hybrid").limit(5).to_pandas()
```
@@ -87,7 +87,7 @@ Now that we have the dataset and embeddings table set up, here's how you can run
!!! note "Note" !!! note "Note"
By default, it uses `LinearCombinationReranker` that combines the scores from vector and full-text search using a weighted linear combination. It is the simplest reranker implementation available in LanceDB. You can also use other rerankers like `CrossEncoderReranker` or `CohereReranker` for reranking the results. By default, it uses `LinearCombinationReranker` that combines the scores from vector and full-text search using a weighted linear combination. It is the simplest reranker implementation available in LanceDB. You can also use other rerankers like `CrossEncoderReranker` or `CohereReranker` for reranking the results.
Learn more about rerankers [here](https://lancedb.github.io/lancedb/reranking/). Learn more about rerankers [here](https://lancedb.github.io/lancedb/reranking/)

View File

@@ -1,6 +1,6 @@
Continuing from the previous section, we can now rerank the results using more complex rerankers.
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
## Reranking search results
You can rerank any search results using a reranker. The syntax for reranking is as follows:
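A minimal sketch of the pattern (the `CrossEncoderReranker` choice and the `queries` list are assumptions carried over from the previous section, not part of the original snippet):
```python
from lancedb.rerankers import CrossEncoderReranker

reranker = CrossEncoderReranker()

# Rerank the combined results of a hybrid (vector + full-text) query
results = (
    table.search(queries[0], query_type="hybrid")
    .rerank(reranker=reranker)
    .limit(5)
    .to_pandas()
)
```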
@@ -62,6 +62,9 @@ Let us take a look at the same datasets from the previous sections, using the sa
| Reranked fts | 0.672 |
| Hybrid | 0.759 |
### SQuAD Dataset
### Uber10K sec filing Dataset
| Query Type | Hit-rate@5 |

View File

@@ -1,5 +1,5 @@
## Finetuning the Embedding Model
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/embedding_tuner.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
Another way to improve retriever performance is to fine-tune the embedding model itself. Fine-tuning the embedding model can help in learning better representations for the documents and queries in the dataset. This can be particularly useful when the dataset is very different from the pre-trained data used to train the embedding model.
@@ -16,7 +16,7 @@ validation_df.to_csv("data_val.csv", index=False)
You can use any tuning API to fine-tune embedding models. In this example, we'll utilise Llama-index as it also comes with utilities for synthetic data generation and training the model.
We parse the dataset as llama-index text nodes and generate synthetic QA pairs from each node:
```python
from llama_index.core.node_parser import SentenceSplitter
from llama_index.readers.file import PagedCSVReader
@@ -43,7 +43,7 @@ val_dataset = generate_qa_embedding_pairs(
)
```
Now we'll use the `SentenceTransformersFinetuneEngine` to fine-tune the model. You can also use the `sentence-transformers` or `transformers` library to fine-tune the model:
```python
from llama_index.finetuning import SentenceTransformersFinetuneEngine
@@ -57,7 +57,7 @@ finetune_engine = SentenceTransformersFinetuneEngine(
finetune_engine.finetune()
embed_model = finetune_engine.get_finetuned_model()
```
This saves the fine-tuned embedding model in the `tuned_model` folder.
# Evaluation results
To evaluate the retriever, you can either use this model to ingest the data into LanceDB directly, or use llama-index's LanceDB integration to create a `VectorStoreIndex` and use it as a retriever.

View File

@@ -3,22 +3,22 @@
Hybrid Search is a broad (often misused) term. It can mean anything from combining multiple methods for searching, to applying ranking methods to better sort the results. In this blog, we use the definition of "hybrid search" to mean using a combination of keyword-based and vector search.
## The challenge of (re)ranking search results
Once you have a group of the most relevant search results from multiple search sources, you'd likely standardize the score and rank them accordingly. This process can also be seen as another independent step: reranking.
There are two approaches for reranking search results from multiple sources.
* <b>Score-based</b>: Calculate final relevance scores based on a weighted linear combination of individual search algorithm scores. Example: weighted linear combination of semantic search and keyword-based search results.
* <b>Relevance-based</b>: Discards the existing scores and calculates the relevance of each search result-query pair. Example: Cross Encoder models.
Even though there are many strategies for reranking search results, none works for all cases. Moreover, evaluating them is itself a challenge. Also, reranking can be dataset- or application-specific, so it's hard to generalize.
### Example evaluation of hybrid search with Reranking
Here are some evaluation numbers from an experiment comparing these rerankers on about 800 queries. It is a modified version of an evaluation script from [llama-index](https://github.com/run-llama/finetune-embedding/blob/main/evaluate.ipynb) that measures hit-rate at top-k.
<b> With OpenAI ada2 embedding </b>
Vector Search baseline: `0.64`
| Reranker | Top-3 | Top-5 | Top-10 |
| --- | --- | --- | --- |
@@ -33,7 +33,7 @@ Vector Search baseline: `0.64`
<b> With OpenAI embedding-v3-small </b>
Vector Search baseline: `0.59`
| Reranker | Top-3 | Top-5 | Top-10 |
| --- | --- | --- | --- |

View File

@@ -5,46 +5,57 @@ LanceDB supports both semantic and keyword-based search (also termed full-text s
## Hybrid search in LanceDB
You can perform hybrid search in LanceDB by combining the results of semantic and full-text search via a reranking algorithm of your choice. LanceDB provides multiple rerankers out of the box. However, you can always write a custom reranker if your use case needs more sophisticated logic.
=== "Sync API" ```python
import os
```python import lancedb
--8<-- "python/python/tests/docs/test_search.py:import-os" import openai
--8<-- "python/python/tests/docs/test_search.py:import-openai" from lancedb.embeddings import get_registry
--8<-- "python/python/tests/docs/test_search.py:import-lancedb" from lancedb.pydantic import LanceModel, Vector
--8<-- "python/python/tests/docs/test_search.py:import-embeddings"
--8<-- "python/python/tests/docs/test_search.py:import-pydantic"
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
--8<-- "python/python/tests/docs/test_search.py:import-openai-embeddings"
--8<-- "python/python/tests/docs/test_search.py:class-Documents"
--8<-- "python/python/tests/docs/test_search.py:basic_hybrid_search"
```
=== "Async API"
```python db = lancedb.connect("~/.lancedb")
--8<-- "python/python/tests/docs/test_search.py:import-os"
--8<-- "python/python/tests/docs/test_search.py:import-openai"
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
--8<-- "python/python/tests/docs/test_search.py:import-embeddings"
--8<-- "python/python/tests/docs/test_search.py:import-pydantic"
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
--8<-- "python/python/tests/docs/test_search.py:import-openai-embeddings"
--8<-- "python/python/tests/docs/test_search.py:class-Documents"
--8<-- "python/python/tests/docs/test_search.py:basic_hybrid_search_async"
```
# Ingest embedding function in LanceDB table
# Configuring the environment variable OPENAI_API_KEY
if "OPENAI_API_KEY" not in os.environ:
# OR set the key here as a variable
openai.api_key = "sk-..."
embeddings = get_registry().get("openai").create()
class Documents(LanceModel):
vector: Vector(embeddings.ndims()) = embeddings.VectorField()
text: str = embeddings.SourceField()
table = db.create_table("documents", schema=Documents)
data = [
{ "text": "rebel spaceships striking from a hidden base"},
{ "text": "have won their first victory against the evil Galactic Empire"},
{ "text": "during the battle rebel spies managed to steal secret plans"},
{ "text": "to the Empire's ultimate weapon the Death Star"}
]
# ingest docs with auto-vectorization
table.add(data)
# Create a fts index before the hybrid search
table.create_fts_index("text")
# hybrid search with default re-ranker
results = table.search("flower moon", query_type="hybrid").to_pandas()
```
!!! Note !!! Note
You can also pass the vector and text query manually. This is useful if you're not using the embedding API or if you're using a separate embedder service. You can also pass the vector and text query manually. This is useful if you're not using the embedding API or if you're using a separate embedder service.
### Explicitly passing the vector and text query ### Explicitly passing the vector and text query
=== "Sync API" ```python
vector_query = [0.1, 0.2, 0.3, 0.4, 0.5]
text_query = "flower moon"
results = table.search(query_type="hybrid")
.vector(vector_query)
.text(text_query)
.limit(5)
.to_pandas()
```python ```
--8<-- "python/python/tests/docs/test_search.py:hybrid_search_pass_vector_text"
```
=== "Async API"
```python
--8<-- "python/python/tests/docs/test_search.py:hybrid_search_pass_vector_text_async"
```
By default, LanceDB uses `RRFReranker()`, which uses a reciprocal rank fusion score to combine and rerank the results of semantic and full-text search. You can customize its hyperparameters as needed or write your own custom reranker. Here's how you can use any of the available rerankers:
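For instance, a weighted linear-combination reranker can be swapped in like this (a minimal sketch; the `weight` value is just an illustrative assumption):
```python
from lancedb.rerankers import LinearCombinationReranker

reranker = LinearCombinationReranker(weight=0.7)

results = (
    table.search("flower moon", query_type="hybrid")
    .rerank(reranker=reranker)
    .limit(5)
    .to_pandas()
)
```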
@@ -57,7 +68,7 @@ By default, LanceDB uses `RRFReranker()`, which uses reciprocal rank fusion scor
## Available Rerankers
LanceDB provides a number of rerankers out of the box. You can use any of these rerankers by passing them to the `rerank()` method.
Go to [Rerankers](../reranking/index.md) to learn more about using the available rerankers and implementing custom rerankers.

View File

@@ -49,8 +49,7 @@ The following pages go deeper into the internal of LanceDB and how to use it.
* [Working with tables](guides/tables.md): Learn how to work with tables and their associated functions
* [Indexing](ann_indexes.md): Understand how to create indexes
* [Vector search](search.md): Learn how to perform vector similarity search
* [Full-text search (native)](fts.md): Learn how to perform full-text search
* [Full-text search (tantivy-based)](fts_tantivy.md): Learn how to perform full-text search using Tantivy
* [Managing embeddings](embeddings/index.md): Managing embeddings and the embedding functions API in LanceDB
* [Ecosystem Integrations](integrations/index.md): Integrate LanceDB with other tools in the data ecosystem
* [Python API Reference](python/python.md): Python OSS and Cloud API references

View File

@@ -1,10 +1,5 @@
**LangChain** is a framework designed for building applications with large language models (LLMs) by chaining together various components. It supports a range of functionalities including memory, agents, and chat models, enabling developers to create context-aware applications.
![Illustration](https://raw.githubusercontent.com/lancedb/assets/refs/heads/main/docs/assets/integration/langchain_rag.png)
LangChain streamlines these stages (shown in the figure above) by providing pre-built components and tools for integration, memory management, and deployment, allowing developers to focus on application logic rather than underlying complexities.
Integrating **LangChain** with **LanceDB** enables applications to retrieve the most relevant data by comparing query vectors against stored vectors, facilitating effective information retrieval. This results in better, more context-aware replies and actions by the LLMs.
## Quick Start
You can load your document data using langchain's loaders. For this example, we are using `TextLoader` and `OpenAIEmbeddings` as the embedding model. Check out the complete example here: [LangChain demo](../notebooks/langchain_example.ipynb)
@@ -31,28 +26,20 @@ print(docs[0].page_content)
## Documentation
In the above example, the `LanceDB` vector store object is created using the `from_documents()` method, which is a `classmethod` that returns the initialized class object.
You can also use the `LanceDB.from_texts(texts: List[str], embedding: Embeddings)` class method.
The exhaustive list of parameters for the `LanceDB` vector store is:
- `connection`: (Optional) `lancedb.db.LanceDBConnection` connection object to use. If not provided, a new connection will be created.
|Name|type|Purpose|default|
|:----|:----|:----|:----|
|`connection`| (Optional) `Any` |`lancedb.db.LanceDBConnection` connection object to use. If not provided, a new connection will be created.|`None`|
|`embedding`| (Optional) `Embeddings` | Langchain embedding model.|Provided by user.|
|`uri`| (Optional) `str` |It specifies the directory location of **LanceDB database** and establishes a connection that can be used to interact with the database. |`/tmp/lancedb`|
|`vector_key` |(Optional) `str`| Column name to use for vector's in the table.|`'vector'`|
|`id_key` |(Optional) `str`| Column name to use for id's in the table.|`'id'`|
|`text_key` |(Optional) `str` |Column name to use for text in the table.|`'text'`|
|`table_name` |(Optional) `str`| Name of your table in the database.|`'vectorstore'`|
|`api_key` |(Optional `str`) |API key to use for LanceDB cloud database.|`None`|
|`region` |(Optional) `str`| Region to use for LanceDB cloud database.|Only for LanceDB Cloud : `None`.|
|`mode` |(Optional) `str` |Mode to use for adding data to the table. Valid values are "append" and "overwrite".|`'overwrite'`|
|`table`| (Optional) `Any`|You can connect to an existing table of LanceDB, created outside of langchain, and utilize it.|`None`|
|`distance`|(Optional) `str`|The choice of distance metric used to calculate the similarity between vectors.|`'l2'`|
|`reranker` |(Optional) `Any`|The reranker to use for LanceDB.|`None`|
|`relevance_score_fn` |(Optional) `Callable[[float], float]` | Langchain relevance score function to be used.|`None`|
|`limit`|`int`|Set the maximum number of results to return.|`DEFAULT_K` (it is 4)|
```python
db_url = "db://lang_test" # url of db you created
@@ -64,24 +51,19 @@ vector_store = LanceDB(
api_key=api_key, #(dont include for local API)
region=region, #(dont include for local API)
embedding=embeddings,
table_name='langchain_test' # Optional
)
```
### Methods
##### add_texts()
- `texts`: `Iterable` of strings to add to the vectorstore.
- `metadatas`: Optional `list[dict()]` of metadatas associated with the texts.
- `ids`: Optional `list` of ids to associate with the texts.
- `kwargs`: `Any`
This method turns texts into embeddings and adds them to the database.
|Name|Purpose|defaults|
|:---|:---|:---|
|`texts`|`Iterable` of strings to add to the vectorstore.|Provided by user|
|`metadatas`|Optional `list[dict()]` of metadatas associated with the texts.|`None`|
|`ids`|Optional `list` of ids to associate with the texts.|`None`|
|`kwargs`| Other keyworded arguments provided by the user. |-|
It returns a list of IDs of the added texts.
```python
vector_store.add_texts(texts = ['test_123'], metadatas =[{'source' :'wiki'}])
@@ -96,25 +78,14 @@ pd_df.to_csv("docsearch.csv", index=False)
# you can also create a new vector store object using an older connection object:
vector_store = LanceDB(connection=tbl, embedding=embeddings)
```
------
##### create_index()
- `col_name`: `Optional[str] = None`
- `vector_col`: `Optional[str] = None`
- `num_partitions`: `Optional[int] = 256`
- `num_sub_vectors`: `Optional[int] = 96`
- `index_cache_size`: `Optional[int] = None`
This method creates a scalar (for non-vector columns) or a vector index on a table.
|Name|type|Purpose|defaults|
|:---|:---|:---|:---|
|`vector_col`|`Optional[str]`| Provide if you want to create index on a vector column. |`None`|
|`col_name`|`Optional[str]`| Provide if you want to create index on a non-vector column. |`None`|
|`metric`|`Optional[str]` |Provide the metric to use for vector index. choice of metrics: 'L2', 'dot', 'cosine'. |`L2`|
|`num_partitions`|`Optional[int]`|Number of partitions to use for the index.|`256`|
|`num_sub_vectors`|`Optional[int]` |Number of sub-vectors to use for the index.|`96`|
|`index_cache_size`|`Optional[int]` |Size of the index cache.|`None`|
|`name`|`Optional[str]` |Name of the table to create index on.|`None`|
For index creation, make sure your table has enough data in it. An ANN index is usually not needed for datasets of ~100K vectors. For large-scale (>1M) or higher-dimension vectors, it is beneficial to create an ANN index.
```python
# for creating vector index
@@ -125,63 +96,42 @@ vector_store.create_index(col_name='text')
```
------
##### similarity_search()
- `query`: `str`
- `k`: `Optional[int] = None`
- `filter`: `Optional[Dict[str, str]] = None`
- `fts`: `Optional[bool] = False`
- `name`: `Optional[str] = None`
- `kwargs`: `Any`
This method performs a similarity search based on a **text query**.
| Name | Type | Purpose | Default |
|---------|----------------------|---------|---------|
| `query` | `str` | A `str` representing the text query that you want to search for in the vector store. | N/A |
| `k` | `Optional[int]` | It specifies the number of documents to return. | `None` |
| `filter` | `Optional[Dict[str, str]]`| It is used to filter the search results by specific metadata criteria. | `None` |
| `fts` | `Optional[bool]` | It indicates whether to perform a full-text search (FTS). | `False` |
| `name` | `Optional[str]` | It is used for specifying the name of the table to query. If not provided, it uses the default table set during the initialization of the LanceDB instance. | `None` |
| `kwargs` | `Any` | Other keyworded arguments provided by the user. | N/A |
Return documents most similar to the query **without relevance scores**.
```python
docs = docsearch.similarity_search(query)
print(docs[0].page_content)
```
------
##### similarity_search_by_vector()
- `embedding`: `List[float]`
- `k`: `Optional[int] = None`
- `filter`: `Optional[Dict[str, str]] = None`
- `name`: `Optional[str] = None`
- `kwargs`: `Any`
The method returns documents that are most similar to the specified **embedding (query) vector**.
| Name | Type | Purpose | Default |
|-------------|---------------------------|---------|---------|
| `embedding` | `List[float]` | The embedding vector you want to use to search for similar documents in the vector store. | N/A |
| `k` | `Optional[int]` | It specifies the number of documents to return. | `None` |
| `filter` | `Optional[Dict[str, str]]`| It is used to filter the search results by specific metadata criteria. | `None` |
| `name` | `Optional[str]` | It is used for specifying the name of the table to query. If not provided, it uses the default table set during the initialization of the LanceDB instance. | `None` |
| `kwargs` | `Any` | Other keyworded arguments provided by the user. | N/A |
**It does not provide relevance scores.**
```python
docs = docsearch.similarity_search_by_vector(query)
print(docs[0].page_content)
```
------
##### similarity_search_with_score()
- `query`: `str`
- `k`: `Optional[int] = None`
- `filter`: `Optional[Dict[str, str]] = None`
- `kwargs`: `Any`
Returns documents most similar to the **query string** along with their relevance scores.
| Name | Type | Purpose | Default |
|----------|---------------------------|---------|---------|
| `query` | `str` |A `str` representing the text query you want to search for in the vector store. This query will be converted into an embedding using the specified embedding function. | N/A |
| `k` | `Optional[int]` | It specifies the number of documents to return. | `None` |
| `filter` | `Optional[Dict[str, str]]`| It is used to filter the search results by specific metadata criteria. This allows you to narrow down the search results based on certain metadata attributes associated with the documents. | `None` |
| `kwargs` | `Any` | Other keyworded arguments provided by the user. | N/A |
It gets called by base class's `similarity_search_with_relevance_scores` which selects relevance score based on our `_select_relevance_score_fn`.
```python
docs = docsearch.similarity_search_with_relevance_scores(query)
@@ -189,21 +139,15 @@ print("relevance score - ", docs[0][1])
print("text- ", docs[0][0].page_content[:1000]) print("text- ", docs[0][0].page_content[:1000])
``` ```
------
##### similarity_search_by_vector_with_relevance_scores() ##### similarity_search_by_vector_with_relevance_scores()
- `embedding`: `List[float]`
- `k`: `Optional[int] = None`
- `filter`: `Optional[Dict[str, str]] = None`
- `name`: `Optional[str] = None`
- `kwargs`: `Any`
Similarity search using a **query vector**.
| Name | Type | Purpose | Default |
|-------------|---------------------------|---------|---------|
| `embedding` | `List[float]` | The embedding vector you want to use to search for similar documents in the vector store. | N/A |
| `k` | `Optional[int]` | It specifies the number of documents to return. | `None` |
| `filter` | `Optional[Dict[str, str]]`| It is used to filter the search results by specific metadata criteria. | `None` |
| `name` | `Optional[str]` | It is used for specifying the name of the table to query. | `None` |
| `kwargs` | `Any` | Other keyworded arguments provided by the user. | N/A |
The method returns documents most similar to the specified embedding (query) vector, along with their relevance scores.
```python
docs = docsearch.similarity_search_by_vector_with_relevance_scores(query_embedding)
@@ -211,22 +155,20 @@ print("relevance score - ", docs[0][1])
print("text- ", docs[0][0].page_content[:1000]) print("text- ", docs[0][0].page_content[:1000])
``` ```
------
##### max_marginal_relevance_search() ##### max_marginal_relevance_search()
- `query`: `str`
- `k`: `Optional[int] = None`
- `fetch_k` : Number of Documents to fetch to pass to MMR algorithm, `Optional[int] = None`
- `lambda_mult`: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5. `float = 0.5`
- `filter`: `Optional[Dict[str, str]] = None`
- `kwargs`: `Any`
This method returns docs selected using maximal marginal relevance (MMR).
Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents.
| Name | Type | Purpose | Default |
|---------------|-----------------|-----------|---------|
| `query` | `str` | Text to look up documents similar to. | N/A |
| `k` | `Optional[int]` | Number of Documents to return.| `4` |
| `fetch_k`| `Optional[int]`| Number of Documents to fetch to pass to MMR algorithm.| `None` |
| `lambda_mult` | `float` | Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. | `0.5` |
| `filter`| `Optional[Dict[str, str]]`| Filter by metadata. | `None` |
|`kwargs`| Other keyworded arguments provided by the user. |-|
Similarly, the `max_marginal_relevance_search_by_vector()` function returns docs most similar to the embedding passed to it using MMR. Instead of a string query, you need to pass the embedding to be searched for.
```python
@@ -244,19 +186,12 @@ result_texts = [doc.page_content for doc in result]
print(result_texts)
```
------
##### add_images()
- `uris` : File path to the image. `List[str]`.
- `metadatas` : Optional list of metadatas. `(Optional[List[dict]], optional)`
- `ids` : Optional list of IDs. `(Optional[List[str]], optional)`
This method adds images by automatically creating their embeddings and adding them to the vectorstore.
| Name | Type | Purpose | Default |
|------------|-------------------------------|--------------------------------|---------|
| `uris` | `List[str]` | File path to the image | N/A |
| `metadatas`| `Optional[List[dict]]` | Optional list of metadatas | `None` |
| `ids` | `Optional[List[str]]` | Optional list of IDs | `None` |
It returns a list of IDs of the added images.
```python
vec_store.add_images(uris=image_uris)
```
View File

@@ -45,7 +45,7 @@ Let's see how using LanceDB inside phidata helps in making LLM more useful:
**Install the following packages in the virtual environment**
```bash
pip install lancedb phidata youtube_transcript_api openai ollama numpy pandas
```
**Create python files and import necessary libraries**

View File

@@ -41,6 +41,7 @@ To build everything fresh:
```bash ```bash
npm install npm install
npm run tsc
npm run build npm run build
``` ```
@@ -50,6 +51,18 @@ Then you should be able to run the tests with:
npm test npm test
``` ```
### Rebuilding Rust library
```bash
npm run build
```
### Rebuilding TypeScript
```bash
npm run tsc
```
### Fix lints ### Fix lints
To run the linter and have it automatically fix all errors To run the linter and have it automatically fix all errors

View File

@@ -38,4 +38,4 @@ A [WriteMode](../enums/WriteMode.md) to use on this operation
#### Defined in #### Defined in
[index.ts:1359](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1359) [index.ts:1019](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L1019)

View File

@@ -30,7 +30,6 @@ A connection to a LanceDB database.
- [dropTable](LocalConnection.md#droptable) - [dropTable](LocalConnection.md#droptable)
- [openTable](LocalConnection.md#opentable) - [openTable](LocalConnection.md#opentable)
- [tableNames](LocalConnection.md#tablenames) - [tableNames](LocalConnection.md#tablenames)
- [withMiddleware](LocalConnection.md#withmiddleware)
## Constructors ## Constructors
@@ -47,7 +46,7 @@ A connection to a LanceDB database.
#### Defined in #### Defined in
[index.ts:739](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L739) [index.ts:489](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L489)
## Properties ## Properties
@@ -57,7 +56,7 @@ A connection to a LanceDB database.
#### Defined in #### Defined in
[index.ts:737](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L737) [index.ts:487](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L487)
___ ___
@@ -75,7 +74,7 @@ ___
#### Defined in #### Defined in
[index.ts:736](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L736) [index.ts:486](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L486)
## Accessors ## Accessors
@@ -93,7 +92,7 @@ ___
#### Defined in #### Defined in
[index.ts:744](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L744) [index.ts:494](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L494)
## Methods ## Methods
@@ -114,7 +113,7 @@ Creates a new Table, optionally initializing it with new data.
| Name | Type | | Name | Type |
| :------ | :------ | | :------ | :------ |
| `name` | `string` \| [`CreateTableOptions`](../interfaces/CreateTableOptions.md)\<`T`\> | | `name` | `string` \| [`CreateTableOptions`](../interfaces/CreateTableOptions.md)\<`T`\> |
| `data?` | `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[] | | `data?` | `Record`\<`string`, `unknown`\>[] |
| `optsOrEmbedding?` | [`WriteOptions`](../interfaces/WriteOptions.md) \| [`EmbeddingFunction`](../interfaces/EmbeddingFunction.md)\<`T`\> | | `optsOrEmbedding?` | [`WriteOptions`](../interfaces/WriteOptions.md) \| [`EmbeddingFunction`](../interfaces/EmbeddingFunction.md)\<`T`\> |
| `opt?` | [`WriteOptions`](../interfaces/WriteOptions.md) | | `opt?` | [`WriteOptions`](../interfaces/WriteOptions.md) |
@@ -128,7 +127,7 @@ Creates a new Table, optionally initializing it with new data.
#### Defined in #### Defined in
[index.ts:788](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L788) [index.ts:542](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L542)
___ ___
@@ -159,7 +158,7 @@ ___
#### Defined in #### Defined in
[index.ts:822](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L822) [index.ts:576](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L576)
___ ___
@@ -185,7 +184,7 @@ Drop an existing table.
#### Defined in #### Defined in
[index.ts:876](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L876) [index.ts:630](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L630)
___ ___
@@ -211,7 +210,7 @@ Open a table in the database.
#### Defined in #### Defined in
[index.ts:760](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L760) [index.ts:510](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L510)
**openTable**\<`T`\>(`name`, `embeddings`): `Promise`\<[`Table`](../interfaces/Table.md)\<`T`\>\> **openTable**\<`T`\>(`name`, `embeddings`): `Promise`\<[`Table`](../interfaces/Table.md)\<`T`\>\>
@@ -240,7 +239,7 @@ Connection.openTable
#### Defined in #### Defined in
[index.ts:768](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L768) [index.ts:518](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L518)
**openTable**\<`T`\>(`name`, `embeddings?`): `Promise`\<[`Table`](../interfaces/Table.md)\<`T`\>\> **openTable**\<`T`\>(`name`, `embeddings?`): `Promise`\<[`Table`](../interfaces/Table.md)\<`T`\>\>
@@ -267,7 +266,7 @@ Connection.openTable
#### Defined in #### Defined in
[index.ts:772](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L772) [index.ts:522](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L522)
___ ___
@@ -287,36 +286,4 @@ Get the names of all tables in the database.
#### Defined in #### Defined in
[index.ts:751](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L751) [index.ts:501](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L501)
___
### withMiddleware
**withMiddleware**(`middleware`): [`Connection`](../interfaces/Connection.md)
Instrument the behavior of this Connection with middleware.
The middleware will be called in the order they are added.
Currently this functionality is only supported for remote Connections.
#### Parameters
| Name | Type |
| :------ | :------ |
| `middleware` | `HttpMiddleware` |
#### Returns
[`Connection`](../interfaces/Connection.md)
- this Connection instrumented by the passed middleware
#### Implementation of
[Connection](../interfaces/Connection.md).[withMiddleware](../interfaces/Connection.md#withmiddleware)
#### Defined in
[index.ts:880](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L880)

View File

@@ -37,8 +37,6 @@ A LanceDB Table is the collection of Records. Each Record has one or more vector
### Methods ### Methods
- [add](LocalTable.md#add) - [add](LocalTable.md#add)
- [addColumns](LocalTable.md#addcolumns)
- [alterColumns](LocalTable.md#altercolumns)
- [checkElectron](LocalTable.md#checkelectron) - [checkElectron](LocalTable.md#checkelectron)
- [cleanupOldVersions](LocalTable.md#cleanupoldversions) - [cleanupOldVersions](LocalTable.md#cleanupoldversions)
- [compactFiles](LocalTable.md#compactfiles) - [compactFiles](LocalTable.md#compactfiles)
@@ -46,16 +44,13 @@ A LanceDB Table is the collection of Records. Each Record has one or more vector
- [createIndex](LocalTable.md#createindex) - [createIndex](LocalTable.md#createindex)
- [createScalarIndex](LocalTable.md#createscalarindex) - [createScalarIndex](LocalTable.md#createscalarindex)
- [delete](LocalTable.md#delete) - [delete](LocalTable.md#delete)
- [dropColumns](LocalTable.md#dropcolumns)
- [filter](LocalTable.md#filter) - [filter](LocalTable.md#filter)
- [getSchema](LocalTable.md#getschema) - [getSchema](LocalTable.md#getschema)
- [indexStats](LocalTable.md#indexstats) - [indexStats](LocalTable.md#indexstats)
- [listIndices](LocalTable.md#listindices) - [listIndices](LocalTable.md#listindices)
- [mergeInsert](LocalTable.md#mergeinsert)
- [overwrite](LocalTable.md#overwrite) - [overwrite](LocalTable.md#overwrite)
- [search](LocalTable.md#search) - [search](LocalTable.md#search)
- [update](LocalTable.md#update) - [update](LocalTable.md#update)
- [withMiddleware](LocalTable.md#withmiddleware)
## Constructors ## Constructors
@@ -79,7 +74,7 @@ A LanceDB Table is the collection of Records. Each Record has one or more vector
#### Defined in #### Defined in
[index.ts:892](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L892) [index.ts:642](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L642)
**new LocalTable**\<`T`\>(`tbl`, `name`, `options`, `embeddings`) **new LocalTable**\<`T`\>(`tbl`, `name`, `options`, `embeddings`)
@@ -100,7 +95,7 @@ A LanceDB Table is the collection of Records. Each Record has one or more vector
#### Defined in #### Defined in
[index.ts:899](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L899) [index.ts:649](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L649)
## Properties ## Properties
@@ -110,7 +105,7 @@ A LanceDB Table is the collection of Records. Each Record has one or more vector
#### Defined in #### Defined in
[index.ts:889](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L889) [index.ts:639](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L639)
___ ___
@@ -120,7 +115,7 @@ ___
#### Defined in #### Defined in
[index.ts:888](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L888) [index.ts:638](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L638)
___ ___
@@ -130,7 +125,7 @@ ___
#### Defined in #### Defined in
[index.ts:887](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L887) [index.ts:637](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L637)
___ ___
@@ -148,7 +143,7 @@ ___
#### Defined in #### Defined in
[index.ts:890](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L890) [index.ts:640](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L640)
___ ___
@@ -158,7 +153,7 @@ ___
#### Defined in #### Defined in
[index.ts:886](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L886) [index.ts:636](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L636)
___ ___
@@ -184,7 +179,7 @@ Creates a filter query to find all rows matching the specified criteria
#### Defined in #### Defined in
[index.ts:938](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L938) [index.ts:688](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L688)
## Accessors ## Accessors
@@ -202,7 +197,7 @@ Creates a filter query to find all rows matching the specified criteria
#### Defined in #### Defined in
[index.ts:918](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L918) [index.ts:668](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L668)
___ ___
@@ -220,7 +215,7 @@ ___
#### Defined in #### Defined in
[index.ts:1171](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1171) [index.ts:849](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L849)
## Methods ## Methods
@@ -234,7 +229,7 @@ Insert records into this Table.
| Name | Type | Description | | Name | Type | Description |
| :------ | :------ | :------ | | :------ | :------ | :------ |
| `data` | `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[] | Records to be inserted into the Table | | `data` | `Record`\<`string`, `unknown`\>[] | Records to be inserted into the Table |
#### Returns #### Returns
@@ -248,59 +243,7 @@ The number of rows added to the table
#### Defined in #### Defined in
[index.ts:946](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L946) [index.ts:696](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L696)
___
### addColumns
**addColumns**(`newColumnTransforms`): `Promise`\<`void`\>
Add new columns with defined values.
#### Parameters
| Name | Type | Description |
| :------ | :------ | :------ |
| `newColumnTransforms` | \{ `name`: `string` ; `valueSql`: `string` }[] | pairs of column names and the SQL expression to use to calculate the value of the new column. These expressions will be evaluated for each row in the table, and can reference existing columns in the table. |
#### Returns
`Promise`\<`void`\>
#### Implementation of
[Table](../interfaces/Table.md).[addColumns](../interfaces/Table.md#addcolumns)
#### Defined in
[index.ts:1195](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1195)
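A minimal sketch of the `newColumnTransforms` shape (the table variable, column names, and SQL expressions are illustrative):

```ts
// Each entry adds one column whose value is computed per row from the SQL expression
await table.addColumns([
  { name: "price_with_tax", valueSql: "price * 1.08" },
  { name: "source", valueSql: "'imported'" }, // constant-valued column
]);
```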
___
### alterColumns
**alterColumns**(`columnAlterations`): `Promise`\<`void`\>
Alter the name or nullability of columns.
#### Parameters
| Name | Type | Description |
| :------ | :------ | :------ |
| `columnAlterations` | [`ColumnAlteration`](../interfaces/ColumnAlteration.md)[] | One or more alterations to apply to columns. |
#### Returns
`Promise`\<`void`\>
#### Implementation of
[Table](../interfaces/Table.md).[alterColumns](../interfaces/Table.md#altercolumns)
#### Defined in
[index.ts:1201](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1201)
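A minimal sketch using the `ColumnAlteration` shape documented further down in this diff (column paths are illustrative):

```ts
// Rename one column and make another nullable
await table.alterColumns([
  { path: "price", rename: "unit_price" },
  { path: "metadata.note", nullable: true },
]);
```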
___ ___
@@ -314,7 +257,7 @@ ___
#### Defined in #### Defined in
[index.ts:1183](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1183) [index.ts:861](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L861)
___ ___
@@ -337,7 +280,7 @@ Clean up old versions of the table, freeing disk space.
#### Defined in #### Defined in
[index.ts:1130](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1130) [index.ts:808](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L808)
___ ___
@@ -364,22 +307,16 @@ Metrics about the compaction operation.
#### Defined in #### Defined in
[index.ts:1153](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1153) [index.ts:831](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L831)
___ ___
### countRows ### countRows
**countRows**(`filter?`): `Promise`\<`number`\> **countRows**(): `Promise`\<`number`\>
Returns the number of rows in this table. Returns the number of rows in this table.
#### Parameters
| Name | Type |
| :------ | :------ |
| `filter?` | `string` |
#### Returns #### Returns
`Promise`\<`number`\> `Promise`\<`number`\>
@@ -390,7 +327,7 @@ Returns the number of rows in this table.
#### Defined in #### Defined in
[index.ts:1021](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1021) [index.ts:749](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L749)
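A short sketch of both call shapes shown in this diff; the `filter` argument is only available on the side of the diff that documents it:

```ts
const total = await table.countRows();              // count all rows
const cheap = await table.countRows("price < 10");  // filtered count, where supported
```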
___ ___
@@ -420,13 +357,13 @@ VectorIndexParams.
#### Defined in #### Defined in
[index.ts:1003](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1003) [index.ts:734](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L734)
___ ___
### createScalarIndex ### createScalarIndex
**createScalarIndex**(`column`, `replace?`): `Promise`\<`void`\> **createScalarIndex**(`column`, `replace`): `Promise`\<`void`\>
Create a scalar index on this Table for the given column Create a scalar index on this Table for the given column
@@ -435,7 +372,7 @@ Create a scalar index on this Table for the given column
| Name | Type | Description | | Name | Type | Description |
| :------ | :------ | :------ | | :------ | :------ | :------ |
| `column` | `string` | The column to index | | `column` | `string` | The column to index |
| `replace?` | `boolean` | If false, fail if an index already exists on the column it is always set to true for remote connections Scalar indices, like vector indices, can be used to speed up scans. A scalar index can speed up scans that contain filter expressions on the indexed column. For example, the following scan will be faster if the column `my_col` has a scalar index: ```ts const con = await lancedb.connect('./.lancedb'); const table = await con.openTable('images'); const results = await table.where('my_col = 7').execute(); ``` Scalar indices can also speed up scans containing a vector search and a prefilter: ```ts const con = await lancedb.connect('././lancedb'); const table = await con.openTable('images'); const results = await table.search([1.0, 2.0]).where('my_col != 7').prefilter(true); ``` Scalar indices can only speed up scans for basic filters using equality, comparison, range (e.g. `my_col BETWEEN 0 AND 100`), and set membership (e.g. `my_col IN (0, 1, 2)`) Scalar indices can be used if the filter contains multiple indexed columns and the filter criteria are AND'd or OR'd together (e.g. `my_col < 0 AND other_col> 100`) Scalar indices may be used if the filter contains non-indexed columns but, depending on the structure of the filter, they may not be usable. For example, if the column `not_indexed` does not have a scalar index then the filter `my_col = 0 OR not_indexed = 1` will not be able to use any scalar index on `my_col`. | | `replace` | `boolean` | If false, fail if an index already exists on the column Scalar indices, like vector indices, can be used to speed up scans. A scalar index can speed up scans that contain filter expressions on the indexed column. For example, the following scan will be faster if the column `my_col` has a scalar index: ```ts const con = await lancedb.connect('./.lancedb'); const table = await con.openTable('images'); const results = await table.where('my_col = 7').execute(); ``` Scalar indices can also speed up scans containing a vector search and a prefilter: ```ts const con = await lancedb.connect('././lancedb'); const table = await con.openTable('images'); const results = await table.search([1.0, 2.0]).where('my_col != 7').prefilter(true); ``` Scalar indices can only speed up scans for basic filters using equality, comparison, range (e.g. `my_col BETWEEN 0 AND 100`), and set membership (e.g. `my_col IN (0, 1, 2)`) Scalar indices can be used if the filter contains multiple indexed columns and the filter criteria are AND'd or OR'd together (e.g. `my_col < 0 AND other_col> 100`) Scalar indices may be used if the filter contains non-indexed columns but, depending on the structure of the filter, they may not be usable. For example, if the column `not_indexed` does not have a scalar index then the filter `my_col = 0 OR not_indexed = 1` will not be able to use any scalar index on `my_col`. |
#### Returns #### Returns
@@ -455,7 +392,7 @@ await table.createScalarIndex('my_col')
#### Defined in #### Defined in
[index.ts:1011](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1011) [index.ts:742](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L742)
___ ___
@@ -481,38 +418,7 @@ Delete rows from this table.
#### Defined in #### Defined in
[index.ts:1030](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1030) [index.ts:758](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L758)
___
### dropColumns
▸ **dropColumns**(`columnNames`): `Promise`\<`void`\>
Drop one or more columns from the dataset
This is a metadata-only operation and does not remove the data from the
underlying storage. In order to remove the data, you must subsequently
call ``compact_files`` to rewrite the data without the removed columns and
then call ``cleanup_files`` to remove the old files.
#### Parameters
| Name | Type | Description |
| :------ | :------ | :------ |
| `columnNames` | `string`[] | The names of the columns to drop. These can be nested column references (e.g. "a.b.c") or top-level column names (e.g. "a"). |
#### Returns
`Promise`\<`void`\>
#### Implementation of
[Table](../interfaces/Table.md).[dropColumns](../interfaces/Table.md#dropcolumns)
#### Defined in
[index.ts:1205](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1205)
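A hedged sketch of dropping columns and then reclaiming space with the compaction and cleanup methods listed for this table (column names are illustrative):

```ts
await table.dropColumns(["debug_info", "metadata.tmp"]); // metadata-only operation
// Rewrite the data files without the dropped columns, then remove old versions
await table.compactFiles();
await table.cleanupOldVersions();
```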
___ ___
@@ -532,13 +438,9 @@ Creates a filter query to find all rows matching the specified criteria
[`Query`](Query.md)\<`T`\> [`Query`](Query.md)\<`T`\>
#### Implementation of
[Table](../interfaces/Table.md).[filter](../interfaces/Table.md#filter)
#### Defined in #### Defined in
[index.ts:934](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L934) [index.ts:684](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L684)
___ ___
@@ -552,13 +454,13 @@ ___
#### Defined in #### Defined in
[index.ts:1176](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1176) [index.ts:854](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L854)
___ ___
### indexStats ### indexStats
▸ **indexStats**(`indexName`): `Promise`\<[`IndexStats`](../interfaces/IndexStats.md)\> ▸ **indexStats**(`indexUuid`): `Promise`\<[`IndexStats`](../interfaces/IndexStats.md)\>
Get statistics about an index. Get statistics about an index.
@@ -566,7 +468,7 @@ Get statistics about an index.
| Name | Type | | Name | Type |
| :------ | :------ | | :------ | :------ |
| `indexName` | `string` | | `indexUuid` | `string` |
#### Returns #### Returns
@@ -578,7 +480,7 @@ Get statistics about an index.
#### Defined in #### Defined in
[index.ts:1167](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1167) [index.ts:845](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L845)
___ ___
@@ -598,57 +500,7 @@ List the indicies on this table.
#### Defined in #### Defined in
[index.ts:1163](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1163) [index.ts:841](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L841)
___
### mergeInsert
▸ **mergeInsert**(`on`, `data`, `args`): `Promise`\<`void`\>
Runs a "merge insert" operation on the table
This operation can add rows, update rows, and remove rows all in a single
transaction. It is a very generic tool that can be used to create
behaviors like "insert if not exists", "update or insert (i.e. upsert)",
or even replace a portion of existing data with new data (e.g. replace
all data where month="january")
The merge insert operation works by combining new data from a
**source table** with existing data in a **target table** by using a
join. There are three categories of records.
"Matched" records are records that exist in both the source table and
the target table. "Not matched" records exist only in the source table
(e.g. these are new data) "Not matched by source" records exist only
in the target table (this is old data)
The MergeInsertArgs can be used to customize what should happen for
each category of data.
Please note that the data may appear to be reordered as part of this
operation. This is because updated rows will be deleted from the
dataset and then reinserted at the end with the new values.
#### Parameters
| Name | Type | Description |
| :------ | :------ | :------ |
| `on` | `string` | a column to join on. This is how records from the source table and target table are matched. |
| `data` | `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[] | the new data to insert |
| `args` | [`MergeInsertArgs`](../interfaces/MergeInsertArgs.md) | parameters controlling how the operation should behave |
#### Returns
`Promise`\<`void`\>
#### Implementation of
[Table](../interfaces/Table.md).[mergeInsert](../interfaces/Table.md#mergeinsert)
#### Defined in
[index.ts:1065](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1065)
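A hedged upsert-style sketch; the `MergeInsertArgs` field names below are assumptions and should be checked against that interface:

```ts
// Update rows whose `id` matches, insert rows that do not exist yet
await table.mergeInsert("id", newRecords, {
  whenMatchedUpdateAll: true,    // assumed MergeInsertArgs flag
  whenNotMatchedInsertAll: true, // assumed MergeInsertArgs flag
});
```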
___ ___
@@ -662,7 +514,7 @@ Insert records into this Table, replacing its contents.
| Name | Type | Description | | Name | Type | Description |
| :------ | :------ | :------ | | :------ | :------ | :------ |
| `data` | `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[] | Records to be inserted into the Table | | `data` | `Record`\<`string`, `unknown`\>[] | Records to be inserted into the Table |
#### Returns #### Returns
@@ -676,7 +528,7 @@ The number of rows added to the table
#### Defined in #### Defined in
[index.ts:977](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L977) [index.ts:716](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L716)
___ ___
@@ -702,7 +554,7 @@ Creates a search query to find the nearest neighbors of the given search term
#### Defined in #### Defined in
[index.ts:926](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L926) [index.ts:676](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L676)
___ ___
@@ -728,36 +580,4 @@ Update rows in this table.
#### Defined in #### Defined in
[index.ts:1043](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1043) [index.ts:771](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L771)
___
### withMiddleware
▸ **withMiddleware**(`middleware`): [`Table`](../interfaces/Table.md)\<`T`\>
Instrument the behavior of this Table with middleware.
The middleware will be called in the order they are added.
Currently this functionality is only supported for remote tables.
#### Parameters
| Name | Type |
| :------ | :------ |
| `middleware` | `HttpMiddleware` |
#### Returns
[`Table`](../interfaces/Table.md)\<`T`\>
- this Table instrumented by the passed middleware
#### Implementation of
[Table](../interfaces/Table.md).[withMiddleware](../interfaces/Table.md#withmiddleware)
#### Defined in
[index.ts:1209](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1209)

View File

@@ -1,82 +0,0 @@
[vectordb](../README.md) / [Exports](../modules.md) / MakeArrowTableOptions
# Class: MakeArrowTableOptions
Options to control the makeArrowTable call.
## Table of contents
### Constructors
- [constructor](MakeArrowTableOptions.md#constructor)
### Properties
- [dictionaryEncodeStrings](MakeArrowTableOptions.md#dictionaryencodestrings)
- [embeddings](MakeArrowTableOptions.md#embeddings)
- [schema](MakeArrowTableOptions.md#schema)
- [vectorColumns](MakeArrowTableOptions.md#vectorcolumns)
## Constructors
### constructor
**new MakeArrowTableOptions**(`values?`)
#### Parameters
| Name | Type |
| :------ | :------ |
| `values?` | `Partial`\<[`MakeArrowTableOptions`](MakeArrowTableOptions.md)\> |
#### Defined in
[arrow.ts:98](https://github.com/lancedb/lancedb/blob/92179835/node/src/arrow.ts#L98)
## Properties
### dictionaryEncodeStrings
**dictionaryEncodeStrings**: `boolean` = `false`
If true then string columns will be encoded with dictionary encoding
Set this to true if your string columns tend to repeat the same values
often. For more precise control use the `schema` property to specify the
data type for individual columns.
If `schema` is provided then this property is ignored.
#### Defined in
[arrow.ts:96](https://github.com/lancedb/lancedb/blob/92179835/node/src/arrow.ts#L96)
___
### embeddings
`Optional` **embeddings**: [`EmbeddingFunction`](../interfaces/EmbeddingFunction.md)\<`any`\>
#### Defined in
[arrow.ts:85](https://github.com/lancedb/lancedb/blob/92179835/node/src/arrow.ts#L85)
___
### schema
`Optional` **schema**: `Schema`\<`any`\>
#### Defined in
[arrow.ts:63](https://github.com/lancedb/lancedb/blob/92179835/node/src/arrow.ts#L63)
___
### vectorColumns
**vectorColumns**: `Record`\<`string`, `VectorColumnOptions`\>
#### Defined in
[arrow.ts:81](https://github.com/lancedb/lancedb/blob/92179835/node/src/arrow.ts#L81)

View File

@@ -40,7 +40,7 @@ An embedding function that automatically creates vector representation for a giv
#### Defined in #### Defined in
[embedding/openai.ts:22](https://github.com/lancedb/lancedb/blob/92179835/node/src/embedding/openai.ts#L22) [embedding/openai.ts:21](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/embedding/openai.ts#L21)
## Properties ## Properties
@@ -50,17 +50,17 @@ An embedding function that automatically creates vector representation for a giv
#### Defined in #### Defined in
[embedding/openai.ts:20](https://github.com/lancedb/lancedb/blob/92179835/node/src/embedding/openai.ts#L20) [embedding/openai.ts:19](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/embedding/openai.ts#L19)
___ ___
### \_openai ### \_openai
`Private` `Readonly` **\_openai**: `OpenAI` `Private` `Readonly` **\_openai**: `any`
#### Defined in #### Defined in
[embedding/openai.ts:19](https://github.com/lancedb/lancedb/blob/92179835/node/src/embedding/openai.ts#L19) [embedding/openai.ts:18](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/embedding/openai.ts#L18)
___ ___
@@ -76,7 +76,7 @@ The name of the column that will be used as input for the Embedding Function.
#### Defined in #### Defined in
[embedding/openai.ts:56](https://github.com/lancedb/lancedb/blob/92179835/node/src/embedding/openai.ts#L56) [embedding/openai.ts:50](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/embedding/openai.ts#L50)
## Methods ## Methods
@@ -102,4 +102,4 @@ Creates a vector representation for the given values.
#### Defined in #### Defined in
[embedding/openai.ts:43](https://github.com/lancedb/lancedb/blob/92179835/node/src/embedding/openai.ts#L43) [embedding/openai.ts:38](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/embedding/openai.ts#L38)

View File

@@ -19,7 +19,6 @@ A builder for nearest neighbor queries for LanceDB.
### Properties ### Properties
- [\_embeddings](Query.md#_embeddings) - [\_embeddings](Query.md#_embeddings)
- [\_fastSearch](Query.md#_fastsearch)
- [\_filter](Query.md#_filter) - [\_filter](Query.md#_filter)
- [\_limit](Query.md#_limit) - [\_limit](Query.md#_limit)
- [\_metricType](Query.md#_metrictype) - [\_metricType](Query.md#_metrictype)
@@ -35,7 +34,6 @@ A builder for nearest neighbor queries for LanceDB.
### Methods ### Methods
- [execute](Query.md#execute) - [execute](Query.md#execute)
- [fastSearch](Query.md#fastsearch)
- [filter](Query.md#filter) - [filter](Query.md#filter)
- [isElectron](Query.md#iselectron) - [isElectron](Query.md#iselectron)
- [limit](Query.md#limit) - [limit](Query.md#limit)
@@ -67,7 +65,7 @@ A builder for nearest neighbor queries for LanceDB.
#### Defined in #### Defined in
[query.ts:39](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L39) [query.ts:38](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L38)
## Properties ## Properties
@@ -77,17 +75,7 @@ A builder for nearest neighbor queries for LanceDB.
#### Defined in #### Defined in
[query.ts:37](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L37) [query.ts:36](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L36)
___
### \_fastSearch
`Private` **\_fastSearch**: `boolean`
#### Defined in
[query.ts:36](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L36)
___ ___
@@ -97,7 +85,7 @@ ___
#### Defined in #### Defined in
[query.ts:33](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L33) [query.ts:33](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L33)
___ ___
@@ -107,7 +95,7 @@ ___
#### Defined in #### Defined in
[query.ts:29](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L29) [query.ts:29](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L29)
___ ___
@@ -117,7 +105,7 @@ ___
#### Defined in #### Defined in
[query.ts:34](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L34) [query.ts:34](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L34)
___ ___
@@ -127,7 +115,7 @@ ___
#### Defined in #### Defined in
[query.ts:31](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L31) [query.ts:31](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L31)
___ ___
@@ -137,7 +125,7 @@ ___
#### Defined in #### Defined in
[query.ts:35](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L35) [query.ts:35](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L35)
___ ___
@@ -147,7 +135,7 @@ ___
#### Defined in #### Defined in
[query.ts:26](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L26) [query.ts:26](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L26)
___ ___
@@ -157,7 +145,7 @@ ___
#### Defined in #### Defined in
[query.ts:28](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L28) [query.ts:28](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L28)
___ ___
@@ -167,7 +155,7 @@ ___
#### Defined in #### Defined in
[query.ts:30](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L30) [query.ts:30](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L30)
___ ___
@@ -177,7 +165,7 @@ ___
#### Defined in #### Defined in
[query.ts:32](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L32) [query.ts:32](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L32)
___ ___
@@ -187,7 +175,7 @@ ___
#### Defined in #### Defined in
[query.ts:27](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L27) [query.ts:27](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L27)
___ ___
@@ -213,7 +201,7 @@ A filter statement to be applied to this query.
#### Defined in #### Defined in
[query.ts:90](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L90) [query.ts:87](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L87)
## Methods ## Methods
@@ -235,30 +223,7 @@ Execute the query and return the results as an Array of Objects
#### Defined in #### Defined in
[query.ts:127](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L127) [query.ts:115](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L115)
___
### fastSearch
**fastSearch**(`value`): [`Query`](Query.md)\<`T`\>
Skip searching un-indexed data. This can make search faster, but will miss
any data that is not yet indexed.
#### Parameters
| Name | Type |
| :------ | :------ |
| `value` | `boolean` |
#### Returns
[`Query`](Query.md)\<`T`\>
#### Defined in
[query.ts:119](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L119)
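A minimal sketch chaining this onto a search query (the vector and limit are illustrative):

```ts
// Search only indexed data for lower latency; rows not yet indexed are skipped
const results = await table
  .search([0.1, 0.3])
  .fastSearch(true)
  .limit(10)
  .execute();
```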
___ ___
@@ -280,7 +245,7 @@ A filter statement to be applied to this query.
#### Defined in #### Defined in
[query.ts:85](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L85) [query.ts:82](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L82)
___ ___
@@ -294,7 +259,7 @@ ___
#### Defined in #### Defined in
[query.ts:155](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L155) [query.ts:142](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L142)
___ ___
@@ -303,7 +268,6 @@ ___
**limit**(`value`): [`Query`](Query.md)\<`T`\> **limit**(`value`): [`Query`](Query.md)\<`T`\>
Sets the number of results that will be returned Sets the number of results that will be returned
default value is 10
#### Parameters #### Parameters
@@ -317,7 +281,7 @@ default value is 10
#### Defined in #### Defined in
[query.ts:58](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L58) [query.ts:55](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L55)
___ ___
@@ -343,7 +307,7 @@ MetricType for the different options
#### Defined in #### Defined in
[query.ts:105](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L105) [query.ts:102](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L102)
___ ___
@@ -365,7 +329,7 @@ The number of probes used. A higher number makes search more accurate but also s
#### Defined in #### Defined in
[query.ts:76](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L76) [query.ts:73](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L73)
___ ___
@@ -385,7 +349,7 @@ ___
#### Defined in #### Defined in
[query.ts:110](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L110) [query.ts:107](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L107)
___ ___
@@ -407,7 +371,7 @@ Refine the results by reading extra elements and re-ranking them in memory.
#### Defined in #### Defined in
[query.ts:67](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L67) [query.ts:64](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L64)
___ ___
@@ -429,4 +393,4 @@ Return only the specified columns.
#### Defined in #### Defined in
[query.ts:96](https://github.com/lancedb/lancedb/blob/92179835/node/src/query.ts#L96) [query.ts:93](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/query.ts#L93)

View File

@@ -1,52 +0,0 @@
[vectordb](../README.md) / [Exports](../modules.md) / IndexStatus
# Enumeration: IndexStatus
## Table of contents
### Enumeration Members
- [Done](IndexStatus.md#done)
- [Failed](IndexStatus.md#failed)
- [Indexing](IndexStatus.md#indexing)
- [Pending](IndexStatus.md#pending)
## Enumeration Members
### Done
**Done** = ``"done"``
#### Defined in
[index.ts:713](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L713)
___
### Failed
• **Failed** = ``"failed"``
#### Defined in
[index.ts:714](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L714)
___
### Indexing
• **Indexing** = ``"indexing"``
#### Defined in
[index.ts:712](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L712)
___
### Pending
• **Pending** = ``"pending"``
#### Defined in
[index.ts:711](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L711)

View File

@@ -22,7 +22,7 @@ Cosine distance
#### Defined in #### Defined in
[index.ts:1381](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1381) [index.ts:1041](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L1041)
___ ___
@@ -34,7 +34,7 @@ Dot product
#### Defined in #### Defined in
[index.ts:1386](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1386) [index.ts:1046](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L1046)
___ ___
@@ -46,4 +46,4 @@ Euclidean distance
#### Defined in #### Defined in
[index.ts:1376](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1376) [index.ts:1036](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L1036)

View File

@@ -22,7 +22,7 @@ Append new data to the table.
#### Defined in #### Defined in
[index.ts:1347](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1347) [index.ts:1007](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L1007)
___ ___
@@ -34,7 +34,7 @@ Create a new [Table](../interfaces/Table.md).
#### Defined in #### Defined in
[index.ts:1343](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1343) [index.ts:1003](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L1003)
___ ___
@@ -46,4 +46,4 @@ Overwrite the existing [Table](../interfaces/Table.md) if presented.
#### Defined in #### Defined in
[index.ts:1345](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1345) [index.ts:1005](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L1005)

View File

@@ -18,7 +18,7 @@
#### Defined in #### Defined in
[index.ts:68](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L68) [index.ts:54](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L54)
___ ___
@@ -28,7 +28,7 @@ ___
#### Defined in #### Defined in
[index.ts:70](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L70) [index.ts:56](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L56)
___ ___
@@ -38,4 +38,4 @@ ___
#### Defined in #### Defined in
[index.ts:72](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L72) [index.ts:58](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L58)

View File

@@ -19,7 +19,7 @@ The number of bytes removed from disk.
#### Defined in #### Defined in
[index.ts:1218](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1218) [index.ts:878](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L878)
___ ___
@@ -31,4 +31,4 @@ The number of old table versions removed.
#### Defined in #### Defined in
[index.ts:1222](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1222) [index.ts:882](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L882)

View File

@@ -1,53 +0,0 @@
[vectordb](../README.md) / [Exports](../modules.md) / ColumnAlteration
# Interface: ColumnAlteration
A definition of a column alteration. The alteration changes the column at
`path` to have the new name `name`, to be nullable if `nullable` is true,
and to have the data type `data_type`. At least one of `rename` or `nullable`
must be provided.
## Table of contents
### Properties
- [nullable](ColumnAlteration.md#nullable)
- [path](ColumnAlteration.md#path)
- [rename](ColumnAlteration.md#rename)
## Properties
### nullable
`Optional` **nullable**: `boolean`
Set the new nullability. Note that a nullable column cannot be made non-nullable.
#### Defined in
[index.ts:638](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L638)
___
### path
**path**: `string`
The path to the column to alter. This is a dot-separated path to the column.
If it is a top-level column then it is just the name of the column. If it is
a nested column then it is the path to the column, e.g. "a.b.c" for a column
`c` nested inside a column `b` nested inside a column `a`.
#### Defined in
[index.ts:633](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L633)
___
### rename
`Optional` **rename**: `string`
#### Defined in
[index.ts:634](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L634)

View File

@@ -22,7 +22,7 @@ fragments added.
#### Defined in #### Defined in
[index.ts:1273](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1273) [index.ts:933](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L933)
___ ___
@@ -35,7 +35,7 @@ file.
#### Defined in #### Defined in
[index.ts:1268](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1268) [index.ts:928](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L928)
___ ___
@@ -47,7 +47,7 @@ The number of new fragments that were created.
#### Defined in #### Defined in
[index.ts:1263](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1263) [index.ts:923](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L923)
___ ___
@@ -59,4 +59,4 @@ The number of fragments that were removed.
#### Defined in #### Defined in
[index.ts:1259](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1259) [index.ts:919](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L919)

View File

@@ -24,7 +24,7 @@ Default is true.
#### Defined in #### Defined in
[index.ts:1241](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1241) [index.ts:901](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L901)
___ ___
@@ -38,7 +38,7 @@ the deleted rows. Default is 10%.
#### Defined in #### Defined in
[index.ts:1247](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1247) [index.ts:907](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L907)
___ ___
@@ -46,11 +46,11 @@ ___
`Optional` **maxRowsPerGroup**: `number` `Optional` **maxRowsPerGroup**: `number`
The maximum number of T per group. Defaults to 1024. The maximum number of rows per group. Defaults to 1024.
#### Defined in #### Defined in
[index.ts:1235](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1235) [index.ts:895](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L895)
___ ___
@@ -63,7 +63,7 @@ the number of cores on the machine.
#### Defined in #### Defined in
[index.ts:1252](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1252) [index.ts:912](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L912)
___ ___
@@ -77,4 +77,4 @@ Defaults to 1024 * 1024.
#### Defined in #### Defined in
[index.ts:1231](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1231) [index.ts:891](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L891)

View File

@@ -22,7 +22,6 @@ Connection could be local against filesystem or remote against a server.
- [dropTable](Connection.md#droptable) - [dropTable](Connection.md#droptable)
- [openTable](Connection.md#opentable) - [openTable](Connection.md#opentable)
- [tableNames](Connection.md#tablenames) - [tableNames](Connection.md#tablenames)
- [withMiddleware](Connection.md#withmiddleware)
## Properties ## Properties
@@ -32,7 +31,7 @@ Connection could be local against filesystem or remote against a server.
#### Defined in #### Defined in
[index.ts:261](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L261) [index.ts:183](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L183)
## Methods ## Methods
@@ -60,7 +59,7 @@ Creates a new Table, optionally initializing it with new data.
#### Defined in #### Defined in
[index.ts:285](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L285) [index.ts:207](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L207)
**createTable**(`name`, `data`): `Promise`\<[`Table`](Table.md)\<`number`[]\>\> **createTable**(`name`, `data`): `Promise`\<[`Table`](Table.md)\<`number`[]\>\>
@@ -71,7 +70,7 @@ Creates a new Table and initialize it with new data.
| Name | Type | Description | | Name | Type | Description |
| :------ | :------ | :------ | | :------ | :------ | :------ |
| `name` | `string` | The name of the table. | | `name` | `string` | The name of the table. |
| `data` | `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[] | Non-empty Array of Records to be inserted into the table | | `data` | `Record`\<`string`, `unknown`\>[] | Non-empty Array of Records to be inserted into the table |
#### Returns #### Returns
@@ -79,7 +78,7 @@ Creates a new Table and initialize it with new data.
#### Defined in #### Defined in
[index.ts:299](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L299) [index.ts:221](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L221)
**createTable**(`name`, `data`, `options`): `Promise`\<[`Table`](Table.md)\<`number`[]\>\> **createTable**(`name`, `data`, `options`): `Promise`\<[`Table`](Table.md)\<`number`[]\>\>
@@ -90,7 +89,7 @@ Creates a new Table and initialize it with new data.
| Name | Type | Description | | Name | Type | Description |
| :------ | :------ | :------ | | :------ | :------ | :------ |
| `name` | `string` | The name of the table. | | `name` | `string` | The name of the table. |
| `data` | `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[] | Non-empty Array of Records to be inserted into the table | | `data` | `Record`\<`string`, `unknown`\>[] | Non-empty Array of Records to be inserted into the table |
| `options` | [`WriteOptions`](WriteOptions.md) | The write options to use when creating the table. | | `options` | [`WriteOptions`](WriteOptions.md) | The write options to use when creating the table. |
#### Returns #### Returns
@@ -99,7 +98,7 @@ Creates a new Table and initialize it with new data.
#### Defined in #### Defined in
[index.ts:311](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L311) [index.ts:233](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L233)
**createTable**\<`T`\>(`name`, `data`, `embeddings`): `Promise`\<[`Table`](Table.md)\<`T`\>\> **createTable**\<`T`\>(`name`, `data`, `embeddings`): `Promise`\<[`Table`](Table.md)\<`T`\>\>
@@ -116,7 +115,7 @@ Creates a new Table and initialize it with new data.
| Name | Type | Description | | Name | Type | Description |
| :------ | :------ | :------ | | :------ | :------ | :------ |
| `name` | `string` | The name of the table. | | `name` | `string` | The name of the table. |
| `data` | `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[] | Non-empty Array of Records to be inserted into the table | | `data` | `Record`\<`string`, `unknown`\>[] | Non-empty Array of Records to be inserted into the table |
| `embeddings` | [`EmbeddingFunction`](EmbeddingFunction.md)\<`T`\> | An embedding function to use on this table | | `embeddings` | [`EmbeddingFunction`](EmbeddingFunction.md)\<`T`\> | An embedding function to use on this table |
#### Returns #### Returns
@@ -125,7 +124,7 @@ Creates a new Table and initialize it with new data.
#### Defined in #### Defined in
[index.ts:324](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L324) [index.ts:246](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L246)
**createTable**\<`T`\>(`name`, `data`, `embeddings`, `options`): `Promise`\<[`Table`](Table.md)\<`T`\>\> **createTable**\<`T`\>(`name`, `data`, `embeddings`, `options`): `Promise`\<[`Table`](Table.md)\<`T`\>\>
@@ -142,7 +141,7 @@ Creates a new Table and initialize it with new data.
| Name | Type | Description | | Name | Type | Description |
| :------ | :------ | :------ | | :------ | :------ | :------ |
| `name` | `string` | The name of the table. | | `name` | `string` | The name of the table. |
| `data` | `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[] | Non-empty Array of Records to be inserted into the table | | `data` | `Record`\<`string`, `unknown`\>[] | Non-empty Array of Records to be inserted into the table |
| `embeddings` | [`EmbeddingFunction`](EmbeddingFunction.md)\<`T`\> | An embedding function to use on this table | | `embeddings` | [`EmbeddingFunction`](EmbeddingFunction.md)\<`T`\> | An embedding function to use on this table |
| `options` | [`WriteOptions`](WriteOptions.md) | The write options to use when creating the table. | | `options` | [`WriteOptions`](WriteOptions.md) | The write options to use when creating the table. |
@@ -152,7 +151,7 @@ Creates a new Table and initialize it with new data.
#### Defined in #### Defined in
[index.ts:337](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L337) [index.ts:259](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L259)
___ ___
@@ -174,7 +173,7 @@ Drop an existing table.
#### Defined in #### Defined in
[index.ts:348](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L348) [index.ts:270](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L270)
___ ___
@@ -203,7 +202,7 @@ Open a table in the database.
#### Defined in #### Defined in
[index.ts:271](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L271) [index.ts:193](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L193)
___ ___
@@ -217,32 +216,4 @@ ___
#### Defined in #### Defined in
[index.ts:263](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L263) [index.ts:185](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L185)
___
### withMiddleware
**withMiddleware**(`middleware`): [`Connection`](Connection.md)
Instrument the behavior of this Connection with middleware.
The middleware will be called in the order they are added.
Currently this functionality is only supported for remote Connections.
#### Parameters
| Name | Type |
| :------ | :------ |
| `middleware` | `HttpMiddleware` |
#### Returns
[`Connection`](Connection.md)
- this Connection instrumented by the passed middleware
#### Defined in
[index.ts:360](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L360)

View File

@@ -10,10 +10,7 @@
- [awsCredentials](ConnectionOptions.md#awscredentials) - [awsCredentials](ConnectionOptions.md#awscredentials)
- [awsRegion](ConnectionOptions.md#awsregion) - [awsRegion](ConnectionOptions.md#awsregion)
- [hostOverride](ConnectionOptions.md#hostoverride) - [hostOverride](ConnectionOptions.md#hostoverride)
- [readConsistencyInterval](ConnectionOptions.md#readconsistencyinterval)
- [region](ConnectionOptions.md#region) - [region](ConnectionOptions.md#region)
- [storageOptions](ConnectionOptions.md#storageoptions)
- [timeout](ConnectionOptions.md#timeout)
- [uri](ConnectionOptions.md#uri) - [uri](ConnectionOptions.md#uri)
## Properties ## Properties
@@ -22,13 +19,9 @@
`Optional` **apiKey**: `string` `Optional` **apiKey**: `string`
API key for the remote connections
Can also be passed by setting environment variable `LANCEDB_API_KEY`
#### Defined in #### Defined in
[index.ts:112](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L112) [index.ts:81](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L81)
___ ___
@@ -40,14 +33,9 @@ User provided AWS credentials.
If not provided, LanceDB will use the default credentials provider chain. If not provided, LanceDB will use the default credentials provider chain.
**`Deprecated`**
Pass `aws_access_key_id`, `aws_secret_access_key`, and `aws_session_token`
through `storageOptions` instead.
#### Defined in #### Defined in
[index.ts:92](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L92) [index.ts:75](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L75)
___ ___
@@ -55,15 +43,11 @@ ___
`Optional` **awsRegion**: `string` `Optional` **awsRegion**: `string`
AWS region to connect to. Default is defaultAwsRegion AWS region to connect to. Default is defaultAwsRegion.
**`Deprecated`**
Pass `region` through `storageOptions` instead.
#### Defined in #### Defined in
[index.ts:98](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L98) [index.ts:78](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L78)
___ ___
@@ -71,33 +55,13 @@ ___
`Optional` **hostOverride**: `string` `Optional` **hostOverride**: `string`
Override the host URL for the remote connection. Override the host URL for the remote connections.
This is useful for local testing. This is useful for local testing.
#### Defined in #### Defined in
[index.ts:122](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L122) [index.ts:91](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L91)
___
### readConsistencyInterval
`Optional` **readConsistencyInterval**: `number`
(For LanceDB OSS only): The interval, in seconds, at which to check for
updates to the table from other processes. If None, then consistency is not
checked. For performance reasons, this is the default. For strong
consistency, set this to zero seconds. Then every read will check for
updates from other processes. As a compromise, you can set this to a
non-zero value for eventual consistency. If more than that interval
has passed since the last check, then the table will be checked for updates.
Note: this consistency only applies to read operations. Write operations are
always consistent.
#### Defined in
[index.ts:140](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L140)
___ ___
@@ -105,37 +69,11 @@ ___
`Optional` **region**: `string` `Optional` **region**: `string`
Region to connect. Default is 'us-east-1' Region to connect
#### Defined in #### Defined in
[index.ts:115](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L115) [index.ts:84](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L84)
___
### storageOptions
`Optional` **storageOptions**: `Record`\<`string`, `string`\>
User provided options for object storage. For example, S3 credentials or request timeouts.
The various options are described at https://lancedb.github.io/lancedb/guides/storage/
#### Defined in
[index.ts:105](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L105)
___
### timeout
`Optional` **timeout**: `number`
Duration in milliseconds for request timeout. Default = 10,000 (10 seconds)
#### Defined in
[index.ts:127](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L127)
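A hedged sketch of the connection options documented above (`readConsistencyInterval`, `storageOptions`, `timeout`), assuming `connect` accepts a `ConnectionOptions` object; the storage option key is illustrative (see the linked storage guide):

```ts
const db = await lancedb.connect({
  uri: "s3://bucket/path/to/database",
  storageOptions: { region: "us-east-1" }, // illustrative key; see the storage guide
  readConsistencyInterval: 0,              // 0 = check for updates on every read
  timeout: 10_000,                         // request timeout in milliseconds
});
```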
___ ___
@@ -147,8 +85,8 @@ LanceDB database URI.
- `/path/to/database` - local database - `/path/to/database` - local database
- `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage
- `db://host:port` - remote database (LanceDB cloud) - `db://host:port` - remote database (SaaS)
#### Defined in #### Defined in
[index.ts:83](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L83) [index.ts:69](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L69)

View File

@@ -26,7 +26,7 @@
#### Defined in #### Defined in
[index.ts:163](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L163) [index.ts:116](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L116)
___ ___
@@ -36,7 +36,7 @@ ___
#### Defined in #### Defined in
[index.ts:169](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L169) [index.ts:122](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L122)
___ ___
@@ -46,7 +46,7 @@ ___
#### Defined in #### Defined in
[index.ts:160](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L160) [index.ts:113](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L113)
___ ___
@@ -56,7 +56,7 @@ ___
#### Defined in #### Defined in
[index.ts:166](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L166) [index.ts:119](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L119)
___ ___
@@ -66,4 +66,4 @@ ___
#### Defined in #### Defined in
[index.ts:172](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L172) [index.ts:125](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L125)

View File

@@ -18,29 +18,11 @@ An embedding function that automatically creates vector representation for a giv
### Properties ### Properties
- [destColumn](EmbeddingFunction.md#destcolumn)
- [embed](EmbeddingFunction.md#embed) - [embed](EmbeddingFunction.md#embed)
- [embeddingDataType](EmbeddingFunction.md#embeddingdatatype)
- [embeddingDimension](EmbeddingFunction.md#embeddingdimension)
- [excludeSource](EmbeddingFunction.md#excludesource)
- [sourceColumn](EmbeddingFunction.md#sourcecolumn) - [sourceColumn](EmbeddingFunction.md#sourcecolumn)
## Properties ## Properties
### destColumn
`Optional` **destColumn**: `string`
The name of the column that will contain the embedding
By default this is "vector"
#### Defined in
[embedding/embedding_function.ts:49](https://github.com/lancedb/lancedb/blob/92179835/node/src/embedding/embedding_function.ts#L49)
___
### embed ### embed
**embed**: (`data`: `T`[]) => `Promise`\<`number`[][]\> **embed**: (`data`: `T`[]) => `Promise`\<`number`[][]\>
@@ -63,54 +45,7 @@ Creates a vector representation for the given values.
#### Defined in #### Defined in
[embedding/embedding_function.ts:62](https://github.com/lancedb/lancedb/blob/92179835/node/src/embedding/embedding_function.ts#L62) [embedding/embedding_function.ts:27](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/embedding/embedding_function.ts#L27)
___
### embeddingDataType
`Optional` **embeddingDataType**: `Float`\<`Floats`\>
The data type of the embedding
The embedding function should return `number`. This will be converted into
an Arrow float array. By default this will be Float32 but this property can
be used to control the conversion.
#### Defined in
[embedding/embedding_function.ts:33](https://github.com/lancedb/lancedb/blob/92179835/node/src/embedding/embedding_function.ts#L33)
___
### embeddingDimension
`Optional` **embeddingDimension**: `number`
The dimension of the embedding
This is optional; normally it can be determined from the results of
`embed`. If it is not specified and the embedding is applied to an empty
table, that operation will fail.
#### Defined in
[embedding/embedding_function.ts:42](https://github.com/lancedb/lancedb/blob/92179835/node/src/embedding/embedding_function.ts#L42)
___
### excludeSource
`Optional` **excludeSource**: `boolean`
Whether the source column should be excluded from the resulting table
By default the source column is included; set this to true to store
only the embedding.
#### Defined in
[embedding/embedding_function.ts:57](https://github.com/lancedb/lancedb/blob/92179835/node/src/embedding/embedding_function.ts#L57)
___ ___
@@ -122,4 +57,4 @@ The name of the column that will be used as input for the Embedding Function.
#### Defined in #### Defined in
[embedding/embedding_function.ts:24](https://github.com/lancedb/lancedb/blob/92179835/node/src/embedding/embedding_function.ts#L24) [embedding/embedding_function.ts:22](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/embedding/embedding_function.ts#L22)
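Putting these properties together, a sketch of a custom embedding function might look like the following; `hashEmbed` is a stand-in for a real model call, and the column names are hypothetical:

```ts
import { EmbeddingFunction } from "vectordb";

// Stand-in for a real model: produces a fixed-length dummy vector.
function hashEmbed(text: string): number[] {
  return Array.from({ length: 384 }, (_, i) =>
    text.length === 0 ? 0 : text.charCodeAt(i % text.length) / 255,
  );
}

class MyEmbeddings implements EmbeddingFunction<string> {
  sourceColumn = "text";    // column used as input
  destColumn = "vector";    // where embeddings are written (the default)
  embeddingDimension = 384; // lets the function be applied to an empty table
  excludeSource = false;    // keep the source text alongside the vectors

  async embed(data: string[]): Promise<number[][]> {
    return data.map(hashEmbed);
  }
}
```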
@@ -6,51 +6,18 @@
### Properties ### Properties
- [distanceType](IndexStats.md#distancetype)
- [indexType](IndexStats.md#indextype)
- [numIndexedRows](IndexStats.md#numindexedrows) - [numIndexedRows](IndexStats.md#numindexedrows)
- [numIndices](IndexStats.md#numindices)
- [numUnindexedRows](IndexStats.md#numunindexedrows) - [numUnindexedRows](IndexStats.md#numunindexedrows)
## Properties ## Properties
### distanceType
`Optional` **distanceType**: `string`
#### Defined in
[index.ts:728](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L728)
___
### indexType
**indexType**: `string`
#### Defined in
[index.ts:727](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L727)
___
### numIndexedRows ### numIndexedRows
**numIndexedRows**: ``null`` \| `number` **numIndexedRows**: ``null`` \| `number`
#### Defined in #### Defined in
[index.ts:725](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L725) [index.ts:478](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L478)
___
### numIndices
• `Optional` **numIndices**: `number`
#### Defined in
[index.ts:729](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L729)
___ ___
@@ -60,4 +27,4 @@ ___
#### Defined in #### Defined in
[index.ts:726](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L726) [index.ts:479](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L479)
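A brief sketch of reading these statistics back, using a hypothetical table and index identifier (whether the argument is an index name or an index UUID depends on which version of the interface in this diff you are on):

```ts
import * as lancedb from "vectordb";

const db = await lancedb.connect("./.lancedb");
const table = await db.openTable("images"); // hypothetical table name

const stats = await table.indexStats("vector_idx"); // hypothetical index identifier
// numIndexedRows / numUnindexedRows exist in both variants of the interface;
// indexType and distanceType only appear in the richer variant shown above.
console.log(stats.numIndexedRows, stats.numUnindexedRows);
```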
@@ -29,7 +29,7 @@ The column to be indexed
#### Defined in #### Defined in
[index.ts:1282](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1282) [index.ts:942](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L942)
___ ___
@@ -41,7 +41,7 @@ Cache size of the index
#### Defined in #### Defined in
[index.ts:1331](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1331) [index.ts:991](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L991)
___ ___
@@ -53,7 +53,7 @@ A unique name for the index
#### Defined in #### Defined in
[index.ts:1287](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1287) [index.ts:947](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L947)
___ ___
@@ -65,7 +65,7 @@ The max number of iterations for kmeans training.
#### Defined in #### Defined in
[index.ts:1302](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1302) [index.ts:962](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L962)
___ ___
@@ -77,7 +77,7 @@ Max number of iterations to train OPQ, if `use_opq` is true.
#### Defined in #### Defined in
[index.ts:1321](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1321) [index.ts:981](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L981)
___ ___
@@ -89,7 +89,7 @@ Metric type, L2 or Cosine
#### Defined in #### Defined in
[index.ts:1292](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1292) [index.ts:952](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L952)
___ ___
@@ -101,7 +101,7 @@ The number of bits to present one PQ centroid.
#### Defined in #### Defined in
[index.ts:1316](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1316) [index.ts:976](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L976)
___ ___
@@ -113,7 +113,7 @@ The number of partitions this index
#### Defined in #### Defined in
[index.ts:1297](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1297) [index.ts:957](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L957)
___ ___
@@ -125,7 +125,7 @@ Number of subvectors to build PQ code
#### Defined in #### Defined in
[index.ts:1312](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1312) [index.ts:972](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L972)
___ ___
@@ -137,7 +137,7 @@ Replace an existing index with the same name if it exists.
#### Defined in #### Defined in
[index.ts:1326](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1326) [index.ts:986](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L986)
___ ___
@@ -147,7 +147,7 @@ ___
#### Defined in #### Defined in
[index.ts:1333](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1333) [index.ts:993](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L993)
___ ___
@@ -159,4 +159,4 @@ Train as optimized product quantization.
#### Defined in #### Defined in
[index.ts:1307](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1307) [index.ts:967](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L967)
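A sketch of how these IVF-PQ options might be passed to `createIndex`, assuming the snake_case field names listed above; the parameter values are illustrative and should be tuned for your data:

```ts
import * as lancedb from "vectordb";

const db = await lancedb.connect("./.lancedb");
const table = await db.openTable("images"); // hypothetical table name

await table.createIndex({
  type: "ivf_pq",
  column: "vector",
  metric_type: lancedb.MetricType.L2, // or Cosine
  num_partitions: 256,                // illustrative
  num_sub_vectors: 16,                // illustrative
  max_iters: 50,
  replace: true,                      // overwrite an existing index of the same name
});
```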
@@ -1,73 +0,0 @@
[vectordb](../README.md) / [Exports](../modules.md) / MergeInsertArgs
# Interface: MergeInsertArgs
## Table of contents
### Properties
- [whenMatchedUpdateAll](MergeInsertArgs.md#whenmatchedupdateall)
- [whenNotMatchedBySourceDelete](MergeInsertArgs.md#whennotmatchedbysourcedelete)
- [whenNotMatchedInsertAll](MergeInsertArgs.md#whennotmatchedinsertall)
## Properties
### whenMatchedUpdateAll
`Optional` **whenMatchedUpdateAll**: `string` \| `boolean`
If true then rows that exist in both the source table (new data) and
the target table (old data) will be updated, replacing the old row
with the corresponding matching row.
If there are multiple matches then the behavior is undefined.
Currently this causes multiple copies of the row to be created
but that behavior is subject to change.
Optionally, a filter can be specified. This should be an SQL
filter where fields with the prefix "target." refer to fields
in the target table (old data) and fields with the prefix
"source." refer to fields in the source table (new data). For
example, the filter "target.lastUpdated < source.lastUpdated" will
only update matched rows when the incoming `lastUpdated` value is
newer.
Rows that do not match the filter will not be updated. Rows that
do not match the filter do become "not matched" rows.
#### Defined in
[index.ts:690](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L690)
___
### whenNotMatchedBySourceDelete
`Optional` **whenNotMatchedBySourceDelete**: `string` \| `boolean`
If true then rows that exist only in the target table (old data)
will be deleted.
If this is a string then it will be treated as an SQL filter and
only rows that both do not match any row in the source table and
match the given filter will be deleted.
This can be used to replace a selection of existing data with
new data.
#### Defined in
[index.ts:707](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L707)
___
### whenNotMatchedInsertAll
`Optional` **whenNotMatchedInsertAll**: `boolean`
If true then rows that exist only in the source table (new data)
will be inserted into the target table.
#### Defined in
[index.ts:695](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L695)
@@ -25,26 +25,17 @@ A LanceDB Table is the collection of Records. Each Record has one or more vector
- [delete](Table.md#delete) - [delete](Table.md#delete)
- [indexStats](Table.md#indexstats) - [indexStats](Table.md#indexstats)
- [listIndices](Table.md#listindices) - [listIndices](Table.md#listindices)
- [mergeInsert](Table.md#mergeinsert)
- [name](Table.md#name) - [name](Table.md#name)
- [overwrite](Table.md#overwrite) - [overwrite](Table.md#overwrite)
- [schema](Table.md#schema) - [schema](Table.md#schema)
- [search](Table.md#search) - [search](Table.md#search)
- [update](Table.md#update) - [update](Table.md#update)
### Methods
- [addColumns](Table.md#addcolumns)
- [alterColumns](Table.md#altercolumns)
- [dropColumns](Table.md#dropcolumns)
- [filter](Table.md#filter)
- [withMiddleware](Table.md#withmiddleware)
## Properties ## Properties
### add ### add
**add**: (`data`: `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[]) => `Promise`\<`number`\> **add**: (`data`: `Record`\<`string`, `unknown`\>[]) => `Promise`\<`number`\>
#### Type declaration #### Type declaration
@@ -56,7 +47,7 @@ Insert records into this Table.
| Name | Type | Description | | Name | Type | Description |
| :------ | :------ | :------ | | :------ | :------ | :------ |
| `data` | `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[] | Records to be inserted into the Table | | `data` | `Record`\<`string`, `unknown`\>[] | Records to be inserted into the Table |
##### Returns ##### Returns
@@ -66,33 +57,27 @@ The number of rows added to the table
#### Defined in #### Defined in
[index.ts:381](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L381) [index.ts:291](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L291)
___ ___
### countRows ### countRows
**countRows**: (`filter?`: `string`) => `Promise`\<`number`\> **countRows**: () => `Promise`\<`number`\>
#### Type declaration #### Type declaration
▸ (`filter?`): `Promise`\<`number`\> ▸ (): `Promise`\<`number`\>
Returns the number of rows in this table. Returns the number of rows in this table.
##### Parameters
| Name | Type |
| :------ | :------ |
| `filter?` | `string` |
##### Returns ##### Returns
`Promise`\<`number`\> `Promise`\<`number`\>
#### Defined in #### Defined in
[index.ts:454](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L454) [index.ts:361](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L361)
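Taken together with `add` above, a small usage sketch (the filter argument applies only to the `countRows` signature that accepts one; the table and column names are hypothetical):

```ts
import * as lancedb from "vectordb";

const db = await lancedb.connect("./.lancedb");
const table = await db.openTable("items"); // hypothetical table name

const rowsAdded = await table.add([
  { id: 1, text: "hello", vector: [0.1, 0.2] },
  { id: 2, text: "world", vector: [0.3, 0.4] },
]);

const total = await table.countRows();            // all rows
const matching = await table.countRows("id = 1"); // rows matching an SQL filter
```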
___ ___
@@ -122,17 +107,17 @@ VectorIndexParams.
#### Defined in #### Defined in
[index.ts:398](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L398) [index.ts:306](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L306)
___ ___
### createScalarIndex ### createScalarIndex
**createScalarIndex**: (`column`: `string`, `replace?`: `boolean`) => `Promise`\<`void`\> **createScalarIndex**: (`column`: `string`, `replace`: `boolean`) => `Promise`\<`void`\>
#### Type declaration #### Type declaration
▸ (`column`, `replace?`): `Promise`\<`void`\> ▸ (`column`, `replace`): `Promise`\<`void`\>
Create a scalar index on this Table for the given column Create a scalar index on this Table for the given column
@@ -141,7 +126,7 @@ Create a scalar index on this Table for the given column
| Name | Type | Description | | Name | Type | Description |
| :------ | :------ | :------ | | :------ | :------ | :------ |
| `column` | `string` | The column to index | | `column` | `string` | The column to index |
| `replace?` | `boolean` | If false, fail if an index already exists on the column it is always set to true for remote connections Scalar indices, like vector indices, can be used to speed up scans. A scalar index can speed up scans that contain filter expressions on the indexed column. For example, the following scan will be faster if the column `my_col` has a scalar index: ```ts const con = await lancedb.connect('./.lancedb'); const table = await con.openTable('images'); const results = await table.where('my_col = 7').execute(); ``` Scalar indices can also speed up scans containing a vector search and a prefilter: ```ts const con = await lancedb.connect('././lancedb'); const table = await con.openTable('images'); const results = await table.search([1.0, 2.0]).where('my_col != 7').prefilter(true); ``` Scalar indices can only speed up scans for basic filters using equality, comparison, range (e.g. `my_col BETWEEN 0 AND 100`), and set membership (e.g. `my_col IN (0, 1, 2)`) Scalar indices can be used if the filter contains multiple indexed columns and the filter criteria are AND'd or OR'd together (e.g. `my_col < 0 AND other_col> 100`) Scalar indices may be used if the filter contains non-indexed columns but, depending on the structure of the filter, they may not be usable. For example, if the column `not_indexed` does not have a scalar index then the filter `my_col = 0 OR not_indexed = 1` will not be able to use any scalar index on `my_col`. | | `replace` | `boolean` | If false, fail if an index already exists on the column Scalar indices, like vector indices, can be used to speed up scans. A scalar index can speed up scans that contain filter expressions on the indexed column. For example, the following scan will be faster if the column `my_col` has a scalar index: ```ts const con = await lancedb.connect('./.lancedb'); const table = await con.openTable('images'); const results = await table.where('my_col = 7').execute(); ``` Scalar indices can also speed up scans containing a vector search and a prefilter: ```ts const con = await lancedb.connect('././lancedb'); const table = await con.openTable('images'); const results = await table.search([1.0, 2.0]).where('my_col != 7').prefilter(true); ``` Scalar indices can only speed up scans for basic filters using equality, comparison, range (e.g. `my_col BETWEEN 0 AND 100`), and set membership (e.g. `my_col IN (0, 1, 2)`) Scalar indices can be used if the filter contains multiple indexed columns and the filter criteria are AND'd or OR'd together (e.g. `my_col < 0 AND other_col> 100`) Scalar indices may be used if the filter contains non-indexed columns but, depending on the structure of the filter, they may not be usable. For example, if the column `not_indexed` does not have a scalar index then the filter `my_col = 0 OR not_indexed = 1` will not be able to use any scalar index on `my_col`. |
##### Returns ##### Returns
@@ -157,7 +142,7 @@ await table.createScalarIndex('my_col')
#### Defined in #### Defined in
[index.ts:449](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L449) [index.ts:356](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L356)
___ ___
@@ -209,17 +194,17 @@ await tbl.countRows() // Returns 1
#### Defined in #### Defined in
[index.ts:488](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L488) [index.ts:395](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L395)
___ ___
### indexStats ### indexStats
• **indexStats**: (`indexName`: `string`) => `Promise`\<[`IndexStats`](IndexStats.md)\> • **indexStats**: (`indexUuid`: `string`) => `Promise`\<[`IndexStats`](IndexStats.md)\>
#### Type declaration #### Type declaration
▸ (`indexName`): `Promise`\<[`IndexStats`](IndexStats.md)\> ▸ (`indexUuid`): `Promise`\<[`IndexStats`](IndexStats.md)\>
Get statistics about an index. Get statistics about an index.
@@ -227,7 +212,7 @@ Get statistics about an index.
| Name | Type | | Name | Type |
| :------ | :------ | | :------ | :------ |
| `indexName` | `string` | | `indexUuid` | `string` |
##### Returns ##### Returns
@@ -235,7 +220,7 @@ Get statistics about an index.
#### Defined in #### Defined in
[index.ts:567](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L567) [index.ts:438](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L438)
___ ___
@@ -255,57 +240,7 @@ List the indices on this table. List the indices on this table.
#### Defined in #### Defined in
[index.ts:562](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L562) [index.ts:433](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L433)
___
### mergeInsert
• **mergeInsert**: (`on`: `string`, `data`: `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[], `args`: [`MergeInsertArgs`](MergeInsertArgs.md)) => `Promise`\<`void`\>
#### Type declaration
▸ (`on`, `data`, `args`): `Promise`\<`void`\>
Runs a "merge insert" operation on the table
This operation can add rows, update rows, and remove rows all in a single
transaction. It is a very generic tool that can be used to create
behaviors like "insert if not exists", "update or insert (i.e. upsert)",
or even replace a portion of existing data with new data (e.g. replace
all data where month="january")
The merge insert operation works by combining new data from a
**source table** with existing data in a **target table** by using a
join. There are three categories of records.
"Matched" records are records that exist in both the source table and
the target table. "Not matched" records exist only in the source table
(these are new data). "Not matched by source" records exist only
in the target table (this is old data).
The MergeInsertArgs can be used to customize what should happen for
each category of data.
Please note that the data may appear to be reordered as part of this
operation. This is because updated rows will be deleted from the
dataset and then reinserted at the end with the new values.
##### Parameters
| Name | Type | Description |
| :------ | :------ | :------ |
| `on` | `string` | a column to join on. This is how records from the source table and target table are matched. |
| `data` | `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[] | the new data to insert |
| `args` | [`MergeInsertArgs`](MergeInsertArgs.md) | parameters controlling how the operation should behave |
##### Returns
`Promise`\<`void`\>
#### Defined in
[index.ts:553](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L553)
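As an illustration, an upsert keyed on a hypothetical `id` column could be sketched like this:

```ts
import * as lancedb from "vectordb";

const db = await lancedb.connect("./.lancedb");
const table = await db.openTable("items"); // hypothetical table name

// Match on "id": update rows that already exist, insert the ones that don't.
await table.mergeInsert(
  "id",
  [
    { id: 1, text: "updated row", vector: [0.1, 0.2] },
    { id: 42, text: "brand new row", vector: [0.3, 0.4] },
  ],
  {
    whenMatchedUpdateAll: true,
    whenNotMatchedInsertAll: true,
    // To additionally drop old rows not present in the new data:
    // whenNotMatchedBySourceDelete: "month = 'january'",
  },
);
```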
___ ___
@@ -315,13 +250,13 @@ ___
#### Defined in #### Defined in
[index.ts:367](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L367) [index.ts:277](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L277)
___ ___
### overwrite ### overwrite
• **overwrite**: (`data`: `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[]) => `Promise`\<`number`\> • **overwrite**: (`data`: `Record`\<`string`, `unknown`\>[]) => `Promise`\<`number`\>
#### Type declaration #### Type declaration
@@ -333,7 +268,7 @@ Insert records into this Table, replacing its contents.
| Name | Type | Description | | Name | Type | Description |
| :------ | :------ | :------ | | :------ | :------ | :------ |
| `data` | `Table`\<`any`\> \| `Record`\<`string`, `unknown`\>[] | Records to be inserted into the Table | | `data` | `Record`\<`string`, `unknown`\>[] | Records to be inserted into the Table |
##### Returns ##### Returns
@@ -343,7 +278,7 @@ The number of rows added to the table
#### Defined in #### Defined in
[index.ts:389](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L389) [index.ts:299](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L299)
___ ___
@@ -353,7 +288,7 @@ ___
#### Defined in #### Defined in
[index.ts:571](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L571) [index.ts:440](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L440)
___ ___
@@ -379,7 +314,7 @@ Creates a search query to find the nearest neighbors of the given search term
#### Defined in #### Defined in
[index.ts:373](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L373) [index.ts:283](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L283)
___ ___
@@ -430,123 +365,4 @@ let results = await tbl.search([1, 1]).execute();
#### Defined in #### Defined in
[index.ts:521](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L521) [index.ts:428](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L428)
## Methods
### addColumns
▸ **addColumns**(`newColumnTransforms`): `Promise`\<`void`\>
Add new columns with defined values.
#### Parameters
| Name | Type | Description |
| :------ | :------ | :------ |
| `newColumnTransforms` | \{ `name`: `string` ; `valueSql`: `string` }[] | pairs of column names and the SQL expression to use to calculate the value of the new column. These expressions will be evaluated for each row in the table, and can reference existing columns in the table. |
#### Returns
`Promise`\<`void`\>
#### Defined in
[index.ts:582](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L582)
___
### alterColumns
▸ **alterColumns**(`columnAlterations`): `Promise`\<`void`\>
Alter the name or nullability of columns.
#### Parameters
| Name | Type | Description |
| :------ | :------ | :------ |
| `columnAlterations` | [`ColumnAlteration`](ColumnAlteration.md)[] | One or more alterations to apply to columns. |
#### Returns
`Promise`\<`void`\>
#### Defined in
[index.ts:591](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L591)
___
### dropColumns
▸ **dropColumns**(`columnNames`): `Promise`\<`void`\>
Drop one or more columns from the dataset
This is a metadata-only operation and does not remove the data from the
underlying storage. In order to remove the data, you must subsequently
call ``compact_files`` to rewrite the data without the removed columns and
then call ``cleanup_files`` to remove the old files.
#### Parameters
| Name | Type | Description |
| :------ | :------ | :------ |
| `columnNames` | `string`[] | The names of the columns to drop. These can be nested column references (e.g. "a.b.c") or top-level column names (e.g. "a"). |
#### Returns
`Promise`\<`void`\>
#### Defined in
[index.ts:605](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L605)
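A combined sketch of these schema-evolution calls, assuming a `ColumnAlteration` takes a `path` plus optional `rename` and `nullable` fields, and using hypothetical table and column names:

```ts
import * as lancedb from "vectordb";

const db = await lancedb.connect("./.lancedb");
const table = await db.openTable("items"); // hypothetical table name

// Add a column computed from existing data via a SQL expression.
await table.addColumns([{ name: "text_length", valueSql: "length(text)" }]);

// Rename the new column and allow nulls (field names are an assumption).
await table.alterColumns([{ path: "text_length", rename: "textLength", nullable: true }]);

// Metadata-only drop; compact and clean up afterwards to reclaim storage.
await table.dropColumns(["textLength"]);
```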
___
### filter
▸ **filter**(`value`): [`Query`](../classes/Query.md)\<`T`\>
#### Parameters
| Name | Type |
| :------ | :------ |
| `value` | `string` |
#### Returns
[`Query`](../classes/Query.md)\<`T`\>
#### Defined in
[index.ts:569](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L569)
___
### withMiddleware
▸ **withMiddleware**(`middleware`): [`Table`](Table.md)\<`T`\>
Instrument the behavior of this Table with middleware.
The middleware will be called in the order they are added.
Currently this functionality is only supported for remote tables.
#### Parameters
| Name | Type |
| :------ | :------ |
| `middleware` | `HttpMiddleware` |
#### Returns
[`Table`](Table.md)\<`T`\>
- this Table instrumented by the passed middleware
#### Defined in
[index.ts:617](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L617)
@@ -20,7 +20,7 @@ new values to set
#### Defined in #### Defined in
[index.ts:652](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L652) [index.ts:454](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L454)
___ ___
@@ -33,4 +33,4 @@ in which case all rows will be updated.
#### Defined in #### Defined in
[index.ts:646](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L646) [index.ts:448](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L448)
@@ -20,7 +20,7 @@ new values to set as SQL expressions.
#### Defined in #### Defined in
[index.ts:666](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L666) [index.ts:468](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L468)
___ ___
@@ -33,4 +33,4 @@ in which case all rows will be updated.
#### Defined in #### Defined in
[index.ts:660](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L660) [index.ts:462](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L462)
@@ -8,7 +8,6 @@
- [columns](VectorIndex.md#columns) - [columns](VectorIndex.md#columns)
- [name](VectorIndex.md#name) - [name](VectorIndex.md#name)
- [status](VectorIndex.md#status)
- [uuid](VectorIndex.md#uuid) - [uuid](VectorIndex.md#uuid)
## Properties ## Properties
@@ -19,7 +18,7 @@
#### Defined in #### Defined in
[index.ts:718](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L718) [index.ts:472](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L472)
___ ___
@@ -29,17 +28,7 @@ ___
#### Defined in #### Defined in
[index.ts:719](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L719) [index.ts:473](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L473)
___
### status
**status**: [`IndexStatus`](../enums/IndexStatus.md)
#### Defined in
[index.ts:721](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L721)
___ ___
@@ -49,4 +38,4 @@ ___
#### Defined in #### Defined in
[index.ts:720](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L720) [index.ts:474](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L474)
@@ -24,4 +24,4 @@ A [WriteMode](../enums/WriteMode.md) to use on this operation
#### Defined in #### Defined in
[index.ts:1355](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1355) [index.ts:1015](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L1015)
@@ -6,7 +6,6 @@
### Enumerations ### Enumerations
- [IndexStatus](enums/IndexStatus.md)
- [MetricType](enums/MetricType.md) - [MetricType](enums/MetricType.md)
- [WriteMode](enums/WriteMode.md) - [WriteMode](enums/WriteMode.md)
@@ -15,7 +14,6 @@
- [DefaultWriteOptions](classes/DefaultWriteOptions.md) - [DefaultWriteOptions](classes/DefaultWriteOptions.md)
- [LocalConnection](classes/LocalConnection.md) - [LocalConnection](classes/LocalConnection.md)
- [LocalTable](classes/LocalTable.md) - [LocalTable](classes/LocalTable.md)
- [MakeArrowTableOptions](classes/MakeArrowTableOptions.md)
- [OpenAIEmbeddingFunction](classes/OpenAIEmbeddingFunction.md) - [OpenAIEmbeddingFunction](classes/OpenAIEmbeddingFunction.md)
- [Query](classes/Query.md) - [Query](classes/Query.md)
@@ -23,7 +21,6 @@
- [AwsCredentials](interfaces/AwsCredentials.md) - [AwsCredentials](interfaces/AwsCredentials.md)
- [CleanupStats](interfaces/CleanupStats.md) - [CleanupStats](interfaces/CleanupStats.md)
- [ColumnAlteration](interfaces/ColumnAlteration.md)
- [CompactionMetrics](interfaces/CompactionMetrics.md) - [CompactionMetrics](interfaces/CompactionMetrics.md)
- [CompactionOptions](interfaces/CompactionOptions.md) - [CompactionOptions](interfaces/CompactionOptions.md)
- [Connection](interfaces/Connection.md) - [Connection](interfaces/Connection.md)
@@ -32,7 +29,6 @@
- [EmbeddingFunction](interfaces/EmbeddingFunction.md) - [EmbeddingFunction](interfaces/EmbeddingFunction.md)
- [IndexStats](interfaces/IndexStats.md) - [IndexStats](interfaces/IndexStats.md)
- [IvfPQIndexConfig](interfaces/IvfPQIndexConfig.md) - [IvfPQIndexConfig](interfaces/IvfPQIndexConfig.md)
- [MergeInsertArgs](interfaces/MergeInsertArgs.md)
- [Table](interfaces/Table.md) - [Table](interfaces/Table.md)
- [UpdateArgs](interfaces/UpdateArgs.md) - [UpdateArgs](interfaces/UpdateArgs.md)
- [UpdateSqlArgs](interfaces/UpdateSqlArgs.md) - [UpdateSqlArgs](interfaces/UpdateSqlArgs.md)
@@ -46,9 +42,7 @@
### Functions ### Functions
- [connect](modules.md#connect) - [connect](modules.md#connect)
- [convertToTable](modules.md#converttotable)
- [isWriteOptions](modules.md#iswriteoptions) - [isWriteOptions](modules.md#iswriteoptions)
- [makeArrowTable](modules.md#makearrowtable)
## Type Aliases ## Type Aliases
@@ -58,7 +52,7 @@
#### Defined in #### Defined in
[index.ts:1336](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1336) [index.ts:996](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L996)
## Functions ## Functions
@@ -68,11 +62,11 @@
Connect to a LanceDB instance at the given URI. Connect to a LanceDB instance at the given URI.
Accepted formats: Accpeted formats:
- `/path/to/database` - local database - `/path/to/database` - local database
- `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage
- `db://host:port` - remote database (LanceDB cloud) - `db://host:port` - remote database (SaaS)
#### Parameters #### Parameters
@@ -90,7 +84,7 @@ Accepted formats:
#### Defined in #### Defined in
[index.ts:188](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L188) [index.ts:141](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L141)
**connect**(`opts`): `Promise`\<[`Connection`](interfaces/Connection.md)\> **connect**(`opts`): `Promise`\<[`Connection`](interfaces/Connection.md)\>
@@ -108,35 +102,7 @@ Connect to a LanceDB instance with connection options.
#### Defined in #### Defined in
[index.ts:194](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L194) [index.ts:147](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L147)
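For illustration, the two `connect` overloads might be used like this; the bucket, host, and API key are placeholders, and the `apiKey` field is an assumption for cloud connections:

```ts
import * as lancedb from "vectordb";

// URI form.
const local = await lancedb.connect("./.lancedb");
const onS3 = await lancedb.connect("s3://my-bucket/my-database"); // hypothetical bucket

// Options form.
const remote = await lancedb.connect({
  uri: "db://my-host:10024",           // hypothetical LanceDB Cloud endpoint
  apiKey: process.env.LANCEDB_API_KEY, // assumption: required for db:// URIs
  region: "us-east-1",
});
```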
___
### convertToTable
**convertToTable**\<`T`\>(`data`, `embeddings?`, `makeTableOptions?`): `Promise`\<`ArrowTable`\>
#### Type parameters
| Name |
| :------ |
| `T` |
#### Parameters
| Name | Type |
| :------ | :------ |
| `data` | `Record`\<`string`, `unknown`\>[] |
| `embeddings?` | [`EmbeddingFunction`](interfaces/EmbeddingFunction.md)\<`T`\> |
| `makeTableOptions?` | `Partial`\<[`MakeArrowTableOptions`](classes/MakeArrowTableOptions.md)\> |
#### Returns
`Promise`\<`ArrowTable`\>
#### Defined in
[arrow.ts:465](https://github.com/lancedb/lancedb/blob/92179835/node/src/arrow.ts#L465)
___ ___
@@ -156,116 +122,4 @@ value is WriteOptions
#### Defined in #### Defined in
[index.ts:1362](https://github.com/lancedb/lancedb/blob/92179835/node/src/index.ts#L1362) [index.ts:1022](https://github.com/lancedb/lancedb/blob/c89d5e6/node/src/index.ts#L1022)
___
### makeArrowTable
**makeArrowTable**(`data`, `options?`): `ArrowTable`
An enhanced version of the makeTable function from Apache Arrow
that supports nested fields and embeddings columns.
This function converts an array of Record<String, any> (row-major JS objects)
to an Arrow Table (a columnar structure)
Note that it currently does not support nulls.
If a schema is provided then it will be used to determine the resulting array
types. Fields will also be reordered to fit the order defined by the schema.
If a schema is not provided then the types will be inferred and the field order
will be controlled by the order of properties in the first record.
If the input is empty then a schema must be provided to create an empty table.
When a schema is not specified then data types will be inferred. The inference
rules are as follows:
- boolean => Bool
- number => Float64
- String => Utf8
- Buffer => Binary
- Record<String, any> => Struct
- Array<any> => List
#### Parameters
| Name | Type | Description |
| :------ | :------ | :------ |
| `data` | `Record`\<`string`, `any`\>[] | input data |
| `options?` | `Partial`\<[`MakeArrowTableOptions`](classes/MakeArrowTableOptions.md)\> | options to control the makeArrowTable call. |
#### Returns
`ArrowTable`
**`Example`**
```ts
import { fromTableToBuffer, makeArrowTable } from "../arrow";
import { Field, FixedSizeList, Float16, Float32, Int32, Schema } from "apache-arrow";
const schema = new Schema([
new Field("a", new Int32()),
new Field("b", new Float32()),
new Field("c", new FixedSizeList(3, new Field("item", new Float16()))),
]);
const table = makeArrowTable([
{ a: 1, b: 2, c: [1, 2, 3] },
{ a: 4, b: 5, c: [4, 5, 6] },
{ a: 7, b: 8, c: [7, 8, 9] },
], { schema });
```
By default it assumes that the column named `vector` is a vector column
and it will be converted into a fixed size list array of type float32.
The `vectorColumns` option can be used to support other vector column
names and data types.
```ts
const schema = new Schema([
new Field("a", new Float64()),
new Field("b", new Float64()),
new Field(
"vector",
new FixedSizeList(3, new Field("item", new Float32()))
),
]);
const table = makeArrowTable([
{ a: 1, b: 2, vector: [1, 2, 3] },
{ a: 4, b: 5, vector: [4, 5, 6] },
{ a: 7, b: 8, vector: [7, 8, 9] },
]);
assert.deepEqual(table.schema, schema);
```
You can specify the vector column types and names using the options as well
```typescript
const schema = new Schema([
new Field('a', new Float64()),
new Field('b', new Float64()),
new Field('vec1', new FixedSizeList(3, new Field('item', new Float16()))),
new Field('vec2', new FixedSizeList(3, new Field('item', new Float16())))
]);
const table = makeArrowTable([
{ a: 1, b: 2, vec1: [1, 2, 3], vec2: [2, 4, 6] },
{ a: 4, b: 5, vec1: [4, 5, 6], vec2: [8, 10, 12] },
{ a: 7, b: 8, vec1: [7, 8, 9], vec2: [14, 16, 18] }
], {
vectorColumns: {
vec1: { type: new Float16() },
vec2: { type: new Float16() }
}
}
assert.deepEqual(table.schema, schema)
```
#### Defined in
[arrow.ts:198](https://github.com/lancedb/lancedb/blob/92179835/node/src/arrow.ts#L198)
docs/src/js/.nojekyll (new file)
@@ -0,0 +1 @@
TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false.
@@ -36,8 +36,41 @@ const results = await table.vectorSearch([0.1, 0.3]).limit(20).toArray();
console.log(results); console.log(results);
``` ```
The [quickstart](https://lancedb.github.io/lancedb/basic/) contains a more complete example. The [quickstart](../basic.md) contains a more complete example.
## Development ## Development
See [CONTRIBUTING.md](_media/CONTRIBUTING.md) for information on how to contribute to LanceDB. ```sh
npm run build
npm run test
```
### Running lint / format
LanceDB uses [Biome](https://biomejs.dev/) for linting and formatting. If you are using VS Code, you will need to install the official [Biome](https://marketplace.visualstudio.com/items?itemName=biomejs.biome) extension.
To manually lint your code you can run:
```sh
npm run lint
```
To automatically fix all fixable issues, run:
```sh
npm run lint-fix
```
Unfortunately, the extension will not work unless your workspace root is set to the `nodejs` directory. You can still run the linting and formatting commands manually.
### Generating docs
```sh
npm run docs
cd ../docs
# Assume the virtual environment was created
# python3 -m venv venv
# pip install -r requirements.txt
. ./venv/bin/activate
mkdocs build
```
Some files were not shown because too many files have changed in this diff.