Mirror of https://github.com/lancedb/lancedb.git (synced 2025-12-24 13:59:58 +00:00)

Compare commits: 4 commits, rmeng/pool ... lancedb-cl
| Author | SHA1 | Date |
|---|---|---|
| | a503845c9f | |
| | 955a295026 | |
| | b70fa3892e | |
| | 31fb3b3b5c | |
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.13.1-beta.0"
+current_version = "0.13.0-beta.1"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
@@ -87,16 +87,6 @@ glob = "node/package.json"
 replace = "\"@lancedb/vectordb-linux-x64-gnu\": \"{new_version}\""
 search = "\"@lancedb/vectordb-linux-x64-gnu\": \"{current_version}\""
 
-[[tool.bumpversion.files]]
-glob = "node/package.json"
-replace = "\"@lancedb/vectordb-linux-arm64-musl\": \"{new_version}\""
-search = "\"@lancedb/vectordb-linux-arm64-musl\": \"{current_version}\""
-
-[[tool.bumpversion.files]]
-glob = "node/package.json"
-replace = "\"@lancedb/vectordb-linux-x64-musl\": \"{new_version}\""
-search = "\"@lancedb/vectordb-linux-x64-musl\": \"{current_version}\""
-
 [[tool.bumpversion.files]]
 glob = "node/package.json"
 replace = "\"@lancedb/vectordb-win32-x64-msvc\": \"{new_version}\""

@@ -31,9 +31,6 @@ rustflags = [
 [target.x86_64-unknown-linux-gnu]
 rustflags = ["-C", "target-cpu=haswell", "-C", "target-feature=+avx2,+fma,+f16c"]
 
-[target.x86_64-unknown-linux-musl]
-rustflags = ["-C", "target-cpu=haswell", "-C", "target-feature=-crt-static,+avx2,+fma,+f16c"]
-
 [target.aarch64-apple-darwin]
 rustflags = ["-C", "target-cpu=apple-m1", "-C", "target-feature=+neon,+fp16,+fhm,+dotprod"]
1 .github/workflows/nodejs.yml vendored
@@ -104,6 +104,7 @@ jobs:
           OPENAI_BASE_URL: http://0.0.0.0:8000
         run: |
           python ci/mock_openai.py &
+          ss -ltnp | grep :8000
           cd nodejs/examples
           npm test
   macos:
502 .github/workflows/npm-publish.yml vendored
@@ -101,7 +101,7 @@ jobs:
           path: |
             nodejs/dist/*.node
 
-  node-linux-gnu:
+  node-linux:
     name: vectordb (${{ matrix.config.arch}}-unknown-linux-gnu)
     runs-on: ${{ matrix.config.runner }}
     # Only runs on tags that matches the make-release action
@@ -137,63 +137,11 @@ jobs:
       - name: Upload Linux Artifacts
         uses: actions/upload-artifact@v4
         with:
-          name: node-native-linux-${{ matrix.config.arch }}-gnu
+          name: node-native-linux-${{ matrix.config.arch }}
           path: |
             node/dist/lancedb-vectordb-linux*.tgz
 
-  node-linux-musl:
-    name: vectordb (${{ matrix.config.arch}}-unknown-linux-musl)
-    runs-on: ubuntu-latest
-    container: alpine:edge
-    # Only runs on tags that matches the make-release action
-    if: startsWith(github.ref, 'refs/tags/v')
-    strategy:
-      fail-fast: false
-      matrix:
-        config:
-          - arch: x86_64
-          - arch: aarch64
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Install common dependencies
-        run: |
-          apk add protobuf-dev curl clang mold grep npm bash
-          curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y --default-toolchain 1.80.0
-          echo "source $HOME/.cargo/env" >> saved_env
-          echo "export CC=clang" >> saved_env
-          echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=-crt-static,+avx2,+fma,+f16c -Clinker=clang -Clink-arg=-fuse-ld=mold'" >> saved_env
-      - name: Configure aarch64 build
-        if: ${{ matrix.config.arch == 'aarch64' }}
-        run: |
-          source "$HOME/.cargo/env"
-          rustup target add aarch64-unknown-linux-musl --toolchain 1.80.0
-          crt=$(realpath $(dirname $(rustup which rustc))/../lib/rustlib/aarch64-unknown-linux-musl/lib/self-contained)
-          sysroot_lib=/usr/aarch64-unknown-linux-musl/usr/lib
-          apk_url=https://dl-cdn.alpinelinux.org/alpine/latest-stable/main/aarch64/
-          curl -sSf $apk_url > apk_list
-          for pkg in gcc libgcc musl; do curl -sSf $apk_url$(cat apk_list | grep -oP '(?<=")'$pkg'-\d.*?(?=")') | tar zxf -; done
-          mkdir -p $sysroot_lib
-          echo 'GROUP ( libgcc_s.so.1 -lgcc )' > $sysroot_lib/libgcc_s.so
-          cp usr/lib/libgcc_s.so.1 $sysroot_lib
-          cp usr/lib/gcc/aarch64-alpine-linux-musl/*/libgcc.a $sysroot_lib
-          cp lib/ld-musl-aarch64.so.1 $sysroot_lib/libc.so
-          echo '!<arch>' > $sysroot_lib/libdl.a
-          (cd $crt && cp crti.o crtbeginS.o crtendS.o crtn.o -t $sysroot_lib)
-          echo "export CARGO_BUILD_TARGET=aarch64-unknown-linux-musl" >> saved_env
-          echo "export RUSTFLAGS='-Ctarget-cpu=apple-m1 -Ctarget-feature=-crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=--target=aarch64-unknown-linux-musl -Clink-arg=--sysroot=/usr/aarch64-unknown-linux-musl -Clink-arg=-lc'" >> saved_env
-      - name: Build Linux Artifacts
-        run: |
-          source ./saved_env
-          bash ci/manylinux_node/build_vectordb.sh ${{ matrix.config.arch }}
-      - name: Upload Linux Artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: node-native-linux-${{ matrix.config.arch }}-musl
-          path: |
-            node/dist/lancedb-vectordb-linux*.tgz
 
-  nodejs-linux-gnu:
+  nodejs-linux:
     name: lancedb (${{ matrix.config.arch}}-unknown-linux-gnu
     runs-on: ${{ matrix.config.runner }}
     # Only runs on tags that matches the make-release action
@@ -230,7 +178,7 @@ jobs:
       - name: Upload Linux Artifacts
         uses: actions/upload-artifact@v4
         with:
-          name: nodejs-native-linux-${{ matrix.config.arch }}-gnu
+          name: nodejs-native-linux-${{ matrix.config.arch }}
          path: |
            nodejs/dist/*.node
       # The generic files are the same in all distros so we just pick
@@ -244,62 +192,6 @@ jobs:
           nodejs/dist/*
           !nodejs/dist/*.node
 
-  nodejs-linux-musl:
-    name: lancedb (${{ matrix.config.arch}}-unknown-linux-musl
-    runs-on: ubuntu-latest
-    container: alpine:edge
-    # Only runs on tags that matches the make-release action
-    if: startsWith(github.ref, 'refs/tags/v')
-    strategy:
-      fail-fast: false
-      matrix:
-        config:
-          - arch: x86_64
-          - arch: aarch64
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Install common dependencies
-        run: |
-          apk add protobuf-dev curl clang mold grep npm bash openssl-dev openssl-libs-static
-          curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y --default-toolchain 1.80.0
-          echo "source $HOME/.cargo/env" >> saved_env
-          echo "export CC=clang" >> saved_env
-          echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=-crt-static,+avx2,+fma,+f16c -Clinker=clang -Clink-arg=-fuse-ld=mold'" >> saved_env
-          echo "export X86_64_UNKNOWN_LINUX_MUSL_OPENSSL_INCLUDE_DIR=/usr/include" >> saved_env
-          echo "export X86_64_UNKNOWN_LINUX_MUSL_OPENSSL_LIB_DIR=/usr/lib" >> saved_env
-      - name: Configure aarch64 build
-        if: ${{ matrix.config.arch == 'aarch64' }}
-        run: |
-          source "$HOME/.cargo/env"
-          rustup target add aarch64-unknown-linux-musl --toolchain 1.80.0
-          crt=$(realpath $(dirname $(rustup which rustc))/../lib/rustlib/aarch64-unknown-linux-musl/lib/self-contained)
-          sysroot_lib=/usr/aarch64-unknown-linux-musl/usr/lib
-          apk_url=https://dl-cdn.alpinelinux.org/alpine/latest-stable/main/aarch64/
-          curl -sSf $apk_url > apk_list
-          for pkg in gcc libgcc musl openssl-dev openssl-libs-static; do curl -sSf $apk_url$(cat apk_list | grep -oP '(?<=")'$pkg'-\d.*?(?=")') | tar zxf -; done
-          mkdir -p $sysroot_lib
-          echo 'GROUP ( libgcc_s.so.1 -lgcc )' > $sysroot_lib/libgcc_s.so
-          cp usr/lib/libgcc_s.so.1 $sysroot_lib
-          cp usr/lib/gcc/aarch64-alpine-linux-musl/*/libgcc.a $sysroot_lib
-          cp lib/ld-musl-aarch64.so.1 $sysroot_lib/libc.so
-          echo '!<arch>' > $sysroot_lib/libdl.a
-          (cd $crt && cp crti.o crtbeginS.o crtendS.o crtn.o -t $sysroot_lib)
-          echo "export CARGO_BUILD_TARGET=aarch64-unknown-linux-musl" >> saved_env
-          echo "export RUSTFLAGS='-Ctarget-feature=-crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=--target=aarch64-unknown-linux-musl -Clink-arg=--sysroot=/usr/aarch64-unknown-linux-musl -Clink-arg=-lc'" >> saved_env
-          echo "export AARCH64_UNKNOWN_LINUX_MUSL_OPENSSL_INCLUDE_DIR=$(realpath usr/include)" >> saved_env
-          echo "export AARCH64_UNKNOWN_LINUX_MUSL_OPENSSL_LIB_DIR=$(realpath usr/lib)" >> saved_env
-      - name: Build Linux Artifacts
-        run: |
-          source ./saved_env
-          bash ci/manylinux_node/build_lancedb.sh ${{ matrix.config.arch }}
-      - name: Upload Linux Artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: nodejs-native-linux-${{ matrix.config.arch }}-musl
-          path: |
-            nodejs/dist/*.node
 
   node-windows:
     name: vectordb ${{ matrix.target }}
     runs-on: windows-2022
@@ -334,109 +226,108 @@ jobs:
           path: |
             node/dist/lancedb-vectordb-win32*.tgz
 
-  # TODO: re-enable once working https://github.com/lancedb/lancedb/pull/1831
-  # node-windows-arm64:
-  #   name: vectordb win32-arm64-msvc
-  #   runs-on: windows-4x-arm
-  #   if: startsWith(github.ref, 'refs/tags/v')
-  #   steps:
-  #     - uses: actions/checkout@v4
-  #     - name: Install Git
-  #       run: |
-  #         Invoke-WebRequest -Uri "https://github.com/git-for-windows/git/releases/download/v2.44.0.windows.1/Git-2.44.0-64-bit.exe" -OutFile "git-installer.exe"
-  #         Start-Process -FilePath "git-installer.exe" -ArgumentList "/VERYSILENT", "/NORESTART" -Wait
-  #       shell: powershell
-  #     - name: Add Git to PATH
-  #       run: |
-  #         Add-Content $env:GITHUB_PATH "C:\Program Files\Git\bin"
-  #         $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-  #       shell: powershell
-  #     - name: Configure Git symlinks
-  #       run: git config --global core.symlinks true
-  #     - uses: actions/checkout@v4
-  #     - uses: actions/setup-python@v5
-  #       with:
-  #         python-version: "3.13"
-  #     - name: Install Visual Studio Build Tools
-  #       run: |
-  #         Invoke-WebRequest -Uri "https://aka.ms/vs/17/release/vs_buildtools.exe" -OutFile "vs_buildtools.exe"
-  #         Start-Process -FilePath "vs_buildtools.exe" -ArgumentList "--quiet", "--wait", "--norestart", "--nocache", `
-  #           "--installPath", "C:\BuildTools", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.Tools.ARM64", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", `
-  #           "--add", "Microsoft.VisualStudio.Component.Windows11SDK.22621", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.ATL", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.ATLMFC", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.Llvm.Clang" -Wait
-  #       shell: powershell
-  #     - name: Add Visual Studio Build Tools to PATH
-  #       run: |
-  #         $vsPath = "C:\BuildTools\VC\Tools\MSVC"
-  #         $latestVersion = (Get-ChildItem $vsPath | Sort-Object {[version]$_.Name} -Descending)[0].Name
-  #         Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\arm64"
-  #         Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\x64"
-  #         Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\arm64"
-  #         Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\x64"
-  #         Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\Llvm\x64\bin"
+  node-windows-arm64:
+    name: vectordb win32-arm64-msvc
+    runs-on: windows-4x-arm
+    if: startsWith(github.ref, 'refs/tags/v')
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install Git
+        run: |
+          Invoke-WebRequest -Uri "https://github.com/git-for-windows/git/releases/download/v2.44.0.windows.1/Git-2.44.0-64-bit.exe" -OutFile "git-installer.exe"
+          Start-Process -FilePath "git-installer.exe" -ArgumentList "/VERYSILENT", "/NORESTART" -Wait
+        shell: powershell
+      - name: Add Git to PATH
+        run: |
+          Add-Content $env:GITHUB_PATH "C:\Program Files\Git\bin"
+          $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+        shell: powershell
+      - name: Configure Git symlinks
+        run: git config --global core.symlinks true
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.13"
+      - name: Install Visual Studio Build Tools
+        run: |
+          Invoke-WebRequest -Uri "https://aka.ms/vs/17/release/vs_buildtools.exe" -OutFile "vs_buildtools.exe"
+          Start-Process -FilePath "vs_buildtools.exe" -ArgumentList "--quiet", "--wait", "--norestart", "--nocache", `
+            "--installPath", "C:\BuildTools", `
+            "--add", "Microsoft.VisualStudio.Component.VC.Tools.ARM64", `
+            "--add", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", `
+            "--add", "Microsoft.VisualStudio.Component.Windows11SDK.22621", `
+            "--add", "Microsoft.VisualStudio.Component.VC.ATL", `
+            "--add", "Microsoft.VisualStudio.Component.VC.ATLMFC", `
+            "--add", "Microsoft.VisualStudio.Component.VC.Llvm.Clang" -Wait
+        shell: powershell
+      - name: Add Visual Studio Build Tools to PATH
+        run: |
+          $vsPath = "C:\BuildTools\VC\Tools\MSVC"
+          $latestVersion = (Get-ChildItem $vsPath | Sort-Object {[version]$_.Name} -Descending)[0].Name
+          Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\arm64"
+          Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\x64"
+          Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\arm64"
+          Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\x64"
+          Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\Llvm\x64\bin"
 
-  #         # Add MSVC runtime libraries to LIB
-  #         $env:LIB = "C:\BuildTools\VC\Tools\MSVC\$latestVersion\lib\arm64;" +
-  #           "C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\um\arm64;" +
-  #           "C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\ucrt\arm64"
-  #         Add-Content $env:GITHUB_ENV "LIB=$env:LIB"
+          # Add MSVC runtime libraries to LIB
+          $env:LIB = "C:\BuildTools\VC\Tools\MSVC\$latestVersion\lib\arm64;" +
+            "C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\um\arm64;" +
+            "C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\ucrt\arm64"
+          Add-Content $env:GITHUB_ENV "LIB=$env:LIB"
 
-  #         # Add INCLUDE paths
-  #         $env:INCLUDE = "C:\BuildTools\VC\Tools\MSVC\$latestVersion\include;" +
-  #           "C:\Program Files (x86)\Windows Kits\10\Include\10.0.22621.0\ucrt;" +
-  #           "C:\Program Files (x86)\Windows Kits\10\Include\10.0.22621.0\um;" +
-  #           "C:\Program Files (x86)\Windows Kits\10\Include\10.0.22621.0\shared"
-  #         Add-Content $env:GITHUB_ENV "INCLUDE=$env:INCLUDE"
-  #       shell: powershell
-  #     - name: Install Rust
-  #       run: |
-  #         Invoke-WebRequest https://win.rustup.rs/x86_64 -OutFile rustup-init.exe
-  #         .\rustup-init.exe -y --default-host aarch64-pc-windows-msvc
-  #       shell: powershell
-  #     - name: Add Rust to PATH
-  #       run: |
-  #         Add-Content $env:GITHUB_PATH "$env:USERPROFILE\.cargo\bin"
-  #       shell: powershell
+          # Add INCLUDE paths
+          $env:INCLUDE = "C:\BuildTools\VC\Tools\MSVC\$latestVersion\include;" +
+            "C:\Program Files (x86)\Windows Kits\10\Include\10.0.22621.0\ucrt;" +
+            "C:\Program Files (x86)\Windows Kits\10\Include\10.0.22621.0\um;" +
+            "C:\Program Files (x86)\Windows Kits\10\Include\10.0.22621.0\shared"
+          Add-Content $env:GITHUB_ENV "INCLUDE=$env:INCLUDE"
+        shell: powershell
+      - name: Install Rust
+        run: |
+          Invoke-WebRequest https://win.rustup.rs/x86_64 -OutFile rustup-init.exe
+          .\rustup-init.exe -y --default-host aarch64-pc-windows-msvc
+        shell: powershell
+      - name: Add Rust to PATH
+        run: |
+          Add-Content $env:GITHUB_PATH "$env:USERPROFILE\.cargo\bin"
+        shell: powershell
 
-  #     - uses: Swatinem/rust-cache@v2
-  #       with:
-  #         workspaces: rust
-  #     - name: Install 7-Zip ARM
-  #       run: |
-  #         New-Item -Path 'C:\7zip' -ItemType Directory
-  #         Invoke-WebRequest https://7-zip.org/a/7z2408-arm64.exe -OutFile C:\7zip\7z-installer.exe
-  #         Start-Process -FilePath C:\7zip\7z-installer.exe -ArgumentList '/S' -Wait
-  #       shell: powershell
-  #     - name: Add 7-Zip to PATH
-  #       run: Add-Content $env:GITHUB_PATH "C:\Program Files\7-Zip"
-  #       shell: powershell
-  #     - name: Install Protoc v21.12
-  #       working-directory: C:\
-  #       run: |
-  #         if (Test-Path 'C:\protoc') {
-  #           Write-Host "Protoc directory exists, skipping installation"
-  #           return
-  #         }
-  #         New-Item -Path 'C:\protoc' -ItemType Directory
-  #         Set-Location C:\protoc
-  #         Invoke-WebRequest https://github.com/protocolbuffers/protobuf/releases/download/v21.12/protoc-21.12-win64.zip -OutFile C:\protoc\protoc.zip
-  #         & 'C:\Program Files\7-Zip\7z.exe' x protoc.zip
-  #       shell: powershell
-  #     - name: Add Protoc to PATH
-  #       run: Add-Content $env:GITHUB_PATH "C:\protoc\bin"
-  #       shell: powershell
-  #     - name: Build Windows native node modules
-  #       run: .\ci\build_windows_artifacts.ps1 aarch64-pc-windows-msvc
-  #     - name: Upload Windows ARM64 Artifacts
-  #       uses: actions/upload-artifact@v4
-  #       with:
-  #         name: node-native-windows-arm64
-  #         path: |
-  #           node/dist/*.node
+      - uses: Swatinem/rust-cache@v2
+        with:
+          workspaces: rust
+      - name: Install 7-Zip ARM
+        run: |
+          New-Item -Path 'C:\7zip' -ItemType Directory
+          Invoke-WebRequest https://7-zip.org/a/7z2408-arm64.exe -OutFile C:\7zip\7z-installer.exe
+          Start-Process -FilePath C:\7zip\7z-installer.exe -ArgumentList '/S' -Wait
+        shell: powershell
+      - name: Add 7-Zip to PATH
+        run: Add-Content $env:GITHUB_PATH "C:\Program Files\7-Zip"
+        shell: powershell
+      - name: Install Protoc v21.12
+        working-directory: C:\
+        run: |
+          if (Test-Path 'C:\protoc') {
+            Write-Host "Protoc directory exists, skipping installation"
+            return
+          }
+          New-Item -Path 'C:\protoc' -ItemType Directory
+          Set-Location C:\protoc
+          Invoke-WebRequest https://github.com/protocolbuffers/protobuf/releases/download/v21.12/protoc-21.12-win64.zip -OutFile C:\protoc\protoc.zip
+          & 'C:\Program Files\7-Zip\7z.exe' x protoc.zip
+        shell: powershell
+      - name: Add Protoc to PATH
+        run: Add-Content $env:GITHUB_PATH "C:\protoc\bin"
+        shell: powershell
+      - name: Build Windows native node modules
+        run: .\ci\build_windows_artifacts.ps1 aarch64-pc-windows-msvc
+      - name: Upload Windows ARM64 Artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: node-native-windows-arm64
+          path: |
+            node/dist/*.node
 
   nodejs-windows:
     name: lancedb ${{ matrix.target }}
@@ -472,103 +363,102 @@ jobs:
           path: |
             nodejs/dist/*.node
 
-  # TODO: re-enable once working https://github.com/lancedb/lancedb/pull/1831
-  # nodejs-windows-arm64:
-  #   name: lancedb win32-arm64-msvc
-  #   runs-on: windows-4x-arm
-  #   if: startsWith(github.ref, 'refs/tags/v')
-  #   steps:
-  #     - uses: actions/checkout@v4
-  #     - name: Install Git
-  #       run: |
-  #         Invoke-WebRequest -Uri "https://github.com/git-for-windows/git/releases/download/v2.44.0.windows.1/Git-2.44.0-64-bit.exe" -OutFile "git-installer.exe"
-  #         Start-Process -FilePath "git-installer.exe" -ArgumentList "/VERYSILENT", "/NORESTART" -Wait
-  #       shell: powershell
-  #     - name: Add Git to PATH
-  #       run: |
-  #         Add-Content $env:GITHUB_PATH "C:\Program Files\Git\bin"
-  #         $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-  #       shell: powershell
-  #     - name: Configure Git symlinks
-  #       run: git config --global core.symlinks true
-  #     - uses: actions/checkout@v4
-  #     - uses: actions/setup-python@v5
-  #       with:
-  #         python-version: "3.13"
-  #     - name: Install Visual Studio Build Tools
-  #       run: |
-  #         Invoke-WebRequest -Uri "https://aka.ms/vs/17/release/vs_buildtools.exe" -OutFile "vs_buildtools.exe"
-  #         Start-Process -FilePath "vs_buildtools.exe" -ArgumentList "--quiet", "--wait", "--norestart", "--nocache", `
-  #           "--installPath", "C:\BuildTools", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.Tools.ARM64", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", `
-  #           "--add", "Microsoft.VisualStudio.Component.Windows11SDK.22621", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.ATL", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.ATLMFC", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.Llvm.Clang" -Wait
-  #       shell: powershell
-  #     - name: Add Visual Studio Build Tools to PATH
-  #       run: |
-  #         $vsPath = "C:\BuildTools\VC\Tools\MSVC"
-  #         $latestVersion = (Get-ChildItem $vsPath | Sort-Object {[version]$_.Name} -Descending)[0].Name
-  #         Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\arm64"
-  #         Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\x64"
-  #         Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\arm64"
-  #         Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\x64"
-  #         Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\Llvm\x64\bin"
+  nodejs-windows-arm64:
+    name: lancedb win32-arm64-msvc
+    runs-on: windows-4x-arm
+    if: startsWith(github.ref, 'refs/tags/v')
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install Git
+        run: |
+          Invoke-WebRequest -Uri "https://github.com/git-for-windows/git/releases/download/v2.44.0.windows.1/Git-2.44.0-64-bit.exe" -OutFile "git-installer.exe"
+          Start-Process -FilePath "git-installer.exe" -ArgumentList "/VERYSILENT", "/NORESTART" -Wait
+        shell: powershell
+      - name: Add Git to PATH
+        run: |
+          Add-Content $env:GITHUB_PATH "C:\Program Files\Git\bin"
+          $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+        shell: powershell
+      - name: Configure Git symlinks
+        run: git config --global core.symlinks true
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.13"
+      - name: Install Visual Studio Build Tools
+        run: |
+          Invoke-WebRequest -Uri "https://aka.ms/vs/17/release/vs_buildtools.exe" -OutFile "vs_buildtools.exe"
+          Start-Process -FilePath "vs_buildtools.exe" -ArgumentList "--quiet", "--wait", "--norestart", "--nocache", `
+            "--installPath", "C:\BuildTools", `
+            "--add", "Microsoft.VisualStudio.Component.VC.Tools.ARM64", `
+            "--add", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", `
+            "--add", "Microsoft.VisualStudio.Component.Windows11SDK.22621", `
+            "--add", "Microsoft.VisualStudio.Component.VC.ATL", `
+            "--add", "Microsoft.VisualStudio.Component.VC.ATLMFC", `
+            "--add", "Microsoft.VisualStudio.Component.VC.Llvm.Clang" -Wait
+        shell: powershell
+      - name: Add Visual Studio Build Tools to PATH
+        run: |
+          $vsPath = "C:\BuildTools\VC\Tools\MSVC"
+          $latestVersion = (Get-ChildItem $vsPath | Sort-Object {[version]$_.Name} -Descending)[0].Name
+          Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\arm64"
+          Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\x64"
+          Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\arm64"
+          Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\x64"
+          Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\Llvm\x64\bin"
 
-  #         $env:LIB = ""
-  #         Add-Content $env:GITHUB_ENV "LIB=C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\um\arm64;C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\ucrt\arm64"
-  #       shell: powershell
-  #     - name: Install Rust
-  #       run: |
-  #         Invoke-WebRequest https://win.rustup.rs/x86_64 -OutFile rustup-init.exe
-  #         .\rustup-init.exe -y --default-host aarch64-pc-windows-msvc
-  #       shell: powershell
-  #     - name: Add Rust to PATH
-  #       run: |
-  #         Add-Content $env:GITHUB_PATH "$env:USERPROFILE\.cargo\bin"
-  #       shell: powershell
+          $env:LIB = ""
+          Add-Content $env:GITHUB_ENV "LIB=C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\um\arm64;C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\ucrt\arm64"
+        shell: powershell
+      - name: Install Rust
+        run: |
+          Invoke-WebRequest https://win.rustup.rs/x86_64 -OutFile rustup-init.exe
+          .\rustup-init.exe -y --default-host aarch64-pc-windows-msvc
+        shell: powershell
+      - name: Add Rust to PATH
+        run: |
+          Add-Content $env:GITHUB_PATH "$env:USERPROFILE\.cargo\bin"
+        shell: powershell
 
-  #     - uses: Swatinem/rust-cache@v2
-  #       with:
-  #         workspaces: rust
-  #     - name: Install 7-Zip ARM
-  #       run: |
-  #         New-Item -Path 'C:\7zip' -ItemType Directory
-  #         Invoke-WebRequest https://7-zip.org/a/7z2408-arm64.exe -OutFile C:\7zip\7z-installer.exe
-  #         Start-Process -FilePath C:\7zip\7z-installer.exe -ArgumentList '/S' -Wait
-  #       shell: powershell
-  #     - name: Add 7-Zip to PATH
-  #       run: Add-Content $env:GITHUB_PATH "C:\Program Files\7-Zip"
-  #       shell: powershell
-  #     - name: Install Protoc v21.12
-  #       working-directory: C:\
-  #       run: |
-  #         if (Test-Path 'C:\protoc') {
-  #           Write-Host "Protoc directory exists, skipping installation"
-  #           return
-  #         }
-  #         New-Item -Path 'C:\protoc' -ItemType Directory
-  #         Set-Location C:\protoc
-  #         Invoke-WebRequest https://github.com/protocolbuffers/protobuf/releases/download/v21.12/protoc-21.12-win64.zip -OutFile C:\protoc\protoc.zip
-  #         & 'C:\Program Files\7-Zip\7z.exe' x protoc.zip
-  #       shell: powershell
-  #     - name: Add Protoc to PATH
-  #       run: Add-Content $env:GITHUB_PATH "C:\protoc\bin"
-  #       shell: powershell
-  #     - name: Build Windows native node modules
-  #       run: .\ci\build_windows_artifacts_nodejs.ps1 aarch64-pc-windows-msvc
-  #     - name: Upload Windows ARM64 Artifacts
-  #       uses: actions/upload-artifact@v4
-  #       with:
-  #         name: nodejs-native-windows-arm64
-  #         path: |
-  #           nodejs/dist/*.node
+      - uses: Swatinem/rust-cache@v2
+        with:
+          workspaces: rust
+      - name: Install 7-Zip ARM
+        run: |
+          New-Item -Path 'C:\7zip' -ItemType Directory
+          Invoke-WebRequest https://7-zip.org/a/7z2408-arm64.exe -OutFile C:\7zip\7z-installer.exe
+          Start-Process -FilePath C:\7zip\7z-installer.exe -ArgumentList '/S' -Wait
+        shell: powershell
+      - name: Add 7-Zip to PATH
+        run: Add-Content $env:GITHUB_PATH "C:\Program Files\7-Zip"
+        shell: powershell
+      - name: Install Protoc v21.12
+        working-directory: C:\
+        run: |
+          if (Test-Path 'C:\protoc') {
+            Write-Host "Protoc directory exists, skipping installation"
+            return
+          }
+          New-Item -Path 'C:\protoc' -ItemType Directory
+          Set-Location C:\protoc
+          Invoke-WebRequest https://github.com/protocolbuffers/protobuf/releases/download/v21.12/protoc-21.12-win64.zip -OutFile C:\protoc\protoc.zip
+          & 'C:\Program Files\7-Zip\7z.exe' x protoc.zip
+        shell: powershell
+      - name: Add Protoc to PATH
+        run: Add-Content $env:GITHUB_PATH "C:\protoc\bin"
+        shell: powershell
+      - name: Build Windows native node modules
+        run: .\ci\build_windows_artifacts_nodejs.ps1 aarch64-pc-windows-msvc
+      - name: Upload Windows ARM64 Artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: nodejs-native-windows-arm64
+          path: |
+            nodejs/dist/*.node
 
   release:
     name: vectordb NPM Publish
-    needs: [node, node-macos, node-linux-gnu, node-linux-musl, node-windows]
+    needs: [node, node-macos, node-linux, node-windows, node-windows-arm64]
     runs-on: ubuntu-latest
     # Only runs on tags that matches the make-release action
     if: startsWith(github.ref, 'refs/tags/v')

@@ -586,7 +476,7 @@ jobs:
         env:
           NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
         run: |
-          # Tag beta as "preview" instead of default "latest". See lancedb
+          # Tag beta as "preview" instead of default "latest". See lancedb
           # npm publish step for more info.
           if [[ $GITHUB_REF =~ refs/tags/v(.*)-beta.* ]]; then
            PUBLISH_ARGS="--tag preview"

@@ -608,7 +498,7 @@ jobs:
 
   release-nodejs:
     name: lancedb NPM Publish
-    needs: [nodejs-macos, nodejs-linux-gnu, nodejs-linux-musl, nodejs-windows]
+    needs: [nodejs-macos, nodejs-linux, nodejs-windows, nodejs-windows-arm64]
     runs-on: ubuntu-latest
     # Only runs on tags that matches the make-release action
     if: startsWith(github.ref, 'refs/tags/v')
18 Cargo.toml
@@ -18,18 +18,18 @@ repository = "https://github.com/lancedb/lancedb"
 description = "Serverless, low-latency vector database for AI applications"
 keywords = ["lancedb", "lance", "database", "vector", "search"]
 categories = ["database-implementations"]
-rust-version = "1.80.0" # TODO: lower this once we upgrade Lance again.
+rust-version = "1.80.0" # TODO: lower this once we upgrade Lance again.
 
 [workspace.dependencies]
-lance = { "version" = "=0.20.0", "features" = [
+lance = { "version" = "=0.19.2", "features" = [
   "dynamodb",
-], git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
-lance-index = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
-lance-linalg = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
-lance-table = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
-lance-testing = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
-lance-datafusion = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
-lance-encoding = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
+], git = "https://github.com/lancedb/lance.git", tag = "v0.19.2-beta.3" }
+lance-index = { "version" = "=0.19.2", git = "https://github.com/lancedb/lance.git", tag = "v0.19.2-beta.3" }
+lance-linalg = { "version" = "=0.19.2", git = "https://github.com/lancedb/lance.git", tag = "v0.19.2-beta.3" }
+lance-table = { "version" = "=0.19.2", git = "https://github.com/lancedb/lance.git", tag = "v0.19.2-beta.3" }
+lance-testing = { "version" = "=0.19.2", git = "https://github.com/lancedb/lance.git", tag = "v0.19.2-beta.3" }
+lance-datafusion = { "version" = "=0.19.2", git = "https://github.com/lancedb/lance.git", tag = "v0.19.2-beta.3" }
+lance-encoding = { "version" = "=0.19.2", git = "https://github.com/lancedb/lance.git", tag = "v0.19.2-beta.3" }
 # Note that this one does not include pyarrow
 arrow = { version = "52.2", optional = false }
 arrow-array = "52.2"
@@ -11,8 +11,7 @@ fi
 export OPENSSL_STATIC=1
 export OPENSSL_INCLUDE_DIR=/usr/local/include/openssl
 
-#Alpine doesn't have .bashrc
-FILE=$HOME/.bashrc && test -f $FILE && source $FILE
+source $HOME/.bashrc
 
 cd nodejs
 npm ci
@@ -5,14 +5,13 @@ ARCH=${1:-x86_64}
 
 if [ "$ARCH" = "x86_64" ]; then
   export OPENSSL_LIB_DIR=/usr/local/lib64/
-else
+else
   export OPENSSL_LIB_DIR=/usr/local/lib/
 fi
 export OPENSSL_STATIC=1
 export OPENSSL_INCLUDE_DIR=/usr/local/include/openssl
 
-#Alpine doesn't have .bashrc
-FILE=$HOME/.bashrc && test -f $FILE && source $FILE
+source $HOME/.bashrc
 
 cd node
 npm ci
@@ -138,7 +138,6 @@ nav:
           - Jina Reranker: reranking/jina.md
           - OpenAI Reranker: reranking/openai.md
           - AnswerDotAi Rerankers: reranking/answerdotai.md
-          - Voyage AI Rerankers: reranking/voyageai.md
           - Building Custom Rerankers: reranking/custom_reranker.md
           - Example: notebooks/lancedb_reranking.ipynb
       - Filtering: sql.md
@@ -166,7 +165,6 @@ nav:
           - Jina Embeddings: embeddings/available_embedding_models/text_embedding_functions/jina_embedding.md
           - AWS Bedrock Text Embedding Functions: embeddings/available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md
           - IBM watsonx.ai Embeddings: embeddings/available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md
-          - Voyage AI Embeddings: embeddings/available_embedding_models/text_embedding_functions/voyageai_embedding.md
       - Multimodal Embedding Functions:
           - OpenClip embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/openclip_embedding.md
          - Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md
@@ -224,10 +222,12 @@ nav:
   - 🦀 Rust: https://docs.rs/lancedb/latest/lancedb/
   - ☁️ LanceDB Cloud:
       - Overview: cloud/index.md
-      - API reference:
-          - 🐍 Python: python/saas-python.md
-          - 👾 JavaScript: javascript/modules.md
-          - REST API: cloud/rest.md
+      - Quickstart: cloud/quickstart.md
+      - Best Practices: cloud/best_practices.md
+      # - API reference:
+      #     - 🐍 Python: python/saas-python.md
+      #     - 👾 JavaScript: javascript/modules.md
+      #     - REST API: cloud/rest.md
 
 - Quick start: basic.md
 - Concepts:
@@ -350,10 +350,17 @@ nav:
   - Rust: https://docs.rs/lancedb/latest/lancedb/index.html
   - LanceDB Cloud:
      - Overview: cloud/index.md
-     - API reference:
-         - 🐍 Python: python/saas-python.md
-         - 👾 JavaScript: javascript/modules.md
-         - REST API: cloud/rest.md
+     - Quickstart: cloud/quickstart.md
+     - Work with data:
+         - Ingest data: cloud/ingest_data.md
+         - Update data: cloud/update_data.md
+         - Build an index: cloud/build_index.md
+         - Vector search: cloud/vector_search.md
+         - Full-text search: cloud/full_text_search.md
+         - Hybrid search: cloud/hybrid_search.md
+         - Metadata Filtering: cloud/metadata_filtering.md
+     - Best Practices: cloud/best_practices.md
+     # - REST API: cloud/rest.md
 
 extra_css:
   - styles/global.css
21 docs/package-lock.json generated
@@ -19,7 +19,7 @@
     },
     "../node": {
       "name": "vectordb",
-      "version": "0.12.0",
+      "version": "0.4.6",
       "cpu": [
         "x64",
         "arm64"
@@ -31,7 +31,9 @@
         "win32"
       ],
       "dependencies": {
+        "@apache-arrow/ts": "^14.0.2",
         "@neon-rs/load": "^0.0.74",
+        "apache-arrow": "^14.0.2",
         "axios": "^1.4.0"
       },
       "devDependencies": {
@@ -44,7 +46,6 @@
         "@types/temp": "^0.9.1",
         "@types/uuid": "^9.0.3",
         "@typescript-eslint/eslint-plugin": "^5.59.1",
-        "apache-arrow-old": "npm:apache-arrow@13.0.0",
         "cargo-cp-artifact": "^0.1",
         "chai": "^4.3.7",
         "chai-as-promised": "^7.1.1",
@@ -61,19 +62,15 @@
         "ts-node-dev": "^2.0.0",
         "typedoc": "^0.24.7",
         "typedoc-plugin-markdown": "^3.15.3",
-        "typescript": "^5.1.0",
+        "typescript": "*",
         "uuid": "^9.0.0"
       },
       "optionalDependencies": {
-        "@lancedb/vectordb-darwin-arm64": "0.12.0",
-        "@lancedb/vectordb-darwin-x64": "0.12.0",
-        "@lancedb/vectordb-linux-arm64-gnu": "0.12.0",
-        "@lancedb/vectordb-linux-x64-gnu": "0.12.0",
-        "@lancedb/vectordb-win32-x64-msvc": "0.12.0"
-      },
-      "peerDependencies": {
-        "@apache-arrow/ts": "^14.0.2",
-        "apache-arrow": "^14.0.2"
+        "@lancedb/vectordb-darwin-arm64": "0.4.6",
+        "@lancedb/vectordb-darwin-x64": "0.4.6",
+        "@lancedb/vectordb-linux-arm64-gnu": "0.4.6",
+        "@lancedb/vectordb-linux-x64-gnu": "0.4.6",
+        "@lancedb/vectordb-win32-x64-msvc": "0.4.6"
       }
     },
     "../node/node_modules/apache-arrow": {
@@ -277,15 +277,7 @@ Product quantization can lead to approximately `16 * sizeof(float32) / 1 = 64` t
 Higher number of partitions could lead to more efficient I/O during queries and better accuracy, but it takes much more time to train.
 On `SIFT-1M` dataset, our benchmark shows that keeping each partition 1K-4K rows lead to a good latency / recall.
 
-`num_sub_vectors` specifies how many Product Quantization (PQ) short codes to generate on each vector. The number should be a factor of the vector dimension. Because
+`num_sub_vectors` specifies how many Product Quantization (PQ) short codes to generate on each vector. Because
 PQ is a lossy compression of the original vector, a higher `num_sub_vectors` usually results in
-less space distortion, and thus yields better accuracy. However, a higher `num_sub_vectors` also causes heavier I/O and more PQ computation, and thus, higher latency. `dimension / num_sub_vectors` should be a multiple of 8 for optimum SIMD efficiency.
-
-!!! note
-    if `num_sub_vectors` is set to be greater than the vector dimension, you will see errors like `attempt to divide by zero`
-
-### How to choose `m` and `ef_construction` for `IVF_HNSW_*` index?
-
-`m` determines the number of connections a new node establishes with its closest neighbors upon entering the graph. Typically, `m` falls within the range of 5 to 48. Lower `m` values are suitable for low-dimensional data or scenarios where recall is less critical. Conversely, higher `m` values are beneficial for high-dimensional data or when high recall is required. In essence, a larger `m` results in a denser graph with increased connectivity, but at the expense of higher memory consumption.
-
-`ef_construction` balances build speed and accuracy. Higher values increase accuracy but slow down the build process. A typical range is 150 to 300. For good search results, a minimum value of 100 is recommended. In most cases, setting this value above 500 offers no additional benefit. Ensure that `ef_construction` is always set to a value equal to or greater than `ef` in the search phase
+less space distortion, and thus yields better accuracy. However, a higher `num_sub_vectors` also causes heavier I/O and
+more PQ computation, and thus, higher latency. `dimension / num_sub_vectors` should be a multiple of 8 for optimum SIMD efficiency.
20 docs/src/cloud/best_practices.md Normal file
@@ -0,0 +1,20 @@
This section provides a set of recommended best practices to help you get the most out of LanceDB Cloud. By following these guidelines, you can optimize your usage of LanceDB Cloud, improve performance, and ensure a smooth experience.

### Should the db connection be created once and kept open?
Yes! It is recommended to establish a single db connection and maintain it throughout your interaction with the tables within.

LanceDB uses `requests.Session()` for connection pooling, which automatically manages connection reuse and cleanup. This approach avoids the overhead of repeatedly establishing HTTP connections, significantly improving efficiency.
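A minimal sketch of this pattern, assuming the Python client and placeholder project URI, API key, and table name:

```python
import lancedb

# Connect once at process startup and reuse the handle everywhere.
db = lancedb.connect(
    "db://your-project",        # hypothetical project URI
    api_key="sk-your-api-key",  # hypothetical API key
    region="us-east-1",
)

# Table handles share the pooled HTTP connection.
table = db.open_table("my_table")
```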
### Should a single `open_table` call be made and maintained for subsequent table operations?
`table = db.open_table()` should be called once and used for all subsequent table operations. If there are changes to the opened table, `table` always reflects the latest version of the data.

### Row id

### What are the vector indexing types supported by LanceDB Cloud?
We support `IVF_PQ` and `IVF_HNSW_SQ` as the `index_type` which is passed to `create_index`. LanceDB Cloud tunes the indexing parameters automatically to achieve the best tradeoff between query latency and query quality.

### Do I need to do anything when there is new data added to a table with an existing index?
No! LanceDB Cloud triggers an asynchronous background job to index the new vectors. This process will either merge the new vectors into the existing index or initiate a complete re-indexing if needed.

There is a flag `fast_search` in `table.search()` that allows you to control whether the unindexed rows should be searched or not, as sketched below.
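A hedged illustration, assuming `fast_search` is accepted as a keyword argument of `table.search()` in the Python client and that `table` and `query_vector` already exist:

```python
# Assumed keyword: skip the unindexed tail for lower latency
# (results may miss rows added since the last index update).
hits = table.search(query_vector, fast_search=True).limit(10).to_list()
```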
64 docs/src/cloud/build_index.md Normal file
@@ -0,0 +1,64 @@
LanceDB Cloud supports **vector index**, **scalar index** and **full-text search index**. Compared to the open-source version, LanceDB Cloud focuses on **automation**:

- If there is a single vector column in the table, the vector column can be inferred from the schema and the index will be automatically created.

- Indexing parameters will be automatically tuned for the customer's data.

## Vector index
LanceDB has implemented state-of-the-art indexing algorithms (more about [IVF-PQ](https://lancedb.github.io/lancedb/concepts/index_ivfpq/) and [HNSW](https://lancedb.github.io/lancedb/concepts/index_hnsw/)). We currently
support _L2_, _Cosine_ and _Dot_ as distance calculation metrics. You can create multiple vector indices within a table.
=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_cloud.py:create_index"
    ```
=== "Typescript"

    ```typescript
    --8<-- "nodejs/examples/cloud.test.ts:imports"

    --8<-- "nodejs/examples/cloud.test.ts:connect_db_and_open_table"
    --8<-- "nodejs/examples/cloud.test.ts:create_index"
    ```
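For concreteness, a sketch of a vector index call in Python, assuming an open `table` with a `vector` column; the parameter names follow the remote client's `create_index`:

```python
# LanceDB Cloud tunes num_partitions / num_sub_vectors automatically.
table.create_index(
    metric="cosine",              # or "l2" / "dot"
    vector_column_name="vector",  # may be inferred if only one vector column exists
)
```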
## Scalar index
LanceDB Cloud and LanceDB Enterprise support several types of scalar indices to accelerate search over scalar columns.

- *BTREE*: The most common type is BTREE. This index is inspired by the btree data structure, although only the first few layers of the btree are cached in memory. It will perform well on columns with a large number of unique values and few rows per value.
- *BITMAP*: This index stores a bitmap for each unique value in the column. This index is useful for columns with a finite number of unique values and many rows per value.
    - For example, columns that represent "categories", "labels", or "tags"
- *LABEL_LIST*: A special index that is used to index list columns whose values have a finite set of possibilities.
    - For example, a column that contains lists of tags (e.g. ["tag1", "tag2", "tag3"]) can be indexed with a LABEL_LIST index.

You can create multiple scalar indices within a table.
=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_cloud.py:create_scalar_index"
    ```
=== "Typescript"

    ```typescript
    --8<-- "nodejs/examples/cloud.test.ts:imports"

    --8<-- "nodejs/examples/cloud.test.ts:connect_db_and_open_table"
    --8<-- "nodejs/examples/cloud.test.ts:create_scalar_index"
    ```
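As an illustration, a sketch in Python assuming hypothetical `user_id` and `category` columns and that `create_scalar_index` accepts an `index_type` keyword:

```python
table.create_scalar_index("user_id")                        # BTREE: high-cardinality columns
table.create_scalar_index("category", index_type="BITMAP")  # assumed keyword; low-cardinality columns
```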
## Full-text search index
We provide performant full-text search on LanceDB Cloud, allowing you to incorporate keyword-based search (based on BM25) in your retrieval solutions.
!!! note ""

    `use_tantivy` is not available with `create_fts_index` on LanceDB Cloud, as we use our native implementation, which has better performance compared to Tantivy.
=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_cloud.py:create_fts_index"
    ```
=== "Typescript"

    ```typescript
    --8<-- "nodejs/examples/cloud.test.ts:imports"

    --8<-- "nodejs/examples/cloud.test.ts:create_fts_index"
    ```
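A minimal sketch, assuming a text column named `text`:

```python
# Build the BM25-backed full-text search index on the "text" column.
table.create_fts_index("text")
```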
14 docs/src/cloud/full_text_search.md Normal file
@@ -0,0 +1,14 @@
Full-text search allows you to incorporate keyword-based search (based on BM25) in your retrieval solutions.
=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_cloud.py:full_text_search"
    ```
=== "Typescript"

    ```typescript
    --8<-- "nodejs/examples/cloud.test.ts:imports"

    --8<-- "nodejs/examples/cloud.test.ts:full_text_search"
    ```
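A hedged sketch of a keyword query in Python, assuming an FTS index already exists on the table:

```python
# BM25-ranked keyword search over the FTS-indexed column.
results = table.search("happy puppy", query_type="fts").limit(10).to_list()
```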
10 docs/src/cloud/hybrid_search.md Normal file
@@ -0,0 +1,10 @@
We support hybrid search that combines semantic and full-text search via a reranking algorithm of your choice, to get the best of both worlds. LanceDB comes with [built-in rerankers](https://lancedb.github.io/lancedb/reranking/) and you can implement your own _customized reranker_ as well.

=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_cloud.py:hybrid_search"
    ```
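A sketch of a hybrid query with a built-in reranker, assuming the table was created with a registered embedding function and already has an FTS index:

```python
from lancedb.rerankers import RRFReranker

# Run vector + BM25 retrieval, then fuse the two result lists
# with reciprocal rank fusion.
results = (
    table.search("fluffy companion", query_type="hybrid")
    .rerank(RRFReranker())
    .limit(10)
    .to_list()
)
```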
31 docs/src/cloud/ingest_data.md Normal file
@@ -0,0 +1,31 @@
## Insert data
The LanceDB Cloud SDK for data ingestion remains consistent with our open-source version, ensuring a seamless transition for existing OSS users.
!!! note "unsupported parameters in create_table"

    The following two parameters, `mode="overwrite"` and `exist_ok`, are expected to be added by November 2024.
=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_cloud.py:import-ingest-data"

    --8<-- "python/python/tests/docs/test_cloud.py:ingest_data"
    ```
=== "Typescript"

    ```typescript
    --8<-- "nodejs/examples/cloud.test.ts:imports"
    --8<-- "nodejs/examples/cloud.test.ts:ingest_data"
    ```
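For illustration, a sketch of a basic ingest in Python, assuming a hypothetical `products` table and in-memory rows:

```python
rows = [
    {"id": 1, "vector": [0.1, 0.2], "name": "widget"},
    {"id": 2, "vector": [0.3, 0.4], "name": "gadget"},
]

# The schema is inferred from the supplied data.
table = db.create_table("products", data=rows)

# Later inserts go through Table.add.
table.add([{"id": 3, "vector": [0.5, 0.6], "name": "gizmo"}])
```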
## Insert large datasets
It is recommended to use iterators to add large datasets in batches when creating your table in one go; a sketch of this pattern appears after the snippet below. Data will be automatically compacted for the best query performance.
!!! info "batch size"

    The batch size .
=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_cloud.py:ingest_data_in_batch"
    ```
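A hedged sketch of batched ingestion using a generator of PyArrow record batches; the table name, schema, and batch contents are illustrative:

```python
import pyarrow as pa

schema = pa.schema(
    [
        pa.field("id", pa.int64()),
        pa.field("vector", pa.list_(pa.float32(), 2)),
    ]
)

def batches(n_batches, batch_size):
    # Yield record batches lazily so the full dataset never sits in memory.
    for b in range(n_batches):
        ids = [b * batch_size + i for i in range(batch_size)]
        vecs = [[float(i), float(i) + 0.5] for i in ids]
        yield pa.RecordBatch.from_arrays(
            [pa.array(ids, type=pa.int64()),
             pa.array(vecs, type=schema.field("vector").type)],
            schema=schema,
        )

table = db.create_table("big_table", data=batches(100, 10_000), schema=schema)
```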
33 docs/src/cloud/metadata_filtering.md Normal file
@@ -0,0 +1,33 @@
LanceDB Cloud supports rich filtering of query results based on metadata fields.

By default, _post-filtering_ is performed on the top-k results returned by the vector search.
However, _pre-filtering_ is also an option that performs the filter prior to vector search.
This can be useful to narrow down the search space on a very large dataset to reduce query latency.

=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_cloud.py:filtering"
    ```
=== "Typescript"

    ```typescript
    --8<-- "nodejs/examples/cloud.test.ts:imports"

    --8<-- "nodejs/examples/cloud.test.ts:filtering"
    ```
We also support standard SQL expressions as predicates for filtering operations.
They can be used during vector search, update, and deletion operations.
=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_cloud.py:sql_filtering"
    ```
=== "Typescript"

    ```typescript
    --8<-- "nodejs/examples/cloud.test.ts:imports"

    --8<-- "nodejs/examples/cloud.test.ts:sql_filtering"
    ```
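A sketch of both filtering modes in Python, assuming hypothetical `category` and `price` metadata columns and an existing `query_vector`:

```python
# Pre-filter: apply the SQL predicate before the vector search runs.
pre = (
    table.search(query_vector)
    .where("category = 'sports' AND price >= 10.0", prefilter=True)
    .limit(10)
    .to_list()
)

# Post-filter (the default): filter the top-k results afterwards.
post = table.search(query_vector).where("category = 'sports'").limit(10).to_list()
```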
49 docs/src/cloud/update_data.md Normal file
@@ -0,0 +1,49 @@
LanceDB Cloud efficiently manages updates across many tables.
Currently, we offer _update_, _merge_insert_, and _delete_.

## update
=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_cloud.py:update_data"
    ```
=== "Typescript"

    ```typescript
    --8<-- "nodejs/examples/cloud.test.ts:imports"

    --8<-- "nodejs/examples/cloud.test.ts:connect_db_and_open_table"
    --8<-- "nodejs/examples/cloud.test.ts:update_data"
    ```
## merge insert
The merge insert operation can add rows, update rows, and remove rows, all in a single transaction.
It combines new data from a source table with existing data in a target table by using a join.
=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_cloud.py:merge_insert"
    ```
=== "Typescript"

    ```typescript
    --8<-- "nodejs/examples/cloud.test.ts:imports"

    --8<-- "nodejs/examples/cloud.test.ts:connect_db_and_open_table"
    --8<-- "nodejs/examples/cloud.test.ts:merge_insert"
    ```
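A sketch of an upsert in Python, assuming an `id` key column on the table:

```python
new_rows = [
    {"id": 1, "vector": [0.9, 0.1], "name": "widget v2"},   # matches -> updated
    {"id": 42, "vector": [0.2, 0.8], "name": "brand new"},  # no match -> inserted
]

# Join on "id": matched rows are updated, unmatched rows are inserted.
(
    table.merge_insert("id")
    .when_matched_update_all()
    .when_not_matched_insert_all()
    .execute(new_rows)
)
```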
## delete
=== "Python"

    ```python
    --8<-- "python/python/tests/docs/test_cloud.py:delete_data"
    ```
=== "Typescript"

    ```typescript
    --8<-- "nodejs/examples/cloud.test.ts:imports"

    --8<-- "nodejs/examples/cloud.test.ts:connect_db_and_open_table"
    --8<-- "nodejs/examples/cloud.test.ts:delete_data"
    ```
21 docs/src/cloud/vector_search.md Normal file
@@ -0,0 +1,21 @@
Users can also tune the following parameters for better search quality.

- [nprobes](https://lancedb.github.io/lancedb/js/classes/VectorQuery/#nprobes):
  the number of partitions to search (probe).
- [refine factor](https://lancedb.github.io/lancedb/js/classes/VectorQuery/#refinefactor):
  a multiplier to control how many additional rows are taken during the refine step.

A short sketch of these knobs follows the snippets below.

[Metadata filtering](filtering) combined with the vector search is also supported.
=== "Python"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_cloud.py:vector_search"
|
||||
```
|
||||
=== "Typescript"
|
||||
|
||||
```typescript
|
||||
--8<-- "nodejs/examples/cloud.test.ts:imports"
|
||||
|
||||
--8<-- "nodejs/examples/cloud.test.ts:vector_search"
|
||||
```
|
||||
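A hedged sketch of tuning both knobs in Python, assuming an indexed `table` and a `query_vector`:

```python
# More probes -> higher recall at higher latency; refine_factor re-ranks
# refine_factor * limit candidates using exact distances.
results = (
    table.search(query_vector)
    .nprobes(20)
    .refine_factor(5)
    .limit(10)
    .to_list()
)
```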
@@ -57,13 +57,6 @@ Then the greedy search routine operates as follows:
 
 ## Usage
 
-There are three key parameters to set when constructing an HNSW index:
-
-* `metric`: Use an `L2` euclidean distance metric. We also support `dot` and `cosine` distance.
-* `m`: The number of neighbors to select for each vector in the HNSW graph.
-* `ef_construction`: The number of candidates to evaluate during the construction of the HNSW graph.
-
-
 We can combine the above concepts to understand how to build and query an HNSW index in LanceDB.
 
 ### Construct index
@@ -58,10 +58,8 @@ In Python, the index can be created as follows:
 # Make sure you have enough data in the table for an effective training step
 tbl.create_index(metric="L2", num_partitions=256, num_sub_vectors=96)
 ```
-!!! note
-    `num_partitions`=256 and `num_sub_vectors`=96 does not work for every dataset. Those values needs to be adjusted for your particular dataset.
 
-The `num_partitions` is usually chosen to target a particular number of vectors per partition. `num_sub_vectors` is typically chosen based on the desired recall and the dimensionality of the vector. See [here](../ann_indexes.md/#how-to-choose-num_partitions-and-num_sub_vectors-for-ivf_pq-index) for best practices on choosing these parameters.
+The `num_partitions` is usually chosen to target a particular number of vectors per partition. `num_sub_vectors` is typically chosen based on the desired recall and the dimensionality of the vector. See the [FAQs](#faq) below for best practices on choosing these parameters.
 
 ### Query the index
@@ -20,7 +20,7 @@ Supported parameters (to be passed in `create` method) are:
 
 | Parameter | Type | Default Value | Description |
 |---|---|--------|---------|
-| `name` | `str` | `None` | The model ID of the model to use. Supported base models for Text Embeddings: voyage-3, voyage-3-lite, voyage-finance-2, voyage-multilingual-2, voyage-law-2, voyage-code-2 |
+| `name` | `str` | `"voyage-3"` | The model ID of the model to use. Supported base models for Text Embeddings: voyage-3, voyage-3-lite, voyage-finance-2, voyage-multilingual-2, voyage-law-2, voyage-code-2 |
 | `input_type` | `str` | `None` | Type of the input text. Default to None. Other options: query, document. |
 | `truncation` | `bool` | `True` | Whether to truncate the input texts to fit within the context length. |
@@ -53,7 +53,6 @@ These functions are registered by default to handle text embeddings.
 | [**Jina Embeddings**](available_embedding_models/text_embedding_functions/jina_embedding.md "jina") | 🔗 World-class embedding models to improve your search and RAG systems. You will need **jina api key**. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/jina.png" alt="Jina Icon" width="90" height="35">](available_embedding_models/text_embedding_functions/jina_embedding.md) |
 | [**AWS Bedrock Functions**](available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md "bedrock-text") | ☁️ AWS Bedrock supports multiple base models for generating text embeddings. You need to setup the AWS credentials to use this embedding function. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/aws_bedrock.png" alt="AWS Bedrock Icon" width="120" height="35">](available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md) |
 | [**IBM Watsonx.ai**](available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md "watsonx") | 💡 Generate text embeddings using IBM's watsonx.ai platform. **Note**: watsonx.ai library is an optional dependency. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/watsonx.png" alt="Watsonx Icon" width="140" height="35">](available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md) |
-| [**VoyageAI Embeddings**](available_embedding_models/text_embedding_functions/voyageai_embedding.md "voyageai") | 🌕 Voyage AI provides cutting-edge embedding and rerankers. This will help you get started with **VoyageAI** embedding models using LanceDB. Using voyageai API requires voyageai package. Install it via `pip`. | [<img src="https://www.voyageai.com/logo.svg" alt="VoyageAI Icon" width="140" height="35">](available_embedding_models/text_embedding_functions/voyageai_embedding.md) |

@@ -67,7 +66,6 @@ These functions are registered by default to handle text embeddings.
 [jina-key]: "jina"
 [aws-key]: "bedrock-text"
 [watsonx-key]: "watsonx"
-[voyageai-key]: "voyageai"
 
 ## Multi-modal Embedding Functions🖼️
@@ -114,45 +114,12 @@ table.create_fts_index("text",
 
 LanceDB full text search supports to filter the search results by a condition, both pre-filtering and post-filtering are supported.
 
-This can be invoked via the familiar `where` syntax.
-
-With pre-filtering:
+This can be invoked via the familiar `where` syntax:
 
 === "Python"
 
     ```python
-    table.search("puppy").limit(10).where("meta='foo'", prefilte=True).to_list()
-    ```
-
-=== "TypeScript"
-
-    ```typescript
-    await tbl
-      .search("puppy")
-      .select(["id", "doc"])
-      .limit(10)
-      .where("meta='foo'")
-      .prefilter(true)
-      .toArray();
-    ```
-
-=== "Rust"
-
-    ```rust
-    table
-        .query()
-        .full_text_search(FullTextSearchQuery::new("puppy".to_owned()))
-        .select(lancedb::query::Select::Columns(vec!["doc".to_owned()]))
-        .limit(10)
-        .only_if("meta='foo'")
-        .execute()
-        .await?;
-    ```
-
-With post-filtering:
-=== "Python"
-
-    ```python
-    table.search("puppy").limit(10).where("meta='foo'", prefilte=False).to_list()
+    table.search("puppy").limit(10).where("meta='foo'").to_list()
     ```
 
 === "TypeScript"
@@ -163,7 +130,6 @@ With post-filtering:
|
||||
.select(["id", "doc"])
|
||||
.limit(10)
|
||||
.where("meta='foo'")
|
||||
.prefilter(false)
|
||||
.toArray();
|
||||
```
|
||||
|
||||
@@ -174,7 +140,6 @@ With post-filtering:
|
||||
.query()
|
||||
.full_text_search(FullTextSearchQuery::new(words[0].to_owned()))
|
||||
.select(lancedb::query::Select::Columns(vec!["doc".to_owned()]))
|
||||
.postfilter()
|
||||
.limit(10)
|
||||
.only_if("meta='foo'")
|
||||
.execute()
|
||||
@@ -195,35 +160,3 @@ To search for a phrase, the index must be created with `with_position=True`:
|
||||
table.create_fts_index("text", use_tantivy=False, with_position=True)
|
||||
```
|
||||
This will allow you to search for phrases, but it will also significantly increase the index size and indexing time.
|
||||
|
||||
|
||||
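Once the index stores positions, a phrase can be queried by quoting it. A minimal sketch (table and text are assumed placeholders):

```python
# double quotes inside the query string request a phrase match
results = table.search('"happy puppy"').limit(10).to_list()
```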
## Incremental indexing

LanceDB supports incremental indexing, which means you can add new records to the table without reindexing the entire table.

This can make the query more efficient, especially when the table is large and the new records are relatively small.

=== "Python"

    ```python
    table.add([{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"}])
    table.optimize()
    ```

=== "TypeScript"

    ```typescript
    await tbl.add([{ vector: [3.1, 4.1], text: "Frodo was a happy puppy" }]);
    await tbl.optimize();
    ```

=== "Rust"

    ```rust
    let more_data: Box<dyn RecordBatchReader + Send> = create_some_records()?;
    tbl.add(more_data).execute().await?;
    tbl.optimize(OptimizeAction::All).execute().await?;
    ```

!!! note

    New data added after creating the FTS index will appear in search results while the incremental index is still in progress, but with increased latency due to a flat search on the unindexed portion. LanceDB Cloud automates this merging process, minimizing the impact on search speed.

@@ -153,7 +153,9 @@ table.create_fts_index(["title", "content"], use_tantivy=True, writer_heap_size=

## Current limitations

1. New data added after creating the FTS index will appear in search results, but with increased latency due to a flat search on the unindexed portion. Re-indexing with `create_fts_index` will reduce latency. LanceDB Cloud automates this merging process, minimizing the impact on search speed.
1. Currently we do not yet support incremental writes.
   If you add data after FTS index creation, it won't be reflected
   in search results until you do a full reindex.

2. We currently only support local filesystem paths for the FTS index.
   This is a tantivy limitation. We've implemented an object store plugin

@@ -274,7 +274,7 @@ table = db.create_table(table_name, schema=Content)

Sometimes your data model may contain nested objects.
For example, you may want to store the document string
and the document source name as a nested Document object:
and the document soure name as a nested Document object:

```python
class Document(BaseModel):

@@ -466,7 +466,7 @@ You can create an empty table for scenarios where you want to add data to the ta

## Adding to a table

After a table has been created, you can always add more data to it using the `add` method
After a table has been created, you can always add more data to it usind the `add` method

=== "Python"
    You can add any of the valid data structures accepted by a LanceDB table, i.e., `dict`, `list[dict]`, `pd.DataFrame`, or `Iterator[pa.RecordBatch]`. Below are some examples.
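As a brief illustrative sketch (column names assumed), a `pd.DataFrame` can be appended directly:

```python
import pandas as pd

df = pd.DataFrame({
    "vector": [[1.3, 1.4], [9.5, 56.2]],
    "item": ["fizz", "buzz"],
    "price": [100.0, 200.0],
})
table.add(df)
```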
@@ -535,7 +535,7 @@ After a table has been created, you can always add more data to it using the `ad
    ```

??? "Ingesting Pydantic models with LanceDB embedding API"
    When using LanceDB's embedding API, you can add Pydantic models directly to the table. LanceDB will automatically convert the `vector` field to a vector before adding it to the table. You need to specify the default value of `vector` field as None to allow LanceDB to automatically vectorize the data.
    When using LanceDB's embedding API, you can add Pydantic models directly to the table. LanceDB will automatically convert the `vector` field to a vector before adding it to the table. You need to specify the default value of `vector` feild as None to allow LanceDB to automatically vectorize the data.

    ```python
    import lancedb

@@ -790,27 +790,6 @@ Use the `drop_table()` method on the database to remove a table.

This permanently removes the table and is not recoverable, unlike deleting rows.
If the table does not exist an exception is raised.

## Handling bad vectors

In LanceDB Python, you can use the `on_bad_vectors` parameter to choose how
invalid vector values are handled. Invalid vectors are vectors that are not valid
because:

1. They are the wrong dimension
2. They contain NaN values
3. They are null but are on a non-nullable field

By default, LanceDB will raise an error if it encounters a bad vector. You can
also choose one of the following options (see the sketch after this list):

* `drop`: Ignore rows with bad vectors
* `fill`: Replace bad values (NaNs) or missing values (too few dimensions) with
  the fill value specified in the `fill_value` parameter. An input like
  `[1.0, NaN, 3.0]` will be replaced with `[1.0, 0.0, 3.0]` if `fill_value=0.0`.
* `null`: Replace bad vectors with null (only works if the column is nullable).
  A bad vector `[1.0, NaN, 3.0]` will be replaced with `null` if the column is
  nullable. If the vector column is non-nullable, then bad vectors will cause an
  error
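A minimal sketch of these options (the data values are assumed for illustration):

```python
bad_data = [{"vector": [1.0, float("nan"), 3.0], "item": "foo"}]

# replace NaNs with 0.0 instead of raising
table.add(bad_data, on_bad_vectors="fill", fill_value=0.0)

# or silently drop offending rows
table.add(bad_data, on_bad_vectors="drop")
```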
## Consistency

@@ -880,4 +859,4 @@ There are three possible settings for `read_consistency_interval`:
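A short sketch of tuning this on connect (the five-second interval is an arbitrary example):

```python
from datetime import timedelta

import lancedb

# re-check the latest table version at most every 5 seconds
db = lancedb.connect(
    "data/sample-lancedb",
    read_consistency_interval=timedelta(seconds=5),
)
```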
Learn the best practices on creating an ANN index and getting the most out of it.

[^1]: The `vectordb` package is a legacy package that is deprecated in favor of `@lancedb/lancedb`. The `vectordb` package will continue to receive bug fixes and security updates until September 2024. We recommend all new projects use `@lancedb/lancedb`. See the [migration guide](../migration.md) for more information.
[^1]: The `vectordb` package is a legacy package that is deprecated in favor of `@lancedb/lancedb`. The `vectordb` package will continue to receive bug fixes and security updates until September 2024. We recommend all new projects use `@lancedb/lancedb`. See the [migration guide](migration.md) for more information.

@@ -6,9 +6,6 @@ This re-ranker uses the [Cohere](https://cohere.ai/) API to rerank the search re

!!! note
    Supported Query Types: Hybrid, Vector, FTS

```shell
pip install cohere
```

```python
import numpy
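# Editor's sketch of typical usage (table and query are assumed placeholders):
from lancedb.rerankers import CohereReranker

reranker = CohereReranker(model_name="rerank-english-v3.0")
results = (
    table.search("relevant documents")
    .rerank(reranker=reranker)
    .to_list()
)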
@@ -9,7 +9,6 @@ LanceDB comes with some built-in rerankers. Some of the rerankers that are avail

| `CrossEncoderReranker` | Uses a cross-encoder model to rerank search results | Vector, FTS, Hybrid |
| `ColbertReranker` | Uses a colbert model to rerank search results | Vector, FTS, Hybrid |
| `OpenaiReranker`(Experimental) | Uses OpenAI's chat model to rerank search results | Vector, FTS, Hybrid |
| `VoyageAIReranker` | Uses voyageai Reranker API to rerank results | Vector, FTS, Hybrid |

## Using a Reranker
@@ -74,7 +73,6 @@ LanceDB comes with some built-in rerankers. Here are some of the rerankers that

- [Jina Reranker](./jina.md)
- [AnswerDotAI Rerankers](./answerdotai.md)
- [Reciprocal Rank Fusion Reranker](./rrf.md)
- [VoyageAI Reranker](./voyageai.md)

## Creating Custom Rerankers

@@ -22,7 +22,8 @@ excluded_globs = [
    "../src/embeddings/available_embedding_models/text_embedding_functions/*.md",
    "../src/embeddings/available_embedding_models/multimodal_embedding_functions/*.md",
    "../src/rag/*.md",
    "../src/rag/advanced_techniques/*.md"
    "../src/rag/advanced_techniques/*.md",
    "../src/cloud/*.md"
]

@@ -8,7 +8,7 @@
  <parent>
    <groupId>com.lancedb</groupId>
    <artifactId>lancedb-parent</artifactId>
    <version>0.13.1-beta.0</version>
    <version>0.13.0-beta.1</version>
    <relativePath>../pom.xml</relativePath>
  </parent>

@@ -6,7 +6,7 @@

  <groupId>com.lancedb</groupId>
  <artifactId>lancedb-parent</artifactId>
  <version>0.13.1-beta.0</version>
  <version>0.13.0-beta.1</version>
  <packaging>pom</packaging>

  <name>LanceDB Parent</name>

84
node/package-lock.json
generated
@@ -1,12 +1,12 @@
{
  "name": "vectordb",
  "version": "0.13.1-beta.0",
  "version": "0.13.0-beta.1",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "vectordb",
      "version": "0.13.1-beta.0",
      "version": "0.13.0-beta.1",
      "cpu": [
        "x64",
        "arm64"
@@ -52,14 +52,12 @@
        "uuid": "^9.0.0"
      },
      "optionalDependencies": {
        "@lancedb/vectordb-darwin-arm64": "0.13.1-beta.0",
        "@lancedb/vectordb-darwin-x64": "0.13.1-beta.0",
        "@lancedb/vectordb-linux-arm64-gnu": "0.13.1-beta.0",
        "@lancedb/vectordb-linux-arm64-musl": "0.13.1-beta.0",
        "@lancedb/vectordb-linux-x64-gnu": "0.13.1-beta.0",
        "@lancedb/vectordb-linux-x64-musl": "0.13.1-beta.0",
        "@lancedb/vectordb-win32-arm64-msvc": "0.13.1-beta.0",
        "@lancedb/vectordb-win32-x64-msvc": "0.13.1-beta.0"
        "@lancedb/vectordb-darwin-arm64": "0.13.0-beta.1",
        "@lancedb/vectordb-darwin-x64": "0.13.0-beta.1",
        "@lancedb/vectordb-linux-arm64-gnu": "0.13.0-beta.1",
        "@lancedb/vectordb-linux-x64-gnu": "0.13.0-beta.1",
        "@lancedb/vectordb-win32-arm64-msvc": "0.13.0-beta.1",
        "@lancedb/vectordb-win32-x64-msvc": "0.13.0-beta.1"
      },
      "peerDependencies": {
        "@apache-arrow/ts": "^14.0.2",
@@ -329,6 +327,66 @@
        "@jridgewell/sourcemap-codec": "^1.4.10"
      }
    },
    "node_modules/@lancedb/vectordb-darwin-arm64": {
      "version": "0.13.0-beta.1",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.13.0-beta.1.tgz",
      "integrity": "sha512-beOrf6selCzzhLgDG8Nibma4nO/CSnA1wUKRmlJHEPtGcg7PW18z6MP/nfwQMpMR/FLRfTo8pPTbpzss47MiQQ==",
      "cpu": [
        "arm64"
      ],
      "optional": true,
      "os": [
        "darwin"
      ]
    },
    "node_modules/@lancedb/vectordb-darwin-x64": {
      "version": "0.13.0-beta.1",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.13.0-beta.1.tgz",
      "integrity": "sha512-YdraGRF/RbJRkKh0v3xT03LUhq47T2GtCvJ5gZp8wKlh4pHa8LuhLU0DIdvmG/DT5vuQA+td8HDkBm/e3EOdNg==",
      "cpu": [
        "x64"
      ],
      "optional": true,
      "os": [
        "darwin"
      ]
    },
    "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
      "version": "0.13.0-beta.1",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.13.0-beta.1.tgz",
      "integrity": "sha512-Pp0O/uhEqof1oLaWrNbv+Ym+q8kBkiCqaA5+2eAZ6a3e9U+Ozkvb0FQrHuyi9adJ5wKQ4NabyQE9BMf2bYpOnQ==",
      "cpu": [
        "arm64"
      ],
      "optional": true,
      "os": [
        "linux"
      ]
    },
    "node_modules/@lancedb/vectordb-linux-x64-gnu": {
      "version": "0.13.0-beta.1",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.13.0-beta.1.tgz",
      "integrity": "sha512-y8nxOye4egfWF5FGED9EfkmZ1O5HnRLU4a61B8m5JSpkivO9v2epTcbYN0yt/7ZFCgtqMfJ8VW4Mi7qQcz3KDA==",
      "cpu": [
        "x64"
      ],
      "optional": true,
      "os": [
        "linux"
      ]
    },
    "node_modules/@lancedb/vectordb-win32-x64-msvc": {
      "version": "0.13.0-beta.1",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.13.0-beta.1.tgz",
      "integrity": "sha512-STMDP9dp0TBLkB3ro+16pKcGy6bmbhRuEZZZ1Tp5P75yTPeVh4zIgWkidMdU1qBbEYM7xacnsp9QAwgLnMU/Ow==",
      "cpu": [
        "x64"
      ],
      "optional": true,
      "os": [
        "win32"
      ]
    },
    "node_modules/@neon-rs/cli": {
      "version": "0.0.160",
      "resolved": "https://registry.npmjs.org/@neon-rs/cli/-/cli-0.0.160.tgz",
@@ -1443,9 +1501,9 @@
      "dev": true
    },
    "node_modules/cross-spawn": {
      "version": "7.0.6",
      "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
      "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
      "version": "7.0.3",
      "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
      "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
      "dev": true,
      "dependencies": {
        "path-key": "^3.1.0",

@@ -1,6 +1,6 @@
{
  "name": "vectordb",
  "version": "0.13.1-beta.0",
  "version": "0.13.0-beta.1",
  "description": " Serverless, low-latency vector database for AI applications",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
@@ -84,20 +84,16 @@
    "aarch64-apple-darwin": "@lancedb/vectordb-darwin-arm64",
    "x86_64-unknown-linux-gnu": "@lancedb/vectordb-linux-x64-gnu",
    "aarch64-unknown-linux-gnu": "@lancedb/vectordb-linux-arm64-gnu",
    "x86_64-unknown-linux-musl": "@lancedb/vectordb-linux-x64-musl",
    "aarch64-unknown-linux-musl": "@lancedb/vectordb-linux-arm64-musl",
    "x86_64-pc-windows-msvc": "@lancedb/vectordb-win32-x64-msvc",
    "aarch64-pc-windows-msvc": "@lancedb/vectordb-win32-arm64-msvc"
    }
  },
  "optionalDependencies": {
    "@lancedb/vectordb-darwin-x64": "0.13.1-beta.0",
    "@lancedb/vectordb-darwin-arm64": "0.13.1-beta.0",
    "@lancedb/vectordb-linux-x64-gnu": "0.13.1-beta.0",
    "@lancedb/vectordb-linux-arm64-gnu": "0.13.1-beta.0",
    "@lancedb/vectordb-linux-x64-musl": "0.13.1-beta.0",
    "@lancedb/vectordb-linux-arm64-musl": "0.13.1-beta.0",
    "@lancedb/vectordb-win32-x64-msvc": "0.13.1-beta.0",
    "@lancedb/vectordb-win32-arm64-msvc": "0.13.1-beta.0"
    "@lancedb/vectordb-darwin-arm64": "0.13.0-beta.1",
    "@lancedb/vectordb-darwin-x64": "0.13.0-beta.1",
    "@lancedb/vectordb-linux-arm64-gnu": "0.13.0-beta.1",
    "@lancedb/vectordb-linux-x64-gnu": "0.13.0-beta.1",
    "@lancedb/vectordb-win32-x64-msvc": "0.13.0-beta.1",
    "@lancedb/vectordb-win32-arm64-msvc": "0.13.0-beta.1"
  }
}

@@ -1,7 +1,7 @@
[package]
name = "lancedb-nodejs"
edition.workspace = true
version = "0.13.1-beta.0"
version = "0.13.0-beta.1"
license.workspace = true
description.workspace = true
repository.workspace = true

@@ -187,81 +187,6 @@ describe.each([arrow13, arrow14, arrow15, arrow16, arrow17])(
  },
);

// TODO: https://github.com/lancedb/lancedb/issues/1832
it.skip("should be able to omit nullable fields", async () => {
  const db = await connect(tmpDir.name);
  const schema = new arrow.Schema([
    new arrow.Field(
      "vector",
      new arrow.FixedSizeList(
        2,
        new arrow.Field("item", new arrow.Float64()),
      ),
      true,
    ),
    new arrow.Field("item", new arrow.Utf8(), true),
    new arrow.Field("price", new arrow.Float64(), false),
  ]);
  const table = await db.createEmptyTable("test", schema);

  const data1 = { item: "foo", price: 10.0 };
  await table.add([data1]);
  const data2 = { vector: [3.1, 4.1], price: 2.0 };
  await table.add([data2]);
  const data3 = { vector: [5.9, 26.5], item: "bar", price: 3.0 };
  await table.add([data3]);

  let res = await table.query().limit(10).toArray();
  const resVector = res.map((r) => r.get("vector").toArray());
  expect(resVector).toEqual([null, data2.vector, data3.vector]);
  const resItem = res.map((r) => r.get("item").toArray());
  expect(resItem).toEqual(["foo", null, "bar"]);
  const resPrice = res.map((r) => r.get("price").toArray());
  expect(resPrice).toEqual([10.0, 2.0, 3.0]);

  const data4 = { item: "foo" };
  // We can't omit a column if it's not nullable
  await expect(table.add([data4])).rejects.toThrow("Invalid user input");

  // But we can alter columns to make them nullable
  await table.alterColumns([{ path: "price", nullable: true }]);
  await table.add([data4]);

  res = (await table.query().limit(10).toArray()).map((r) => r.toJSON());
  expect(res).toEqual([data1, data2, data3, data4]);
});

it("should be able to insert nullable data for non-nullable fields", async () => {
  const db = await connect(tmpDir.name);
  const schema = new arrow.Schema([
    new arrow.Field("x", new arrow.Float64(), false),
    new arrow.Field("id", new arrow.Utf8(), false),
  ]);
  const table = await db.createEmptyTable("test", schema);

  const data1 = { x: 4.1, id: "foo" };
  await table.add([data1]);
  const res = (await table.query().toArray())[0];
  expect(res.x).toEqual(data1.x);
  expect(res.id).toEqual(data1.id);

  const data2 = { x: null, id: "bar" };
  await expect(table.add([data2])).rejects.toThrow(
    "declared as non-nullable but contains null values",
  );

  // But we can alter columns to make them nullable
  await table.alterColumns([{ path: "x", nullable: true }]);
  await table.add([data2]);

  const res2 = await table.query().toArray();
  expect(res2.length).toBe(2);
  expect(res2[0].x).toEqual(data1.x);
  expect(res2[0].id).toEqual(data1.id);
  expect(res2[1].x).toBeNull();
  expect(res2[1].id).toEqual(data2.id);
});

it("should return the table as an instance of an arrow table", async () => {
  const arrowTbl = await table.toArrow();
  expect(arrowTbl).toBeInstanceOf(ArrowTable);
@@ -477,54 +402,6 @@ describe("When creating an index", () => {
  expect(rst.numRows).toBe(1);
});

it("should create and search IVF_HNSW indices", async () => {
  await tbl.createIndex("vec", {
    config: Index.hnswSq(),
  });

  // check index directory
  const indexDir = path.join(tmpDir.name, "test.lance", "_indices");
  expect(fs.readdirSync(indexDir)).toHaveLength(1);
  const indices = await tbl.listIndices();
  expect(indices.length).toBe(1);
  expect(indices[0]).toEqual({
    name: "vec_idx",
    indexType: "IvfHnswSq",
    columns: ["vec"],
  });

  // Search without specifying the column
  let rst = await tbl
    .query()
    .limit(2)
    .nearestTo(queryVec)
    .distanceType("dot")
    .toArrow();
  expect(rst.numRows).toBe(2);

  // Search using `vectorSearch`
  rst = await tbl.vectorSearch(queryVec).limit(2).toArrow();
  expect(rst.numRows).toBe(2);

  // Search with specifying the column
  const rst2 = await tbl
    .query()
    .limit(2)
    .nearestTo(queryVec)
    .column("vec")
    .toArrow();
  expect(rst2.numRows).toBe(2);
  expect(rst.toString()).toEqual(rst2.toString());

  // test offset
  rst = await tbl.query().limit(2).offset(1).nearestTo(queryVec).toArrow();
  expect(rst.numRows).toBe(1);

  // test ef
  rst = await tbl.query().limit(2).nearestTo(queryVec).ef(100).toArrow();
  expect(rst.numRows).toBe(2);
});

it("should be able to query unindexed data", async () => {
  await tbl.createIndex("vec");
  await tbl.add([
@@ -1121,18 +998,4 @@ describe("column name options", () => {
  const results = await table.query().where("`camelCase` = 1").toArray();
  expect(results[0].camelCase).toBe(1);
});

test("can make multiple vector queries in one go", async () => {
  const results = await table
    .query()
    .nearestTo([0.1, 0.2])
    .addQueryVector([0.1, 0.2])
    .limit(1)
    .toArray();
  console.log(results);
  expect(results.length).toBe(2);
  results.sort((a, b) => a.query_index - b.query_index);
  expect(results[0].query_index).toBe(0);
  expect(results[1].query_index).toBe(1);
});
});
230
nodejs/examples/cloud.test.ts
Normal file
@@ -0,0 +1,230 @@
// --8<-- [start:imports]
import * as lancedb from "@lancedb/lancedb";
// --8<-- [end:imports]

// --8<-- [start:generate_data]
function genData(numRows: number, numVectorDim: number): any[] {
  const data = [];
  for (let i = 0; i < numRows; i++) {
    const vector = [];
    for (let j = 0; j < numVectorDim; j++) {
      vector.push(i + j * 0.1);
    }
    data.push({
      id: i,
      name: `name_${i}`,
      vector,
    });
  }
  return data;
}
// --8<-- [end:generate_data]

test("cloud quickstart", async () => {
  {
    // --8<-- [start:connect]
    const db = await lancedb.connect({
      uri: "db://your-project-slug",
      apiKey: "your-api-key",
      region: "your-cloud-region",
    });
    // --8<-- [end:connect]
    // --8<-- [start:create_table]
    const tableName = "myTable";
    const data = genData(5000, 1536);
    const table = await db.createTable(tableName, data);
    // --8<-- [end:create_table]
    // --8<-- [start:create_index_search]
    // create a vector index
    await table.createIndex({
      column: "vector",
      metric_type: lancedb.MetricType.Cosine,
      type: "ivf_pq",
    });
    const result = await table.search([0.01, 0.02])
      .select(["vector", "item"])
      .limit(1)
      .execute();
    // --8<-- [end:create_index_search]
    // --8<-- [start:drop_table]
    await db.dropTable(tableName);
    // --8<-- [end:drop_table]
  }
});

test("ingest data", async () => {
  // --8<-- [start:ingest_data]
  import { Schema, Field, Float32, FixedSizeList, Utf8 } from "apache-arrow";

  const db = await lancedb.connect({
    uri: "db://your-project-slug",
    apiKey: "your-api-key",
    region: "us-east-1"
  });

  const data = [
    { vector: [3.1, 4.1], item: "foo", price: 10.0 },
    { vector: [5.9, 26.5], item: "bar", price: 20.0 },
    { vector: [10.2, 100.8], item: "baz", price: 30.0 },
    { vector: [1.4, 9.5], item: "fred", price: 40.0 },
  ];
  // create an empty table with schema
  const schema = new Schema([
    new Field(
      "vector",
      new FixedSizeList(2, new Field("float32", new Float32())),
    ),
    new Field("item", new Utf8()),
    new Field("price", new Float32()),
  ]);
  const tableName = "myTable";
  const table = await db.createTable({
    name: tableName,
    schema,
  });
  await table.add(data);
  // --8<-- [end:ingest_data]
});

test("update data", async () => {
  // --8<-- [start:connect_db_and_open_table]
  const db = await lancedb.connect({
    uri: "db://your-project-slug",
    apiKey: "your-api-key",
    region: "us-east-1"
  });
  const tableName = "myTable";
  const table = await db.openTable(tableName);
  // --8<-- [end:connect_db_and_open_table]
  // --8<-- [start:update_data]
  await table.update({
    where: "price < 20.0",
    values: { vector: [2, 2], item: "foo-updated" },
  });
  // --8<-- [end:update_data]
  // --8<-- [start:merge_insert]
  let newData = [
    { vector: [1, 1], item: 'foo-updated', price: 50.0 }
  ];
  // upsert
  await table.mergeInsert("item", newData, {
    whenMatchedUpdateAll: true,
    whenNotMatchedInsertAll: true,
  });
  // --8<-- [end:merge_insert]
  // --8<-- [start:delete_data]
  // delete data
  const predicate = "price = 30.0";
  await table.delete(predicate);
  // --8<-- [end:delete_data]
});

test("create index", async () => {
  const db = await lancedb.connect({
    uri: "db://your-project-slug",
    apiKey: "your-api-key",
    region: "us-east-1"
  });

  const tableName = "myTable";
  const table = await db.openTable(tableName);
  // --8<-- [start:create_index]
  // the vector column only needs to be specified when there are
  // multiple vector columns or the column is not named as "vector"
  // L2 is used as the default distance metric
  await table.createIndex({
    column: "vector",
    metric_type: lancedb.MetricType.Cosine,
  });

  // --8<-- [end:create_index]
  // --8<-- [start:create_scalar_index]
  await table.createScalarIndex("item");
  // --8<-- [end:create_scalar_index]
  // --8<-- [start:create_fts_index]
  const ftsDb = await lancedb.connect({
    uri: "db://your-project-slug",
    apiKey: "your-api-key",
    region: "us-east-1"
  });

  const ftsTableName = "myFtsTable";
  const ftsData = [
    { vector: [3.1, 4.1], text: "Frodo was a happy puppy" },
    { vector: [5.9, 26.5], text: "There are several kittens playing" },
  ];
  const ftsTable = await ftsDb.createTable(ftsTableName, ftsData);
  await ftsTable.createIndex("text", {
    config: lancedb.Index.fts(),
  });
  // --8<-- [end:create_fts_index]
});

test("vector search", async () => {
  // --8<-- [start:vector_search]
  const db = await lancedb.connect({
    uri: "db://your-project-slug",
    apiKey: "your-api-key",
    region: "us-east-1"
  });

  const tableName = "myTable";
  const table = await db.openTable(tableName);
  const result = await table.search([0.4, 1.4])
    .where("price > 10.0")
    .prefilter(true)
    .select(["item", "vector"])
    .limit(2)
    .execute();
  // --8<-- [end:vector_search]
});

test("full-text search", async () => {
  // --8<-- [start:full_text_search]
  const db = await lancedb.connect({
    uri: "db://your-project-slug",
    apiKey: "your-api-key",
    region: "us-east-1"
  });

  const data = [
    { vector: [3.1, 4.1], text: "Frodo was a happy puppy" },
    { vector: [5.9, 26.5], text: "There are several kittens playing" },
  ];
  const tableName = "myTable";
  const table = await db.createTable(tableName, data);
  await table.createIndex("text", {
    config: lancedb.Index.fts(),
  });

  await table
    .search("puppy", "fts")
    .select(["text"])
    .limit(10)
    .toArray();
  // --8<-- [end:full_text_search]
});

test("metadata filtering", async () => {
  // --8<-- [start:filtering]
  const db = await lancedb.connect({
    uri: "db://your-project-slug",
    apiKey: "your-api-key",
    region: "us-east-1"
  });
  const tableName = "myTable";
  const table = await db.openTable(tableName);
  await table
    .search(Array(2).fill(0.1))
    .where("(item IN ('foo', 'bar')) AND (price > 10.0)")
    .postfilter()
    .toArray();
  // --8<-- [end:filtering]
  // --8<-- [start:sql_filtering]
  await table
    .search(Array(2).fill(0.1))
    .where("(item IN ('foo', 'bar')) AND (price > 10.0)")
    .postfilter()
    .toArray();
  // --8<-- [end:sql_filtering]
});
@@ -6,16 +6,12 @@ import { withTempDirectory } from "./util.ts";
import * as lancedb from "@lancedb/lancedb";
import "@lancedb/lancedb/embedding/transformers";
import { LanceSchema, getRegistry } from "@lancedb/lancedb/embedding";
import { EmbeddingFunction } from "@lancedb/lancedb/embedding";
import { Utf8 } from "apache-arrow";

test("full text search", async () => {
  await withTempDirectory(async (databaseDir) => {
    const db = await lancedb.connect(databaseDir);
    console.log(getRegistry());
    const func = (await getRegistry()
      .get("huggingface")
      ?.create()) as EmbeddingFunction;
    const func = await getRegistry().get("huggingface").create();

    const facts = [
      "Albert Einstein was a theoretical physicist.",
@@ -60,4 +56,4 @@ test("full text search", async () => {

    expect(actual[0]["text"]).toBe("The human body has 206 bones.");
    });
  }, 100_000);
});
@@ -19,6 +19,9 @@ import { EmbeddingFunctionConfig, getRegistry } from "./registry";

export { EmbeddingFunction, TextEmbeddingFunction } from "./embedding_function";

// We need to explicitly export '*' so that the `register` decorator actually registers the class.
export * from "./openai";
export * from "./transformers";
export * from "./registry";

/**

@@ -17,6 +17,8 @@ import {
  type EmbeddingFunctionConstructor,
} from "./embedding_function";
import "reflect-metadata";
import { OpenAIEmbeddingFunction } from "./openai";
import { TransformersEmbeddingFunction } from "./transformers";

type CreateReturnType<T> = T extends { init: () => Promise<void> }
  ? Promise<T>
@@ -71,6 +73,10 @@ export class EmbeddingFunctionRegistry {
    };
  }

  get(name: "openai"): EmbeddingFunctionCreate<OpenAIEmbeddingFunction>;
  get(
    name: "huggingface",
  ): EmbeddingFunctionCreate<TransformersEmbeddingFunction>;
  get<T extends EmbeddingFunction<unknown>>(
    name: string,
  ): EmbeddingFunctionCreate<T> | undefined;
@@ -385,20 +385,6 @@ export class VectorQuery extends QueryBase<NativeVectorQuery> {
    return this;
  }

  /**
   * Set the number of candidates to consider during the search
   *
   * This argument is only used when the vector column has an HNSW index.
   * If there is no index then this value is ignored.
   *
   * Increasing this value will increase the recall of your query but will
   * also increase the latency of your query. The default value is 1.5*limit.
   */
  ef(ef: number): VectorQuery {
    super.doCall((inner) => inner.ef(ef));
    return this;
  }

  /**
   * Set the vector column to query
   *
@@ -506,42 +492,6 @@ export class VectorQuery extends QueryBase<NativeVectorQuery> {
    super.doCall((inner) => inner.bypassVectorIndex());
    return this;
  }

  /*
   * Add a query vector to the search
   *
   * This method can be called multiple times to add multiple query vectors
   * to the search. If multiple query vectors are added, then they will be searched
   * in parallel, and the results will be concatenated. A column called `query_index`
   * will be added to indicate the index of the query vector that produced the result.
   *
   * Performance wise, this is equivalent to running multiple queries concurrently.
   */
  addQueryVector(vector: IntoVector): VectorQuery {
    if (vector instanceof Promise) {
      const res = (async () => {
        try {
          const v = await vector;
          const arr = Float32Array.from(v);
          //
          // biome-ignore lint/suspicious/noExplicitAny: we need to get the `inner`, but js has no package scoping
          const value: any = this.addQueryVector(arr);
          const inner = value.inner as
            | NativeVectorQuery
            | Promise<NativeVectorQuery>;
          return inner;
        } catch (e) {
          return Promise.reject(e);
        }
      })();
      return new VectorQuery(res);
    } else {
      super.doCall((inner) => {
        inner.addQueryVector(Float32Array.from(vector));
      });
      return this;
    }
  }
}

/** A builder for LanceDB queries. */

@@ -87,12 +87,6 @@ export interface OptimizeOptions {
  deleteUnverified: boolean;
}

export interface Version {
  version: number;
  timestamp: Date;
  metadata: Record<string, string>;
}

/**
 * A Table is a collection of Records in a LanceDB Database.
 *
@@ -366,11 +360,6 @@ export abstract class Table {
   */
  abstract checkoutLatest(): Promise<void>;

  /**
   * List all the versions of the table
   */
  abstract listVersions(): Promise<Version[]>;

  /**
   * Restore the table to the currently checked out version
   *
@@ -670,14 +659,6 @@ export class LocalTable extends Table {
    await this.inner.checkoutLatest();
  }

  async listVersions(): Promise<Version[]> {
    return (await this.inner.listVersions()).map((version) => ({
      version: version.version,
      timestamp: new Date(version.timestamp / 1000),
      metadata: version.metadata,
    }));
  }

  async restore(): Promise<void> {
    await this.inner.restore();
  }

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-arm64",
  "version": "0.13.1-beta.0",
  "version": "0.13.0-beta.1",
  "os": ["darwin"],
  "cpu": ["arm64"],
  "main": "lancedb.darwin-arm64.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-x64",
  "version": "0.13.1-beta.0",
  "version": "0.13.0-beta.1",
  "os": ["darwin"],
  "cpu": ["x64"],
  "main": "lancedb.darwin-x64.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-arm64-gnu",
  "version": "0.13.1-beta.0",
  "version": "0.13.0-beta.1",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "lancedb.linux-arm64-gnu.node",

@@ -1,3 +0,0 @@
# `@lancedb/lancedb-linux-arm64-musl`

This is the **aarch64-unknown-linux-musl** binary for `@lancedb/lancedb`
@@ -1,13 +0,0 @@
{
  "name": "@lancedb/lancedb-linux-arm64-musl",
  "version": "0.13.1-beta.0",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "lancedb.linux-arm64-musl.node",
  "files": ["lancedb.linux-arm64-musl.node"],
  "license": "Apache 2.0",
  "engines": {
    "node": ">= 18"
  },
  "libc": ["musl"]
}
@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-x64-gnu",
  "version": "0.13.1-beta.0",
  "version": "0.13.0-beta.1",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "lancedb.linux-x64-gnu.node",

@@ -1,3 +0,0 @@
# `@lancedb/lancedb-linux-x64-musl`

This is the **x86_64-unknown-linux-musl** binary for `@lancedb/lancedb`
@@ -1,13 +0,0 @@
{
  "name": "@lancedb/lancedb-linux-x64-musl",
  "version": "0.13.1-beta.0",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "lancedb.linux-x64-musl.node",
  "files": ["lancedb.linux-x64-musl.node"],
  "license": "Apache 2.0",
  "engines": {
    "node": ">= 18"
  },
  "libc": ["musl"]
}
@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-win32-arm64-msvc",
  "version": "0.13.1-beta.0",
  "version": "0.13.0-beta.1",
  "os": [
    "win32"
  ],

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-win32-x64-msvc",
  "version": "0.13.1-beta.0",
  "version": "0.13.0-beta.1",
  "os": ["win32"],
  "cpu": ["x64"],
  "main": "lancedb.win32-x64-msvc.node",

10
nodejs/package-lock.json
generated
@@ -1,12 +1,12 @@
{
  "name": "@lancedb/lancedb",
  "version": "0.13.0",
  "version": "0.13.0-beta.1",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "@lancedb/lancedb",
      "version": "0.13.0",
      "version": "0.13.0-beta.1",
      "cpu": [
        "x64",
        "arm64"
@@ -6052,9 +6052,9 @@
      }
    },
    "node_modules/cross-spawn": {
      "version": "7.0.6",
      "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
      "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
      "version": "7.0.3",
      "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
      "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
      "devOptional": true,
      "dependencies": {
        "path-key": "^3.1.0",

@@ -10,13 +10,11 @@
    "vector database",
    "ann"
  ],
  "version": "0.13.1-beta.0",
  "version": "0.13.0-beta.1",
  "main": "dist/index.js",
  "exports": {
    ".": "./dist/index.js",
    "./embedding": "./dist/embedding/index.js",
    "./embedding/openai": "./dist/embedding/openai.js",
    "./embedding/transformers": "./dist/embedding/transformers.js"
    "./embedding": "./dist/embedding/index.js"
  },
  "types": "dist/index.d.ts",
  "napi": {
@@ -24,12 +22,10 @@
    "triples": {
      "defaults": false,
      "additional": [
        "x86_64-apple-darwin",
        "aarch64-apple-darwin",
        "x86_64-unknown-linux-gnu",
        "aarch64-unknown-linux-gnu",
        "x86_64-unknown-linux-musl",
        "aarch64-unknown-linux-musl",
        "x86_64-apple-darwin",
        "x86_64-unknown-linux-gnu",
        "x86_64-pc-windows-msvc"
      ]
    }

@@ -135,16 +135,6 @@ impl VectorQuery {
        self.inner = self.inner.clone().column(&column);
    }

    #[napi]
    pub fn add_query_vector(&mut self, vector: Float32Array) -> Result<()> {
        self.inner = self
            .inner
            .clone()
            .add_query_vector(vector.as_ref())
            .default_error()?;
        Ok(())
    }

    #[napi]
    pub fn distance_type(&mut self, distance_type: String) -> napi::Result<()> {
        let distance_type = parse_distance_type(distance_type)?;
@@ -167,11 +157,6 @@ impl VectorQuery {
        self.inner = self.inner.clone().nprobes(nprobe as usize);
    }

    #[napi]
    pub fn ef(&mut self, ef: u32) {
        self.inner = self.inner.clone().ef(ef as usize);
    }

    #[napi]
    pub fn bypass_vector_index(&mut self) {
        self.inner = self.inner.clone().bypass_vector_index()

@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use arrow_ipc::writer::FileWriter;
use lancedb::ipc::ipc_file_to_batches;
use lancedb::table::{
@@ -228,28 +226,6 @@ impl Table {
        self.inner_ref()?.checkout_latest().await.default_error()
    }

    #[napi(catch_unwind)]
    pub async fn list_versions(&self) -> napi::Result<Vec<Version>> {
        self.inner_ref()?
            .list_versions()
            .await
            .map(|versions| {
                versions
                    .iter()
                    .map(|version| Version {
                        version: version.version as i64,
                        timestamp: version.timestamp.timestamp_micros(),
                        metadata: version
                            .metadata
                            .iter()
                            .map(|(k, v)| (k.clone(), v.clone()))
                            .collect(),
                    })
                    .collect()
            })
            .default_error()
    }

    #[napi(catch_unwind)]
    pub async fn restore(&self) -> napi::Result<()> {
        self.inner_ref()?.restore().await.default_error()
@@ -490,10 +466,3 @@ impl From<lancedb::index::IndexStatistics> for IndexStatistics {
        }
    }
}

#[napi(object)]
pub struct Version {
    pub version: i64,
    pub timestamp: i64,
    pub metadata: HashMap<String, String>,
}

@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.16.1-beta.0"
current_version = "0.16.0-beta.0"
parse = """(?x)
(?P<major>0|[1-9]\\d*)\\.
(?P<minor>0|[1-9]\\d*)\\.

@@ -1,6 +1,6 @@
[package]
name = "lancedb-python"
version = "0.16.1-beta.0"
version = "0.16.0-beta.0"
edition.workspace = true
description = "Python bindings for LanceDB"
license.workspace = true
@@ -15,19 +15,13 @@ crate-type = ["cdylib"]

[dependencies]
arrow = { version = "52.1", features = ["pyarrow"] }
lancedb = { path = "../rust/lancedb", default-features = false }
lancedb = { path = "../rust/lancedb" }
env_logger.workspace = true
pyo3 = { version = "0.21", features = [
    "extension-module",
    "abi3-py39",
    "gil-refs"
] }
pyo3 = { version = "0.21", features = ["extension-module", "abi3-py38", "gil-refs"] }
# Using this fork for now: https://github.com/awestlake87/pyo3-asyncio/issues/119
# pyo3-asyncio = { version = "0.20", features = ["attributes", "tokio-runtime"] }
pyo3-asyncio-0-21 = { version = "0.21.0", features = [
    "attributes",
    "tokio-runtime"
] }
pyo3-asyncio-0-21 = { version = "0.21.0", features = ["attributes", "tokio-runtime"] }

pin-project = "1.1.5"
futures.workspace = true
tokio = { version = "1.36.0", features = ["sync"] }
@@ -35,14 +29,10 @@ tokio = { version = "1.36.0", features = ["sync"] }
[build-dependencies]
pyo3-build-config = { version = "0.20.3", features = [
    "extension-module",
    "abi3-py39",
    "abi3-py38",
] }

[features]
default = ["default-tls", "remote"]
default = ["remote"]
fp16kernels = ["lancedb/fp16kernels"]
remote = ["lancedb/remote"]
# TLS
default-tls = ["lancedb/default-tls"]
native-tls = ["lancedb/native-tls"]
rustls-tls = ["lancedb/rustls-tls"]

@@ -4,7 +4,7 @@ name = "lancedb"
dependencies = [
    "deprecation",
    "nest-asyncio~=1.0",
    "pylance==0.20.0b2",
    "pylance==0.19.2-beta.3",
    "tqdm>=4.27.0",
    "pydantic>=1.10",
    "packaging",
@@ -31,6 +31,7 @@ classifiers = [
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3 :: Only",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",

@@ -83,33 +83,25 @@ class OpenAIEmbeddings(TextEmbeddingFunction):
        """
        openai = attempt_import_or_raise("openai")

        valid_texts = []
        valid_indices = []
        for idx, text in enumerate(texts):
            if text:
                valid_texts.append(text)
                valid_indices.append(idx)

        # TODO retry, rate limit, token limit
        try:
            kwargs = {
                "input": valid_texts,
                "model": self.name,
            }
            if self.name != "text-embedding-ada-002":
                kwargs["dimensions"] = self.dim

            rs = self._openai_client.embeddings.create(**kwargs)
            valid_embeddings = {
                idx: v.embedding for v, idx in zip(rs.data, valid_indices)
            }
            if self.name == "text-embedding-ada-002":
                rs = self._openai_client.embeddings.create(input=texts, model=self.name)
            else:
                kwargs = {
                    "input": texts,
                    "model": self.name,
                }
                if self.dim:
                    kwargs["dimensions"] = self.dim
                rs = self._openai_client.embeddings.create(**kwargs)
        except openai.BadRequestError:
            logging.exception("Bad request: %s", texts)
            return [None] * len(texts)
        except Exception:
            logging.exception("OpenAI embeddings error")
            raise
        return [valid_embeddings.get(idx, None) for idx in range(len(texts))]
        return [v.embedding for v in rs.data]

    @cached_property
    def _openai_client(self):

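# Editor's sketch (hypothetical usage, not part of this diff): the embedding
# function above is normally obtained through the registry, e.g.
#
#     from lancedb.embeddings import get_registry
#     func = get_registry().get("openai").create(name="text-embedding-3-small")
#     embeddings = func.compute_source_embeddings(["hello world"])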
@@ -1,5 +1,15 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Pydantic (v1 / v2) adapter for LanceDB"""

@@ -20,7 +30,6 @@ from typing import (
    Type,
    Union,
    _GenericAlias,
    GenericAlias,
)

import numpy as np
@@ -66,7 +75,7 @@ def vector(dim: int, value_type: pa.DataType = pa.float32()):

def Vector(
    dim: int, value_type: pa.DataType = pa.float32(), nullable: bool = True
    dim: int, value_type: pa.DataType = pa.float32()
) -> Type[FixedSizeListMixin]:
    """Pydantic Vector Type.

@@ -79,8 +88,6 @@ def Vector(
        The dimension of the vector.
    value_type : pyarrow.DataType, optional
        The value type of the vector, by default pa.float32()
    nullable : bool, optional
        Whether the vector is nullable, by default it is True.

    Examples
    --------
@@ -96,7 +103,7 @@ def Vector(
    >>> assert schema == pa.schema([
    ...     pa.field("id", pa.int64(), False),
    ...     pa.field("url", pa.utf8(), False),
    ...     pa.field("embeddings", pa.list_(pa.float32(), 768))
    ...     pa.field("embeddings", pa.list_(pa.float32(), 768), False)
    ... ])
    """

@@ -105,10 +112,6 @@
        def __repr__(self):
            return f"FixedSizeList(dim={dim})"

        @staticmethod
        def nullable() -> bool:
            return nullable

        @staticmethod
        def dim() -> int:
            return dim
@@ -202,7 +205,9 @@ else:

def _pydantic_to_arrow_type(field: FieldInfo) -> pa.DataType:
    """Convert a Pydantic FieldInfo to Arrow DataType"""

    if isinstance(field.annotation, (_GenericAlias, GenericAlias)):
    if isinstance(field.annotation, _GenericAlias) or (
        sys.version_info > (3, 9) and isinstance(field.annotation, types.GenericAlias)
    ):
        origin = field.annotation.__origin__
        args = field.annotation.__args__
        if origin is list:
@@ -230,7 +235,7 @@ def _pydantic_to_arrow_type(field: FieldInfo) -> pa.DataType:

def is_nullable(field: FieldInfo) -> bool:
    """Check if a Pydantic FieldInfo is nullable."""
    if isinstance(field.annotation, (_GenericAlias, GenericAlias)):
    if isinstance(field.annotation, _GenericAlias):
        origin = field.annotation.__origin__
        args = field.annotation.__args__
        if origin == Union:
@@ -241,10 +246,6 @@ def is_nullable(field: FieldInfo) -> bool:
            for typ in args:
                if typ is type(None):
                    return True
    elif inspect.isclass(field.annotation) and issubclass(
        field.annotation, FixedSizeListMixin
    ):
        return field.annotation.nullable()
    return False

@@ -131,8 +131,6 @@ class Query(pydantic.BaseModel):

    fast_search: bool = False

    ef: Optional[int] = None


class LanceQueryBuilder(ABC):
    """An abstract query builder. Subclasses are defined for vector search,
@@ -259,7 +257,6 @@ class LanceQueryBuilder(ABC):
        self._with_row_id = False
        self._vector = None
        self._text = None
        self._ef = None

    @deprecation.deprecated(
        deprecated_in="0.3.1",
@@ -370,13 +367,11 @@ class LanceQueryBuilder(ABC):
        ----------
        limit: int
            The maximum number of results to return.
            The default query limit is 10 results.
            For ANN/KNN queries, you must specify a limit.
            Entering 0, a negative number, or None will reset
            the limit to the default value of 10.
            *WARNING* if you have a large dataset, setting
            the limit to a large number, e.g. the table size,
            can potentially result in reading a
            By default the query is limited to the first 10.
            Call this method and pass 0, a negative value,
            or None to remove the limit.
            *WARNING* if you have a large dataset, removing
            the limit can potentially result in reading a
            large amount of data into memory and cause
            out of memory issues.

@@ -643,28 +638,6 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
        self._nprobes = nprobes
        return self

    def ef(self, ef: int) -> LanceVectorQueryBuilder:
        """Set the number of candidates to consider during search.

        Higher values will yield better recall (more likely to find vectors if
        they exist) at the expense of latency.

        This only applies to the HNSW-related index.
        The default value is 1.5 * limit.

        Parameters
        ----------
        ef: int
            The number of candidates to consider during search.

        Returns
        -------
        LanceVectorQueryBuilder
            The LanceQueryBuilder object.
        """
        self._ef = ef
        return self

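    # Editor's sketch (hypothetical usage, not part of this diff): with an
    # HNSW index on the vector column, `ef` trades recall for latency, e.g.
    #
    #     hits = table.search([0.1, 0.2]).ef(64).limit(10).to_list()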
    def refine_factor(self, refine_factor: int) -> LanceVectorQueryBuilder:
        """Set the refine factor to use, increasing the number of vectors sampled.

@@ -727,7 +700,6 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
            with_row_id=self._with_row_id,
            offset=self._offset,
            fast_search=self._fast_search,
            ef=self._ef,
        )
        result_set = self._table._execute_query(query, batch_size)
        if self._reranker is not None:
@@ -971,16 +943,12 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):

class LanceEmptyQueryBuilder(LanceQueryBuilder):
    def to_arrow(self) -> pa.Table:
        query = Query(
        ds = self._table.to_lance()
        return ds.to_table(
            columns=self._columns,
            filter=self._where,
            k=self._limit or 10,
            with_row_id=self._with_row_id,
            vector=[],
            # not actually respected in remote query
            offset=self._offset or 0,
            limit=self._limit,
        )
        return self._table._execute_query(query).read_all()

    def rerank(self, reranker: Reranker) -> LanceEmptyQueryBuilder:
        """Rerank the results using the specified reranker.
@@ -1099,8 +1067,6 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
            self._vector_query.nprobes(self._nprobes)
        if self._refine_factor:
            self._vector_query.refine_factor(self._refine_factor)
        if self._ef:
            self._vector_query.ef(self._ef)

        with ThreadPoolExecutor() as executor:
            fts_future = executor.submit(self._fts_query.with_row_id(True).to_arrow)
@@ -1227,29 +1193,6 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
        self._nprobes = nprobes
        return self

    def ef(self, ef: int) -> LanceHybridQueryBuilder:
        """
        Set the number of candidates to consider during search.

        Higher values will yield better recall (more likely to find vectors if
        they exist) at the expense of latency.

        This only applies to the HNSW-related index.
        The default value is 1.5 * limit.

        Parameters
        ----------
        ef: int
            The number of candidates to consider during search.

        Returns
        -------
        LanceHybridQueryBuilder
            The LanceHybridQueryBuilder object.
        """
        self._ef = ef
        return self

    def metric(self, metric: Literal["L2", "cosine", "dot"]) -> LanceHybridQueryBuilder:
        """Set the distance metric to use.

@@ -1548,8 +1491,7 @@ class AsyncQuery(AsyncQueryBase):
        return pa.array(vec)

    def nearest_to(
        self,
        query_vector: Union[VEC, Tuple, List[VEC]],
        self, query_vector: Optional[Union[VEC, Tuple]] = None
    ) -> AsyncVectorQuery:
        """
        Find the nearest vectors to the given query vector.
@@ -1587,33 +1529,10 @@ class AsyncQuery(AsyncQueryBase):

        Vector searches always have a [limit][]. If `limit` has not been called then
        a default `limit` of 10 will be used.

        Typically, a single vector is passed in as the query. However, you can also
        pass in multiple vectors. This can be useful if you want to find the nearest
        vectors to multiple query vectors. This is not expected to be faster than
        making multiple queries concurrently; it is just a convenience method.
        If multiple vectors are passed in then an additional column `query_index`
        will be added to the results. This column will contain the index of the
        query vector that the result is nearest to.
        """
        if query_vector is None:
            raise ValueError("query_vector can not be None")

        if (
            isinstance(query_vector, list)
            and len(query_vector) > 0
            and not isinstance(query_vector[0], (float, int))
        ):
            # multiple have been passed
            query_vectors = [AsyncQuery._query_vec_to_array(v) for v in query_vector]
            new_self = self._inner.nearest_to(query_vectors[0])
            for v in query_vectors[1:]:
                new_self.add_query_vector(v)
            return AsyncVectorQuery(new_self)
        else:
            return AsyncVectorQuery(
                self._inner.nearest_to(AsyncQuery._query_vec_to_array(query_vector))
            )
        return AsyncVectorQuery(
            self._inner.nearest_to(AsyncQuery._query_vec_to_array(query_vector))
        )

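    # Editor's sketch (hypothetical usage, not part of this diff): passing
    # several query vectors at once; results gain a `query_index` column, e.g.
    #
    #     q = await table.query().nearest_to([[0.1, 0.2], [0.3, 0.4]]).to_pandas()
    #     q.sort_values("query_index")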
def nearest_to_text(
|
||||
self, query: str, columns: Union[str, List[str]] = []
|
||||
@@ -1675,7 +1594,7 @@ class AsyncVectorQuery(AsyncQueryBase):
"""
Set the number of partitions to search (probe)

This argument is only used when the vector column has an IVF-based index.
This argument is only used when the vector column has an IVF PQ index.
If there is no index then this value is ignored.

The IVF stage of IVF PQ divides the input into partitions (clusters) of
@@ -1697,21 +1616,6 @@ class AsyncVectorQuery(AsyncQueryBase):
self._inner.nprobes(nprobes)
return self

def ef(self, ef: int) -> AsyncVectorQuery:
"""
Set the number of candidates to consider during search

This argument is only used when the vector column has an HNSW index.
If there is no index then this value is ignored.

Increasing this value will increase the recall of your query but will also
increase the latency of your query. The default value is 1.5 * limit. This
default is good for many cases but the best value to use will depend on your
data and the recall that you need to achieve.
"""
self._inner.ef(ef)
return self

def refine_factor(self, refine_factor: int) -> AsyncVectorQuery:
"""
A multiplier to control how many additional rows are taken during the refine

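A sketch of the recall/latency knobs on an async vector query, assuming an IVF-PQ index is present (values are illustrative):

    hits = await (
        table.query()
        .nearest_to([0.4, 1.4])
        .nprobes(50)        # probe more IVF partitions for higher recall
        .refine_factor(10)  # re-rank 10x the requested rows with full vectors
        .limit(10)
        .to_arrow()
    )
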
@@ -78,10 +78,6 @@ class RemoteTable(Table):
self.schema.metadata
)

def list_versions(self):
"""List all versions of the table"""
return self._loop.run_until_complete(self._table.list_versions())

def to_arrow(self) -> pa.Table:
"""to_arrow() is not yet supported on LanceDB cloud."""
raise NotImplementedError("to_arrow() is not yet supported on LanceDB cloud.")
@@ -90,12 +86,6 @@ class RemoteTable(Table):
"""to_pandas() is not yet supported on LanceDB cloud."""
raise NotImplementedError("to_pandas() is not yet supported on LanceDB cloud.")

def checkout(self, version):
return self._loop.run_until_complete(self._table.checkout(version))

def checkout_latest(self):
return self._loop.run_until_complete(self._table.checkout_latest())

def list_indices(self):
"""List all the indices on the table"""
return self._loop.run_until_complete(self._table.list_indices())
@@ -337,6 +327,10 @@ class RemoteTable(Table):
- and also the "_distance" column which is the distance between the query
vector and the returned vector.
"""
# empty query builder is not supported in saas, raise error
if query is None and query_type != "hybrid":
raise ValueError("Empty query is not supported")

return LanceQueryBuilder.create(
self,
query,

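A sketch of the synchronous version APIs wired up above; connection details and the table name are placeholders:

    table = db.open_table("myTable")
    versions = table.list_versions()
    table.checkout(versions[0]["version"])  # read-only view of an old version
    table.checkout_latest()                 # return to the live table
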
@@ -41,7 +41,7 @@ class CohereReranker(Reranker):

def __init__(
self,
model_name: str = "rerank-english-v3.0",
model_name: str = "rerank-english-v2.0",
column: str = "text",
top_n: Union[int, None] = None,
return_score="relevance",

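A sketch of plugging the Cohere reranker into a hybrid search, assuming COHERE_API_KEY is set (table and column names are illustrative):

    from lancedb.rerankers import CohereReranker

    reranker = CohereReranker(model_name="rerank-english-v3.0", column="text")
    results = (
        table.search("happy puppy", query_type="hybrid")
        .rerank(reranker)
        .limit(5)
        .to_pandas()
    )
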
@@ -8,7 +8,7 @@ import inspect
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime, timedelta
from datetime import timedelta
from functools import cached_property
from typing import (
TYPE_CHECKING,
@@ -1012,39 +1012,6 @@ class Table(ABC):
The names of the columns to drop.
"""

@abstractmethod
def checkout(self):
"""
Checks out a specific version of the Table

Any read operation on the table will now access the data at the checked out
version. As a consequence, calling this method will disable any read consistency
interval that was previously set.

This is a read-only operation that turns the table into a sort of "view"
or "detached head". Other table instances will not be affected. To make the
change permanent you can use the `[Self::restore]` method.

Any operation that modifies the table will fail while the table is in a checked
out state.

To return the table to a normal state use `[Self::checkout_latest]`
"""

@abstractmethod
def checkout_latest(self):
"""
Ensures the table is pointing at the latest version

This can be used to manually update a table when the read_consistency_interval
is None
It can also be used to undo a `[Self::checkout]` operation
"""

@abstractmethod
def list_versions(self):
"""List all versions of the table"""

@cached_property
def _dataset_uri(self) -> str:
return _table_uri(self._conn.uri, self.name)
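
A sketch of the "detached head" semantics documented above (the version number is illustrative):

    table.checkout(2)        # reads now see version 2; writes will fail
    rows_at_v2 = table.to_arrow()
    table.checkout_latest()  # reattach to the newest version; writes work again
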
@@ -1600,7 +1567,7 @@ class LanceTable(Table):
"append" and "overwrite".
on_bad_vectors: str, default "error"
What to do if any of the vectors are not the same size or contains NaNs.
One of "error", "drop", "fill", "null".
One of "error", "drop", "fill".
fill_value: float, default 0.
The value to use when filling vectors. Only used if on_bad_vectors="fill".

@@ -1884,7 +1851,7 @@ class LanceTable(Table):
data but will validate against any schema that's specified.
on_bad_vectors: str, default "error"
What to do if any of the vectors are not the same size or contains NaNs.
One of "error", "drop", "fill", "null".
One of "error", "drop", "fill".
fill_value: float, default 0.
The value to use when filling vectors. Only used if on_bad_vectors="fill".
embedding_functions: list of EmbeddingFunctionModel, default None
@@ -1992,7 +1959,6 @@ class LanceTable(Table):
"metric": query.metric,
"nprobes": query.nprobes,
"refine_factor": query.refine_factor,
"ef": query.ef,
}
return ds.scanner(
columns=query.columns,
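
A sketch of the on_bad_vectors modes listed above; `bad` mixes a NaN vector with a good one, and the "null" mode requires the vector column to be nullable (data is illustrative):

    bad = [{"vector": [float("nan"), 1.0]}, {"vector": [0.5, 0.6]}]
    table.add(bad, on_bad_vectors="fill", fill_value=0.0)  # NaNs replaced with 0.0
    table.add(bad, on_bad_vectors="drop")                  # offending rows removed
    table.add(bad, on_bad_vectors="null")                  # offending vectors stored as null
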
@@ -2185,11 +2151,13 @@ def _sanitize_schema(
vector column to fixed_size_list(float32) if necessary.
on_bad_vectors: str, default "error"
What to do if any of the vectors are not the same size or contains NaNs.
One of "error", "drop", "fill", "null".
One of "error", "drop", "fill".
fill_value: float, default 0.
The value to use when filling vectors. Only used if on_bad_vectors="fill".
"""
if schema is not None:
if data.schema == schema:
return data
# cast the columns to the expected types
data = data.combine_chunks()
for field in schema:
@@ -2209,7 +2177,6 @@ def _sanitize_schema(
vector_column_name=field.name,
on_bad_vectors=on_bad_vectors,
fill_value=fill_value,
table_schema=schema,
)
return pa.Table.from_arrays(
[data[name] for name in schema.names], schema=schema
@@ -2230,7 +2197,6 @@ def _sanitize_schema(
def _sanitize_vector_column(
data: pa.Table,
vector_column_name: str,
table_schema: Optional[pa.Schema] = None,
on_bad_vectors: str = "error",
fill_value: float = 0.0,
) -> pa.Table:
@@ -2245,16 +2211,12 @@ def _sanitize_vector_column(
The name of the vector column.
on_bad_vectors: str, default "error"
What to do if any of the vectors are not the same size or contains NaNs.
One of "error", "drop", "fill", "null".
One of "error", "drop", "fill".
fill_value: float, default 0.0
The value to use when filling vectors. Only used if on_bad_vectors="fill".
"""
# ChunkedArray is annoying to work with, so we combine chunks here
vec_arr = data[vector_column_name].combine_chunks()
if table_schema is not None:
field = table_schema.field(vector_column_name)
else:
field = None
typ = data[vector_column_name].type
if pa.types.is_list(typ) or pa.types.is_large_list(typ):
# if it's a variable size list array,
@@ -2281,11 +2243,7 @@ def _sanitize_vector_column(
data, fill_value, on_bad_vectors, vec_arr, vector_column_name
)
else:
if (
field is not None
and not field.nullable
and pc.any(pc.is_null(vec_arr.values)).as_py()
) or (pc.any(pc.is_nan(vec_arr.values)).as_py()):
if pc.any(pc.is_null(vec_arr.values, nan_is_null=True)).as_py():
data = _sanitize_nans(
data, fill_value, on_bad_vectors, vec_arr, vector_column_name
)
@@ -2329,12 +2287,6 @@ def _sanitize_jagged(data, fill_value, on_bad_vectors, vec_arr, vector_column_na
)
elif on_bad_vectors == "drop":
data = data.filter(correct_ndims)
elif on_bad_vectors == "null":
data = data.set_column(
data.column_names.index(vector_column_name),
vector_column_name,
pc.if_else(correct_ndims, vec_arr, pa.scalar(None)),
)
return data


@@ -2351,8 +2303,7 @@ def _sanitize_nans(
raise ValueError(
f"Vector column {vector_column_name} has NaNs. "
"Set on_bad_vectors='drop' to remove them, or "
"set on_bad_vectors='fill' and fill_value=<value> to replace them. "
"Or set on_bad_vectors='null' to replace them with null."
"set on_bad_vectors='fill' and fill_value=<value> to replace them."
)
elif on_bad_vectors == "fill":
if fill_value is None:
@@ -2372,17 +2323,6 @@ def _sanitize_nans(
np_arr = np_arr.reshape(-1, vec_arr.type.list_size)
not_nulls = np.any(np_arr, axis=1)
data = data.filter(~not_nulls)
elif on_bad_vectors == "null":
# null = pa.nulls(len(vec_arr)).cast(vec_arr.type)
# values = pc.if_else(pc.is_nan(vec_arr.values), fill_value, vec_arr.values)
np_arr = np.isnan(vec_arr.values.to_numpy(zero_copy_only=False))
np_arr = np_arr.reshape(-1, vec_arr.type.list_size)
no_nans = np.any(np_arr, axis=1)
data = data.set_column(
data.column_names.index(vector_column_name),
vector_column_name,
pc.if_else(no_nans, vec_arr, pa.scalar(None)),
)
return data


@@ -2648,7 +2588,7 @@ class AsyncTable:
"append" and "overwrite".
on_bad_vectors: str, default "error"
What to do if any of the vectors are not the same size or contains NaNs.
One of "error", "drop", "fill", "null".
One of "error", "drop", "fill".
fill_value: float, default 0.
The value to use when filling vectors. Only used if on_bad_vectors="fill".

@@ -2731,7 +2671,7 @@ class AsyncTable:

def vector_search(
self,
query_vector: Union[VEC, Tuple],
query_vector: Optional[Union[VEC, Tuple]] = None,
) -> AsyncVectorQuery:
"""
Search the table with a given query vector.
@@ -2770,8 +2710,6 @@ class AsyncTable:
async_query = async_query.refine_factor(query.refine_factor)
if query.vector_column:
async_query = async_query.column(query.vector_column)
if query.ef:
async_query = async_query.ef(query.ef)

if not query.prefilter:
async_query = async_query.postfilter()
@@ -2935,19 +2873,6 @@ class AsyncTable:
"""
return await self._inner.version()

async def list_versions(self):
"""
List all versions of the table
"""
versions = await self._inner.list_versions()
for v in versions:
ts_nanos = v["timestamp"]
v["timestamp"] = datetime.fromtimestamp(ts_nanos // 1e9) + timedelta(
microseconds=(ts_nanos % 1e9) // 1e3
)

return versions

async def checkout(self, version):
"""
Checks out a specific version of the Table

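The nanosecond-to-datetime conversion used by list_versions above, shown standalone: integer-divide down to whole seconds, then carry the sub-second remainder over as microseconds (the sample timestamp is illustrative):

    from datetime import datetime, timedelta

    ts_nanos = 1_700_000_000_123_456_789
    ts = datetime.fromtimestamp(ts_nanos // 1e9) + timedelta(
        microseconds=(ts_nanos % 1e9) // 1e3
    )
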
@@ -11,8 +11,6 @@ from datetime import date, datetime
from functools import singledispatch
from typing import Tuple, Union, Optional, Any
from urllib.parse import urlparse
from threading import Lock
from contextlib import contextmanager

import numpy as np
import pyarrow as pa
@@ -316,27 +314,3 @@ def deprecated(func):
def validate_table_name(name: str):
"""Verify the table name is valid."""
native_validate_table_name(name)


class ConnectionPool:
def __init__(self, connection_factory, *, max_size: Optional[int] = None):
self.max_size = max_size
self._connection_factory = connection_factory
self._pool = []
self._lock = Lock()

@contextmanager
def connection(self):
with self._lock:
if self._pool:
conn = self._pool.pop()
else:
conn = self._connection_factory()

# release the lock before yielding
try:
yield conn
finally:
with self._lock:
if self.max_size is None or len(self._pool) < self.max_size:
self._pool.append(conn)

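A sketch of using the ConnectionPool added above; the factory and the max_size bound are illustrative:

    pool = ConnectionPool(lambda: lancedb.connect("db://dev", api_key="..."), max_size=4)

    with pool.connection() as conn:
        tbl = conn.open_table("myTable")
        print(tbl.search([1.0, 2.0]).limit(1).to_list())
    # on exit the connection is returned to the pool (up to max_size) for reuse
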
293
python/python/tests/docs/test_cloud.py
Normal file
@@ -0,0 +1,293 @@
# --8<-- [start:imports]
# --8<-- [start:import-lancedb]
# --8<-- [start:import-ingest-data]
import lancedb
import pyarrow as pa
# --8<-- [end:import-ingest-data]
import numpy as np

# --8<-- [end:import-lancedb]
# --8<-- [end:imports]
# --8<-- [start:gen_data]
def gen_data(total_rows: int, ndims: int = 1536):
return pa.RecordBatch.from_pylist(
[
{
"vector": np.random.rand(ndims).astype(np.float32).tolist(),
"id": i,
"name": "name_" + str(i),
}
for i in range(total_rows)
],
).to_pandas()


# --8<-- [end:gen_data]


def test_cloud_quickstart():
# --8<-- [start:connect]
db = lancedb.connect(
uri="db://your-project-slug", api_key="your-api-key", region="your-cloud-region"
)
# --8<-- [end:connect]
# --8<-- [start:create_table]
table_name = "myTable"
table = db.create_table(table_name, data=gen_data(5000))
# --8<-- [end:create_table]
# --8<-- [start:create_index_search]
# create a vector index
table.create_index("cosine", vector_column_name="vector")
result = table.search([0.01, 0.02]).select(["vector", "item"]).limit(1).to_pandas()
print(result)
# --8<-- [end:create_index_search]
# --8<-- [start:drop_table]
db.drop_table(table_name)
# --8<-- [end:drop_table]


def test_ingest_data():
# --8<-- [start:ingest_data]
# connect to LanceDB
db = lancedb.connect(
uri="db://your-project-slug", api_key="your-api-key", region="us-east-1"
)

# create an empty table with schema
table_name = "myTable"
data = [
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
{"vector": [10.2, 100.8], "item": "baz", "price": 30.0},
{"vector": [1.4, 9.5], "item": "fred", "price": 40.0},
]
schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2)),
pa.field("item", pa.utf8()),
pa.field("price", pa.float32()),
]
)
table = db.create_table(table_name, schema=schema)
table.add(data)
# --8<-- [end:ingest_data]
# --8<-- [start:ingest_data_in_batch]
def make_batches():
for i in range(5):
yield pa.RecordBatch.from_arrays(
[
pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)),
pa.array(["foo", "bar"]),
pa.array([10.0, 20.0]),
],
["vector", "item", "price"],
)

schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2)),
pa.field("item", pa.utf8()),
pa.field("price", pa.float32()),
]
)
db.create_table("table2", make_batches(), schema=schema)
# --8<-- [end:ingest_data_in_batch]


def test_updates():
# --8<-- [start:update_data]
import lancedb

# connect to LanceDB
db = lancedb.connect(
uri="db://your-project-slug", api_key="your-api-key", region="us-east-1"
)
table_name = "myTable"
table = db.open_table(table_name)
table.update(where="price < 20.0", values={"vector": [2, 2], "item": "foo-updated"})
# --8<-- [end:update_data]
# --8<-- [start:merge_insert]
table = db.open_table(table_name)
# upsert
new_data = [{"vector": [1, 1], "item": "foo-updated", "price": 50.0}]
table.merge_insert(
"item"
).when_matched_update_all().when_not_matched_insert_all().execute(new_data)
# --8<-- [end:merge_insert]
# --8<-- [start:delete_data]
table_name = "myTable"
table = db.open_table(table_name)
# delete data
predicate = "price = 30.0"
table.delete(predicate)
# --8<-- [end:delete_data]


def test_create_index():
# --8<-- [start:create_index]
import lancedb

# connect to LanceDB
db = lancedb.connect(
uri="db://your-project-slug", api_key="your-api-key", region="us-east-1"
)

table_name = "myTable"
table = db.open_table(table_name)
# the vector column only needs to be specified when there are
# multiple vector columns or the column is not named as "vector"
# L2 is used as the default distance metric
table.create_index(metric="cosine", vector_column_name="vector")
# --8<-- [end:create_index]


def test_create_scalar_index():
# --8<-- [start:create_scalar_index]
import lancedb

# connect to LanceDB
db = lancedb.connect(
uri="db://your-project-slug", api_key="your-api-key", region="us-east-1"
)

table_name = "myTable"
table = db.open_table(table_name)
# default is BTree
table.create_scalar_index("item", index_type="BITMAP")
# --8<-- [end:create_scalar_index]


def test_create_fts_index():
# --8<-- [start:create_fts_index]
import lancedb

# connect to LanceDB
db = lancedb.connect(
uri="db://your-project-slug", api_key="your-api-key", region="us-east-1"
)

table_name = "myTable"
data = [
{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"},
{"vector": [5.9, 26.5], "text": "There are several kittens playing"},
]
table = db.create_table(table_name, data=data)
table.create_fts_index("text")
# --8<-- [end:create_fts_index]


def test_search():
# --8<-- [start:vector_search]
import lancedb

# connect to LanceDB
db = lancedb.connect(
uri="db://your-project-slug", api_key="your-api-key", region="us-east-1"
)

table_name = "myTable"
table = db.open_table(table_name)
query = [0.4, 1.4]
result = (
table.search(query)
.where("price > 10.0", prefilter=True)
.select(["item", "vector"])
.limit(2)
.to_pandas()
)
print(result)
# --8<-- [end:vector_search]
# --8<-- [start:full_text_search]
import lancedb

# connect to LanceDB
db = lancedb.connect(
uri="db://your-project-slug", api_key="your-api-key", region="us-east-1"
)
table_name = "myTable"
table = db.create_table(
table_name,
data=[
{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"},
{"vector": [5.9, 26.5], "text": "There are several kittens playing"},
],
)

table.create_fts_index("text")
table.search("puppy", query_type="fts").limit(10).select(["text"]).to_list()
# --8<-- [end:full_text_search]
# --8<-- [start:hybrid_search]
import os

import lancedb
import openai
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import RRFReranker

# connect to LanceDB
db = lancedb.connect(
uri="db://your-project-slug", api_key="your-api-key", region="us-east-1"
)

# Configuring the environment variable OPENAI_API_KEY
if "OPENAI_API_KEY" not in os.environ:
# OR set the key here as a variable
openai.api_key = "sk-..."
embeddings = get_registry().get("openai").create()

class Documents(LanceModel):
text: str = embeddings.SourceField()
vector: Vector(embeddings.ndims()) = embeddings.VectorField()

table_name = "myTable"
table = db.create_table(table_name, schema=Documents)
data = [
{"text": "rebel spaceships striking from a hidden base"},
{"text": "have won their first victory against the evil Galactic Empire"},
{"text": "during the battle rebel spies managed to steal secret plans"},
{"text": "to the Empire's ultimate weapon the Death Star"},
]
table.add(data=data)
table.create_index("L2", "vector")
table.create_fts_index("text")

# you can use table.list_indices() to make sure indices have been created
reranker = RRFReranker()
result = (
table.search(
"flower moon",
query_type="hybrid",
vector_column_name="vector",
fts_columns="text",
)
.rerank(reranker)
.limit(10)
.to_pandas()
)
print(result)
# --8<-- [end:hybrid_search]


def test_filtering():
# --8<-- [start:filtering]
import lancedb

# connect to LanceDB
db = lancedb.connect(
uri="db://your-project-slug", api_key="your-api-key", region="us-east-1"
)
table_name = "myTable"
table = db.open_table(table_name)
result = (
table.search([100, 102])
.where("(item IN ('foo', 'bar')) AND (price > 10.0)")
.to_arrow()
)
print(result)
# --8<-- [end:filtering]
# --8<-- [start:sql_filtering]
table.search([100, 102]).where(
"(item IN ('foo', 'bar')) AND (price > 10.0)"
).to_arrow()
# --8<-- [end:sql_filtering]
@@ -81,36 +81,28 @@ def test_embedding_function(tmp_path):


def test_embedding_with_bad_results(tmp_path):
@register("null-embedding")
class NullEmbeddingFunction(TextEmbeddingFunction):
@register("mock-embedding")
class MockEmbeddingFunction(TextEmbeddingFunction):
def ndims(self):
return 128

def generate_embeddings(
self, texts: Union[List[str], np.ndarray]
) -> list[Union[np.array, None]]:
# Return None, which is bad if field is non-nullable
a = [
np.full(self.ndims(), np.nan)
if i % 2 == 0
else np.random.randn(self.ndims())
return [
None if i % 2 == 0 else np.random.randn(self.ndims())
for i in range(len(texts))
]
return a

db = lancedb.connect(tmp_path)
registry = EmbeddingFunctionRegistry.get_instance()
model = registry.get("null-embedding").create()
model = registry.get("mock-embedding").create()

class Schema(LanceModel):
text: str = model.SourceField()
vector: Vector(model.ndims()) = model.VectorField()

table = db.create_table("test", schema=Schema, mode="overwrite")
with pytest.raises(ValueError):
# Default on_bad_vectors is "error"
table.add([{"text": "hello world"}])

table.add(
[{"text": "hello world"}, {"text": "bar"}],
on_bad_vectors="drop",
@@ -120,33 +112,13 @@ def test_embedding_with_bad_results(tmp_path):
assert len(table) == 1
assert df.iloc[0]["text"] == "bar"

@register("nan-embedding")
class NanEmbeddingFunction(TextEmbeddingFunction):
def ndims(self):
return 128

def generate_embeddings(
self, texts: Union[List[str], np.ndarray]
) -> list[Union[np.array, None]]:
# Return NaN to produce bad vectors
return [
[np.NAN] * 128 if i % 2 == 0 else np.random.randn(self.ndims())
for i in range(len(texts))
]

db = lancedb.connect(tmp_path)
registry = EmbeddingFunctionRegistry.get_instance()
model = registry.get("nan-embedding").create()

table = db.create_table("test2", schema=Schema, mode="overwrite")
table.alter_columns(dict(path="vector", nullable=True))
table.add(
[{"text": "hello world"}, {"text": "bar"}],
on_bad_vectors="null",
)
assert len(table) == 2
tbl = table.to_arrow()
assert tbl["vector"].null_count == 1
# table = db.create_table("test2", schema=Schema, mode="overwrite")
# table.add(
# [{"text": "hello world"}, {"text": "bar"}],
# )
# assert len(table) == 2
# tbl = table.to_arrow()
# assert tbl["vector"].null_count == 1


def test_with_existing_vectors(tmp_path):

@@ -1,6 +1,15 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

# Copyright (c) 2023. LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import io
import os
@@ -8,7 +17,6 @@ import os
import lancedb
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
@@ -436,30 +444,6 @@ def test_watsonx_embedding(tmp_path):
assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world"


@pytest.mark.slow
@pytest.mark.skipif(
os.environ.get("OPENAI_API_KEY") is None, reason="OPENAI_API_KEY not set"
)
def test_openai_with_empty_strs(tmp_path):
model = get_registry().get("openai").create(max_retries=0)

class TextModel(LanceModel):
text: str = model.SourceField()
vector: Vector(model.ndims()) = model.VectorField()

df = pd.DataFrame({"text": ["hello world", ""]})
db = lancedb.connect(tmp_path)
tbl = db.create_table("test", schema=TextModel, mode="overwrite")

tbl.add(df, on_bad_vectors="skip")
tb = tbl.to_arrow()
assert tb.schema.field_by_name("vector").type == pa.list_(
pa.float32(), model.ndims()
)
assert len(tb) == 2
assert tb["vector"].is_null().to_pylist() == [False, True]


@pytest.mark.slow
@pytest.mark.skipif(
importlib.util.find_spec("ollama") is None, reason="Ollama not installed"

@@ -1,5 +1,16 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import json
import sys
@@ -161,26 +172,6 @@ def test_pydantic_to_arrow_py38():
assert schema == expect_schema


def test_nullable_vector():
class NullableModel(pydantic.BaseModel):
vec: Vector(16, nullable=False)

schema = pydantic_to_schema(NullableModel)
assert schema == pa.schema([pa.field("vec", pa.list_(pa.float32(), 16), False)])

class DefaultModel(pydantic.BaseModel):
vec: Vector(16)

schema = pydantic_to_schema(DefaultModel)
assert schema == pa.schema([pa.field("vec", pa.list_(pa.float32(), 16), True)])

class NotNullableModel(pydantic.BaseModel):
vec: Vector(16)

schema = pydantic_to_schema(NotNullableModel)
assert schema == pa.schema([pa.field("vec", pa.list_(pa.float32(), 16), True)])


def test_fixed_size_list_field():
class TestModel(pydantic.BaseModel):
vec: Vector(16)
@@ -201,7 +192,7 @@ def test_fixed_size_list_field():
schema = pydantic_to_schema(TestModel)
assert schema == pa.schema(
[
pa.field("vec", pa.list_(pa.float32(), 16)),
pa.field("vec", pa.list_(pa.float32(), 16), False),
pa.field("li", pa.list_(pa.int64()), False),
]
)

@@ -1,9 +1,21 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest.mock as mock
from datetime import timedelta
from typing import Optional

import lance
import lancedb
from lancedb.index import IvfPq
import numpy as np
@@ -11,15 +23,41 @@ import pandas.testing as tm
import pyarrow as pa
import pytest
import pytest_asyncio
from lancedb.db import LanceDBConnection
from lancedb.pydantic import LanceModel, Vector
from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query
from lancedb.table import AsyncTable, LanceTable


@pytest.fixture(scope="module")
def table(tmpdir_factory) -> lancedb.table.Table:
tmp_path = str(tmpdir_factory.mktemp("data"))
db = lancedb.connect(tmp_path)
class MockTable:
def __init__(self, tmp_path):
self.uri = tmp_path
self._conn = LanceDBConnection(self.uri)

def to_lance(self):
return lance.dataset(self.uri)

def _execute_query(self, query, batch_size: Optional[int] = None):
ds = self.to_lance()
return ds.scanner(
columns=query.columns,
filter=query.filter,
prefilter=query.prefilter,
nearest={
"column": query.vector_column,
"q": query.vector,
"k": query.k,
"metric": query.metric,
"nprobes": query.nprobes,
"refine_factor": query.refine_factor,
},
batch_size=batch_size,
offset=query.offset,
).to_reader()


@pytest.fixture
def table(tmp_path) -> MockTable:
df = pa.table(
{
"vector": pa.array(
@@ -30,7 +68,8 @@ def table(tmpdir_factory) -> lancedb.table.Table:
"float_field": pa.array([1.0, 2.0]),
}
)
return db.create_table("test", df)
lance.write_dataset(df, tmp_path)
return MockTable(tmp_path)


@pytest_asyncio.fixture
@@ -87,12 +126,6 @@ def test_query_builder(table):
assert all(np.array(rs[0]["vector"]) == [1, 2])


def test_with_row_id(table: lancedb.table.Table):
rs = table.search().with_row_id(True).to_arrow()
assert "_rowid" in rs.column_names
assert rs["_rowid"].to_pylist() == [0, 1]


def test_vector_query_with_no_limit(table):
with pytest.raises(ValueError):
LanceVectorQueryBuilder(table, [0, 0], "vector").limit(0).select(
@@ -332,12 +365,6 @@ async def test_query_to_pandas_async(table_async: AsyncTable):
assert df.shape == (0, 4)


@pytest.mark.asyncio
async def test_none_query(table_async: AsyncTable):
with pytest.raises(ValueError):
await table_async.query().nearest_to(None).to_arrow()


@pytest.mark.asyncio
async def test_fast_search_async(tmp_path):
db = await lancedb.connect_async(tmp_path)

@@ -6,16 +6,13 @@ from datetime import timedelta
import http.server
import json
import threading
from concurrent.futures import ThreadPoolExecutor
from unittest.mock import MagicMock
import uuid

import lancedb
from lancedb.conftest import MockTextEmbeddingFunction
from lancedb.remote import ClientConfig
from lancedb.util import ConnectionPool
from lancedb.remote.errors import HttpError, RetryError
import lancedb.util
import pytest
import pyarrow as pa

@@ -58,34 +55,6 @@ def mock_lancedb_connection(handler):
handle.join()


@contextlib.contextmanager
def mock_lancedb_connection_pool(handler):
with http.server.HTTPServer(
("localhost", 8080), make_mock_http_handler(handler)
) as server:
handle = threading.Thread(target=server.serve_forever)
handle.start()

def conn_factory():
return lancedb.connect(
"db://dev",
api_key="fake",
host_override="http://localhost:8080",
client_config={
"retry_config": {"retries": 2},
"timeout_config": {
"connect_timeout": 1,
},
},
)

try:
yield ConnectionPool(conn_factory)
finally:
server.shutdown()
handle.join()


@contextlib.asynccontextmanager
async def mock_lancedb_connection_async(handler):
with http.server.HTTPServer(
@@ -134,47 +103,6 @@ async def test_async_remote_db():
assert table_names == []


@pytest.mark.asyncio
async def test_async_checkout():
def handler(request):
if request.path == "/v1/table/test/describe/":
request.send_response(200)
request.send_header("Content-Type", "application/json")
request.end_headers()
response = json.dumps({"version": 42, "schema": {"fields": []}})
request.wfile.write(response.encode())
return

content_len = int(request.headers.get("Content-Length"))
body = request.rfile.read(content_len)
body = json.loads(body)

print("body is", body)

count = 0
if body["version"] == 1:
count = 100
elif body["version"] == 2:
count = 200
elif body["version"] is None:
count = 300

request.send_response(200)
request.send_header("Content-Type", "application/json")
request.end_headers()
request.wfile.write(json.dumps(count).encode())

async with mock_lancedb_connection_async(handler) as db:
table = await db.open_table("test")
assert await table.count_rows() == 300
await table.checkout(1)
assert await table.count_rows() == 100
await table.checkout(2)
assert await table.count_rows() == 200
await table.checkout_latest()
assert await table.count_rows() == 300


@pytest.mark.asyncio
async def test_http_error():
request_id_holder = {"request_id": None}
@@ -218,7 +146,8 @@ async def test_retry_error():
assert cause.status_code == 429


def http_handler(query_handler):
@contextlib.contextmanager
def query_test_table(query_handler):
def handler(request):
if request.path == "/v1/table/test/describe/":
request.send_response(200)
@@ -242,12 +171,7 @@ def http_handler(query_handler):
request.send_response(404)
request.end_headers()

return handler


@contextlib.contextmanager
def query_test_table(connection_ctx_mgr):
with connection_ctx_mgr as db:
with mock_lancedb_connection(handler) as db:
assert repr(db) == "RemoteConnect(name=dev)"
table = db.open_table("test")
assert repr(table) == "RemoteTable(dev.test)"
@@ -255,89 +179,23 @@ def query_test_table(connection_ctx_mgr):


def test_query_sync_minimal():
@http_handler
def handler(body):
assert body == {
"distance_type": "l2",
"k": 10,
"prefilter": False,
"refine_factor": None,
"ef": None,
"vector": [1.0, 2.0, 3.0],
"nprobes": 20,
"version": None,
}

return pa.table({"id": [1, 2, 3]})

with query_test_table(mock_lancedb_connection(handler)) as table:
with query_test_table(handler) as table:
data = table.search([1, 2, 3]).to_list()
expected = [{"id": 1}, {"id": 2}, {"id": 3}]
assert data == expected

with query_test_table(mock_lancedb_connection_pool(handler).connection()) as table:
data = table.search([1, 2, 3]).to_list()
expected = [{"id": 1}, {"id": 2}, {"id": 3}]
assert data == expected


def test_query_sync_minimal_threaded():
num_query = 0

@http_handler
def handler(body):
assert body == {
"distance_type": "l2",
"k": 10,
"prefilter": False,
"refine_factor": None,
"ef": None,
"vector": [1.0, 2.0, 3.0],
"nprobes": 20,
"version": None,
}
nonlocal num_query
num_query += 1

return pa.table({"id": [1, 2, 3]})

pool = mock_lancedb_connection_pool(handler)

def _query(i):
with query_test_table(pool.connection()) as table:
data = table.search([1, 2, 3]).to_list()
expected = [{"id": 1}, {"id": 2}, {"id": 3}]
assert data == expected

with ThreadPoolExecutor() as exec:
exec.map(_query, range(1000))

assert num_query == 1000


def test_query_sync_empty_query():
@http_handler
def handler(body):
assert body == {
"k": 10,
"filter": "true",
"vector": [],
"columns": ["id"],
"version": None,
}

return pa.table({"id": [1, 2, 3]})

with query_test_table(mock_lancedb_connection(handler)) as table:
data = table.search(None).where("true").select(["id"]).limit(10).to_list()
expected = [{"id": 1}, {"id": 2}, {"id": 3}]
assert data == expected

with query_test_table(mock_lancedb_connection_pool(handler).connection()) as table:
data = table.search(None).where("true").select(["id"]).limit(10).to_list()
expected = [{"id": 1}, {"id": 2}, {"id": 3}]
assert data == expected


def test_query_sync_maximal():
def handler(body):
@@ -348,13 +206,11 @@ def test_query_sync_maximal():
"refine_factor": 10,
"vector": [1.0, 2.0, 3.0],
"nprobes": 5,
"ef": None,
"filter": "id > 0",
"columns": ["id", "name"],
"vector_column": "vector2",
"fast_search": True,
"with_row_id": True,
"version": None,
}

return pa.table({"id": [1, 2, 3], "name": ["a", "b", "c"]})
@@ -373,17 +229,6 @@ def test_query_sync_maximal():
)


def test_query_sync_multiple_vectors():
def handler(_body):
return pa.table({"id": [1]})

with query_test_table(handler) as table:
results = table.search([[1, 2, 3], [4, 5, 6]]).limit(1).to_list()
assert len(results) == 2
results.sort(key=lambda x: x["query_index"])
assert results == [{"id": 1, "query_index": 0}, {"id": 1, "query_index": 1}]


def test_query_sync_fts():
def handler(body):
assert body == {
@@ -393,7 +238,6 @@ def test_query_sync_fts():
},
"k": 10,
"vector": [],
"version": None,
}

return pa.table({"id": [1, 2, 3]})
@@ -410,7 +254,6 @@ def test_query_sync_fts():
"k": 42,
"vector": [],
"with_row_id": True,
"version": None,
}

return pa.table({"id": [1, 2, 3]})
@@ -436,7 +279,6 @@ def test_query_sync_hybrid():
"k": 42,
"vector": [],
"with_row_id": True,
"version": None,
}
return pa.table({"_rowid": [1, 2, 3], "_score": [0.1, 0.2, 0.3]})
else:
@@ -448,9 +290,7 @@ def test_query_sync_hybrid():
"refine_factor": None,
"vector": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"nprobes": 20,
"ef": None,
"with_row_id": True,
"version": None,
}
return pa.table({"_rowid": [1, 2, 3], "_distance": [0.1, 0.2, 0.3]})


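A sketch of the filter-only ("empty") query exercised above; as asserted in the test, the sync client sends an empty vector plus a SQL filter in the request body (table and filter are illustrative):

    rows = table.search(None).where("price > 10.0").select(["id"]).limit(10).to_list()
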
@@ -240,121 +240,6 @@ def test_add(db):
_add(table, schema)


def test_add_subschema(tmp_path):
db = lancedb.connect(tmp_path)
schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2), nullable=True),
pa.field("item", pa.string(), nullable=True),
pa.field("price", pa.float64(), nullable=False),
]
)
table = db.create_table("test", schema=schema)

data = {"price": 10.0, "item": "foo"}
table.add([data])
data = {"price": 2.0, "vector": [3.1, 4.1]}
table.add([data])
data = {"price": 3.0, "vector": [5.9, 26.5], "item": "bar"}
table.add([data])

expected = pa.table(
{
"vector": [None, [3.1, 4.1], [5.9, 26.5]],
"item": ["foo", None, "bar"],
"price": [10.0, 2.0, 3.0],
},
schema=schema,
)
assert table.to_arrow() == expected

data = {"item": "foo"}
# We can't omit a column if it's not nullable
with pytest.raises(OSError, match="Invalid user input"):
table.add([data])

# We can add it if we make the column nullable
table.alter_columns(dict(path="price", nullable=True))
table.add([data])

expected_schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2), nullable=True),
pa.field("item", pa.string(), nullable=True),
pa.field("price", pa.float64(), nullable=True),
]
)
expected = pa.table(
{
"vector": [None, [3.1, 4.1], [5.9, 26.5], None],
"item": ["foo", None, "bar", "foo"],
"price": [10.0, 2.0, 3.0, None],
},
schema=expected_schema,
)
assert table.to_arrow() == expected


def test_add_nullability(tmp_path):
db = lancedb.connect(tmp_path)
schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2), nullable=False),
pa.field("id", pa.string(), nullable=False),
]
)
table = db.create_table("test", schema=schema)

nullable_schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2), nullable=True),
pa.field("id", pa.string(), nullable=True),
]
)
data = pa.table(
{
"vector": [[3.1, 4.1], [5.9, 26.5]],
"id": ["foo", "bar"],
},
schema=nullable_schema,
)
# We can add nullable schema if it doesn't actually contain nulls
table.add(data)

expected = data.cast(schema)
assert table.to_arrow() == expected

data = pa.table(
{
"vector": [None],
"id": ["baz"],
},
schema=nullable_schema,
)
# We can't add nullable schema if it contains nulls
with pytest.raises(Exception, match="Vector column vector has NaNs"):
table.add(data)

# But we can make it nullable
table.alter_columns(dict(path="vector", nullable=True))
table.add(data)

expected_schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2), nullable=True),
pa.field("id", pa.string(), nullable=False),
]
)
expected = pa.table(
{
"vector": [[3.1, 4.1], [5.9, 26.5], None],
"id": ["foo", "bar", "baz"],
},
schema=expected_schema,
)
assert table.to_arrow() == expected


def test_add_pydantic_model(db):
# https://github.com/lancedb/lancedb/issues/562

@@ -1007,15 +892,10 @@ def test_empty_query(db):
table = LanceTable.create(db, "my_table2", data=[{"id": i} for i in range(100)])
df = table.search().select(["id"]).to_pandas()
assert len(df) == 10
# None is the same as default
df = table.search().select(["id"]).limit(None).to_pandas()
assert len(df) == 10
# invalid limit is the same as None, which is the same as default
assert len(df) == 100
df = table.search().select(["id"]).limit(-1).to_pandas()
assert len(df) == 10
# valid limit should work
df = table.search().select(["id"]).limit(42).to_pandas()
assert len(df) == 42
assert len(df) == 100


def test_search_with_schema_inf_single_vector(db):

@@ -142,13 +142,6 @@ impl VectorQuery {
self.inner = self.inner.clone().only_if(predicate);
}

pub fn add_query_vector(&mut self, vector: Bound<'_, PyAny>) -> PyResult<()> {
let data: ArrayData = ArrayData::from_pyarrow_bound(&vector)?;
let array = make_array(data);
self.inner = self.inner.clone().add_query_vector(array).infer_error()?;
Ok(())
}

pub fn select(&mut self, columns: Vec<(String, String)>) {
self.inner = self.inner.clone().select(Select::dynamic(&columns));
}
@@ -195,10 +188,6 @@ impl VectorQuery {
self.inner = self.inner.clone().nprobes(nprobe as usize);
}

pub fn ef(&mut self, ef: u32) {
self.inner = self.inner.clone().ef(ef as usize);
}

pub fn bypass_vector_index(&mut self) {
self.inner = self.inner.clone().bypass_vector_index()
}

@@ -8,7 +8,7 @@ use lancedb::table::{
use pyo3::{
exceptions::{PyRuntimeError, PyValueError},
pyclass, pymethods,
types::{IntoPyDict, PyDict, PyDictMethods, PyString},
types::{PyDict, PyDictMethods, PyString},
Bound, FromPyObject, PyAny, PyRef, PyResult, Python, ToPyObject,
};
use pyo3_asyncio_0_21::tokio::future_into_py;
@@ -246,33 +246,6 @@ impl Table {
)
}

pub fn list_versions(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
let inner = self_.inner_ref()?.clone();
future_into_py(self_.py(), async move {
let versions = inner.list_versions().await.infer_error()?;
let versions_as_dict = Python::with_gil(|py| {
versions
.iter()
.map(|v| {
let dict = PyDict::new_bound(py);
dict.set_item("version", v.version).unwrap();
dict.set_item(
"timestamp",
v.timestamp.timestamp_nanos_opt().unwrap_or_default(),
)
.unwrap();

let tup: Vec<(&String, &String)> = v.metadata.iter().collect();
dict.set_item("metadata", tup.into_py_dict(py)).unwrap();
dict.to_object(py)
})
.collect::<Vec<_>>()
});

Ok(versions_as_dict)
})
}

pub fn checkout(self_: PyRef<'_, Self>, version: u64) -> PyResult<Bound<'_, PyAny>> {
let inner = self_.inner_ref()?.clone();
future_into_py(self_.py(), async move {

@@ -1,6 +1,6 @@
[package]
name = "lancedb-node"
version = "0.13.1-beta.0"
version = "0.13.0-beta.1"
description = "Serverless, low-latency vector database for AI applications"
license.workspace = true
edition.workspace = true

@@ -1,6 +1,6 @@
[package]
name = "lancedb"
version = "0.13.1-beta.0"
version = "0.13.0-beta.1"
edition.workspace = true
description = "LanceDB: A serverless, low-latency vector database for AI applications"
license.workspace = true
@@ -46,18 +46,10 @@ serde = { version = "^1" }
serde_json = { version = "1" }
async-openai = { version = "0.20.0", optional = true }
serde_with = { version = "3.8.1" }
aws-sdk-bedrockruntime = { version = "1.27.0", optional = true }
# For remote feature
reqwest = { version = "0.12.0", default-features = false, features = [
"charset",
"gzip",
"http2",
"json",
"macos-system-configuration",
"stream",
], optional = true }
rand = { version = "0.8.3", features = ["small_rng"], optional = true }
http = { version = "1", optional = true } # Matching what is in reqwest
reqwest = { version = "0.12.0", features = ["gzip", "json", "stream"], optional = true }
rand = { version = "0.8.3", features = ["small_rng"], optional = true}
http = { version = "1", optional = true } # Matching what is in reqwest
uuid = { version = "1.7.0", features = ["v4"], optional = true }
polars-arrow = { version = ">=0.37,<0.40.0", optional = true }
polars = { version = ">=0.37,<0.40.0", optional = true }
@@ -80,13 +72,11 @@ aws-config = { version = "1.0" }
aws-smithy-runtime = { version = "1.3" }
http-body = "1" # Matching reqwest


[features]
default = ["default-tls"]
default = []
remote = ["dep:reqwest", "dep:http", "dep:rand", "dep:uuid"]
fp16kernels = ["lance-linalg/fp16kernels"]
s3-test = []
bedrock = ["dep:aws-sdk-bedrockruntime"]
openai = ["dep:async-openai", "dep:reqwest"]
polars = ["dep:polars-arrow", "dep:polars"]
sentence-transformers = [
@@ -97,11 +87,6 @@ sentence-transformers = [
"dep:tokenizers"
]

# TLS
default-tls = ["reqwest?/default-tls"]
native-tls = ["reqwest?/native-tls"]
rustls-tls = ["reqwest?/rustls-tls"]

[[example]]
name = "openai"
required-features = ["openai"]
@@ -109,7 +94,3 @@ required-features = ["openai"]
[[example]]
name = "sentence_transformers"
required-features = ["sentence-transformers"]

[[example]]
name = "bedrock"
required-features = ["bedrock"]

@@ -1,89 +0,0 @@
use std::{iter::once, sync::Arc};

use arrow_array::{Float64Array, Int32Array, RecordBatch, RecordBatchIterator, StringArray};
use arrow_schema::{DataType, Field, Schema};
use aws_config::Region;
use aws_sdk_bedrockruntime::Client;
use futures::StreamExt;
use lancedb::{
arrow::IntoArrow,
connect,
embeddings::{bedrock::BedrockEmbeddingFunction, EmbeddingDefinition, EmbeddingFunction},
query::{ExecutableQuery, QueryBase},
Result,
};

#[tokio::main]
async fn main() -> Result<()> {
let tempdir = tempfile::tempdir().unwrap();
let tempdir = tempdir.path().to_str().unwrap();

// create Bedrock embedding function
let region: String = "us-east-1".to_string();
let config = aws_config::defaults(aws_config::BehaviorVersion::latest())
.region(Region::new(region))
.load()
.await;

let embedding = Arc::new(BedrockEmbeddingFunction::new(
Client::new(&config), // AWS Region
));

let db = connect(tempdir).execute().await?;
db.embedding_registry()
.register("bedrock", embedding.clone())?;

let table = db
.create_table("vectors", make_data())
.add_embedding(EmbeddingDefinition::new(
"text",
"bedrock",
Some("embeddings"),
))?
.execute()
.await?;

// execute vector search
let query = Arc::new(StringArray::from_iter_values(once("something warm")));
let query_vector = embedding.compute_query_embeddings(query)?;
let mut results = table
.vector_search(query_vector)?
.limit(1)
.execute()
.await?;

let rb = results.next().await.unwrap()?;
let out = rb
.column_by_name("text")
.unwrap()
.as_any()
.downcast_ref::<StringArray>()
.unwrap();
let text = out.iter().next().unwrap().unwrap();
println!("Closest match: {}", text);
Ok(())
}

fn make_data() -> impl IntoArrow {
let schema = Schema::new(vec![
Field::new("id", DataType::Int32, true),
Field::new("text", DataType::Utf8, false),
Field::new("price", DataType::Float64, false),
]);

let id = Int32Array::from(vec![1, 2, 3, 4]);
let text = StringArray::from_iter_values(vec![
"Black T-Shirt",
"Leather Jacket",
"Winter Parka",
"Hooded Sweatshirt",
]);
let price = Float64Array::from(vec![10.0, 50.0, 100.0, 30.0]);
let schema = Arc::new(schema);
let rb = RecordBatch::try_new(
schema.clone(),
vec![Arc::new(id), Arc::new(text), Arc::new(price)],
)
.unwrap();
Box::new(RecordBatchIterator::new(vec![Ok(rb)], schema))
}
@@ -17,9 +17,6 @@ pub mod openai;
#[cfg(feature = "sentence-transformers")]
pub mod sentence_transformers;

#[cfg(feature = "bedrock")]
pub mod bedrock;

use lance::arrow::RecordBatchExt;
use std::{
borrow::Cow,

@@ -1,210 +0,0 @@
use aws_sdk_bedrockruntime::Client as BedrockClient;
use std::{borrow::Cow, fmt::Formatter, str::FromStr, sync::Arc};

use arrow::array::{AsArray, Float32Builder};
use arrow_array::{Array, ArrayRef, FixedSizeListArray, Float32Array};
use arrow_data::ArrayData;
use arrow_schema::DataType;
use serde_json::{json, Value};

use super::EmbeddingFunction;
use crate::{Error, Result};

use tokio::runtime::Handle;
use tokio::task::block_in_place;

#[derive(Debug)]
pub enum BedrockEmbeddingModel {
    TitanEmbedding,
    CohereLarge,
}

impl BedrockEmbeddingModel {
    fn ndims(&self) -> usize {
        match self {
            Self::TitanEmbedding => 1536,
            Self::CohereLarge => 1024,
        }
    }

    fn model_id(&self) -> &str {
        match self {
            Self::TitanEmbedding => "amazon.titan-embed-text-v1",
            Self::CohereLarge => "cohere.embed-english-v3",
        }
    }
}

impl FromStr for BedrockEmbeddingModel {
    type Err = Error;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        match s {
            "titan-embed-text-v1" => Ok(Self::TitanEmbedding),
            "cohere-embed-english-v3" => Ok(Self::CohereLarge),
            _ => Err(Error::InvalidInput {
                message: "Invalid model. Available models are: 'titan-embed-text-v1', 'cohere-embed-english-v3'".to_string()
            }),
        }
    }
}

pub struct BedrockEmbeddingFunction {
    model: BedrockEmbeddingModel,
    client: BedrockClient,
}

impl BedrockEmbeddingFunction {
    pub fn new(client: BedrockClient) -> Self {
        Self {
            model: BedrockEmbeddingModel::TitanEmbedding,
            client,
        }
    }

    pub fn with_model(client: BedrockClient, model: BedrockEmbeddingModel) -> Self {
        Self { model, client }
    }
}

impl EmbeddingFunction for BedrockEmbeddingFunction {
    fn name(&self) -> &str {
        "bedrock"
    }

    fn source_type(&self) -> Result<Cow<DataType>> {
        Ok(Cow::Owned(DataType::Utf8))
    }

    fn dest_type(&self) -> Result<Cow<DataType>> {
        let n_dims = self.model.ndims();
        Ok(Cow::Owned(DataType::new_fixed_size_list(
            DataType::Float32,
            n_dims as i32,
            false,
        )))
    }

    fn compute_source_embeddings(&self, source: ArrayRef) -> Result<ArrayRef> {
        let len = source.len();
        let n_dims = self.model.ndims();
        let inner = self.compute_inner(source)?;

        let fsl = DataType::new_fixed_size_list(DataType::Float32, n_dims as i32, false);

        let array_data = ArrayData::builder(fsl)
            .len(len)
            .add_child_data(inner.into_data())
            .build()?;

        Ok(Arc::new(FixedSizeListArray::from(array_data)))
    }

    fn compute_query_embeddings(&self, input: Arc<dyn Array>) -> Result<Arc<dyn Array>> {
        let arr = self.compute_inner(input)?;
        Ok(Arc::new(arr))
    }
}

impl std::fmt::Debug for BedrockEmbeddingFunction {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("BedrockEmbeddingFunction")
            .field("model", &self.model)
            // Skip client field as it doesn't implement Debug
            .finish()
    }
}

impl BedrockEmbeddingFunction {
    fn compute_inner(&self, source: Arc<dyn Array>) -> Result<Float32Array> {
        if source.is_nullable() {
            return Err(Error::InvalidInput {
                message: "Expected non-nullable data type".to_string(),
            });
        }

        if !matches!(source.data_type(), DataType::Utf8 | DataType::LargeUtf8) {
            return Err(Error::InvalidInput {
                message: "Expected Utf8 data type".to_string(),
            });
        }

        let mut builder = Float32Builder::new();

        let texts = match source.data_type() {
            DataType::Utf8 => source
                .as_string::<i32>()
                .into_iter()
                .map(|s| s.expect("array is non-nullable").to_string())
                .collect::<Vec<String>>(),
            DataType::LargeUtf8 => source
                .as_string::<i64>()
                .into_iter()
                .map(|s| s.expect("array is non-nullable").to_string())
                .collect::<Vec<String>>(),
            _ => unreachable!(),
        };

        for text in texts {
            let request_body = match self.model {
                BedrockEmbeddingModel::TitanEmbedding => {
                    json!({
                        "inputText": text
                    })
                }
                BedrockEmbeddingModel::CohereLarge => {
                    json!({
                        "texts": [text],
                        "input_type": "search_document"
                    })
                }
            };

            let client = self.client.clone();
            let model_id = self.model.model_id().to_string();
            let request_body = request_body.clone();

            let response = block_in_place(move || {
                Handle::current().block_on(async move {
                    client
                        .invoke_model()
                        .model_id(model_id)
                        .body(aws_sdk_bedrockruntime::primitives::Blob::new(
                            serde_json::to_vec(&request_body).unwrap(),
                        ))
                        .send()
                        .await
                })
            })
            .unwrap();

            let response_json: Value =
                serde_json::from_slice(response.body.as_ref()).map_err(|e| Error::Runtime {
                    message: format!("Failed to parse response: {}", e),
                })?;

            let embedding = match self.model {
                BedrockEmbeddingModel::TitanEmbedding => response_json["embedding"]
                    .as_array()
                    .ok_or_else(|| Error::Runtime {
                        message: "Missing embedding in response".to_string(),
                    })?
                    .iter()
                    .map(|v| v.as_f64().unwrap() as f32)
                    .collect::<Vec<f32>>(),
                BedrockEmbeddingModel::CohereLarge => response_json["embeddings"][0]
                    .as_array()
                    .ok_or_else(|| Error::Runtime {
                        message: "Missing embeddings in response".to_string(),
                    })?
                    .iter()
                    .map(|v| v.as_f64().unwrap() as f32)
                    .collect::<Vec<f32>>(),
            };

            builder.append_slice(&embedding);
        }

        Ok(builder.finish())
    }
}
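
Before its removal, the module above could be driven roughly as follows. This is a minimal sketch; the aws_config and client-construction calls are assumptions about the caller's environment, not part of this crate.

    // Assumed imports: arrow_array::{ArrayRef, StringArray}, std::sync::Arc.
    // Build a Bedrock runtime client from ambient AWS credentials (assumed setup).
    let config = aws_config::load_from_env().await;
    let client = aws_sdk_bedrockruntime::Client::new(&config);

    // Embed with Cohere (1024 dims) instead of the default Titan model.
    let func = BedrockEmbeddingFunction::with_model(client, BedrockEmbeddingModel::CohereLarge);
    let source = Arc::new(StringArray::from(vec!["hello world"])) as ArrayRef;
    let embeddings = func.compute_source_embeddings(source)?; // FixedSizeList<Float32, 1024>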

@@ -475,7 +475,6 @@ impl<T: HasQuery> QueryBase for T {

/// Options for controlling the execution of a query
#[non_exhaustive]
#[derive(Debug, Clone)]
pub struct QueryExecutionOptions {
    /// The maximum number of rows that will be contained in a single
    /// `RecordBatch` delivered by the query.
@@ -651,7 +650,7 @@ impl Query {
    pub fn nearest_to(self, vector: impl IntoQueryVector) -> Result<VectorQuery> {
        let mut vector_query = self.into_vector();
        let query_vector = vector.to_query_vector(&DataType::Float32, "default")?;
        vector_query.query_vector.push(query_vector);
        vector_query.query_vector = Some(query_vector);
        Ok(vector_query)
    }
}
@@ -702,11 +701,8 @@ pub struct VectorQuery {
    // the column based on the dataset's schema.
    pub(crate) column: Option<String>,
    // IVF PQ - ANN search.
    pub(crate) query_vector: Vec<Arc<dyn Array>>,
    pub(crate) query_vector: Option<Arc<dyn Array>>,
    pub(crate) nprobes: usize,
    // The number of candidates to return during the refine step for HNSW,
    // defaults to 1.5 * limit.
    pub(crate) ef: Option<usize>,
    pub(crate) refine_factor: Option<u32>,
    pub(crate) distance_type: Option<DistanceType>,
    /// Default is true. Set to false to enforce a brute force search.
@@ -718,9 +714,8 @@ impl VectorQuery {
        Self {
            base,
            column: None,
            query_vector: Vec::new(),
            query_vector: None,
            nprobes: 20,
            ef: None,
            refine_factor: None,
            distance_type: None,
            use_index: true,
@@ -739,22 +734,6 @@ impl VectorQuery {
        self
    }

    /// Add another query vector to the search.
    ///
    /// Multiple searches will be dispatched as part of the query.
    /// This is a convenience method for adding multiple query vectors
    /// to the search. It is not expected to be faster than issuing
    /// multiple queries concurrently.
    ///
    /// The output data will contain an additional column `query_index` which
    /// will contain the index of the query vector that was used to generate the
    /// result.
    pub fn add_query_vector(mut self, vector: impl IntoQueryVector) -> Result<Self> {
        let query_vector = vector.to_query_vector(&DataType::Float32, "default")?;
        self.query_vector.push(query_vector);
        Ok(self)
    }
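
A minimal sketch of the multi-vector API being removed here, assuming a table whose vector column has two dimensions. Each output batch carries the `query_index` column described in the doc comment above.

    let mut results = table
        .query()
        .nearest_to(&[0.1, 0.2])?
        .add_query_vector(&[0.9, 0.8])?
        .limit(1)
        .execute()
        .await?;
    // Rows with query_index == 0 match the first vector, == 1 the second.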

    /// Set the number of partitions to search (probe)
    ///
    /// This argument is only used when the vector column has an IVF PQ index.
@@ -780,18 +759,6 @@ impl VectorQuery {
        self
    }

    /// Set the number of candidates to return during the refine step for HNSW
    ///
    /// This argument is only used when the vector column has an HNSW index.
    /// If there is no index then this value is ignored.
    ///
    /// Increasing this value will increase the recall of your query but will
    /// also increase the latency of your query. The default value is 1.5*limit.
    pub fn ef(mut self, ef: usize) -> Self {
        self.ef = Some(ef);
        self
    }
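
Taken together, these builder methods are the recall/latency knobs of a vector query. A minimal sketch, assuming an indexed column; the name `nprobes` for the probe setter is an assumption, since only its doc comment is visible in this hunk.

    let query = table
        .query()
        .nearest_to(&[0.1, 0.2])?
        .nprobes(12)       // IVF: how many partitions to probe
        .ef(64)            // HNSW: candidate pool size during the refine step
        .refine_factor(2); // take 2x the requested rows, then re-rank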

    /// A multiplier to control how many additional rows are taken during the refine step
    ///
    /// This argument is only used when the vector column has an IVF PQ index.
@@ -887,7 +854,6 @@ mod tests {
    use std::sync::Arc;

    use super::*;
    use arrow::{compute::concat_batches, datatypes::Int32Type};
    use arrow_array::{
        cast::AsArray, Float32Array, Int32Array, RecordBatch, RecordBatchIterator,
        RecordBatchReader,
@@ -917,10 +883,7 @@ mod tests {

        let vector = Float32Array::from_iter_values([0.1, 0.2]);
        let query = table.query().nearest_to(&[0.1, 0.2]).unwrap();
        assert_eq!(
            *query.query_vector.first().unwrap().as_ref().as_primitive(),
            vector
        );
        assert_eq!(*query.query_vector.unwrap().as_ref().as_primitive(), vector);

        let new_vector = Float32Array::from_iter_values([9.8, 8.7]);

@@ -936,7 +899,7 @@ mod tests {
            .refine_factor(999);

        assert_eq!(
            *query.query_vector.first().unwrap().as_ref().as_primitive(),
            *query.query_vector.unwrap().as_ref().as_primitive(),
            new_vector
        );
        assert_eq!(query.base.limit.unwrap(), 100);
@@ -1234,34 +1197,4 @@ mod tests {
        assert!(batch.column_by_name("_rowid").is_some());
    }
}

    #[tokio::test]
    async fn test_multiple_query_vectors() {
        let tmp_dir = tempdir().unwrap();
        let table = make_test_table(&tmp_dir).await;
        let query = table
            .query()
            .nearest_to(&[0.1, 0.2, 0.3, 0.4])
            .unwrap()
            .add_query_vector(&[0.5, 0.6, 0.7, 0.8])
            .unwrap()
            .limit(1);

        let plan = query.explain_plan(true).await.unwrap();
        assert!(plan.contains("UnionExec"));

        let results = query
            .execute()
            .await
            .unwrap()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        let results = concat_batches(&results[0].schema(), &results).unwrap();
        assert_eq!(results.num_rows(), 2); // One result for each query vector.
        let query_index = results["query_index"].as_primitive::<Int32Type>();
        // We don't guarantee order.
        assert!(query_index.values().contains(&0));
        assert!(query_index.values().contains(&1));
    }
}

@@ -6,7 +6,7 @@ use crate::index::IndexStatistics;
use crate::query::Select;
use crate::table::AddDataMode;
use crate::utils::{supported_btree_data_type, supported_vector_data_type};
use crate::{Error, Table};
use crate::Error;
use arrow_array::RecordBatchReader;
use arrow_ipc::reader::FileReader;
use arrow_schema::{DataType, SchemaRef};
@@ -19,10 +19,9 @@ use http::header::CONTENT_TYPE;
use http::StatusCode;
use lance::arrow::json::JsonSchema;
use lance::dataset::scanner::DatasetRecordBatchStream;
use lance::dataset::{ColumnAlteration, NewColumnTransform, Version};
use lance::dataset::{ColumnAlteration, NewColumnTransform};
use lance_datafusion::exec::OneShotExec;
use serde::{Deserialize, Serialize};
use tokio::sync::RwLock;

use crate::{
    connection::NoData,
@@ -44,32 +43,17 @@ pub struct RemoteTable<S: HttpSend = Sender> {
    #[allow(dead_code)]
    client: RestfulLanceDbClient<S>,
    name: String,

    version: RwLock<Option<u64>>,
}

impl<S: HttpSend> RemoteTable<S> {
    pub fn new(client: RestfulLanceDbClient<S>, name: String) -> Self {
        Self {
            client,
            name,
            version: RwLock::new(None),
        }
        Self { client, name }
    }

    async fn describe(&self) -> Result<TableDescription> {
        let version = self.current_version().await;
        self.describe_version(version).await
    }

    async fn describe_version(&self, version: Option<u64>) -> Result<TableDescription> {
        let mut request = self
        let request = self
            .client
            .post(&format!("/v1/table/{}/describe/", self.name));

        let body = serde_json::json!({ "version": version });
        request = request.json(&body);

        let (request_id, response) = self.client.send(request, true).await?;

        let response = self.check_table_response(&request_id, response).await?;
@@ -201,90 +185,6 @@ impl<S: HttpSend> RemoteTable<S> {

        Ok(())
    }

    fn apply_vector_query_params(
        mut body: serde_json::Value,
        query: &VectorQuery,
    ) -> Result<Vec<serde_json::Value>> {
        Self::apply_query_params(&mut body, &query.base)?;

        // Apply general parameters, before we dispatch based on number of query vectors.
        body["prefilter"] = query.base.prefilter.into();
        body["distance_type"] = serde_json::json!(query.distance_type.unwrap_or_default());
        body["nprobes"] = query.nprobes.into();
        body["ef"] = query.ef.into();
        body["refine_factor"] = query.refine_factor.into();
        if let Some(vector_column) = query.column.as_ref() {
            body["vector_column"] = serde_json::Value::String(vector_column.clone());
        }
        if !query.use_index {
            body["bypass_vector_index"] = serde_json::Value::Bool(true);
        }

        fn vector_to_json(vector: &arrow_array::ArrayRef) -> Result<serde_json::Value> {
            match vector.data_type() {
                DataType::Float32 => {
                    let array = vector
                        .as_any()
                        .downcast_ref::<arrow_array::Float32Array>()
                        .unwrap();
                    Ok(serde_json::Value::Array(
                        array
                            .values()
                            .iter()
                            .map(|v| {
                                serde_json::Value::Number(
                                    serde_json::Number::from_f64(*v as f64).unwrap(),
                                )
                            })
                            .collect(),
                    ))
                }
                _ => Err(Error::InvalidInput {
                    message: "VectorQuery vector must be of type Float32".into(),
                }),
            }
        }

        match query.query_vector.len() {
            0 => {
                // Server takes empty vector, not null or undefined.
                body["vector"] = serde_json::Value::Array(Vec::new());
                Ok(vec![body])
            }
            1 => {
                body["vector"] = vector_to_json(&query.query_vector[0])?;
                Ok(vec![body])
            }
            _ => {
                let mut bodies = Vec::with_capacity(query.query_vector.len());
                for vector in &query.query_vector {
                    let mut body = body.clone();
                    body["vector"] = vector_to_json(vector)?;
                    bodies.push(body);
                }
                Ok(bodies)
            }
        }
    }
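
For a single query vector, the request body produced here is plain JSON. A sketch of its shape, with illustrative values; the field set is taken from the code above and the expected bodies asserted in the tests below.

    let body = serde_json::json!({
        "k": 10,
        "prefilter": true,
        "distance_type": "l2",
        "nprobes": 20,
        "ef": null,
        "refine_factor": null,
        "vector": [0.1, 0.2, 0.3],
    });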

    async fn check_mutable(&self) -> Result<()> {
        let read_guard = self.version.read().await;
        match *read_guard {
            None => Ok(()),
            Some(version) => Err(Error::NotSupported {
                message: format!(
                    "Cannot mutate table reference fixed at version {}. Call checkout_latest() to get a mutable table reference.",
                    version
                )
            })
        }
    }

    async fn current_version(&self) -> Option<u64> {
        let read_guard = self.version.read().await;
        *read_guard
    }
}
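
From the caller's side, the version guard above gives RemoteTable simple time-travel semantics. A sketch, assuming the table has a version 42:

    table.checkout(42).await?;             // pin reads to version 42
    let n = table.count_rows(None).await?; // served from version 42
    assert!(table.delete("id = 1").await.is_err()); // writes are NotSupported while pinned
    table.checkout_latest().await?;        // unpin; mutations are allowed again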

#[derive(Deserialize)]
@@ -312,11 +212,7 @@ mod test_utils {
        T: Into<reqwest::Body>,
    {
        let client = client_with_handler(handler);
        Self {
            client,
            name,
            version: RwLock::new(None),
        }
        Self { client, name }
    }
}
}
@@ -335,62 +231,21 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
    async fn version(&self) -> Result<u64> {
        self.describe().await.map(|desc| desc.version)
    }
    async fn checkout(&self, version: u64) -> Result<()> {
        // check that the version exists
        self.describe_version(Some(version))
            .await
            .map_err(|e| match e {
                // try to map the error to a more user-friendly error telling them
                // specifically that the version does not exist
                Error::TableNotFound { name } => Error::TableNotFound {
                    name: format!("{} (version: {})", name, version),
                },
                e => e,
            })?;

        let mut write_guard = self.version.write().await;
        *write_guard = Some(version);
        Ok(())
    async fn checkout(&self, _version: u64) -> Result<()> {
        Err(Error::NotSupported {
            message: "checkout is not supported on LanceDB cloud.".into(),
        })
    }
    async fn checkout_latest(&self) -> Result<()> {
        let mut write_guard = self.version.write().await;
        *write_guard = None;
        Ok(())
        Err(Error::NotSupported {
            message: "checkout is not supported on LanceDB cloud.".into(),
        })
    }
    async fn restore(&self) -> Result<()> {
        self.check_mutable().await?;
        Err(Error::NotSupported {
            message: "restore is not supported on LanceDB cloud.".into(),
        })
    }

    async fn list_versions(&self) -> Result<Vec<Version>> {
        let request = self
            .client
            .post(&format!("/v1/table/{}/version/list/", self.name));
        let (request_id, response) = self.client.send(request, true).await?;
        let response = self.check_table_response(&request_id, response).await?;

        #[derive(Deserialize)]
        struct ListVersionsResponse {
            versions: Vec<Version>,
        }

        let body = response.text().await.err_to_http(request_id.clone())?;
        let body: ListVersionsResponse =
            serde_json::from_str(&body).map_err(|err| Error::Http {
                source: format!(
                    "Failed to parse list_versions response: {}, body: {}",
                    err, body
                )
                .into(),
                request_id,
                status_code: None,
            })?;

        Ok(body.versions)
    }

    async fn schema(&self) -> Result<SchemaRef> {
        let schema = self.describe().await?.schema;
        Ok(Arc::new(schema.try_into()?))
@@ -400,13 +255,10 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
            .client
            .post(&format!("/v1/table/{}/count_rows/", self.name));

        let version = self.current_version().await;

        if let Some(filter) = filter {
            request = request.json(&serde_json::json!({ "predicate": filter, "version": version }));
            request = request.json(&serde_json::json!({ "predicate": filter }));
        } else {
            let body = serde_json::json!({ "version": version });
            request = request.json(&body);
            request = request.json(&serde_json::json!({}));
        }

        let (request_id, response) = self.client.send(request, true).await?;
@@ -426,7 +278,6 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
        add: AddDataBuilder<NoData>,
        data: Box<dyn RecordBatchReader + Send>,
    ) -> Result<()> {
        self.check_mutable().await?;
        let body = Self::reader_as_body(data)?;
        let mut request = self
            .client
@@ -455,30 +306,51 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
    ) -> Result<Arc<dyn ExecutionPlan>> {
        let request = self.client.post(&format!("/v1/table/{}/query/", self.name));

        let version = self.current_version().await;
        let body = serde_json::json!({ "version": version });
        let bodies = Self::apply_vector_query_params(body, query)?;
        let mut body = serde_json::Value::Object(Default::default());
        Self::apply_query_params(&mut body, &query.base)?;

        let mut futures = Vec::with_capacity(bodies.len());
        for body in bodies {
            let request = request.try_clone().unwrap().json(&body);
            let future = async move {
                let (request_id, response) = self.client.send(request, true).await?;
                self.read_arrow_stream(&request_id, response).await
            };
            futures.push(future);
        }
        let streams = futures::future::try_join_all(futures).await?;
        if streams.len() == 1 {
            let stream = streams.into_iter().next().unwrap();
            Ok(Arc::new(OneShotExec::new(stream)))
        body["prefilter"] = query.base.prefilter.into();
        body["distance_type"] = serde_json::json!(query.distance_type.unwrap_or_default());
        body["nprobes"] = query.nprobes.into();
        body["refine_factor"] = query.refine_factor.into();

        let vector: Vec<f32> = if let Some(vector) = query.query_vector.as_ref() {
            match vector.data_type() {
                DataType::Float32 => vector
                    .as_any()
                    .downcast_ref::<arrow_array::Float32Array>()
                    .unwrap()
                    .values()
                    .iter()
                    .cloned()
                    .collect(),
                _ => {
                    return Err(Error::InvalidInput {
                        message: "VectorQuery vector must be of type Float32".into(),
                    })
                }
            }
        } else {
            let stream_execs = streams
                .into_iter()
                .map(|stream| Arc::new(OneShotExec::new(stream)) as Arc<dyn ExecutionPlan>)
                .collect();
            Table::multi_vector_plan(stream_execs)
            // Server takes empty vector, not null or undefined.
            Vec::new()
        };
        body["vector"] = serde_json::json!(vector);

        if let Some(vector_column) = query.column.as_ref() {
            body["vector_column"] = serde_json::Value::String(vector_column.clone());
        }

        if !query.use_index {
            body["bypass_vector_index"] = serde_json::Value::Bool(true);
        }

        let request = request.json(&body);

        let (request_id, response) = self.client.send(request, true).await?;

        let stream = self.read_arrow_stream(&request_id, response).await?;

        Ok(Arc::new(OneShotExec::new(stream)))
    }

    async fn plain_query(
@@ -491,8 +363,7 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
            .post(&format!("/v1/table/{}/query/", self.name))
            .header(CONTENT_TYPE, JSON_CONTENT_TYPE);

        let version = self.current_version().await;
        let mut body = serde_json::json!({ "version": version });
        let mut body = serde_json::Value::Object(Default::default());
        Self::apply_query_params(&mut body, query)?;
        // Empty vector can be passed if no vector search is performed.
        body["vector"] = serde_json::Value::Array(Vec::new());
@@ -506,7 +377,6 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
        Ok(DatasetRecordBatchStream::new(stream))
    }
    async fn update(&self, update: UpdateBuilder) -> Result<u64> {
        self.check_mutable().await?;
        let request = self
            .client
            .post(&format!("/v1/table/{}/update/", self.name));
@@ -528,7 +398,6 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
        Ok(0) // TODO: support returning number of modified rows once supported in SaaS.
    }
    async fn delete(&self, predicate: &str) -> Result<()> {
        self.check_mutable().await?;
        let body = serde_json::json!({ "predicate": predicate });
        let request = self
            .client
@@ -540,7 +409,6 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
    }

    async fn create_index(&self, mut index: IndexBuilder) -> Result<()> {
        self.check_mutable().await?;
        let request = self
            .client
            .post(&format!("/v1/table/{}/create_index/", self.name));
@@ -619,7 +487,6 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
        params: MergeInsertBuilder,
        new_data: Box<dyn RecordBatchReader + Send>,
    ) -> Result<()> {
        self.check_mutable().await?;
        let query = MergeInsertRequest::try_from(params)?;
        let body = Self::reader_as_body(new_data)?;
        let request = self
@@ -636,7 +503,6 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
        Ok(())
    }
    async fn optimize(&self, _action: OptimizeAction) -> Result<OptimizeStats> {
        self.check_mutable().await?;
        Err(Error::NotSupported {
            message: "optimize is not supported on LanceDB cloud.".into(),
        })
@@ -646,19 +512,16 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
        _transforms: NewColumnTransform,
        _read_columns: Option<Vec<String>>,
    ) -> Result<()> {
        self.check_mutable().await?;
        Err(Error::NotSupported {
            message: "add_columns is not yet supported.".into(),
        })
    }
    async fn alter_columns(&self, _alterations: &[ColumnAlteration]) -> Result<()> {
        self.check_mutable().await?;
        Err(Error::NotSupported {
            message: "alter_columns is not yet supported.".into(),
        })
    }
    async fn drop_columns(&self, _columns: &[&str]) -> Result<()> {
        self.check_mutable().await?;
        Err(Error::NotSupported {
            message: "drop_columns is not yet supported.".into(),
        })
@@ -666,13 +529,9 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {

    async fn list_indices(&self) -> Result<Vec<IndexConfig>> {
        // Make request to list the indices
        let mut request = self
        let request = self
            .client
            .post(&format!("/v1/table/{}/index/list/", self.name));
        let version = self.current_version().await;
        let body = serde_json::json!({ "version": version });
        request = request.json(&body);

        let (request_id, response) = self.client.send(request, true).await?;
        let response = self.check_table_response(&request_id, response).await?;

@@ -722,14 +581,10 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
    }

    async fn index_stats(&self, index_name: &str) -> Result<Option<IndexStatistics>> {
        let mut request = self.client.post(&format!(
        let request = self.client.post(&format!(
            "/v1/table/{}/index/{}/stats/",
            self.name, index_name
        ));
        let version = self.current_version().await;
        let body = serde_json::json!({ "version": version });
        request = request.json(&body);

        let (request_id, response) = self.client.send(request, true).await?;

        if response.status() == StatusCode::NOT_FOUND {
@@ -800,10 +655,8 @@ mod tests {

    use super::*;

    use arrow::{array::AsArray, compute::concat_batches, datatypes::Int32Type};
    use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator};
    use arrow_schema::{DataType, Field, Schema};
    use chrono::{DateTime, Utc};
    use futures::{future::BoxFuture, StreamExt, TryFutureExt};
    use lance_index::scalar::FullTextSearchQuery;
    use reqwest::Body;
@@ -908,10 +761,7 @@ mod tests {
                request.headers().get("Content-Type").unwrap(),
                JSON_CONTENT_TYPE
            );
            assert_eq!(
                request.body().unwrap().as_bytes().unwrap(),
                br#"{"version":null}"#
            );
            assert_eq!(request.body().unwrap().as_bytes().unwrap(), br#"{}"#);

            http::Response::builder().status(200).body("42").unwrap()
        });
@@ -928,7 +778,7 @@ mod tests {
            );
            assert_eq!(
                request.body().unwrap().as_bytes().unwrap(),
                br#"{"predicate":"a > 10","version":null}"#
                br#"{"predicate":"a > 10"}"#
            );

            http::Response::builder().status(200).body("42").unwrap()
@@ -1227,9 +1077,7 @@ mod tests {
            "prefilter": true,
            "distance_type": "l2",
            "nprobes": 20,
            "ef": Option::<usize>::None,
            "refine_factor": null,
            "version": null,
        });
        // Pass vector separately to make sure it matches f32 precision.
        expected_body["vector"] = vec![0.1f32, 0.2, 0.3].into();
@@ -1274,9 +1122,7 @@ mod tests {
            "bypass_vector_index": true,
            "columns": ["a", "b"],
            "nprobes": 12,
            "ef": Option::<usize>::None,
            "refine_factor": 2,
            "version": null,
        });
        // Pass vector separately to make sure it matches f32 precision.
        expected_body["vector"] = vec![0.1f32, 0.2, 0.3].into();
@@ -1332,7 +1178,6 @@ mod tests {
            "k": 10,
            "vector": [],
            "with_row_id": true,
            "version": null
        });
        assert_eq!(body, expected_body);

@@ -1362,52 +1207,6 @@ mod tests {
            .unwrap();
    }

    #[tokio::test]
    async fn test_query_multiple_vectors() {
        let table = Table::new_with_handler("my_table", |request| {
            assert_eq!(request.method(), "POST");
            assert_eq!(request.url().path(), "/v1/table/my_table/query/");
            assert_eq!(
                request.headers().get("Content-Type").unwrap(),
                JSON_CONTENT_TYPE
            );
            let data = RecordBatch::try_new(
                Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
                vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
            )
            .unwrap();
            let response_body = write_ipc_file(&data);
            http::Response::builder()
                .status(200)
                .header(CONTENT_TYPE, ARROW_FILE_CONTENT_TYPE)
                .body(response_body)
                .unwrap()
        });

        let query = table
            .query()
            .nearest_to(vec![0.1, 0.2, 0.3])
            .unwrap()
            .add_query_vector(vec![0.4, 0.5, 0.6])
            .unwrap();
        let plan = query.explain_plan(true).await.unwrap();
        assert!(plan.contains("UnionExec"), "Plan: {}", plan);

        let results = query
            .execute()
            .await
            .unwrap()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        let results = concat_batches(&results[0].schema(), &results).unwrap();

        let query_index = results["query_index"].as_primitive::<Int32Type>();
        // We don't guarantee order.
        assert!(query_index.values().contains(&0));
        assert!(query_index.values().contains(&1));
    }

    #[tokio::test]
    async fn test_create_index() {
        let cases = [
@@ -1518,51 +1317,6 @@ mod tests {
        assert_eq!(indices, expected);
    }

    #[tokio::test]
    async fn test_list_versions() {
        let table = Table::new_with_handler("my_table", |request| {
            assert_eq!(request.method(), "POST");
            assert_eq!(request.url().path(), "/v1/table/my_table/version/list/");

            let version1 = lance::dataset::Version {
                version: 1,
                timestamp: "2024-01-01T00:00:00Z".parse().unwrap(),
                metadata: Default::default(),
            };
            let version2 = lance::dataset::Version {
                version: 2,
                timestamp: "2024-02-01T00:00:00Z".parse().unwrap(),
                metadata: Default::default(),
            };
            let response_body = serde_json::json!({
                "versions": [
                    version1,
                    version2,
                ]
            });
            let response_body = serde_json::to_string(&response_body).unwrap();

            http::Response::builder()
                .status(200)
                .body(response_body)
                .unwrap()
        });

        let versions = table.list_versions().await.unwrap();
        assert_eq!(versions.len(), 2);
        assert_eq!(versions[0].version, 1);
        assert_eq!(
            versions[0].timestamp,
            "2024-01-01T00:00:00Z".parse::<DateTime<Utc>>().unwrap()
        );
        assert_eq!(versions[1].version, 2);
        assert_eq!(
            versions[1].timestamp,
            "2024-02-01T00:00:00Z".parse::<DateTime<Utc>>().unwrap()
        );
        // assert_eq!(versions, expected);
    }

    #[tokio::test]
    async fn test_index_stats() {
        let table = Table::new_with_handler("my_table", |request| {
@@ -1607,195 +1361,4 @@ mod tests {
        let indices = table.index_stats("my_index").await.unwrap();
        assert!(indices.is_none());
    }

    #[tokio::test]
    async fn test_passes_version() {
        let table = Table::new_with_handler("my_table", |request| {
            let body = request.body().unwrap().as_bytes().unwrap();
            let body: serde_json::Value = serde_json::from_slice(body).unwrap();
            let version = body
                .as_object()
                .unwrap()
                .get("version")
                .unwrap()
                .as_u64()
                .unwrap();
            assert_eq!(version, 42);

            let response_body = match request.url().path() {
                "/v1/table/my_table/describe/" => {
                    serde_json::json!({
                        "version": 42,
                        "schema": { "fields": [] }
                    })
                }
                "/v1/table/my_table/index/list/" => {
                    serde_json::json!({
                        "indexes": []
                    })
                }
                "/v1/table/my_table/index/my_idx/stats/" => {
                    serde_json::json!({
                        "num_indexed_rows": 100000,
                        "num_unindexed_rows": 0,
                        "index_type": "IVF_PQ",
                        "distance_type": "l2"
                    })
                }
                "/v1/table/my_table/count_rows/" => {
                    serde_json::json!(1000)
                }
                "/v1/table/my_table/query/" => {
                    let expected_data = RecordBatch::try_new(
                        Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
                        vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
                    )
                    .unwrap();
                    let expected_data_ref = expected_data.clone();
                    let response_body = write_ipc_file(&expected_data_ref);
                    return http::Response::builder()
                        .status(200)
                        .header(CONTENT_TYPE, ARROW_FILE_CONTENT_TYPE)
                        .body(response_body)
                        .unwrap();
                }

                path => panic!("Unexpected path: {}", path),
            };

            http::Response::builder()
                .status(200)
                .body(
                    serde_json::to_string(&response_body)
                        .unwrap()
                        .as_bytes()
                        .to_vec(),
                )
                .unwrap()
        });

        table.checkout(42).await.unwrap();

        // ensure that version is passed to the /describe endpoint
        let version = table.version().await.unwrap();
        assert_eq!(version, 42);

        // ensure it's passed to other read API calls
        table.list_indices().await.unwrap();
        table.index_stats("my_idx").await.unwrap();
        table.count_rows(None).await.unwrap();
        table
            .query()
            .nearest_to(vec![0.1, 0.2, 0.3])
            .unwrap()
            .execute()
            .await
            .unwrap();
    }

    #[tokio::test]
    async fn test_fails_if_checkout_version_doesnt_exist() {
        let table = Table::new_with_handler("my_table", |request| {
            let body = request.body().unwrap().as_bytes().unwrap();
            let body: serde_json::Value = serde_json::from_slice(body).unwrap();
            let version = body
                .as_object()
                .unwrap()
                .get("version")
                .unwrap()
                .as_u64()
                .unwrap();
            if version != 42 {
                return http::Response::builder()
                    .status(404)
                    .body(format!("Table my_table (version: {}) not found", version))
                    .unwrap();
            }

            let response_body = match request.url().path() {
                "/v1/table/my_table/describe/" => {
                    serde_json::json!({
                        "version": 42,
                        "schema": { "fields": [] }
                    })
                }
                _ => panic!("Unexpected path"),
            };

            http::Response::builder()
                .status(200)
                .body(serde_json::to_string(&response_body).unwrap())
                .unwrap()
        });

        let res = table.checkout(43).await;
        println!("{:?}", res);
        assert!(
            matches!(res, Err(Error::TableNotFound { name }) if name == "my_table (version: 43)")
        );
    }

    #[tokio::test]
    async fn test_timetravel_immutable() {
        let table = Table::new_with_handler::<String>("my_table", |request| {
            let response_body = match request.url().path() {
                "/v1/table/my_table/describe/" => {
                    serde_json::json!({
                        "version": 42,
                        "schema": { "fields": [] }
                    })
                }
                _ => panic!("Should not have made a request: {:?}", request),
            };

            http::Response::builder()
                .status(200)
                .body(serde_json::to_string(&response_body).unwrap())
                .unwrap()
        });

        table.checkout(42).await.unwrap();

        // Ensure that all mutable operations fail.
        let res = table
            .update()
            .column("a", "a + 1")
            .column("b", "b - 1")
            .only_if("b > 10")
            .execute()
            .await;
        assert!(matches!(res, Err(Error::NotSupported { .. })));

        let batch = RecordBatch::try_new(
            Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
        )
        .unwrap();
        let data = Box::new(RecordBatchIterator::new(
            [Ok(batch.clone())],
            batch.schema(),
        ));
        let res = table.merge_insert(&["some_col"]).execute(data).await;
        assert!(matches!(res, Err(Error::NotSupported { .. })));

        let res = table.delete("id in (1, 2, 3)").await;
        assert!(matches!(res, Err(Error::NotSupported { .. })));

        let data = RecordBatch::try_new(
            Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
        )
        .unwrap();
        let res = table
            .add(RecordBatchIterator::new([Ok(data.clone())], data.schema()))
            .execute()
            .await;
        assert!(matches!(res, Err(Error::NotSupported { .. })));

        let res = table
            .create_index(&["a"], Index::IvfPq(Default::default()))
            .execute()
            .await;
        assert!(matches!(res, Err(Error::NotSupported { .. })));
    }
}

@@ -24,9 +24,6 @@ use arrow_array::{RecordBatchIterator, RecordBatchReader};
use arrow_schema::{Field, Schema, SchemaRef};
use async_trait::async_trait;
use datafusion_physical_plan::display::DisplayableExecutionPlan;
use datafusion_physical_plan::projection::ProjectionExec;
use datafusion_physical_plan::repartition::RepartitionExec;
use datafusion_physical_plan::union::UnionExec;
use datafusion_physical_plan::ExecutionPlan;
use futures::{StreamExt, TryStreamExt};
use lance::dataset::builder::DatasetBuilder;
@@ -37,7 +34,7 @@ pub use lance::dataset::ColumnAlteration;
pub use lance::dataset::NewColumnTransform;
pub use lance::dataset::ReadParams;
use lance::dataset::{
    Dataset, UpdateBuilder as LanceUpdateBuilder, Version, WhenMatched, WriteMode, WriteParams,
    Dataset, UpdateBuilder as LanceUpdateBuilder, WhenMatched, WriteMode, WriteParams,
};
use lance::dataset::{MergeInsertBuilder as LanceMergeInsertBuilder, WhenNotMatchedBySource};
use lance::io::WrappingObjectStore;
@@ -426,7 +423,6 @@ pub(crate) trait TableInternal: std::fmt::Display + std::fmt::Debug + Send + Syn
    async fn checkout(&self, version: u64) -> Result<()>;
    async fn checkout_latest(&self) -> Result<()>;
    async fn restore(&self) -> Result<()>;
    async fn list_versions(&self) -> Result<Vec<Version>>;
    async fn table_definition(&self) -> Result<TableDefinition>;
    fn dataset_uri(&self) -> &str;
}
@@ -956,11 +952,6 @@ impl Table {
        self.inner.restore().await
    }

    /// List all the versions of the table
    pub async fn list_versions(&self) -> Result<Vec<Version>> {
        self.inner.list_versions().await
    }

    /// List all indices that have been created with [`Self::create_index`]
    pub async fn list_indices(&self) -> Result<Vec<IndexConfig>> {
        self.inner.list_indices().await
@@ -981,57 +972,6 @@ impl Table {
    ) -> Result<Option<IndexStatistics>> {
        self.inner.index_stats(index_name.as_ref()).await
    }

    // Take many execution plans and map them into a single plan that adds
    // a query_index column and unions them.
    pub(crate) fn multi_vector_plan(
        plans: Vec<Arc<dyn ExecutionPlan>>,
    ) -> Result<Arc<dyn ExecutionPlan>> {
        if plans.is_empty() {
            return Err(Error::InvalidInput {
                message: "No plans provided".to_string(),
            });
        }
        // Projection keeping all existing columns
        let first_plan = plans[0].clone();
        let project_all_columns = first_plan
            .schema()
            .fields()
            .iter()
            .enumerate()
            .map(|(i, field)| {
                let expr =
                    datafusion_physical_plan::expressions::Column::new(field.name().as_str(), i);
                let expr = Arc::new(expr) as Arc<dyn datafusion_physical_plan::PhysicalExpr>;
                (expr, field.name().clone())
            })
            .collect::<Vec<_>>();

        let projected_plans = plans
            .into_iter()
            .enumerate()
            .map(|(plan_i, plan)| {
                let query_index = datafusion_common::ScalarValue::Int32(Some(plan_i as i32));
                let query_index_expr =
                    datafusion_physical_plan::expressions::Literal::new(query_index);
                let query_index_expr =
                    Arc::new(query_index_expr) as Arc<dyn datafusion_physical_plan::PhysicalExpr>;
                let mut projections = vec![(query_index_expr, "query_index".to_string())];
                projections.extend_from_slice(&project_all_columns);
                let projection = ProjectionExec::try_new(projections, plan).unwrap();
                Arc::new(projection) as Arc<dyn datafusion_physical_plan::ExecutionPlan>
            })
            .collect::<Vec<_>>();

        let unioned = Arc::new(UnionExec::new(projected_plans));
        // We require 1 partition in the final output
        let repartitioned = RepartitionExec::try_new(
            unioned,
            datafusion_physical_plan::Partitioning::RoundRobinBatch(1),
        )
        .unwrap();
        Ok(Arc::new(repartitioned))
    }
}
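
Downstream, the `query_index` column added by this plan is an Int32 identifying which query vector produced each row. A sketch of consuming it (batch ordering across vectors is not guaranteed):

    // Assumed imports: arrow::datatypes::Int32Type, arrow_array::cast::AsArray,
    // futures::TryStreamExt.
    let batches = query.execute().await?.try_collect::<Vec<_>>().await?;
    for batch in &batches {
        let idx = batch["query_index"].as_primitive::<Int32Type>();
        // idx == 0 -> rows for the first query vector, idx == 1 -> the second, ...
    }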

impl From<NativeTable> for Table {
@@ -1325,7 +1265,7 @@ impl NativeTable {
        let (indices, mf) = futures::try_join!(dataset.load_indices(), dataset.latest_manifest())?;
        Ok(indices
            .iter()
            .map(|i| VectorIndex::new_from_format(&(mf.0), i))
            .map(|i| VectorIndex::new_from_format(&mf, i))
            .collect())
    }

@@ -1713,10 +1653,6 @@ impl TableInternal for NativeTable {
        self.dataset.reload().await
    }

    async fn list_versions(&self) -> Result<Vec<Version>> {
        Ok(self.dataset.get().await?.versions().await?)
    }

    async fn restore(&self) -> Result<()> {
        let version =
            self.dataset
@@ -1848,25 +1784,9 @@ impl TableInternal for NativeTable {
    ) -> Result<Arc<dyn ExecutionPlan>> {
        let ds_ref = self.dataset.get().await?;

        if query.query_vector.len() > 1 {
            // If there are multiple query vectors, create a plan for each of them and union them.
            let query_vecs = query.query_vector.clone();
            let plan_futures = query_vecs
                .into_iter()
                .map(|query_vector| {
                    let mut sub_query = query.clone();
                    sub_query.query_vector = vec![query_vector];
                    let options_ref = options.clone();
                    async move { self.create_plan(&sub_query, options_ref).await }
                })
                .collect::<Vec<_>>();
            let plans = futures::future::try_join_all(plan_futures).await?;
            return Table::multi_vector_plan(plans);
        }

        let mut scanner: Scanner = ds_ref.scan();

        if let Some(query_vector) = query.query_vector.first() {
        if let Some(query_vector) = query.query_vector.as_ref() {
            // If there is a vector query, default to limit=10 if unspecified
            let column = if let Some(col) = query.column.as_ref() {
                col.clone()
@@ -1908,15 +1828,19 @@ impl TableInternal for NativeTable {
                query_vector,
                query.base.limit.unwrap_or(DEFAULT_TOP_K),
            )?;
            scanner.limit(
                query.base.limit.map(|limit| limit as i64),
                query.base.offset.map(|offset| offset as i64),
            )?;
        } else {
            // If there is no vector query, it's ok to not have a limit
            scanner.limit(
                query.base.limit.map(|limit| limit as i64),
                query.base.offset.map(|offset| offset as i64),
            )?;
        }
        scanner.limit(
            query.base.limit.map(|limit| limit as i64),
            query.base.offset.map(|offset| offset as i64),
        )?;

        scanner.nprobs(query.nprobes);
        if let Some(ef) = query.ef {
            scanner.ef(ef);
        }
        scanner.use_index(query.use_index);
        scanner.prefilter(query.base.prefilter);
        match query.base.select {