mirror of https://github.com/lancedb/lancedb.git
synced 2025-12-23 05:19:58 +00:00

Compare commits: python-v0. ... python-v0. (77 commits)
| SHA1 |
|---|
| 6ef20b85ca |
| 35bacdd57e |
| a5ebe5a6c4 |
| bf03ad1b4a |
| 2a9e3e2084 |
| f298f15360 |
| 679b031b99 |
| f50b5d532b |
| fe655a15f0 |
| 9d0af794d0 |
| 048a2d10f8 |
| c78a9849b4 |
| c663085203 |
| 8b628854d5 |
| a8d8c17b2a |
| 3c487e5fc7 |
| d6219d687c |
| 239f725b32 |
| 5f261cf2d8 |
| 79eaa52184 |
| bd82e1f66d |
| ba34c3bee1 |
| d4d0873e2b |
| 12c7bd18a5 |
| c6bf6a25d6 |
| c998a47e17 |
| d8c758513c |
| 3795e02ee3 |
| c7d424b2f3 |
| 1efb9914ee |
| 83e26a231e |
| 72a17b2de4 |
| 4231925476 |
| 84a6693294 |
| 6c2d4c10a4 |
| d914722f79 |
| a6e4034dba |
| 2616a50502 |
| 7b5e9d824a |
| 3b173e7cb9 |
| d496ab13a0 |
| 69d9beebc7 |
| d32360b99d |
| 9fa08bfa93 |
| d6d9cb7415 |
| 990d93f553 |
| 0832cba3c6 |
| 38b0d91848 |
| 6826039575 |
| 3e9321fc40 |
| 2ded17452b |
| dfd9d2ac99 |
| 162880140e |
| 99d9ced6d5 |
| 96933d7df8 |
| d369233b3d |
| 43a670ed4b |
| cb9a00a28d |
| 72af977a73 |
| 7cecb71df0 |
| 285071e5c8 |
| 114866fbcf |
| 5387c0e243 |
| 53d1535de1 |
| b2f88f0b29 |
| f2e3989831 |
| 83ae52938a |
| 267aa83bf8 |
| cc72050206 |
| 72543c8b9d |
| 97d6210c33 |
| a3d0c27b0a |
| b23d8abcdd |
| e3ea5cf9b9 |
| 4f8b086175 |
| 72330fb759 |
| e3b2c5f438 |
```diff
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.13.0-beta.2"
+current_version = "0.14.0"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
@@ -87,6 +87,16 @@ glob = "node/package.json"
 replace = "\"@lancedb/vectordb-linux-x64-gnu\": \"{new_version}\""
 search = "\"@lancedb/vectordb-linux-x64-gnu\": \"{current_version}\""
+
+[[tool.bumpversion.files]]
+glob = "node/package.json"
+replace = "\"@lancedb/vectordb-linux-arm64-musl\": \"{new_version}\""
+search = "\"@lancedb/vectordb-linux-arm64-musl\": \"{current_version}\""
+
+[[tool.bumpversion.files]]
+glob = "node/package.json"
+replace = "\"@lancedb/vectordb-linux-x64-musl\": \"{new_version}\""
+search = "\"@lancedb/vectordb-linux-x64-musl\": \"{current_version}\""
 
 [[tool.bumpversion.files]]
 glob = "node/package.json"
 replace = "\"@lancedb/vectordb-win32-x64-msvc\": \"{new_version}\""
```
```diff
@@ -31,6 +31,9 @@ rustflags = [
 [target.x86_64-unknown-linux-gnu]
 rustflags = ["-C", "target-cpu=haswell", "-C", "target-feature=+avx2,+fma,+f16c"]
 
+[target.x86_64-unknown-linux-musl]
+rustflags = ["-C", "target-cpu=haswell", "-C", "target-feature=-crt-static,+avx2,+fma,+f16c"]
+
 [target.aarch64-apple-darwin]
 rustflags = ["-C", "target-cpu=apple-m1", "-C", "target-feature=+neon,+fp16,+fhm,+dotprod"]
```
.github/workflows/docs.yml (4 changes, vendored)
```diff
@@ -72,9 +72,9 @@ jobs:
       - name: Setup Pages
         uses: actions/configure-pages@v2
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@v1
+        uses: actions/upload-pages-artifact@v3
         with:
           path: "docs/site"
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@v1
+        uses: actions/deploy-pages@v4
```
.github/workflows/npm-publish.yml (412 changes, vendored)
```diff
@@ -101,7 +101,7 @@ jobs:
           path: |
             nodejs/dist/*.node
 
-  node-linux:
+  node-linux-gnu:
     name: vectordb (${{ matrix.config.arch}}-unknown-linux-gnu)
     runs-on: ${{ matrix.config.runner }}
     # Only runs on tags that matches the make-release action
@@ -133,15 +133,67 @@ jobs:
           free -h
       - name: Build Linux Artifacts
         run: |
-          bash ci/build_linux_artifacts.sh ${{ matrix.config.arch }}
+          bash ci/build_linux_artifacts.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-unknown-linux-gnu
      - name: Upload Linux Artifacts
        uses: actions/upload-artifact@v4
        with:
-          name: node-native-linux-${{ matrix.config.arch }}
+          name: node-native-linux-${{ matrix.config.arch }}-gnu
           path: |
             node/dist/lancedb-vectordb-linux*.tgz
 
-  nodejs-linux:
+  node-linux-musl:
+    name: vectordb (${{ matrix.config.arch}}-unknown-linux-musl)
+    runs-on: ubuntu-latest
+    container: alpine:edge
+    # Only runs on tags that matches the make-release action
+    if: startsWith(github.ref, 'refs/tags/v')
+    strategy:
+      fail-fast: false
+      matrix:
+        config:
+          - arch: x86_64
+          - arch: aarch64
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Install common dependencies
+        run: |
+          apk add protobuf-dev curl clang mold grep npm bash
+          curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y --default-toolchain 1.80.0
+          echo "source $HOME/.cargo/env" >> saved_env
+          echo "export CC=clang" >> saved_env
+          echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=-crt-static,+avx2,+fma,+f16c -Clinker=clang -Clink-arg=-fuse-ld=mold'" >> saved_env
+      - name: Configure aarch64 build
+        if: ${{ matrix.config.arch == 'aarch64' }}
+        run: |
+          source "$HOME/.cargo/env"
+          rustup target add aarch64-unknown-linux-musl --toolchain 1.80.0
+          crt=$(realpath $(dirname $(rustup which rustc))/../lib/rustlib/aarch64-unknown-linux-musl/lib/self-contained)
+          sysroot_lib=/usr/aarch64-unknown-linux-musl/usr/lib
+          apk_url=https://dl-cdn.alpinelinux.org/alpine/latest-stable/main/aarch64/
+          curl -sSf $apk_url > apk_list
+          for pkg in gcc libgcc musl; do curl -sSf $apk_url$(cat apk_list | grep -oP '(?<=")'$pkg'-\d.*?(?=")') | tar zxf -; done
+          mkdir -p $sysroot_lib
+          echo 'GROUP ( libgcc_s.so.1 -lgcc )' > $sysroot_lib/libgcc_s.so
+          cp usr/lib/libgcc_s.so.1 $sysroot_lib
+          cp usr/lib/gcc/aarch64-alpine-linux-musl/*/libgcc.a $sysroot_lib
+          cp lib/ld-musl-aarch64.so.1 $sysroot_lib/libc.so
+          echo '!<arch>' > $sysroot_lib/libdl.a
+          (cd $crt && cp crti.o crtbeginS.o crtendS.o crtn.o -t $sysroot_lib)
+          echo "export CARGO_BUILD_TARGET=aarch64-unknown-linux-musl" >> saved_env
+          echo "export RUSTFLAGS='-Ctarget-cpu=apple-m1 -Ctarget-feature=-crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=--target=aarch64-unknown-linux-musl -Clink-arg=--sysroot=/usr/aarch64-unknown-linux-musl -Clink-arg=-lc'" >> saved_env
+      - name: Build Linux Artifacts
+        run: |
+          source ./saved_env
+          bash ci/manylinux_node/build_vectordb.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-unknown-linux-musl
+      - name: Upload Linux Artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: node-native-linux-${{ matrix.config.arch }}-musl
+          path: |
+            node/dist/lancedb-vectordb-linux*.tgz
+
+  nodejs-linux-gnu:
     name: lancedb (${{ matrix.config.arch}}-unknown-linux-gnu
     runs-on: ${{ matrix.config.runner }}
     # Only runs on tags that matches the make-release action
@@ -178,7 +230,7 @@ jobs:
       - name: Upload Linux Artifacts
         uses: actions/upload-artifact@v4
         with:
-          name: nodejs-native-linux-${{ matrix.config.arch }}
+          name: nodejs-native-linux-${{ matrix.config.arch }}-gnu
           path: |
             nodejs/dist/*.node
       # The generic files are the same in all distros so we just pick
@@ -192,6 +244,62 @@ jobs:
             nodejs/dist/*
             !nodejs/dist/*.node
 
+  nodejs-linux-musl:
+    name: lancedb (${{ matrix.config.arch}}-unknown-linux-musl
+    runs-on: ubuntu-latest
+    container: alpine:edge
+    # Only runs on tags that matches the make-release action
+    if: startsWith(github.ref, 'refs/tags/v')
+    strategy:
+      fail-fast: false
+      matrix:
+        config:
+          - arch: x86_64
+          - arch: aarch64
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Install common dependencies
+        run: |
+          apk add protobuf-dev curl clang mold grep npm bash openssl-dev openssl-libs-static
+          curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y --default-toolchain 1.80.0
+          echo "source $HOME/.cargo/env" >> saved_env
+          echo "export CC=clang" >> saved_env
+          echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=-crt-static,+avx2,+fma,+f16c -Clinker=clang -Clink-arg=-fuse-ld=mold'" >> saved_env
+          echo "export X86_64_UNKNOWN_LINUX_MUSL_OPENSSL_INCLUDE_DIR=/usr/include" >> saved_env
+          echo "export X86_64_UNKNOWN_LINUX_MUSL_OPENSSL_LIB_DIR=/usr/lib" >> saved_env
+      - name: Configure aarch64 build
+        if: ${{ matrix.config.arch == 'aarch64' }}
+        run: |
+          source "$HOME/.cargo/env"
+          rustup target add aarch64-unknown-linux-musl --toolchain 1.80.0
+          crt=$(realpath $(dirname $(rustup which rustc))/../lib/rustlib/aarch64-unknown-linux-musl/lib/self-contained)
+          sysroot_lib=/usr/aarch64-unknown-linux-musl/usr/lib
+          apk_url=https://dl-cdn.alpinelinux.org/alpine/latest-stable/main/aarch64/
+          curl -sSf $apk_url > apk_list
+          for pkg in gcc libgcc musl openssl-dev openssl-libs-static; do curl -sSf $apk_url$(cat apk_list | grep -oP '(?<=")'$pkg'-\d.*?(?=")') | tar zxf -; done
+          mkdir -p $sysroot_lib
+          echo 'GROUP ( libgcc_s.so.1 -lgcc )' > $sysroot_lib/libgcc_s.so
+          cp usr/lib/libgcc_s.so.1 $sysroot_lib
+          cp usr/lib/gcc/aarch64-alpine-linux-musl/*/libgcc.a $sysroot_lib
+          cp lib/ld-musl-aarch64.so.1 $sysroot_lib/libc.so
+          echo '!<arch>' > $sysroot_lib/libdl.a
+          (cd $crt && cp crti.o crtbeginS.o crtendS.o crtn.o -t $sysroot_lib)
+          echo "export CARGO_BUILD_TARGET=aarch64-unknown-linux-musl" >> saved_env
+          echo "export RUSTFLAGS='-Ctarget-feature=-crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=--target=aarch64-unknown-linux-musl -Clink-arg=--sysroot=/usr/aarch64-unknown-linux-musl -Clink-arg=-lc'" >> saved_env
+          echo "export AARCH64_UNKNOWN_LINUX_MUSL_OPENSSL_INCLUDE_DIR=$(realpath usr/include)" >> saved_env
+          echo "export AARCH64_UNKNOWN_LINUX_MUSL_OPENSSL_LIB_DIR=$(realpath usr/lib)" >> saved_env
+      - name: Build Linux Artifacts
+        run: |
+          source ./saved_env
+          bash ci/manylinux_node/build_lancedb.sh ${{ matrix.config.arch }}
+      - name: Upload Linux Artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: nodejs-native-linux-${{ matrix.config.arch }}-musl
+          path: |
+            nodejs/dist/*.node
+
   node-windows:
     name: vectordb ${{ matrix.target }}
     runs-on: windows-2022
@@ -226,109 +334,50 @@ jobs:
           path: |
             node/dist/lancedb-vectordb-win32*.tgz
 
-  # TODO: re-enable once working https://github.com/lancedb/lancedb/pull/1831
-  # node-windows-arm64:
-  #   name: vectordb win32-arm64-msvc
-  #   runs-on: windows-4x-arm
-  #   if: startsWith(github.ref, 'refs/tags/v')
-  #   steps:
-  #     - uses: actions/checkout@v4
-  #     - name: Install Git
-  #       run: |
-  #         Invoke-WebRequest -Uri "https://github.com/git-for-windows/git/releases/download/v2.44.0.windows.1/Git-2.44.0-64-bit.exe" -OutFile "git-installer.exe"
-  #         Start-Process -FilePath "git-installer.exe" -ArgumentList "/VERYSILENT", "/NORESTART" -Wait
-  #       shell: powershell
-  #     - name: Add Git to PATH
-  #       run: |
-  #         Add-Content $env:GITHUB_PATH "C:\Program Files\Git\bin"
-  #         $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-  #       shell: powershell
-  #     - name: Configure Git symlinks
-  #       run: git config --global core.symlinks true
-  #     - uses: actions/checkout@v4
-  #     - uses: actions/setup-python@v5
-  #       with:
-  #         python-version: "3.13"
-  #     - name: Install Visual Studio Build Tools
-  #       run: |
-  #         Invoke-WebRequest -Uri "https://aka.ms/vs/17/release/vs_buildtools.exe" -OutFile "vs_buildtools.exe"
-  #         Start-Process -FilePath "vs_buildtools.exe" -ArgumentList "--quiet", "--wait", "--norestart", "--nocache", `
-  #           "--installPath", "C:\BuildTools", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.Tools.ARM64", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", `
-  #           "--add", "Microsoft.VisualStudio.Component.Windows11SDK.22621", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.ATL", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.ATLMFC", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.Llvm.Clang" -Wait
-  #       shell: powershell
-  #     - name: Add Visual Studio Build Tools to PATH
-  #       run: |
-  #         $vsPath = "C:\BuildTools\VC\Tools\MSVC"
-  #         $latestVersion = (Get-ChildItem $vsPath | Sort-Object {[version]$_.Name} -Descending)[0].Name
-  #         Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\arm64"
-  #         Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\x64"
-  #         Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\arm64"
-  #         Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\x64"
-  #         Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\Llvm\x64\bin"
-
-  #         # Add MSVC runtime libraries to LIB
-  #         $env:LIB = "C:\BuildTools\VC\Tools\MSVC\$latestVersion\lib\arm64;" +
-  #           "C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\um\arm64;" +
-  #           "C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\ucrt\arm64"
-  #         Add-Content $env:GITHUB_ENV "LIB=$env:LIB"
-
-  #         # Add INCLUDE paths
-  #         $env:INCLUDE = "C:\BuildTools\VC\Tools\MSVC\$latestVersion\include;" +
-  #           "C:\Program Files (x86)\Windows Kits\10\Include\10.0.22621.0\ucrt;" +
-  #           "C:\Program Files (x86)\Windows Kits\10\Include\10.0.22621.0\um;" +
-  #           "C:\Program Files (x86)\Windows Kits\10\Include\10.0.22621.0\shared"
-  #         Add-Content $env:GITHUB_ENV "INCLUDE=$env:INCLUDE"
-  #       shell: powershell
-  #     - name: Install Rust
-  #       run: |
-  #         Invoke-WebRequest https://win.rustup.rs/x86_64 -OutFile rustup-init.exe
-  #         .\rustup-init.exe -y --default-host aarch64-pc-windows-msvc
-  #       shell: powershell
-  #     - name: Add Rust to PATH
-  #       run: |
-  #         Add-Content $env:GITHUB_PATH "$env:USERPROFILE\.cargo\bin"
-  #       shell: powershell
-
-  #     - uses: Swatinem/rust-cache@v2
-  #       with:
-  #         workspaces: rust
-  #     - name: Install 7-Zip ARM
-  #       run: |
-  #         New-Item -Path 'C:\7zip' -ItemType Directory
-  #         Invoke-WebRequest https://7-zip.org/a/7z2408-arm64.exe -OutFile C:\7zip\7z-installer.exe
-  #         Start-Process -FilePath C:\7zip\7z-installer.exe -ArgumentList '/S' -Wait
-  #       shell: powershell
-  #     - name: Add 7-Zip to PATH
-  #       run: Add-Content $env:GITHUB_PATH "C:\Program Files\7-Zip"
-  #       shell: powershell
-  #     - name: Install Protoc v21.12
-  #       working-directory: C:\
-  #       run: |
-  #         if (Test-Path 'C:\protoc') {
-  #           Write-Host "Protoc directory exists, skipping installation"
-  #           return
-  #         }
-  #         New-Item -Path 'C:\protoc' -ItemType Directory
-  #         Set-Location C:\protoc
-  #         Invoke-WebRequest https://github.com/protocolbuffers/protobuf/releases/download/v21.12/protoc-21.12-win64.zip -OutFile C:\protoc\protoc.zip
-  #         & 'C:\Program Files\7-Zip\7z.exe' x protoc.zip
-  #       shell: powershell
-  #     - name: Add Protoc to PATH
-  #       run: Add-Content $env:GITHUB_PATH "C:\protoc\bin"
-  #       shell: powershell
-  #     - name: Build Windows native node modules
-  #       run: .\ci\build_windows_artifacts.ps1 aarch64-pc-windows-msvc
-  #     - name: Upload Windows ARM64 Artifacts
-  #       uses: actions/upload-artifact@v4
-  #       with:
-  #         name: node-native-windows-arm64
-  #         path: |
-  #           node/dist/*.node
+  node-windows-arm64:
+    name: vectordb ${{ matrix.config.arch }}-pc-windows-msvc
+    if: startsWith(github.ref, 'refs/tags/v')
+    runs-on: ubuntu-latest
+    container: alpine:edge
+    strategy:
+      fail-fast: false
+      matrix:
+        config:
+          # - arch: x86_64
+          - arch: aarch64
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Install dependencies
+        run: |
+          apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
+          curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y --default-toolchain 1.80.0
+          echo "source $HOME/.cargo/env" >> saved_env
+          echo "export CC=clang" >> saved_env
+          echo "export AR=llvm-ar" >> saved_env
+          source "$HOME/.cargo/env"
+          rustup target add ${{ matrix.config.arch }}-pc-windows-msvc --toolchain 1.80.0
+          (mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
+          echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
+          echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
+      - name: Configure x86_64 build
+        if: ${{ matrix.config.arch == 'x86_64' }}
+        run: |
+          echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
+      - name: Configure aarch64 build
+        if: ${{ matrix.config.arch == 'aarch64' }}
+        run: |
+          echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
+      - name: Build Windows Artifacts
+        run: |
+          source ./saved_env
+          bash ci/manylinux_node/build_vectordb.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-pc-windows-msvc
+      - name: Upload Windows Artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: node-native-windows-${{ matrix.config.arch }}
+          path: |
+            node/dist/lancedb-vectordb-win32*.tgz
 
   nodejs-windows:
     name: lancedb ${{ matrix.target }}
@@ -364,103 +413,57 @@ jobs:
           path: |
             nodejs/dist/*.node
 
-  # TODO: re-enable once working https://github.com/lancedb/lancedb/pull/1831
-  # nodejs-windows-arm64:
-  #   name: lancedb win32-arm64-msvc
-  #   runs-on: windows-4x-arm
-  #   if: startsWith(github.ref, 'refs/tags/v')
-  #   steps:
-  #     - uses: actions/checkout@v4
-  #     - name: Install Git
-  #       run: |
-  #         Invoke-WebRequest -Uri "https://github.com/git-for-windows/git/releases/download/v2.44.0.windows.1/Git-2.44.0-64-bit.exe" -OutFile "git-installer.exe"
-  #         Start-Process -FilePath "git-installer.exe" -ArgumentList "/VERYSILENT", "/NORESTART" -Wait
-  #       shell: powershell
-  #     - name: Add Git to PATH
-  #       run: |
-  #         Add-Content $env:GITHUB_PATH "C:\Program Files\Git\bin"
-  #         $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-  #       shell: powershell
-  #     - name: Configure Git symlinks
-  #       run: git config --global core.symlinks true
-  #     - uses: actions/checkout@v4
-  #     - uses: actions/setup-python@v5
-  #       with:
-  #         python-version: "3.13"
-  #     - name: Install Visual Studio Build Tools
-  #       run: |
-  #         Invoke-WebRequest -Uri "https://aka.ms/vs/17/release/vs_buildtools.exe" -OutFile "vs_buildtools.exe"
-  #         Start-Process -FilePath "vs_buildtools.exe" -ArgumentList "--quiet", "--wait", "--norestart", "--nocache", `
-  #           "--installPath", "C:\BuildTools", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.Tools.ARM64", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", `
-  #           "--add", "Microsoft.VisualStudio.Component.Windows11SDK.22621", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.ATL", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.ATLMFC", `
-  #           "--add", "Microsoft.VisualStudio.Component.VC.Llvm.Clang" -Wait
-  #       shell: powershell
-  #     - name: Add Visual Studio Build Tools to PATH
-  #       run: |
-  #         $vsPath = "C:\BuildTools\VC\Tools\MSVC"
-  #         $latestVersion = (Get-ChildItem $vsPath | Sort-Object {[version]$_.Name} -Descending)[0].Name
-  #         Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\arm64"
-  #         Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\MSVC\$latestVersion\bin\Hostx64\x64"
-  #         Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\arm64"
-  #         Add-Content $env:GITHUB_PATH "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\x64"
-  #         Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\Llvm\x64\bin"
-
-  #         $env:LIB = ""
-  #         Add-Content $env:GITHUB_ENV "LIB=C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\um\arm64;C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\ucrt\arm64"
-  #       shell: powershell
-  #     - name: Install Rust
-  #       run: |
-  #         Invoke-WebRequest https://win.rustup.rs/x86_64 -OutFile rustup-init.exe
-  #         .\rustup-init.exe -y --default-host aarch64-pc-windows-msvc
-  #       shell: powershell
-  #     - name: Add Rust to PATH
-  #       run: |
-  #         Add-Content $env:GITHUB_PATH "$env:USERPROFILE\.cargo\bin"
-  #       shell: powershell
-
-  #     - uses: Swatinem/rust-cache@v2
-  #       with:
-  #         workspaces: rust
-  #     - name: Install 7-Zip ARM
-  #       run: |
-  #         New-Item -Path 'C:\7zip' -ItemType Directory
-  #         Invoke-WebRequest https://7-zip.org/a/7z2408-arm64.exe -OutFile C:\7zip\7z-installer.exe
-  #         Start-Process -FilePath C:\7zip\7z-installer.exe -ArgumentList '/S' -Wait
-  #       shell: powershell
-  #     - name: Add 7-Zip to PATH
-  #       run: Add-Content $env:GITHUB_PATH "C:\Program Files\7-Zip"
-  #       shell: powershell
-  #     - name: Install Protoc v21.12
-  #       working-directory: C:\
-  #       run: |
-  #         if (Test-Path 'C:\protoc') {
-  #           Write-Host "Protoc directory exists, skipping installation"
-  #           return
-  #         }
-  #         New-Item -Path 'C:\protoc' -ItemType Directory
-  #         Set-Location C:\protoc
-  #         Invoke-WebRequest https://github.com/protocolbuffers/protobuf/releases/download/v21.12/protoc-21.12-win64.zip -OutFile C:\protoc\protoc.zip
-  #         & 'C:\Program Files\7-Zip\7z.exe' x protoc.zip
-  #       shell: powershell
-  #     - name: Add Protoc to PATH
-  #       run: Add-Content $env:GITHUB_PATH "C:\protoc\bin"
-  #       shell: powershell
-  #     - name: Build Windows native node modules
-  #       run: .\ci\build_windows_artifacts_nodejs.ps1 aarch64-pc-windows-msvc
-  #     - name: Upload Windows ARM64 Artifacts
-  #       uses: actions/upload-artifact@v4
-  #       with:
-  #         name: nodejs-native-windows-arm64
-  #         path: |
-  #           nodejs/dist/*.node
+  nodejs-windows-arm64:
+    name: lancedb ${{ matrix.config.arch }}-pc-windows-msvc
+    # Only runs on tags that matches the make-release action
+    if: startsWith(github.ref, 'refs/tags/v')
+    runs-on: ubuntu-latest
+    container: alpine:edge
+    strategy:
+      fail-fast: false
+      matrix:
+        config:
+          # - arch: x86_64
+          - arch: aarch64
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Install dependencies
+        run: |
+          apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
+          curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y --default-toolchain 1.80.0
+          echo "source $HOME/.cargo/env" >> saved_env
+          echo "export CC=clang" >> saved_env
+          echo "export AR=llvm-ar" >> saved_env
+          source "$HOME/.cargo/env"
+          rustup target add ${{ matrix.config.arch }}-pc-windows-msvc --toolchain 1.80.0
+          (mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
+          echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
+          echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
+          printf '#!/bin/sh\ncargo "$@"' > $HOME/.cargo/bin/cargo-xwin
+          chmod u+x $HOME/.cargo/bin/cargo-xwin
+      - name: Configure x86_64 build
+        if: ${{ matrix.config.arch == 'x86_64' }}
+        run: |
+          echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
+      - name: Configure aarch64 build
+        if: ${{ matrix.config.arch == 'aarch64' }}
+        run: |
+          echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
      - name: Build Windows Artifacts
        run: |
+          source ./saved_env
+          bash ci/manylinux_node/build_lancedb.sh ${{ matrix.config.arch }}
+      - name: Upload Windows Artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: nodejs-native-windows-${{ matrix.config.arch }}
+          path: |
+            nodejs/dist/*.node
 
   release:
     name: vectordb NPM Publish
-    needs: [node, node-macos, node-linux, node-windows, node-windows-arm64]
+    needs: [node, node-macos, node-linux-gnu, node-linux-musl, node-windows, node-windows-arm64]
     runs-on: ubuntu-latest
     # Only runs on tags that matches the make-release action
     if: startsWith(github.ref, 'refs/tags/v')
@@ -500,7 +503,7 @@ jobs:
 
   release-nodejs:
     name: lancedb NPM Publish
-    needs: [nodejs-macos, nodejs-linux, nodejs-windows, nodejs-windows-arm64]
+    needs: [nodejs-macos, nodejs-linux-gnu, nodejs-linux-musl, nodejs-windows, nodejs-windows-arm64]
     runs-on: ubuntu-latest
     # Only runs on tags that matches the make-release action
     if: startsWith(github.ref, 'refs/tags/v')
@@ -558,6 +561,7 @@ jobs:
       SLACK_WEBHOOK_URL: ${{ secrets.ACTION_MONITORING_SLACK }}
 
   update-package-lock:
+    if: startsWith(github.ref, 'refs/tags/v')
     needs: [release]
     runs-on: ubuntu-latest
     permissions:
@@ -575,6 +579,7 @@ jobs:
       github_token: ${{ secrets.GITHUB_TOKEN }}
 
   update-package-lock-nodejs:
+    if: startsWith(github.ref, 'refs/tags/v')
     needs: [release-nodejs]
     runs-on: ubuntu-latest
     permissions:
@@ -592,6 +597,7 @@ jobs:
       github_token: ${{ secrets.GITHUB_TOKEN }}
 
   gh-release:
+    if: startsWith(github.ref, 'refs/tags/v')
     runs-on: ubuntu-latest
     permissions:
       contents: write
```
.github/workflows/pypi-publish.yml (2 changes, vendored)
```diff
@@ -83,7 +83,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
-          python-version: 3.8
+          python-version: 3.12
       - uses: ./.github/workflows/build_windows_wheel
         with:
          python-minor-version: 8
```
.github/workflows/upload_wheel/action.yml (1 change, vendored)
```diff
@@ -17,6 +17,7 @@ runs:
       run: |
         python -m pip install --upgrade pip
         pip install twine
+        python3 -m pip install --upgrade pkginfo
     - name: Choose repo
       shell: bash
       id: choose_repo
```
Cargo.toml (39 changes)
```diff
@@ -18,31 +18,32 @@ repository = "https://github.com/lancedb/lancedb"
 description = "Serverless, low-latency vector database for AI applications"
 keywords = ["lancedb", "lance", "database", "vector", "search"]
 categories = ["database-implementations"]
-rust-version = "1.80.0"  # TODO: lower this once we upgrade Lance again.
+rust-version = "1.80.0" # TODO: lower this once we upgrade Lance again.
 
 [workspace.dependencies]
-lance = { "version" = "=0.19.2", "features" = [
+lance = { "version" = "=0.20.0", "features" = [
     "dynamodb",
-]}
-lance-index = "=0.19.2"
-lance-linalg = "=0.19.2"
-lance-table = "=0.19.2"
-lance-testing = "=0.19.2"
-lance-datafusion = "=0.19.2"
-lance-encoding = "=0.19.2"
+] }
+lance-io = "0.20.0"
+lance-index = "0.20.0"
+lance-linalg = "0.20.0"
+lance-table = "0.20.0"
+lance-testing = "0.20.0"
+lance-datafusion = "0.20.0"
+lance-encoding = "0.20.0"
 # Note that this one does not include pyarrow
-arrow = { version = "52.2", optional = false }
-arrow-array = "52.2"
-arrow-data = "52.2"
-arrow-ipc = "52.2"
-arrow-ord = "52.2"
-arrow-schema = "52.2"
-arrow-arith = "52.2"
-arrow-cast = "52.2"
+arrow = { version = "53.2", optional = false }
+arrow-array = "53.2"
+arrow-data = "53.2"
+arrow-ipc = "53.2"
+arrow-ord = "53.2"
+arrow-schema = "53.2"
+arrow-arith = "53.2"
+arrow-cast = "53.2"
 async-trait = "0"
 chrono = "0.4.35"
-datafusion-common = "41.0"
-datafusion-physical-plan = "41.0"
+datafusion-common = "42.0"
+datafusion-physical-plan = "42.0"
 env_logger = "0.10"
 half = { "version" = "=2.4.1", default-features = false, features = [
     "num-traits",
```
```diff
@@ -1,8 +1,9 @@
 #!/bin/bash
 set -e
 ARCH=${1:-x86_64}
+TARGET_TRIPLE=${2:-x86_64-unknown-linux-gnu}
 
 # We pass down the current user so that when we later mount the local files
 # into the container, the files are accessible by the current user.
 pushd ci/manylinux_node
 docker build \
@@ -18,4 +19,4 @@ docker run \
   -v $(pwd):/io -w /io \
   --memory-swap=-1 \
   lancedb-node-manylinux \
-  bash ci/manylinux_node/build_vectordb.sh $ARCH
+  bash ci/manylinux_node/build_vectordb.sh $ARCH $TARGET_TRIPLE
```
```diff
@@ -11,7 +11,8 @@ fi
 export OPENSSL_STATIC=1
 export OPENSSL_INCLUDE_DIR=/usr/local/include/openssl
 
-source $HOME/.bashrc
+#Alpine doesn't have .bashrc
+FILE=$HOME/.bashrc && test -f $FILE && source $FILE
 
 cd nodejs
 npm ci
```
```diff
@@ -2,18 +2,20 @@
 # Builds the node module for manylinux. Invoked by ci/build_linux_artifacts.sh.
 set -e
 ARCH=${1:-x86_64}
+TARGET_TRIPLE=${2:-x86_64-unknown-linux-gnu}
 
 if [ "$ARCH" = "x86_64" ]; then
   export OPENSSL_LIB_DIR=/usr/local/lib64/
 else
   export OPENSSL_LIB_DIR=/usr/local/lib/
 fi
 export OPENSSL_STATIC=1
 export OPENSSL_INCLUDE_DIR=/usr/local/include/openssl
 
-source $HOME/.bashrc
+#Alpine doesn't have .bashrc
+FILE=$HOME/.bashrc && test -f $FILE && source $FILE
 
 cd node
 npm ci
 npm run build-release
-npm run pack-build
+npm run pack-build -- -t $TARGET_TRIPLE
```
ci/sysroot-aarch64-pc-windows-msvc.sh (new file, 105 lines)
```sh
#!/bin/sh

# https://github.com/mstorsjo/msvc-wine/blob/master/vsdownload.py
# https://github.com/mozilla/gecko-dev/blob/6027d1d91f2d3204a3992633b3ef730ff005fc64/build/vs/vs2022-car.yaml

# function dl() {
#   curl -O https://download.visualstudio.microsoft.com/download/pr/$1
# }

# [[.h]]

# "id": "Win11SDK_10.0.26100"
# "version": "10.0.26100.7"

# libucrt.lib

# example: <assert.h>
# dir: ucrt/
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/2ee3a5fc6e9fc832af7295b138e93839/universal%20crt%20headers%20libraries%20and%20sources-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/b1aa09b90fe314aceb090f6ec7626624/16ab2ea2187acffa6435e334796c8c89.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/400609bb0ff5804e36dbe6dcd42a7f01/6ee7bbee8435130a869cf971694fd9e2.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/2ac327317abb865a0e3f56b2faefa918/78fa3c824c2c48bd4a49ab5969adaaf7.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/f034bc0b2680f67dccd4bfeea3d0f932/7afc7b670accd8e3cc94cfffd516f5cb.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/7ed5e12f9d50f80825a8b27838cf4c7f/96076045170fe5db6d5dcf14b6f6688e.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/764edc185a696bda9e07df8891dddbbb/a1e2a83aa8a71c48c742eeaff6e71928.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/66854bedc6dbd5ccb5dd82c8e2412231/b2f03f34ff83ec013b9e45c7cd8e8a73.cab

# example: <windows.h>
# dir: um/
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/b286efac4d83a54fc49190bddef1edc9/windows%20sdk%20for%20windows%20store%20apps%20headers-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/e0dc3811d92ab96fcb72bf63d6c08d71/766c0ffd568bbb31bf7fb6793383e24a.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/613503da4b5628768497822826aed39f/8125ee239710f33ea485965f76fae646.cab

# example: <winapifamily.h>
# dir: /shared
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/122979f0348d3a2a36b6aa1a111d5d0c/windows%20sdk%20for%20windows%20store%20apps%20headers%20onecoreuap-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/766e04beecdfccff39e91dd9eb32834a/e89e3dcbb016928c7e426238337d69eb.cab


# "id": "Microsoft.VisualC.14.16.CRT.Headers"
# "version": "14.16.27045"

# example: <vcruntime.h>
# dir: MSVC/
curl -O https://download.visualstudio.microsoft.com/download/pr/bac0afd7-cc9e-4182-8a83-9898fa20e092/87bbe41e09a2f83711e72696f49681429327eb7a4b90618c35667a6ba2e2880e/Microsoft.VisualC.14.16.CRT.Headers.vsix

# [[.lib]]

# advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib runtimeobject.lib
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/944c4153b849a1f7d0c0404a4f1c05ea/windows%20sdk%20for%20windows%20store%20apps%20libs-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/5306aed3e1a38d1e8bef5934edeb2a9b/05047a45609f311645eebcac2739fc4c.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/13c8a73a0f5a6474040b26d016a26fab/13d68b8a7b6678a368e2d13ff4027521.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/149578fb3b621cdb61ee1813b9b3e791/463ad1b0783ebda908fd6c16a4abfe93.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/5c986c4f393c6b09d5aec3b539e9fb4a/5a22e5cde814b041749fb271547f4dd5.cab

# fwpuclnt.lib arm64rt.lib
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/7a332420d812f7c1d41da865ae5a7c52/windows%20sdk%20desktop%20libs%20arm64-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/19de98ed4a79938d0045d19c047936b3/3e2f7be479e3679d700ce0782e4cc318.cab

# libcmt.lib libvcruntime.lib
curl -O https://download.visualstudio.microsoft.com/download/pr/bac0afd7-cc9e-4182-8a83-9898fa20e092/227f40682a88dc5fa0ccb9cadc9ad30af99ad1f1a75db63407587d079f60d035/Microsoft.VisualC.14.16.CRT.ARM64.Desktop.vsix


msiextract universal%20crt%20headers%20libraries%20and%20sources-x86_en-us.msi
msiextract windows%20sdk%20for%20windows%20store%20apps%20headers-x86_en-us.msi
msiextract windows%20sdk%20for%20windows%20store%20apps%20headers%20onecoreuap-x86_en-us.msi
msiextract windows%20sdk%20for%20windows%20store%20apps%20libs-x86_en-us.msi
msiextract windows%20sdk%20desktop%20libs%20arm64-x86_en-us.msi
unzip -o Microsoft.VisualC.14.16.CRT.Headers.vsix
unzip -o Microsoft.VisualC.14.16.CRT.ARM64.Desktop.vsix

mkdir -p /usr/aarch64-pc-windows-msvc/usr/include
mkdir -p /usr/aarch64-pc-windows-msvc/usr/lib

# lowercase folder/file names
echo "$(find . -regex ".*/[^/]*[A-Z][^/]*")" | xargs -I{} sh -c 'mv "$(echo "{}" | sed -E '"'"'s/(.*\/)/\L\1/'"'"')" "$(echo "{}" | tr [A-Z] [a-z])"'

# .h
(cd 'program files/windows kits/10/include/10.0.26100.0' && cp -r ucrt/* um/* shared/* -t /usr/aarch64-pc-windows-msvc/usr/include)

cp -r contents/vc/tools/msvc/14.16.27023/include/* /usr/aarch64-pc-windows-msvc/usr/include

# lowercase #include "" and #include <>
find /usr/aarch64-pc-windows-msvc/usr/include -type f -exec sed -i -E 's/(#include <[^<>]*?[A-Z][^<>]*?>)|(#include "[^"]*?[A-Z][^"]*?")/\L\1\2/' "{}" ';'

# ARM intrinsics
# original dir: MSVC/

# '__n128x4' redefined in arm_neon.h
# "arm64_neon.h" included from intrin.h

(cd /usr/lib/llvm19/lib/clang/19/include && cp arm_neon.h intrin.h -t /usr/aarch64-pc-windows-msvc/usr/include)

# .lib

# _Interlocked intrinsics
# must always link with arm64rt.lib
# reason: https://developercommunity.visualstudio.com/t/libucrtlibstreamobj-error-lnk2001-unresolved-exter/1544787#T-ND1599818
# I don't understand the 'correct' fix for this, arm64rt.lib is supposed to be the workaround

(cd 'program files/windows kits/10/lib/10.0.26100.0/um/arm64' && cp advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib runtimeobject.lib fwpuclnt.lib arm64rt.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)

(cd 'contents/vc/tools/msvc/14.16.27023/lib/arm64' && cp libcmt.lib libvcruntime.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)

cp 'program files/windows kits/10/lib/10.0.26100.0/ucrt/arm64/libucrt.lib' /usr/aarch64-pc-windows-msvc/usr/lib
```
ci/sysroot-x86_64-pc-windows-msvc.sh (new file, 105 lines)
```sh
#!/bin/sh

# https://github.com/mstorsjo/msvc-wine/blob/master/vsdownload.py
# https://github.com/mozilla/gecko-dev/blob/6027d1d91f2d3204a3992633b3ef730ff005fc64/build/vs/vs2022-car.yaml

# function dl() {
#   curl -O https://download.visualstudio.microsoft.com/download/pr/$1
# }

# [[.h]]

# "id": "Win11SDK_10.0.26100"
# "version": "10.0.26100.7"

# libucrt.lib

# example: <assert.h>
# dir: ucrt/
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/2ee3a5fc6e9fc832af7295b138e93839/universal%20crt%20headers%20libraries%20and%20sources-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/b1aa09b90fe314aceb090f6ec7626624/16ab2ea2187acffa6435e334796c8c89.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/400609bb0ff5804e36dbe6dcd42a7f01/6ee7bbee8435130a869cf971694fd9e2.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/2ac327317abb865a0e3f56b2faefa918/78fa3c824c2c48bd4a49ab5969adaaf7.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/f034bc0b2680f67dccd4bfeea3d0f932/7afc7b670accd8e3cc94cfffd516f5cb.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/7ed5e12f9d50f80825a8b27838cf4c7f/96076045170fe5db6d5dcf14b6f6688e.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/764edc185a696bda9e07df8891dddbbb/a1e2a83aa8a71c48c742eeaff6e71928.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/66854bedc6dbd5ccb5dd82c8e2412231/b2f03f34ff83ec013b9e45c7cd8e8a73.cab

# example: <windows.h>
# dir: um/
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/b286efac4d83a54fc49190bddef1edc9/windows%20sdk%20for%20windows%20store%20apps%20headers-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/e0dc3811d92ab96fcb72bf63d6c08d71/766c0ffd568bbb31bf7fb6793383e24a.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/613503da4b5628768497822826aed39f/8125ee239710f33ea485965f76fae646.cab

# example: <winapifamily.h>
# dir: /shared
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/122979f0348d3a2a36b6aa1a111d5d0c/windows%20sdk%20for%20windows%20store%20apps%20headers%20onecoreuap-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/766e04beecdfccff39e91dd9eb32834a/e89e3dcbb016928c7e426238337d69eb.cab


# "id": "Microsoft.VisualC.14.16.CRT.Headers"
# "version": "14.16.27045"

# example: <vcruntime.h>
# dir: MSVC/
curl -O https://download.visualstudio.microsoft.com/download/pr/bac0afd7-cc9e-4182-8a83-9898fa20e092/87bbe41e09a2f83711e72696f49681429327eb7a4b90618c35667a6ba2e2880e/Microsoft.VisualC.14.16.CRT.Headers.vsix

# [[.lib]]

# advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/944c4153b849a1f7d0c0404a4f1c05ea/windows%20sdk%20for%20windows%20store%20apps%20libs-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/5306aed3e1a38d1e8bef5934edeb2a9b/05047a45609f311645eebcac2739fc4c.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/13c8a73a0f5a6474040b26d016a26fab/13d68b8a7b6678a368e2d13ff4027521.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/149578fb3b621cdb61ee1813b9b3e791/463ad1b0783ebda908fd6c16a4abfe93.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/5c986c4f393c6b09d5aec3b539e9fb4a/5a22e5cde814b041749fb271547f4dd5.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/bfc3904a0195453419ae4dfea7abd6fb/e10768bb6e9d0ea730280336b697da66.cab
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/637f9f3be880c71f9e3ca07b4d67345c/f9b24c8280986c0683fbceca5326d806.cab

# dbghelp.lib fwpuclnt.lib
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/9f51690d5aa804b1340ce12d1ec80f89/windows%20sdk%20desktop%20libs%20x64-x86_en-us.msi
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/d3a7df4ca3303a698640a29e558a5e5b/58314d0646d7e1a25e97c902166c3155.cab

# libcmt.lib libvcruntime.lib
curl -O https://download.visualstudio.microsoft.com/download/pr/bac0afd7-cc9e-4182-8a83-9898fa20e092/8728f21ae09940f1f4b4ee47b4a596be2509e2a47d2f0c83bbec0ea37d69644b/Microsoft.VisualC.14.16.CRT.x64.Desktop.vsix


msiextract universal%20crt%20headers%20libraries%20and%20sources-x86_en-us.msi
msiextract windows%20sdk%20for%20windows%20store%20apps%20headers-x86_en-us.msi
msiextract windows%20sdk%20for%20windows%20store%20apps%20headers%20onecoreuap-x86_en-us.msi
msiextract windows%20sdk%20for%20windows%20store%20apps%20libs-x86_en-us.msi
msiextract windows%20sdk%20desktop%20libs%20x64-x86_en-us.msi
unzip -o Microsoft.VisualC.14.16.CRT.Headers.vsix
unzip -o Microsoft.VisualC.14.16.CRT.x64.Desktop.vsix

mkdir -p /usr/x86_64-pc-windows-msvc/usr/include
mkdir -p /usr/x86_64-pc-windows-msvc/usr/lib

# lowercase folder/file names
echo "$(find . -regex ".*/[^/]*[A-Z][^/]*")" | xargs -I{} sh -c 'mv "$(echo "{}" | sed -E '"'"'s/(.*\/)/\L\1/'"'"')" "$(echo "{}" | tr [A-Z] [a-z])"'

# .h
(cd 'program files/windows kits/10/include/10.0.26100.0' && cp -r ucrt/* um/* shared/* -t /usr/x86_64-pc-windows-msvc/usr/include)

cp -r contents/vc/tools/msvc/14.16.27023/include/* /usr/x86_64-pc-windows-msvc/usr/include

# lowercase #include "" and #include <>
find /usr/x86_64-pc-windows-msvc/usr/include -type f -exec sed -i -E 's/(#include <[^<>]*?[A-Z][^<>]*?>)|(#include "[^"]*?[A-Z][^"]*?")/\L\1\2/' "{}" ';'

# x86 intrinsics
# original dir: MSVC/

# '_mm_movemask_epi8' defined in emmintrin.h
# '__v4sf' defined in xmmintrin.h
# '__v2si' defined in mmintrin.h
# '__m128d' redefined in immintrin.h
# '__m128i' redefined in intrin.h
# '_mm_comlt_epu8' defined in ammintrin.h

(cd /usr/lib/llvm19/lib/clang/19/include && cp emmintrin.h xmmintrin.h mmintrin.h immintrin.h intrin.h ammintrin.h -t /usr/x86_64-pc-windows-msvc/usr/include)

# .lib
(cd 'program files/windows kits/10/lib/10.0.26100.0/um/x64' && cp advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib dbghelp.lib fwpuclnt.lib -t /usr/x86_64-pc-windows-msvc/usr/lib)

(cd 'contents/vc/tools/msvc/14.16.27023/lib/x64' && cp libcmt.lib libvcruntime.lib -t /usr/x86_64-pc-windows-msvc/usr/lib)

cp 'program files/windows kits/10/lib/10.0.26100.0/ucrt/x64/libucrt.lib' /usr/x86_64-pc-windows-msvc/usr/lib
```
```diff
@@ -55,6 +55,9 @@ plugins:
           show_signature_annotations: true
           show_root_heading: true
+          members_order: source
+          docstring_section_style: list
+          signature_crossrefs: true
           separate_signature: true
       import:
         # for cross references
         - https://arrow.apache.org/docs/objects.inv
```
```diff
@@ -138,6 +141,7 @@ nav:
       - Jina Reranker: reranking/jina.md
       - OpenAI Reranker: reranking/openai.md
       - AnswerDotAi Rerankers: reranking/answerdotai.md
+      - Voyage AI Rerankers: reranking/voyageai.md
       - Building Custom Rerankers: reranking/custom_reranker.md
       - Example: notebooks/lancedb_reranking.ipynb
   - Filtering: sql.md
```
```diff
@@ -165,6 +169,7 @@ nav:
       - Jina Embeddings: embeddings/available_embedding_models/text_embedding_functions/jina_embedding.md
       - AWS Bedrock Text Embedding Functions: embeddings/available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md
       - IBM watsonx.ai Embeddings: embeddings/available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md
+      - Voyage AI Embeddings: embeddings/available_embedding_models/text_embedding_functions/voyageai_embedding.md
     - Multimodal Embedding Functions:
       - OpenClip embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/openclip_embedding.md
       - Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md
```
docs/package-lock.json (21 changes, generated)
```diff
@@ -19,7 +19,7 @@
     },
     "../node": {
       "name": "vectordb",
-      "version": "0.4.6",
+      "version": "0.12.0",
       "cpu": [
         "x64",
         "arm64"
@@ -31,9 +31,7 @@
         "win32"
       ],
       "dependencies": {
-        "@apache-arrow/ts": "^14.0.2",
         "@neon-rs/load": "^0.0.74",
-        "apache-arrow": "^14.0.2",
         "axios": "^1.4.0"
       },
       "devDependencies": {
@@ -46,6 +44,7 @@
         "@types/temp": "^0.9.1",
         "@types/uuid": "^9.0.3",
         "@typescript-eslint/eslint-plugin": "^5.59.1",
+        "apache-arrow-old": "npm:apache-arrow@13.0.0",
         "cargo-cp-artifact": "^0.1",
         "chai": "^4.3.7",
         "chai-as-promised": "^7.1.1",
@@ -62,15 +61,19 @@
         "ts-node-dev": "^2.0.0",
         "typedoc": "^0.24.7",
         "typedoc-plugin-markdown": "^3.15.3",
-        "typescript": "*",
+        "typescript": "^5.1.0",
         "uuid": "^9.0.0"
       },
       "optionalDependencies": {
-        "@lancedb/vectordb-darwin-arm64": "0.4.6",
-        "@lancedb/vectordb-darwin-x64": "0.4.6",
-        "@lancedb/vectordb-linux-arm64-gnu": "0.4.6",
-        "@lancedb/vectordb-linux-x64-gnu": "0.4.6",
-        "@lancedb/vectordb-win32-x64-msvc": "0.4.6"
+        "@lancedb/vectordb-darwin-arm64": "0.12.0",
+        "@lancedb/vectordb-darwin-x64": "0.12.0",
+        "@lancedb/vectordb-linux-arm64-gnu": "0.12.0",
+        "@lancedb/vectordb-linux-x64-gnu": "0.12.0",
+        "@lancedb/vectordb-win32-x64-msvc": "0.12.0"
       },
+      "peerDependencies": {
+        "@apache-arrow/ts": "^14.0.2",
+        "apache-arrow": "^14.0.2"
+      }
     },
     "../node/node_modules/apache-arrow": {
```
```diff
@@ -277,7 +277,15 @@ Product quantization can lead to approximately `16 * sizeof(float32) / 1 = 64` t
 Higher number of partitions could lead to more efficient I/O during queries and better accuracy, but it takes much more time to train.
 On `SIFT-1M` dataset, our benchmark shows that keeping each partition 1K-4K rows leads to a good latency / recall.
 
-`num_sub_vectors` specifies how many Product Quantization (PQ) short codes to generate on each vector. Because
-PQ is a lossy compression of the original vector, a higher `num_sub_vectors` usually results in
-less space distortion, and thus yields better accuracy. However, a higher `num_sub_vectors` also causes heavier I/O and
-more PQ computation, and thus, higher latency. `dimension / num_sub_vectors` should be a multiple of 8 for optimum SIMD efficiency.
+`num_sub_vectors` specifies how many Product Quantization (PQ) short codes to generate on each vector. The number should be a factor of the vector dimension. Because
+PQ is a lossy compression of the original vector, a higher `num_sub_vectors` usually results in
+less space distortion, and thus yields better accuracy. However, a higher `num_sub_vectors` also causes heavier I/O and more PQ computation, and thus, higher latency. `dimension / num_sub_vectors` should be a multiple of 8 for optimum SIMD efficiency.
+
+!!! note
+    If `num_sub_vectors` is set to be greater than the vector dimension, you will see errors like `attempt to divide by zero`.
+
+### How to choose `m` and `ef_construction` for `IVF_HNSW_*` index?
+
+`m` determines the number of connections a new node establishes with its closest neighbors upon entering the graph. Typically, `m` falls within the range of 5 to 48. Lower `m` values are suitable for low-dimensional data or scenarios where recall is less critical. Conversely, higher `m` values are beneficial for high-dimensional data or when high recall is required. In essence, a larger `m` results in a denser graph with increased connectivity, but at the expense of higher memory consumption.
+
+`ef_construction` balances build speed and accuracy. Higher values increase accuracy but slow down the build process. A typical range is 150 to 300. For good search results, a minimum value of 100 is recommended. In most cases, setting this value above 500 offers no additional benefit. Ensure that `ef_construction` is always set to a value equal to or greater than `ef` in the search phase.
```
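To make the divisibility rules above concrete, here is a minimal Python sketch of deriving both parameters before index creation. The database path, table name, and 768-dimension vectors are illustrative assumptions, not taken from the diff:

```python
import lancedb

db = lancedb.connect("./my_db")        # assumed local database path
tbl = db.open_table("my_vectors")      # assumed table with a 768-dim vector column

dim = 768                              # assumed embedding dimensionality
rows = tbl.count_rows()

# Target roughly 1K-4K rows per partition, per the guidance above.
num_partitions = max(1, rows // 2048)

# num_sub_vectors should be a factor of the dimension, and dim / num_sub_vectors
# should be a multiple of 8 for SIMD efficiency (768 / 48 = 16, a multiple of 8).
num_sub_vectors = 48

tbl.create_index(
    metric="L2",
    num_partitions=num_partitions,
    num_sub_vectors=num_sub_vectors,
)
```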
```diff
@@ -57,6 +57,13 @@ Then the greedy search routine operates as follows:
 
 ## Usage
 
+There are three key parameters to set when constructing an HNSW index:
+
+* `metric`: Use an `L2` euclidean distance metric. We also support `dot` and `cosine` distance.
+* `m`: The number of neighbors to select for each vector in the HNSW graph.
+* `ef_construction`: The number of candidates to evaluate during the construction of the HNSW graph.
+
 We can combine the above concepts to understand how to build and query an HNSW index in LanceDB.
 
 ### Construct index
```
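A minimal sketch of what such a construction call can look like in Python; the keyword names (`index_type`, `m`, `ef_construction`) and their values are assumptions for illustration, not taken from this diff:

```python
# Sketch only: assumes create_index accepts HNSW-specific keyword arguments.
tbl.create_index(
    metric="L2",               # also supports "dot" and "cosine"
    index_type="IVF_HNSW_SQ",  # assumed index type identifier
    m=20,                      # neighbors per node; typical range is 5-48
    ef_construction=300,       # build-time candidates; typical range is 150-300
)
```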
````diff
@@ -58,8 +58,10 @@ In Python, the index can be created as follows:
     # Make sure you have enough data in the table for an effective training step
     tbl.create_index(metric="L2", num_partitions=256, num_sub_vectors=96)
     ```
+!!! note
+    `num_partitions`=256 and `num_sub_vectors`=96 does not work for every dataset. Those values need to be adjusted for your particular dataset.
 
-The `num_partitions` is usually chosen to target a particular number of vectors per partition. `num_sub_vectors` is typically chosen based on the desired recall and the dimensionality of the vector. See the [FAQs](#faq) below for best practices on choosing these parameters.
+The `num_partitions` is usually chosen to target a particular number of vectors per partition. `num_sub_vectors` is typically chosen based on the desired recall and the dimensionality of the vector. See [here](../ann_indexes.md/#how-to-choose-num_partitions-and-num_sub_vectors-for-ivf_pq-index) for best practices on choosing these parameters.
````
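Since 256 and 96 are only illustrative defaults, a quick sanity check along these lines can catch mismatched values before the (potentially slow) training step; the 1536-dimension figure is an assumed example:

```python
dim = 1536            # assumed embedding dimensionality
num_partitions = 256
num_sub_vectors = 96

assert num_sub_vectors <= dim, "more sub-vectors than dimensions divides by zero"
assert dim % num_sub_vectors == 0, "num_sub_vectors should evenly divide the dimension"
assert (dim // num_sub_vectors) % 8 == 0, "dim / num_sub_vectors should be a multiple of 8"
```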
### Query the index
```diff
@@ -6,6 +6,7 @@ LanceDB registers the OpenAI embeddings function in the registry by default, as
 |---|---|---|---|
 | `name` | `str` | `"text-embedding-ada-002"` | The name of the model. |
+| `dim` | `int` | Model default | For OpenAI's newer text-embedding-3 models, a dimensionality smaller than the default 1536 can be specified. |
 | `use_azure` | `bool` | `False` | Set true to use the Azure OpenAI SDK |
```
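A minimal usage sketch based on the parameters above; the model name and `dim` value are illustrative, not taken from the diff:

```python
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector

# "openai" is the registry key; name/dim below are assumed example values.
func = get_registry().get("openai").create(name="text-embedding-3-small", dim=512)

class Words(LanceModel):
    text: str = func.SourceField()            # column embeddings are computed from
    vector: Vector(func.ndims()) = func.VectorField()  # auto-populated vector column
```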
```diff
@@ -20,7 +20,7 @@ Supported parameters (to be passed in `create` method) are:
 | Parameter | Type | Default Value | Description |
 |---|---|---|---|
-| `name` | `str` | `"voyage-3"` | The model ID of the model to use. Supported base models for Text Embeddings: voyage-3, voyage-3-lite, voyage-finance-2, voyage-multilingual-2, voyage-law-2, voyage-code-2 |
+| `name` | `str` | `None` | The model ID of the model to use. Supported base models for Text Embeddings: voyage-3, voyage-3-lite, voyage-finance-2, voyage-multilingual-2, voyage-law-2, voyage-code-2 |
 | `input_type` | `str` | `None` | Type of the input text. Defaults to None. Other options: query, document. |
 | `truncation` | `bool` | `True` | Whether to truncate the input texts to fit within the context length. |
```
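A matching sketch for the Voyage AI function, assuming the `voyageai` registry key shown elsewhere in these docs; since `name` now defaults to `None`, the model must be passed explicitly:

```python
from lancedb.embeddings import get_registry

# name no longer has a default, so pass one of the supported models explicitly.
func = get_registry().get("voyageai").create(name="voyage-3", truncation=True)
```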
```diff
@@ -53,6 +53,7 @@ These functions are registered by default to handle text embeddings.
 | [**Jina Embeddings**](available_embedding_models/text_embedding_functions/jina_embedding.md "jina") | 🔗 World-class embedding models to improve your search and RAG systems. You will need **jina api key**. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/jina.png" alt="Jina Icon" width="90" height="35">](available_embedding_models/text_embedding_functions/jina_embedding.md) |
 | [**AWS Bedrock Functions**](available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md "bedrock-text") | ☁️ AWS Bedrock supports multiple base models for generating text embeddings. You need to setup the AWS credentials to use this embedding function. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/aws_bedrock.png" alt="AWS Bedrock Icon" width="120" height="35">](available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md) |
 | [**IBM Watsonx.ai**](available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md "watsonx") | 💡 Generate text embeddings using IBM's watsonx.ai platform. **Note**: watsonx.ai library is an optional dependency. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/watsonx.png" alt="Watsonx Icon" width="140" height="35">](available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md) |
+| [**VoyageAI Embeddings**](available_embedding_models/text_embedding_functions/voyageai_embedding.md "voyageai") | 🌕 Voyage AI provides cutting-edge embedding and rerankers. This will help you get started with **VoyageAI** embedding models using LanceDB. Using the voyageai API requires the voyageai package. Install it via `pip`. | [<img src="https://www.voyageai.com/logo.svg" alt="VoyageAI Icon" width="140" height="35">](available_embedding_models/text_embedding_functions/voyageai_embedding.md) |
@@ -66,6 +67,7 @@ These functions are registered by default to handle text embeddings.
 [jina-key]: "jina"
 [aws-key]: "bedrock-text"
 [watsonx-key]: "watsonx"
+[voyageai-key]: "voyageai"
 
 ## Multi-modal Embedding Functions🖼️
```
@@ -114,12 +114,45 @@ table.create_fts_index("text",
|
||||
|
||||
LanceDB full text search supports to filter the search results by a condition, both pre-filtering and post-filtering are supported.
|
||||
|
||||
This can be invoked via the familiar `where` syntax:
|
||||
|
||||
This can be invoked via the familiar `where` syntax.
|
||||
|
||||
With pre-filtering:
=== "Python"

```python
table.search("puppy").limit(10).where("meta='foo'").to_list()
table.search("puppy").limit(10).where("meta='foo'", prefilter=True).to_list()
```

=== "TypeScript"

```typescript
await tbl
  .search("puppy")
  .select(["id", "doc"])
  .limit(10)
  .where("meta='foo'")
  .prefilter(true)
  .toArray();
```

=== "Rust"

```rust
table
    .query()
    .full_text_search(FullTextSearchQuery::new("puppy".to_owned()))
    .select(lancedb::query::Select::Columns(vec!["doc".to_owned()]))
    .limit(10)
    .only_if("meta='foo'")
    .execute()
    .await?;
```

With post-filtering:
=== "Python"

```python
table.search("puppy").limit(10).where("meta='foo'", prefilter=False).to_list()
```

=== "TypeScript"
@@ -130,6 +163,7 @@ This can be invoked via the familiar `where` syntax:
  .select(["id", "doc"])
  .limit(10)
  .where("meta='foo'")
  .prefilter(false)
  .toArray();
```

@@ -140,6 +174,7 @@ This can be invoked via the familiar `where` syntax:
    .query()
    .full_text_search(FullTextSearchQuery::new(words[0].to_owned()))
    .select(lancedb::query::Select::Columns(vec!["doc".to_owned()]))
    .postfilter()
    .limit(10)
    .only_if("meta='foo'")
    .execute()
@@ -160,3 +195,35 @@ To search for a phrase, the index must be created with `with_position=True`:
table.create_fts_index("text", use_tantivy=False, with_position=True)
```
This will allow you to search for phrases, but it will also significantly increase the index size and indexing time.
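As a sketch, a phrase query might then look like this (assuming phrases are expressed by double-quoting them inside the query string):

```python
# Only works when the index was built with with_position=True.
results = table.search('"frodo was a happy puppy"').limit(10).to_list()
```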


## Incremental indexing

LanceDB supports incremental indexing, which means you can add new records to the table without reindexing the entire table.

This can make queries more efficient, especially when the table is large and the newly added records are relatively few.

=== "Python"

```python
table.add([{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"}])
table.optimize()
```

=== "TypeScript"

```typescript
await tbl.add([{ vector: [3.1, 4.1], text: "Frodo was a happy puppy" }]);
await tbl.optimize();
```

=== "Rust"

```rust
let more_data: Box<dyn RecordBatchReader + Send> = create_some_records()?;
tbl.add(more_data).execute().await?;
tbl.optimize(OptimizeAction::All).execute().await?;
```
!!! note

    New data added after creating the FTS index will appear in search results while the incremental index is still in progress, but with increased latency due to a flat search on the unindexed portion. LanceDB Cloud automates this merging process, minimizing the impact on search speed.

@@ -153,9 +153,7 @@ table.create_fts_index(["title", "content"], use_tantivy=True, writer_heap_size=

## Current limitations

1. Currently we do not yet support incremental writes.
If you add data after FTS index creation, it won't be reflected
in search results until you do a full reindex.
1. New data added after creating the FTS index will appear in search results, but with increased latency due to a flat search on the unindexed portion. Re-indexing with `create_fts_index` will reduce latency. LanceDB Cloud automates this merging process, minimizing the impact on search speed.

2. We currently only support local filesystem paths for the FTS index.
This is a tantivy limitation. We've implemented an object store plugin
@@ -1,23 +1,35 @@
# Building Scalar Index
# Building a Scalar Index

Similar to many SQL databases, LanceDB supports several types of Scalar indices to accelerate search
Scalar indices organize data by scalar attributes (e.g. numbers, categorical values), enabling fast filtering of vector data. In vector databases, scalar indices accelerate the retrieval of scalar data associated with vectors, thus enhancing the query performance when searching for vectors that meet certain scalar criteria.

Similar to many SQL databases, LanceDB supports several types of scalar indices to accelerate search
over scalar columns.

- `BTREE`: The most common type is BTREE. This index is inspired by the btree data structure
although only the first few layers of the btree are cached in memory.
It will perform well on columns with a large number of unique values and few rows per value.
- `BITMAP`: this index stores a bitmap for each unique value in the column.
This index is useful for columns with a finite number of unique values and many rows per value.
For example, columns that represent "categories", "labels", or "tags"
- `LABEL_LIST`: a special index that is used to index list columns whose values have a finite set of possibilities.
- `BTREE`: The most common type is BTREE. The index stores a copy of the
column in sorted order. This sorted copy allows a binary search to be used to
satisfy queries.
- `BITMAP`: this index stores a bitmap for each unique value in the column. It
uses a series of bits to indicate whether a value is present in a row of a table
- `LABEL_LIST`: a special index that can be used on `List<T>` columns to
support queries with `array_contains_all` and `array_contains_any`
using an underlying bitmap index.
For example, a column that contains lists of tags (e.g. `["tag1", "tag2", "tag3"]`) can be indexed with a `LABEL_LIST` index.

!!! tip "How to choose the right scalar index type"

    `BTREE`: This index is good for scalar columns with mostly distinct values and does best when the query is highly selective.

    `BITMAP`: This index works best for low-cardinality numeric or string columns, where the number of unique values is small (i.e., less than a few thousand).

    `LABEL_LIST`: This index should be used for columns containing list-type data.

| Data Type | Filter | Index Type |
| --------------------------------------------------------------- | ----------------------------------------- | ------------ |
| Numeric, String, Temporal | `<`, `=`, `>`, `in`, `between`, `is null` | `BTREE` |
| Boolean, numbers or strings with fewer than 1,000 unique values | `<`, `=`, `>`, `in`, `between`, `is null` | `BITMAP` |
| List of low cardinality of numbers or strings | `array_has_any`, `array_has_all` | `LABEL_LIST` |

### Create a scalar index
=== "Python"

```python
@@ -46,7 +58,7 @@ over scalar columns.
await tbl.create_index("publisher", { config: lancedb.Index.bitmap() })
```

For example, the following scan will be faster if the column `my_col` has a scalar index:
The following scan will be faster if the column `book_id` has a scalar index:

=== "Python"

@@ -106,3 +118,30 @@ Scalar indices can also speed up scans containing a vector search or full text s
  .limit(10)
  .toArray();
```
### Update a scalar index
Updating the table data (adding, deleting, or modifying records) requires that you also update the scalar index. This can be done by calling `optimize`, which will trigger an update to the existing scalar index.
=== "Python"

```python
table.add([{"vector": [7, 8], "book_id": 4}])
table.optimize()
```

=== "TypeScript"

```typescript
await tbl.add([{ vector: [7, 8], book_id: 4 }]);
await tbl.optimize();
```

=== "Rust"

```rust
let more_data: Box<dyn RecordBatchReader + Send> = create_some_records()?;
tbl.add(more_data).execute().await?;
tbl.optimize(OptimizeAction::All).execute().await?;
```

!!! note

    New data added after creating the scalar index will still appear in search results if optimize is not used, but with increased latency due to a flat search on the unindexed portion. LanceDB Cloud automates the optimize process, minimizing the impact on search speed.
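One way to check that the optimize step has caught up is a sketch like the following (assuming `list_indices` and `index_stats` are available on the Python `Table`, as in this release's API):

```python
# After optimize(), the unindexed portion should shrink back toward zero.
for idx in table.list_indices():
    stats = table.index_stats(idx.name)
    print(idx.name, stats.num_indexed_rows, stats.num_unindexed_rows)
```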
@@ -27,10 +27,13 @@ LanceDB OSS supports object stores such as AWS S3 (and compatible stores), Azure

Azure Blob Storage:

<!-- skip-test -->
```python
import lancedb
db = lancedb.connect("az://bucket/path")
```
Note that for Azure, storage credentials must be configured. See [below](#azure-blob-storage) for more details.


=== "TypeScript"

@@ -87,11 +90,6 @@ In most cases, when running in the respective cloud and permissions are set up c
export TIMEOUT=60s
```

!!! note "`storage_options` availability"

    The `storage_options` parameter is only available in Python *async* API and JavaScript API.
    It is not yet supported in the Python synchronous API.
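This note is removed because the limitation no longer holds: as the Python changes later in this diff show, the synchronous `connect` now accepts `storage_options` as well, so a per-connection override can be sketched as:

```python
import lancedb

# "timeout" is one of the generic options shown above.
db = lancedb.connect("s3://bucket/path", storage_options={"timeout": "60s"})
```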

If you only want this to apply to one particular connection, you can pass the `storage_options` argument when opening the connection:

=== "Python"

@@ -274,7 +274,7 @@ table = db.create_table(table_name, schema=Content)

Sometimes your data model may contain nested objects.
For example, you may want to store the document string
and the document soure name as a nested Document object:
and the document source name as a nested Document object:

```python
class Document(BaseModel):
@@ -466,7 +466,7 @@ You can create an empty table for scenarios where you want to add data to the ta

## Adding to a table

After a table has been created, you can always add more data to it usind the `add` method
After a table has been created, you can always add more data to it using the `add` method

=== "Python"
    You can add any of the valid data structures accepted by the LanceDB table, i.e., `dict`, `list[dict]`, `pd.DataFrame`, or `Iterator[pa.RecordBatch]`. Below are some examples.
@@ -535,7 +535,7 @@ After a table has been created, you can always add more data to it usind the `ad
```

??? "Ingesting Pydantic models with LanceDB embedding API"
    When using LanceDB's embedding API, you can add Pydantic models directly to the table. LanceDB will automatically convert the `vector` field to a vector before adding it to the table. You need to specify the default value of `vector` feild as None to allow LanceDB to automatically vectorize the data.
    When using LanceDB's embedding API, you can add Pydantic models directly to the table. LanceDB will automatically convert the `vector` field to a vector before adding it to the table. You need to specify the default value of `vector` field as None to allow LanceDB to automatically vectorize the data.

```python
import lancedb
@@ -790,6 +790,101 @@ Use the `drop_table()` method on the database to remove a table.
This permanently removes the table and is not recoverable, unlike deleting rows.
If the table does not exist, an exception is raised.

## Changing schemas

While tables must have a schema specified when they are created, you can
change the schema over time. There are three methods to alter the schema of
a table:

* `add_columns`: Add new columns to the table
* `alter_columns`: Alter the name, nullability, or data type of a column
* `drop_columns`: Drop columns from the table

### Adding new columns

You can add new columns to the table with the `add_columns` method. New columns
are filled with values based on a SQL expression. For example, you can add a new
column `double_price` to the table and fill it with twice the value of `price`.

=== "Python"

```python
table.add_columns({"double_price": "price * 2"})
```
**API Reference:** [lancedb.table.Table.add_columns][]

=== "Typescript"

```typescript
--8<-- "nodejs/examples/basic.test.ts:add_columns"
```
**API Reference:** [lancedb.Table.addColumns](../js/classes/Table.md/#addcolumns)

If you want to fill it with null, you can use `cast(NULL as <data_type>)` as
the SQL expression to fill the column with nulls, while controlling the data
type of the column. Available data types are based on the
[DataFusion data types](https://datafusion.apache.org/user-guide/sql/data_types.html).
You can use any of the SQL types, such as `BIGINT`:

```sql
cast(NULL as BIGINT)
```

Using Arrow data types and the `arrow_typeof` function is not yet supported.
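For instance (the `discount` column here is hypothetical):

```python
# Adds a FLOAT column filled entirely with NULLs.
table.add_columns({"discount": "cast(NULL as FLOAT)"})
```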

<!-- TODO: we could provide a better formula for filling with nulls:
https://github.com/lancedb/lance/issues/3175
-->

### Altering existing columns

You can alter the name, nullability, or data type of a column with the `alter_columns`
method.

Changing the name or nullability of a column just updates the metadata. Because
of this, it's a fast operation. Changing the data type of a column requires
rewriting the column, which can be a heavy operation.

=== "Python"

```python
import pyarrow as pa
table.alter_columns({"path": "double_price", "rename": "dbl_price",
                     "data_type": pa.float32(), "nullable": False})
```
**API Reference:** [lancedb.table.Table.alter_columns][]

=== "Typescript"

```typescript
--8<-- "nodejs/examples/basic.test.ts:alter_columns"
```
**API Reference:** [lancedb.Table.alterColumns](../js/classes/Table.md/#altercolumns)

### Dropping columns

You can drop columns from the table with the `drop_columns` method. This will
remove the column from the schema.

<!-- TODO: Provide guidance on how to reduce disk usage once optimize helps here
waiting on: https://github.com/lancedb/lance/issues/3177
-->

=== "Python"

```python
table.drop_columns(["dbl_price"])
```
**API Reference:** [lancedb.table.Table.drop_columns][]

=== "Typescript"

```typescript
--8<-- "nodejs/examples/basic.test.ts:drop_columns"
```
**API Reference:** [lancedb.Table.dropColumns](../js/classes/Table.md/#dropcolumns)


## Handling bad vectors

In LanceDB Python, you can use the `on_bad_vectors` parameter to choose how
@@ -880,4 +975,4 @@ There are three possible settings for `read_consistency_interval`:

Learn the best practices on creating an ANN index and getting the most out of it.

[^1]: The `vectordb` package is a legacy package that is deprecated in favor of `@lancedb/lancedb`. The `vectordb` package will continue to receive bug fixes and security updates until September 2024. We recommend all new projects use `@lancedb/lancedb`. See the [migration guide](migration.md) for more information.
[^1]: The `vectordb` package is a legacy package that is deprecated in favor of `@lancedb/lancedb`. The `vectordb` package will continue to receive bug fixes and security updates until September 2024. We recommend all new projects use `@lancedb/lancedb`. See the [migration guide](../migration.md) for more information.

@@ -1,6 +1,16 @@
# Python API Reference

This section contains the API reference for the OSS Python API.
This section contains the API reference for the Python API. There is a
synchronous and an asynchronous API client.

The general flow of using the API is:

1. Use [lancedb.connect][] or [lancedb.connect_async][] to connect to a database.
2. Use the returned [lancedb.DBConnection][] or [lancedb.AsyncConnection][] to
create or open tables.
3. Use the returned [lancedb.table.Table][] or [lancedb.AsyncTable][] to query
or modify tables.


## Installation


@@ -6,6 +6,9 @@ This re-ranker uses the [Cohere](https://cohere.ai/) API to rerank the search re
!!! note
    Supported Query Types: Hybrid, Vector, FTS

```shell
pip install cohere
```

```python
import numpy

@@ -9,6 +9,7 @@ LanceDB comes with some built-in rerankers. Some of the rerankers that are avail
| `CrossEncoderReranker` | Uses a cross-encoder model to rerank search results | Vector, FTS, Hybrid |
| `ColbertReranker` | Uses a ColBERT model to rerank search results | Vector, FTS, Hybrid |
| `OpenaiReranker`(Experimental) | Uses OpenAI's chat model to rerank search results | Vector, FTS, Hybrid |
| `VoyageAIReranker` | Uses the VoyageAI Reranker API to rerank results | Vector, FTS, Hybrid |

## Using a Reranker
@@ -73,6 +74,7 @@ LanceDB comes with some built-in rerankers. Here are some of the rerankers that
- [Jina Reranker](./jina.md)
- [AnswerDotAI Rerankers](./answerdotai.md)
- [Reciprocal Rank Fusion Reranker](./rrf.md)
- [VoyageAI Reranker](./voyageai.md)

## Creating Custom Rerankers


@@ -7,6 +7,10 @@ performed on the top-k results returned by the vector search. However, pre-filte
option that performs the filter prior to vector search. This can be useful to narrow down
the search space on a very large dataset to reduce query latency.

Note that both pre-filtering and post-filtering can yield false positives. For pre-filtering, if the filter is too selective, it might eliminate relevant items that the vector search would have otherwise identified as a good match. In this case, increasing the `nprobes` parameter will help reduce such false positives. It is recommended to set `use_index=false` if you know that the filter is highly selective.

Similarly, a highly selective post-filter can lead to false positives. Increasing both `nprobes` and `refine_factor` can mitigate this issue. When deciding between pre-filtering and post-filtering, pre-filtering is generally the safer choice if you're uncertain.
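A Python sketch combining these knobs (the query vector and filter are illustrative):

```python
results = (
    table.search([0.5, 0.2])
    .where("id > 10", prefilter=True)  # filter before the vector search
    .nprobes(50)          # widen the search to offset a selective filter
    .refine_factor(10)    # re-rank a larger candidate pool
    .limit(10)
    .to_list()
)
```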

<!-- Setup Code
```python
import lancedb
@@ -57,6 +61,9 @@ const tbl = await db.createTable('myVectors', data)
```ts
--8<-- "docs/src/sql_legacy.ts:search"
```
!!! note

    Creating a [scalar index](guides/scalar_index.md) accelerates filtering.

## SQL filters


@@ -8,7 +8,7 @@
<parent>
  <groupId>com.lancedb</groupId>
  <artifactId>lancedb-parent</artifactId>
  <version>0.13.0-beta.2</version>
  <version>0.14.0-final.0</version>
  <relativePath>../pom.xml</relativePath>
</parent>


@@ -6,7 +6,7 @@

<groupId>com.lancedb</groupId>
<artifactId>lancedb-parent</artifactId>
<version>0.13.0-beta.2</version>
<version>0.14.0-final.0</version>
<packaging>pom</packaging>

<name>LanceDB Parent</name>

120
node/package-lock.json
generated
@@ -1,12 +1,12 @@
{
  "name": "vectordb",
  "version": "0.13.0-beta.2",
  "version": "0.14.0",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "vectordb",
      "version": "0.13.0-beta.2",
      "version": "0.14.0",
      "cpu": [
        "x64",
        "arm64"
@@ -52,12 +52,14 @@
        "uuid": "^9.0.0"
      },
      "optionalDependencies": {
        "@lancedb/vectordb-darwin-arm64": "0.13.0-beta.2",
        "@lancedb/vectordb-darwin-x64": "0.13.0-beta.2",
        "@lancedb/vectordb-linux-arm64-gnu": "0.13.0-beta.2",
        "@lancedb/vectordb-linux-x64-gnu": "0.13.0-beta.2",
        "@lancedb/vectordb-win32-arm64-msvc": "0.13.0-beta.2",
        "@lancedb/vectordb-win32-x64-msvc": "0.13.0-beta.2"
        "@lancedb/vectordb-darwin-arm64": "0.14.0",
        "@lancedb/vectordb-darwin-x64": "0.14.0",
        "@lancedb/vectordb-linux-arm64-gnu": "0.14.0",
        "@lancedb/vectordb-linux-arm64-musl": "0.14.0",
        "@lancedb/vectordb-linux-x64-gnu": "0.14.0",
        "@lancedb/vectordb-linux-x64-musl": "0.14.0",
        "@lancedb/vectordb-win32-arm64-msvc": "0.14.0",
        "@lancedb/vectordb-win32-x64-msvc": "0.14.0"
      },
      "peerDependencies": {
        "@apache-arrow/ts": "^14.0.2",
@@ -327,6 +329,102 @@
        "@jridgewell/sourcemap-codec": "^1.4.10"
      }
    },
    "node_modules/@lancedb/vectordb-darwin-arm64": {
      "version": "0.14.0",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.14.0.tgz",
      "integrity": "sha512-C8wp+eJQY3RMLIRfxDnOm8bYg458OI3Cz7Jh7ws6ibquBdJDCiTdwFfcUXrkoaQ9Wv4nHZOEqupj3FBMsks1hw==",
      "cpu": [
        "arm64"
      ],
      "optional": true,
      "os": [
        "darwin"
      ]
    },
    "node_modules/@lancedb/vectordb-darwin-x64": {
      "version": "0.14.0",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.14.0.tgz",
      "integrity": "sha512-5jkQuEVGaPViFb4dOjncUqVCbvEiT8XYFZoprE0yv7HUUCdt5v15GTNxey72yw+aaX2mdb2CeFIs+4ySZqy/MA==",
      "cpu": [
        "x64"
      ],
      "optional": true,
      "os": [
        "darwin"
      ]
    },
    "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
      "version": "0.14.0",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.14.0.tgz",
      "integrity": "sha512-YLboFJLQyFzsYWi2iW1nr2SGaZTaj4gERIufyTSnX+VXlEYKHke3cMFLF+EamH8eejv2HwXdJpidPaP6aSzujw==",
      "cpu": [
        "arm64"
      ],
      "optional": true,
      "os": [
        "linux"
      ]
    },
    "node_modules/@lancedb/vectordb-linux-arm64-musl": {
      "version": "0.14.0",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-musl/-/vectordb-linux-arm64-musl-0.14.0.tgz",
      "integrity": "sha512-rel/SaxGRtx5GdAkFH1IknBr0V/tbrN4jYT6FixmSvgc9kgxrMGlBUHSRAO5atdRXZ8jT7XWuOqW1QdgsmPi0g==",
      "cpu": [
        "arm64"
      ],
      "optional": true,
      "os": [
        "linux"
      ]
    },
    "node_modules/@lancedb/vectordb-linux-x64-gnu": {
      "version": "0.14.0",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.14.0.tgz",
      "integrity": "sha512-N29n8OO2JqSPaSVd5gmyh6r4x6LX0qpcCHrhkEaRoKKIXYdHQ8sAHOqHNt3xhMDLwDJfjGmzAwd977cOYM5MBw==",
      "cpu": [
        "x64"
      ],
      "optional": true,
      "os": [
        "linux"
      ]
    },
    "node_modules/@lancedb/vectordb-linux-x64-musl": {
      "version": "0.14.0",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-musl/-/vectordb-linux-x64-musl-0.14.0.tgz",
      "integrity": "sha512-36Ewl9M6IsYgxBIaThgqaSlQ++8YsSnZB85DOnuIds+sRbBfNkknvwBRFO1/FGN8RSBydFPy1irNFmCOnrlTZg==",
      "cpu": [
        "x64"
      ],
      "optional": true,
      "os": [
        "linux"
      ]
    },
    "node_modules/@lancedb/vectordb-win32-arm64-msvc": {
      "version": "0.14.0",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-arm64-msvc/-/vectordb-win32-arm64-msvc-0.14.0.tgz",
      "integrity": "sha512-4qsna5yI7umGEA868/ifr1Np66d0dhFAOIGaJKS5Z+Zm4Zplr42BjVZiNWtwwKhndtsiPJnFCYVYRKfjTLZWdg==",
      "cpu": [
        "arm64"
      ],
      "optional": true,
      "os": [
        "win32"
      ]
    },
    "node_modules/@lancedb/vectordb-win32-x64-msvc": {
      "version": "0.14.0",
      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.14.0.tgz",
      "integrity": "sha512-1u+J5WFClNc6mzgF5otevMnOxW3pj8yOHrPoIiZe9SrL8O2oVtdYfWJZYG/OST21cS0Mc4Z0upX86G0sA4kEfA==",
      "cpu": [
        "x64"
      ],
      "optional": true,
      "os": [
        "win32"
      ]
    },
    "node_modules/@neon-rs/cli": {
      "version": "0.0.160",
      "resolved": "https://registry.npmjs.org/@neon-rs/cli/-/cli-0.0.160.tgz",
@@ -1441,9 +1539,9 @@
      "dev": true
    },
    "node_modules/cross-spawn": {
      "version": "7.0.3",
      "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
      "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
      "version": "7.0.6",
      "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
      "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
      "dev": true,
      "dependencies": {
        "path-key": "^3.1.0",

@@ -1,7 +1,8 @@
{
  "name": "vectordb",
  "version": "0.13.0-beta.2",
  "version": "0.14.0",
  "description": " Serverless, low-latency vector database for AI applications",
  "private": false,
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
  "scripts": {
@@ -84,16 +85,20 @@
      "aarch64-apple-darwin": "@lancedb/vectordb-darwin-arm64",
      "x86_64-unknown-linux-gnu": "@lancedb/vectordb-linux-x64-gnu",
      "aarch64-unknown-linux-gnu": "@lancedb/vectordb-linux-arm64-gnu",
      "x86_64-unknown-linux-musl": "@lancedb/vectordb-linux-x64-musl",
      "aarch64-unknown-linux-musl": "@lancedb/vectordb-linux-arm64-musl",
      "x86_64-pc-windows-msvc": "@lancedb/vectordb-win32-x64-msvc",
      "aarch64-pc-windows-msvc": "@lancedb/vectordb-win32-arm64-msvc"
    }
  },
  "optionalDependencies": {
    "@lancedb/vectordb-darwin-arm64": "0.13.0-beta.2",
    "@lancedb/vectordb-darwin-x64": "0.13.0-beta.2",
    "@lancedb/vectordb-linux-arm64-gnu": "0.13.0-beta.2",
    "@lancedb/vectordb-linux-x64-gnu": "0.13.0-beta.2",
    "@lancedb/vectordb-win32-x64-msvc": "0.13.0-beta.2",
    "@lancedb/vectordb-win32-arm64-msvc": "0.13.0-beta.2"
    "@lancedb/vectordb-darwin-x64": "0.14.0",
    "@lancedb/vectordb-darwin-arm64": "0.14.0",
    "@lancedb/vectordb-linux-x64-gnu": "0.14.0",
    "@lancedb/vectordb-linux-arm64-gnu": "0.14.0",
    "@lancedb/vectordb-linux-x64-musl": "0.14.0",
    "@lancedb/vectordb-linux-arm64-musl": "0.14.0",
    "@lancedb/vectordb-win32-x64-msvc": "0.14.0",
    "@lancedb/vectordb-win32-arm64-msvc": "0.14.0"
  }
}

@@ -1,7 +1,7 @@
[package]
name = "lancedb-nodejs"
edition.workspace = true
version = "0.13.0-beta.2"
version = "0.14.0"
license.workspace = true
description.workspace = true
repository.workspace = true

@@ -110,7 +110,10 @@ describe("given a connection", () => {
    let table = await db.createTable("test", data, { useLegacyFormat: true });

    const isV2 = async (table: Table) => {
      const data = await table.query().toArrow({ maxBatchLength: 100000 });
      const data = await table
        .query()
        .limit(10000)
        .toArrow({ maxBatchLength: 100000 });
      console.log(data.batches.length);
      return data.batches.length < 5;
    };

@@ -477,6 +477,54 @@ describe("When creating an index", () => {
    expect(rst.numRows).toBe(1);
  });

  it("should create and search IVF_HNSW indices", async () => {
    await tbl.createIndex("vec", {
      config: Index.hnswSq(),
    });

    // check index directory
    const indexDir = path.join(tmpDir.name, "test.lance", "_indices");
    expect(fs.readdirSync(indexDir)).toHaveLength(1);
    const indices = await tbl.listIndices();
    expect(indices.length).toBe(1);
    expect(indices[0]).toEqual({
      name: "vec_idx",
      indexType: "IvfHnswSq",
      columns: ["vec"],
    });

    // Search without specifying the column
    let rst = await tbl
      .query()
      .limit(2)
      .nearestTo(queryVec)
      .distanceType("dot")
      .toArrow();
    expect(rst.numRows).toBe(2);

    // Search using `vectorSearch`
    rst = await tbl.vectorSearch(queryVec).limit(2).toArrow();
    expect(rst.numRows).toBe(2);

    // Search with specifying the column
    const rst2 = await tbl
      .query()
      .limit(2)
      .nearestTo(queryVec)
      .column("vec")
      .toArrow();
    expect(rst2.numRows).toBe(2);
    expect(rst.toString()).toEqual(rst2.toString());

    // test offset
    rst = await tbl.query().limit(2).offset(1).nearestTo(queryVec).toArrow();
    expect(rst.numRows).toBe(1);

    // test ef
    rst = await tbl.query().limit(2).nearestTo(queryVec).ef(100).toArrow();
    expect(rst.numRows).toBe(2);
  });

  it("should be able to query unindexed data", async () => {
    await tbl.createIndex("vec");
    await tbl.add([
@@ -537,11 +585,11 @@ describe("When creating an index", () => {
    expect(fs.readdirSync(indexDir)).toHaveLength(1);

    for await (const r of tbl.query().where("id > 1").select(["id"])) {
      expect(r.numRows).toBe(298);
      expect(r.numRows).toBe(10);
    }
    // should also work with 'filter' alias
    for await (const r of tbl.query().filter("id > 1").select(["id"])) {
      expect(r.numRows).toBe(298);
      expect(r.numRows).toBe(10);
    }
  });

@@ -777,6 +825,18 @@ describe("schema evolution", function () {
      new Field("price", new Float64(), true),
    ]);
    expect(await table.schema()).toEqual(expectedSchema);

    await table.alterColumns([{ path: "new_id", dataType: "int32" }]);
    const expectedSchema2 = new Schema([
      new Field("new_id", new Int32(), true),
      new Field(
        "vector",
        new FixedSizeList(2, new Field("item", new Float32(), true)),
        true,
      ),
      new Field("price", new Float64(), true),
    ]);
    expect(await table.schema()).toEqual(expectedSchema2);
  });

  it("can drop a column from the schema", async function () {

@@ -116,6 +116,26 @@ test("basic table examples", async () => {
    await tbl.add(data);
    // --8<-- [end:add_data]
  }

  {
    // --8<-- [start:add_columns]
    await tbl.addColumns([{ name: "double_price", valueSql: "price * 2" }]);
    // --8<-- [end:add_columns]
    // --8<-- [start:alter_columns]
    await tbl.alterColumns([
      {
        path: "double_price",
        rename: "dbl_price",
        dataType: "float",
        nullable: true,
      },
    ]);
    // --8<-- [end:alter_columns]
    // --8<-- [start:drop_columns]
    await tbl.dropColumns(["dbl_price"]);
    // --8<-- [end:drop_columns]
  }

  {
    // --8<-- [start:vector_search]
    const res = await tbl.search([100, 100]).limit(2).toArray();

@@ -385,6 +385,20 @@ export class VectorQuery extends QueryBase<NativeVectorQuery> {
    return this;
  }

  /**
   * Set the number of candidates to consider during the search
   *
   * This argument is only used when the vector column has an HNSW index.
   * If there is no index then this value is ignored.
   *
   * Increasing this value will increase the recall of your query but will
   * also increase the latency of your query. The default value is 1.5*limit.
   */
  ef(ef: number): VectorQuery {
    super.doCall((inner) => inner.ef(ef));
    return this;
  }

  /**
   * Set the vector column to query
   *

@@ -87,6 +87,12 @@ export interface OptimizeOptions {
  deleteUnverified: boolean;
}

export interface Version {
  version: number;
  timestamp: Date;
  metadata: Record<string, string>;
}

/**
 * A Table is a collection of Records in a LanceDB Database.
 *
@@ -360,6 +366,11 @@
   */
  abstract checkoutLatest(): Promise<void>;

  /**
   * List all the versions of the table
   */
  abstract listVersions(): Promise<Version[]>;

  /**
   * Restore the table to the currently checked out version
   *
@@ -659,6 +670,14 @@ export class LocalTable extends Table {
    await this.inner.checkoutLatest();
  }

  async listVersions(): Promise<Version[]> {
    return (await this.inner.listVersions()).map((version) => ({
      version: version.version,
      timestamp: new Date(version.timestamp / 1000),
      metadata: version.metadata,
    }));
  }

  async restore(): Promise<void> {
    await this.inner.restore();
  }

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-arm64",
  "version": "0.13.0-beta.2",
  "version": "0.14.0",
  "os": ["darwin"],
  "cpu": ["arm64"],
  "main": "lancedb.darwin-arm64.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-x64",
  "version": "0.13.0-beta.2",
  "version": "0.14.0",
  "os": ["darwin"],
  "cpu": ["x64"],
  "main": "lancedb.darwin-x64.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-arm64-gnu",
  "version": "0.13.0-beta.2",
  "version": "0.14.0",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "lancedb.linux-arm64-gnu.node",

3
nodejs/npm/linux-arm64-musl/README.md
Normal file
@@ -0,0 +1,3 @@
# `@lancedb/lancedb-linux-arm64-musl`

This is the **aarch64-unknown-linux-musl** binary for `@lancedb/lancedb`
13
nodejs/npm/linux-arm64-musl/package.json
Normal file
@@ -0,0 +1,13 @@
{
  "name": "@lancedb/lancedb-linux-arm64-musl",
  "version": "0.14.0",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "lancedb.linux-arm64-musl.node",
  "files": ["lancedb.linux-arm64-musl.node"],
  "license": "Apache 2.0",
  "engines": {
    "node": ">= 18"
  },
  "libc": ["musl"]
}
@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-x64-gnu",
  "version": "0.13.0-beta.2",
  "version": "0.14.0",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "lancedb.linux-x64-gnu.node",

3
nodejs/npm/linux-x64-musl/README.md
Normal file
@@ -0,0 +1,3 @@
# `@lancedb/lancedb-linux-x64-musl`

This is the **x86_64-unknown-linux-musl** binary for `@lancedb/lancedb`
13
nodejs/npm/linux-x64-musl/package.json
Normal file
@@ -0,0 +1,13 @@
{
  "name": "@lancedb/lancedb-linux-x64-musl",
  "version": "0.14.0",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "lancedb.linux-x64-musl.node",
  "files": ["lancedb.linux-x64-musl.node"],
  "license": "Apache 2.0",
  "engines": {
    "node": ">= 18"
  },
  "libc": ["musl"]
}
@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-win32-arm64-msvc",
  "version": "0.13.0-beta.2",
  "version": "0.14.0",
  "os": [
    "win32"
  ],

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-win32-x64-msvc",
  "version": "0.13.0-beta.2",
  "version": "0.14.0",
  "os": ["win32"],
  "cpu": ["x64"],
  "main": "lancedb.win32-x64-msvc.node",

10
nodejs/package-lock.json
generated
@@ -1,12 +1,12 @@
{
  "name": "@lancedb/lancedb",
  "version": "0.13.0-beta.1",
  "version": "0.14.0",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "@lancedb/lancedb",
      "version": "0.13.0-beta.1",
      "version": "0.14.0",
      "cpu": [
        "x64",
        "arm64"
@@ -6052,9 +6052,9 @@
      }
    },
    "node_modules/cross-spawn": {
      "version": "7.0.3",
      "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
      "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
      "version": "7.0.6",
      "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
      "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
      "devOptional": true,
      "dependencies": {
        "path-key": "^3.1.0",

@@ -10,7 +10,8 @@
    "vector database",
    "ann"
  ],
  "version": "0.13.0-beta.2",
  "private": false,
  "version": "0.14.0",
  "main": "dist/index.js",
  "exports": {
    ".": "./dist/index.js",
@@ -24,11 +25,14 @@
  "triples": {
    "defaults": false,
    "additional": [
      "aarch64-apple-darwin",
      "aarch64-unknown-linux-gnu",
      "x86_64-apple-darwin",
      "aarch64-apple-darwin",
      "x86_64-unknown-linux-gnu",
      "x86_64-pc-windows-msvc"
      "aarch64-unknown-linux-gnu",
      "x86_64-unknown-linux-musl",
      "aarch64-unknown-linux-musl",
      "x86_64-pc-windows-msvc",
      "aarch64-pc-windows-msvc"
    ]
  }
},

@@ -167,6 +167,11 @@ impl VectorQuery {
        self.inner = self.inner.clone().nprobes(nprobe as usize);
    }

    #[napi]
    pub fn ef(&mut self, ef: u32) {
        self.inner = self.inner.clone().ef(ef as usize);
    }

    #[napi]
    pub fn bypass_vector_index(&mut self) {
        self.inner = self.inner.clone().bypass_vector_index()

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use arrow_ipc::writer::FileWriter;
use lancedb::ipc::ipc_file_to_batches;
use lancedb::table::{
@@ -176,16 +178,20 @@ impl Table {
    #[napi(catch_unwind)]
    pub async fn alter_columns(&self, alterations: Vec<ColumnAlteration>) -> napi::Result<()> {
        for alteration in &alterations {
            if alteration.rename.is_none() && alteration.nullable.is_none() {
            if alteration.rename.is_none()
                && alteration.nullable.is_none()
                && alteration.data_type.is_none()
            {
                return Err(napi::Error::from_reason(
                    "Alteration must have a 'rename' or 'nullable' field.",
                    "Alteration must have a 'rename', 'dataType', or 'nullable' field.",
                ));
            }
        }
        let alterations = alterations
            .into_iter()
            .map(LanceColumnAlteration::from)
            .collect::<Vec<_>>();
            .map(LanceColumnAlteration::try_from)
            .collect::<std::result::Result<Vec<_>, String>>()
            .map_err(napi::Error::from_reason)?;

        self.inner_ref()?
            .alter_columns(&alterations)
@@ -226,6 +232,28 @@ impl Table {
        self.inner_ref()?.checkout_latest().await.default_error()
    }

    #[napi(catch_unwind)]
    pub async fn list_versions(&self) -> napi::Result<Vec<Version>> {
        self.inner_ref()?
            .list_versions()
            .await
            .map(|versions| {
                versions
                    .iter()
                    .map(|version| Version {
                        version: version.version as i64,
                        timestamp: version.timestamp.timestamp_micros(),
                        metadata: version
                            .metadata
                            .iter()
                            .map(|(k, v)| (k.clone(), v.clone()))
                            .collect(),
                    })
                    .collect()
            })
            .default_error()
    }

    #[napi(catch_unwind)]
    pub async fn restore(&self) -> napi::Result<()> {
        self.inner_ref()?.restore().await.default_error()
@@ -409,24 +437,43 @@ pub struct ColumnAlteration {
    /// The new name of the column. If not provided then the name will not be changed.
    /// This must be distinct from the names of all other columns in the table.
    pub rename: Option<String>,
    /// A new data type for the column. If not provided then the data type will not be changed.
    /// Changing data types is limited to casting to the same general type. For example, these
    /// changes are valid:
    /// * `int32` -> `int64` (integers)
    /// * `double` -> `float` (floats)
    /// * `string` -> `large_string` (strings)
    /// But these changes are not:
    /// * `int32` -> `double` (mix integers and floats)
    /// * `string` -> `int32` (mix strings and integers)
    pub data_type: Option<String>,
    /// Set the new nullability. Note that a nullable column cannot be made non-nullable.
    pub nullable: Option<bool>,
}

impl From<ColumnAlteration> for LanceColumnAlteration {
    fn from(js: ColumnAlteration) -> Self {
impl TryFrom<ColumnAlteration> for LanceColumnAlteration {
    type Error = String;
    fn try_from(js: ColumnAlteration) -> std::result::Result<Self, Self::Error> {
        let ColumnAlteration {
            path,
            rename,
            nullable,
            data_type,
        } = js;
        Self {
        let data_type = if let Some(data_type) = data_type {
            Some(
                lancedb::utils::string_to_datatype(&data_type)
                    .ok_or_else(|| format!("Invalid data type: {}", data_type))?,
            )
        } else {
            None
        };
        Ok(Self {
            path,
            rename,
            nullable,
            // TODO: wire up this field
            data_type: None,
        }
            data_type,
        })
    }
}

@@ -466,3 +513,10 @@ impl From<lancedb::index::IndexStatistics> for IndexStatistics {
        }
    }
}

#[napi(object)]
pub struct Version {
    pub version: i64,
    pub timestamp: i64,
    pub metadata: HashMap<String, String>,
}

@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.16.0"
current_version = "0.17.1-beta.0"
parse = """(?x)
(?P<major>0|[1-9]\\d*)\\.
(?P<minor>0|[1-9]\\d*)\\.

@@ -1,6 +1,6 @@
[package]
name = "lancedb-python"
version = "0.16.0"
version = "0.17.1-beta.0"
edition.workspace = true
description = "Python bindings for LanceDB"
license.workspace = true
@@ -14,25 +14,30 @@ name = "_lancedb"
crate-type = ["cdylib"]

[dependencies]
arrow = { version = "52.1", features = ["pyarrow"] }
lancedb = { path = "../rust/lancedb" }
arrow = { version = "53.2", features = ["pyarrow"] }
lancedb = { path = "../rust/lancedb", default-features = false }
env_logger.workspace = true
pyo3 = { version = "0.21", features = ["extension-module", "abi3-py38", "gil-refs"] }
# Using this fork for now: https://github.com/awestlake87/pyo3-asyncio/issues/119
# pyo3-asyncio = { version = "0.20", features = ["attributes", "tokio-runtime"] }
pyo3-asyncio-0-21 = { version = "0.21.0", features = ["attributes", "tokio-runtime"] }

pyo3 = { version = "0.22.2", features = [
    "extension-module",
    "abi3-py39",
    "gil-refs"
] }
pyo3-async-runtimes = { version = "0.22", features = ["attributes", "tokio-runtime"] }
pin-project = "1.1.5"
futures.workspace = true
tokio = { version = "1.36.0", features = ["sync"] }
tokio = { version = "1.40", features = ["sync"] }

[build-dependencies]
pyo3-build-config = { version = "0.20.3", features = [
    "extension-module",
    "abi3-py38",
    "abi3-py39",
] }

[features]
default = ["remote"]
default = ["default-tls", "remote"]
fp16kernels = ["lancedb/fp16kernels"]
remote = ["lancedb/remote"]
# TLS
default-tls = ["lancedb/default-tls"]
native-tls = ["lancedb/native-tls"]
rustls-tls = ["lancedb/rustls-tls"]

@@ -3,8 +3,7 @@ name = "lancedb"
# version in Cargo.toml
dependencies = [
    "deprecation",
    "nest-asyncio~=1.0",
    "pylance==0.19.2",
    "pylance==0.20.0",
    "tqdm>=4.27.0",
    "pydantic>=1.10",
    "packaging",
@@ -31,7 +30,6 @@ classifiers = [
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3 :: Only",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",

@@ -36,6 +36,7 @@ def connect(
    read_consistency_interval: Optional[timedelta] = None,
    request_thread_pool: Optional[Union[int, ThreadPoolExecutor]] = None,
    client_config: Union[ClientConfig, Dict[str, Any], None] = None,
    storage_options: Optional[Dict[str, str]] = None,
    **kwargs: Any,
) -> DBConnection:
    """Connect to a LanceDB database.
@@ -67,6 +68,9 @@
        Configuration options for the LanceDB Cloud HTTP client. If a dict, then
        the keys are the attributes of the ClientConfig class. If None, then the
        default configuration is used.
    storage_options: dict, optional
        Additional options for the storage backend. See available options at
        https://lancedb.github.io/lancedb/guides/storage/

    Examples
    --------
@@ -106,12 +110,17 @@ def connect(
            # TODO: remove this (deprecation warning downstream)
            request_thread_pool=request_thread_pool,
            client_config=client_config,
            storage_options=storage_options,
            **kwargs,
        )

    if kwargs:
        raise ValueError(f"Unknown keyword arguments: {kwargs}")
    return LanceDBConnection(uri, read_consistency_interval=read_consistency_interval)
    return LanceDBConnection(
        uri,
        read_consistency_interval=read_consistency_interval,
        storage_options=storage_options,
    )


async def connect_async(

@@ -79,9 +79,21 @@ class Query:
    def limit(self, limit: int): ...
    def offset(self, offset: int): ...
    def nearest_to(self, query_vec: pa.Array) -> VectorQuery: ...
    def nearest_to_text(self, query: dict) -> Query: ...
    def nearest_to_text(self, query: dict) -> FTSQuery: ...
    async def execute(self, max_batch_length: Optional[int]) -> RecordBatchStream: ...

class FTSQuery:
    def where(self, filter: str): ...
    def select(self, columns: List[str]): ...
    def limit(self, limit: int): ...
    def offset(self, offset: int): ...
    def fast_search(self): ...
    def with_row_id(self): ...
    def postfilter(self): ...
    def nearest_to(self, query_vec: pa.Array) -> HybridQuery: ...
    async def execute(self, max_batch_length: Optional[int]) -> RecordBatchStream: ...
    async def explain_plan(self) -> str: ...

class VectorQuery:
    async def execute(self) -> RecordBatchStream: ...
    def where(self, filter: str): ...
@@ -95,6 +107,24 @@ class VectorQuery:
    def refine_factor(self, refine_factor: int): ...
    def nprobes(self, nprobes: int): ...
    def bypass_vector_index(self): ...
    def nearest_to_text(self, query: dict) -> HybridQuery: ...

class HybridQuery:
    def where(self, filter: str): ...
    def select(self, columns: List[str]): ...
    def limit(self, limit: int): ...
    def offset(self, offset: int): ...
    def fast_search(self): ...
    def with_row_id(self): ...
    def postfilter(self): ...
    def distance_type(self, distance_type: str): ...
    def refine_factor(self, refine_factor: int): ...
    def nprobes(self, nprobes: int): ...
    def bypass_vector_index(self): ...
    def to_vector_query(self) -> VectorQuery: ...
    def to_fts_query(self) -> FTSQuery: ...
    def get_limit(self) -> int: ...
    def get_with_row_id(self) -> bool: ...

class CompactionStats:
    fragments_removed: int

25
python/python/lancedb/background_loop.py
Normal file
@@ -0,0 +1,25 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

import asyncio
import threading


class BackgroundEventLoop:
    """
    A background event loop that can run futures.

    Used to bridge sync and async code, without messing with users' event loops.
    """

    def __init__(self):
        self.loop = asyncio.new_event_loop()
        self.thread = threading.Thread(
            target=self.loop.run_forever,
            name="LanceDBBackgroundEventLoop",
            daemon=True,
        )
        self.thread.start()

    def run(self, future):
        return asyncio.run_coroutine_threadsafe(future, self.loop).result()
@@ -13,34 +13,29 @@

from __future__ import annotations

import asyncio
import os
from abc import abstractmethod
from pathlib import Path
from typing import TYPE_CHECKING, Dict, Iterable, List, Literal, Optional, Union

import pyarrow as pa
from overrides import EnforceOverrides, override
from pyarrow import fs

from lancedb.common import data_to_reader, validate_schema
from lancedb.common import data_to_reader, sanitize_uri, validate_schema
from lancedb.background_loop import BackgroundEventLoop

from ._lancedb import connect as lancedb_connect
from .table import (
    AsyncTable,
    LanceTable,
    Table,
    _table_path,
    sanitize_create_table,
)
from .util import (
    fs_from_uri,
    get_uri_location,
    get_uri_scheme,
    validate_table_name,
)

if TYPE_CHECKING:
    import pyarrow as pa
    from .pydantic import LanceModel
    from datetime import timedelta

@@ -48,6 +43,8 @@ if TYPE_CHECKING:
    from .common import DATA, URI
    from .embeddings import EmbeddingFunctionConfig

LOOP = BackgroundEventLoop()


class DBConnection(EnforceOverrides):
    """An active LanceDB connection interface."""
@@ -180,6 +177,7 @@ class DBConnection(EnforceOverrides):
        control over how data is saved, either provide the PyArrow schema to
        convert to or else provide a [PyArrow Table](pyarrow.Table) directly.

        >>> import pyarrow as pa
        >>> custom_schema = pa.schema([
        ...     pa.field("vector", pa.list_(pa.float32(), 2)),
        ...     pa.field("lat", pa.float32()),
@@ -327,7 +325,11 @@ class LanceDBConnection(DBConnection):
    """

    def __init__(
        self, uri: URI, *, read_consistency_interval: Optional[timedelta] = None
        self,
        uri: URI,
        *,
        read_consistency_interval: Optional[timedelta] = None,
        storage_options: Optional[Dict[str, str]] = None,
    ):
        if not isinstance(uri, Path):
            scheme = get_uri_scheme(uri)
@@ -338,9 +340,27 @@ class LanceDBConnection(DBConnection):
            uri = uri.expanduser().absolute()
            Path(uri).mkdir(parents=True, exist_ok=True)
        self._uri = str(uri)

        self._entered = False
        self.read_consistency_interval = read_consistency_interval
        self.storage_options = storage_options

        if read_consistency_interval is not None:
            read_consistency_interval_secs = read_consistency_interval.total_seconds()
        else:
            read_consistency_interval_secs = None

        async def do_connect():
            return await lancedb_connect(
                sanitize_uri(uri),
                None,
                None,
                None,
                read_consistency_interval_secs,
                None,
                storage_options,
            )

        self._conn = AsyncConnection(LOOP.run(do_connect()))

    def __repr__(self) -> str:
        val = f"{self.__class__.__name__}({self._uri}"
@@ -364,32 +384,7 @@ class LanceDBConnection(DBConnection):
        Iterator of str.
            A list of table names.
        """
        try:
            asyncio.get_running_loop()
            # User application is async. Soon we will just tell them to use the
            # async version. Until then fallback to the old sync implementation.
            try:
                filesystem = fs_from_uri(self.uri)[0]
            except pa.ArrowInvalid:
                raise NotImplementedError("Unsupported scheme: " + self.uri)

            try:
                loc = get_uri_location(self.uri)
                paths = filesystem.get_file_info(fs.FileSelector(loc))
            except FileNotFoundError:
                # It is ok if the file does not exist since it will be created
                paths = []
            tables = [
                os.path.splitext(file_info.base_name)[0]
                for file_info in paths
                if file_info.extension == "lance"
            ]
            tables.sort()
            return tables
        except RuntimeError:
            # User application is sync. It is safe to use the async implementation
            # under the hood.
            return asyncio.run(self._async_get_table_names(page_token, limit))
        return LOOP.run(self._conn.table_names(start_after=page_token, limit=limit))

    def __len__(self) -> int:
        return len(self.table_names())
@@ -461,19 +456,16 @@ class LanceDBConnection(DBConnection):
            If True, ignore if the table does not exist.
        """
        try:
            table_uri = _table_path(self.uri, name)
            filesystem, path = fs_from_uri(table_uri)
            filesystem.delete_dir(path)
        except FileNotFoundError:
            LOOP.run(self._conn.drop_table(name))
        except ValueError as e:
            if not ignore_missing:
                raise
                raise e
            if f"Table '{name}' was not found" not in str(e):
                raise e

    @override
    def drop_database(self):
        dummy_table_uri = _table_path(self.uri, "dummy")
        uri = dummy_table_uri.removesuffix("dummy.lance")
        filesystem, path = fs_from_uri(uri)
        filesystem.delete_dir(path)
        LOOP.run(self._conn.drop_database())


class AsyncConnection(object):
@@ -689,6 +681,7 @@ class AsyncConnection(object):
        control over how data is saved, either provide the PyArrow schema to
        convert to or else provide a [PyArrow Table](pyarrow.Table) directly.

        >>> import pyarrow as pa
        >>> custom_schema = pa.schema([
        ...     pa.field("vector", pa.list_(pa.float32(), 2)),
        ...     pa.field("lat", pa.float32()),

|
||||
@@ -48,6 +48,9 @@ class OpenAIEmbeddings(TextEmbeddingFunction):
    organization: Optional[str] = None
    api_key: Optional[str] = None

    # Set true to use Azure OpenAI API
    use_azure: bool = False

    def ndims(self):
        return self._ndims

@@ -83,25 +86,33 @@ class OpenAIEmbeddings(TextEmbeddingFunction):
        """
        openai = attempt_import_or_raise("openai")

        valid_texts = []
        valid_indices = []
        for idx, text in enumerate(texts):
            if text:
                valid_texts.append(text)
                valid_indices.append(idx)

        # TODO retry, rate limit, token limit
        try:
            if self.name == "text-embedding-ada-002":
                rs = self._openai_client.embeddings.create(input=texts, model=self.name)
            else:
                kwargs = {
                    "input": texts,
                    "model": self.name,
                }
                if self.dim:
                    kwargs["dimensions"] = self.dim
                rs = self._openai_client.embeddings.create(**kwargs)
            kwargs = {
                "input": valid_texts,
                "model": self.name,
            }
            if self.name != "text-embedding-ada-002":
                kwargs["dimensions"] = self.dim

            rs = self._openai_client.embeddings.create(**kwargs)
            valid_embeddings = {
                idx: v.embedding for v, idx in zip(rs.data, valid_indices)
            }
        except openai.BadRequestError:
            logging.exception("Bad request: %s", texts)
            return [None] * len(texts)
        except Exception:
            logging.exception("OpenAI embeddings error")
            raise
        return [v.embedding for v in rs.data]
        return [valid_embeddings.get(idx, None) for idx in range(len(texts))]

    @cached_property
    def _openai_client(self):

@@ -115,4 +126,8 @@ class OpenAIEmbeddings(TextEmbeddingFunction):
            kwargs["organization"] = self.organization
        if self.api_key:
            kwargs["api_key"] = self.api_key
        return openai.OpenAI(**kwargs)

        if self.use_azure:
            return openai.AzureOpenAI(**kwargs)
        else:
            return openai.OpenAI(**kwargs)
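A hedged sketch of the new `use_azure` switch, assuming the function is obtained from the embedding registry under the name "openai"; the Azure endpoint and credentials are read by the openai package itself (for example from its environment variables), which is an assumption here rather than something this diff shows:

from lancedb.embeddings import get_registry

# With use_azure=True the cached client becomes openai.AzureOpenAI
# instead of openai.OpenAI.
openai_func = get_registry().get("openai").create(
    name="text-embedding-3-small",
    use_azure=True,
)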
@@ -12,18 +12,22 @@
# limitations under the License.

import os
from typing import ClassVar, List, Union
from typing import ClassVar, TYPE_CHECKING, List, Union

import numpy as np
import pyarrow as pa

from ..util import attempt_import_or_raise
from .base import TextEmbeddingFunction
from .base import EmbeddingFunction
from .registry import register
from .utils import api_key_not_found_help, TEXT
from .utils import api_key_not_found_help, IMAGES

if TYPE_CHECKING:
    import PIL


@register("voyageai")
class VoyageAIEmbeddingFunction(TextEmbeddingFunction):
class VoyageAIEmbeddingFunction(EmbeddingFunction):
    """
    An embedding function that uses the VoyageAI API

@@ -36,6 +40,7 @@ class VoyageAIEmbeddingFunction(TextEmbeddingFunction):

    * voyage-3
    * voyage-3-lite
    * voyage-multimodal-3
    * voyage-finance-2
    * voyage-multilingual-2
    * voyage-law-2

@@ -54,7 +59,7 @@ class VoyageAIEmbeddingFunction(TextEmbeddingFunction):
            .create(name="voyage-3")

        class TextModel(LanceModel):
            text: str = voyageai.SourceField()
            data: str = voyageai.SourceField()
            vector: Vector(voyageai.ndims()) = voyageai.VectorField()

        data = [ { "text": "hello world" },

@@ -77,6 +82,7 @@ class VoyageAIEmbeddingFunction(TextEmbeddingFunction):
            return 1536
        elif self.name in [
            "voyage-3",
            "voyage-multimodal-3",
            "voyage-finance-2",
            "voyage-multilingual-2",
            "voyage-law-2",

@@ -85,19 +91,19 @@ class VoyageAIEmbeddingFunction(TextEmbeddingFunction):
        else:
            raise ValueError(f"Model {self.name} not supported")

    def compute_query_embeddings(self, query: str, *args, **kwargs) -> List[np.array]:
        return self.compute_source_embeddings(query, input_type="query")
    def sanitize_input(self, images: IMAGES) -> Union[List[bytes], np.ndarray]:
        """
        Sanitize the input to the embedding function.
        """
        if isinstance(images, (str, bytes)):
            images = [images]
        elif isinstance(images, pa.Array):
            images = images.to_pylist()
        elif isinstance(images, pa.ChunkedArray):
            images = images.combine_chunks().to_pylist()
        return images

    def compute_source_embeddings(self, texts: TEXT, *args, **kwargs) -> List[np.array]:
        texts = self.sanitize_input(texts)
        input_type = (
            kwargs.get("input_type") or "document"
        )  # assume source input type if not passed by `compute_query_embeddings`
        return self.generate_embeddings(texts, input_type=input_type)

    def generate_embeddings(
        self, texts: Union[List[str], np.ndarray], *args, **kwargs
    ) -> List[np.array]:
    def generate_text_embeddings(self, text: str, **kwargs) -> np.ndarray:
        """
        Get the embeddings for the given texts

@@ -109,15 +115,55 @@ class VoyageAIEmbeddingFunction(TextEmbeddingFunction):

        truncation: Optional[bool]
        """
        VoyageAIEmbeddingFunction._init_client()
        rs = VoyageAIEmbeddingFunction.client.embed(
            texts=texts, model=self.name, **kwargs
        )
        if self.name in ["voyage-multimodal-3"]:
            rs = VoyageAIEmbeddingFunction._get_client().multimodal_embed(
                inputs=[[text]], model=self.name, **kwargs
            )
        else:
            rs = VoyageAIEmbeddingFunction._get_client().embed(
                texts=[text], model=self.name, **kwargs
            )

        return [emb for emb in rs.embeddings]
        return rs.embeddings[0]

    def generate_image_embedding(
        self, image: "PIL.Image.Image", **kwargs
    ) -> np.ndarray:
        rs = VoyageAIEmbeddingFunction._get_client().multimodal_embed(
            inputs=[[image]], model=self.name, **kwargs
        )
        return rs.embeddings[0]

    def compute_query_embeddings(
        self, query: Union[str, "PIL.Image.Image"], *args, **kwargs
    ) -> List[np.ndarray]:
        """
        Compute the embeddings for a given user query

        Parameters
        ----------
        query : Union[str, PIL.Image.Image]
            The query to embed. A query can be either text or an image.
        """
        if isinstance(query, str):
            return [self.generate_text_embeddings(query, input_type="query")]
        else:
            PIL = attempt_import_or_raise("PIL", "pillow")
            if isinstance(query, PIL.Image.Image):
                return [self.generate_image_embedding(query, input_type="query")]
            else:
raise TypeError("Only text PIL images supported as query")
|
||||

    def compute_source_embeddings(
        self, images: IMAGES, *args, **kwargs
    ) -> List[np.array]:
        images = self.sanitize_input(images)
        return [
            self.generate_image_embedding(img, input_type="document") for img in images
        ]

    @staticmethod
    def _init_client():
    def _get_client():
        if VoyageAIEmbeddingFunction.client is None:
            voyageai = attempt_import_or_raise("voyageai")
            if os.environ.get("VOYAGE_API_KEY") is None:

@@ -125,3 +171,4 @@ class VoyageAIEmbeddingFunction(TextEmbeddingFunction):
            VoyageAIEmbeddingFunction.client = voyageai.Client(
                os.environ["VOYAGE_API_KEY"]
            )
        return VoyageAIEmbeddingFunction.client
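A sketch of the new multimodal path, assuming the registry name "voyageai" and a local image file; only the model name and method names come from the diff above:

from PIL import Image
from lancedb.embeddings import get_registry

voyage = get_registry().get("voyageai").create(name="voyage-multimodal-3")
# Text queries go through generate_text_embeddings, image queries
# through generate_image_embedding; both return a single vector.
text_vec = voyage.compute_query_embeddings("a photo of a cat")
image_vec = voyage.compute_query_embeddings(Image.open("cat.png"))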
@@ -110,7 +110,16 @@ class FTS:
        remove_stop_words: bool = False,
        ascii_folding: bool = False,
    ):
        self._inner = LanceDbIndex.fts(with_position=with_position)
        self._inner = LanceDbIndex.fts(
            with_position=with_position,
            base_tokenizer=base_tokenizer,
            language=language,
            max_token_length=max_token_length,
            lower_case=lower_case,
            stem=stem,
            remove_stop_words=remove_stop_words,
            ascii_folding=ascii_folding,
        )


class HnswPq:
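The FTS config now forwards the full tokenizer configuration to the native index. A sketch using the keyword names from the constructor above (the column name is illustrative):

from lancedb.index import FTS

config = FTS(
    with_position=False,
    base_tokenizer="simple",
    language="English",
    stem=True,
    remove_stop_words=True,
    ascii_folding=True,
)
# e.g. await table.create_index("text", config=config)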
python/python/lancedb/integrations/__init__.py (new file)
python/python/lancedb/integrations/pyarrow.py (new file, 248 lines)
@@ -0,0 +1,248 @@
import logging
from typing import Any, List, Optional, Tuple, Union, Literal

import pyarrow as pa

from ..table import Table

Filter = Union[str, pa.compute.Expression]
Keys = Union[str, List[str]]
JoinType = Literal[
    "left semi",
    "right semi",
    "left anti",
    "right anti",
    "inner",
    "left outer",
    "right outer",
    "full outer",
]


class PyarrowScannerAdapter(pa.dataset.Scanner):
    def __init__(
        self,
        table: Table,
        columns: Optional[List[str]] = None,
        filter: Optional[Filter] = None,
        batch_size: Optional[int] = None,
        batch_readahead: Optional[int] = None,
        fragment_readahead: Optional[int] = None,
        fragment_scan_options: Optional[Any] = None,
        use_threads: bool = True,
        memory_pool: Optional[Any] = None,
    ):
        self.table = table
        self.columns = columns
        self.filter = filter
        self.batch_size = batch_size
        if batch_readahead is not None:
            logging.debug("ignoring batch_readahead which has no lance equivalent")
        if fragment_readahead is not None:
            logging.debug("ignoring fragment_readahead which has no lance equivalent")
        if fragment_scan_options is not None:
            raise NotImplementedError("fragment_scan_options not supported")
        if use_threads is False:
            raise NotImplementedError("use_threads=False not supported")
        if memory_pool is not None:
            raise NotImplementedError("memory_pool not supported")

    def count_rows(self):
        return self.table.count_rows(self.filter)

    def from_batches(self, **kwargs):
        raise NotImplementedError

    def from_dataset(self, **kwargs):
        raise NotImplementedError

    def from_fragment(self, **kwargs):
        raise NotImplementedError

    def head(self, num_rows: int):
        return self.to_reader(limit=num_rows).read_all()

    @property
    def projected_schema(self):
        return self.head(1).schema

    def scan_batches(self):
        return self.to_reader()

    def take(self, indices: List[int]):
        raise NotImplementedError

    def to_batches(self):
        return self.to_reader()

    def to_table(self):
        return self.to_reader().read_all()

    def to_reader(self, *, limit: Optional[int] = None):
        query = self.table.search()
        # Disable the builtin limit
        if limit is None:
            num_rows = self.count_rows()
            query.limit(num_rows)
        elif limit <= 0:
            raise ValueError("limit must be positive")
        else:
            query.limit(limit)
        if self.columns is not None:
            query = query.select(self.columns)
        if self.filter is not None:
            query = query.where(self.filter, prefilter=True)
        return query.to_batches(batch_size=self.batch_size)


class PyarrowDatasetAdapter(pa.dataset.Dataset):
    def __init__(self, table: Table):
        self.table = table

    def count_rows(self, filter: Optional[Filter] = None):
        return self.table.count_rows(filter)

    def get_fragments(self, filter: Optional[Filter] = None):
        raise NotImplementedError

    def head(
        self,
        num_rows: int,
        columns: Optional[List[str]] = None,
        filter: Optional[Filter] = None,
        batch_size: Optional[int] = None,
        batch_readahead: Optional[int] = None,
        fragment_readahead: Optional[int] = None,
        fragment_scan_options: Optional[Any] = None,
        use_threads: bool = True,
        memory_pool: Optional[Any] = None,
    ):
        return self.scanner(
            columns,
            filter,
            batch_size,
            batch_readahead,
            fragment_readahead,
            fragment_scan_options,
            use_threads,
            memory_pool,
        ).head(num_rows)

    def join(
        self,
        right_dataset: Any,
        keys: Keys,
        right_keys: Optional[Keys] = None,
        join_type: Optional[JoinType] = None,
        left_suffix: Optional[str] = None,
        right_suffix: Optional[str] = None,
        coalesce_keys: bool = True,
        use_threads: bool = True,
    ):
        raise NotImplementedError

    def join_asof(
        self,
        right_dataset: Any,
        on: str,
        by: Keys,
        tolerance: int,
        right_on: Optional[str] = None,
        right_by: Optional[Keys] = None,
    ):
        raise NotImplementedError

    @property
    def partition_expression(self):
        raise NotImplementedError

    def replace_schema(self, schema: pa.Schema):
        raise NotImplementedError

    def scanner(
        self,
        columns: Optional[List[str]] = None,
        filter: Optional[Filter] = None,
        batch_size: Optional[int] = None,
        batch_readahead: Optional[int] = None,
        fragment_readahead: Optional[int] = None,
        fragment_scan_options: Optional[Any] = None,
        use_threads: bool = True,
        memory_pool: Optional[Any] = None,
    ):
        return PyarrowScannerAdapter(
            self.table,
            columns,
            filter,
            batch_size,
            batch_readahead,
            fragment_readahead,
            fragment_scan_options,
            use_threads,
            memory_pool,
        )

    @property
    def schema(self):
        return self.table.schema

    def sort_by(self, sorting: Union[str, List[Tuple[str, bool]]]):
        raise NotImplementedError

    def take(
        self,
        indices: List[int],
        columns: Optional[List[str]] = None,
        filter: Optional[Filter] = None,
        batch_size: Optional[int] = None,
        batch_readahead: Optional[int] = None,
        fragment_readahead: Optional[int] = None,
        fragment_scan_options: Optional[Any] = None,
        use_threads: bool = True,
        memory_pool: Optional[Any] = None,
    ):
        raise NotImplementedError

    def to_batches(
        self,
        columns: Optional[List[str]] = None,
        filter: Optional[Filter] = None,
        batch_size: Optional[int] = None,
        batch_readahead: Optional[int] = None,
        fragment_readahead: Optional[int] = None,
        fragment_scan_options: Optional[Any] = None,
        use_threads: bool = True,
        memory_pool: Optional[Any] = None,
    ):
        return self.scanner(
            columns,
            filter,
            batch_size,
            batch_readahead,
            fragment_readahead,
            fragment_scan_options,
            use_threads,
            memory_pool,
        ).to_batches()

    def to_table(
        self,
        columns: Optional[List[str]] = None,
        filter: Optional[Filter] = None,
        batch_size: Optional[int] = None,
        batch_readahead: Optional[int] = None,
        fragment_readahead: Optional[int] = None,
        fragment_scan_options: Optional[Any] = None,
        use_threads: bool = True,
        memory_pool: Optional[Any] = None,
    ):
        return self.scanner(
            columns,
            filter,
            batch_size,
            batch_readahead,
            fragment_readahead,
            fragment_scan_options,
            use_threads,
            memory_pool,
        ).to_table()
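A minimal sketch of the new adapter: it wraps a LanceDB table so that code expecting a pyarrow Dataset can scan it. The table contents are illustrative, and direct instantiation of a Dataset subclass like this is assumed from the file above rather than verified here:

import lancedb
from lancedb.integrations.pyarrow import PyarrowDatasetAdapter

db = lancedb.connect("./.lancedb")
tbl = db.create_table("demo", [{"vector": [1.0, 2.0], "id": 1}])

dataset = PyarrowDatasetAdapter(tbl)
print(dataset.count_rows())  # delegates to Table.count_rows
print(dataset.to_table())    # full scan via Table.search()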
@@ -1,15 +1,5 @@
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

"""Pydantic (v1 / v2) adapter for LanceDB"""

@@ -30,6 +20,7 @@ from typing import (
    Type,
    Union,
    _GenericAlias,
    GenericAlias,
)

import numpy as np

@@ -75,7 +66,7 @@ def vector(dim: int, value_type: pa.DataType = pa.float32()):


def Vector(
    dim: int, value_type: pa.DataType = pa.float32()
    dim: int, value_type: pa.DataType = pa.float32(), nullable: bool = True
) -> Type[FixedSizeListMixin]:
    """Pydantic Vector Type.

@@ -88,6 +79,8 @@ def Vector(
        The dimension of the vector.
    value_type : pyarrow.DataType, optional
        The value type of the vector, by default pa.float32()
    nullable : bool, optional
        Whether the vector is nullable, by default it is True.

    Examples
    --------

@@ -103,7 +96,7 @@ def Vector(
    >>> assert schema == pa.schema([
    ...     pa.field("id", pa.int64(), False),
    ...     pa.field("url", pa.utf8(), False),
    ...     pa.field("embeddings", pa.list_(pa.float32(), 768), False)
    ...     pa.field("embeddings", pa.list_(pa.float32(), 768))
    ... ])
    """

@@ -112,6 +105,10 @@ def Vector(
        def __repr__(self):
            return f"FixedSizeList(dim={dim})"

        @staticmethod
        def nullable() -> bool:
            return nullable

        @staticmethod
        def dim() -> int:
            return dim
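A sketch of the new `nullable` flag; the model is illustrative and assumes `LanceModel.to_arrow_schema()` as the way to inspect the generated schema:

from lancedb.pydantic import LanceModel, Vector

class Document(LanceModel):
    # The default is now nullable=True; opt out per field.
    embedding: Vector(2, nullable=False)

print(Document.to_arrow_schema().field("embedding").nullable)  # False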
@@ -205,9 +202,7 @@ else:
def _pydantic_to_arrow_type(field: FieldInfo) -> pa.DataType:
    """Convert a Pydantic FieldInfo to Arrow DataType"""

    if isinstance(field.annotation, _GenericAlias) or (
        sys.version_info > (3, 9) and isinstance(field.annotation, types.GenericAlias)
    ):
    if isinstance(field.annotation, (_GenericAlias, GenericAlias)):
        origin = field.annotation.__origin__
        args = field.annotation.__args__
        if origin is list:

@@ -235,7 +230,7 @@ def _pydantic_to_arrow_type(field: FieldInfo) -> pa.DataType:

def is_nullable(field: FieldInfo) -> bool:
    """Check if a Pydantic FieldInfo is nullable."""
    if isinstance(field.annotation, _GenericAlias):
    if isinstance(field.annotation, (_GenericAlias, GenericAlias)):
        origin = field.annotation.__origin__
        args = field.annotation.__args__
        if origin == Union:

@@ -246,6 +241,10 @@ def is_nullable(field: FieldInfo) -> bool:
        for typ in args:
            if typ is type(None):
                return True
    elif inspect.isclass(field.annotation) and issubclass(
        field.annotation, FixedSizeListMixin
    ):
        return field.annotation.nullable()
    return False


@@ -26,6 +26,7 @@ from typing import (
    Union,
)

import asyncio
import deprecation
import numpy as np
import pyarrow as pa

@@ -44,6 +45,8 @@ if TYPE_CHECKING:
    import polars as pl

    from ._lancedb import Query as LanceQuery
    from ._lancedb import FTSQuery as LanceFTSQuery
    from ._lancedb import HybridQuery as LanceHybridQuery
    from ._lancedb import VectorQuery as LanceVectorQuery
    from .common import VEC
    from .pydantic import LanceModel

@@ -131,6 +134,8 @@ class Query(pydantic.BaseModel):

    fast_search: bool = False

    ef: Optional[int] = None


class LanceQueryBuilder(ABC):
    """An abstract query builder. Subclasses are defined for vector search,

@@ -257,6 +262,7 @@ class LanceQueryBuilder(ABC):
        self._with_row_id = False
        self._vector = None
        self._text = None
        self._ef = None

    @deprecation.deprecated(
        deprecated_in="0.3.1",

@@ -322,6 +328,14 @@ class LanceQueryBuilder(ABC):
        """
        raise NotImplementedError

    @abstractmethod
    def to_batches(self, /, batch_size: Optional[int] = None) -> pa.Table:
        """
        Execute the query and return the results as a pyarrow
        [RecordBatchReader](https://arrow.apache.org/docs/python/generated/pyarrow.RecordBatchReader.html)
        """
        raise NotImplementedError

    def to_list(self) -> List[dict]:
        """
        Execute the query and return the results as a list of dictionaries.

@@ -367,11 +381,13 @@ class LanceQueryBuilder(ABC):
        ----------
        limit: int
            The maximum number of results to return.
            By default the query is limited to the first 10.
            Call this method and pass 0, a negative value,
            or None to remove the limit.
            *WARNING* if you have a large dataset, removing
            the limit can potentially result in reading a
            The default query limit is 10 results.
            For ANN/KNN queries, you must specify a limit.
            Entering 0, a negative number, or None will reset
            the limit to the default value of 10.
            *WARNING* if you have a large dataset, setting
            the limit to a large number, e.g. the table size,
            can potentially result in reading a
            large amount of data into memory and cause
            out of memory issues.

@@ -638,6 +654,28 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
        self._nprobes = nprobes
        return self

    def ef(self, ef: int) -> LanceVectorQueryBuilder:
        """Set the number of candidates to consider during search.

        Higher values will yield better recall (more likely to find vectors if
        they exist) at the expense of latency.

        This only applies to the HNSW-related index.
        The default value is 1.5 * limit.

        Parameters
        ----------
        ef: int
            The number of candidates to consider during search.

        Returns
        -------
        LanceVectorQueryBuilder
            The LanceQueryBuilder object.
        """
        self._ef = ef
        return self

    def refine_factor(self, refine_factor: int) -> LanceVectorQueryBuilder:
        """Set the refine factor to use, increasing the number of vectors sampled.

@@ -700,6 +738,7 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
            with_row_id=self._with_row_id,
            offset=self._offset,
            fast_search=self._fast_search,
            ef=self._ef,
        )
        result_set = self._table._execute_query(query, batch_size)
        if self._reranker is not None:
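A sketch of the new `ef` knob on the sync builder; `tbl` and the query vector are illustrative, and the knob only takes effect when the column has an HNSW-based index:

results = (
    tbl.search([0.1, 0.2])
    .limit(20)
    .ef(64)  # consider more HNSW candidates for better recall
    .to_list()
)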
@@ -841,6 +880,9 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):
        check_reranker_result(results)
        return results

    def to_batches(self, /, batch_size: Optional[int] = None):
        raise NotImplementedError("to_batches on an FTS query")

    def tantivy_to_arrow(self) -> pa.Table:
        try:
            import tantivy

@@ -943,6 +985,9 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):

class LanceEmptyQueryBuilder(LanceQueryBuilder):
    def to_arrow(self) -> pa.Table:
        return self.to_batches().read_all()

    def to_batches(self, /, batch_size: Optional[int] = None) -> pa.RecordBatchReader:
        query = Query(
            columns=self._columns,
            filter=self._where,

@@ -952,7 +997,7 @@ class LanceEmptyQueryBuilder(LanceQueryBuilder):
            # not actually respected in remote query
            offset=self._offset or 0,
        )
        return self._table._execute_query(query).read_all()
        return self._table._execute_query(query)

    def rerank(self, reranker: Reranker) -> LanceEmptyQueryBuilder:
        """Rerank the results using the specified reranker.

@@ -1071,6 +1116,8 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
            self._vector_query.nprobes(self._nprobes)
        if self._refine_factor:
            self._vector_query.refine_factor(self._refine_factor)
        if self._ef:
            self._vector_query.ef(self._ef)

        with ThreadPoolExecutor() as executor:
            fts_future = executor.submit(self._fts_query.with_row_id(True).to_arrow)

@@ -1080,32 +1127,55 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
            fts_results = fts_future.result()
            vector_results = vector_future.result()

        # convert to ranks first if needed
        if self._norm == "rank":
            vector_results = self._rank(vector_results, "_distance")
            fts_results = self._rank(fts_results, "_score")
        return self._combine_hybrid_results(
            fts_results=fts_results,
            vector_results=vector_results,
            norm=self._norm,
            fts_query=self._fts_query._query,
            reranker=self._reranker,
            limit=self._limit,
            with_row_ids=self._with_row_id,
        )

    @staticmethod
    def _combine_hybrid_results(
        fts_results: pa.Table,
        vector_results: pa.Table,
        norm: str,
        fts_query: str,
        reranker,
        limit: int,
        with_row_ids: bool,
    ) -> pa.Table:
        if norm == "rank":
            vector_results = LanceHybridQueryBuilder._rank(vector_results, "_distance")
            fts_results = LanceHybridQueryBuilder._rank(fts_results, "_score")

        # normalize the scores to be between 0 and 1, 0 being most relevant
        vector_results = self._normalize_scores(vector_results, "_distance")
        vector_results = LanceHybridQueryBuilder._normalize_scores(
            vector_results, "_distance"
        )

        # In fts higher scores represent relevance. Not inverting them here as
        # rerankers might need to preserve this score to support `return_score="all"`
        fts_results = self._normalize_scores(fts_results, "_score")
        fts_results = LanceHybridQueryBuilder._normalize_scores(fts_results, "_score")

        results = self._reranker.rerank_hybrid(
            self._fts_query._query, vector_results, fts_results
        )
        results = reranker.rerank_hybrid(fts_query, vector_results, fts_results)

        check_reranker_result(results)

        # apply limit after reranking
        results = results.slice(length=self._limit)
        results = results.slice(length=limit)

        if not self._with_row_id:
        if not with_row_ids:
            results = results.drop(["_rowid"])

        return results

    def _rank(self, results: pa.Table, column: str, ascending: bool = True):
    def to_batches(self):
        raise NotImplementedError("to_batches not yet supported on a hybrid query")

    @staticmethod
    def _rank(results: pa.Table, column: str, ascending: bool = True):
        if len(results) == 0:
            return results
        # Get the _score column from results

@@ -1122,7 +1192,8 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
        )
        return results

    def _normalize_scores(self, results: pa.Table, column: str, invert=False):
    @staticmethod
    def _normalize_scores(results: pa.Table, column: str, invert=False):
        if len(results) == 0:
            return results
        # Get the _score column from results

@@ -1197,6 +1268,29 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
        self._nprobes = nprobes
        return self

    def ef(self, ef: int) -> LanceHybridQueryBuilder:
        """
        Set the number of candidates to consider during search.

        Higher values will yield better recall (more likely to find vectors if
        they exist) at the expense of latency.

        This only applies to the HNSW-related index.
        The default value is 1.5 * limit.

        Parameters
        ----------
        ef: int
            The number of candidates to consider during search.

        Returns
        -------
        LanceHybridQueryBuilder
            The LanceHybridQueryBuilder object.
        """
        self._ef = ef
        return self

    def metric(self, metric: Literal["L2", "cosine", "dot"]) -> LanceHybridQueryBuilder:
        """Set the distance metric to use.

@@ -1449,10 +1543,11 @@ class AsyncQueryBase(object):
        ...     print(plan)
        >>> asyncio.run(doctest_example()) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        ProjectionExec: expr=[vector@0 as vector, _distance@2 as _distance]
        FilterExec: _distance@2 IS NOT NULL
        SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], preserve_partitioning=[false]
        KNNVectorDistance: metric=l2
        LanceScan: uri=..., projection=[vector], row_id=true, row_addr=false, ordered=false
        GlobalLimitExec: skip=0, fetch=10
        FilterExec: _distance@2 IS NOT NULL
        SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], preserve_partitioning=[false]
        KNNVectorDistance: metric=l2
        LanceScan: uri=..., projection=[vector], row_id=true, row_addr=false, ordered=false

        Parameters
        ----------

@@ -1495,7 +1590,8 @@ class AsyncQuery(AsyncQueryBase):
        return pa.array(vec)

    def nearest_to(
        self, query_vector: Optional[Union[VEC, Tuple, List[VEC]]] = None
        self,
        query_vector: Union[VEC, Tuple, List[VEC]],
    ) -> AsyncVectorQuery:
        """
        Find the nearest vectors to the given query vector.

@@ -1542,6 +1638,9 @@ class AsyncQuery(AsyncQueryBase):
        will be added to the results. This column will contain the index of the
        query vector that the result is nearest to.
        """
        if query_vector is None:
            raise ValueError("query_vector can not be None")

        if (
            isinstance(query_vector, list)
            and len(query_vector) > 0

@@ -1560,7 +1659,7 @@ class AsyncQuery(AsyncQueryBase):

    def nearest_to_text(
        self, query: str, columns: Union[str, List[str]] = []
    ) -> AsyncQuery:
    ) -> AsyncFTSQuery:
        """
        Find the documents that are most relevant to the given text query.

@@ -1583,8 +1682,90 @@ class AsyncQuery(AsyncQueryBase):
        """
        if isinstance(columns, str):
            columns = [columns]
        self._inner.nearest_to_text({"query": query, "columns": columns})
        return self
        return AsyncFTSQuery(
            self._inner.nearest_to_text({"query": query, "columns": columns})
        )


class AsyncFTSQuery(AsyncQueryBase):
    """A query for full text search for LanceDB."""

    def __init__(self, inner: LanceFTSQuery):
        super().__init__(inner)
        self._inner = inner

    def get_query(self):
        return self._inner.get_query()

    def nearest_to(
        self,
        query_vector: Union[VEC, Tuple, List[VEC]],
    ) -> AsyncHybridQuery:
        """
        In addition to doing text search on the LanceDB Table, also
        find the nearest vectors to the given query vector.

        This converts the query from an FTS query to a hybrid query. Results
        from the vector search will be combined with results from the FTS query.

        This method will attempt to convert the input to the query vector
        expected by the embedding model. If the input cannot be converted
        then an error will be thrown.

        By default, there is no embedding model, and the input should be
        something that can be converted to a pyarrow array of floats. This
        includes lists, numpy arrays, and tuples.

        If there is only one vector column (a column whose data type is a
        fixed size list of floats) then the column does not need to be specified.
        If there is more than one vector column you must use
        [AsyncVectorQuery.column][lancedb.query.AsyncVectorQuery.column] to specify
        which column you would like to compare with.

        If no index has been created on the vector column then a vector query
        will perform a distance comparison between the query vector and every
        vector in the database and then sort the results. This is sometimes
        called a "flat search".

        For small databases, with tens of thousands of vectors or less, this can
        be reasonably fast. In larger databases you should create a vector index
        on the column. If there is a vector index then an "approximate" nearest
        neighbor search (frequently called an ANN search) will be performed. This
        search is much faster, but the results will be approximate.

        The query can be further parameterized using the returned builder. There
        are various ANN search parameters that will let you fine tune your recall
        accuracy vs search latency.

        Hybrid searches always have a [limit][]. If `limit` has not been called then
        a default `limit` of 10 will be used.

        Typically, a single vector is passed in as the query. However, you can also
        pass in multiple vectors. This can be useful if you want to find the nearest
        vectors to multiple query vectors. This is not expected to be faster than
        making multiple queries concurrently; it is just a convenience method.
        If multiple vectors are passed in then an additional column `query_index`
        will be added to the results. This column will contain the index of the
        query vector that the result is nearest to.
        """
        if query_vector is None:
            raise ValueError("query_vector can not be None")

        if (
            isinstance(query_vector, list)
            and len(query_vector) > 0
            and not isinstance(query_vector[0], (float, int))
        ):
            # multiple have been passed
            query_vectors = [AsyncQuery._query_vec_to_array(v) for v in query_vector]
            new_self = self._inner.nearest_to(query_vectors[0])
            for v in query_vectors[1:]:
                new_self.add_query_vector(v)
            return AsyncHybridQuery(new_self)
        else:
            return AsyncHybridQuery(
                self._inner.nearest_to(AsyncQuery._query_vec_to_array(query_vector))
            )


class AsyncVectorQuery(AsyncQueryBase):
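With `nearest_to_text` now returning an AsyncFTSQuery, a hybrid query can be built by chaining, as the explain_plan doctest later in this diff also does; the table and values here are illustrative:

async def hybrid_search(table):
    # FTS first, then vectors: the chain upgrades the FTS query to a
    # hybrid query, reranked with RRF by default.
    return await (
        table.query()
        .nearest_to_text("hello world")
        .nearest_to([0.1, 0.2])
        .to_arrow()
    )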
@@ -1618,7 +1799,7 @@ class AsyncVectorQuery(AsyncQueryBase):
        """
        Set the number of partitions to search (probe)

        This argument is only used when the vector column has an IVF PQ index.
        This argument is only used when the vector column has an IVF-based index.
        If there is no index then this value is ignored.

        The IVF stage of IVF PQ divides the input into partitions (clusters) of

@@ -1640,6 +1821,21 @@ class AsyncVectorQuery(AsyncQueryBase):
        self._inner.nprobes(nprobes)
        return self

    def ef(self, ef: int) -> AsyncVectorQuery:
        """
        Set the number of candidates to consider during search

        This argument is only used when the vector column has an HNSW index.
        If there is no index then this value is ignored.

        Increasing this value will increase the recall of your query but will also
        increase the latency of your query. The default value is 1.5 * limit. This
        default is good for many cases but the best value to use will depend on your
        data and the recall that you need to achieve.
        """
        self._inner.ef(ef)
        return self

    def refine_factor(self, refine_factor: int) -> AsyncVectorQuery:
        """
        A multiplier to control how many additional rows are taken during the refine

@@ -1706,3 +1902,160 @@ class AsyncVectorQuery(AsyncQueryBase):
        """
        self._inner.bypass_vector_index()
        return self

    def nearest_to_text(
        self, query: str, columns: Union[str, List[str]] = []
    ) -> AsyncHybridQuery:
        """
        Find the documents that are most relevant to the given text query,
        in addition to vector search.

        This converts the vector query into a hybrid query.

        This search will perform a full text search on the table and return
        the most relevant documents, combined with the vector query results.
        The text relevance is determined by BM25.

        The columns to search must have a native FTS index
        (Tantivy-based indices do not work with this method).

        By default, all indexed columns are searched; for now only one
        column can be searched at a time.

        Parameters
        ----------
        query: str
            The text query to search for.
        columns: str or list of str, default None
            The columns to search in. If None, all indexed columns are searched.
            For now only one column can be searched at a time.
        """
        if isinstance(columns, str):
            columns = [columns]
        return AsyncHybridQuery(
            self._inner.nearest_to_text({"query": query, "columns": columns})
        )


class AsyncHybridQuery(AsyncQueryBase):
    """
    A query builder that performs hybrid vector and full text search.
    Results are combined and reranked based on the specified reranker.
    By default, the results are reranked using the RRFReranker, which
    uses the reciprocal rank fusion score for reranking.

    To make the vector and fts results comparable, the scores are normalized.
    Instead of normalizing scores, the `normalize` parameter can be set to "rank"
    in the `rerank` method to convert the scores to ranks and then normalize them.
    """

    def __init__(self, inner: LanceHybridQuery):
        super().__init__(inner)
        self._inner = inner
        self._norm = "score"
        self._reranker = RRFReranker()

    def rerank(
        self, reranker: Reranker = RRFReranker(), normalize: str = "score"
    ) -> AsyncHybridQuery:
        """
        Rerank the hybrid search results using the specified reranker. The reranker
        must be an instance of the Reranker class.

        Parameters
        ----------
        reranker: Reranker, default RRFReranker()
            The reranker to use. Must be an instance of the Reranker class.
        normalize: str, default "score"
            The method to normalize the scores. Can be "rank" or "score". If "rank",
            the scores are converted to ranks and then normalized. If "score", the
            scores are normalized directly.

        Returns
        -------
        AsyncHybridQuery
            The AsyncHybridQuery object.
        """
        if normalize not in ["rank", "score"]:
            raise ValueError("normalize must be 'rank' or 'score'.")
        if reranker and not isinstance(reranker, Reranker):
            raise ValueError("reranker must be an instance of Reranker class.")

        self._norm = normalize
        self._reranker = reranker

        return self

    async def to_batches(self):
        raise NotImplementedError("to_batches not yet supported on a hybrid query")

    async def to_arrow(self) -> pa.Table:
        fts_query = AsyncFTSQuery(self._inner.to_fts_query())
        vec_query = AsyncVectorQuery(self._inner.to_vector_query())

        # save the row ID choice that was made on the query builder and force it
        # to actually fetch the row ids because we need this for reranking
        with_row_ids = self._inner.get_with_row_id()
        fts_query.with_row_id()
        vec_query.with_row_id()

        fts_results, vector_results = await asyncio.gather(
            fts_query.to_arrow(),
            vec_query.to_arrow(),
        )

        return LanceHybridQueryBuilder._combine_hybrid_results(
            fts_results=fts_results,
            vector_results=vector_results,
            norm=self._norm,
            fts_query=fts_query.get_query(),
            reranker=self._reranker,
            limit=self._inner.get_limit(),
            with_row_ids=with_row_ids,
        )

    async def explain_plan(self, verbose: Optional[bool] = False):
        """Return the execution plan for this query.

        The output includes both the vector and FTS search plans.

        Examples
        --------
        >>> import asyncio
        >>> from lancedb import connect_async
        >>> from lancedb.index import FTS
        >>> async def doctest_example():
        ...     conn = await connect_async("./.lancedb")
        ...     table = await conn.create_table("my_table", [{"vector": [99, 99], "text": "hello world"}])
        ...     await table.create_index("text", config=FTS(with_position=False))
        ...     query = [100, 100]
        ...     plan = await table.query().nearest_to([1, 2]).nearest_to_text("hello").explain_plan(True)
        ...     print(plan)
        >>> asyncio.run(doctest_example()) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        Vector Search Plan:
        ProjectionExec: expr=[vector@0 as vector, text@3 as text, _distance@2 as _distance]
        Take: columns="vector, _rowid, _distance, (text)"
        CoalesceBatchesExec: target_batch_size=1024
        GlobalLimitExec: skip=0, fetch=10
        FilterExec: _distance@2 IS NOT NULL
        SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], preserve_partitioning=[false]
        KNNVectorDistance: metric=l2
        LanceScan: uri=..., projection=[vector], row_id=true, row_addr=false, ordered=false
        FTS Search Plan:
        LanceScan: uri=..., projection=[vector, text], row_id=false, row_addr=false, ordered=true

        Parameters
        ----------
        verbose : bool, default False
            Use a verbose output format.

        Returns
        -------
        plan
        """  # noqa: E501

        results = ["Vector Search Plan:"]
        results.append(await self._inner.to_vector_query().explain_plan(verbose))
        results.append("FTS Search Plan:")
        results.append(await self._inner.to_fts_query().explain_plan(verbose))

        return "\n".join(results)
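A sketch of configuring the reranker on the new async hybrid builder; the reranker shown is the default, so the call mainly illustrates the normalize="rank" option:

from lancedb.rerankers import RRFReranker

async def reranked(table):
    query = table.query().nearest_to_text("hello").nearest_to([0.1, 0.2])
    return await query.rerank(RRFReranker(), normalize="rank").to_arrow()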
@@ -11,7 +11,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
from datetime import timedelta
import logging
from concurrent.futures import ThreadPoolExecutor

@@ -25,7 +24,7 @@ import pyarrow as pa
from overrides import override

from ..common import DATA
from ..db import DBConnection
from ..db import DBConnection, LOOP
from ..embeddings import EmbeddingFunctionConfig
from ..pydantic import LanceModel
from ..table import Table

@@ -45,9 +44,9 @@ class RemoteDBConnection(DBConnection):
        client_config: Union[ClientConfig, Dict[str, Any], None] = None,
        connection_timeout: Optional[float] = None,
        read_timeout: Optional[float] = None,
        storage_options: Optional[Dict[str, str]] = None,
    ):
        """Connect to a remote LanceDB database."""
        if isinstance(client_config, dict):
            client_config = ClientConfig(**client_config)
        elif client_config is None:

@@ -86,24 +85,16 @@ class RemoteDBConnection(DBConnection):
            raise ValueError(f"Invalid scheme: {parsed.scheme}, only accepts db://")
        self.db_name = parsed.netloc

        import nest_asyncio

        nest_asyncio.apply()
        try:
            self._loop = asyncio.get_running_loop()
        except RuntimeError:
            self._loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self._loop)

        self.client_config = client_config

        self._conn = self._loop.run_until_complete(
        self._conn = LOOP.run(
            connect_async(
                db_url,
                api_key=api_key,
                region=region,
                host_override=host_override,
                client_config=client_config,
                storage_options=storage_options,
            )
        )

@@ -127,9 +118,7 @@ class RemoteDBConnection(DBConnection):
        -------
        An iterator of table names.
        """
        return self._loop.run_until_complete(
            self._conn.table_names(start_after=page_token, limit=limit)
        )
        return LOOP.run(self._conn.table_names(start_after=page_token, limit=limit))

    @override
    def open_table(self, name: str, *, index_cache_size: Optional[int] = None) -> Table:

@@ -152,8 +141,8 @@ class RemoteDBConnection(DBConnection):
            " (there is no local cache to configure)"
        )

        table = self._loop.run_until_complete(self._conn.open_table(name))
        return RemoteTable(table, self.db_name, self._loop)
        table = LOOP.run(self._conn.open_table(name))
        return RemoteTable(table, self.db_name)

    @override
    def create_table(

@@ -268,7 +257,7 @@ class RemoteDBConnection(DBConnection):

        from .table import RemoteTable

        table = self._loop.run_until_complete(
        table = LOOP.run(
            self._conn.create_table(
                name,
                data,

@@ -278,7 +267,7 @@ class RemoteDBConnection(DBConnection):
                fill_value=fill_value,
            )
        )
        return RemoteTable(table, self.db_name, self._loop)
        return RemoteTable(table, self.db_name)

    @override
    def drop_table(self, name: str):

@@ -289,7 +278,7 @@ class RemoteDBConnection(DBConnection):
        name: str
            The name of the table.
        """
        self._loop.run_until_complete(self._conn.drop_table(name))
        LOOP.run(self._conn.drop_table(name))

    @override
    def rename_table(self, cur_name: str, new_name: str):

@@ -302,7 +291,7 @@ class RemoteDBConnection(DBConnection):
        new_name: str
            The new name of the table.
        """
        self._loop.run_until_complete(self._conn.rename_table(cur_name, new_name))
        LOOP.run(self._conn.rename_table(cur_name, new_name))

    async def close(self):
        """Close the connection to the database."""

@@ -12,12 +12,12 @@
# limitations under the License.

from datetime import timedelta
import asyncio
import logging
from functools import cached_property
from typing import Dict, Iterable, List, Optional, Union, Literal

from lancedb.index import FTS, BTree, Bitmap, HnswPq, HnswSq, IvfPq, LabelList
from lancedb.remote.db import LOOP
import pyarrow as pa

from lancedb.common import DATA, VEC, VECTOR_COLUMN_NAME

@@ -33,9 +33,7 @@ class RemoteTable(Table):
        self,
        table: AsyncTable,
        db_name: str,
        loop: Optional[asyncio.AbstractEventLoop] = None,
    ):
        self._loop = loop
        self._table = table
        self.db_name = db_name

@@ -56,12 +54,12 @@ class RemoteTable(Table):
        of this Table

        """
        return self._loop.run_until_complete(self._table.schema())
        return LOOP.run(self._table.schema())

    @property
    def version(self) -> int:
        """Get the current version of the table"""
        return self._loop.run_until_complete(self._table.version())
        return LOOP.run(self._table.version())

    @cached_property
    def embedding_functions(self) -> dict:

@@ -78,6 +76,10 @@ class RemoteTable(Table):
            self.schema.metadata
        )

    def list_versions(self):
        """List all versions of the table"""
        return LOOP.run(self._table.list_versions())

    def to_arrow(self) -> pa.Table:
        """to_arrow() is not yet supported on LanceDB cloud."""
        raise NotImplementedError("to_arrow() is not yet supported on LanceDB cloud.")

@@ -86,13 +88,19 @@ class RemoteTable(Table):
        """to_pandas() is not yet supported on LanceDB cloud."""
        raise NotImplementedError("to_pandas() is not yet supported on LanceDB cloud.")

    def checkout(self, version):
        return LOOP.run(self._table.checkout(version))

    def checkout_latest(self):
        return LOOP.run(self._table.checkout_latest())

    def list_indices(self):
        """List all the indices on the table"""
        return self._loop.run_until_complete(self._table.list_indices())
        return LOOP.run(self._table.list_indices())

    def index_stats(self, index_uuid: str):
        """List all the stats of a specified index"""
        return self._loop.run_until_complete(self._table.index_stats(index_uuid))
        return LOOP.run(self._table.index_stats(index_uuid))

    def create_scalar_index(
        self,

@@ -122,9 +130,7 @@ class RemoteTable(Table):
        else:
            raise ValueError(f"Unknown index type: {index_type}")

        self._loop.run_until_complete(
            self._table.create_index(column, config=config, replace=replace)
        )
        LOOP.run(self._table.create_index(column, config=config, replace=replace))

    def create_fts_index(
        self,

@@ -132,11 +138,26 @@ class RemoteTable(Table):
        *,
        replace: bool = False,
        with_position: bool = True,
        # tokenizer configs:
        base_tokenizer: str = "simple",
        language: str = "English",
        max_token_length: Optional[int] = 40,
        lower_case: bool = True,
        stem: bool = False,
        remove_stop_words: bool = False,
        ascii_folding: bool = False,
    ):
        config = FTS(with_position=with_position)
        self._loop.run_until_complete(
            self._table.create_index(column, config=config, replace=replace)
        config = FTS(
            with_position=with_position,
            base_tokenizer=base_tokenizer,
            language=language,
            max_token_length=max_token_length,
            lower_case=lower_case,
            stem=stem,
            remove_stop_words=remove_stop_words,
            ascii_folding=ascii_folding,
        )
        LOOP.run(self._table.create_index(column, config=config, replace=replace))

    def create_index(
        self,

@@ -217,9 +238,7 @@ class RemoteTable(Table):
            " 'IVF_PQ', 'IVF_HNSW_PQ', 'IVF_HNSW_SQ'"
        )

        self._loop.run_until_complete(
            self._table.create_index(vector_column_name, config=config)
        )
        LOOP.run(self._table.create_index(vector_column_name, config=config))

    def add(
        self,

@@ -251,7 +270,7 @@ class RemoteTable(Table):
            The value to use when filling vectors. Only used if on_bad_vectors="fill".

        """
        self._loop.run_until_complete(
        LOOP.run(
            self._table.add(
                data, mode=mode, on_bad_vectors=on_bad_vectors, fill_value=fill_value
            )

@@ -339,9 +358,7 @@ class RemoteTable(Table):
    def _execute_query(
        self, query: Query, batch_size: Optional[int] = None
    ) -> pa.RecordBatchReader:
        return self._loop.run_until_complete(
            self._table._execute_query(query, batch_size=batch_size)
        )
        return LOOP.run(self._table._execute_query(query, batch_size=batch_size))

    def merge_insert(self, on: Union[str, Iterable[str]]) -> LanceMergeInsertBuilder:
        """Returns a [`LanceMergeInsertBuilder`][lancedb.merge.LanceMergeInsertBuilder]

@@ -358,9 +375,7 @@ class RemoteTable(Table):
        on_bad_vectors: str,
        fill_value: float,
    ):
        self._loop.run_until_complete(
            self._table._do_merge(merge, new_data, on_bad_vectors, fill_value)
        )
        LOOP.run(self._table._do_merge(merge, new_data, on_bad_vectors, fill_value))

    def delete(self, predicate: str):
        """Delete rows from the table.

@@ -409,7 +424,7 @@ class RemoteTable(Table):
        x vector _distance # doctest: +SKIP
        0 2 [3.0, 4.0] 85.0 # doctest: +SKIP
        """
        self._loop.run_until_complete(self._table.delete(predicate))
        LOOP.run(self._table.delete(predicate))

    def update(
        self,

@@ -459,7 +474,7 @@ class RemoteTable(Table):
        2 2 [10.0, 10.0] # doctest: +SKIP

        """
        self._loop.run_until_complete(
        LOOP.run(
            self._table.update(where=where, updates=values, updates_sql=values_sql)
        )

@@ -489,22 +504,16 @@ class RemoteTable(Table):
        )

    def count_rows(self, filter: Optional[str] = None) -> int:
        return self._loop.run_until_complete(self._table.count_rows(filter))
        return LOOP.run(self._table.count_rows(filter))

    def add_columns(self, transforms: Dict[str, str]):
        raise NotImplementedError(
            "add_columns() is not yet supported on the LanceDB cloud"
        )
        return LOOP.run(self._table.add_columns(transforms))

    def alter_columns(self, alterations: Iterable[Dict[str, str]]):
        raise NotImplementedError(
            "alter_columns() is not yet supported on the LanceDB cloud"
        )
    def alter_columns(self, *alterations: Iterable[Dict[str, str]]):
        return LOOP.run(self._table.alter_columns(*alterations))

    def drop_columns(self, columns: Iterable[str]):
        raise NotImplementedError(
            "drop_columns() is not yet supported on the LanceDB cloud"
        )
        return LOOP.run(self._table.drop_columns(columns))


def add_index(tbl: pa.Table, i: int) -> pa.Table:

@@ -41,7 +41,7 @@ class CohereReranker(Reranker):

    def __init__(
        self,
        model_name: str = "rerank-english-v2.0",
        model_name: str = "rerank-english-v3.0",
        column: str = "text",
        top_n: Union[int, None] = None,
        return_score="relevance",

@@ -8,7 +8,7 @@ import inspect
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import timedelta
from datetime import datetime, timedelta
from functools import cached_property
from typing import (
    TYPE_CHECKING,

@@ -967,8 +967,6 @@ class Table(ABC):
        """
        Add new columns with defined values.

        This is not yet available in LanceDB Cloud.

        Parameters
        ----------
        transforms: Dict[str, str]

@@ -978,20 +976,21 @@ class Table(ABC):
        """
@abstractmethod
|
||||
def alter_columns(self, alterations: Iterable[Dict[str, str]]):
|
||||
def alter_columns(self, *alterations: Iterable[Dict[str, str]]):
|
||||
"""
|
||||
Alter column names and nullability.
|
||||
|
||||
This is not yet available in LanceDB Cloud.
|
||||
|
||||
alterations : Iterable[Dict[str, Any]]
|
||||
A sequence of dictionaries, each with the following keys:
|
||||
- "path": str
|
||||
The column path to alter. For a top-level column, this is the name.
|
||||
For a nested column, this is the dot-separated path, e.g. "a.b.c".
|
||||
- "name": str, optional
|
||||
- "rename": str, optional
|
||||
The new name of the column. If not specified, the column name is
|
||||
not changed.
|
||||
- "data_type": pyarrow.DataType, optional
|
||||
The new data type of the column. Existing values will be casted
|
||||
to this type. If not specified, the column data type is not changed.
|
||||
- "nullable": bool, optional
|
||||
Whether the column should be nullable. If not specified, the column
|
||||
nullability is not changed. Only non-nullable columns can be changed
|
||||
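To make the docstring concrete, here is a hedged sketch exercising each documented key (the column names are illustrative):

```python
import pyarrow as pa

tbl.alter_columns(
    {"path": "meta.tags", "rename": "labels"},  # rename a nested column by dotted path
    {"path": "id", "data_type": pa.int32()},    # cast an existing column's values
    {"path": "embedding", "nullable": True},    # non-nullable -> nullable only
)
```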
@@ -1004,14 +1003,45 @@ class Table(ABC):
        """
        Drop columns from the table.

        This is not yet available in LanceDB Cloud.

        Parameters
        ----------
        columns : Iterable[str]
            The names of the columns to drop.
        """

    @abstractmethod
    def checkout(self):
        """
        Checks out a specific version of the Table

        Any read operation on the table will now access the data at the checked out
        version. As a consequence, calling this method will disable any read consistency
        interval that was previously set.

        This is a read-only operation that turns the table into a sort of "view"
        or "detached head". Other table instances will not be affected. To make the
        change permanent you can use the `[Self::restore]` method.

        Any operation that modifies the table will fail while the table is in a checked
        out state.

        To return the table to a normal state use `[Self::checkout_latest]`
        """

    @abstractmethod
    def checkout_latest(self):
        """
        Ensures the table is pointing at the latest version

        This can be used to manually update a table when the read_consistency_interval
        is None
        It can also be used to undo a `[Self::checkout]` operation
        """

    @abstractmethod
    def list_versions(self):
        """List all versions of the table"""

    @cached_property
    def _dataset_uri(self) -> str:
        return _table_uri(self._conn.uri, self.name)
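The "detached head" semantics read most clearly as a short sketch (a hedged example; it assumes the concrete implementations accept a version argument, as the async API later in this diff does):

```python
tbl.checkout(5)          # pin reads at version 5 (a read-only "detached head")
print(tbl.count_rows())  # reads now see version 5
# tbl.add(...)           # any write raises while checked out

tbl.checkout_latest()    # undo the checkout; reads track the latest version again
# or, to make the pinned version the new latest instead:
# tbl.checkout(5); tbl.restore()
```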
@@ -1047,13 +1077,16 @@ class _LanceLatestDatasetRef(_LanceDatasetRef):
    index_cache_size: Optional[int] = None
    read_consistency_interval: Optional[timedelta] = None
    last_consistency_check: Optional[float] = None
    storage_options: Optional[Dict[str, str]] = None
    _dataset: Optional[LanceDataset] = None

    @property
    def dataset(self) -> LanceDataset:
        if not self._dataset:
            self._dataset = lance.dataset(
                self.uri, index_cache_size=self.index_cache_size
                self.uri,
                index_cache_size=self.index_cache_size,
                storage_options=self.storage_options,
            )
            self.last_consistency_check = time.monotonic()
        elif self.read_consistency_interval is not None:
@@ -1084,13 +1117,17 @@ class _LanceTimeTravelRef(_LanceDatasetRef):
    uri: str
    version: int
    index_cache_size: Optional[int] = None
    storage_options: Optional[Dict[str, str]] = None
    _dataset: Optional[LanceDataset] = None

    @property
    def dataset(self) -> LanceDataset:
        if not self._dataset:
            self._dataset = lance.dataset(
                self.uri, version=self.version, index_cache_size=self.index_cache_size
                self.uri,
                version=self.version,
                index_cache_size=self.index_cache_size,
                storage_options=self.storage_options,
            )
        return self._dataset

@@ -1139,24 +1176,27 @@ class LanceTable(Table):
                uri=self._dataset_uri,
                version=version,
                index_cache_size=index_cache_size,
                storage_options=connection.storage_options,
            )
        else:
            self._ref = _LanceLatestDatasetRef(
                uri=self._dataset_uri,
                read_consistency_interval=connection.read_consistency_interval,
                index_cache_size=index_cache_size,
                storage_options=connection.storage_options,
            )

    @classmethod
    def open(cls, db, name, **kwargs):
        tbl = cls(db, name, **kwargs)
        fs, path = fs_from_uri(tbl._dataset_path)
        file_info = fs.get_file_info(path)
        if file_info.type != pa.fs.FileType.Directory:
            raise FileNotFoundError(
                f"Table {name} does not exist."
                f"Please first call db.create_table({name}, data)"
            )

        # check the dataset exists
        try:
            tbl.version
        except ValueError as e:
            if "Not found:" in str(e):
                raise FileNotFoundError(f"Table {name} does not exist")
            raise e

        return tbl

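The thread through these hunks is that `storage_options` set on the connection now reach every `lance.dataset()` open the table refs make, including time-travel opens. A hedged sketch (bucket and option keys are illustrative):

```python
import lancedb

db = lancedb.connect(
    "s3://my-bucket/lancedb",                 # hypothetical bucket
    storage_options={"region": "us-east-1"},  # forwarded to each lance.dataset() call
)
tbl = db.open_table("my_table")               # the latest-version ref carries the options
print(tbl.count_rows())                       # time-travel refs carry them too
```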
@@ -1584,11 +1624,7 @@ class LanceTable(Table):
            on_bad_vectors=on_bad_vectors,
            fill_value=fill_value,
        )
        # Access the dataset_mut property to ensure that the dataset is mutable.
        self._ref.dataset_mut
        self._ref.dataset = lance.write_dataset(
            data, self._dataset_uri, schema=self.schema, mode=mode
        )
        self._ref.dataset_mut.insert(data, mode=mode, schema=self.schema)

    def merge(
        self,
@@ -1872,7 +1908,13 @@ class LanceTable(Table):

        empty = pa.Table.from_batches([], schema=schema)
        try:
            lance.write_dataset(empty, tbl._dataset_uri, schema=schema, mode=mode)
            lance.write_dataset(
                empty,
                tbl._dataset_uri,
                schema=schema,
                mode=mode,
                storage_options=db.storage_options,
            )
        except OSError as err:
            if "Dataset already exists" in str(err) and exist_ok:
                if tbl.schema != schema:
@@ -1959,6 +2001,7 @@ class LanceTable(Table):
            "metric": query.metric,
            "nprobes": query.nprobes,
            "refine_factor": query.refine_factor,
            "ef": query.ef,
        }
        return ds.scanner(
            columns=query.columns,
@@ -2697,7 +2740,7 @@ class AsyncTable:

    def vector_search(
        self,
        query_vector: Optional[Union[VEC, Tuple]] = None,
        query_vector: Union[VEC, Tuple],
    ) -> AsyncVectorQuery:
        """
        Search the table with a given query vector.
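Two small API shifts land here: `query_vector` is now required on `AsyncTable.vector_search`, and the HNSW `ef` search parameter is threaded through to the scanner. A hedged sketch (vector values are illustrative):

```python
results = await (
    tbl.vector_search([0.1, 0.2])  # query_vector must now be supplied up front
    .ef(64)                        # HNSW search width, newly plumbed through
    .limit(5)
    .to_arrow()
)
```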
@@ -2736,6 +2779,8 @@ class AsyncTable:
            async_query = async_query.refine_factor(query.refine_factor)
        if query.vector_column:
            async_query = async_query.column(query.vector_column)
        if query.ef:
            async_query = async_query.ef(query.ef)

        if not query.prefilter:
            async_query = async_query.postfilter()
@@ -2887,6 +2932,53 @@ class AsyncTable:

        return await self._inner.update(updates_sql, where)

    async def add_columns(self, transforms: Dict[str, str]):
        """
        Add new columns with defined values.

        Parameters
        ----------
        transforms: Dict[str, str]
            A map of column name to a SQL expression to use to calculate the
            value of the new column. These expressions will be evaluated for
            each row in the table, and can reference existing columns.
        """
        await self._inner.add_columns(list(transforms.items()))

    async def alter_columns(self, *alterations: Iterable[Dict[str, str]]):
        """
        Alter column names and nullability.

        alterations : Iterable[Dict[str, Any]]
            A sequence of dictionaries, each with the following keys:
            - "path": str
                The column path to alter. For a top-level column, this is the name.
                For a nested column, this is the dot-separated path, e.g. "a.b.c".
            - "rename": str, optional
                The new name of the column. If not specified, the column name is
                not changed.
            - "data_type": pyarrow.DataType, optional
                The new data type of the column. Existing values will be casted
                to this type. If not specified, the column data type is not changed.
            - "nullable": bool, optional
                Whether the column should be nullable. If not specified, the column
                nullability is not changed. Only non-nullable columns can be changed
                to nullable. Currently, you cannot change a nullable column to
                non-nullable.
        """
        await self._inner.alter_columns(alterations)

    async def drop_columns(self, columns: Iterable[str]):
        """
        Drop columns from the table.

        Parameters
        ----------
        columns : Iterable[str]
            The names of the columns to drop.
        """
        await self._inner.drop_columns(columns)

    async def version(self) -> int:
        """
        Retrieve the version of the table
@@ -2899,6 +2991,19 @@ class AsyncTable:
        """
        return await self._inner.version()

    async def list_versions(self):
        """
        List all versions of the table
        """
        versions = await self._inner.list_versions()
        for v in versions:
            ts_nanos = v["timestamp"]
            v["timestamp"] = datetime.fromtimestamp(ts_nanos // 1e9) + timedelta(
                microseconds=(ts_nanos % 1e9) // 1e3
            )

        return versions

    async def checkout(self, version):
        """
        Checks out a specific version of the Table
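`list_versions` converts the binding's integer nanosecond timestamps into `datetime` objects: whole seconds come from `ts_nanos // 1e9`, and the sub-second remainder is folded back in as microseconds. A hedged usage sketch:

```python
versions = await tbl.list_versions()
for v in versions:
    print(v["version"], v["timestamp"], v["metadata"])  # timestamp is a datetime

await tbl.checkout(versions[0]["version"])  # pin reads at the oldest version
await tbl.checkout_latest()                 # return to tracking the latest version
```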
@@ -599,7 +599,9 @@ async def test_create_in_v2_mode(tmp_path):
    )

async def is_in_v2_mode(tbl):
    batches = await tbl.query().to_batches(max_batch_length=1024 * 10)
    batches = (
        await tbl.query().limit(10 * 1024).to_batches(max_batch_length=1024 * 10)
    )
    num_batches = 0
    async for batch in batches:
        num_batches += 1

python/python/tests/test_duckdb.py (new file, +21 lines)
@@ -0,0 +1,21 @@
import duckdb
import pyarrow as pa

import lancedb
from lancedb.integrations.pyarrow import PyarrowDatasetAdapter


def test_basic_query(tmp_path):
    data = pa.table({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
    conn = lancedb.connect(tmp_path)
    tbl = conn.create_table("test", data)

    adapter = PyarrowDatasetAdapter(tbl)  # noqa: F841

    duck_conn = duckdb.connect()

    results = duck_conn.sql("SELECT SUM(x) FROM adapter").fetchall()
    assert results[0][0] == 10

    results = duck_conn.sql("SELECT SUM(y) FROM adapter").fetchall()
    assert results[0][0] == 26
@@ -90,10 +90,13 @@ def test_embedding_with_bad_results(tmp_path):
        self, texts: Union[List[str], np.ndarray]
    ) -> list[Union[np.array, None]]:
        # Return None, which is bad if field is non-nullable
        return [
            None if i % 2 == 0 else np.random.randn(self.ndims())
        a = [
            np.full(self.ndims(), np.nan)
            if i % 2 == 0
            else np.random.randn(self.ndims())
            for i in range(len(texts))
        ]
        return a

    db = lancedb.connect(tmp_path)
    registry = EmbeddingFunctionRegistry.get_instance()

@@ -1,15 +1,6 @@
# Copyright (c) 2023. LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

import importlib
import io
import os
@@ -17,6 +8,7 @@ import os
import lancedb
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
@@ -444,6 +436,30 @@ def test_watsonx_embedding(tmp_path):
    assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world"


@pytest.mark.slow
@pytest.mark.skipif(
    os.environ.get("OPENAI_API_KEY") is None, reason="OPENAI_API_KEY not set"
)
def test_openai_with_empty_strs(tmp_path):
    model = get_registry().get("openai").create(max_retries=0)

    class TextModel(LanceModel):
        text: str = model.SourceField()
        vector: Vector(model.ndims()) = model.VectorField()

    df = pd.DataFrame({"text": ["hello world", ""]})
    db = lancedb.connect(tmp_path)
    tbl = db.create_table("test", schema=TextModel, mode="overwrite")

    tbl.add(df, on_bad_vectors="skip")
    tb = tbl.to_arrow()
    assert tb.schema.field_by_name("vector").type == pa.list_(
        pa.float32(), model.ndims()
    )
    assert len(tb) == 2
    assert tb["vector"].is_null().to_pylist() == [False, True]


@pytest.mark.slow
@pytest.mark.skipif(
    importlib.util.find_spec("ollama") is None, reason="Ollama not installed"

python/python/tests/test_hybrid_query.py (new file, +111 lines)
@@ -0,0 +1,111 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

import lancedb

import pyarrow as pa
import pytest
import pytest_asyncio

from lancedb.index import FTS
from lancedb.table import AsyncTable


@pytest_asyncio.fixture
async def table(tmpdir_factory) -> AsyncTable:
    tmp_path = str(tmpdir_factory.mktemp("data"))
    db = await lancedb.connect_async(tmp_path)
    data = pa.table(
        {
            "text": pa.array(["a", "b", "cat", "dog"]),
            "vector": pa.array(
                [[0.1, 0.1], [2, 2], [-0.1, -0.1], [0.5, -0.5]],
                type=pa.list_(pa.float32(), list_size=2),
            ),
        }
    )
    table = await db.create_table("test", data)
    await table.create_index("text", config=FTS(with_position=False))
    return table


@pytest.mark.asyncio
async def test_async_hybrid_query(table: AsyncTable):
    result = await (
        table.query().nearest_to([0.0, 0.4]).nearest_to_text("dog").limit(2).to_arrow()
    )
    assert len(result) == 2
    # ensure we get results that would match well for text and vector
    assert result["text"].to_pylist() == ["a", "dog"]

    # ensure there is no rowid by default
    assert "_rowid" not in result


@pytest.mark.asyncio
async def test_async_hybrid_query_with_row_ids(table: AsyncTable):
    result = await (
        table.query()
        .nearest_to([0.0, 0.4])
        .nearest_to_text("dog")
        .limit(2)
        .with_row_id()
        .to_arrow()
    )
    assert len(result) == 2
    # ensure we get results that would match well for text and vector
    assert result["text"].to_pylist() == ["a", "dog"]
    assert result["_rowid"].to_pylist() == [0, 3]


@pytest.mark.asyncio
async def test_async_hybrid_query_filters(table: AsyncTable):
    # test that query params are passed down from the regular builder to
    # child vector/fts builders
    result = await (
        table.query()
        .where("text not in ('a', 'dog')")
        .nearest_to([0.3, 0.3])
        .nearest_to_text("*a*")
        .limit(2)
        .to_arrow()
    )
    assert len(result) == 2
    # ensure we get results that would match well for text and vector
    assert result["text"].to_pylist() == ["cat", "b"]


@pytest.mark.asyncio
async def test_async_hybrid_query_default_limit(table: AsyncTable):
    # add 100 new rows
    new_rows = []
    for i in range(100):
        if i < 2:
            new_rows.append({"text": "close_vec", "vector": [0.1, 0.1]})
        else:
            new_rows.append({"text": "far_vec", "vector": [5 * i, 5 * i]})
    await table.add(new_rows)
    result = await (
        table.query().nearest_to_text("dog").nearest_to([0.1, 0.1]).to_arrow()
    )

    # assert we got the default limit of 10
    assert len(result) == 10

    # assert we got the closest vectors and the text searched for
    texts = result["text"].to_pylist()
    assert texts.count("close_vec") == 2
    assert texts.count("dog") == 1
    assert texts.count("a") == 1


@pytest.mark.asyncio
async def test_explain_plan(table: AsyncTable):
    plan = await (
        table.query().nearest_to_text("dog").nearest_to([0.1, 0.1]).explain_plan(True)
    )

    assert "Vector Search Plan" in plan
    assert "KNNVectorDistance" in plan
    assert "FTS Search Plan" in plan
    assert "LanceScan" in plan

python/python/tests/test_pyarrow.py (new file, +47 lines)
@@ -0,0 +1,47 @@
import pyarrow as pa

import lancedb
from lancedb.integrations.pyarrow import PyarrowDatasetAdapter


def test_dataset_adapter(tmp_path):
    data = pa.table({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
    conn = lancedb.connect(tmp_path)
    tbl = conn.create_table("test", data)

    adapter = PyarrowDatasetAdapter(tbl)

    assert adapter.count_rows() == 4
    assert adapter.count_rows("x > 2") == 2
    assert adapter.schema == data.schema
    assert adapter.head(2) == data.slice(0, 2)
    assert adapter.to_table() == data
    assert adapter.to_batches().read_all() == data
    assert adapter.scanner().to_table() == data
    assert adapter.scanner().to_batches().read_all() == data

    assert adapter.scanner().projected_schema == data.schema
    assert adapter.scanner(columns=["x"]).projected_schema == pa.schema(
        [data.schema.field("x")]
    )
    assert adapter.scanner(columns=["x"]).to_table() == pa.table({"x": [1, 2, 3, 4]})

    # Make sure we bypass the limit
    data = pa.table({"x": range(100)})
    tbl = conn.create_table("test2", data)

    adapter = PyarrowDatasetAdapter(tbl)

    assert adapter.count_rows() == 100
    assert adapter.to_table().num_rows == 100
    assert adapter.head(10).num_rows == 10

    # Empty table
    tbl = conn.create_table("test3", None, schema=pa.schema({"x": pa.int64()}))
    adapter = PyarrowDatasetAdapter(tbl)

    assert adapter.count_rows() == 0
    assert adapter.to_table().num_rows == 0
    assert adapter.head(10).num_rows == 0

    assert adapter.scanner().projected_schema == pa.schema({"x": pa.int64()})
@@ -1,16 +1,5 @@
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

import json
import sys
@@ -172,6 +161,26 @@ def test_pydantic_to_arrow_py38():
    assert schema == expect_schema


def test_nullable_vector():
    class NullableModel(pydantic.BaseModel):
        vec: Vector(16, nullable=False)

    schema = pydantic_to_schema(NullableModel)
    assert schema == pa.schema([pa.field("vec", pa.list_(pa.float32(), 16), False)])

    class DefaultModel(pydantic.BaseModel):
        vec: Vector(16)

    schema = pydantic_to_schema(DefaultModel)
    assert schema == pa.schema([pa.field("vec", pa.list_(pa.float32(), 16), True)])

    class NotNullableModel(pydantic.BaseModel):
        vec: Vector(16)

    schema = pydantic_to_schema(NotNullableModel)
    assert schema == pa.schema([pa.field("vec", pa.list_(pa.float32(), 16), True)])


def test_fixed_size_list_field():
    class TestModel(pydantic.BaseModel):
        vec: Vector(16)
@@ -192,7 +201,7 @@ def test_fixed_size_list_field():
    schema = pydantic_to_schema(TestModel)
    assert schema == pa.schema(
        [
            pa.field("vec", pa.list_(pa.float32(), 16), False),
            pa.field("vec", pa.list_(pa.float32(), 16)),
            pa.field("li", pa.list_(pa.int64()), False),
        ]
    )

@@ -1,21 +1,9 @@
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

import unittest.mock as mock
from datetime import timedelta
from typing import Optional

import lance
import lancedb
from lancedb.index import IvfPq
import numpy as np
@@ -23,41 +11,15 @@ import pandas.testing as tm
import pyarrow as pa
import pytest
import pytest_asyncio
from lancedb.db import LanceDBConnection
from lancedb.pydantic import LanceModel, Vector
from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query
from lancedb.table import AsyncTable, LanceTable


class MockTable:
    def __init__(self, tmp_path):
        self.uri = tmp_path
        self._conn = LanceDBConnection(self.uri)

    def to_lance(self):
        return lance.dataset(self.uri)

    def _execute_query(self, query, batch_size: Optional[int] = None):
        ds = self.to_lance()
        return ds.scanner(
            columns=query.columns,
            filter=query.filter,
            prefilter=query.prefilter,
            nearest={
                "column": query.vector_column,
                "q": query.vector,
                "k": query.k,
                "metric": query.metric,
                "nprobes": query.nprobes,
                "refine_factor": query.refine_factor,
            },
            batch_size=batch_size,
            offset=query.offset,
        ).to_reader()


@pytest.fixture
def table(tmp_path) -> MockTable:
@pytest.fixture(scope="module")
def table(tmpdir_factory) -> lancedb.table.Table:
    tmp_path = str(tmpdir_factory.mktemp("data"))
    db = lancedb.connect(tmp_path)
    df = pa.table(
        {
            "vector": pa.array(
@@ -68,8 +30,7 @@ def table(tmp_path) -> MockTable:
            "float_field": pa.array([1.0, 2.0]),
        }
    )
    lance.write_dataset(df, tmp_path)
    return MockTable(tmp_path)
    return db.create_table("test", df)


@pytest_asyncio.fixture
@@ -126,6 +87,12 @@ def test_query_builder(table):
    assert all(np.array(rs[0]["vector"]) == [1, 2])


def test_with_row_id(table: lancedb.table.Table):
    rs = table.search().with_row_id(True).to_arrow()
    assert "_rowid" in rs.column_names
    assert rs["_rowid"].to_pylist() == [0, 1]


def test_vector_query_with_no_limit(table):
    with pytest.raises(ValueError):
        LanceVectorQueryBuilder(table, [0, 0], "vector").limit(0).select(
@@ -365,6 +332,12 @@ async def test_query_to_pandas_async(table_async: AsyncTable):
    assert df.shape == (0, 4)


@pytest.mark.asyncio
async def test_none_query(table_async: AsyncTable):
    with pytest.raises(ValueError):
        await table_async.query().nearest_to(None).to_arrow()


@pytest.mark.asyncio
async def test_fast_search_async(tmp_path):
    db = await lancedb.connect_async(tmp_path)

@@ -1,6 +1,7 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

from concurrent.futures import ThreadPoolExecutor
import contextlib
from datetime import timedelta
import http.server
@@ -103,6 +104,47 @@ async def test_async_remote_db():
    assert table_names == []


@pytest.mark.asyncio
async def test_async_checkout():
    def handler(request):
        if request.path == "/v1/table/test/describe/":
            request.send_response(200)
            request.send_header("Content-Type", "application/json")
            request.end_headers()
            response = json.dumps({"version": 42, "schema": {"fields": []}})
            request.wfile.write(response.encode())
            return

        content_len = int(request.headers.get("Content-Length"))
        body = request.rfile.read(content_len)
        body = json.loads(body)

        print("body is", body)

        count = 0
        if body["version"] == 1:
            count = 100
        elif body["version"] == 2:
            count = 200
        elif body["version"] is None:
            count = 300

        request.send_response(200)
        request.send_header("Content-Type", "application/json")
        request.end_headers()
        request.wfile.write(json.dumps(count).encode())

    async with mock_lancedb_connection_async(handler) as db:
        table = await db.open_table("test")
        assert await table.count_rows() == 300
        await table.checkout(1)
        assert await table.count_rows() == 100
        await table.checkout(2)
        assert await table.count_rows() == 200
        await table.checkout_latest()
        assert await table.count_rows() == 300


@pytest.mark.asyncio
async def test_http_error():
    request_id_holder = {"request_id": None}
@@ -146,6 +188,85 @@ async def test_retry_error():
    assert cause.status_code == 429


def test_table_add_in_threadpool():
    def handler(request):
        if request.path == "/v1/table/test/insert/":
            request.send_response(200)
            request.end_headers()
        elif request.path == "/v1/table/test/create/?mode=create":
            request.send_response(200)
            request.send_header("Content-Type", "application/json")
            request.end_headers()
            request.wfile.write(b"{}")
        elif request.path == "/v1/table/test/describe/":
            request.send_response(200)
            request.send_header("Content-Type", "application/json")
            request.end_headers()
            payload = json.dumps(
                dict(
                    version=1,
                    schema=dict(
                        fields=[
                            dict(name="id", type={"type": "int64"}, nullable=False),
                        ]
                    ),
                )
            )
            request.wfile.write(payload.encode())
        else:
            request.send_response(404)
            request.end_headers()

    with mock_lancedb_connection(handler) as db:
        table = db.create_table("test", [{"id": 1}])
        with ThreadPoolExecutor(3) as executor:
            futures = []
            for _ in range(10):
                future = executor.submit(table.add, [{"id": 1}])
                futures.append(future)

            for future in futures:
                future.result()


def test_table_create_indices():
    def handler(request):
        if request.path == "/v1/table/test/create_index/":
            request.send_response(200)
            request.end_headers()
        elif request.path == "/v1/table/test/create/?mode=create":
            request.send_response(200)
            request.send_header("Content-Type", "application/json")
            request.end_headers()
            request.wfile.write(b"{}")
        elif request.path == "/v1/table/test/describe/":
            request.send_response(200)
            request.send_header("Content-Type", "application/json")
            request.end_headers()
            payload = json.dumps(
                dict(
                    version=1,
                    schema=dict(
                        fields=[
                            dict(name="id", type={"type": "int64"}, nullable=False),
                        ]
                    ),
                )
            )
            request.wfile.write(payload.encode())
        else:
            request.send_response(404)
            request.end_headers()

    with mock_lancedb_connection(handler) as db:
        # Parameters are well-tested through local and async tests.
        # This is a smoke-test.
        table = db.create_table("test", [{"id": 1}])
        table.create_scalar_index("id")
        table.create_fts_index("text")
        table.create_scalar_index("vector")


@contextlib.contextmanager
def query_test_table(query_handler):
    def handler(request):
@@ -185,8 +306,10 @@ def test_query_sync_minimal():
        "k": 10,
        "prefilter": False,
        "refine_factor": None,
        "ef": None,
        "vector": [1.0, 2.0, 3.0],
        "nprobes": 20,
        "version": None,
    }

    return pa.table({"id": [1, 2, 3]})
@@ -204,6 +327,7 @@ def test_query_sync_empty_query():
        "filter": "true",
        "vector": [],
        "columns": ["id"],
        "version": None,
    }

    return pa.table({"id": [1, 2, 3]})
@@ -223,11 +347,13 @@ def test_query_sync_maximal():
        "refine_factor": 10,
        "vector": [1.0, 2.0, 3.0],
        "nprobes": 5,
        "ef": None,
        "filter": "id > 0",
        "columns": ["id", "name"],
        "vector_column": "vector2",
        "fast_search": True,
        "with_row_id": True,
        "version": None,
    }

    return pa.table({"id": [1, 2, 3], "name": ["a", "b", "c"]})
@@ -266,6 +392,7 @@ def test_query_sync_fts():
        },
        "k": 10,
        "vector": [],
        "version": None,
    }

    return pa.table({"id": [1, 2, 3]})
@@ -282,6 +409,7 @@ def test_query_sync_fts():
        "k": 42,
        "vector": [],
        "with_row_id": True,
        "version": None,
    }

    return pa.table({"id": [1, 2, 3]})
@@ -307,6 +435,7 @@ def test_query_sync_hybrid():
        "k": 42,
        "vector": [],
        "with_row_id": True,
        "version": None,
    }
    return pa.table({"_rowid": [1, 2, 3], "_score": [0.1, 0.2, 0.3]})
else:
@@ -318,7 +447,9 @@ def test_query_sync_hybrid():
        "refine_factor": None,
        "vector": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        "nprobes": 20,
        "ef": None,
        "with_row_id": True,
        "version": None,
    }
    return pa.table({"_rowid": [1, 2, 3], "_distance": [0.1, 0.2, 0.3]})

@@ -30,6 +30,7 @@ class MockDB:
    def __init__(self, uri: Path):
        self.uri = str(uri)
        self.read_consistency_interval = None
        self.storage_options = None

    @functools.cached_property
    def is_managed_remote(self) -> bool:
@@ -1292,6 +1293,19 @@ def test_add_columns(tmp_path):
    assert table.to_arrow().column_names == ["id", "new_col"]
    assert table.to_arrow()["new_col"].to_pylist() == [2, 3]

    table.add_columns({"null_int": "cast(null as bigint)"})
    assert table.schema.field("null_int").type == pa.int64()


@pytest.mark.asyncio
async def test_add_columns_async(db_async: AsyncConnection):
    data = pa.table({"id": [0, 1]})
    table = await db_async.create_table("my_table", data=data)
    await table.add_columns({"new_col": "id + 2"})
    data = await table.to_arrow()
    assert data.column_names == ["id", "new_col"]
    assert data["new_col"].to_pylist() == [2, 3]


def test_alter_columns(tmp_path):
    db = lancedb.connect(tmp_path)
@@ -1301,6 +1315,18 @@ def test_alter_columns(tmp_path):
    assert table.to_arrow().column_names == ["new_id"]


@pytest.mark.asyncio
async def test_alter_columns_async(db_async: AsyncConnection):
    data = pa.table({"id": [0, 1]})
    table = await db_async.create_table("my_table", data=data)
    await table.alter_columns({"path": "id", "rename": "new_id"})
    assert (await table.to_arrow()).column_names == ["new_id"]
    await table.alter_columns(dict(path="new_id", data_type=pa.int16(), nullable=True))
    data = await table.to_arrow()
    assert data.column(0).type == pa.int16()
    assert data.schema.field(0).nullable


def test_drop_columns(tmp_path):
    db = lancedb.connect(tmp_path)
    data = pa.table({"id": [0, 1], "category": ["a", "b"]})
@@ -1309,6 +1335,14 @@ def test_drop_columns(tmp_path):
    assert table.to_arrow().column_names == ["id"]


@pytest.mark.asyncio
async def test_drop_columns_async(db_async: AsyncConnection):
    data = pa.table({"id": [0, 1], "category": ["a", "b"]})
    table = await db_async.create_table("my_table", data=data)
    await table.drop_columns(["category"])
    assert (await table.to_arrow()).column_names == ["id"]


@pytest.mark.asyncio
async def test_time_travel(db_async: AsyncConnection):
    # Setup

@@ -10,7 +10,7 @@ use arrow::{
use futures::stream::StreamExt;
use lancedb::arrow::SendableRecordBatchStream;
use pyo3::{pyclass, pymethods, Bound, PyAny, PyObject, PyRef, PyResult, Python};
use pyo3_asyncio_0_21::tokio::future_into_py;
use pyo3_async_runtimes::tokio::future_into_py;

use crate::error::PythonErrorExt;


@@ -9,7 +9,7 @@ use pyo3::{
    exceptions::{PyRuntimeError, PyValueError},
    pyclass, pyfunction, pymethods, Bound, FromPyObject, PyAny, PyRef, PyResult, Python,
};
use pyo3_asyncio_0_21::tokio::future_into_py;
use pyo3_async_runtimes::tokio::future_into_py;

use crate::{error::PythonErrorExt, table::Table};

@@ -58,6 +58,7 @@ impl Connection {
        self.inner.take();
    }

    #[pyo3(signature = (start_after=None, limit=None))]
    pub fn table_names(
        self_: PyRef<'_, Self>,
        start_after: Option<String>,
@@ -74,6 +75,7 @@ impl Connection {
        future_into_py(self_.py(), async move { op.execute().await.infer_error() })
    }

    #[pyo3(signature = (name, mode, data, storage_options=None, data_storage_version=None, enable_v2_manifest_paths=None))]
    pub fn create_table<'a>(
        self_: PyRef<'a, Self>,
        name: String,
@@ -111,6 +113,7 @@ impl Connection {
        })
    }

    #[pyo3(signature = (name, mode, schema, storage_options=None, data_storage_version=None, enable_v2_manifest_paths=None))]
    pub fn create_empty_table<'a>(
        self_: PyRef<'a, Self>,
        name: String,
@@ -198,6 +201,7 @@ impl Connection {
}

#[pyfunction]
#[pyo3(signature = (uri, api_key=None, region=None, host_override=None, read_consistency_interval=None, client_config=None, storage_options=None))]
#[allow(clippy::too_many_arguments)]
pub fn connect(
    py: Python,

@@ -138,7 +138,9 @@ fn http_from_rust_error(
    status_code: Option<u16>,
) -> PyResult<PyErr> {
    let message = err.to_string();
    let http_err_cls = py.import("lancedb.remote.errors")?.getattr("HttpError")?;
    let http_err_cls = py
        .import_bound("lancedb.remote.errors")?
        .getattr("HttpError")?;
    let py_err = http_err_cls.call1((message, request_id, status_code))?;

    // Reset the traceback since it doesn't provide additional information.
@@ -149,5 +151,5 @@ fn http_from_rust_error(
        py_err.setattr(intern!(py, "__cause__"), cause_err)?;
    }

    Ok(PyErr::from_value(py_err))
    Ok(PyErr::from_value_bound(py_err))
}

@@ -47,6 +47,7 @@ impl Index {

#[pymethods]
impl Index {
    #[pyo3(signature = (distance_type=None, num_partitions=None, num_sub_vectors=None, max_iterations=None, sample_rate=None))]
    #[staticmethod]
    pub fn ivf_pq(
        distance_type: Option<String>,
@@ -106,6 +107,7 @@ impl Index {
        })
    }

    #[pyo3(signature = (with_position=None, base_tokenizer=None, language=None, max_token_length=None, lower_case=None, stem=None, remove_stop_words=None, ascii_folding=None))]
    #[allow(clippy::too_many_arguments)]
    #[staticmethod]
    pub fn fts(
@@ -146,6 +148,7 @@ impl Index {
        }
    }

    #[pyo3(signature = (distance_type=None, num_partitions=None, num_sub_vectors=None, max_iterations=None, sample_rate=None, m=None, ef_construction=None))]
    #[staticmethod]
    pub fn hnsw_pq(
        distance_type: Option<String>,
@@ -184,6 +187,7 @@ impl Index {
        })
    }

    #[pyo3(signature = (distance_type=None, num_partitions=None, max_iterations=None, sample_rate=None, m=None, ef_construction=None))]
    #[staticmethod]
    pub fn hnsw_sq(
        distance_type: Option<String>,

@@ -16,7 +16,11 @@ use arrow::RecordBatchStream;
use connection::{connect, Connection};
use env_logger::Env;
use index::{Index, IndexConfig};
use pyo3::{pymodule, types::PyModule, wrap_pyfunction, PyResult, Python};
use pyo3::{
    pymodule,
    types::{PyModule, PyModuleMethods},
    wrap_pyfunction, Bound, PyResult, Python,
};
use query::{Query, VectorQuery};
use table::Table;

@@ -29,7 +33,7 @@ pub mod table;
pub mod util;

#[pymodule]
pub fn _lancedb(_py: Python, m: &PyModule) -> PyResult<()> {
pub fn _lancedb(_py: Python, m: &Bound<'_, PyModule>) -> PyResult<()> {
    let env = Env::new()
        .filter_or("LANCEDB_LOG", "warn")
        .write_style("LANCEDB_LOG_STYLE");

@@ -18,7 +18,8 @@ use arrow::pyarrow::FromPyArrow;
use lancedb::index::scalar::FullTextSearchQuery;
use lancedb::query::QueryExecutionOptions;
use lancedb::query::{
    ExecutableQuery, Query as LanceDbQuery, QueryBase, Select, VectorQuery as LanceDbVectorQuery,
    ExecutableQuery, HasQuery, Query as LanceDbQuery, QueryBase, Select,
    VectorQuery as LanceDbVectorQuery,
};
use pyo3::exceptions::PyRuntimeError;
use pyo3::prelude::{PyAnyMethods, PyDictMethods};
@@ -29,7 +30,7 @@ use pyo3::PyAny;
use pyo3::PyRef;
use pyo3::PyResult;
use pyo3::{pyclass, PyErr};
use pyo3_asyncio_0_21::tokio::future_into_py;
use pyo3_async_runtimes::tokio::future_into_py;

use crate::arrow::RecordBatchStream;
use crate::error::PythonErrorExt;
@@ -87,7 +88,7 @@ impl Query {
        Ok(VectorQuery { inner })
    }

    pub fn nearest_to_text(&mut self, query: Bound<'_, PyDict>) -> PyResult<()> {
    pub fn nearest_to_text(&mut self, query: Bound<'_, PyDict>) -> PyResult<FTSQuery> {
        let query_text = query
            .get_item("query")?
            .ok_or(PyErr::new::<PyRuntimeError, _>(
@@ -100,11 +101,14 @@ impl Query {
            .transpose()?;

        let fts_query = FullTextSearchQuery::new(query_text).columns(columns);
        self.inner = self.inner.clone().full_text_search(fts_query);

        Ok(())
        Ok(FTSQuery {
            fts_query,
            inner: self.inner.clone(),
        })
    }

    #[pyo3(signature = (max_batch_length=None))]
    pub fn execute(
        self_: PyRef<'_, Self>,
        max_batch_length: Option<u32>,
@@ -132,6 +136,87 @@ impl Query {
}

#[pyclass]
#[derive(Clone)]
pub struct FTSQuery {
    inner: LanceDbQuery,
    fts_query: FullTextSearchQuery,
}

#[pymethods]
impl FTSQuery {
    pub fn r#where(&mut self, predicate: String) {
        self.inner = self.inner.clone().only_if(predicate);
    }

    pub fn select(&mut self, columns: Vec<(String, String)>) {
        self.inner = self.inner.clone().select(Select::dynamic(&columns));
    }

    pub fn limit(&mut self, limit: u32) {
        self.inner = self.inner.clone().limit(limit as usize);
    }

    pub fn offset(&mut self, offset: u32) {
        self.inner = self.inner.clone().offset(offset as usize);
    }

    pub fn fast_search(&mut self) {
        self.inner = self.inner.clone().fast_search();
    }

    pub fn with_row_id(&mut self) {
        self.inner = self.inner.clone().with_row_id();
    }

    pub fn postfilter(&mut self) {
        self.inner = self.inner.clone().postfilter();
    }

    #[pyo3(signature = (max_batch_length=None))]
    pub fn execute(
        self_: PyRef<'_, Self>,
        max_batch_length: Option<u32>,
    ) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_
            .inner
            .clone()
            .full_text_search(self_.fts_query.clone());

        future_into_py(self_.py(), async move {
            let mut opts = QueryExecutionOptions::default();
            if let Some(max_batch_length) = max_batch_length {
                opts.max_batch_length = max_batch_length;
            }
            let inner_stream = inner.execute_with_options(opts).await.infer_error()?;
            Ok(RecordBatchStream::new(inner_stream))
        })
    }

    pub fn nearest_to(&mut self, vector: Bound<'_, PyAny>) -> PyResult<HybridQuery> {
        let vector_query = Query::new(self.inner.clone()).nearest_to(vector)?;
        Ok(HybridQuery {
            inner_fts: self.clone(),
            inner_vec: vector_query,
        })
    }

    pub fn explain_plan(self_: PyRef<'_, Self>, verbose: bool) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner.clone();
        future_into_py(self_.py(), async move {
            inner
                .explain_plan(verbose)
                .await
                .map_err(|e| PyRuntimeError::new_err(e.to_string()))
        })
    }

    pub fn get_query(&self) -> String {
        self.fts_query.query.clone()
    }
}

#[pyclass]
#[derive(Clone)]
pub struct VectorQuery {
    inner: LanceDbVectorQuery,
}
@@ -195,10 +280,15 @@ impl VectorQuery {
        self.inner = self.inner.clone().nprobes(nprobe as usize);
    }

    pub fn ef(&mut self, ef: u32) {
        self.inner = self.inner.clone().ef(ef as usize);
    }

    pub fn bypass_vector_index(&mut self) {
        self.inner = self.inner.clone().bypass_vector_index()
    }

    #[pyo3(signature = (max_batch_length=None))]
    pub fn execute(
        self_: PyRef<'_, Self>,
        max_batch_length: Option<u32>,
@@ -223,4 +313,105 @@ impl VectorQuery {
                .map_err(|e| PyRuntimeError::new_err(e.to_string()))
        })
    }

    pub fn nearest_to_text(&mut self, query: Bound<'_, PyDict>) -> PyResult<HybridQuery> {
        let fts_query = Query::new(self.inner.mut_query().clone()).nearest_to_text(query)?;
        Ok(HybridQuery {
            inner_vec: self.clone(),
            inner_fts: fts_query,
        })
    }
}

#[pyclass]
pub struct HybridQuery {
    inner_vec: VectorQuery,
    inner_fts: FTSQuery,
}

#[pymethods]
impl HybridQuery {
    pub fn r#where(&mut self, predicate: String) {
        self.inner_vec.r#where(predicate.clone());
        self.inner_fts.r#where(predicate);
    }

    pub fn select(&mut self, columns: Vec<(String, String)>) {
        self.inner_vec.select(columns.clone());
        self.inner_fts.select(columns);
    }

    pub fn limit(&mut self, limit: u32) {
        self.inner_vec.limit(limit);
        self.inner_fts.limit(limit);
    }

    pub fn offset(&mut self, offset: u32) {
        self.inner_vec.offset(offset);
        self.inner_fts.offset(offset);
    }

    pub fn fast_search(&mut self) {
        self.inner_vec.fast_search();
        self.inner_fts.fast_search();
    }

    pub fn with_row_id(&mut self) {
        self.inner_fts.with_row_id();
        self.inner_vec.with_row_id();
    }

    pub fn postfilter(&mut self) {
        self.inner_vec.postfilter();
        self.inner_fts.postfilter();
    }

    pub fn add_query_vector(&mut self, vector: Bound<'_, PyAny>) -> PyResult<()> {
        self.inner_vec.add_query_vector(vector)
    }

    pub fn column(&mut self, column: String) {
        self.inner_vec.column(column);
    }

    pub fn distance_type(&mut self, distance_type: String) -> PyResult<()> {
        self.inner_vec.distance_type(distance_type)
    }

    pub fn refine_factor(&mut self, refine_factor: u32) {
        self.inner_vec.refine_factor(refine_factor);
    }

    pub fn nprobes(&mut self, nprobe: u32) {
        self.inner_vec.nprobes(nprobe);
    }

    pub fn ef(&mut self, ef: u32) {
        self.inner_vec.ef(ef);
    }

    pub fn bypass_vector_index(&mut self) {
        self.inner_vec.bypass_vector_index();
    }

    pub fn to_vector_query(&mut self) -> PyResult<VectorQuery> {
        Ok(VectorQuery {
            inner: self.inner_vec.inner.clone(),
        })
    }

    pub fn to_fts_query(&mut self) -> PyResult<FTSQuery> {
        Ok(FTSQuery {
            inner: self.inner_fts.inner.clone(),
            fts_query: self.inner_fts.fts_query.clone(),
        })
    }

    pub fn get_limit(&mut self) -> Option<u32> {
        self.inner_fts.inner.limit.map(|i| i as u32)
    }

    pub fn get_with_row_id(&mut self) -> bool {
        self.inner_fts.inner.with_row_id
    }
}

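`HybridQuery` is a thin fan-out: each builder call is forwarded to both the FTS and the vector half, and the Python layer can split it back apart with `to_vector_query()` / `to_fts_query()` to run the two searches and merge their results. Seen from Python, the chain looks roughly like this (a hedged sketch; the table and data are illustrative):

```python
hybrid = (
    tbl.query()
    .nearest_to([0.1, 0.1])    # -> vector query
    .nearest_to_text("dog")    # -> hybrid query; settings below apply to both halves
    .where("text != 'far_vec'")
    .limit(10)
    .with_row_id()             # row ids let the two result sets be joined
)
result = await hybrid.to_arrow()
```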
@@ -1,17 +1,21 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
use arrow::{
    datatypes::DataType,
    ffi_stream::ArrowArrayStreamReader,
    pyarrow::{FromPyArrow, ToPyArrow},
};
use lancedb::table::{
    AddDataMode, Duration, OptimizeAction, OptimizeOptions, Table as LanceDbTable,
    AddDataMode, ColumnAlteration, Duration, NewColumnTransform, OptimizeAction, OptimizeOptions,
    Table as LanceDbTable,
};
use pyo3::{
    exceptions::{PyRuntimeError, PyValueError},
    pyclass, pymethods,
    types::{PyDict, PyDictMethods, PyString},
    types::{IntoPyDict, PyAnyMethods, PyDict, PyDictMethods},
    Bound, FromPyObject, PyAny, PyRef, PyResult, Python, ToPyObject,
};
use pyo3_asyncio_0_21::tokio::future_into_py;
use pyo3_async_runtimes::tokio::future_into_py;

use crate::{
    error::PythonErrorExt,
@@ -137,9 +141,10 @@ impl Table {
        })
    }

    #[pyo3(signature = (updates, r#where=None))]
    pub fn update<'a>(
        self_: PyRef<'a, Self>,
        updates: &PyDict,
        updates: &Bound<'_, PyDict>,
        r#where: Option<String>,
    ) -> PyResult<Bound<'a, PyAny>> {
        let mut op = self_.inner_ref()?.update();
@@ -147,10 +152,8 @@ impl Table {
            op = op.only_if(only_if);
        }
        for (column_name, value) in updates.into_iter() {
            let column_name: &PyString = column_name.downcast()?;
            let column_name = column_name.to_str()?.to_string();
            let value: &PyString = value.downcast()?;
            let value = value.to_str()?.to_string();
            let column_name: String = column_name.extract()?;
            let value: String = value.extract()?;
            op = op.column(column_name, value);
        }
        future_into_py(self_.py(), async move {
@@ -159,6 +162,7 @@ impl Table {
        })
    }

    #[pyo3(signature = (filter=None))]
    pub fn count_rows(
        self_: PyRef<'_, Self>,
        filter: Option<String>,
@@ -169,6 +173,7 @@ impl Table {
        })
    }

    #[pyo3(signature = (column, index=None, replace=None))]
    pub fn create_index<'a>(
        self_: PyRef<'a, Self>,
        column: String,
@@ -246,6 +251,34 @@ impl Table {
        )
    }

    pub fn list_versions(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            let versions = inner.list_versions().await.infer_error()?;
            let versions_as_dict = Python::with_gil(|py| {
                versions
                    .iter()
                    .map(|v| {
                        let dict = PyDict::new_bound(py);
                        dict.set_item("version", v.version).unwrap();
                        dict.set_item(
                            "timestamp",
                            v.timestamp.timestamp_nanos_opt().unwrap_or_default(),
                        )
                        .unwrap();

                        let tup: Vec<(&String, &String)> = v.metadata.iter().collect();
                        dict.set_item("metadata", tup.into_py_dict_bound(py))
                            .unwrap();
                        dict.to_object(py)
                    })
                    .collect::<Vec<_>>()
            });

            Ok(versions_as_dict)
        })
    }

    pub fn checkout(self_: PyRef<'_, Self>, version: u64) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
@@ -272,6 +305,7 @@ impl Table {
        Query::new(self.inner_ref().unwrap().query())
    }

    #[pyo3(signature = (cleanup_since_ms=None, delete_unverified=None))]
    pub fn optimize(
        self_: PyRef<'_, Self>,
        cleanup_since_ms: Option<u64>,
@@ -379,6 +413,72 @@ impl Table {
                .infer_error()
        })
    }

    pub fn add_columns(
        self_: PyRef<'_, Self>,
        definitions: Vec<(String, String)>,
    ) -> PyResult<Bound<'_, PyAny>> {
        let definitions = NewColumnTransform::SqlExpressions(definitions);

        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            inner.add_columns(definitions, None).await.infer_error()?;
            Ok(())
        })
    }

    pub fn alter_columns<'a>(
        self_: PyRef<'a, Self>,
        alterations: Vec<Bound<PyDict>>,
    ) -> PyResult<Bound<'a, PyAny>> {
        let alterations = alterations
            .iter()
            .map(|alteration| {
                let path = alteration
                    .get_item("path")?
                    .ok_or_else(|| PyValueError::new_err("Missing path"))?
                    .extract()?;
                let rename = {
                    // We prefer rename, but support name for backwards compatibility
                    let rename = if let Ok(Some(rename)) = alteration.get_item("rename") {
                        Some(rename)
                    } else {
                        alteration.get_item("name")?
                    };
                    rename.map(|name| name.extract()).transpose()?
                };
                let nullable = alteration
                    .get_item("nullable")?
                    .map(|val| val.extract())
                    .transpose()?;
                let data_type = alteration
                    .get_item("data_type")?
                    .map(|val| DataType::from_pyarrow_bound(&val))
                    .transpose()?;
                Ok(ColumnAlteration {
                    path,
                    rename,
                    nullable,
                    data_type,
                })
            })
            .collect::<PyResult<Vec<_>>>()?;

        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            inner.alter_columns(&alterations).await.infer_error()?;
            Ok(())
        })
    }

    pub fn drop_columns(self_: PyRef<Self>, columns: Vec<String>) -> PyResult<Bound<PyAny>> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            let column_refs = columns.iter().map(String::as_str).collect::<Vec<&str>>();
            inner.drop_columns(&column_refs).await.infer_error()?;
            Ok(())
        })
    }
}

#[derive(FromPyObject)]

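As the comment in `alter_columns` notes, the binding keeps accepting the legacy "name" key and prefers "rename" when both are present. A hedged sketch of what that buys callers:

```python
# Both spellings reach the same ColumnAlteration; "rename" is the preferred key
await tbl.alter_columns({"path": "id", "name": "identifier"})    # legacy spelling
await tbl.alter_columns({"path": "identifier", "rename": "id"})  # current spelling
```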
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lancedb-node"
|
||||
version = "0.13.0-beta.2"
|
||||
version = "0.14.0"
|
||||
description = "Serverless, low-latency vector database for AI applications"
|
||||
license.workspace = true
|
||||
edition.workspace = true
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lancedb"
|
||||
version = "0.13.0-beta.2"
|
||||
version = "0.14.0"
|
||||
edition.workspace = true
|
||||
description = "LanceDB: A serverless, low-latency vector database for AI applications"
|
||||
license.workspace = true
|
||||
@@ -27,6 +27,7 @@ half = { workspace = true }
|
||||
lazy_static.workspace = true
|
||||
lance = { workspace = true }
|
||||
lance-datafusion.workspace = true
|
||||
lance-io = { workspace = true }
|
||||
lance-index = { workspace = true }
|
||||
lance-table = { workspace = true }
|
||||
lance-linalg = { workspace = true }
|
||||
@@ -48,9 +49,16 @@ async-openai = { version = "0.20.0", optional = true }
|
||||
serde_with = { version = "3.8.1" }
|
||||
aws-sdk-bedrockruntime = { version = "1.27.0", optional = true }
|
||||
# For remote feature
|
||||
reqwest = { version = "0.12.0", features = ["gzip", "json", "stream"], optional = true }
|
||||
rand = { version = "0.8.3", features = ["small_rng"], optional = true}
|
||||
http = { version = "1", optional = true } # Matching what is in reqwest
|
||||
reqwest = { version = "0.12.0", default-features = false, features = [
|
||||
"charset",
|
||||
"gzip",
|
||||
"http2",
|
||||
"json",
|
||||
"macos-system-configuration",
|
||||
"stream",
|
||||
], optional = true }
|
||||
rand = { version = "0.8.3", features = ["small_rng"], optional = true }
|
||||
http = { version = "1", optional = true } # Matching what is in reqwest
|
||||
uuid = { version = "1.7.0", features = ["v4"], optional = true }
|
||||
polars-arrow = { version = ">=0.37,<0.40.0", optional = true }
|
||||
polars = { version = ">=0.37,<0.40.0", optional = true }
|
||||
@@ -75,7 +83,7 @@ http-body = "1" # Matching reqwest
|
||||
|
||||
|
||||
[features]
|
||||
default = []
|
||||
default = ["default-tls"]
|
||||
remote = ["dep:reqwest", "dep:http", "dep:rand", "dep:uuid"]
|
||||
fp16kernels = ["lance-linalg/fp16kernels"]
|
||||
s3-test = []
|
||||
@@ -90,6 +98,11 @@ sentence-transformers = [
|
||||
"dep:tokenizers"
|
||||
]
|
||||
|
||||
# TLS
|
||||
default-tls = ["reqwest?/default-tls"]
|
||||
native-tls = ["reqwest?/native-tls"]
|
||||
rustls-tls = ["reqwest?/rustls-tls"]
|
||||
|
||||
[[example]]
|
||||
name = "openai"
|
||||
required-features = ["openai"]
|
||||
|
||||
@@ -38,6 +38,9 @@ use crate::table::{NativeTable, TableDefinition, WriteOptions};
|
||||
use crate::utils::validate_table_name;
|
||||
use crate::Table;
|
||||
pub use lance_encoding::version::LanceFileVersion;
|
||||
#[cfg(feature = "remote")]
|
||||
use lance_io::object_store::StorageOptions;
|
||||
use lance_table::io::commit::commit_handler_from_url;
|
||||
|
||||
pub const LANCE_FILE_EXTENSION: &str = "lance";
|
||||
|
||||
@@ -133,7 +136,7 @@ impl IntoArrow for NoData {
|
||||
|
||||
/// A builder for configuring a [`Connection::create_table`] operation
|
||||
pub struct CreateTableBuilder<const HAS_DATA: bool, T: IntoArrow> {
|
||||
parent: Arc<dyn ConnectionInternal>,
|
||||
pub(crate) parent: Arc<dyn ConnectionInternal>,
|
||||
pub(crate) name: String,
|
||||
pub(crate) data: Option<T>,
|
||||
pub(crate) mode: CreateTableMode,
|
||||
@@ -341,7 +344,7 @@ pub struct OpenTableBuilder {
|
||||
}
|
||||
|
||||
impl OpenTableBuilder {
|
||||
fn new(parent: Arc<dyn ConnectionInternal>, name: String) -> Self {
|
||||
pub(crate) fn new(parent: Arc<dyn ConnectionInternal>, name: String) -> Self {
|
||||
Self {
|
||||
parent,
|
||||
name,
|
||||
@@ -622,7 +625,7 @@ impl ConnectBuilder {
|
||||
|
||||
/// Set the LanceDB Cloud client configuration.
|
||||
///
|
||||
/// ```
|
||||
/// ```no_run
|
||||
/// # use lancedb::connect;
|
||||
/// # use lancedb::remote::*;
|
||||
/// connect("db://my_database")
|
||||
@@ -717,12 +720,14 @@ impl ConnectBuilder {
|
||||
message: "An api_key is required when connecting to LanceDb Cloud".to_string(),
|
||||
})?;
|
||||
|
||||
let storage_options = StorageOptions(self.storage_options.clone());
|
||||
let internal = Arc::new(crate::remote::db::RemoteDatabase::try_new(
|
||||
&self.uri,
|
||||
&api_key,
|
||||
®ion,
|
||||
self.host_override,
|
||||
self.client_config,
|
||||
storage_options.into(),
|
||||
)?);
|
||||
Ok(Connection {
|
||||
internal,
|
||||
@@ -855,7 +860,7 @@ impl Database {
        let table_base_uri = if let Some(store) = engine {
            static WARN_ONCE: std::sync::Once = std::sync::Once::new();
            WARN_ONCE.call_once(|| {
                log::warn!("Specifing engine is not a publicly supported feature in lancedb yet. THE API WILL CHANGE");
                log::warn!("Specifying engine is not a publicly supported feature in lancedb yet. THE API WILL CHANGE");
            });
            let old_scheme = url.scheme().to_string();
            let new_scheme = format!("{}+{}", old_scheme, store);

@@ -1036,6 +1041,7 @@ impl ConnectionInternal for Database {
        };

        let mut write_params = options.write_options.lance_write_params.unwrap_or_default();

        if matches!(&options.mode, CreateTableMode::Overwrite) {
            write_params.mode = WriteMode::Overwrite;
        }

@@ -1122,7 +1128,7 @@ impl ConnectionInternal for Database {
        let dir_name = format!("{}.{}", name, LANCE_EXTENSION);
        let full_path = self.base_path.child(dir_name.clone());
        self.object_store
            .remove_dir_all(full_path)
            .remove_dir_all(full_path.clone())
            .await
            .map_err(|err| match err {
                // this error is not lance::Error::DatasetNotFound,

@@ -1132,6 +1138,19 @@ impl ConnectionInternal for Database {
                },
                _ => Error::from(err),
            })?;

        let object_store_params = ObjectStoreParams {
            storage_options: Some(self.storage_options.clone()),
            ..Default::default()
        };
        let mut uri = self.uri.clone();
        if let Some(query_string) = &self.query_string {
            uri.push_str(&format!("?{}", query_string));
        }
        let commit_handler = commit_handler_from_url(&uri, &Some(object_store_params))
            .await
            .unwrap();
        commit_handler.delete(&full_path).await.unwrap();
        Ok(())
    }

@@ -1169,6 +1188,7 @@ mod tests {
    use lance_testing::datagen::{BatchGenerator, IncrementingInt32};
    use tempfile::tempdir;

    use crate::query::QueryBase;
    use crate::query::{ExecutableQuery, QueryExecutionOptions};

    use super::*;

@@ -1296,6 +1316,7 @@ mod tests {
        // In v1 the row group size will trump max_batch_length
        let batches = tbl
            .query()
            .limit(20000)
            .execute_with_options(QueryExecutionOptions {
                max_batch_length: 50000,
                ..Default::default()

@@ -30,7 +30,7 @@
//!
//! LanceDB runs in process, to use it in your Rust project, put the following in your `Cargo.toml`:
//!
//! ```ignore
//! ```shell
//! cargo install lancedb
//! ```
//!

@@ -348,7 +348,7 @@ pub trait QueryBase {
    ///
    /// The filter should be supplied as an SQL query string. For example:
    ///
    /// ```ignore
    /// ```sql
    /// x > 10
    /// y > 0 AND y < 100
    /// x > 5 OR y = 'test'

@@ -364,8 +364,18 @@ pub trait QueryBase {
    ///
    /// This method is only valid on tables that have a full text search index.
    ///
    /// ```ignore
    /// query.full_text_search(FullTextSearchQuery::new("hello world"))
    /// ```
    /// use lance_index::scalar::FullTextSearchQuery;
    /// use lancedb::query::{QueryBase, ExecutableQuery};
    ///
    /// # use lancedb::Table;
    /// # async fn query(table: &Table) -> Result<(), Box<dyn std::error::Error>> {
    /// let results = table.query()
    ///     .full_text_search(FullTextSearchQuery::new("hello world".into()))
    ///     .execute()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```
    fn full_text_search(self, query: FullTextSearchQuery) -> Self;

@@ -563,7 +573,7 @@ pub struct Query {
    parent: Arc<dyn TableInternal>,

    /// limit the number of rows to return.
    pub(crate) limit: Option<usize>,
    pub limit: Option<usize>,

    /// Offset of the query.
    pub(crate) offset: Option<usize>,

@@ -586,7 +596,7 @@ pub struct Query {
    /// If set to true, the query will return the `_rowid` meta column.
    ///
    /// By default, this is false.
    pub(crate) with_row_id: bool,
    pub with_row_id: bool,

    /// If set to false, the filter will be applied after the vector search.
    pub(crate) prefilter: bool,

@@ -596,7 +606,7 @@ impl Query {
    pub(crate) fn new(parent: Arc<dyn TableInternal>) -> Self {
        Self {
            parent,
            limit: None,
            limit: Some(DEFAULT_TOP_K),
            offset: None,
            filter: None,
            full_text_search: None,
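One behavioral consequence worth noting: a plain `query()` now starts with `Some(DEFAULT_TOP_K)` instead of `None`, so remote request bodies always carry a concrete `k` (the tests later in this diff expect `"k": 10`). A minimal sketch of overriding the default, assuming a `Table` handle:

```rust
use lancedb::query::{ExecutableQuery, QueryBase};

async fn top_100(table: &lancedb::Table) -> lancedb::Result<()> {
    // The default limit is now Some(DEFAULT_TOP_K) rather than None, so
    // call .limit(..) explicitly when you need more than the top-k rows.
    let _stream = table.query().limit(100).execute().await?;
    Ok(())
}
```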
@@ -704,6 +714,9 @@ pub struct VectorQuery {
    // IVF PQ - ANN search.
    pub(crate) query_vector: Vec<Arc<dyn Array>>,
    pub(crate) nprobes: usize,
    // The number of candidates to return during the refine step for HNSW,
    // defaults to 1.5 * limit.
    pub(crate) ef: Option<usize>,
    pub(crate) refine_factor: Option<u32>,
    pub(crate) distance_type: Option<DistanceType>,
    /// Default is true. Set to false to enforce a brute force search.

@@ -717,6 +730,7 @@ impl VectorQuery {
            column: None,
            query_vector: Vec::new(),
            nprobes: 20,
            ef: None,
            refine_factor: None,
            distance_type: None,
            use_index: true,

@@ -776,6 +790,18 @@ impl VectorQuery {
        self
    }

    /// Set the number of candidates to return during the refine step for HNSW
    ///
    /// This argument is only used when the vector column has an HNSW index.
    /// If there is no index then this value is ignored.
    ///
    /// Increasing this value will increase the recall of your query but will
    /// also increase the latency of your query. The default value is 1.5*limit.
    pub fn ef(mut self, ef: usize) -> Self {
        self.ef = Some(ef);
        self
    }

    /// A multiplier to control how many additional rows are taken during the refine step
    ///
    /// This argument is only used when the vector column has an IVF PQ index.
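Since `ef` only matters when the column has an HNSW index, a hedged sketch of tuning it (the table handle and query vector are placeholders; without an index the value is simply ignored):

```rust
use lancedb::query::ExecutableQuery;

// Sketch: trade latency for recall on an HNSW-indexed column by raising
// `ef` above its default of roughly 1.5 * limit.
async fn high_recall_search(table: &lancedb::Table) -> lancedb::Result<()> {
    let _results = table
        .query()
        .nearest_to(vec![0.1f32, 0.2, 0.3])?
        .ef(64)
        .execute()
        .await?;
    Ok(())
}
```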
@@ -21,6 +21,7 @@ use reqwest::{
};

use crate::error::{Error, Result};
use crate::remote::db::RemoteOptions;

const REQUEST_ID_HEADER: &str = "x-request-id";

@@ -215,6 +216,7 @@ impl RestfulLanceDbClient<Sender> {
        region: &str,
        host_override: Option<String>,
        client_config: ClientConfig,
        options: &RemoteOptions,
    ) -> Result<Self> {
        let parsed_url = url::Url::parse(db_url).map_err(|err| Error::InvalidInput {
            message: format!("db_url is not a valid URL. '{db_url}'. Error: {err}"),

@@ -226,6 +228,14 @@ impl RestfulLanceDbClient<Sender> {
            });
        }
        let db_name = parsed_url.host_str().unwrap();
        let db_prefix = {
            let prefix = parsed_url.path().trim_start_matches('/');
            if prefix.is_empty() {
                None
            } else {
                Some(prefix)
            }
        };

        // Get the timeouts
        let connect_timeout = Self::get_timeout(

@@ -255,6 +265,8 @@ impl RestfulLanceDbClient<Sender> {
                region,
                db_name,
                host_override.is_some(),
                options,
                db_prefix,
            )?)
            .user_agent(client_config.user_agent)
            .build()

@@ -262,6 +274,7 @@ impl RestfulLanceDbClient<Sender> {
                message: "Failed to build HTTP client".into(),
                source: Some(Box::new(err)),
            })?;

        let host = match host_override {
            Some(host_override) => host_override,
            None => format!("https://{}.{}.api.lancedb.com", db_name, region),

@@ -287,6 +300,8 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
        region: &str,
        db_name: &str,
        has_host_override: bool,
        options: &RemoteOptions,
        db_prefix: Option<&str>,
    ) -> Result<HeaderMap> {
        let mut headers = HeaderMap::new();
        headers.insert(

@@ -312,6 +327,34 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
                })?,
            );
        }
        if db_prefix.is_some() {
            headers.insert(
                "x-lancedb-database-prefix",
                HeaderValue::from_str(db_prefix.unwrap()).map_err(|_| Error::InvalidInput {
                    message: format!(
                        "non-ascii database prefix '{}' provided",
                        db_prefix.unwrap()
                    ),
                })?,
            );
        }

        if let Some(v) = options.0.get("account_name") {
            headers.insert(
                "x-azure-storage-account-name",
                HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
                    message: format!("non-ascii storage account name '{}' provided", db_name),
                })?,
            );
        }
        if let Some(v) = options.0.get("azure_storage_account_name") {
            headers.insert(
                "x-azure-storage-account-name",
                HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
                    message: format!("non-ascii storage account name '{}' provided", db_name),
                })?,
            );
        }

        Ok(headers)
    }
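The prefix handling above means a URI like `db://my-container/my-prefix` splits into a database name (the host) and a per-request `x-lancedb-database-prefix` header (the path). A small sketch of the parsing rule, using the `url` crate just as the client does:

```rust
// Sketch: how the client derives db_name and the database prefix from the URI.
fn split_db_uri(uri: &str) -> (String, Option<String>) {
    let parsed = url::Url::parse(uri).expect("valid db:// URI");
    let db_name = parsed.host_str().expect("host is the database name").to_string();
    let prefix = parsed.path().trim_start_matches('/');
    let prefix = (!prefix.is_empty()).then(|| prefix.to_string());
    (db_name, prefix)
}

// split_db_uri("db://my-container/my-prefix")
//   => ("my-container", Some("my-prefix"))
```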
@@ -12,18 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::sync::Arc;

use arrow_array::RecordBatchReader;
use async_trait::async_trait;
use http::StatusCode;
use lance_io::object_store::StorageOptions;
use moka::future::Cache;
use reqwest::header::CONTENT_TYPE;
use serde::Deserialize;
use tokio::task::spawn_blocking;

use crate::connection::{
    ConnectionInternal, CreateTableBuilder, NoData, OpenTableBuilder, TableNamesBuilder,
    ConnectionInternal, CreateTableBuilder, CreateTableMode, NoData, OpenTableBuilder,
    TableNamesBuilder,
};
use crate::embeddings::EmbeddingRegistry;
use crate::error::Result;

@@ -52,9 +55,16 @@ impl RemoteDatabase {
        region: &str,
        host_override: Option<String>,
        client_config: ClientConfig,
        options: RemoteOptions,
    ) -> Result<Self> {
        let client =
            RestfulLanceDbClient::try_new(uri, api_key, region, host_override, client_config)?;
        let client = RestfulLanceDbClient::try_new(
            uri,
            api_key,
            region,
            host_override,
            client_config,
            &options,
        )?;

        let table_cache = Cache::builder()
            .time_to_live(std::time::Duration::from_secs(300))

@@ -95,6 +105,16 @@ impl<S: HttpSend> std::fmt::Display for RemoteDatabase<S> {
    }
}

impl From<&CreateTableMode> for &'static str {
    fn from(val: &CreateTableMode) -> Self {
        match val {
            CreateTableMode::Create => "create",
            CreateTableMode::Overwrite => "overwrite",
            CreateTableMode::ExistOk(_) => "exist_ok",
        }
    }
}

#[async_trait]
impl<S: HttpSend> ConnectionInternal for RemoteDatabase<S> {
    async fn table_names(&self, options: TableNamesBuilder) -> Result<Vec<String>> {

@@ -133,14 +153,40 @@ impl<S: HttpSend> ConnectionInternal for RemoteDatabase<S> {
        let req = self
            .client
            .post(&format!("/v1/table/{}/create/", options.name))
            .query(&[("mode", Into::<&str>::into(&options.mode))])
            .body(data_buffer)
            .header(CONTENT_TYPE, ARROW_STREAM_CONTENT_TYPE);

        let (request_id, rsp) = self.client.send(req, false).await?;

        if rsp.status() == StatusCode::BAD_REQUEST {
            let body = rsp.text().await.err_to_http(request_id.clone())?;
            if body.contains("already exists") {
                return Err(crate::Error::TableAlreadyExists { name: options.name });
                return match options.mode {
                    CreateTableMode::Create => {
                        Err(crate::Error::TableAlreadyExists { name: options.name })
                    }
                    CreateTableMode::ExistOk(callback) => {
                        let builder = OpenTableBuilder::new(options.parent, options.name);
                        let builder = (callback)(builder);
                        builder.execute().await
                    }

                    // This should not happen, as we explicitly set the mode to overwrite and the server
                    // shouldn't return an error if the table already exists.
                    //
                    // However if the server is an older version that doesn't support the mode parameter,
                    // then we'll get the 400 response.
                    CreateTableMode::Overwrite => Err(crate::Error::Http {
                        source: format!(
                            "unexpected response from server for create mode overwrite: {}",
                            body
                        )
                        .into(),
                        request_id,
                        status_code: Some(StatusCode::BAD_REQUEST),
                    }),
                };
            } else {
                return Err(crate::Error::InvalidInput { message: body });
            }
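With the mode now forwarded as a query parameter, the `ExistOk` path degrades gracefully into an open. A sketch of the caller side, mirroring the tests later in this diff (table name and schema are placeholders):

```rust
use std::sync::Arc;

use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator};
use arrow_schema::{DataType, Field, Schema};
use lancedb::connection::CreateTableMode;

// Sketch: create-or-open against a remote database. On a 400 "already
// exists" response the ExistOk callback receives the OpenTableBuilder.
async fn create_or_open(conn: &lancedb::Connection) -> lancedb::Result<lancedb::Table> {
    let data = RecordBatch::try_new(
        Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
        vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
    )
    .unwrap();
    let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
    conn.create_table("table1", reader)
        .mode(CreateTableMode::ExistOk(Box::new(|open| open)))
        .execute()
        .await
}
```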
@@ -206,6 +252,29 @@ impl<S: HttpSend> ConnectionInternal for RemoteDatabase<S> {
    }
}

/// RemoteOptions contains a subset of StorageOptions that are compatible with Remote LanceDB connections
#[derive(Clone, Debug, Default)]
pub struct RemoteOptions(pub HashMap<String, String>);

impl RemoteOptions {
    pub fn new(options: HashMap<String, String>) -> Self {
        Self(options)
    }
}

impl From<StorageOptions> for RemoteOptions {
    fn from(options: StorageOptions) -> Self {
        let supported_opts = vec!["account_name", "azure_storage_account_name"];
        let mut filtered = HashMap::new();
        for opt in supported_opts {
            if let Some(v) = options.0.get(opt) {
                filtered.insert(opt.to_string(), v.to_string());
            }
        }
        Self::new(filtered)
    }
}
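The conversion silently drops anything outside the allow-list, which is easy to miss. A quick sketch of the filtering behavior (the `lancedb::remote::db::RemoteOptions` import path is an assumption about module visibility):

```rust
use std::collections::HashMap;

use lance_io::object_store::StorageOptions;
use lancedb::remote::db::RemoteOptions; // assumed public path

fn remote_options_filtering() {
    // Only allow-listed keys survive the conversion; others are dropped, not rejected.
    let mut raw: HashMap<String, String> = HashMap::new();
    raw.insert("azure_storage_account_name".into(), "my-storage-account".into());
    raw.insert("aws_region".into(), "us-east-1".into()); // not forwarded
    let remote: RemoteOptions = StorageOptions(raw).into();
    assert!(remote.0.contains_key("azure_storage_account_name"));
    assert!(!remote.0.contains_key("aws_region"));
}
```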
#[cfg(test)]
mod tests {
    use std::sync::{Arc, OnceLock};

@@ -213,7 +282,9 @@ mod tests {
    use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator};
    use arrow_schema::{DataType, Field, Schema};

    use crate::connection::ConnectBuilder;
    use crate::{
        connection::CreateTableMode,
        remote::{ARROW_STREAM_CONTENT_TYPE, JSON_CONTENT_TYPE},
        Connection, Error,
    };

@@ -382,6 +453,73 @@ mod tests {
        );
    }

    #[tokio::test]
    async fn test_create_table_modes() {
        let test_cases = [
            (None, "mode=create"),
            (Some(CreateTableMode::Create), "mode=create"),
            (Some(CreateTableMode::Overwrite), "mode=overwrite"),
            (
                Some(CreateTableMode::ExistOk(Box::new(|b| b))),
                "mode=exist_ok",
            ),
        ];

        for (mode, expected_query_string) in test_cases {
            let conn = Connection::new_with_handler(move |request| {
                assert_eq!(request.method(), &reqwest::Method::POST);
                assert_eq!(request.url().path(), "/v1/table/table1/create/");
                assert_eq!(request.url().query(), Some(expected_query_string));

                http::Response::builder().status(200).body("").unwrap()
            });

            let data = RecordBatch::try_new(
                Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
                vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
            )
            .unwrap();
            let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
            let mut builder = conn.create_table("table1", reader);
            if let Some(mode) = mode {
                builder = builder.mode(mode);
            }
            builder.execute().await.unwrap();
        }

        // check that the open table callback is called with exist_ok
        let conn = Connection::new_with_handler(|request| match request.url().path() {
            "/v1/table/table1/create/" => http::Response::builder()
                .status(400)
                .body("Table table1 already exists")
                .unwrap(),
            "/v1/table/table1/describe/" => http::Response::builder().status(200).body("").unwrap(),
            _ => {
                panic!("unexpected path: {:?}", request.url().path());
            }
        });
        let data = RecordBatch::try_new(
            Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
        )
        .unwrap();

        let called: Arc<OnceLock<bool>> = Arc::new(OnceLock::new());
        let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
        let called_in_cb = called.clone();
        conn.create_table("table1", reader)
            .mode(CreateTableMode::ExistOk(Box::new(move |b| {
                called_in_cb.clone().set(true).unwrap();
                b
            })))
            .execute()
            .await
            .unwrap();

        let called = *called.get().unwrap_or(&false);
        assert!(called);
    }

    #[tokio::test]
    async fn test_create_table_empty() {
        let conn = Connection::new_with_handler(|request| {

@@ -436,4 +574,16 @@ mod tests {
        });
        conn.rename_table("table1", "table2").await.unwrap();
    }

    #[tokio::test]
    async fn test_connect_remote_options() {
        let db_uri = "db://my-container/my-prefix";
        let _ = ConnectBuilder::new(db_uri)
            .region("us-east-1")
            .api_key("my-api-key")
            .storage_options(vec![("azure_storage_account_name", "my-storage-account")])
            .execute()
            .await
            .unwrap();
    }
}

@@ -17,11 +17,12 @@ use datafusion_physical_plan::{ExecutionPlan, SendableRecordBatchStream};
use futures::TryStreamExt;
use http::header::CONTENT_TYPE;
use http::StatusCode;
use lance::arrow::json::JsonSchema;
use lance::arrow::json::{JsonDataType, JsonSchema};
use lance::dataset::scanner::DatasetRecordBatchStream;
use lance::dataset::{ColumnAlteration, NewColumnTransform};
use lance::dataset::{ColumnAlteration, NewColumnTransform, Version};
use lance_datafusion::exec::OneShotExec;
use serde::{Deserialize, Serialize};
use tokio::sync::RwLock;

use crate::{
    connection::NoData,

@@ -43,17 +44,32 @@ pub struct RemoteTable<S: HttpSend = Sender> {
    #[allow(dead_code)]
    client: RestfulLanceDbClient<S>,
    name: String,

    version: RwLock<Option<u64>>,
}

impl<S: HttpSend> RemoteTable<S> {
    pub fn new(client: RestfulLanceDbClient<S>, name: String) -> Self {
        Self { client, name }
        Self {
            client,
            name,
            version: RwLock::new(None),
        }
    }

    async fn describe(&self) -> Result<TableDescription> {
        let request = self
        let version = self.current_version().await;
        self.describe_version(version).await
    }

    async fn describe_version(&self, version: Option<u64>) -> Result<TableDescription> {
        let mut request = self
            .client
            .post(&format!("/v1/table/{}/describe/", self.name));

        let body = serde_json::json!({ "version": version });
        request = request.json(&body);

        let (request_id, response) = self.client.send(request, true).await?;

        let response = self.check_table_response(&request_id, response).await?;

@@ -196,6 +212,7 @@ impl<S: HttpSend> RemoteTable<S> {
        body["prefilter"] = query.base.prefilter.into();
        body["distance_type"] = serde_json::json!(query.distance_type.unwrap_or_default());
        body["nprobes"] = query.nprobes.into();
        body["ef"] = query.ef.into();
        body["refine_factor"] = query.refine_factor.into();
        if let Some(vector_column) = query.column.as_ref() {
            body["vector_column"] = serde_json::Value::String(vector_column.clone());

@@ -250,6 +267,24 @@ impl<S: HttpSend> RemoteTable<S> {
            }
        }
    }

    async fn check_mutable(&self) -> Result<()> {
        let read_guard = self.version.read().await;
        match *read_guard {
            None => Ok(()),
            Some(version) => Err(Error::NotSupported {
                message: format!(
                    "Cannot mutate table reference fixed at version {}. Call checkout_latest() to get a mutable table reference.",
                    version
                )
            })
        }
    }

    async fn current_version(&self) -> Option<u64> {
        let read_guard = self.version.read().await;
        *read_guard
    }
}

#[derive(Deserialize)]

@@ -277,7 +312,11 @@ mod test_utils {
            T: Into<reqwest::Body>,
        {
            let client = client_with_handler(handler);
            Self { client, name }
            Self {
                client,
                name,
                version: RwLock::new(None),
            }
        }
    }
}

@@ -296,21 +335,62 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
    async fn version(&self) -> Result<u64> {
        self.describe().await.map(|desc| desc.version)
    }
    async fn checkout(&self, _version: u64) -> Result<()> {
        Err(Error::NotSupported {
            message: "checkout is not supported on LanceDB cloud.".into(),
        })
    async fn checkout(&self, version: u64) -> Result<()> {
        // check that the version exists
        self.describe_version(Some(version))
            .await
            .map_err(|e| match e {
                // try to map the error to a more user-friendly error telling them
                // specifically that the version does not exist
                Error::TableNotFound { name } => Error::TableNotFound {
                    name: format!("{} (version: {})", name, version),
                },
                e => e,
            })?;

        let mut write_guard = self.version.write().await;
        *write_guard = Some(version);
        Ok(())
    }
    async fn checkout_latest(&self) -> Result<()> {
        Err(Error::NotSupported {
            message: "checkout is not supported on LanceDB cloud.".into(),
        })
        let mut write_guard = self.version.write().await;
        *write_guard = None;
        Ok(())
    }
    async fn restore(&self) -> Result<()> {
        self.check_mutable().await?;
        Err(Error::NotSupported {
            message: "restore is not supported on LanceDB cloud.".into(),
        })
    }

    async fn list_versions(&self) -> Result<Vec<Version>> {
        let request = self
            .client
            .post(&format!("/v1/table/{}/version/list/", self.name));
        let (request_id, response) = self.client.send(request, true).await?;
        let response = self.check_table_response(&request_id, response).await?;

        #[derive(Deserialize)]
        struct ListVersionsResponse {
            versions: Vec<Version>,
        }

        let body = response.text().await.err_to_http(request_id.clone())?;
        let body: ListVersionsResponse =
            serde_json::from_str(&body).map_err(|err| Error::Http {
                source: format!(
                    "Failed to parse list_versions response: {}, body: {}",
                    err, body
                )
                .into(),
                request_id,
                status_code: None,
            })?;

        Ok(body.versions)
    }

    async fn schema(&self) -> Result<SchemaRef> {
        let schema = self.describe().await?.schema;
        Ok(Arc::new(schema.try_into()?))
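Together, `checkout` and `checkout_latest` turn a remote `Table` into a read-only view pinned to a version, with the version forwarded on every read. A hedged usage sketch (version 42 is a placeholder):

```rust
// Sketch: pin a remote table to an old version for reads, then return to
// the mutable latest view. Reads carry "version" in each request body.
async fn inspect_old_version(table: &lancedb::Table) -> lancedb::Result<()> {
    table.checkout(42).await?; // fails if version 42 does not exist
    let n = table.count_rows(None).await?; // executes against version 42
    println!("rows at v42: {}", n);
    // Any mutation here (delete, add, ...) would return Error::NotSupported.
    table.checkout_latest().await?; // mutations allowed again
    Ok(())
}
```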
@@ -320,10 +400,13 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
            .client
            .post(&format!("/v1/table/{}/count_rows/", self.name));

        let version = self.current_version().await;

        if let Some(filter) = filter {
            request = request.json(&serde_json::json!({ "predicate": filter }));
            request = request.json(&serde_json::json!({ "predicate": filter, "version": version }));
        } else {
            request = request.json(&serde_json::json!({}));
            let body = serde_json::json!({ "version": version });
            request = request.json(&body);
        }

        let (request_id, response) = self.client.send(request, true).await?;

@@ -343,6 +426,7 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
        add: AddDataBuilder<NoData>,
        data: Box<dyn RecordBatchReader + Send>,
    ) -> Result<()> {
        self.check_mutable().await?;
        let body = Self::reader_as_body(data)?;
        let mut request = self
            .client

@@ -371,7 +455,8 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
    ) -> Result<Arc<dyn ExecutionPlan>> {
        let request = self.client.post(&format!("/v1/table/{}/query/", self.name));

        let body = serde_json::Value::Object(Default::default());
        let version = self.current_version().await;
        let body = serde_json::json!({ "version": version });
        let bodies = Self::apply_vector_query_params(body, query)?;

        let mut futures = Vec::with_capacity(bodies.len());

@@ -406,7 +491,8 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
            .post(&format!("/v1/table/{}/query/", self.name))
            .header(CONTENT_TYPE, JSON_CONTENT_TYPE);

        let mut body = serde_json::Value::Object(Default::default());
        let version = self.current_version().await;
        let mut body = serde_json::json!({ "version": version });
        Self::apply_query_params(&mut body, query)?;
        // Empty vector can be passed if no vector search is performed.
        body["vector"] = serde_json::Value::Array(Vec::new());

@@ -420,6 +506,7 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
        Ok(DatasetRecordBatchStream::new(stream))
    }
    async fn update(&self, update: UpdateBuilder) -> Result<u64> {
        self.check_mutable().await?;
        let request = self
            .client
            .post(&format!("/v1/table/{}/update/", self.name));

@@ -441,6 +528,7 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
        Ok(0) // TODO: support returning number of modified rows once supported in SaaS.
    }
    async fn delete(&self, predicate: &str) -> Result<()> {
        self.check_mutable().await?;
        let body = serde_json::json!({ "predicate": predicate });
        let request = self
            .client

@@ -452,6 +540,7 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
    }

    async fn create_index(&self, mut index: IndexBuilder) -> Result<()> {
        self.check_mutable().await?;
        let request = self
            .client
            .post(&format!("/v1/table/{}/create_index/", self.name));

@@ -530,6 +619,7 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
        params: MergeInsertBuilder,
        new_data: Box<dyn RecordBatchReader + Send>,
    ) -> Result<()> {
        self.check_mutable().await?;
        let query = MergeInsertRequest::try_from(params)?;
        let body = Self::reader_as_body(new_data)?;
        let request = self

@@ -546,35 +636,98 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
        Ok(())
    }
    async fn optimize(&self, _action: OptimizeAction) -> Result<OptimizeStats> {
        self.check_mutable().await?;
        Err(Error::NotSupported {
            message: "optimize is not supported on LanceDB cloud.".into(),
        })
    }
    async fn add_columns(
        &self,
        _transforms: NewColumnTransform,
        transforms: NewColumnTransform,
        _read_columns: Option<Vec<String>>,
    ) -> Result<()> {
        Err(Error::NotSupported {
            message: "add_columns is not yet supported.".into(),
        })
        self.check_mutable().await?;
        match transforms {
            NewColumnTransform::SqlExpressions(expressions) => {
                let body = expressions
                    .into_iter()
                    .map(|(name, expression)| {
                        serde_json::json!({
                            "name": name,
                            "expression": expression,
                        })
                    })
                    .collect::<Vec<_>>();
                let body = serde_json::json!({ "new_columns": body });
                let request = self
                    .client
                    .post(&format!("/v1/table/{}/add_columns/", self.name))
                    .json(&body);
                let (request_id, response) = self.client.send(request, false).await?;
                self.check_table_response(&request_id, response).await?;
                Ok(())
            }
            _ => {
                return Err(Error::NotSupported {
                    message: "Only SQL expressions are supported for adding columns".into(),
                });
            }
        }
    }
    async fn alter_columns(&self, _alterations: &[ColumnAlteration]) -> Result<()> {
        Err(Error::NotSupported {
            message: "alter_columns is not yet supported.".into(),
        })

    async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<()> {
        self.check_mutable().await?;
        let body = alterations
            .iter()
            .map(|alteration| {
                let mut value = serde_json::json!({
                    "path": alteration.path,
                });
                if let Some(rename) = &alteration.rename {
                    value["rename"] = serde_json::Value::String(rename.clone());
                }
                if let Some(data_type) = &alteration.data_type {
                    let json_data_type = JsonDataType::try_from(data_type).unwrap();
                    let json_data_type = serde_json::to_value(&json_data_type).unwrap();
                    value["data_type"] = json_data_type;
                }
                if let Some(nullable) = &alteration.nullable {
                    value["nullable"] = serde_json::Value::Bool(*nullable);
                }
                value
            })
            .collect::<Vec<_>>();
        let body = serde_json::json!({ "alterations": body });
        let request = self
            .client
            .post(&format!("/v1/table/{}/alter_columns/", self.name))
            .json(&body);
        let (request_id, response) = self.client.send(request, false).await?;
        self.check_table_response(&request_id, response).await?;
        Ok(())
    }
    async fn drop_columns(&self, _columns: &[&str]) -> Result<()> {
        Err(Error::NotSupported {
            message: "drop_columns is not yet supported.".into(),
        })

    async fn drop_columns(&self, columns: &[&str]) -> Result<()> {
        self.check_mutable().await?;
        let body = serde_json::json!({ "columns": columns });
        let request = self
            .client
            .post(&format!("/v1/table/{}/drop_columns/", self.name))
            .json(&body);
        let (request_id, response) = self.client.send(request, false).await?;
        self.check_table_response(&request_id, response).await?;
        Ok(())
    }

    async fn list_indices(&self) -> Result<Vec<IndexConfig>> {
        // Make request to list the indices
        let request = self
        let mut request = self
            .client
            .post(&format!("/v1/table/{}/index/list/", self.name));
        let version = self.current_version().await;
        let body = serde_json::json!({ "version": version });
        request = request.json(&body);

        let (request_id, response) = self.client.send(request, true).await?;
        let response = self.check_table_response(&request_id, response).await?;
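These three endpoints give remote tables the same schema-evolution surface as local ones. A short sketch of the public `Table` calls they back, using the re-exports from `lancedb::table` (column names are illustrative):

```rust
use arrow_schema::DataType;
use lancedb::table::{ColumnAlteration, NewColumnTransform};

// Sketch: evolve a remote table's schema. Each call maps onto one of the
// add_columns/alter_columns/drop_columns endpoints added above.
async fn evolve(table: &lancedb::Table) -> lancedb::Result<()> {
    table
        .add_columns(
            NewColumnTransform::SqlExpressions(vec![("b".into(), "a + 1".into())]),
            None,
        )
        .await?;
    table
        .alter_columns(&[ColumnAlteration::new("b".into()).cast_to(DataType::Int32)])
        .await?;
    table.drop_columns(&["b"]).await?;
    Ok(())
}
```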
@@ -624,10 +777,14 @@ impl<S: HttpSend> TableInternal for RemoteTable<S> {
    }

    async fn index_stats(&self, index_name: &str) -> Result<Option<IndexStatistics>> {
        let request = self.client.post(&format!(
        let mut request = self.client.post(&format!(
            "/v1/table/{}/index/{}/stats/",
            self.name, index_name
        ));
        let version = self.current_version().await;
        let body = serde_json::json!({ "version": version });
        request = request.json(&body);

        let (request_id, response) = self.client.send(request, true).await?;

        if response.status() == StatusCode::NOT_FOUND {

@@ -701,6 +858,7 @@ mod tests {
    use arrow::{array::AsArray, compute::concat_batches, datatypes::Int32Type};
    use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator};
    use arrow_schema::{DataType, Field, Schema};
    use chrono::{DateTime, Utc};
    use futures::{future::BoxFuture, StreamExt, TryFutureExt};
    use lance_index::scalar::FullTextSearchQuery;
    use reqwest::Body;

@@ -741,7 +899,17 @@ mod tests {
            Box::pin(table.update().column("a", "a + 1").execute().map_ok(|_| ())),
            Box::pin(table.add(example_data()).execute().map_ok(|_| ())),
            Box::pin(table.merge_insert(&["test"]).execute(example_data())),
            Box::pin(table.delete("false")), // TODO: other endpoints.
            Box::pin(table.delete("false")),
            Box::pin(table.add_columns(
                NewColumnTransform::SqlExpressions(vec![("x".into(), "y".into())]),
                None,
            )),
            Box::pin(async {
                let alterations = vec![ColumnAlteration::new("x".into()).rename("y".into())];
                table.alter_columns(&alterations).await
            }),
            Box::pin(table.drop_columns(&["a"])),
            // TODO: other endpoints.
        ];

        for result in results {

@@ -805,7 +973,10 @@ mod tests {
                request.headers().get("Content-Type").unwrap(),
                JSON_CONTENT_TYPE
            );
            assert_eq!(request.body().unwrap().as_bytes().unwrap(), br#"{}"#);
            assert_eq!(
                request.body().unwrap().as_bytes().unwrap(),
                br#"{"version":null}"#
            );

            http::Response::builder().status(200).body("42").unwrap()
        });

@@ -822,7 +993,7 @@ mod tests {
            );
            assert_eq!(
                request.body().unwrap().as_bytes().unwrap(),
                br#"{"predicate":"a > 10"}"#
                br#"{"predicate":"a > 10","version":null}"#
            );

            http::Response::builder().status(200).body("42").unwrap()

@@ -1121,7 +1292,10 @@ mod tests {
            "prefilter": true,
            "distance_type": "l2",
            "nprobes": 20,
            "k": 10,
            "ef": Option::<usize>::None,
            "refine_factor": null,
            "version": null,
        });
        // Pass vector separately to make sure it matches f32 precision.
        expected_body["vector"] = vec![0.1f32, 0.2, 0.3].into();

@@ -1166,7 +1340,9 @@ mod tests {
            "bypass_vector_index": true,
            "columns": ["a", "b"],
            "nprobes": 12,
            "ef": Option::<usize>::None,
            "refine_factor": 2,
            "version": null,
        });
        // Pass vector separately to make sure it matches f32 precision.
        expected_body["vector"] = vec![0.1f32, 0.2, 0.3].into();

@@ -1222,6 +1398,7 @@ mod tests {
            "k": 10,
            "vector": [],
            "with_row_id": true,
            "version": null
        });
        assert_eq!(body, expected_body);

@@ -1407,6 +1584,51 @@ mod tests {
        assert_eq!(indices, expected);
    }

    #[tokio::test]
    async fn test_list_versions() {
        let table = Table::new_with_handler("my_table", |request| {
            assert_eq!(request.method(), "POST");
            assert_eq!(request.url().path(), "/v1/table/my_table/version/list/");

            let version1 = lance::dataset::Version {
                version: 1,
                timestamp: "2024-01-01T00:00:00Z".parse().unwrap(),
                metadata: Default::default(),
            };
            let version2 = lance::dataset::Version {
                version: 2,
                timestamp: "2024-02-01T00:00:00Z".parse().unwrap(),
                metadata: Default::default(),
            };
            let response_body = serde_json::json!({
                "versions": [
                    version1,
                    version2,
                ]
            });
            let response_body = serde_json::to_string(&response_body).unwrap();

            http::Response::builder()
                .status(200)
                .body(response_body)
                .unwrap()
        });

        let versions = table.list_versions().await.unwrap();
        assert_eq!(versions.len(), 2);
        assert_eq!(versions[0].version, 1);
        assert_eq!(
            versions[0].timestamp,
            "2024-01-01T00:00:00Z".parse::<DateTime<Utc>>().unwrap()
        );
        assert_eq!(versions[1].version, 2);
        assert_eq!(
            versions[1].timestamp,
            "2024-02-01T00:00:00Z".parse::<DateTime<Utc>>().unwrap()
        );
        // assert_eq!(versions, expected);
    }

    #[tokio::test]
    async fn test_index_stats() {
        let table = Table::new_with_handler("my_table", |request| {

@@ -1451,4 +1673,305 @@ mod tests {
        let indices = table.index_stats("my_index").await.unwrap();
        assert!(indices.is_none());
    }

    #[tokio::test]
    async fn test_passes_version() {
        let table = Table::new_with_handler("my_table", |request| {
            let body = request.body().unwrap().as_bytes().unwrap();
            let body: serde_json::Value = serde_json::from_slice(body).unwrap();
            let version = body
                .as_object()
                .unwrap()
                .get("version")
                .unwrap()
                .as_u64()
                .unwrap();
            assert_eq!(version, 42);

            let response_body = match request.url().path() {
                "/v1/table/my_table/describe/" => {
                    serde_json::json!({
                        "version": 42,
                        "schema": { "fields": [] }
                    })
                }
                "/v1/table/my_table/index/list/" => {
                    serde_json::json!({
                        "indexes": []
                    })
                }
                "/v1/table/my_table/index/my_idx/stats/" => {
                    serde_json::json!({
                        "num_indexed_rows": 100000,
                        "num_unindexed_rows": 0,
                        "index_type": "IVF_PQ",
                        "distance_type": "l2"
                    })
                }
                "/v1/table/my_table/count_rows/" => {
                    serde_json::json!(1000)
                }
                "/v1/table/my_table/query/" => {
                    let expected_data = RecordBatch::try_new(
                        Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
                        vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
                    )
                    .unwrap();
                    let expected_data_ref = expected_data.clone();
                    let response_body = write_ipc_file(&expected_data_ref);
                    return http::Response::builder()
                        .status(200)
                        .header(CONTENT_TYPE, ARROW_FILE_CONTENT_TYPE)
                        .body(response_body)
                        .unwrap();
                }

                path => panic!("Unexpected path: {}", path),
            };

            http::Response::builder()
                .status(200)
                .body(
                    serde_json::to_string(&response_body)
                        .unwrap()
                        .as_bytes()
                        .to_vec(),
                )
                .unwrap()
        });

        table.checkout(42).await.unwrap();

        // ensure that version is passed to the /describe endpoint
        let version = table.version().await.unwrap();
        assert_eq!(version, 42);

        // ensure it's passed to other read API calls
        table.list_indices().await.unwrap();
        table.index_stats("my_idx").await.unwrap();
        table.count_rows(None).await.unwrap();
        table
            .query()
            .nearest_to(vec![0.1, 0.2, 0.3])
            .unwrap()
            .execute()
            .await
            .unwrap();
    }

    #[tokio::test]
    async fn test_fails_if_checkout_version_doesnt_exist() {
        let table = Table::new_with_handler("my_table", |request| {
            let body = request.body().unwrap().as_bytes().unwrap();
            let body: serde_json::Value = serde_json::from_slice(body).unwrap();
            let version = body
                .as_object()
                .unwrap()
                .get("version")
                .unwrap()
                .as_u64()
                .unwrap();
            if version != 42 {
                return http::Response::builder()
                    .status(404)
                    .body(format!("Table my_table (version: {}) not found", version))
                    .unwrap();
            }

            let response_body = match request.url().path() {
                "/v1/table/my_table/describe/" => {
                    serde_json::json!({
                        "version": 42,
                        "schema": { "fields": [] }
                    })
                }
                _ => panic!("Unexpected path"),
            };

            http::Response::builder()
                .status(200)
                .body(serde_json::to_string(&response_body).unwrap())
                .unwrap()
        });

        let res = table.checkout(43).await;
        println!("{:?}", res);
        assert!(
            matches!(res, Err(Error::TableNotFound { name }) if name == "my_table (version: 43)")
        );
    }

    #[tokio::test]
    async fn test_timetravel_immutable() {
        let table = Table::new_with_handler::<String>("my_table", |request| {
            let response_body = match request.url().path() {
                "/v1/table/my_table/describe/" => {
                    serde_json::json!({
                        "version": 42,
                        "schema": { "fields": [] }
                    })
                }
                _ => panic!("Should not have made a request: {:?}", request),
            };

            http::Response::builder()
                .status(200)
                .body(serde_json::to_string(&response_body).unwrap())
                .unwrap()
        });

        table.checkout(42).await.unwrap();

        // Ensure that all mutable operations fail.
        let res = table
            .update()
            .column("a", "a + 1")
            .column("b", "b - 1")
            .only_if("b > 10")
            .execute()
            .await;
        assert!(matches!(res, Err(Error::NotSupported { .. })));

        let batch = RecordBatch::try_new(
            Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
        )
        .unwrap();
        let data = Box::new(RecordBatchIterator::new(
            [Ok(batch.clone())],
            batch.schema(),
        ));
        let res = table.merge_insert(&["some_col"]).execute(data).await;
        assert!(matches!(res, Err(Error::NotSupported { .. })));

        let res = table.delete("id in (1, 2, 3)").await;
        assert!(matches!(res, Err(Error::NotSupported { .. })));

        let data = RecordBatch::try_new(
            Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
        )
        .unwrap();
        let res = table
            .add(RecordBatchIterator::new([Ok(data.clone())], data.schema()))
            .execute()
            .await;
        assert!(matches!(res, Err(Error::NotSupported { .. })));

        let res = table
            .create_index(&["a"], Index::IvfPq(Default::default()))
            .execute()
            .await;
        assert!(matches!(res, Err(Error::NotSupported { .. })));
    }

    #[tokio::test]
    async fn test_add_columns() {
        let table = Table::new_with_handler("my_table", |request| {
            assert_eq!(request.method(), "POST");
            assert_eq!(request.url().path(), "/v1/table/my_table/add_columns/");
            assert_eq!(
                request.headers().get("Content-Type").unwrap(),
                JSON_CONTENT_TYPE
            );

            let body = request.body().unwrap().as_bytes().unwrap();
            let body = std::str::from_utf8(body).unwrap();
            let value: serde_json::Value = serde_json::from_str(body).unwrap();
            let new_columns = value.get("new_columns").unwrap().as_array().unwrap();
            assert!(new_columns.len() == 2);

            let col_name = new_columns[0]["name"].as_str().unwrap();
            let expression = new_columns[0]["expression"].as_str().unwrap();
            assert_eq!(col_name, "b");
            assert_eq!(expression, "a + 1");

            let col_name = new_columns[1]["name"].as_str().unwrap();
            let expression = new_columns[1]["expression"].as_str().unwrap();
            assert_eq!(col_name, "x");
            assert_eq!(expression, "cast(NULL as int32)");

            http::Response::builder().status(200).body("{}").unwrap()
        });

        table
            .add_columns(
                NewColumnTransform::SqlExpressions(vec![
                    ("b".into(), "a + 1".into()),
                    ("x".into(), "cast(NULL as int32)".into()),
                ]),
                None,
            )
            .await
            .unwrap();
    }

    #[tokio::test]
    async fn test_alter_columns() {
        let table = Table::new_with_handler("my_table", |request| {
            assert_eq!(request.method(), "POST");
            assert_eq!(request.url().path(), "/v1/table/my_table/alter_columns/");
            assert_eq!(
                request.headers().get("Content-Type").unwrap(),
                JSON_CONTENT_TYPE
            );

            let body = request.body().unwrap().as_bytes().unwrap();
            let body = std::str::from_utf8(body).unwrap();
            let value: serde_json::Value = serde_json::from_str(body).unwrap();
            let alterations = value.get("alterations").unwrap().as_array().unwrap();
            assert!(alterations.len() == 2);

            let path = alterations[0]["path"].as_str().unwrap();
            let data_type = alterations[0]["data_type"]["type"].as_str().unwrap();
            assert_eq!(path, "b.c");
            assert_eq!(data_type, "int32");

            let path = alterations[1]["path"].as_str().unwrap();
            let nullable = alterations[1]["nullable"].as_bool().unwrap();
            let rename = alterations[1]["rename"].as_str().unwrap();
            assert_eq!(path, "x");
            assert!(nullable);
            assert_eq!(rename, "y");

            http::Response::builder().status(200).body("{}").unwrap()
        });

        table
            .alter_columns(&[
                ColumnAlteration::new("b.c".into()).cast_to(DataType::Int32),
                ColumnAlteration::new("x".into())
                    .rename("y".into())
                    .set_nullable(true),
            ])
            .await
            .unwrap();
    }

    #[tokio::test]
    async fn test_drop_columns() {
        let table = Table::new_with_handler("my_table", |request| {
            assert_eq!(request.method(), "POST");
            assert_eq!(request.url().path(), "/v1/table/my_table/drop_columns/");
            assert_eq!(
                request.headers().get("Content-Type").unwrap(),
                JSON_CONTENT_TYPE
            );

            let body = request.body().unwrap().as_bytes().unwrap();
            let body = std::str::from_utf8(body).unwrap();
            let value: serde_json::Value = serde_json::from_str(body).unwrap();
            let columns = value.get("columns").unwrap().as_array().unwrap();
            assert!(columns.len() == 2);

            let col1 = columns[0].as_str().unwrap();
            let col2 = columns[1].as_str().unwrap();
            assert_eq!(col1, "a");
            assert_eq!(col2, "b");

            http::Response::builder().status(200).body("{}").unwrap()
        });

        table.drop_columns(&["a", "b"]).await.unwrap();
    }
}

@@ -14,7 +14,6 @@

//! LanceDB Table APIs

use std::collections::HashMap;
use std::path::Path;
use std::sync::Arc;

@@ -37,7 +36,8 @@ pub use lance::dataset::ColumnAlteration;
pub use lance::dataset::NewColumnTransform;
pub use lance::dataset::ReadParams;
use lance::dataset::{
    Dataset, UpdateBuilder as LanceUpdateBuilder, WhenMatched, WriteMode, WriteParams,
    Dataset, InsertBuilder, UpdateBuilder as LanceUpdateBuilder, Version, WhenMatched, WriteMode,
    WriteParams,
};
use lance::dataset::{MergeInsertBuilder as LanceMergeInsertBuilder, WhenNotMatchedBySource};
use lance::io::WrappingObjectStore;

@@ -426,6 +426,7 @@ pub(crate) trait TableInternal: std::fmt::Display + std::fmt::Debug + Send + Syn
    async fn checkout(&self, version: u64) -> Result<()>;
    async fn checkout_latest(&self) -> Result<()>;
    async fn restore(&self) -> Result<()>;
    async fn list_versions(&self) -> Result<Vec<Version>>;
    async fn table_definition(&self) -> Result<TableDefinition>;
    fn dataset_uri(&self) -> &str;
}

@@ -955,6 +956,11 @@ impl Table {
        self.inner.restore().await
    }

    /// List all the versions of the table
    pub async fn list_versions(&self) -> Result<Vec<Version>> {
        self.inner.list_versions().await
    }

    /// List all indices that have been created with [`Self::create_index`]
    pub async fn list_indices(&self) -> Result<Vec<IndexConfig>> {
        self.inner.list_indices().await
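`list_versions` is the discovery half of time travel: it tells you which versions `checkout` can pin. A small hedged sketch of using the two together:

```rust
// Sketch: enumerate versions, then pin the table to the oldest one.
async fn rewind_to_first(table: &lancedb::Table) -> lancedb::Result<()> {
    let versions = table.list_versions().await?;
    if let Some(first) = versions.first() {
        println!("version {} created at {}", first.version, first.timestamp);
        table.checkout(first.version).await?;
    }
    Ok(())
}
```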
@@ -1040,12 +1046,6 @@ pub struct NativeTable {
    name: String,
    uri: String,
    pub(crate) dataset: dataset::DatasetConsistencyWrapper,

    // the object store wrapper to use on write path
    store_wrapper: Option<Arc<dyn WrappingObjectStore>>,

    storage_options: HashMap<String, String>,

    // This comes from the connection options. We store here so we can pass down
    // to the dataset when we recreate it (for example, in checkout_latest).
    read_consistency_interval: Option<std::time::Duration>,

@@ -1111,13 +1111,6 @@ impl NativeTable {
            None => params,
        };

        let storage_options = params
            .store_options
            .clone()
            .unwrap_or_default()
            .storage_options
            .unwrap_or_default();

        let dataset = DatasetBuilder::from_uri(uri)
            .with_read_params(params)
            .load()

@@ -1135,8 +1128,6 @@ impl NativeTable {
            name: name.to_string(),
            uri: uri.to_string(),
            dataset,
            store_wrapper: write_store_wrapper,
            storage_options,
            read_consistency_interval,
        })
    }

@@ -1185,12 +1176,6 @@ impl NativeTable {
            Some(wrapper) => params.patch_with_store_wrapper(wrapper)?,
            None => params,
        };
        let storage_options = params
            .store_params
            .clone()
            .unwrap_or_default()
            .storage_options
            .unwrap_or_default();

        let dataset = Dataset::write(batches, uri, Some(params))
            .await

@@ -1204,8 +1189,6 @@ impl NativeTable {
            name: name.to_string(),
            uri: uri.to_string(),
            dataset: DatasetConsistencyWrapper::new_latest(dataset, read_consistency_interval),
            store_wrapper: write_store_wrapper,
            storage_options,
            read_consistency_interval,
        })
    }

@@ -1319,7 +1302,7 @@ impl NativeTable {
        let (indices, mf) = futures::try_join!(dataset.load_indices(), dataset.latest_manifest())?;
        Ok(indices
            .iter()
            .map(|i| VectorIndex::new_from_format(&mf, i))
            .map(|i| VectorIndex::new_from_format(&(mf.0), i))
            .collect())
    }

@@ -1707,6 +1690,10 @@ impl TableInternal for NativeTable {
        self.dataset.reload().await
    }

    async fn list_versions(&self) -> Result<Vec<Version>> {
        Ok(self.dataset.get().await?.versions().await?)
    }

    async fn restore(&self) -> Result<()> {
        let version =
            self.dataset

@@ -1748,10 +1735,13 @@ impl TableInternal for NativeTable {
        add: AddDataBuilder<NoData>,
        data: Box<dyn RecordBatchReader + Send>,
    ) -> Result<()> {
        let data =
            MaybeEmbedded::try_new(data, self.table_definition().await?, add.embedding_registry)?;
        let data = Box::new(MaybeEmbedded::try_new(
            data,
            self.table_definition().await?,
            add.embedding_registry,
        )?) as Box<dyn RecordBatchReader + Send>;

        let mut lance_params = add.write_options.lance_write_params.unwrap_or(WriteParams {
        let lance_params = add.write_options.lance_write_params.unwrap_or(WriteParams {
            mode: match add.mode {
                AddDataMode::Append => WriteMode::Append,
                AddDataMode::Overwrite => WriteMode::Overwrite,

@@ -1759,27 +1749,15 @@ impl TableInternal for NativeTable {
            ..Default::default()
        });

        // Bring storage options from table
        let storage_options = lance_params
            .store_params
            .get_or_insert(Default::default())
            .storage_options
            .get_or_insert(Default::default());
        for (key, value) in self.storage_options.iter() {
            if !storage_options.contains_key(key) {
                storage_options.insert(key.clone(), value.clone());
            }
        }

        // patch the params if we have a write store wrapper
        let lance_params = match self.store_wrapper.clone() {
            Some(wrapper) => lance_params.patch_with_store_wrapper(wrapper)?,
            None => lance_params,
        let dataset = {
            // Limited scope for the mutable borrow of self.dataset avoids deadlock.
            let ds = self.dataset.get_mut().await?;
            InsertBuilder::new(Arc::new(ds.clone()))
                .with_params(&lance_params)
                .execute_stream(data)
                .await?
        };

        self.dataset.ensure_mutable().await?;
        let dataset = Dataset::write(data, &self.uri, Some(lance_params)).await?;

        self.dataset.set_latest(dataset).await;
        Ok(())
    }

@@ -1904,6 +1882,9 @@ impl TableInternal for NativeTable {
            query.base.offset.map(|offset| offset as i64),
        )?;
        scanner.nprobs(query.nprobes);
        if let Some(ef) = query.ef {
            scanner.ef(ef);
        }
        scanner.use_index(query.use_index);
        scanner.prefilter(query.base.prefilter);
        match query.base.select {

@@ -15,6 +15,7 @@
use std::sync::Arc;

use arrow_schema::{DataType, Schema};
use lance::arrow::json::JsonDataType;
use lance::dataset::{ReadParams, WriteParams};
use lance::io::{ObjectStoreParams, WrappingObjectStore};
use lazy_static::lazy_static;

@@ -175,6 +176,15 @@ pub fn supported_vector_data_type(dtype: &DataType) -> bool {
    }
}

/// Note: this is temporary until we get a proper datatype conversion in Lance.
pub fn string_to_datatype(s: &str) -> Option<DataType> {
    let data_type = serde_json::Value::String(s.to_string());
    let json_type =
        serde_json::Value::Object([("type".to_string(), data_type)].iter().cloned().collect());
    let json_type: JsonDataType = serde_json::from_value(json_type).ok()?;
    (&json_type).try_into().ok()
}

#[cfg(test)]
mod tests {
    use super::*;

@@ -239,4 +249,11 @@ mod tests {
        assert!(validate_table_name("my@table").is_err());
        assert!(validate_table_name("name with space").is_err());
    }

    #[test]
    fn test_string_to_datatype() {
        let string = "int32";
        let expected = DataType::Int32;
        assert_eq!(string_to_datatype(string), Some(expected));
    }
}