Mirror of https://github.com/lancedb/lancedb.git, synced 2025-12-23 13:29:57 +00:00.

Compare commits: python-v0.… ... python-v0.… (106 commits)
Commits in this compare (SHA1):
995bd9bf37, 36cc06697f, 35da464591, 31f9c30ffb, 92dcf24b0c, 6b0adba2d9, 66cbf6b6c5, ce9506db71,
b66cd943a7, d8d11f48e7, 7ec5df3022, b17304172c, fbe5408434, 3f3f845c5a, fbffe532a8, 55ffc96e56,
998c5f3f74, 6eacae18c4, d3ea75cc2b, f4afe456e8, ea5c2266b8, c557e77f09, 3c0a64be8f, 0e496ed3b5,
17c9e9afea, 0b45ef93c0, b474f98049, 2c05ffed52, 8b31540b21, ba844318f8, f007b76153, 5d8d258f59,
4172140f74, a27c5cf12b, f4dea72cc5, f76c4a5ce1, 164ce397c2, 445a312667, 92d845fa72, 397813f6a4,
50c30c5d34, c9f248b058, 0cb6da6b7e, aec8332eb5, 46061070e6, dae8334d0b, 8c81968b59, 16cf2990f3,
0a0f667bbd, 03753fd84b, 55cceaa309, c3797eb834, c0d0f38494, 6a8ab78d0a, 27404c8623, f181c7e77f,
e70fd4fecc, ac0068b80e, ebac960571, 59b57055e7, 591c8de8fc, f835ff310f, cf8c2edaf4, 61a714a459,
5ddd84cec0, 27ef0bb0a2, 25402ba6ec, 37c359ed40, 06cdf00987, 144b7f5d54, edc9b9adec, d11b2a6975,
980aa70e2d, d83e5a0208, 16a6b9ce8f, e3c6213333, 00552439d9, c0ee370f83, 17e4022045, c3ebac1a92,
10f919a0a9, 8af5476395, bcbbeb7a00, d6c0f75078, e820e356a0, 509286492f, f9789ec962, 347515aa51,
3324e7d525, ab5316b4fa, db125013fc, a43193c99b, b70513ca72, 78165801c6, 6e5927ce6d, 6c1f32ac11,
4fdf084777, 1fad24fcd8, 6ef20b85ca, 35bacdd57e, a5ebe5a6c4, bf03ad1b4a, 2a9e3e2084, f298f15360,
679b031b99, f50b5d532b
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.14.0-beta.2"
+current_version = "0.15.0-beta.0"
 parse = """(?x)
 (?P<major>0|[1-9]\\d*)\\.
 (?P<minor>0|[1-9]\\d*)\\.
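As a rough worked example of what the visible fragment of that `parse` pattern captures (only the major/minor portion appears in this hunk; the full pattern in the repository presumably also covers the patch and pre-release parts):

```python
import re

# Only the fragment shown in the hunk above; a sketch, not the full pattern.
fragment = re.compile(
    r"""(?x)
    (?P<major>0|[1-9]\d*)\.
    (?P<minor>0|[1-9]\d*)\.
    """
)

match = fragment.match("0.15.0-beta.0")  # the new current_version
print(match.group("major"), match.group("minor"))  # -> 0 15
```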
.github/workflows/build_mac_wheel/action.yml (vendored): 2 changed lines

@@ -20,7 +20,7 @@ runs:
 uses: PyO3/maturin-action@v1
 with:
 command: build
+# TODO: pass through interpreter
 args: ${{ inputs.args }}
 docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
 working-directory: python
-interpreter: 3.${{ inputs.python-minor-version }}

@@ -28,7 +28,7 @@ runs:
 args: ${{ inputs.args }}
 docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
 working-directory: python
-- uses: actions/upload-artifact@v3
+- uses: actions/upload-artifact@v4
 with:
 name: windows-wheels
 path: python\target\wheels
.github/workflows/make-release-commit.yml (vendored): 13 changed lines

@@ -43,7 +43,7 @@ on:
 jobs:
 make-release:
 # Creates tag and GH release. The GH release will trigger the build and release jobs.
-runs-on: ubuntu-latest
+runs-on: ubuntu-24.04
 permissions:
 contents: write
 steps:

@@ -57,15 +57,14 @@ jobs:
 # trigger any workflows watching for new tags. See:
 # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
 token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
+- name: Validate Lance dependency is at stable version
+  if: ${{ inputs.type == 'stable' }}
+  run: python ci/validate_stable_lance.py
 - name: Set git configs for bumpversion
 shell: bash
 run: |
 git config user.name 'Lance Release'
 git config user.email 'lance-dev@lancedb.com'
-- name: Set up Python 3.11
-  uses: actions/setup-python@v5
-  with:
-    python-version: "3.11"
 - name: Bump Python version
 if: ${{ inputs.python }}
 working-directory: python

@@ -97,3 +96,7 @@ jobs:
 if: ${{ !inputs.dry_run && inputs.other }}
 with:
 github_token: ${{ secrets.GITHUB_TOKEN }}
+- uses: ./.github/workflows/update_package_lock_nodejs
+  if: ${{ !inputs.dry_run && inputs.other }}
+  with:
+    github_token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/npm-publish.yml (vendored): 24 changed lines

@@ -159,7 +159,7 @@ jobs:
 - name: Install common dependencies
 run: |
 apk add protobuf-dev curl clang mold grep npm bash
-curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y --default-toolchain 1.80.0
+curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
 echo "source $HOME/.cargo/env" >> saved_env
 echo "export CC=clang" >> saved_env
 echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=-crt-static,+avx2,+fma,+f16c -Clinker=clang -Clink-arg=-fuse-ld=mold'" >> saved_env

@@ -167,7 +167,7 @@ jobs:
 if: ${{ matrix.config.arch == 'aarch64' }}
 run: |
 source "$HOME/.cargo/env"
-rustup target add aarch64-unknown-linux-musl --toolchain 1.80.0
+rustup target add aarch64-unknown-linux-musl
 crt=$(realpath $(dirname $(rustup which rustc))/../lib/rustlib/aarch64-unknown-linux-musl/lib/self-contained)
 sysroot_lib=/usr/aarch64-unknown-linux-musl/usr/lib
 apk_url=https://dl-cdn.alpinelinux.org/alpine/latest-stable/main/aarch64/

@@ -262,7 +262,7 @@ jobs:
 - name: Install common dependencies
 run: |
 apk add protobuf-dev curl clang mold grep npm bash openssl-dev openssl-libs-static
-curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y --default-toolchain 1.80.0
+curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
 echo "source $HOME/.cargo/env" >> saved_env
 echo "export CC=clang" >> saved_env
 echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=-crt-static,+avx2,+fma,+f16c -Clinker=clang -Clink-arg=-fuse-ld=mold'" >> saved_env

@@ -272,7 +272,7 @@ jobs:
 if: ${{ matrix.config.arch == 'aarch64' }}
 run: |
 source "$HOME/.cargo/env"
-rustup target add aarch64-unknown-linux-musl --toolchain 1.80.0
+rustup target add aarch64-unknown-linux-musl
 crt=$(realpath $(dirname $(rustup which rustc))/../lib/rustlib/aarch64-unknown-linux-musl/lib/self-contained)
 sysroot_lib=/usr/aarch64-unknown-linux-musl/usr/lib
 apk_url=https://dl-cdn.alpinelinux.org/alpine/latest-stable/main/aarch64/

@@ -336,7 +336,7 @@ jobs:

 node-windows-arm64:
 name: vectordb ${{ matrix.config.arch }}-pc-windows-msvc
-if: startsWith(github.ref, 'refs/tags/v')
+# if: startsWith(github.ref, 'refs/tags/v')
 runs-on: ubuntu-latest
 container: alpine:edge
 strategy:

@@ -351,12 +351,12 @@ jobs:
 - name: Install dependencies
 run: |
 apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
-curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y --default-toolchain 1.80.0
+curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
 echo "source $HOME/.cargo/env" >> saved_env
 echo "export CC=clang" >> saved_env
 echo "export AR=llvm-ar" >> saved_env
 source "$HOME/.cargo/env"
-rustup target add ${{ matrix.config.arch }}-pc-windows-msvc --toolchain 1.80.0
+rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
 (mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
 echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
 echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env

@@ -416,7 +416,7 @@ jobs:
 nodejs-windows-arm64:
 name: lancedb ${{ matrix.config.arch }}-pc-windows-msvc
 # Only runs on tags that matches the make-release action
-if: startsWith(github.ref, 'refs/tags/v')
+# if: startsWith(github.ref, 'refs/tags/v')
 runs-on: ubuntu-latest
 container: alpine:edge
 strategy:

@@ -431,12 +431,12 @@ jobs:
 - name: Install dependencies
 run: |
 apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
-curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y --default-toolchain 1.80.0
+curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
 echo "source $HOME/.cargo/env" >> saved_env
 echo "export CC=clang" >> saved_env
 echo "export AR=llvm-ar" >> saved_env
 source "$HOME/.cargo/env"
-rustup target add ${{ matrix.config.arch }}-pc-windows-msvc --toolchain 1.80.0
+rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
 (mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
 echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
 echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env

@@ -571,7 +571,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 ref: main
-persist-credentials: false
+token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
 fetch-depth: 0
 lfs: true
 - uses: ./.github/workflows/update_package_lock

@@ -589,7 +589,7 @@ jobs:
 uses: actions/checkout@v4
 with:
 ref: main
-persist-credentials: false
+token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
 fetch-depth: 0
 lfs: true
 - uses: ./.github/workflows/update_package_lock_nodejs
.github/workflows/python.yml (vendored): 4 changed lines

@@ -30,10 +30,10 @@ jobs:
 - name: Set up Python
 uses: actions/setup-python@v5
 with:
-python-version: "3.11"
+python-version: "3.12"
 - name: Install ruff
 run: |
-pip install ruff==0.5.4
+pip install ruff==0.8.4
 - name: Format check
 run: ruff format --check .
 - name: Lint
.github/workflows/rust.yml (vendored): 38 changed lines

@@ -238,3 +238,41 @@ jobs:
 $env:VCPKG_ROOT = $env:VCPKG_INSTALLATION_ROOT
 cargo build --target aarch64-pc-windows-msvc
 cargo test --target aarch64-pc-windows-msvc
+
+msrv:
+# Check the minimum supported Rust version
+name: MSRV Check - Rust v${{ matrix.msrv }}
+runs-on: ubuntu-24.04
+strategy:
+matrix:
+msrv: ["1.78.0"] # This should match up with rust-version in Cargo.toml
+env:
+# Need up-to-date compilers for kernels
+CC: clang-18
+CXX: clang++-18
+steps:
+- uses: actions/checkout@v4
+with:
+submodules: true
+- name: Install dependencies
+run: |
+sudo apt update
+sudo apt install -y protobuf-compiler libssl-dev
+- name: Install ${{ matrix.msrv }}
+uses: dtolnay/rust-toolchain@master
+with:
+toolchain: ${{ matrix.msrv }}
+- name: Downgrade dependencies
+# These packages have newer requirements for MSRV
+run: |
+cargo update -p aws-sdk-bedrockruntime --precise 1.64.0
+cargo update -p aws-sdk-dynamodb --precise 1.55.0
+cargo update -p aws-config --precise 1.5.10
+cargo update -p aws-sdk-kms --precise 1.51.0
+cargo update -p aws-sdk-s3 --precise 1.65.0
+cargo update -p aws-sdk-sso --precise 1.50.0
+cargo update -p aws-sdk-ssooidc --precise 1.51.0
+cargo update -p aws-sdk-sts --precise 1.51.0
+cargo update -p home --precise 0.5.9
+- name: cargo +${{ matrix.msrv }} check
+run: cargo check --workspace --tests --benches --all-features
.github/workflows/upload_wheel/action.yml (vendored): 4 changed lines

@@ -22,7 +22,7 @@ runs:
 shell: bash
 id: choose_repo
 run: |
-if [ ${{ github.ref }} == "*beta*" ]; then
+if [[ ${{ github.ref }} == *beta* ]]; then
 echo "repo=fury" >> $GITHUB_OUTPUT
 else
 echo "repo=pypi" >> $GITHUB_OUTPUT

@@ -33,7 +33,7 @@ runs:
 FURY_TOKEN: ${{ inputs.fury_token }}
 PYPI_TOKEN: ${{ inputs.pypi_token }}
 run: |
-if [ ${{ steps.choose_repo.outputs.repo }} == "fury" ]; then
+if [[ ${{ steps.choose_repo.outputs.repo }} == fury ]]; then
 WHEEL=$(ls target/wheels/lancedb-*.whl 2> /dev/null | head -n 1)
 echo "Uploading $WHEEL to Fury"
 curl -f -F package=@$WHEEL https://$FURY_TOKEN@push.fury.io/lancedb/
CONTRIBUTING.md (new file, 78 lines)

# Contributing to LanceDB

LanceDB is an open-source project and we welcome contributions from the community.
This document outlines the process for contributing to LanceDB.

## Reporting Issues

If you encounter a bug or have a feature request, please open an issue on the
[GitHub issue tracker](https://github.com/lancedb/lancedb).

## Picking an issue

We track issues on the GitHub issue tracker. If you are looking for something to
work on, check the [good first issue](https://github.com/lancedb/lancedb/contribute) label. These issues are typically the best described and have the smallest scope.

If there's an issue you are interested in working on, please leave a comment on the issue. This will help us avoid duplicate work. Additionally, if you have questions about the issue, please ask them in the issue comments. We are happy to provide guidance on how to approach the issue.

## Configuring Git

First, fork the repository on GitHub, then clone your fork:

```bash
git clone https://github.com/<username>/lancedb.git
cd lancedb
```

Then add the main repository as a remote:

```bash
git remote add upstream https://github.com/lancedb/lancedb.git
git fetch upstream
```

## Setting up your development environment

We have development environments for Python, Typescript, and Java. Each environment has its own setup instructions.

* [Python](python/CONTRIBUTING.md)
* [Typescript](nodejs/CONTRIBUTING.md)
<!-- TODO: add Java contributing guide -->
* [Documentation](docs/README.md)

## Best practices for pull requests

For the best chance of having your pull request accepted, please follow these guidelines:

1. Unit test all bug fixes and new features. Your code will not be merged if it
   doesn't have tests.
1. If you change the public API, update the documentation in the `docs` directory.
1. Aim to minimize the number of changes in each pull request. Keep to solving
   one problem at a time, when possible.
1. Before marking a pull request ready-for-review, do a self review of your code.
   Is it clear why you are making the changes? Are the changes easy to understand?
1. Use [conventional commit messages](https://www.conventionalcommits.org/en/) as pull request titles. Examples:
   * New feature: `feat: adding foo API`
   * Bug fix: `fix: issue with foo API`
   * Documentation change: `docs: adding foo API documentation`
1. If your pull request is a work in progress, leave the pull request as a draft.
   We will assume the pull request is ready for review when it is opened.
1. When writing tests, test the error cases. Make sure they have understandable
   error messages.

## Project structure

The core library is written in Rust. The Python, Typescript, and Java libraries
are wrappers around the Rust library.

* `src/lancedb`: Rust library source code
* `python`: Python package source code
* `nodejs`: Typescript package source code
* `node`: **Deprecated** Typescript package source code
* `java`: Java package source code
* `docs`: Documentation source code

## Release process

For information on the release process, see: [release_process.md](release_process.md)
Cargo.toml: 24 changed lines

@@ -18,19 +18,17 @@ repository = "https://github.com/lancedb/lancedb"
 description = "Serverless, low-latency vector database for AI applications"
 keywords = ["lancedb", "lance", "database", "vector", "search"]
 categories = ["database-implementations"]
-rust-version = "1.80.0" # TODO: lower this once we upgrade Lance again.
+rust-version = "1.78.0"

 [workspace.dependencies]
-lance = { "version" = "=0.20.0", "features" = [
-    "dynamodb",
-] }
-lance-io = "0.20.0"
-lance-index = "0.20.0"
-lance-linalg = "0.20.0"
-lance-table = "0.20.0"
-lance-testing = "0.20.0"
-lance-datafusion = "0.20.0"
-lance-encoding = "0.20.0"
+lance = { "version" = "=0.22.0", "features" = ["dynamodb"] }
+lance-io = "=0.22.0"
+lance-index = "=0.22.0"
+lance-linalg = "=0.22.0"
+lance-table = "=0.22.0"
+lance-testing = "=0.22.0"
+lance-datafusion = "=0.22.0"
+lance-encoding = "=0.22.0"
 # Note that this one does not include pyarrow
 arrow = { version = "53.2", optional = false }
 arrow-array = "53.2"

@@ -42,8 +40,8 @@ arrow-arith = "53.2"
 arrow-cast = "53.2"
 async-trait = "0"
 chrono = "0.4.35"
-datafusion-common = "42.0"
-datafusion-physical-plan = "42.0"
+datafusion-common = "44.0"
+datafusion-physical-plan = "44.0"
 env_logger = "0.10"
 half = { "version" = "=2.4.1", default-features = false, features = [
 "num-traits",
@@ -53,7 +53,7 @@ curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-42
 curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/149578fb3b621cdb61ee1813b9b3e791/463ad1b0783ebda908fd6c16a4abfe93.cab
 curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/5c986c4f393c6b09d5aec3b539e9fb4a/5a22e5cde814b041749fb271547f4dd5.cab

-# fwpuclnt.lib arm64rt.lib
+# dbghelp.lib fwpuclnt.lib arm64rt.lib
 curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/7a332420d812f7c1d41da865ae5a7c52/windows%20sdk%20desktop%20libs%20arm64-x86_en-us.msi
 curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/19de98ed4a79938d0045d19c047936b3/3e2f7be479e3679d700ce0782e4cc318.cab

@@ -98,7 +98,7 @@ find /usr/aarch64-pc-windows-msvc/usr/include -type f -exec sed -i -E 's/(#inclu
 # reason: https://developercommunity.visualstudio.com/t/libucrtlibstreamobj-error-lnk2001-unresolved-exter/1544787#T-ND1599818
 # I don't understand the 'correct' fix for this, arm64rt.lib is supposed to be the workaround

-(cd 'program files/windows kits/10/lib/10.0.26100.0/um/arm64' && cp advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib runtimeobject.lib fwpuclnt.lib arm64rt.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)
+(cd 'program files/windows kits/10/lib/10.0.26100.0/um/arm64' && cp advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib runtimeobject.lib dbghelp.lib fwpuclnt.lib arm64rt.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)

 (cd 'contents/vc/tools/msvc/14.16.27023/lib/arm64' && cp libcmt.lib libvcruntime.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)
ci/validate_stable_lance.py (new file, 34 lines)

import tomllib

found_preview_lance = False

with open("Cargo.toml", "rb") as f:
    cargo_data = tomllib.load(f)

for name, dep in cargo_data["workspace"]["dependencies"].items():
    if name == "lance" or name.startswith("lance-"):
        if isinstance(dep, str):
            version = dep
        elif isinstance(dep, dict):
            # Version doesn't have the beta tag in it, so we instead look
            # at the git tag.
            version = dep.get('tag', dep.get('version'))
        else:
            raise ValueError("Unexpected type for dependency: " + str(dep))

        if "beta" in version:
            found_preview_lance = True
            print(f"Dependency '{name}' is a preview version: {version}")

with open("python/pyproject.toml", "rb") as f:
    py_proj_data = tomllib.load(f)

for dep in py_proj_data["project"]["dependencies"]:
    if dep.startswith("pylance"):
        if "b" in dep:
            found_preview_lance = True
            print(f"Dependency '{dep}' is a preview version")
        break  # Only one pylance dependency

if found_preview_lance:
    raise ValueError("Found preview version of Lance in dependencies")
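A small sketch of the two substring checks the script relies on, with made-up version strings (not taken from the repository):

```python
# Cargo-style pins: stable versions have no "beta" marker, preview pins do.
for version in ["=0.22.0", "=0.22.0-beta.2"]:
    print(version, "beta" in version)   # False, then True

# pip-style pylance requirements: pre-releases carry a "b" (e.g. 0.22.0b1),
# and "pylance" itself contains no "b", so a plain substring check suffices.
for dep in ["pylance==0.22.0", "pylance==0.22.0b1"]:
    print(dep, "b" in dep)              # False, then True
```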
@@ -9,36 +9,81 @@ unreleased features.
 ## Building the docs

 ### Setup
-1. Install LanceDB. From LanceDB repo root: `pip install -e python`
-2. Install dependencies. From LanceDB repo root: `pip install -r docs/requirements.txt`
-3. Make sure you have node and npm setup
-4. Make sure protobuf and libssl are installed
-
-### Building node module and create markdown files
-
-See [Javascript docs README](./src/javascript/README.md)
-
-### Build docs
-From LanceDB repo root:
-
-Run: `PYTHONPATH=. mkdocs build -f docs/mkdocs.yml`
-
-If successful, you should see a `docs/site` directory that you can verify locally.
-
-### Run local server
-
-You can run a local server to test the docs prior to deployment by navigating to the `docs` directory and running the following command:
-
-```bash
+1. Install LanceDB Python. See setup in [Python contributing guide](../python/CONTRIBUTING.md).
+   Run `make develop` to install the Python package.
+2. Install documentation dependencies. From LanceDB repo root: `pip install -r docs/requirements.txt`
+
+### Preview the docs
+
+```shell
 cd docs
 mkdocs serve
 ```

-### Run doctest for typescript example
+If you want to just generate the HTML files:

-```bash
-cd lancedb/docs
-npm i
-npm run build
-npm run all
+```shell
+PYTHONPATH=. mkdocs build -f docs/mkdocs.yml
+```
+If successful, you should see a `docs/site` directory that you can verify locally.

+## Adding examples
+
+To make sure examples are correct, we put examples in test files so they can be
+run as part of our test suites.
+
+You can see the tests are at:
+
+* Python: `python/python/tests/docs`
+* Typescript: `nodejs/examples/`
+
+### Checking python examples
+
+```shell
+cd python
+pytest -vv python/tests/docs
+```
+
+### Checking typescript examples
+
+The `@lancedb/lancedb` package must be built before running the tests:
+
+```shell
+pushd nodejs
+npm ci
+npm run build
+popd
+```
+
+Then you can run the examples by going to the `nodejs/examples` directory and
+running the tests like a normal npm package:
+
+```shell
+pushd nodejs/examples
+npm ci
+npm test
+popd
+```
+
+## API documentation
+
+### Python
+
+The Python API documentation is organized based on the file `docs/src/python/python.md`.
+We manually add entries there so we can control the organization of the reference page.
+**However, this means any new types must be manually added to the file.** No additional
+steps are needed to generate the API documentation.
+
+### Typescript
+
+The typescript API documentation is generated from the typescript source code using [typedoc](https://typedoc.org/).
+
+When new APIs are added, you must manually re-run the typedoc command to update the API documentation.
+The new files should be checked into the repository.
+
+```shell
+pushd nodejs
+npm run docs
+popd
 ```
@@ -62,6 +62,7 @@ plugins:
 # for cross references
 - https://arrow.apache.org/docs/objects.inv
 - https://pandas.pydata.org/docs/objects.inv
+- https://lancedb.github.io/lance/objects.inv
 - mkdocs-jupyter
 - render_swagger:
 allow_arbitrary_locations: true

@@ -145,7 +146,9 @@ nav:
 - Building Custom Rerankers: reranking/custom_reranker.md
 - Example: notebooks/lancedb_reranking.ipynb
 - Filtering: sql.md
-- Versioning & Reproducibility: notebooks/reproducibility.ipynb
+- Versioning & Reproducibility:
+  - sync API: notebooks/reproducibility.ipynb
+  - async API: notebooks/reproducibility_async.ipynb
 - Configuring Storage: guides/storage.md
 - Migration Guide: migration.md
 - Tuning retrieval performance:

@@ -231,6 +234,7 @@ nav:
 - 🐍 Python: python/saas-python.md
 - 👾 JavaScript: javascript/modules.md
 - REST API: cloud/rest.md
+- FAQs: cloud/cloud_faq.md

 - Quick start: basic.md
 - Concepts:

@@ -276,7 +280,9 @@ nav:
 - Building Custom Rerankers: reranking/custom_reranker.md
 - Example: notebooks/lancedb_reranking.ipynb
 - Filtering: sql.md
-- Versioning & Reproducibility: notebooks/reproducibility.ipynb
+- Versioning & Reproducibility:
+  - sync API: notebooks/reproducibility.ipynb
+  - async API: notebooks/reproducibility_async.ipynb
 - Configuring Storage: guides/storage.md
 - Migration Guide: migration.md
 - Tuning retrieval performance:

@@ -357,6 +363,7 @@ nav:
 - 🐍 Python: python/saas-python.md
 - 👾 JavaScript: javascript/modules.md
 - REST API: cloud/rest.md
+- FAQs: cloud/cloud_faq.md

 extra_css:
 - styles/global.css
@@ -18,25 +18,24 @@ See the [indexing](concepts/index_ivfpq.md) concepts guide for more information
 Lance supports `IVF_PQ` index type by default.

 === "Python"
+=== "Sync API"
+
 Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method.

 ```python
-import lancedb
-import numpy as np
-uri = "data/sample-lancedb"
-db = lancedb.connect(uri)
+--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
+--8<-- "python/python/tests/docs/test_guide_index.py:import-numpy"
+--8<-- "python/python/tests/docs/test_guide_index.py:create_ann_index"
+```
+=== "Async API"
+Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method.

-# Create 10,000 sample vectors
-data = [{"vector": row, "item": f"item {i}"}
-for i, row in enumerate(np.random.random((10_000, 1536)).astype('float32'))]
-# Add the vectors to a table
-tbl = db.create_table("my_vectors", data=data)
+```python
+--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
+--8<-- "python/python/tests/docs/test_guide_index.py:import-numpy"
+--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-ivfpq"
+--8<-- "python/python/tests/docs/test_guide_index.py:create_ann_index_async"
+```

-# Create and train the index - you need to have enough data in the table for an effective training step
-tbl.create_index(num_partitions=256, num_sub_vectors=96)
-```

 === "TypeScript"

@@ -83,6 +82,7 @@ The following IVF_PQ paramters can be specified:
 - **num_sub_vectors**: The number of sub-vectors (M) that will be created during Product Quantization (PQ).
 For D dimensional vector, it will be divided into `M` subvectors with dimension `D/M`, each of which is replaced by
 a single PQ code. The default is the dimension of the vector divided by 16.
+- **num_bits**: The number of bits used to encode each sub-vector. Only 4 and 8 are supported. The higher the number of bits, the higher the accuracy of the index, also the slower search. The default is 8.

 !!! note
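For reference, a minimal sketch of creating such an index with the synchronous Python client, reconstructed from the example that used to be inlined above; passing `num_bits` to `create_index` is an assumption based on the parameter description, not something shown in the original snippet:

```python
import lancedb
import numpy as np

db = lancedb.connect("data/sample-lancedb")

# 10,000 sample 1536-dimensional vectors, as in the earlier inline example.
data = [
    {"vector": row, "item": f"item {i}"}
    for i, row in enumerate(np.random.random((10_000, 1536)).astype("float32"))
]
tbl = db.create_table("my_vectors", data=data)

# Create and train the IVF_PQ index; the table needs enough rows for training.
tbl.create_index(num_partitions=256, num_sub_vectors=96, num_bits=8)
```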
@@ -126,6 +126,8 @@ You can specify the GPU device to train IVF partitions via
 accelerator="mps"
 )
 ```
+!!! note
+GPU based indexing is not yet supported with our asynchronous client.

 Troubleshooting:

@@ -151,14 +153,16 @@ There are a couple of parameters that can be used to fine-tune the search:

 === "Python"
+=== "Sync API"
+
 ```python
-tbl.search(np.random.random((1536))) \
-.limit(2) \
-.nprobes(20) \
-.refine_factor(10) \
-.to_pandas()
-```
+--8<-- "python/python/tests/docs/test_guide_index.py:vector_search"
+```
+=== "Async API"
+
+```python
+--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async"
+```

 ```text
 vector item _distance

@@ -195,10 +199,16 @@ The search will return the data requested in addition to the distance of each it
 You can further filter the elements returned by a search using a where clause.

 === "Python"
+=== "Sync API"
+
 ```python
-tbl.search(np.random.random((1536))).where("item != 'item 1141'").to_pandas()
+--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_filter"
 ```
+=== "Async API"
+
+```python
+--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async_with_filter"
+```

 === "TypeScript"

@@ -220,10 +230,16 @@ You can select the columns returned by the query using a select clause.

 === "Python"

-```python
-tbl.search(np.random.random((1536))).select(["vector"]).to_pandas()
-```
+=== "Sync API"
+
+```python
+--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_select"
+```
+=== "Async API"
+
+```python
+--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async_with_select"
+```

 ```text
 vector _distance
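For reference, a sketch of the fine-tuned query behind the snippet includes above, reconstructed from the previously inlined example (assumes the `tbl` table from the indexing sketch; the included test snippets may differ in detail):

```python
import numpy as np

# nprobes trades recall for latency; refine_factor re-ranks candidates with
# exact distances; where/select show pre-filtering and column projection.
results = (
    tbl.search(np.random.random((1536)))
    .limit(2)
    .nprobes(20)
    .refine_factor(10)
    .where("item != 'item 1141'")
    .select(["vector"])
    .to_pandas()
)
print(results)
```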
docs/src/assets/maxsim.png: new binary file (10 KiB), not shown.
@@ -141,14 +141,6 @@ recommend switching to stable releases.
 --8<-- "python/python/tests/docs/test_basic.py:connect_async"
 ```

-!!! note "Asynchronous Python API"
-
-The asynchronous Python API is new and has some slight differences compared
-to the synchronous API. Feel free to start using the asynchronous version.
-Once all features have migrated we will start to move the synchronous API to
-use the same syntax as the asynchronous API. To help with this migration we
-have created a [migration guide](migration.md) detailing the differences.
-
 === "Typescript[^1]"

 === "@lancedb/lancedb"
docs/src/cloud/cloud_faq.md (new file, 34 lines)

This section provides answers to the most common questions asked about LanceDB Cloud. By following these guidelines, you can ensure a smooth, performant experience with LanceDB Cloud.

### Should I reuse the database connection?
Yes! It is recommended to establish a single database connection and maintain it throughout your interaction with the tables within.

LanceDB uses HTTP connections to communicate with the servers. By re-using the Connection object, you avoid the overhead of repeatedly establishing HTTP connections, significantly improving efficiency.

### Should I re-use the `Table` object?
`table = db.open_table()` should be called once and used for all subsequent table operations. If there are changes to the opened table, `table` always reflects the **latest version** of the data.

### What should I do if I need to search for rows by `id`?
LanceDB Cloud currently does not support an ID or primary key column. We recommend adding a user-defined ID column. To significantly improve query performance with SQL clauses, a scalar BITMAP/BTREE index should be created on this column.

### What are the vector indexing types supported by LanceDB Cloud?
We support `IVF_PQ` and `IVF_HNSW_SQ` as the `index_type` which is passed to `create_index`. LanceDB Cloud tunes the indexing parameters automatically to achieve the best tradeoff between query latency and query quality.

### When I add new rows to a table, do I need to manually update the index?
No! LanceDB Cloud triggers an asynchronous background job to index the new vectors.

Even though indexing is asynchronous, your vectors will still be immediately searchable. LanceDB uses brute-force search to search over unindexed rows. This means your new data is immediately available, but it does temporarily increase latency. To disable the brute-force part of search, set the `fast_search` flag in your query to `true`.

### Do I need to reindex the whole dataset if only a small portion of the data is deleted or updated?
No! Similar to adding data to the table, LanceDB Cloud triggers an asynchronous background job to update the existing indices. Therefore, no action is needed from users and there is absolutely no downtime expected.

### How do I know whether an index has been created?
While index creation in LanceDB Cloud is generally fast, querying immediately after a `create_index` call may result in errors. It's recommended to use `list_indices` to verify index creation before querying.

### Why is my query latency higher than expected?
Multiple factors can impact query latency. To reduce query latency, consider the following:
- Send pre-warm queries: send a few queries to warm up the cache before an actual user query.
- Check network latency: LanceDB Cloud is hosted in the AWS `us-east-1` region. It is recommended to run queries from an EC2 instance in the same region.
- Create scalar indices: If you are filtering on metadata, it is recommended to create scalar indices on those columns. This will speed up searches with metadata filtering. See [here](../guides/scalar_index.md) for more details on creating a scalar index.
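A minimal sketch of the ID-column and scalar-index advice above, using the synchronous Python client (the table and column names are illustrative, and `table` is assumed to be already open):

```python
# Add a scalar index on a user-defined ID column so that SQL filters on it
# (e.g. user_id = 1234) can use the index instead of scanning the table.
table.create_scalar_index("user_id", index_type="BITMAP")  # or "BTREE"

# Look rows up by the user-defined ID via a filtered (non-vector) query.
rows = table.search().where("user_id = 1234").to_pandas()
```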
@@ -7,7 +7,7 @@ Approximate Nearest Neighbor (ANN) search is a method for finding data points ne
 There are three main types of ANN search algorithms:

 * **Tree-based search algorithms**: Use a tree structure to organize and store data points.
-* * **Hash-based search algorithms**: Use a specialized geometric hash table to store and manage data points. These algorithms typically focus on theoretical guarantees, and don't usually perform as well as the other approaches in practice.
+* **Hash-based search algorithms**: Use a specialized geometric hash table to store and manage data points. These algorithms typically focus on theoretical guarantees, and don't usually perform as well as the other approaches in practice.
 * **Graph-based search algorithms**: Use a graph structure to store data points, which can be a bit complex.

 HNSW is a graph-based algorithm. All graph-based search algorithms rely on the idea of a k-nearest neighbor (or k-approximate nearest neighbor) graph, which we outline below.
docs/src/fts.md: 117 changed lines

@@ -10,28 +10,20 @@ LanceDB provides support for full-text search via Lance, allowing you to incorpo
 Consider that we have a LanceDB table named `my_table`, whose string column `text` we want to index and query via keyword search, the FTS index must be created before you can search via keywords.

 === "Python"
+=== "Sync API"
+
 ```python
-import lancedb
-
-uri = "data/sample-lancedb"
-db = lancedb.connect(uri)
-
-table = db.create_table(
-    "my_table",
-    data=[
-        {"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"},
-        {"vector": [5.9, 26.5], "text": "There are several kittens playing"},
-    ],
-)
-
-# passing `use_tantivy=False` to use lance FTS index
-# `use_tantivy=True` by default
-table.create_fts_index("text", use_tantivy=False)
-table.search("puppy").limit(10).select(["text"]).to_list()
-# [{'text': 'Frodo was a happy puppy', '_score': 0.6931471824645996}]
-# ...
-```
+--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
+--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
+--8<-- "python/python/tests/docs/test_search.py:basic_fts"
+```
+=== "Async API"
+
+```python
+--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
+--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
+--8<-- "python/python/tests/docs/test_search.py:basic_fts_async"
+```

 === "TypeScript"

@@ -50,7 +42,7 @@ Consider that we have a LanceDB table named `my_table`, whose string column `tex
 });

 await tbl
-.search("puppy", queryType="fts")
+.search("puppy", "fts")
 .select(["text"])
 .limit(10)
 .toArray();

@@ -93,22 +85,32 @@ By default the text is tokenized by splitting on punctuation and whitespaces, an
 Stemming is useful for improving search results by reducing words to their root form, e.g. "running" to "run". LanceDB supports stemming for multiple languages, you can specify the tokenizer name to enable stemming by the pattern `tokenizer_name="{language_code}_stem"`, e.g. `en_stem` for English.

 For example, to enable stemming for English:
-```python
-table.create_fts_index("text", use_tantivy=True, tokenizer_name="en_stem")
-```
+=== "Sync API"
+
+```python
+--8<-- "python/python/tests/docs/test_search.py:fts_config_stem"
+```
+=== "Async API"
+
+```python
+--8<-- "python/python/tests/docs/test_search.py:fts_config_stem_async"
+```

 the following [languages](https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html) are currently supported.

 The tokenizer is customizable, you can specify how the tokenizer splits the text, and how it filters out words, etc.

 For example, for language with accents, you can specify the tokenizer to use `ascii_folding` to remove accents, e.g. 'é' to 'e':
-```python
-table.create_fts_index("text",
-    use_tantivy=False,
-    language="French",
-    stem=True,
-    ascii_folding=True)
-```
+=== "Sync API"
+
+```python
+--8<-- "python/python/tests/docs/test_search.py:fts_config_folding"
+```
+=== "Async API"
+
+```python
+--8<-- "python/python/tests/docs/test_search.py:fts_config_folding_async"
+```

 ## Filtering

@@ -119,9 +121,16 @@ This can be invoked via the familiar `where` syntax.
 With pre-filtering:
 === "Python"

-```python
-table.search("puppy").limit(10).where("meta='foo'", prefilte=True).to_list()
-```
+=== "Sync API"
+
+```python
+--8<-- "python/python/tests/docs/test_search.py:fts_prefiltering"
+```
+=== "Async API"
+
+```python
+--8<-- "python/python/tests/docs/test_search.py:fts_prefiltering_async"
+```

 === "TypeScript"

@@ -151,9 +160,16 @@ With pre-filtering:
 With post-filtering:
 === "Python"

-```python
-table.search("puppy").limit(10).where("meta='foo'", prefilte=False).to_list()
-```
+=== "Sync API"
+
+```python
+--8<-- "python/python/tests/docs/test_search.py:fts_postfiltering"
+```
+=== "Async API"
+
+```python
+--8<-- "python/python/tests/docs/test_search.py:fts_postfiltering_async"
+```

 === "TypeScript"

@@ -191,9 +207,16 @@ or a **terms** search query like `old man sea`. For more details on the terms
 query syntax, see Tantivy's [query parser rules](https://docs.rs/tantivy/latest/tantivy/query/struct.QueryParser.html).

 To search for a phrase, the index must be created with `with_position=True`:
-```python
-table.create_fts_index("text", use_tantivy=False, with_position=True)
-```
+=== "Sync API"
+
+```python
+--8<-- "python/python/tests/docs/test_search.py:fts_with_position"
+```
+=== "Async API"
+
+```python
+--8<-- "python/python/tests/docs/test_search.py:fts_with_position_async"
+```
 This will allow you to search for phrases, but it will also significantly increase the index size and indexing time.

@@ -205,10 +228,16 @@ This can make the query more efficient, especially when the table is large and t
 === "Python"

-```python
-table.add([{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"}])
-table.optimize()
-```
+=== "Sync API"
+
+```python
+--8<-- "python/python/tests/docs/test_search.py:fts_incremental_index"
+```
+=== "Async API"
+
+```python
+--8<-- "python/python/tests/docs/test_search.py:fts_incremental_index_async"
+```

 === "TypeScript"
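For reference, a sketch of the native-FTS workflow covered above, reconstructed from the example code that this change replaces with snippet includes (the included test snippets may differ in detail):

```python
import lancedb

db = lancedb.connect("data/sample-lancedb")
table = db.create_table(
    "my_table",
    data=[
        {"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"},
        {"vector": [5.9, 26.5], "text": "There are several kittens playing"},
    ],
)

# use_tantivy=False selects the native Lance FTS index (use_tantivy=True is the default).
table.create_fts_index("text", use_tantivy=False)

print(table.search("puppy").limit(10).select(["text"]).to_list())
# [{'text': 'Frodo was a happy puppy', '_score': 0.6931471824645996}, ...]
```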
@@ -2,7 +2,7 @@

 LanceDB also provides support for full-text search via [Tantivy](https://github.com/quickwit-oss/tantivy), allowing you to incorporate keyword-based search (based on BM25) in your retrieval solutions.

-The tantivy-based FTS is only available in Python and does not support building indexes on object storage or incremental indexing. If you need these features, try native FTS [native FTS](fts.md).
+The tantivy-based FTS is only available in Python synchronous APIs and does not support building indexes on object storage or incremental indexing. If you need these features, try native FTS [native FTS](fts.md).

 ## Installation
@@ -32,19 +32,20 @@ over scalar columns.

### Create a scalar index

=== "Python"

-    ```python
-    import lancedb
-    books = [
-        {"book_id": 1, "publisher": "plenty of books", "tags": ["fantasy", "adventure"]},
-        {"book_id": 2, "publisher": "book town", "tags": ["non-fiction"]},
-        {"book_id": 3, "publisher": "oreilly", "tags": ["textbook"]}
-    ]
-
-    db = lancedb.connect("./db")
-    table = db.create_table("books", books)
-    table.create_scalar_index("book_id") # BTree by default
-    table.create_scalar_index("publisher", index_type="BITMAP")
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
+        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-btree-bitmap"
+        --8<-- "python/python/tests/docs/test_guide_index.py:basic_scalar_index"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
+        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-btree-bitmap"
+        --8<-- "python/python/tests/docs/test_guide_index.py:basic_scalar_index_async"
+        ```

=== "Typescript"
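The removed inline example corresponds to the following standalone sketch (the `./db` path is a placeholder):

```python
import lancedb

books = [
    {"book_id": 1, "publisher": "plenty of books", "tags": ["fantasy", "adventure"]},
    {"book_id": 2, "publisher": "book town", "tags": ["non-fiction"]},
    {"book_id": 3, "publisher": "oreilly", "tags": ["textbook"]},
]

db = lancedb.connect("./db")
table = db.create_table("books", books)

table.create_scalar_index("book_id")                         # BTree index by default
table.create_scalar_index("publisher", index_type="BITMAP")  # bitmap suits low-cardinality columns
```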
@@ -62,12 +63,18 @@ The following scan will be faster if the column `book_id` has a scalar index:

=== "Python"

-    ```python
-    import lancedb
-
-    table = db.open_table("books")
-    my_df = table.search().where("book_id = 2").to_pandas()
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
+        --8<-- "python/python/tests/docs/test_guide_index.py:search_with_scalar_index"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
+        --8<-- "python/python/tests/docs/test_guide_index.py:search_with_scalar_index_async"
+        ```

=== "Typescript"
@@ -88,22 +95,18 @@ Scalar indices can also speed up scans containing a vector search or full text s

=== "Python"

-    ```python
-    import lancedb
-
-    data = [
-        {"book_id": 1, "vector": [1, 2]},
-        {"book_id": 2, "vector": [3, 4]},
-        {"book_id": 3, "vector": [5, 6]}
-    ]
-    table = db.create_table("book_with_embeddings", data)
-
-    (
-        table.search([1, 2])
-        .where("book_id != 3", prefilter=True)
-        .to_pandas()
-    )
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
+        --8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_scalar_index"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
+        --8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_scalar_index_async"
+        ```

=== "Typescript"
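Pulling the removed lines of this hunk together, a prefiltered vector search looks roughly like this sketch (placeholder path; the scalar index on `book_id` is what the surrounding prose says speeds up the filter):

```python
import lancedb

db = lancedb.connect("./db")  # placeholder local database path
data = [
    {"book_id": 1, "vector": [1, 2]},
    {"book_id": 2, "vector": [3, 4]},
    {"book_id": 3, "vector": [5, 6]},
]
table = db.create_table("book_with_embeddings", data)
table.create_scalar_index("book_id")  # lets the predicate below use the index

result = (
    table.search([1, 2])
    .where("book_id != 3", prefilter=True)  # filter rows before the nearest-neighbor search
    .to_pandas()
)
```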
@@ -122,10 +125,16 @@ Scalar indices can also speed up scans containing a vector search or full text s

Updating the table data (adding, deleting, or modifying records) requires that you also update the scalar index. This can be done by calling `optimize`, which will trigger an update to the existing scalar index.

=== "Python"

-    ```python
-    table.add([{"vector": [7, 8], "book_id": 4}])
-    table.optimize()
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_index.py:update_scalar_index"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_index.py:update_scalar_index_async"
+        ```

=== "TypeScript"
@@ -12,26 +12,50 @@ LanceDB OSS supports object stores such as AWS S3 (and compatible stores), Azure

=== "Python"

    AWS S3:

+    === "Sync API"

        ```python
        import lancedb
        db = lancedb.connect("s3://bucket/path")
        ```
+    === "Async API"
+
+        ```python
+        import lancedb
+        async_db = await lancedb.connect_async("s3://bucket/path")
+        ```

    Google Cloud Storage:

-    ```python
-    import lancedb
-    db = lancedb.connect("gs://bucket/path")
-    ```
+    === "Sync API"
+
+        ```python
+        import lancedb
+        db = lancedb.connect("gs://bucket/path")
+        ```
+    === "Async API"
+
+        ```python
+        import lancedb
+        async_db = await lancedb.connect_async("gs://bucket/path")
+        ```

    Azure Blob Storage:

    <!-- skip-test -->
-    ```python
-    import lancedb
-    db = lancedb.connect("az://bucket/path")
-    ```
+    === "Sync API"
+
+        ```python
+        import lancedb
+        db = lancedb.connect("az://bucket/path")
+        ```
+    <!-- skip-test -->
+    === "Async API"
+
+        ```python
+        import lancedb
+        async_db = await lancedb.connect_async("az://bucket/path")
+        ```

    Note that for Azure, storage credentials must be configured. See [below](#azure-blob-storage) for more details.
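For reference, the sync and async connection calls shown in this hunk combine into a sketch like the following (the bucket path is a placeholder and real credentials are still required at runtime):

```python
import asyncio

import lancedb


def connect_sync():
    # Synchronous connection to an object-store-backed database
    return lancedb.connect("s3://bucket/path")


async def connect_async():
    # The asynchronous client is created with connect_async and must be awaited
    return await lancedb.connect_async("s3://bucket/path")


if __name__ == "__main__":
    db = connect_sync()
    async_db = asyncio.run(connect_async())
```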
@@ -94,13 +118,24 @@ If you only want this to apply to one particular connection, you can pass the `s

=== "Python"

-    ```python
-    import lancedb
-    db = await lancedb.connect_async(
-        "s3://bucket/path",
-        storage_options={"timeout": "60s"}
-    )
-    ```
+    === "Sync API"
+
+        ```python
+        import lancedb
+        db = lancedb.connect(
+            "s3://bucket/path",
+            storage_options={"timeout": "60s"}
+        )
+        ```
+    === "Async API"
+
+        ```python
+        import lancedb
+        async_db = await lancedb.connect_async(
+            "s3://bucket/path",
+            storage_options={"timeout": "60s"}
+        )
+        ```

=== "TypeScript"
@@ -128,15 +163,29 @@ Getting even more specific, you can set the `timeout` for only a particular tabl

=== "Python"

    <!-- skip-test -->
-    ```python
-    import lancedb
-    db = await lancedb.connect_async("s3://bucket/path")
-    table = await db.create_table(
-        "table",
-        [{"a": 1, "b": 2}],
-        storage_options={"timeout": "60s"}
-    )
-    ```
+    === "Sync API"
+
+        ```python
+        import lancedb
+        db = lancedb.connect("s3://bucket/path")
+        table = db.create_table(
+            "table",
+            [{"a": 1, "b": 2}],
+            storage_options={"timeout": "60s"}
+        )
+        ```
+    <!-- skip-test -->
+    === "Async API"
+
+        ```python
+        import lancedb
+        async_db = await lancedb.connect_async("s3://bucket/path")
+        async_table = await async_db.create_table(
+            "table",
+            [{"a": 1, "b": 2}],
+            storage_options={"timeout": "60s"}
+        )
+        ```

=== "TypeScript"
@@ -194,17 +243,32 @@ These can be set as environment variables or passed in the `storage_options` par

=== "Python"

-    ```python
-    import lancedb
-    db = await lancedb.connect_async(
-        "s3://bucket/path",
-        storage_options={
-            "aws_access_key_id": "my-access-key",
-            "aws_secret_access_key": "my-secret-key",
-            "aws_session_token": "my-session-token",
-        }
-    )
-    ```
+    === "Sync API"
+
+        ```python
+        import lancedb
+        db = lancedb.connect(
+            "s3://bucket/path",
+            storage_options={
+                "aws_access_key_id": "my-access-key",
+                "aws_secret_access_key": "my-secret-key",
+                "aws_session_token": "my-session-token",
+            }
+        )
+        ```
+    === "Async API"
+
+        ```python
+        import lancedb
+        async_db = await lancedb.connect_async(
+            "s3://bucket/path",
+            storage_options={
+                "aws_access_key_id": "my-access-key",
+                "aws_secret_access_key": "my-secret-key",
+                "aws_session_token": "my-session-token",
+            }
+        )
+        ```

=== "TypeScript"
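Combining the connection-level and table-level examples from the surrounding hunks, passing credentials and per-call options explicitly looks roughly like this sketch (all values are placeholders; environment variables or an IAM role are usually preferable to hard-coded keys):

```python
import lancedb

db = lancedb.connect(
    "s3://bucket/path",  # placeholder bucket
    storage_options={
        "aws_access_key_id": "my-access-key",        # placeholder credentials
        "aws_secret_access_key": "my-secret-key",
        "aws_session_token": "my-session-token",
        "timeout": "60s",                            # connection-wide option
    },
)

# storage_options can also be scoped to a single table
table = db.create_table(
    "table",
    [{"a": 1, "b": 2}],
    storage_options={"timeout": "60s"},
)
```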
@@ -348,12 +412,22 @@ name of the table to use.

=== "Python"

-    ```python
-    import lancedb
-    db = await lancedb.connect_async(
-        "s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
-    )
-    ```
+    === "Sync API"
+
+        ```python
+        import lancedb
+        db = lancedb.connect(
+            "s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
+        )
+        ```
+    === "Async API"
+
+        ```python
+        import lancedb
+        async_db = await lancedb.connect_async(
+            "s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
+        )
+        ```

=== "JavaScript"
@@ -441,16 +515,30 @@ LanceDB can also connect to S3-compatible stores, such as MinIO. To do so, you m

=== "Python"

-    ```python
-    import lancedb
-    db = await lancedb.connect_async(
-        "s3://bucket/path",
-        storage_options={
-            "region": "us-east-1",
-            "endpoint": "http://minio:9000",
-        }
-    )
-    ```
+    === "Sync API"
+
+        ```python
+        import lancedb
+        db = lancedb.connect(
+            "s3://bucket/path",
+            storage_options={
+                "region": "us-east-1",
+                "endpoint": "http://minio:9000",
+            }
+        )
+        ```
+    === "Async API"
+
+        ```python
+        import lancedb
+        async_db = await lancedb.connect_async(
+            "s3://bucket/path",
+            storage_options={
+                "region": "us-east-1",
+                "endpoint": "http://minio:9000",
+            }
+        )
+        ```

=== "TypeScript"
@@ -502,16 +590,30 @@ To configure LanceDB to use an S3 Express endpoint, you must set the storage opt

=== "Python"

-    ```python
-    import lancedb
-    db = await lancedb.connect_async(
-        "s3://my-bucket--use1-az4--x-s3/path",
-        storage_options={
-            "region": "us-east-1",
-            "s3_express": "true",
-        }
-    )
-    ```
+    === "Sync API"
+
+        ```python
+        import lancedb
+        db = lancedb.connect(
+            "s3://my-bucket--use1-az4--x-s3/path",
+            storage_options={
+                "region": "us-east-1",
+                "s3_express": "true",
+            }
+        )
+        ```
+    === "Async API"
+
+        ```python
+        import lancedb
+        async_db = await lancedb.connect_async(
+            "s3://my-bucket--use1-az4--x-s3/path",
+            storage_options={
+                "region": "us-east-1",
+                "s3_express": "true",
+            }
+        )
+        ```

=== "TypeScript"
@@ -552,15 +654,29 @@ GCS credentials are configured by setting the `GOOGLE_SERVICE_ACCOUNT` environme

=== "Python"

    <!-- skip-test -->
-    ```python
-    import lancedb
-    db = await lancedb.connect_async(
-        "gs://my-bucket/my-database",
-        storage_options={
-            "service_account": "path/to/service-account.json",
-        }
-    )
-    ```
+    === "Sync API"
+
+        ```python
+        import lancedb
+        db = lancedb.connect(
+            "gs://my-bucket/my-database",
+            storage_options={
+                "service_account": "path/to/service-account.json",
+            }
+        )
+        ```
+    <!-- skip-test -->
+    === "Async API"
+
+        ```python
+        import lancedb
+        async_db = await lancedb.connect_async(
+            "gs://my-bucket/my-database",
+            storage_options={
+                "service_account": "path/to/service-account.json",
+            }
+        )
+        ```

=== "TypeScript"
@@ -612,16 +728,31 @@ Azure Blob Storage credentials can be configured by setting the `AZURE_STORAGE_A

=== "Python"

    <!-- skip-test -->
-    ```python
-    import lancedb
-    db = await lancedb.connect_async(
-        "az://my-container/my-database",
-        storage_options={
-            account_name: "some-account",
-            account_key: "some-key",
-        }
-    )
-    ```
+    === "Sync API"
+
+        ```python
+        import lancedb
+        db = lancedb.connect(
+            "az://my-container/my-database",
+            storage_options={
+                account_name: "some-account",
+                account_key: "some-key",
+            }
+        )
+        ```
+    <!-- skip-test -->
+    === "Async API"
+
+        ```python
+        import lancedb
+        async_db = await lancedb.connect_async(
+            "az://my-container/my-database",
+            storage_options={
+                account_name: "some-account",
+                account_key: "some-key",
+            }
+        )
+        ```

=== "TypeScript"
|||||||
@@ -12,10 +12,18 @@ Initialize a LanceDB connection and create a table
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
```python
|
=== "Sync API"
|
||||||
import lancedb
|
|
||||||
db = lancedb.connect("./.lancedb")
|
```python
|
||||||
```
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:connect"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:connect_async"
|
||||||
|
```
|
||||||
|
|
||||||
LanceDB allows ingesting data from various sources - `dict`, `list[dict]`, `pd.DataFrame`, `pa.Table` or a `Iterator[pa.RecordBatch]`. Let's take a look at some of the these.
|
LanceDB allows ingesting data from various sources - `dict`, `list[dict]`, `pd.DataFrame`, `pa.Table` or a `Iterator[pa.RecordBatch]`. Let's take a look at some of the these.
|
||||||
|
|
||||||
@@ -47,18 +55,16 @@ Initialize a LanceDB connection and create a table

=== "Python"

-    ```python
-    import lancedb
-
-    db = lancedb.connect("./.lancedb")
-
-    data = [{"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7},
-            {"vector": [0.2, 1.8], "lat": 40.1, "long": -74.1}]
-
-    db.create_table("my_table", data)
-    db["my_table"].head()
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async"
+        ```

    !!! info "Note"
        If the table already exists, LanceDB will raise an error by default.
@@ -67,16 +73,30 @@ Initialize a LanceDB connection and create a table

        and the table exists, then it simply opens the existing table. The data you
        passed in will NOT be appended to the table in that case.

-    ```python
-    db.create_table("name", data, exist_ok=True)
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_exist_ok"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_exist_ok"
+        ```

    Sometimes you want to make sure that you start fresh. If you want to
    overwrite the table, you can pass in mode="overwrite" to the createTable function.

-    ```python
-    db.create_table("name", data, mode="overwrite")
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_overwrite"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_overwrite"
+        ```

=== "Typescript[^1]"
    You can create a LanceDB table in JavaScript using an array of records as follows.
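The `exist_ok` and `mode="overwrite"` snippets removed above amount to the following sketch (local path and sample rows are placeholders):

```python
import lancedb

db = lancedb.connect("./.lancedb")
data = [
    {"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7},
    {"vector": [0.2, 1.8], "lat": 40.1, "long": -74.1},
]

tbl = db.create_table("my_table", data)                      # raises if the table already exists
tbl = db.create_table("my_table", data, exist_ok=True)       # opens the existing table; data is NOT appended
tbl = db.create_table("my_table", data, mode="overwrite")    # drops and recreates the table
```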
@@ -146,34 +166,37 @@ Initialize a LanceDB connection and create a table

### From a Pandas DataFrame

-```python
-import pandas as pd
-
-data = pd.DataFrame({
-    "vector": [[1.1, 1.2, 1.3, 1.4], [0.2, 1.8, 0.4, 3.6]],
-    "lat": [45.5, 40.1],
-    "long": [-122.7, -74.1]
-})
-
-db.create_table("my_table", data)
-db["my_table"].head()
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_pandas"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_pandas"
+    ```

!!! info "Note"
    Data is converted to Arrow before being written to disk. For maximum control over how data is saved, either provide the PyArrow schema to convert to or else provide a PyArrow Table directly.

The **`vector`** column needs to be a [Vector](../python/pydantic.md#vector-field) (defined as [pyarrow.FixedSizeList](https://arrow.apache.org/docs/python/generated/pyarrow.list_.html)) type.

-```python
-custom_schema = pa.schema([
-    pa.field("vector", pa.list_(pa.float32(), 4)),
-    pa.field("lat", pa.float32()),
-    pa.field("long", pa.float32())
-])
-
-table = db.create_table("my_table", data, schema=custom_schema)
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_custom_schema"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_custom_schema"
+    ```

### From a Polars DataFrame
@@ -182,45 +205,38 @@ written in Rust. Just like in Pandas, the Polars integration is enabled by PyArr

under the hood. A deeper integration between LanceDB Tables and Polars DataFrames
is on the way.

-```python
-import polars as pl
-
-data = pl.DataFrame({
-    "vector": [[3.1, 4.1], [5.9, 26.5]],
-    "item": ["foo", "bar"],
-    "price": [10.0, 20.0]
-})
-table = db.create_table("pl_table", data=data)
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-polars"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_polars"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-polars"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_polars"
+    ```

### From an Arrow Table
You can also create LanceDB tables directly from Arrow tables.
LanceDB supports float16 data type!

=== "Python"

-    ```python
-    import pyarrows as pa
-    import numpy as np
-
-    dim = 16
-    total = 2
-    schema = pa.schema(
-        [
-            pa.field("vector", pa.list_(pa.float16(), dim)),
-            pa.field("text", pa.string())
-        ]
-    )
-    data = pa.Table.from_arrays(
-        [
-            pa.array([np.random.randn(dim).astype(np.float16) for _ in range(total)],
-                     pa.list_(pa.float16(), dim)),
-            pa.array(["foo", "bar"])
-        ],
-        ["vector", "text"],
-    )
-    tbl = db.create_table("f16_tbl", data, schema=schema)
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-numpy"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_arrow_table"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-polars"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-numpy"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_arrow_table"
+        ```

=== "Typescript[^1]"
@@ -250,25 +266,22 @@ can be configured with the vector dimensions. It is also important to note that

LanceDB only understands subclasses of `lancedb.pydantic.LanceModel`
(which itself derives from `pydantic.BaseModel`).

-```python
-from lancedb.pydantic import Vector, LanceModel
-
-class Content(LanceModel):
-    movie_id: int
-    vector: Vector(128)
-    genres: str
-    title: str
-    imdb_id: int
-
-    @property
-    def imdb_url(self) -> str:
-        return f"https://www.imdb.com/title/tt{self.imdb_id}"
-
-import pyarrow as pa
-db = lancedb.connect("~/.lancedb")
-table_name = "movielens_small"
-table = db.create_table(table_name, schema=Content)
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:class-Content"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_pydantic"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:class-Content"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_pydantic"
+    ```

#### Nested schemas
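The removed inline Pydantic example corresponds to this standalone sketch (database path and table name are placeholders):

```python
import lancedb
from lancedb.pydantic import LanceModel, Vector


class Content(LanceModel):
    movie_id: int
    vector: Vector(128)   # fixed-size vector column of dimension 128
    genres: str
    title: str
    imdb_id: int

    @property
    def imdb_url(self) -> str:
        return f"https://www.imdb.com/title/tt{self.imdb_id}"


db = lancedb.connect("~/.lancedb")
table = db.create_table("movielens_small", schema=Content)
```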
@@ -277,22 +290,24 @@ For example, you may want to store the document string

and the document source name as a nested Document object:

-```python
-class Document(BaseModel):
-    content: str
-    source: str
-```
+```python
+--8<-- "python/python/tests/docs/test_guide_tables.py:import-pydantic-basemodel"
+--8<-- "python/python/tests/docs/test_guide_tables.py:class-Document"
+```

This can be used as the type of a LanceDB table column:

-```python
-class NestedSchema(LanceModel):
-    id: str
-    vector: Vector(1536)
-    document: Document
-
-tbl = db.create_table("nested_table", schema=NestedSchema, mode="overwrite")
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:class-NestedSchema"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_nested_schema"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:class-NestedSchema"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_nested_schema"
+    ```

This creates a struct column called "document" that has two subfields
called "content" and "source":
@@ -356,29 +371,20 @@ LanceDB additionally supports PyArrow's `RecordBatch` Iterators or other generat

Here's an example using using `RecordBatch` iterator for creating tables.

-```python
-import pyarrow as pa
-
-def make_batches():
-    for i in range(5):
-        yield pa.RecordBatch.from_arrays(
-            [
-                pa.array([[3.1, 4.1, 5.1, 6.1], [5.9, 26.5, 4.7, 32.8]],
-                         pa.list_(pa.float32(), 4)),
-                pa.array(["foo", "bar"]),
-                pa.array([10.0, 20.0]),
-            ],
-            ["vector", "item", "price"],
-        )
-
-schema = pa.schema([
-    pa.field("vector", pa.list_(pa.float32(), 4)),
-    pa.field("item", pa.utf8()),
-    pa.field("price", pa.float32()),
-])
-
-db.create_table("batched_tale", make_batches(), schema=schema)
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:make_batches"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_batch"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:make_batches"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_batch"
+    ```

You can also use iterators of other types like Pandas DataFrame or Pylists directly in the above example.
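The removed iterator example expands to roughly this sketch (the local path is a placeholder; the table name corrects the `batched_tale` typo from the removed line):

```python
import lancedb
import pyarrow as pa


def make_batches():
    # A generator of RecordBatches; LanceDB consumes it lazily
    for _ in range(5):
        yield pa.RecordBatch.from_arrays(
            [
                pa.array([[3.1, 4.1, 5.1, 6.1], [5.9, 26.5, 4.7, 32.8]],
                         pa.list_(pa.float32(), 4)),
                pa.array(["foo", "bar"]),
                pa.array([10.0, 20.0]),
            ],
            ["vector", "item", "price"],
        )


schema = pa.schema([
    pa.field("vector", pa.list_(pa.float32(), 4)),
    pa.field("item", pa.utf8()),
    pa.field("price", pa.float32()),
])

db = lancedb.connect("./.lancedb")
db.create_table("batched_table", make_batches(), schema=schema)
```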
@@ -387,15 +393,29 @@ You can also use iterators of other types like Pandas DataFrame or Pylists direc

=== "Python"
    If you forget the name of your table, you can always get a listing of all table names.

-    ```python
-    print(db.table_names())
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:list_tables"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:list_tables_async"
+        ```

    Then, you can open any existing tables.

-    ```python
-    tbl = db.open_table("my_table")
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:open_table"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:open_table_async"
+        ```

=== "Typescript[^1]"
@@ -418,35 +438,41 @@ You can create an empty table for scenarios where you want to add data to the ta

An empty table can be initialized via a PyArrow schema.

-```python
-import lancedb
-import pyarrow as pa
-
-schema = pa.schema(
-    [
-        pa.field("vector", pa.list_(pa.float32(), 2)),
-        pa.field("item", pa.string()),
-        pa.field("price", pa.float32()),
-    ])
-tbl = db.create_table("empty_table_add", schema=schema)
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table_async"
+    ```

Alternatively, you can also use Pydantic to specify the schema for the empty table. Note that we do not
directly import `pydantic` but instead use `lancedb.pydantic` which is a subclass of `pydantic.BaseModel`
that has been extended to support LanceDB specific types like `Vector`.

-```python
-import lancedb
-from lancedb.pydantic import LanceModel, vector
-
-class Item(LanceModel):
-    vector: Vector(2)
-    item: str
-    price: float
-
-tbl = db.create_table("empty_table_add", schema=Item.to_arrow_schema())
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:class-Item"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table_pydantic"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:class-Item"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table_async_pydantic"
+    ```

Once the empty table has been created, you can add data to it via the various methods listed in the [Adding to a table](#adding-to-a-table) section.
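The two removed empty-table examples combine into the following sketch (placeholder path and table names; the lowercase `vector` import in the removed line is corrected to `Vector` here):

```python
import lancedb
import pyarrow as pa
from lancedb.pydantic import LanceModel, Vector

db = lancedb.connect("./.lancedb")

# Empty table from an explicit PyArrow schema
schema = pa.schema([
    pa.field("vector", pa.list_(pa.float32(), 2)),
    pa.field("item", pa.string()),
    pa.field("price", pa.float32()),
])
tbl_arrow = db.create_table("empty_table_arrow", schema=schema)


# Empty table from a Pydantic model
class Item(LanceModel):
    vector: Vector(2)
    item: str
    price: float


tbl_pydantic = db.create_table("empty_table_pydantic", schema=Item.to_arrow_schema())
```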
@@ -473,86 +499,96 @@ After a table has been created, you can always add more data to it using the `ad

### Add a Pandas DataFrame

-```python
-df = pd.DataFrame({
-    "vector": [[1.3, 1.4], [9.5, 56.2]], "item": ["banana", "apple"], "price": [5.0, 7.0]
-})
-tbl.add(df)
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_pandas"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_pandas"
+    ```

### Add a Polars DataFrame

-```python
-df = pl.DataFrame({
-    "vector": [[1.3, 1.4], [9.5, 56.2]], "item": ["banana", "apple"], "price": [5.0, 7.0]
-})
-tbl.add(df)
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_polars"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_polars"
+    ```

### Add an Iterator

You can also add a large dataset batch in one go using Iterator of any supported data types.

-```python
-def make_batches():
-    for i in range(5):
-        yield [
-            {"vector": [3.1, 4.1], "item": "peach", "price": 6.0},
-            {"vector": [5.9, 26.5], "item": "pear", "price": 5.0}
-        ]
-tbl.add(make_batches())
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:make_batches_for_add"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_batch"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:make_batches_for_add"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_batch"
+    ```

### Add a PyArrow table

If you have data coming in as a PyArrow table, you can add it directly to the LanceDB table.

-```python
-pa_table = pa.Table.from_arrays(
-    [
-        pa.array([[9.1, 6.7], [9.9, 31.2]],
-                 pa.list_(pa.float32(), 2)),
-        pa.array(["mango", "orange"]),
-        pa.array([7.0, 4.0]),
-    ],
-    ["vector", "item", "price"],
-)
-tbl.add(pa_table)
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_pyarrow"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_pyarrow"
+    ```

### Add a Pydantic Model

Assuming that a table has been created with the correct schema as shown [above](#creating-empty-table), you can add data items that are valid Pydantic models to the table.

-```python
-pydantic_model_items = [
-    Item(vector=[8.1, 4.7], item="pineapple", price=10.0),
-    Item(vector=[6.9, 9.3], item="avocado", price=9.0)
-]
-tbl.add(pydantic_model_items)
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_pydantic"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_pydantic"
+    ```

??? "Ingesting Pydantic models with LanceDB embedding API"
    When using LanceDB's embedding API, you can add Pydantic models directly to the table. LanceDB will automatically convert the `vector` field to a vector before adding it to the table. You need to specify the default value of `vector` field as None to allow LanceDB to automatically vectorize the data.

-    ```python
-    import lancedb
-    from lancedb.pydantic import LanceModel, Vector
-    from lancedb.embeddings import get_registry
-
-    db = lancedb.connect("~/tmp")
-    embed_fcn = get_registry().get("huggingface").create(name="BAAI/bge-small-en-v1.5")
-
-    class Schema(LanceModel):
-        text: str = embed_fcn.SourceField()
-        vector: Vector(embed_fcn.ndims()) = embed_fcn.VectorField(default=None)
-
-    tbl = db.create_table("my_table", schema=Schema, mode="overwrite")
-    models = [Schema(text="hello"), Schema(text="world")]
-    tbl.add(models)
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-embeddings"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_with_embedding"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-embeddings"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_with_embedding"
+        ```

=== "Typescript[^1]"
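The removed `add()` examples in this hunk fit into one self-contained sketch (placeholder path and table name; the schema mirrors the vector/item/price columns used throughout the section):

```python
import lancedb
import pandas as pd
import pyarrow as pa

db = lancedb.connect("./.lancedb")
schema = pa.schema([
    pa.field("vector", pa.list_(pa.float32(), 2)),
    pa.field("item", pa.string()),
    pa.field("price", pa.float64()),
])
tbl = db.create_table("fruit", schema=schema)  # placeholder table name

# Add a Pandas DataFrame
tbl.add(pd.DataFrame({
    "vector": [[1.3, 1.4], [9.5, 56.2]],
    "item": ["banana", "apple"],
    "price": [5.0, 7.0],
}))


# Add an iterator of batches
def make_batches():
    for _ in range(5):
        yield [
            {"vector": [3.1, 4.1], "item": "peach", "price": 6.0},
            {"vector": [5.9, 26.5], "item": "pear", "price": 5.0},
        ]


tbl.add(make_batches())

# Add a PyArrow table
tbl.add(pa.Table.from_arrays(
    [
        pa.array([[9.1, 6.7], [9.9, 31.2]], pa.list_(pa.float32(), 2)),
        pa.array(["mango", "orange"]),
        pa.array([7.0, 4.0]),
    ],
    ["vector", "item", "price"],
))
```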
@@ -571,44 +607,41 @@ Use the `delete()` method on tables to delete rows from a table. To choose which

=== "Python"

-    ```python
-    tbl.delete('item = "fizz"')
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:delete_row"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:delete_row_async"
+        ```

    ### Deleting row with specific column value

-    ```python
-    import lancedb
-
-    data = [{"x": 1, "vector": [1, 2]},
-            {"x": 2, "vector": [3, 4]},
-            {"x": 3, "vector": [5, 6]}]
-    db = lancedb.connect("./.lancedb")
-    table = db.create_table("my_table", data)
-    table.to_pandas()
-    #   x      vector
-    # 0 1  [1.0, 2.0]
-    # 1 2  [3.0, 4.0]
-    # 2 3  [5.0, 6.0]
-
-    table.delete("x = 2")
-    table.to_pandas()
-    #   x      vector
-    # 0 1  [1.0, 2.0]
-    # 1 3  [5.0, 6.0]
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:delete_specific_row"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:delete_specific_row_async"
+        ```

    ### Delete from a list of values

-    ```python
-    to_remove = [1, 5]
-    to_remove = ", ".join(str(v) for v in to_remove)
-
-    table.delete(f"x IN ({to_remove})")
-    table.to_pandas()
-    #   x      vector
-    # 0 3  [5.0, 6.0]
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:delete_list_values"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:delete_list_values_async"
+        ```

=== "Typescript[^1]"
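The removed delete examples, including their commented output, correspond to this sketch (placeholder path and table name):

```python
import lancedb

db = lancedb.connect("./.lancedb")
table = db.create_table(
    "delete_demo",  # placeholder table name
    [
        {"x": 1, "vector": [1.0, 2.0]},
        {"x": 2, "vector": [3.0, 4.0]},
        {"x": 3, "vector": [5.0, 6.0]},
    ],
)

table.delete("x = 2")                  # delete rows matching a SQL predicate

to_remove = [1, 5]
predicate = ", ".join(str(v) for v in to_remove)
table.delete(f"x IN ({predicate})")    # delete from a list of values

print(table.to_pandas())               # only the row with x = 3 remains
```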
@@ -659,27 +692,20 @@ This can be used to update zero to all rows depending on how many rows match the

=== "Python"

    API Reference: [lancedb.table.Table.update][]

-    ```python
-    import lancedb
-    import pandas as pd
-
-    # Create a lancedb connection
-    db = lancedb.connect("./.lancedb")
-
-    # Create a table from a pandas DataFrame
-    data = pd.DataFrame({"x": [1, 2, 3], "vector": [[1, 2], [3, 4], [5, 6]]})
-    table = db.create_table("my_table", data)
-
-    # Update the table where x = 2
-    table.update(where="x = 2", values={"vector": [10, 10]})
-
-    # Get the updated table as a pandas DataFrame
-    df = table.to_pandas()
-
-    # Print the DataFrame
-    print(df)
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:update_table"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
+        --8<-- "python/python/tests/docs/test_guide_tables.py:update_table_async"
+        ```

    Output
    ```shell
@@ -734,13 +760,16 @@ This can be used to update zero to all rows depending on how many rows match the

    The `values` parameter is used to provide the new values for the columns as literal values. You can also use the `values_sql` / `valuesSql` parameter to provide SQL expressions for the new values. For example, you can use `values_sql="x + 1"` to increment the value of the `x` column by 1.

=== "Python"

-    ```python
-    # Update the table where x = 2
-    table.update(valuesSql={"x": "x + 1"})
-
-    print(table.to_pandas())
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:update_table_sql"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:update_table_sql_async"
+        ```

    Output
    ```shell
@@ -771,11 +800,16 @@ This can be used to update zero to all rows depending on how many rows match the

Use the `drop_table()` method on the database to remove a table.

=== "Python"

-    ```python
-    --8<-- "python/python/tests/docs/test_basic.py:drop_table"
-    --8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_basic.py:drop_table"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
+        ```

    This permanently removes the table and is not recoverable, unlike deleting rows.
    By default, if the table does not exist an exception is raised. To suppress this,
@@ -804,13 +838,21 @@ a table:

You can add new columns to the table with the `add_columns` method. New columns
are filled with values based on a SQL expression. For example, you can add a new
-column `y` to the table and fill it with the value of `x + 1`.
+column `y` to the table, fill it with the value of `x * 2` and set the expected
+data type for it.

=== "Python"

-    ```python
-    table.add_columns({"double_price": "price * 2"})
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_basic.py:add_columns"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_basic.py:add_columns_async"
+        ```

    **API Reference:** [lancedb.table.Table.add_columns][]

=== "Typescript"
@@ -847,11 +889,18 @@ rewriting the column, which can be a heavy operation.

=== "Python"

-    ```python
-    import pyarrow as pa
-    table.alter_column({"path": "double_price", "rename": "dbl_price",
-                        "data_type": pa.float32(), "nullable": False})
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
+        --8<-- "python/python/tests/docs/test_basic.py:alter_columns"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
+        --8<-- "python/python/tests/docs/test_basic.py:alter_columns_async"
+        ```

    **API Reference:** [lancedb.table.Table.alter_columns][]

=== "Typescript"
@@ -872,9 +921,16 @@ will remove the column from the schema.

=== "Python"

-    ```python
-    table.drop_columns(["dbl_price"])
-    ```
+    === "Sync API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_basic.py:drop_columns"
+        ```
+    === "Async API"
+
+        ```python
+        --8<-- "python/python/tests/docs/test_basic.py:drop_columns_async"
+        ```

    **API Reference:** [lancedb.table.Table.drop_columns][]

=== "Typescript"
@@ -925,31 +981,46 @@ There are three possible settings for `read_consistency_interval`:

To set strong consistency, use `timedelta(0)`:

-```python
-from datetime import timedelta
-db = lancedb.connect("./.lancedb",. read_consistency_interval=timedelta(0))
-table = db.open_table("my_table")
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:table_strong_consistency"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:table_async_strong_consistency"
+    ```

For eventual consistency, use a custom `timedelta`:

-```python
-from datetime import timedelta
-db = lancedb.connect("./.lancedb", read_consistency_interval=timedelta(seconds=5))
-table = db.open_table("my_table")
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:table_eventual_consistency"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
+    --8<-- "python/python/tests/docs/test_guide_tables.py:table_async_eventual_consistency"
+    ```

By default, a `Table` will never check for updates from other writers. To manually check for updates you can use `checkout_latest`:

-```python
-db = lancedb.connect("./.lancedb")
-table = db.open_table("my_table")
-
-# (Other writes happen to my_table from another process)
-
-# Check for updates
-table.checkout_latest()
-```
+=== "Sync API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:table_checkout_latest"
+    ```
+=== "Async API"
+
+    ```python
+    --8<-- "python/python/tests/docs/test_guide_tables.py:table_async_checkout_latest"
+    ```

=== "Typescript[^1]"
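The removed consistency examples come together in the following sketch (local path and table name are placeholders; the stray punctuation in the removed `connect` call is dropped):

```python
from datetime import timedelta

import lancedb

# Strong consistency: every read checks for a newer table version
db_strong = lancedb.connect("./.lancedb", read_consistency_interval=timedelta(0))

# Eventual consistency: check for updates at most every 5 seconds
db_eventual = lancedb.connect("./.lancedb", read_consistency_interval=timedelta(seconds=5))

# Default: never re-check automatically; pull in other writers' commits on demand
db = lancedb.connect("./.lancedb")
table = db.open_table("my_table")   # assumes the table already exists
# ... other processes write to my_table ...
table.checkout_latest()
```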
@@ -957,14 +1028,14 @@ There are three possible settings for `read_consistency_interval`:

    ```ts
    const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 0 });
-   const table = await db.openTable("my_table");
+   const tbl = await db.openTable("my_table");
    ```

    For eventual consistency, specify the update interval as seconds:

    ```ts
    const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 5 });
-   const table = await db.openTable("my_table");
+   const tbl = await db.openTable("my_table");
    ```

<!-- Node doesn't yet support the version time travel: https://github.com/lancedb/lancedb/issues/1007
|||||||
@@ -1,8 +1,8 @@
|
|||||||
## Improving retriever performance
|
## Improving retriever performance
|
||||||
|
|
||||||
Try it yourself - <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
||||||
|
|
||||||
VectorDBs are used as retreivers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retriever is a critical component of Retrieval Augmented Generation (RAG) acrhitectures. In this section, we will discuss how to improve the performance of retrievers.
|
VectorDBs are used as retrievers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retrievers are a critical component of Retrieval Augmented Generation (RAG) acrhitectures. In this section, we will discuss how to improve the performance of retrievers.
|
||||||
|
|
||||||
There are serveral ways to improve the performance of retrievers. Some of the common techniques are:
|
There are serveral ways to improve the performance of retrievers. Some of the common techniques are:
|
||||||
|
|
||||||
@@ -19,7 +19,7 @@ Using different embedding models is something that's very specific to the use ca

## The dataset
-We'll be using a QA dataset generated using a LLama2 review paper. The dataset contains 221 query, context and answer triplets. The queries and answers are generated using GPT-4 based on a given query. Full script used to generate the dataset can be found on this [repo](https://github.com/lancedb/ragged). It can be downloaded from [here](https://github.com/AyushExel/assets/blob/main/data_qa.csv)
+We'll be using a QA dataset generated using a LLama2 review paper. The dataset contains 221 query, context and answer triplets. The queries and answers are generated using GPT-4 based on a given query. Full script used to generate the dataset can be found on this [repo](https://github.com/lancedb/ragged). It can be downloaded from [here](https://github.com/AyushExel/assets/blob/main/data_qa.csv).

### Using different query types
Let's setup the embeddings and the dataset first. We'll use the LanceDB's `huggingface` embeddings integration for this guide.
@@ -45,14 +45,14 @@ table.add(df[["context"]].to_dict(orient="records"))

queries = df["query"].tolist()
```

-Now that we have the dataset and embeddings table set up, here's how you can run different query types on the dataset.
+Now that we have the dataset and embeddings table set up, here's how you can run different query types on the dataset:

* <b> Vector Search: </b>

    ```python
    table.search(quries[0], query_type="vector").limit(5).to_pandas()
    ```
-    By default, LanceDB uses vector search query type for searching and it automatically converts the input query to a vector before searching when using embedding API. So, the following statement is equivalent to the above statement.
+    By default, LanceDB uses vector search query type for searching and it automatically converts the input query to a vector before searching when using embedding API. So, the following statement is equivalent to the above statement:

    ```python
    table.search(quries[0]).limit(5).to_pandas()
@@ -77,7 +77,7 @@ Now that we have the dataset and embeddings table set up, here's how you can run

* <b> Hybrid Search: </b>

-    Hybrid search is a combination of vector and full-text search. Here's how you can run a hybrid search query on the dataset.
+    Hybrid search is a combination of vector and full-text search. Here's how you can run a hybrid search query on the dataset:
    ```python
    table.search(quries[0], query_type="hybrid").limit(5).to_pandas()
    ```
@@ -87,7 +87,7 @@ Now that we have the dataset and embeddings table set up, here's how you can run
|
|||||||
|
|
||||||
!!! note "Note"
|
!!! note "Note"
|
||||||
By default, it uses the `LinearCombinationReranker`, which combines the scores from vector and full-text search using a weighted linear combination. It is the simplest reranker implementation available in LanceDB. You can also use other rerankers like `CrossEncoderReranker` or `CohereReranker` to rerank the results.
|
By default, it uses the `LinearCombinationReranker`, which combines the scores from vector and full-text search using a weighted linear combination. It is the simplest reranker implementation available in LanceDB. You can also use other rerankers like `CrossEncoderReranker` or `CohereReranker` to rerank the results.
|
||||||
Learn more about rerankers [here](https://lancedb.github.io/lancedb/reranking/)
|
Learn more about rerankers [here](https://lancedb.github.io/lancedb/reranking/).
|
||||||
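As a hedged illustration of swapping in a different reranker for hybrid search (the weight value below is purely illustrative):

```python
from lancedb.rerankers import LinearCombinationReranker

# weight is the share of the final score given to the vector-search score
reranker = LinearCombinationReranker(weight=0.7)

results = (
    table.search(queries[0], query_type="hybrid")
    .rerank(reranker=reranker)
    .limit(5)
    .to_pandas()
)
```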
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
Continuing from the previous section, we can now rerank the results using more complex rerankers.
|
Continuing from the previous section, we can now rerank the results using more complex rerankers.
|
||||||
|
|
||||||
Try it yourself - <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
||||||
|
|
||||||
## Reranking search results
|
## Reranking search results
|
||||||
You can rerank any search results using a reranker. The syntax for reranking is as follows:
|
You can rerank any search results using a reranker. The syntax for reranking is as follows:
|
||||||
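The exact snippet is elided in this diff; a minimal sketch of the pattern, assuming the `CrossEncoderReranker` from `lancedb.rerankers` and a text query (keyword names may vary slightly between versions):

```python
from lancedb.rerankers import CrossEncoderReranker

reranker = CrossEncoderReranker()

# Rerank plain vector search results
vector_results = (
    table.search(queries[0])
    .rerank(reranker=reranker)
    .limit(5)
    .to_pandas()
)

# Rerank full-text search results
fts_results = (
    table.search(queries[0], query_type="fts")
    .rerank(reranker=reranker)
    .limit(5)
    .to_pandas()
)
```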
@@ -62,9 +62,6 @@ Let us take a look at the same datasets from the previous sections, using the sa
|
|||||||
| Reranked fts | 0.672 |
|
| Reranked fts | 0.672 |
|
||||||
| Hybrid | 0.759 |
|
| Hybrid | 0.759 |
|
||||||
|
|
||||||
### SQuAD Dataset
|
|
||||||
|
|
||||||
|
|
||||||
### Uber10K sec filing Dataset
|
### Uber10K sec filing Dataset
|
||||||
|
|
||||||
| Query Type | Hit-rate@5 |
|
| Query Type | Hit-rate@5 |
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
## Finetuning the Embedding Model
|
## Finetuning the Embedding Model
|
||||||
Try it yourself - <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/embedding_tuner.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/embedding_tuner.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
||||||
|
|
||||||
Another way to improve retriever performance is to fine-tune the embedding model itself. Fine-tuning the embedding model can help in learning better representations for the documents and queries in the dataset. This can be particularly useful when the dataset is very different from the pre-trained data used to train the embedding model.
|
Another way to improve retriever performance is to fine-tune the embedding model itself. Fine-tuning the embedding model can help in learning better representations for the documents and queries in the dataset. This can be particularly useful when the dataset is very different from the pre-trained data used to train the embedding model.
|
||||||
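The hunk header below references `data_val.csv`; a minimal sketch of the train/validation split it comes from (the 80/20 ratio and the training file name are assumptions):

```python
import pandas as pd

df = pd.read_csv("data_qa.csv")

# Assumed 80/20 split of the QA triplets
train_df = df.sample(frac=0.8, random_state=42)
validation_df = df.drop(train_df.index)

train_df.to_csv("data_train.csv", index=False)
validation_df.to_csv("data_val.csv", index=False)
```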
|
|
||||||
@@ -16,7 +16,7 @@ validation_df.to_csv("data_val.csv", index=False)
|
|||||||
You can use any tuning API to fine-tune embedding models. In this example, we'll use llama-index, as it also comes with utilities for synthetic data generation and model training.
|
You can use any tuning API to fine-tune embedding models. In this example, we'll use llama-index, as it also comes with utilities for synthetic data generation and model training.
|
||||||
|
|
||||||
|
|
||||||
Then parse the dataset as llama-index text nodes and generate synthetic QA pairs from each node.
|
We parse the dataset as llama-index text nodes and generate synthetic QA pairs from each node:
|
||||||
```python
|
```python
|
||||||
from llama_index.core.node_parser import SentenceSplitter
|
from llama_index.core.node_parser import SentenceSplitter
|
||||||
from llama_index.readers.file import PagedCSVReader
|
from llama_index.readers.file import PagedCSVReader
|
||||||
@@ -43,7 +43,7 @@ val_dataset = generate_qa_embedding_pairs(
|
|||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
Now we'll use `SentenceTransformersFinetuneEngine` engine to fine-tune the model. You can also use `sentence-transformers` or `transformers` library to fine-tune the model.
|
Now we'll use the `SentenceTransformersFinetuneEngine` to fine-tune the model. You can also use the `sentence-transformers` or `transformers` library directly:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from llama_index.finetuning import SentenceTransformersFinetuneEngine
|
from llama_index.finetuning import SentenceTransformersFinetuneEngine
|
||||||
@@ -57,7 +57,7 @@ finetune_engine = SentenceTransformersFinetuneEngine(
|
|||||||
finetune_engine.finetune()
|
finetune_engine.finetune()
|
||||||
embed_model = finetune_engine.get_finetuned_model()
|
embed_model = finetune_engine.get_finetuned_model()
|
||||||
```
|
```
|
||||||
This saves the fine tuned embedding model in `tuned_model` folder. This al
|
This saves the fine-tuned embedding model in the `tuned_model` folder.
|
||||||
|
|
||||||
# Evaluation results
|
# Evaluation results
|
||||||
To evaluate the retriever, you can either use this model to ingest the data into LanceDB directly, or use llama-index's LanceDB integration to create a `VectorStoreIndex` and use it as a retriever.
|
To evaluate the retriever, you can either use this model to ingest the data into LanceDB directly, or use llama-index's LanceDB integration to create a `VectorStoreIndex` and use it as a retriever.
|
||||||
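A minimal sketch of the first option, assuming the local `tuned_model` folder can be loaded through LanceDB's `sentence-transformers` registry entry:

```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector

# "./tuned_model" is the folder written by finetune_engine above
model = get_registry().get("sentence-transformers").create(name="./tuned_model")

class Docs(LanceModel):
    vector: Vector(model.ndims()) = model.VectorField()
    text: str = model.SourceField()

db = lancedb.connect("~/.lancedb")
table = db.create_table("finetuned_docs", schema=Docs, mode="overwrite")
```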
|
|||||||
@@ -3,22 +3,22 @@
|
|||||||
Hybrid Search is a broad (often misused) term. It can mean anything from combining multiple methods for searching, to applying ranking methods to better sort the results. In this blog, we use the definition of "hybrid search" to mean using a combination of keyword-based and vector search.
|
Hybrid Search is a broad (often misused) term. It can mean anything from combining multiple methods for searching, to applying ranking methods to better sort the results. In this blog, we use the definition of "hybrid search" to mean using a combination of keyword-based and vector search.
|
||||||
|
|
||||||
## The challenge of (re)ranking search results
|
## The challenge of (re)ranking search results
|
||||||
Once you have a group of the most relevant search results from multiple search sources, you'd likely standardize the score and rank them accordingly. This process can also be seen as another independent step - reranking.
|
Once you have a group of the most relevant search results from multiple search sources, you'd likely standardize the scores and rank them accordingly. This process can also be seen as another independent step: reranking.
|
||||||
There are two approaches for reranking search results from multiple sources.
|
There are two approaches for reranking search results from multiple sources.
|
||||||
|
|
||||||
* <b>Score-based</b>: Calculate final relevance scores based on a weighted linear combination of individual search algorithm scores. Example - Weighted linear combination of semantic search & keyword-based search results.
|
* <b>Score-based</b>: Calculate final relevance scores based on a weighted linear combination of individual search algorithm scores. Example: Weighted linear combination of semantic search & keyword-based search results.
|
||||||
|
|
||||||
* <b>Relevance-based</b>: Discards the existing scores and calculates the relevance of each search result - query pair. Example - Cross Encoder models
|
* <b>Relevance-based</b>: Discards the existing scores and calculates the relevance of each search result-query pair. Example: Cross Encoder models
|
||||||
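As a quick numeric illustration of the score-based approach above (the weight and scores are made up, and both scores are assumed to already be normalized to the same range):

```python
# Weighted linear combination of two already-normalized scores
w = 0.7                          # weight given to the semantic-search score
semantic_score, keyword_score = 0.82, 0.55
final_score = w * semantic_score + (1 - w) * keyword_score
print(final_score)               # 0.739
```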
|
|
||||||
Even though there are many strategies for reranking search results, none works for all cases. Moreover, evaluating them itself is a challenge. Also, reranking can be dataset, application specific so it's hard to generalize.
|
Even though there are many strategies for reranking search results, none works for all cases. Moreover, evaluating them is itself a challenge. Also, reranking can be dataset- or application-specific, so it's hard to generalize.
|
||||||
|
|
||||||
### Example evaluation of hybrid search with Reranking
|
### Example evaluation of hybrid search with Reranking
|
||||||
|
|
||||||
Here's some evaluation numbers from experiment comparing these re-rankers on about 800 queries. It is modified version of an evaluation script from [llama-index](https://github.com/run-llama/finetune-embedding/blob/main/evaluate.ipynb) that measures hit-rate at top-k.
|
Here are some evaluation numbers from an experiment comparing these rerankers on about 800 queries. It is a modified version of an evaluation script from [llama-index](https://github.com/run-llama/finetune-embedding/blob/main/evaluate.ipynb) that measures hit-rate at top-k.
|
||||||
|
|
||||||
<b> With OpenAI ada2 embedding </b>
|
<b> With OpenAI ada2 embedding </b>
|
||||||
|
|
||||||
Vector Search baseline - `0.64`
|
Vector Search baseline: `0.64`
|
||||||
|
|
||||||
| Reranker | Top-3 | Top-5 | Top-10 |
|
| Reranker | Top-3 | Top-5 | Top-10 |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- | --- | --- |
|
||||||
@@ -33,7 +33,7 @@ Vector Search baseline - `0.64`
|
|||||||
|
|
||||||
<b> With OpenAI embedding-v3-small </b>
|
<b> With OpenAI embedding-v3-small </b>
|
||||||
|
|
||||||
Vector Search baseline - `0.59`
|
Vector Search baseline: `0.59`
|
||||||
|
|
||||||
| Reranker | Top-3 | Top-5 | Top-10 |
|
| Reranker | Top-3 | Top-5 | Top-10 |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- | --- | --- |
|
||||||
|
|||||||
@@ -5,57 +5,46 @@ LanceDB supports both semantic and keyword-based search (also termed full-text s
|
|||||||
## Hybrid search in LanceDB
|
## Hybrid search in LanceDB
|
||||||
You can perform hybrid search in LanceDB by combining the results of semantic and full-text search via a reranking algorithm of your choice. LanceDB provides multiple rerankers out of the box. However, you can always write a custom reranker if your use case needs more sophisticated logic.
|
You can perform hybrid search in LanceDB by combining the results of semantic and full-text search via a reranking algorithm of your choice. LanceDB provides multiple rerankers out of the box. However, you can always write a custom reranker if your use case needs more sophisticated logic.
|
||||||
|
|
||||||
```python
|
=== "Sync API"
|
||||||
import os
|
|
||||||
|
|
||||||
import lancedb
|
```python
|
||||||
import openai
|
--8<-- "python/python/tests/docs/test_search.py:import-os"
|
||||||
from lancedb.embeddings import get_registry
|
--8<-- "python/python/tests/docs/test_search.py:import-openai"
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-embeddings"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-pydantic"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-openai-embeddings"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:class-Documents"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:basic_hybrid_search"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
db = lancedb.connect("~/.lancedb")
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-os"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-openai"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-embeddings"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-pydantic"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-openai-embeddings"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:class-Documents"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:basic_hybrid_search_async"
|
||||||
|
```
|
||||||
|
|
||||||
# Ingest embedding function in LanceDB table
|
|
||||||
# Configuring the environment variable OPENAI_API_KEY
|
|
||||||
if "OPENAI_API_KEY" not in os.environ:
|
|
||||||
# OR set the key here as a variable
|
|
||||||
openai.api_key = "sk-..."
|
|
||||||
embeddings = get_registry().get("openai").create()
|
|
||||||
|
|
||||||
class Documents(LanceModel):
|
|
||||||
vector: Vector(embeddings.ndims()) = embeddings.VectorField()
|
|
||||||
text: str = embeddings.SourceField()
|
|
||||||
|
|
||||||
table = db.create_table("documents", schema=Documents)
|
|
||||||
|
|
||||||
data = [
|
|
||||||
{ "text": "rebel spaceships striking from a hidden base"},
|
|
||||||
{ "text": "have won their first victory against the evil Galactic Empire"},
|
|
||||||
{ "text": "during the battle rebel spies managed to steal secret plans"},
|
|
||||||
{ "text": "to the Empire's ultimate weapon the Death Star"}
|
|
||||||
]
|
|
||||||
|
|
||||||
# ingest docs with auto-vectorization
|
|
||||||
table.add(data)
|
|
||||||
|
|
||||||
# Create a fts index before the hybrid search
|
|
||||||
table.create_fts_index("text")
|
|
||||||
# hybrid search with default re-ranker
|
|
||||||
results = table.search("flower moon", query_type="hybrid").to_pandas()
|
|
||||||
```
|
|
||||||
!!! Note
|
!!! Note
|
||||||
You can also pass the vector and text query manually. This is useful if you're not using the embedding API or if you're using a separate embedder service.
|
You can also pass the vector and text query manually. This is useful if you're not using the embedding API or if you're using a separate embedder service.
|
||||||
### Explicitly passing the vector and text query
|
### Explicitly passing the vector and text query
|
||||||
```python
|
=== "Sync API"
|
||||||
vector_query = [0.1, 0.2, 0.3, 0.4, 0.5]
|
|
||||||
text_query = "flower moon"
|
|
||||||
results = table.search(query_type="hybrid")
|
|
||||||
.vector(vector_query)
|
|
||||||
.text(text_query)
|
|
||||||
.limit(5)
|
|
||||||
.to_pandas()
|
|
||||||
|
|
||||||
```
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:hybrid_search_pass_vector_text"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:hybrid_search_pass_vector_text_async"
|
||||||
|
```
|
||||||
|
|
||||||
By default, LanceDB uses `RRFReranker()`, which uses the reciprocal rank fusion (RRF) score, to combine and rerank the results of semantic and full-text search. You can customize its hyperparameters as needed or write your own custom reranker. Here's how you can use any of the available rerankers:
|
By default, LanceDB uses `RRFReranker()`, which uses the reciprocal rank fusion (RRF) score, to combine and rerank the results of semantic and full-text search. You can customize its hyperparameters as needed or write your own custom reranker. Here's how you can use any of the available rerankers:
|
||||||
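For example, a hedged sketch using the `CohereReranker` (it assumes the `COHERE_API_KEY` environment variable is set and that `table` is the table created above):

```python
from lancedb.rerankers import CohereReranker

reranker = CohereReranker()

results = (
    table.search("rebel spaceships", query_type="hybrid")
    .rerank(reranker=reranker)
    .limit(5)
    .to_pandas()
)
```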
|
|
||||||
@@ -68,7 +57,7 @@ By default, LanceDB uses `RRFReranker()`, which uses reciprocal rank fusion scor
|
|||||||
|
|
||||||
|
|
||||||
## Available Rerankers
|
## Available Rerankers
|
||||||
LanceDB provides a number of re-rankers out of the box. You can use any of these re-rankers by passing them to the `rerank()` method.
|
LanceDB provides a number of rerankers out of the box. You can use any of these rerankers by passing them to the `rerank()` method.
|
||||||
Go to [Rerankers](../reranking/index.md) to learn more about using the available rerankers and implementing custom rerankers.
|
Go to [Rerankers](../reranking/index.md) to learn more about using the available rerankers and implementing custom rerankers.
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false.
|
|
||||||
@@ -27,7 +27,9 @@ the underlying connection has been closed.
|
|||||||
|
|
||||||
### new Connection()
|
### new Connection()
|
||||||
|
|
||||||
> **new Connection**(): [`Connection`](Connection.md)
|
```ts
|
||||||
|
new Connection(): Connection
|
||||||
|
```
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -37,7 +39,9 @@ the underlying connection has been closed.
|
|||||||
|
|
||||||
### close()
|
### close()
|
||||||
|
|
||||||
> `abstract` **close**(): `void`
|
```ts
|
||||||
|
abstract close(): void
|
||||||
|
```
|
||||||
|
|
||||||
Close the connection, releasing any underlying resources.
|
Close the connection, releasing any underlying resources.
|
||||||
|
|
||||||
@@ -53,21 +57,24 @@ Any attempt to use the connection after it is closed will result in an error.
|
|||||||
|
|
||||||
### createEmptyTable()
|
### createEmptyTable()
|
||||||
|
|
||||||
> `abstract` **createEmptyTable**(`name`, `schema`, `options`?): `Promise`<[`Table`](Table.md)>
|
```ts
|
||||||
|
abstract createEmptyTable(
|
||||||
|
name,
|
||||||
|
schema,
|
||||||
|
options?): Promise<Table>
|
||||||
|
```
|
||||||
|
|
||||||
Creates a new empty Table
|
Creates a new empty Table
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **name**: `string`
|
* **name**: `string`
|
||||||
|
The name of the table.
|
||||||
|
|
||||||
The name of the table.
|
* **schema**: `SchemaLike`
|
||||||
|
The schema of the table
|
||||||
|
|
||||||
• **schema**: `SchemaLike`
|
* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
|
||||||
|
|
||||||
The schema of the table
|
|
||||||
|
|
||||||
• **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -79,15 +86,16 @@ The schema of the table
|
|||||||
|
|
||||||
#### createTable(options)
|
#### createTable(options)
|
||||||
|
|
||||||
> `abstract` **createTable**(`options`): `Promise`<[`Table`](Table.md)>
|
```ts
|
||||||
|
abstract createTable(options): Promise<Table>
|
||||||
|
```
|
||||||
|
|
||||||
Creates a new Table and initialize it with new data.
|
Creates a new Table and initialize it with new data.
|
||||||
|
|
||||||
##### Parameters
|
##### Parameters
|
||||||
|
|
||||||
• **options**: `object` & `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
|
* **options**: `object` & `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
|
||||||
|
The options object.
|
||||||
The options object.
|
|
||||||
|
|
||||||
##### Returns
|
##### Returns
|
||||||
|
|
||||||
@@ -95,22 +103,25 @@ The options object.
|
|||||||
|
|
||||||
#### createTable(name, data, options)
|
#### createTable(name, data, options)
|
||||||
|
|
||||||
> `abstract` **createTable**(`name`, `data`, `options`?): `Promise`<[`Table`](Table.md)>
|
```ts
|
||||||
|
abstract createTable(
|
||||||
|
name,
|
||||||
|
data,
|
||||||
|
options?): Promise<Table>
|
||||||
|
```
|
||||||
|
|
||||||
Creates a new Table and initializes it with new data.
|
Creates a new Table and initializes it with new data.
|
||||||
|
|
||||||
##### Parameters
|
##### Parameters
|
||||||
|
|
||||||
• **name**: `string`
|
* **name**: `string`
|
||||||
|
The name of the table.
|
||||||
|
|
||||||
The name of the table.
|
* **data**: `TableLike` \| `Record`<`string`, `unknown`>[]
|
||||||
|
Non-empty Array of Records
|
||||||
|
to be inserted into the table
|
||||||
|
|
||||||
• **data**: `TableLike` \| `Record`<`string`, `unknown`>[]
|
* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
|
||||||
|
|
||||||
Non-empty Array of Records
|
|
||||||
to be inserted into the table
|
|
||||||
|
|
||||||
• **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
|
|
||||||
|
|
||||||
##### Returns
|
##### Returns
|
||||||
|
|
||||||
@@ -120,7 +131,9 @@ to be inserted into the table
|
|||||||
|
|
||||||
### display()
|
### display()
|
||||||
|
|
||||||
> `abstract` **display**(): `string`
|
```ts
|
||||||
|
abstract display(): string
|
||||||
|
```
|
||||||
|
|
||||||
Return a brief description of the connection
|
Return a brief description of the connection
|
||||||
|
|
||||||
@@ -132,15 +145,16 @@ Return a brief description of the connection
|
|||||||
|
|
||||||
### dropTable()
|
### dropTable()
|
||||||
|
|
||||||
> `abstract` **dropTable**(`name`): `Promise`<`void`>
|
```ts
|
||||||
|
abstract dropTable(name): Promise<void>
|
||||||
|
```
|
||||||
|
|
||||||
Drop an existing table.
|
Drop an existing table.
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **name**: `string`
|
* **name**: `string`
|
||||||
|
The name of the table to drop.
|
||||||
The name of the table to drop.
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -150,7 +164,9 @@ The name of the table to drop.
|
|||||||
|
|
||||||
### isOpen()
|
### isOpen()
|
||||||
|
|
||||||
> `abstract` **isOpen**(): `boolean`
|
```ts
|
||||||
|
abstract isOpen(): boolean
|
||||||
|
```
|
||||||
|
|
||||||
Return true if the connection has not been closed
|
Return true if the connection has not been closed
|
||||||
|
|
||||||
@@ -162,17 +178,18 @@ Return true if the connection has not been closed
|
|||||||
|
|
||||||
### openTable()
|
### openTable()
|
||||||
|
|
||||||
> `abstract` **openTable**(`name`, `options`?): `Promise`<[`Table`](Table.md)>
|
```ts
|
||||||
|
abstract openTable(name, options?): Promise<Table>
|
||||||
|
```
|
||||||
|
|
||||||
Open a table in the database.
|
Open a table in the database.
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **name**: `string`
|
* **name**: `string`
|
||||||
|
The name of the table
|
||||||
|
|
||||||
The name of the table
|
* **options?**: `Partial`<`OpenTableOptions`>
|
||||||
|
|
||||||
• **options?**: `Partial`<`OpenTableOptions`>
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -182,7 +199,9 @@ The name of the table
|
|||||||
|
|
||||||
### tableNames()
|
### tableNames()
|
||||||
|
|
||||||
> `abstract` **tableNames**(`options`?): `Promise`<`string`[]>
|
```ts
|
||||||
|
abstract tableNames(options?): Promise<string[]>
|
||||||
|
```
|
||||||
|
|
||||||
List all the table names in this database.
|
List all the table names in this database.
|
||||||
|
|
||||||
@@ -190,10 +209,9 @@ Tables will be returned in lexicographical order.
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **options?**: `Partial`<[`TableNamesOptions`](../interfaces/TableNamesOptions.md)>
|
* **options?**: `Partial`<[`TableNamesOptions`](../interfaces/TableNamesOptions.md)>
|
||||||
|
options to control the
|
||||||
options to control the
|
paging / start point
|
||||||
paging / start point
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
|
|||||||
@@ -8,9 +8,30 @@
|
|||||||
|
|
||||||
## Methods
|
## Methods
|
||||||
|
|
||||||
|
### bitmap()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
static bitmap(): Index
|
||||||
|
```
|
||||||
|
|
||||||
|
Create a bitmap index.
|
||||||
|
|
||||||
|
A `Bitmap` index stores a bitmap for each distinct value in the column for every row.
|
||||||
|
|
||||||
|
This index works best for low-cardinality columns, where the number of unique values
|
||||||
|
is small (i.e., less than a few hundreds).
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
[`Index`](Index.md)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### btree()
|
### btree()
|
||||||
|
|
||||||
> `static` **btree**(): [`Index`](Index.md)
|
```ts
|
||||||
|
static btree(): Index
|
||||||
|
```
|
||||||
|
|
||||||
Create a btree index
|
Create a btree index
|
||||||
|
|
||||||
@@ -36,9 +57,82 @@ block size may be added in the future.
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### fts()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
static fts(options?): Index
|
||||||
|
```
|
||||||
|
|
||||||
|
Create a full text search index
|
||||||
|
|
||||||
|
A full text search index is an index on a string column, so that you can conduct full
|
||||||
|
text searches on the column.
|
||||||
|
|
||||||
|
The results of a full text search are ordered by relevance measured by BM25.
|
||||||
|
|
||||||
|
You can combine filters with full text search.
|
||||||
|
|
||||||
|
For now, the full text search index only supports English, and doesn't support phrase search.
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **options?**: `Partial`<`FtsOptions`>
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
[`Index`](Index.md)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### hnswPq()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
static hnswPq(options?): Index
|
||||||
|
```
|
||||||
|
|
||||||
|
Create a hnswPq index
|
||||||
|
|
||||||
|
HNSW-PQ stands for Hierarchical Navigable Small World - Product Quantization.
|
||||||
|
It is a variant of the HNSW algorithm that uses product quantization to compress
|
||||||
|
the vectors.
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **options?**: `Partial`<`HnswPqOptions`>
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
[`Index`](Index.md)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### hnswSq()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
static hnswSq(options?): Index
|
||||||
|
```
|
||||||
|
|
||||||
|
Create a hnswSq index
|
||||||
|
|
||||||
|
HNSW-SQ stands for Hierarchical Navigable Small World - Scalar Quantization.
|
||||||
|
It is a variant of the HNSW algorithm that uses scalar quantization to compress
|
||||||
|
the vectors.
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **options?**: `Partial`<`HnswSqOptions`>
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
[`Index`](Index.md)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### ivfPq()
|
### ivfPq()
|
||||||
|
|
||||||
> `static` **ivfPq**(`options`?): [`Index`](Index.md)
|
```ts
|
||||||
|
static ivfPq(options?): Index
|
||||||
|
```
|
||||||
|
|
||||||
Create an IvfPq index
|
Create an IvfPq index
|
||||||
|
|
||||||
@@ -63,29 +157,25 @@ currently is also a memory intensive operation.
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **options?**: `Partial`<[`IvfPqOptions`](../interfaces/IvfPqOptions.md)>
|
* **options?**: `Partial`<[`IvfPqOptions`](../interfaces/IvfPqOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
[`Index`](Index.md)
|
[`Index`](Index.md)
|
||||||
|
|
||||||
### fts()
|
***
|
||||||
|
|
||||||
> `static` **fts**(`options`?): [`Index`](Index.md)
|
### labelList()
|
||||||
|
|
||||||
Create a full text search index
|
```ts
|
||||||
|
static labelList(): Index
|
||||||
|
```
|
||||||
|
|
||||||
This index is used to search for text data. The index is created by tokenizing the text
|
Create a label list index.
|
||||||
into words and then storing occurrences of these words in a data structure called inverted index
|
|
||||||
that allows for fast search.
|
|
||||||
|
|
||||||
During a search the query is tokenized and the inverted index is used to find the rows that
|
LabelList index is a scalar index that can be used on `List<T>` columns to
|
||||||
contain the query words. The rows are then scored based on BM25 and the top scoring rows are
|
support queries with `array_contains_all` and `array_contains_any`
|
||||||
sorted and returned.
|
using an underlying bitmap index.
|
||||||
|
|
||||||
#### Parameters
|
|
||||||
|
|
||||||
• **options?**: `Partial`<[`FtsOptions`](../interfaces/FtsOptions.md)>
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
|
|||||||
@@ -12,11 +12,13 @@ Options to control the makeArrowTable call.
|
|||||||
|
|
||||||
### new MakeArrowTableOptions()
|
### new MakeArrowTableOptions()
|
||||||
|
|
||||||
> **new MakeArrowTableOptions**(`values`?): [`MakeArrowTableOptions`](MakeArrowTableOptions.md)
|
```ts
|
||||||
|
new MakeArrowTableOptions(values?): MakeArrowTableOptions
|
||||||
|
```
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **values?**: `Partial`<[`MakeArrowTableOptions`](MakeArrowTableOptions.md)>
|
* **values?**: `Partial`<[`MakeArrowTableOptions`](MakeArrowTableOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -26,7 +28,9 @@ Options to control the makeArrowTable call.
|
|||||||
|
|
||||||
### dictionaryEncodeStrings
|
### dictionaryEncodeStrings
|
||||||
|
|
||||||
> **dictionaryEncodeStrings**: `boolean` = `false`
|
```ts
|
||||||
|
dictionaryEncodeStrings: boolean = false;
|
||||||
|
```
|
||||||
|
|
||||||
If true then string columns will be encoded with dictionary encoding
|
If true then string columns will be encoded with dictionary encoding
|
||||||
|
|
||||||
@@ -40,22 +44,30 @@ If `schema` is provided then this property is ignored.
|
|||||||
|
|
||||||
### embeddingFunction?
|
### embeddingFunction?
|
||||||
|
|
||||||
> `optional` **embeddingFunction**: [`EmbeddingFunctionConfig`](../namespaces/embedding/interfaces/EmbeddingFunctionConfig.md)
|
```ts
|
||||||
|
optional embeddingFunction: EmbeddingFunctionConfig;
|
||||||
|
```
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### embeddings?
|
### embeddings?
|
||||||
|
|
||||||
> `optional` **embeddings**: [`EmbeddingFunction`](../namespaces/embedding/classes/EmbeddingFunction.md)<`unknown`, `FunctionOptions`>
|
```ts
|
||||||
|
optional embeddings: EmbeddingFunction<unknown, FunctionOptions>;
|
||||||
|
```
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### schema?
|
### schema?
|
||||||
|
|
||||||
> `optional` **schema**: `SchemaLike`
|
```ts
|
||||||
|
optional schema: SchemaLike;
|
||||||
|
```
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### vectorColumns
|
### vectorColumns
|
||||||
|
|
||||||
> **vectorColumns**: `Record`<`string`, [`VectorColumnOptions`](VectorColumnOptions.md)>
|
```ts
|
||||||
|
vectorColumns: Record<string, VectorColumnOptions>;
|
||||||
|
```
|
||||||
|
|||||||
@@ -16,11 +16,13 @@ A builder for LanceDB queries.
|
|||||||
|
|
||||||
### new Query()
|
### new Query()
|
||||||
|
|
||||||
> **new Query**(`tbl`): [`Query`](Query.md)
|
```ts
|
||||||
|
new Query(tbl): Query
|
||||||
|
```
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **tbl**: `Table`
|
* **tbl**: `Table`
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -34,7 +36,9 @@ A builder for LanceDB queries.
|
|||||||
|
|
||||||
### inner
|
### inner
|
||||||
|
|
||||||
> `protected` **inner**: `Query` \| `Promise`<`Query`>
|
```ts
|
||||||
|
protected inner: Query | Promise<Query>;
|
||||||
|
```
|
||||||
|
|
||||||
#### Inherited from
|
#### Inherited from
|
||||||
|
|
||||||
@@ -44,7 +48,9 @@ A builder for LanceDB queries.
|
|||||||
|
|
||||||
### \[asyncIterator\]()
|
### \[asyncIterator\]()
|
||||||
|
|
||||||
> **\[asyncIterator\]**(): `AsyncIterator`<`RecordBatch`<`any`>, `any`, `undefined`>
|
```ts
|
||||||
|
asyncIterator: AsyncIterator<RecordBatch<any>, any, undefined>
|
||||||
|
```
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -58,11 +64,13 @@ A builder for LanceDB queries.
|
|||||||
|
|
||||||
### doCall()
|
### doCall()
|
||||||
|
|
||||||
> `protected` **doCall**(`fn`): `void`
|
```ts
|
||||||
|
protected doCall(fn): void
|
||||||
|
```
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **fn**
|
* **fn**
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -76,13 +84,15 @@ A builder for LanceDB queries.
|
|||||||
|
|
||||||
### execute()
|
### execute()
|
||||||
|
|
||||||
> `protected` **execute**(`options`?): [`RecordBatchIterator`](RecordBatchIterator.md)
|
```ts
|
||||||
|
protected execute(options?): RecordBatchIterator
|
||||||
|
```
|
||||||
|
|
||||||
Execute the query and return the results as an
|
Execute the query and return the results as an
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<`QueryExecutionOptions`>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -108,15 +118,16 @@ single query)
|
|||||||
|
|
||||||
### explainPlan()
|
### explainPlan()
|
||||||
|
|
||||||
> **explainPlan**(`verbose`): `Promise`<`string`>
|
```ts
|
||||||
|
explainPlan(verbose): Promise<string>
|
||||||
|
```
|
||||||
|
|
||||||
Generates an explanation of the query execution plan.
|
Generates an explanation of the query execution plan.
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **verbose**: `boolean` = `false`
|
* **verbose**: `boolean` = `false`
|
||||||
|
If true, provides a more detailed explanation. Defaults to false.
|
||||||
If true, provides a more detailed explanation. Defaults to false.
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -141,15 +152,38 @@ const plan = await table.query().nearestTo([0.5, 0.2]).explainPlan();
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### fastSearch()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
fastSearch(): this
|
||||||
|
```
|
||||||
|
|
||||||
|
Skip searching un-indexed data. This can make search faster, but will miss
|
||||||
|
any data that is not yet indexed.
|
||||||
|
|
||||||
|
Use lancedb.Table#optimize to index all un-indexed data.
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`this`
|
||||||
|
|
||||||
|
#### Inherited from
|
||||||
|
|
||||||
|
[`QueryBase`](QueryBase.md).[`fastSearch`](QueryBase.md#fastsearch)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### ~~filter()~~
|
### ~~filter()~~
|
||||||
|
|
||||||
> **filter**(`predicate`): `this`
|
```ts
|
||||||
|
filter(predicate): this
|
||||||
|
```
|
||||||
|
|
||||||
A filter statement to be applied to this query.
|
A filter statement to be applied to this query.
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **predicate**: `string`
|
* **predicate**: `string`
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -169,9 +203,33 @@ Use `where` instead
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### fullTextSearch()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
fullTextSearch(query, options?): this
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **query**: `string`
|
||||||
|
|
||||||
|
* **options?**: `Partial`<`FullTextSearchOptions`>
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`this`
|
||||||
|
|
||||||
|
#### Inherited from
|
||||||
|
|
||||||
|
[`QueryBase`](QueryBase.md).[`fullTextSearch`](QueryBase.md#fulltextsearch)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### limit()
|
### limit()
|
||||||
|
|
||||||
> **limit**(`limit`): `this`
|
```ts
|
||||||
|
limit(limit): this
|
||||||
|
```
|
||||||
|
|
||||||
Set the maximum number of results to return.
|
Set the maximum number of results to return.
|
||||||
|
|
||||||
@@ -180,7 +238,7 @@ called then every valid row from the table will be returned.
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **limit**: `number`
|
* **limit**: `number`
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -194,11 +252,13 @@ called then every valid row from the table will be returned.
|
|||||||
|
|
||||||
### nativeExecute()
|
### nativeExecute()
|
||||||
|
|
||||||
> `protected` **nativeExecute**(`options`?): `Promise`<`RecordBatchIterator`>
|
```ts
|
||||||
|
protected nativeExecute(options?): Promise<RecordBatchIterator>
|
||||||
|
```
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<`QueryExecutionOptions`>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -212,7 +272,9 @@ called then every valid row from the table will be returned.
|
|||||||
|
|
||||||
### nearestTo()
|
### nearestTo()
|
||||||
|
|
||||||
> **nearestTo**(`vector`): [`VectorQuery`](VectorQuery.md)
|
```ts
|
||||||
|
nearestTo(vector): VectorQuery
|
||||||
|
```
|
||||||
|
|
||||||
Find the nearest vectors to the given query vector.
|
Find the nearest vectors to the given query vector.
|
||||||
|
|
||||||
@@ -232,7 +294,7 @@ If there is more than one vector column you must use
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **vector**: `IntoVector`
|
* **vector**: `IntoVector`
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -264,9 +326,49 @@ a default `limit` of 10 will be used.
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### nearestToText()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
nearestToText(query, columns?): Query
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **query**: `string`
|
||||||
|
|
||||||
|
* **columns?**: `string`[]
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
[`Query`](Query.md)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### offset()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
offset(offset): this
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **offset**: `number`
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`this`
|
||||||
|
|
||||||
|
#### Inherited from
|
||||||
|
|
||||||
|
[`QueryBase`](QueryBase.md).[`offset`](QueryBase.md#offset)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### select()
|
### select()
|
||||||
|
|
||||||
> **select**(`columns`): `this`
|
```ts
|
||||||
|
select(columns): this
|
||||||
|
```
|
||||||
|
|
||||||
Return only the specified columns.
|
Return only the specified columns.
|
||||||
|
|
||||||
@@ -290,7 +392,7 @@ input to this method would be:
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **columns**: `string` \| `string`[] \| `Record`<`string`, `string`> \| `Map`<`string`, `string`>
|
* **columns**: `string` \| `string`[] \| `Record`<`string`, `string`> \| `Map`<`string`, `string`>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -317,13 +419,15 @@ object insertion order is easy to get wrong and `Map` is more foolproof.
|
|||||||
|
|
||||||
### toArray()
|
### toArray()
|
||||||
|
|
||||||
> **toArray**(`options`?): `Promise`<`any`[]>
|
```ts
|
||||||
|
toArray(options?): Promise<any[]>
|
||||||
|
```
|
||||||
|
|
||||||
Collect the results as an array of objects.
|
Collect the results as an array of objects.
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<`QueryExecutionOptions`>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -337,13 +441,15 @@ Collect the results as an array of objects.
|
|||||||
|
|
||||||
### toArrow()
|
### toArrow()
|
||||||
|
|
||||||
> **toArrow**(`options`?): `Promise`<`Table`<`any`>>
|
```ts
|
||||||
|
toArrow(options?): Promise<Table<any>>
|
||||||
|
```
|
||||||
|
|
||||||
Collect the results as an Arrow
|
Collect the results as an Arrow
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<`QueryExecutionOptions`>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -361,7 +467,9 @@ ArrowTable.
|
|||||||
|
|
||||||
### where()
|
### where()
|
||||||
|
|
||||||
> **where**(`predicate`): `this`
|
```ts
|
||||||
|
where(predicate): this
|
||||||
|
```
|
||||||
|
|
||||||
A filter statement to be applied to this query.
|
A filter statement to be applied to this query.
|
||||||
|
|
||||||
@@ -369,7 +477,7 @@ The filter should be supplied as an SQL query string. For example:
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **predicate**: `string`
|
* **predicate**: `string`
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -389,3 +497,25 @@ on the filter column(s).
|
|||||||
#### Inherited from
|
#### Inherited from
|
||||||
|
|
||||||
[`QueryBase`](QueryBase.md).[`where`](QueryBase.md#where)
|
[`QueryBase`](QueryBase.md).[`where`](QueryBase.md#where)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### withRowId()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
withRowId(): this
|
||||||
|
```
|
||||||
|
|
||||||
|
Whether to return the row id in the results.
|
||||||
|
|
||||||
|
This column can be used to match results between different queries. For
|
||||||
|
example, to match results from a full text search and a vector search in
|
||||||
|
order to perform hybrid search.
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`this`
|
||||||
|
|
||||||
|
#### Inherited from
|
||||||
|
|
||||||
|
[`QueryBase`](QueryBase.md).[`withRowId`](QueryBase.md#withrowid)
|
||||||
|
|||||||
@@ -25,11 +25,13 @@ Common methods supported by all query types
|
|||||||
|
|
||||||
### new QueryBase()
|
### new QueryBase()
|
||||||
|
|
||||||
> `protected` **new QueryBase**<`NativeQueryType`>(`inner`): [`QueryBase`](QueryBase.md)<`NativeQueryType`>
|
```ts
|
||||||
|
protected new QueryBase<NativeQueryType>(inner): QueryBase<NativeQueryType>
|
||||||
|
```
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **inner**: `NativeQueryType` \| `Promise`<`NativeQueryType`>
|
* **inner**: `NativeQueryType` \| `Promise`<`NativeQueryType`>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -39,13 +41,17 @@ Common methods supported by all query types
|
|||||||
|
|
||||||
### inner
|
### inner
|
||||||
|
|
||||||
> `protected` **inner**: `NativeQueryType` \| `Promise`<`NativeQueryType`>
|
```ts
|
||||||
|
protected inner: NativeQueryType | Promise<NativeQueryType>;
|
||||||
|
```
|
||||||
|
|
||||||
## Methods
|
## Methods
|
||||||
|
|
||||||
### \[asyncIterator\]()
|
### \[asyncIterator\]()
|
||||||
|
|
||||||
> **\[asyncIterator\]**(): `AsyncIterator`<`RecordBatch`<`any`>, `any`, `undefined`>
|
```ts
|
||||||
|
asyncIterator: AsyncIterator<RecordBatch<any>, any, undefined>
|
||||||
|
```
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -59,11 +65,13 @@ Common methods supported by all query types
|
|||||||
|
|
||||||
### doCall()
|
### doCall()
|
||||||
|
|
||||||
> `protected` **doCall**(`fn`): `void`
|
```ts
|
||||||
|
protected doCall(fn): void
|
||||||
|
```
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **fn**
|
* **fn**
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -73,13 +81,15 @@ Common methods supported by all query types
|
|||||||
|
|
||||||
### execute()
|
### execute()
|
||||||
|
|
||||||
> `protected` **execute**(`options`?): [`RecordBatchIterator`](RecordBatchIterator.md)
|
```ts
|
||||||
|
protected execute(options?): RecordBatchIterator
|
||||||
|
```
|
||||||
|
|
||||||
Execute the query and return the results as an
|
Execute the query and return the results as an
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<`QueryExecutionOptions`>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -101,15 +111,16 @@ single query)
|
|||||||
|
|
||||||
### explainPlan()
|
### explainPlan()
|
||||||
|
|
||||||
> **explainPlan**(`verbose`): `Promise`<`string`>
|
```ts
|
||||||
|
explainPlan(verbose): Promise<string>
|
||||||
|
```
|
||||||
|
|
||||||
Generates an explanation of the query execution plan.
|
Generates an explanation of the query execution plan.
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **verbose**: `boolean` = `false`
|
* **verbose**: `boolean` = `false`
|
||||||
|
If true, provides a more detailed explanation. Defaults to false.
|
||||||
If true, provides a more detailed explanation. Defaults to false.
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -130,15 +141,34 @@ const plan = await table.query().nearestTo([0.5, 0.2]).explainPlan();
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### fastSearch()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
fastSearch(): this
|
||||||
|
```
|
||||||
|
|
||||||
|
Skip searching un-indexed data. This can make search faster, but will miss
|
||||||
|
any data that is not yet indexed.
|
||||||
|
|
||||||
|
Use lancedb.Table#optimize to index all un-indexed data.
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`this`
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### ~~filter()~~
|
### ~~filter()~~
|
||||||
|
|
||||||
> **filter**(`predicate`): `this`
|
```ts
|
||||||
|
filter(predicate): this
|
||||||
|
```
|
||||||
|
|
||||||
A filter statement to be applied to this query.
|
A filter statement to be applied to this query.
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **predicate**: `string`
|
* **predicate**: `string`
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -154,9 +184,29 @@ Use `where` instead
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### fullTextSearch()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
fullTextSearch(query, options?): this
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **query**: `string`
|
||||||
|
|
||||||
|
* **options?**: `Partial`<`FullTextSearchOptions`>
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`this`
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### limit()
|
### limit()
|
||||||
|
|
||||||
> **limit**(`limit`): `this`
|
```ts
|
||||||
|
limit(limit): this
|
||||||
|
```
|
||||||
|
|
||||||
Set the maximum number of results to return.
|
Set the maximum number of results to return.
|
||||||
|
|
||||||
@@ -165,7 +215,7 @@ called then every valid row from the table will be returned.
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **limit**: `number`
|
* **limit**: `number`
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -175,11 +225,13 @@ called then every valid row from the table will be returned.
|
|||||||
|
|
||||||
### nativeExecute()
|
### nativeExecute()
|
||||||
|
|
||||||
> `protected` **nativeExecute**(`options`?): `Promise`<`RecordBatchIterator`>
|
```ts
|
||||||
|
protected nativeExecute(options?): Promise<RecordBatchIterator>
|
||||||
|
```
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<`QueryExecutionOptions`>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -187,9 +239,27 @@ called then every valid row from the table will be returned.
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### offset()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
offset(offset): this
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **offset**: `number`
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`this`
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### select()
|
### select()
|
||||||
|
|
||||||
> **select**(`columns`): `this`
|
```ts
|
||||||
|
select(columns): this
|
||||||
|
```
|
||||||
|
|
||||||
Return only the specified columns.
|
Return only the specified columns.
|
||||||
|
|
||||||
@@ -213,7 +283,7 @@ input to this method would be:
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **columns**: `string` \| `string`[] \| `Record`<`string`, `string`> \| `Map`<`string`, `string`>
|
* **columns**: `string` \| `string`[] \| `Record`<`string`, `string`> \| `Map`<`string`, `string`>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -236,13 +306,15 @@ object insertion order is easy to get wrong and `Map` is more foolproof.
|
|||||||
|
|
||||||
### toArray()
|
### toArray()
|
||||||
|
|
||||||
> **toArray**(`options`?): `Promise`<`any`[]>
|
```ts
|
||||||
|
toArray(options?): Promise<any[]>
|
||||||
|
```
|
||||||
|
|
||||||
Collect the results as an array of objects.
|
Collect the results as an array of objects.
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<`QueryExecutionOptions`>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -252,13 +324,15 @@ Collect the results as an array of objects.
|
|||||||
|
|
||||||
### toArrow()
|
### toArrow()
|
||||||
|
|
||||||
> **toArrow**(`options`?): `Promise`<`Table`<`any`>>
|
```ts
|
||||||
|
toArrow(options?): Promise<Table<any>>
|
||||||
|
```
|
||||||
|
|
||||||
Collect the results as an Arrow
|
Collect the results as an Arrow
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<`QueryExecutionOptions`>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -272,7 +346,9 @@ ArrowTable.
|
|||||||
|
|
||||||
### where()
|
### where()
|
||||||
|
|
||||||
> **where**(`predicate`): `this`
|
```ts
|
||||||
|
where(predicate): this
|
||||||
|
```
|
||||||
|
|
||||||
A filter statement to be applied to this query.
|
A filter statement to be applied to this query.
|
||||||
|
|
||||||
@@ -280,7 +356,7 @@ The filter should be supplied as an SQL query string. For example:
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **predicate**: `string`
|
* **predicate**: `string`
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -296,3 +372,21 @@ x > 5 OR y = 'test'
|
|||||||
Filtering performance can often be improved by creating a scalar index
|
Filtering performance can often be improved by creating a scalar index
|
||||||
on the filter column(s).
|
on the filter column(s).
|
||||||
```
|
```
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### withRowId()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
withRowId(): this
|
||||||
|
```
|
||||||
|
|
||||||
|
Whether to return the row id in the results.
|
||||||
|
|
||||||
|
This column can be used to match results between different queries. For
|
||||||
|
example, to match results from a full text search and a vector search in
|
||||||
|
order to perform hybrid search.
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`this`
|
||||||
|
|||||||
@@ -14,11 +14,13 @@
|
|||||||
|
|
||||||
### new RecordBatchIterator()
|
### new RecordBatchIterator()
|
||||||
|
|
||||||
> **new RecordBatchIterator**(`promise`?): [`RecordBatchIterator`](RecordBatchIterator.md)
|
```ts
|
||||||
|
new RecordBatchIterator(promise?): RecordBatchIterator
|
||||||
|
```
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **promise?**: `Promise`<`RecordBatchIterator`>
|
* **promise?**: `Promise`<`RecordBatchIterator`>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -28,7 +30,9 @@
|
|||||||
|
|
||||||
### next()
|
### next()
|
||||||
|
|
||||||
> **next**(): `Promise`<`IteratorResult`<`RecordBatch`<`any`>, `any`>>
|
```ts
|
||||||
|
next(): Promise<IteratorResult<RecordBatch<any>, any>>
|
||||||
|
```
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
|
|||||||
@@ -21,7 +21,9 @@ collected.
|
|||||||
|
|
||||||
### new Table()
|
### new Table()
|
||||||
|
|
||||||
> **new Table**(): [`Table`](Table.md)
|
```ts
|
||||||
|
new Table(): Table
|
||||||
|
```
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -31,7 +33,9 @@ collected.
|
|||||||
|
|
||||||
### name
|
### name
|
||||||
|
|
||||||
> `get` `abstract` **name**(): `string`
|
```ts
|
||||||
|
get abstract name(): string
|
||||||
|
```
|
||||||
|
|
||||||
Returns the name of the table
|
Returns the name of the table
|
||||||
|
|
||||||
@@ -43,17 +47,18 @@ Returns the name of the table
|
|||||||
|
|
||||||
### add()
|
### add()
|
||||||
|
|
||||||
> `abstract` **add**(`data`, `options`?): `Promise`<`void`>
|
```ts
|
||||||
|
abstract add(data, options?): Promise<void>
|
||||||
|
```
|
||||||
|
|
||||||
Insert records into this Table.
|
Insert records into this Table.
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **data**: [`Data`](../type-aliases/Data.md)
|
* **data**: [`Data`](../type-aliases/Data.md)
|
||||||
|
Records to be inserted into the Table
|
||||||
|
|
||||||
Records to be inserted into the Table
|
* **options?**: `Partial`<[`AddDataOptions`](../interfaces/AddDataOptions.md)>
|
||||||
|
|
||||||
• **options?**: `Partial`<[`AddDataOptions`](../interfaces/AddDataOptions.md)>
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -63,18 +68,19 @@ Records to be inserted into the Table
|
|||||||
|
|
||||||
### addColumns()
|
### addColumns()
|
||||||
|
|
||||||
> `abstract` **addColumns**(`newColumnTransforms`): `Promise`<`void`>
|
```ts
|
||||||
|
abstract addColumns(newColumnTransforms): Promise<void>
|
||||||
|
```
|
||||||
|
|
||||||
Add new columns with defined values.
|
Add new columns with defined values.
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **newColumnTransforms**: [`AddColumnsSql`](../interfaces/AddColumnsSql.md)[]
|
* **newColumnTransforms**: [`AddColumnsSql`](../interfaces/AddColumnsSql.md)[]
|
||||||
|
pairs of column names and
|
||||||
pairs of column names and
|
the SQL expression to use to calculate the value of the new column. These
|
||||||
the SQL expression to use to calculate the value of the new column. These
|
expressions will be evaluated for each row in the table, and can
|
||||||
expressions will be evaluated for each row in the table, and can
|
reference existing columns in the table.
|
||||||
reference existing columns in the table.
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -84,16 +90,17 @@ reference existing columns in the table.
|
|||||||
|
|
||||||
### alterColumns()

```ts
abstract alterColumns(columnAlterations): Promise<void>
```

Alter the name or nullability of columns.

#### Parameters

* **columnAlterations**: [`ColumnAlteration`](../interfaces/ColumnAlteration.md)[]
One or more alterations to
apply to columns.

#### Returns

@@ -103,7 +110,9 @@ apply to columns.

### checkout()

```ts
abstract checkout(version): Promise<void>
```

Checks out a specific version of the table _This is an in-place operation._

@@ -116,9 +125,8 @@ wish to return to standard mode, call `checkoutLatest`.

#### Parameters

* **version**: `number`
The version to checkout

#### Returns

@@ -144,7 +152,9 @@ console.log(await table.version()); // 2

### checkoutLatest()

```ts
abstract checkoutLatest(): Promise<void>
```

Checkout the latest version of the table. _This is an in-place operation._

@@ -159,7 +169,9 @@ version of the table.

### close()

```ts
abstract close(): void
```

Close the table, releasing any underlying resources.

@@ -175,13 +187,15 @@ Any attempt to use the table after it is closed will result in an error.

### countRows()

```ts
abstract countRows(filter?): Promise<number>
```

Count the total number of rows in the dataset.

#### Parameters

* **filter?**: `string`

#### Returns

@@ -191,7 +205,9 @@ Count the total number of rows in the dataset.

### createIndex()

```ts
abstract createIndex(column, options?): Promise<void>
```

Create an index to speed up queries.

@@ -202,9 +218,9 @@ vector and non-vector searches)

#### Parameters

* **column**: `string`

* **options?**: `Partial`<[`IndexOptions`](../interfaces/IndexOptions.md)>

#### Returns

@@ -245,13 +261,15 @@ await table.createIndex("my_float_col");
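A hedged sketch of creating and then inspecting an index; the index name `vector_idx` is an assumption about the default naming convention, and the path and table are placeholders.

```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("data/sample-lancedb"); // hypothetical path
const tbl = await db.openTable("my_table");              // hypothetical table

// Let LanceDB pick a reasonable index type for the column.
await tbl.createIndex("vector");

// Inspect what was created and how much data it covers.
console.log(await tbl.listIndices());
const stats = await tbl.indexStats("vector_idx"); // index name is an assumption
if (stats !== undefined) {
  console.log(stats.numIndexedRows, stats.numUnindexedRows);
}
```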
### delete()

```ts
abstract delete(predicate): Promise<void>
```

Delete the rows that satisfy the predicate.

#### Parameters

* **predicate**: `string`

#### Returns

@@ -261,7 +279,9 @@ Delete the rows that satisfy the predicate.

### display()

```ts
abstract display(): string
```

Return a brief description of the table

@@ -273,7 +293,9 @@ Return a brief description of the table

### dropColumns()

```ts
abstract dropColumns(columnNames): Promise<void>
```

Drop one or more columns from the dataset

@@ -284,11 +306,10 @@ then call ``cleanup_files`` to remove the old files.

#### Parameters

* **columnNames**: `string`[]
The names of the columns to drop. These can
be nested column references (e.g. "a.b.c") or top-level column names
(e.g. "a").

#### Returns

@@ -298,15 +319,16 @@ be nested column references (e.g. "a.b.c") or top-level column names

### indexStats()

```ts
abstract indexStats(name): Promise<undefined | IndexStatistics>
```

List all the stats of a specified index

#### Parameters

* **name**: `string`
The name of the index.

#### Returns

@@ -318,7 +340,9 @@ The stats of the index. If the index does not exist, it will return undefined

### isOpen()

```ts
abstract isOpen(): boolean
```

Return true if the table has not been closed

@@ -330,7 +354,9 @@ Return true if the table has not been closed

### listIndices()

```ts
abstract listIndices(): Promise<IndexConfig[]>
```

List all indices that have been created with [Table.createIndex](Table.md#createindex)

@@ -340,13 +366,29 @@ List all indices that have been created with [Table.createIndex](Table.md#create

***

### listVersions()

```ts
abstract listVersions(): Promise<Version[]>
```

List all the versions of the table

#### Returns

`Promise`<`Version`[]>

***

### mergeInsert()

```ts
abstract mergeInsert(on): MergeInsertBuilder
```

#### Parameters

* **on**: `string` \| `string`[]

#### Returns

@@ -356,7 +398,9 @@ List all indices that have been created with [Table.createIndex](Table.md#create
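A hedged upsert sketch; the `MergeInsertBuilder` methods chained below (`whenMatchedUpdateAll`, `whenNotMatchedInsertAll`, `execute`) are assumptions about the builder API, and the key and data are placeholders.

```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("data/sample-lancedb"); // hypothetical
const tbl = await db.openTable("users");                 // hypothetical

// Upsert on the "id" key: update matching rows, insert the rest.
await tbl
  .mergeInsert("id")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute([
    { id: 1, name: "Alice" },
    { id: 4, name: "Dave" },
  ]);
```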
### optimize()

```ts
abstract optimize(options?): Promise<OptimizeStats>
```

Optimize the on-disk data and indices for better performance.

@@ -388,7 +432,7 @@ Modeled after ``VACUUM`` in PostgreSQL.

#### Parameters

* **options?**: `Partial`<[`OptimizeOptions`](../interfaces/OptimizeOptions.md)>

#### Returns

@@ -398,7 +442,9 @@ Modeled after ``VACUUM`` in PostgreSQL.
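A minimal sketch combining `optimize` with the `cleanupOlderThan` option; the path, table name, and retention window are placeholders.

```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("data/sample-lancedb"); // hypothetical
const tbl = await db.openTable("my_table");              // hypothetical

// Compact small fragments, refresh indices, and prune versions older than a week.
const weekAgo = new Date();
weekAgo.setDate(weekAgo.getDate() - 7);
const stats = await tbl.optimize({ cleanupOlderThan: weekAgo });
console.log(stats); // OptimizeStats describing what was compacted and pruned
```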
### query()

```ts
abstract query(): Query
```

Create a [Query](Query.md) Builder.

@@ -466,7 +512,9 @@ for await (const batch of table.query()) {

### restore()

```ts
abstract restore(): Promise<void>
```

Restore the table to the currently checked out version

@@ -487,7 +535,9 @@ out state and the read_consistency_interval, if any, will apply.

### schema()

```ts
abstract schema(): Promise<Schema<any>>
```

Get the schema of the table.

@@ -499,61 +549,41 @@ Get the schema of the table.

### search()

```ts
abstract search(
query,
queryType?,
ftsColumns?): VectorQuery | Query
```

Create a search query to find the nearest neighbors
of the given query

#### Parameters

* **query**: `string` \| `IntoVector`
the query, a vector or string

* **queryType?**: `string`
the type of the query, "vector", "fts", or "auto"

* **ftsColumns?**: `string` \| `string`[]
the columns to search in for full text search
for now, only one column can be searched at a time.
when "auto" is used, if the query is a string and an embedding function is defined, it will be treated as a vector query
if the query is a string and no embedding function is defined, it will be treated as a full text search query

#### Returns

[`VectorQuery`](VectorQuery.md) \| [`Query`](Query.md)

***
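A hedged sketch of the combined search entry point; it assumes an embedding function is registered for the string query (otherwise the call falls back to full-text search), and the vector length and values are placeholders.

```ts
// Assuming `tbl` is an open Table (see the earlier connect/openTable sketches).
// With an embedding function registered, a string is embedded and run as a
// vector query; without one, the same call is treated as a full-text search.
const byText = await tbl.search("a cat chasing a ball", "auto").limit(5).toArray();

// An explicit vector query; the array length must match the vector column.
const byVector = await tbl.search([0.1, 0.3, 0.7, 0.2]).limit(5).toArray();

console.log(byText.length, byVector.length);
```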
### toArrow()

```ts
abstract toArrow(): Promise<Table<any>>
```

Return the table as an arrow table

@@ -567,13 +597,15 @@ Return the table as an arrow table

#### update(opts)

```ts
abstract update(opts): Promise<void>
```

Update existing records in the Table

##### Parameters

* **opts**: `object` & `Partial`<[`UpdateOptions`](../interfaces/UpdateOptions.md)>

##### Returns

@@ -587,13 +619,15 @@ table.update({where:"x = 2", values:{"vector": [10, 10]}})

#### update(opts)

```ts
abstract update(opts): Promise<void>
```

Update existing records in the Table

##### Parameters

* **opts**: `object` & `Partial`<[`UpdateOptions`](../interfaces/UpdateOptions.md)>

##### Returns

@@ -607,7 +641,9 @@ table.update({where:"x = 2", valuesSql:{"x": "x + 1"}})

#### update(updates, options)

```ts
abstract update(updates, options?): Promise<void>
```

Update existing records in the Table

@@ -626,20 +662,17 @@ repeatedly calilng this method.

##### Parameters

* **updates**: `Record`<`string`, `string`> \| `Map`<`string`, `string`>
the
columns to update
Keys in the map should specify the name of the column to update.
Values in the map provide the new value of the column. These can
be SQL literal strings (e.g. "7" or "'foo'") or they can be expressions
based on the row being updated (e.g. "my_col + 1")

* **options?**: `Partial`<[`UpdateOptions`](../interfaces/UpdateOptions.md)>
additional options to control
the update behavior

##### Returns

@@ -649,7 +682,9 @@ the update behavior
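A hedged sketch of the three `update` forms; column names and predicates are placeholders.

```ts
// Assuming `tbl` is an open Table; column names are illustrative only.
// Literal values:
await tbl.update({ where: "id = 2", values: { vector: [10, 10] } });

// SQL expressions evaluated per matching row:
await tbl.update({ where: "price > 100", valuesSql: { price: "price * 0.9" } });

// Map form: keys are column names, values are SQL expressions.
await tbl.update(new Map([["counter", "counter + 1"]]), { where: "id = 2" });
```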
### vectorSearch()

```ts
abstract vectorSearch(vector): VectorQuery
```

Search the table with a given query vector.

@@ -659,7 +694,7 @@ by `query`.

#### Parameters

* **vector**: `IntoVector`

#### Returns

@@ -673,7 +708,9 @@ by `query`.

### version()

```ts
abstract version(): Promise<number>
```

Retrieve the version of the table

@@ -685,15 +722,20 @@ Retrieve the version of the table

### parseTableData()

```ts
static parseTableData(
data,
options?,
streaming?): Promise<object>
```

#### Parameters

* **data**: `TableLike` \| `Record`<`string`, `unknown`>[]

* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>

* **streaming?**: `boolean` = `false`

#### Returns

@@ -701,8 +743,12 @@ Retrieve the version of the table

##### buf

```ts
buf: Buffer;
```

##### mode

```ts
mode: string;
```

@@ -10,11 +10,13 @@

### new VectorColumnOptions()

```ts
new VectorColumnOptions(values?): VectorColumnOptions
```

#### Parameters

* **values?**: `Partial`<[`VectorColumnOptions`](VectorColumnOptions.md)>

#### Returns

@@ -24,6 +26,8 @@

### type

```ts
type: Float<Floats>;
```

Vector column type.

@@ -18,11 +18,13 @@ This builder can be reused to execute the query many times.

### new VectorQuery()

```ts
new VectorQuery(inner): VectorQuery
```

#### Parameters

* **inner**: `VectorQuery` \| `Promise`<`VectorQuery`>

#### Returns

@@ -36,7 +38,9 @@ This builder can be reused to execute the query many times.

### inner

```ts
protected inner: VectorQuery | Promise<VectorQuery>;
```

#### Inherited from

@@ -46,7 +50,9 @@ This builder can be reused to execute the query many times.

### \[asyncIterator\]()

```ts
asyncIterator: AsyncIterator<RecordBatch<any>, any, undefined>
```

#### Returns

@@ -58,9 +64,27 @@ This builder can be reused to execute the query many times.

***

### addQueryVector()

```ts
addQueryVector(vector): VectorQuery
```

#### Parameters

* **vector**: `IntoVector`

#### Returns

[`VectorQuery`](VectorQuery.md)

***

### bypassVectorIndex()

```ts
bypassVectorIndex(): VectorQuery
```

If this is called then any vector index is skipped

@@ -78,7 +102,9 @@ calculate your recall to select an appropriate value for nprobes.

### column()

```ts
column(column): VectorQuery
```

Set the vector column to query

@@ -87,7 +113,7 @@ the call to

#### Parameters

* **column**: `string`

#### Returns

@@ -104,7 +130,9 @@ whose data type is a fixed-size-list of floats.

### distanceType()

```ts
distanceType(distanceType): VectorQuery
```

Set the distance metric to use

@@ -114,7 +142,7 @@ use. See

#### Parameters

* **distanceType**: `"l2"` \| `"cosine"` \| `"dot"`

#### Returns

@@ -135,11 +163,13 @@ By default "l2" is used.

### doCall()

```ts
protected doCall(fn): void
```

#### Parameters

* **fn**

#### Returns

@@ -151,15 +181,41 @@ By default "l2" is used.

***

### ef()

```ts
ef(ef): VectorQuery
```

Set the number of candidates to consider during the search

This argument is only used when the vector column has an HNSW index.
If there is no index then this value is ignored.

Increasing this value will increase the recall of your query but will
also increase the latency of your query. The default value is 1.5*limit.

#### Parameters

* **ef**: `number`

#### Returns

[`VectorQuery`](VectorQuery.md)

***
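A hedged sketch of tuning `ef`; it assumes an HNSW index exists on the vector column, and the vector and numbers are placeholders.

```ts
// Assuming `tbl` has an HNSW index on its vector column; values are illustrative.
const results = await tbl
  .query()
  .nearestTo([0.1, 0.2, 0.3, 0.4])
  .ef(64)   // consider more candidates than the default of 1.5 * limit
  .limit(10)
  .toArray();
console.log(results.length);
```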
### execute()

```ts
protected execute(options?): RecordBatchIterator
```

Execute the query and return the results as an

#### Parameters

* **options?**: `Partial`<`QueryExecutionOptions`>

#### Returns

@@ -185,15 +241,16 @@ single query)

### explainPlan()

```ts
explainPlan(verbose): Promise<string>
```

Generates an explanation of the query execution plan.

#### Parameters

* **verbose**: `boolean` = `false`
If true, provides a more detailed explanation. Defaults to false.

#### Returns

@@ -218,15 +275,38 @@ const plan = await table.query().nearestTo([0.5, 0.2]).explainPlan();

***

### fastSearch()

```ts
fastSearch(): this
```

Skip searching un-indexed data. This can make search faster, but will miss
any data that is not yet indexed.

Use lancedb.Table#optimize to index all un-indexed data.

#### Returns

`this`

#### Inherited from

[`QueryBase`](QueryBase.md).[`fastSearch`](QueryBase.md#fastsearch)

***

### ~~filter()~~

```ts
filter(predicate): this
```

A filter statement to be applied to this query.

#### Parameters

* **predicate**: `string`

#### Returns

@@ -246,9 +326,33 @@ Use `where` instead

***

### fullTextSearch()

```ts
fullTextSearch(query, options?): this
```

#### Parameters

* **query**: `string`

* **options?**: `Partial`<`FullTextSearchOptions`>

#### Returns

`this`

#### Inherited from

[`QueryBase`](QueryBase.md).[`fullTextSearch`](QueryBase.md#fulltextsearch)

***
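A hedged sketch of running a full-text query through the builder; it assumes a full-text search index exists on the `text` column, which is a placeholder name.

```ts
// Assuming `tbl` has a full-text search index on a "text" column (illustrative).
const hits = await tbl
  .query()
  .fullTextSearch("puppy")
  .select(["id", "text"])
  .limit(10)
  .toArray();
console.log(hits);
```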
### limit()

```ts
limit(limit): this
```

Set the maximum number of results to return.

@@ -257,7 +361,7 @@ called then every valid row from the table will be returned.

#### Parameters

* **limit**: `number`

#### Returns

@@ -271,11 +375,13 @@ called then every valid row from the table will be returned.

### nativeExecute()

```ts
protected nativeExecute(options?): Promise<RecordBatchIterator>
```

#### Parameters

* **options?**: `Partial`<`QueryExecutionOptions`>

#### Returns

@@ -289,7 +395,9 @@ called then every valid row from the table will be returned.

### nprobes()

```ts
nprobes(nprobes): VectorQuery
```

Set the number of partitions to search (probe)

@@ -314,7 +422,7 @@ you the desired recall.

#### Parameters

* **nprobes**: `number`

#### Returns

@@ -322,9 +430,31 @@ you the desired recall.

***

### offset()

```ts
offset(offset): this
```

#### Parameters

* **offset**: `number`

#### Returns

`this`

#### Inherited from

[`QueryBase`](QueryBase.md).[`offset`](QueryBase.md#offset)

***

### postfilter()

```ts
postfilter(): VectorQuery
```

If this is called then filtering will happen after the vector search instead of
before.

@@ -356,7 +486,9 @@ factor can often help restore some of the results lost by post filtering.

### refineFactor()

```ts
refineFactor(refineFactor): VectorQuery
```

A multiplier to control how many additional rows are taken during the refine step

@@ -388,7 +520,7 @@ distance between the query vector and the actual uncompressed vector.

#### Parameters

* **refineFactor**: `number`

#### Returns

@@ -398,7 +530,9 @@ distance between the query vector and the actual uncompressed vector.

### select()

```ts
select(columns): this
```

Return only the specified columns.

@@ -422,7 +556,7 @@ input to this method would be:

#### Parameters

* **columns**: `string` \| `string`[] \| `Record`<`string`, `string`> \| `Map`<`string`, `string`>

#### Returns

@@ -449,13 +583,15 @@ object insertion order is easy to get wrong and `Map` is more foolproof.
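A hedged sketch combining `where`, `select`, and `toArray`; the columns, SQL expressions, and limits are placeholders.

```ts
// Assuming `tbl` is an open Table; columns are illustrative.
// A `Map` keeps the projection order stable; values may be SQL expressions.
const rows = await tbl
  .query()
  .where("price > 10 AND category = 'toys'")
  .select(new Map([
    ["id", "id"],
    ["priceWithTax", "price * 1.1"],
  ]))
  .limit(20)
  .toArray();
console.log(rows[0]);
```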
### toArray()

```ts
toArray(options?): Promise<any[]>
```

Collect the results as an array of objects.

#### Parameters

* **options?**: `Partial`<`QueryExecutionOptions`>

#### Returns

@@ -469,13 +605,15 @@ Collect the results as an array of objects.

### toArrow()

```ts
toArrow(options?): Promise<Table<any>>
```

Collect the results as an Arrow

#### Parameters

* **options?**: `Partial`<`QueryExecutionOptions`>

#### Returns

@@ -493,7 +631,9 @@ ArrowTable.

### where()

```ts
where(predicate): this
```

A filter statement to be applied to this query.

@@ -501,7 +641,7 @@ The filter should be supplied as an SQL query string. For example:

#### Parameters

* **predicate**: `string`

#### Returns

@@ -521,3 +661,25 @@ on the filter column(s).

#### Inherited from

[`QueryBase`](QueryBase.md).[`where`](QueryBase.md#where)

***

### withRowId()

```ts
withRowId(): this
```

Whether to return the row id in the results.

This column can be used to match results between different queries. For
example, to match results from a full text search and a vector search in
order to perform hybrid search.

#### Returns

`this`

#### Inherited from

[`QueryBase`](QueryBase.md).[`withRowId`](QueryBase.md#withrowid)

@@ -12,16 +12,22 @@ Write mode for writing a table.

### Append

```ts
Append: "Append";
```

***

### Create

```ts
Create: "Create";
```

***

### Overwrite

```ts
Overwrite: "Overwrite";
```

@@ -8,7 +8,9 @@

## connect(uri, opts)

```ts
function connect(uri, opts?): Promise<Connection>
```

Connect to a LanceDB instance at the given URI.

@@ -20,12 +22,11 @@ Accepted formats:

### Parameters

* **uri**: `string`
The uri of the database. If the database uri starts
with `db://` then it connects to a remote database.

* **opts?**: `Partial`<[`ConnectionOptions`](../interfaces/ConnectionOptions.md)>

### Returns

@@ -50,7 +51,9 @@ const conn = await connect(

## connect(opts)

```ts
function connect(opts): Promise<Connection>
```

Connect to a LanceDB instance at the given URI.

@@ -62,7 +65,7 @@ Accepted formats:

### Parameters

* **opts**: `Partial`<[`ConnectionOptions`](../interfaces/ConnectionOptions.md)> & `object`

### Returns
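A hedged sketch of both `connect` overloads; the local path is a placeholder, and passing `uri` inside the options object is an assumption about the second overload.

```ts
import * as lancedb from "@lancedb/lancedb";

// Local directory database; the path is illustrative.
const db = await lancedb.connect("data/sample-lancedb");

// The options-object overload is assumed to accept the uri alongside the options.
const db2 = await lancedb.connect({
  uri: "data/sample-lancedb",
  readConsistencyInterval: 5, // re-check for external writes every 5 seconds
});
console.log(await db.tableNames());
```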
@@ -6,7 +6,12 @@

# Function: makeArrowTable()

```ts
function makeArrowTable(
data,
options?,
metadata?): ArrowTable
```

An enhanced version of the makeTable function from Apache Arrow
that supports nested fields and embeddings columns.

@@ -40,11 +45,11 @@ rules are as follows:

## Parameters

* **data**: `Record`<`string`, `unknown`>[]

* **options?**: `Partial`<[`MakeArrowTableOptions`](../classes/MakeArrowTableOptions.md)>

* **metadata?**: `Map`<`string`, `string`>

## Returns
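A hedged sketch of `makeArrowTable`; importing it from the package root is an assumption, and the rows and metadata are placeholders.

```ts
// A minimal sketch; assumes makeArrowTable is re-exported from the package root.
import { makeArrowTable } from "@lancedb/lancedb";

const arrowTable = makeArrowTable(
  [
    { id: 1, vector: [0.1, 0.2], text: "foo" },
    { id: 2, vector: [0.3, 0.4], text: "bar" },
  ],
  // options and metadata are optional; shown with an illustrative metadata map
  undefined,
  new Map([["origin", "example"]]),
);
console.log(arrowTable.numRows);
```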
@@ -28,17 +28,19 @@

- [AddColumnsSql](interfaces/AddColumnsSql.md)
- [AddDataOptions](interfaces/AddDataOptions.md)
- [ClientConfig](interfaces/ClientConfig.md)
- [ColumnAlteration](interfaces/ColumnAlteration.md)
- [ConnectionOptions](interfaces/ConnectionOptions.md)
- [CreateTableOptions](interfaces/CreateTableOptions.md)
- [ExecutableQuery](interfaces/ExecutableQuery.md)
- [IndexConfig](interfaces/IndexConfig.md)
- [IndexOptions](interfaces/IndexOptions.md)
- [IndexStatistics](interfaces/IndexStatistics.md)
- [IvfPqOptions](interfaces/IvfPqOptions.md)
- [OptimizeOptions](interfaces/OptimizeOptions.md)
- [RetryConfig](interfaces/RetryConfig.md)
- [TableNamesOptions](interfaces/TableNamesOptions.md)
- [TimeoutConfig](interfaces/TimeoutConfig.md)
- [UpdateOptions](interfaces/UpdateOptions.md)
- [WriteOptions](interfaces/WriteOptions.md)

@@ -12,7 +12,9 @@ A definition of a new column to add to a table.

### name

```ts
name: string;
```

The name of the new column.

@@ -20,7 +22,9 @@ The name of the new column.

### valueSql

```ts
valueSql: string;
```

The values to populate the new column with, as a SQL expression.
The expression can reference other columns in the table.

@@ -12,7 +12,9 @@ Options for adding data to a table.

### mode

```ts
mode: "append" | "overwrite";
```

If "append" (the default) then the new data will be added to the table
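A hedged sketch of passing `AddDataOptions` to `Table.add` (the `add` method itself is not shown in this excerpt); the row shape is a placeholder.

```ts
// Assuming `tbl` is an open Table whose schema matches the row below.
await tbl.add(
  [{ id: 10, vector: [0.5, 0.5], text: "new row" }],
  { mode: "append" }, // "overwrite" would replace the existing data instead
);
```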
docs/src/js/interfaces/ClientConfig.md (new file, 31 lines)
@@ -0,0 +1,31 @@

[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / ClientConfig

# Interface: ClientConfig

## Properties

### retryConfig?

```ts
optional retryConfig: RetryConfig;
```

***

### timeoutConfig?

```ts
optional timeoutConfig: TimeoutConfig;
```

***

### userAgent?

```ts
optional userAgent: string;
```

@@ -13,9 +13,29 @@ must be provided.

## Properties

### dataType?

```ts
optional dataType: string;
```

A new data type for the column. If not provided then the data type will not be changed.
Changing data types is limited to casting to the same general type. For example, these
changes are valid:
* `int32` -> `int64` (integers)
* `double` -> `float` (floats)
* `string` -> `large_string` (strings)
But these changes are not:
* `int32` -> `double` (mix integers and floats)
* `string` -> `int32` (mix strings and integers)

***

### nullable?

```ts
optional nullable: boolean;
```

Set the new nullability. Note that a nullable column cannot be made non-nullable.

@@ -23,7 +43,9 @@ Set the new nullability. Note that a nullable column cannot be made non-nullable

### path

```ts
path: string;
```

The path to the column to alter. This is a dot-separated path to the column.
If it is a top-level column then it is just the name of the column. If it is

@@ -34,7 +56,9 @@ a nested column then it is the path to the column, e.g. "a.b.c" for a column

### rename?

```ts
optional rename: string;
```

The new name of the column. If not provided then the name will not be changed.
This must be distinct from the names of all other columns in the table.
|||||||
@@ -8,9 +8,44 @@
|
|||||||
|
|
||||||
## Properties
|
## Properties
|
||||||
|
|
||||||
|
### apiKey?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional apiKey: string;
|
||||||
|
```
|
||||||
|
|
||||||
|
(For LanceDB cloud only): the API key to use with LanceDB Cloud.
|
||||||
|
|
||||||
|
Can also be set via the environment variable `LANCEDB_API_KEY`.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### clientConfig?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional clientConfig: ClientConfig;
|
||||||
|
```
|
||||||
|
|
||||||
|
(For LanceDB cloud only): configuration for the remote HTTP client.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### hostOverride?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional hostOverride: string;
|
||||||
|
```
|
||||||
|
|
||||||
|
(For LanceDB cloud only): the host to use for LanceDB cloud. Used
|
||||||
|
for testing purposes.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### readConsistencyInterval?
|
### readConsistencyInterval?
|
||||||
|
|
||||||
> `optional` **readConsistencyInterval**: `number`
|
```ts
|
||||||
|
optional readConsistencyInterval: number;
|
||||||
|
```
|
||||||
|
|
||||||
(For LanceDB OSS only): The interval, in seconds, at which to check for
|
(For LanceDB OSS only): The interval, in seconds, at which to check for
|
||||||
updates to the table from other processes. If None, then consistency is not
|
updates to the table from other processes. If None, then consistency is not
|
||||||
@@ -24,9 +59,22 @@ always consistent.
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### region?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional region: string;
|
||||||
|
```
|
||||||
|
|
||||||
|
(For LanceDB cloud only): the region to use for LanceDB cloud.
|
||||||
|
Defaults to 'us-east-1'.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### storageOptions?
|
### storageOptions?
|
||||||
|
|
||||||
> `optional` **storageOptions**: `Record`<`string`, `string`>
|
```ts
|
||||||
|
optional storageOptions: Record<string, string>;
|
||||||
|
```
|
||||||
|
|
||||||
(For LanceDB OSS only): configuration for object storage.
|
(For LanceDB OSS only): configuration for object storage.
|
||||||
|
|
||||||
|
|||||||
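A hedged sketch of these connection options; the database names, bucket, and storage option key are placeholders.

```ts
import * as lancedb from "@lancedb/lancedb";

// Connecting to LanceDB Cloud; the database name and key are placeholders.
const cloudDb = await lancedb.connect("db://my-database", {
  apiKey: process.env.LANCEDB_API_KEY, // or rely on the env var directly
  region: "us-east-1",
});

// For OSS usage against object storage, storageOptions configures the store:
const s3Db = await lancedb.connect("s3://my-bucket/lancedb", {
  storageOptions: { timeout: "60s" }, // illustrative option key and value
});
```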
@@ -8,15 +8,46 @@

## Properties

### dataStorageVersion?

```ts
optional dataStorageVersion: string;
```

The version of the data storage format to use.

The default is `stable`.
Set to "legacy" to use the old format.

***

### embeddingFunction?

```ts
optional embeddingFunction: EmbeddingFunctionConfig;
```

***

### enableV2ManifestPaths?

```ts
optional enableV2ManifestPaths: boolean;
```

Use the new V2 manifest paths. These paths provide more efficient
opening of datasets with many versions on object stores. WARNING:
turning this on will make the dataset unreadable for older versions
of LanceDB (prior to 0.10.0). To migrate an existing dataset, instead
use the LocalTable#migrateManifestPathsV2 method.

***

### existOk

```ts
existOk: boolean;
```

If this is true and the table already exists and the mode is "create"
then no error will be raised.

@@ -25,7 +56,9 @@ then no error will be raised.

### mode

```ts
mode: "overwrite" | "create";
```

The mode to use when creating the table.

@@ -39,13 +72,17 @@ If this is set to "overwrite" then any existing table will be replaced.

### schema?

```ts
optional schema: SchemaLike;
```

***

### storageOptions?

```ts
optional storageOptions: Record<string, string>;
```

Configuration for object storage.

@@ -58,8 +95,12 @@ The available options are described at https://lancedb.github.io/lancedb/guides/

### useLegacyFormat?

```ts
optional useLegacyFormat: boolean;
```

If true then data files will be written with the legacy format

The default is false.

Deprecated. Use data storage version instead.
|
|||||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
[@lancedb/lancedb](../globals.md) / FtsOptions
|
|
||||||
|
|
||||||
# Interface: FtsOptions
|
|
||||||
|
|
||||||
Options to create an `FTS` index
|
|
||||||
|
|
||||||
## Properties
|
|
||||||
|
|
||||||
### withPosition?
|
|
||||||
|
|
||||||
> `optional` **withPosition**: `boolean`
|
|
||||||
|
|
||||||
Whether to store the positions of the term in the document.
|
|
||||||
|
|
||||||
If this is true then the index will store the positions of the term in the document.
|
|
||||||
This allows phrase queries to be run. But it also increases the size of the index,
|
|
||||||
and the time to build the index.
|
|
||||||
|
|
||||||
The default value is true.
|
|
||||||
|
|
||||||
***
|
|
||||||
@@ -12,7 +12,9 @@ A description of an index currently configured on a column
|
|||||||
|
|
||||||
### columns
|
### columns
|
||||||
|
|
||||||
> **columns**: `string`[]
|
```ts
|
||||||
|
columns: string[];
|
||||||
|
```
|
||||||
|
|
||||||
The columns in the index
|
The columns in the index
|
||||||
|
|
||||||
@@ -23,7 +25,9 @@ be more columns to represent composite indices.
|
|||||||
|
|
||||||
### indexType
|
### indexType
|
||||||
|
|
||||||
> **indexType**: `string`
|
```ts
|
||||||
|
indexType: string;
|
||||||
|
```
|
||||||
|
|
||||||
The type of the index
|
The type of the index
|
||||||
|
|
||||||
@@ -31,6 +35,8 @@ The type of the index
|
|||||||
|
|
||||||
### name
|
### name
|
||||||
|
|
||||||
> **name**: `string`
|
```ts
|
||||||
|
name: string;
|
||||||
|
```
|
||||||
|
|
||||||
The name of the index
|
The name of the index
|
||||||
|
|||||||
@@ -1,19 +0,0 @@
|
|||||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
[@lancedb/lancedb](../globals.md) / IndexMetadata
|
|
||||||
|
|
||||||
# Interface: IndexMetadata
|
|
||||||
|
|
||||||
## Properties
|
|
||||||
|
|
||||||
### indexType?
|
|
||||||
|
|
||||||
> `optional` **indexType**: `string`
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### metricType?
|
|
||||||
|
|
||||||
> `optional` **metricType**: `string`
|
|
||||||
@@ -10,7 +10,9 @@
|
|||||||
|
|
||||||
### config?
|
### config?
|
||||||
|
|
||||||
> `optional` **config**: [`Index`](../classes/Index.md)
|
```ts
|
||||||
|
optional config: Index;
|
||||||
|
```
|
||||||
|
|
||||||
Advanced index configuration
|
Advanced index configuration
|
||||||
|
|
||||||
@@ -26,7 +28,9 @@ will be used to determine the most useful kind of index to create.
|
|||||||
|
|
||||||
### replace?
|
### replace?
|
||||||
|
|
||||||
> `optional` **replace**: `boolean`
|
```ts
|
||||||
|
optional replace: boolean;
|
||||||
|
```
|
||||||
|
|
||||||
Whether to replace the existing index
|
Whether to replace the existing index
|
||||||
|
|
||||||
|
|||||||
@@ -8,32 +8,52 @@
|
|||||||
|
|
||||||
## Properties
|
## Properties
|
||||||
|
|
||||||
### indexType?
|
### distanceType?
|
||||||
|
|
||||||
> `optional` **indexType**: `string`
|
```ts
|
||||||
|
optional distanceType: string;
|
||||||
|
```
|
||||||
|
|
||||||
|
The type of the distance function used by the index. This is only
|
||||||
|
present for vector indices. Scalar and full text search indices do
|
||||||
|
not have a distance function.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### indexType
|
||||||
|
|
||||||
|
```ts
|
||||||
|
indexType: string;
|
||||||
|
```
|
||||||
|
|
||||||
The type of the index
|
The type of the index
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### indices
|
|
||||||
|
|
||||||
> **indices**: [`IndexMetadata`](IndexMetadata.md)[]
|
|
||||||
|
|
||||||
The metadata for each index
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### numIndexedRows
|
### numIndexedRows
|
||||||
|
|
||||||
> **numIndexedRows**: `number`
|
```ts
|
||||||
|
numIndexedRows: number;
|
||||||
|
```
|
||||||
|
|
||||||
The number of rows indexed by the index
|
The number of rows indexed by the index
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### numIndices?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional numIndices: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The number of parts this index is split into.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### numUnindexedRows
|
### numUnindexedRows
|
||||||
|
|
||||||
> **numUnindexedRows**: `number`
|
```ts
|
||||||
|
numUnindexedRows: number;
|
||||||
|
```
|
||||||
|
|
||||||
The number of rows not indexed
|
The number of rows not indexed
|
||||||
|
|||||||
@@ -12,7 +12,9 @@ Options to create an `IVF_PQ` index
|
|||||||
|
|
||||||
### distanceType?
|
### distanceType?
|
||||||
|
|
||||||
> `optional` **distanceType**: `"l2"` \| `"cosine"` \| `"dot"`
|
```ts
|
||||||
|
optional distanceType: "l2" | "cosine" | "dot";
|
||||||
|
```
|
||||||
|
|
||||||
Distance type to use to build the index.
|
Distance type to use to build the index.
|
||||||
|
|
||||||
@@ -50,7 +52,9 @@ L2 norm is 1), then dot distance is equivalent to the cosine distance.
|
|||||||
|
|
||||||
### maxIterations?
|
### maxIterations?
|
||||||
|
|
||||||
> `optional` **maxIterations**: `number`
|
```ts
|
||||||
|
optional maxIterations: number;
|
||||||
|
```
|
||||||
|
|
||||||
Max iteration to train IVF kmeans.
|
Max iteration to train IVF kmeans.
|
||||||
|
|
||||||
@@ -66,7 +70,9 @@ The default value is 50.
|
|||||||
|
|
||||||
### numPartitions?
|
### numPartitions?
|
||||||
|
|
||||||
> `optional` **numPartitions**: `number`
|
```ts
|
||||||
|
optional numPartitions: number;
|
||||||
|
```
|
||||||
|
|
||||||
The number of IVF partitions to create.
|
The number of IVF partitions to create.
|
||||||
|
|
||||||
@@ -82,7 +88,9 @@ part of the search (searching within a partition) will be slow.
|
|||||||
|
|
||||||
### numSubVectors?
|
### numSubVectors?
|
||||||
|
|
||||||
> `optional` **numSubVectors**: `number`
|
```ts
|
||||||
|
optional numSubVectors: number;
|
||||||
|
```
|
||||||
|
|
||||||
Number of sub-vectors of PQ.
|
Number of sub-vectors of PQ.
|
||||||
|
|
||||||
@@ -101,7 +109,9 @@ will likely result in poor performance.
|
|||||||
|
|
||||||
### sampleRate?
|
### sampleRate?
|
||||||
|
|
||||||
> `optional` **sampleRate**: `number`
|
```ts
|
||||||
|
optional sampleRate: number;
|
||||||
|
```
|
||||||
|
|
||||||
The number of vectors, per partition, to sample when training IVF kmeans.
|
The number of vectors, per partition, to sample when training IVF kmeans.
|
||||||
|
|
||||||
|
|||||||
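A hedged sketch of passing IVF_PQ parameters through `IndexOptions.config`; `Index.ivfPq` as the config constructor is an assumption, and the column name, dimension, and numbers are placeholders.

```ts
import * as lancedb from "@lancedb/lancedb";

// Assuming `tbl` is an open Table with a 768-dim vector column named "vector".
await tbl.createIndex("vector", {
  config: lancedb.Index.ivfPq({
    distanceType: "cosine",
    numPartitions: 256, // roughly sqrt(row count) is a common starting point
    numSubVectors: 96,  // should evenly divide the vector dimension
  }),
});
```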
39
docs/src/js/interfaces/OptimizeOptions.md
Normal file
39
docs/src/js/interfaces/OptimizeOptions.md
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / OptimizeOptions
|
||||||
|
|
||||||
|
# Interface: OptimizeOptions
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### cleanupOlderThan
|
||||||
|
|
||||||
|
```ts
|
||||||
|
cleanupOlderThan: Date;
|
||||||
|
```
|
||||||
|
|
||||||
|
If set then all versions older than the given date
|
||||||
|
be removed. The current version will never be removed.
|
||||||
|
The default is 7 days
|
||||||
|
|
||||||
|
#### Example
|
||||||
|
|
||||||
|
```ts
|
||||||
|
// Delete all versions older than 1 day
|
||||||
|
const olderThan = new Date();
|
||||||
|
olderThan.setDate(olderThan.getDate() - 1));
|
||||||
|
tbl.cleanupOlderVersions(olderThan);
|
||||||
|
|
||||||
|
// Delete all versions except the current version
|
||||||
|
tbl.cleanupOlderVersions(new Date());
|
||||||
|
```
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### deleteUnverified
|
||||||
|
|
||||||
|
```ts
|
||||||
|
deleteUnverified: boolean;
|
||||||
|
```
|
||||||
90
docs/src/js/interfaces/RetryConfig.md
Normal file
90
docs/src/js/interfaces/RetryConfig.md
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / RetryConfig
|
||||||
|
|
||||||
|
# Interface: RetryConfig
|
||||||
|
|
||||||
|
Retry configuration for the remote HTTP client.
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### backoffFactor?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional backoffFactor: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The backoff factor to apply between retries. Default is 0.25. Between each retry
|
||||||
|
the client will wait for the amount of seconds:
|
||||||
|
`{backoff factor} * (2 ** ({number of previous retries}))`. So for the default
|
||||||
|
of 0.25, the first retry will wait 0.25 seconds, the second retry will wait 0.5
|
||||||
|
seconds, the third retry will wait 1 second, etc.
|
||||||
|
|
||||||
|
You can also set this via the environment variable
|
||||||
|
`LANCE_CLIENT_RETRY_BACKOFF_FACTOR`.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### backoffJitter?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional backoffJitter: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The jitter to apply to the backoff factor, in seconds. Default is 0.25.
|
||||||
|
|
||||||
|
A random value between 0 and `backoff_jitter` will be added to the backoff
|
||||||
|
factor in seconds. So for the default of 0.25 seconds, between 0 and 250
|
||||||
|
milliseconds will be added to the sleep between each retry.
|
||||||
|
|
||||||
|
You can also set this via the environment variable
|
||||||
|
`LANCE_CLIENT_RETRY_BACKOFF_JITTER`.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### connectRetries?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional connectRetries: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The maximum number of retries for connection errors. Default is 3. You
|
||||||
|
can also set this via the environment variable `LANCE_CLIENT_CONNECT_RETRIES`.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### readRetries?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional readRetries: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The maximum number of retries for read errors. Default is 3. You can also
|
||||||
|
set this via the environment variable `LANCE_CLIENT_READ_RETRIES`.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### retries?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional retries: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The maximum number of retries for a request. Default is 3. You can also
|
||||||
|
set this via the environment variable `LANCE_CLIENT_MAX_RETRIES`.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### statuses?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional statuses: number[];
|
||||||
|
```
|
||||||
|
|
||||||
|
The HTTP status codes for which to retry the request. Default is
|
||||||
|
[429, 500, 502, 503].
|
||||||
|
|
||||||
|
You can also set this via the environment variable
|
||||||
|
`LANCE_CLIENT_RETRY_STATUSES`. Use a comma-separated list of integers.
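A minimal usage sketch follows. It assumes the remote connection options accept this interface under a `clientConfig.retryConfig` field and that an `apiKey` is supplied for a LanceDB Cloud URI; treat the exact option path and the values as assumptions rather than a definitive reference:

```ts
import * as lancedb from "@lancedb/lancedb";

// Hypothetical project URI and API key; retry settings mirror the fields above.
const db = await lancedb.connect("db://my-project", {
  apiKey: "your-api-key",
  clientConfig: {
    retryConfig: { retries: 5, backoffFactor: 0.5, statuses: [429, 500, 502, 503] },
  },
});
```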
|
||||||
@@ -10,7 +10,9 @@
|
|||||||
|
|
||||||
### limit?
|
### limit?
|
||||||
|
|
||||||
> `optional` **limit**: `number`
|
```ts
|
||||||
|
optional limit: number;
|
||||||
|
```
|
||||||
|
|
||||||
An optional limit to the number of results to return.
|
An optional limit to the number of results to return.
|
||||||
|
|
||||||
@@ -18,7 +20,9 @@ An optional limit to the number of results to return.
|
|||||||
|
|
||||||
### startAfter?
|
### startAfter?
|
||||||
|
|
||||||
> `optional` **startAfter**: `string`
|
```ts
|
||||||
|
optional startAfter: string;
|
||||||
|
```
|
||||||
|
|
||||||
If present, only return names that come lexicographically after the
|
If present, only return names that come lexicographically after the
|
||||||
supplied value.
|
supplied value.
|
||||||
|
|||||||
46
docs/src/js/interfaces/TimeoutConfig.md
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / TimeoutConfig
|
||||||
|
|
||||||
|
# Interface: TimeoutConfig
|
||||||
|
|
||||||
|
Timeout configuration for remote HTTP client.
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### connectTimeout?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional connectTimeout: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The timeout for establishing a connection in seconds. Default is 120
|
||||||
|
seconds (2 minutes). This can also be set via the environment variable
|
||||||
|
`LANCE_CLIENT_CONNECT_TIMEOUT`, as an integer number of seconds.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### poolIdleTimeout?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional poolIdleTimeout: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The timeout for keeping idle connections in the connection pool in seconds.
|
||||||
|
Default is 300 seconds (5 minutes). This can also be set via the
|
||||||
|
environment variable `LANCE_CLIENT_CONNECTION_TIMEOUT`, as an integer
|
||||||
|
number of seconds.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### readTimeout?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional readTimeout: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The timeout for reading data from the server in seconds. Default is 300
|
||||||
|
seconds (5 minutes). This can also be set via the environment variable
|
||||||
|
`LANCE_CLIENT_READ_TIMEOUT`, as an integer number of seconds.
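As with `RetryConfig`, here is a hedged sketch of building this object; the values simply restate the documented defaults, and passing it via `clientConfig.timeoutConfig` alongside a retry config is an assumption about the connection options:

```ts
// All values are in seconds, mirroring the documented defaults.
const timeoutConfig = {
  connectTimeout: 120, // 2 minutes to establish a connection
  readTimeout: 300, // 5 minutes to read a response
  poolIdleTimeout: 300, // 5 minutes before idle pooled connections are dropped
};
// Assumed wiring: lancedb.connect(uri, { clientConfig: { timeoutConfig } });
```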
|
||||||
@@ -10,7 +10,9 @@
|
|||||||
|
|
||||||
### where
|
### where
|
||||||
|
|
||||||
> **where**: `string`
|
```ts
|
||||||
|
where: string;
|
||||||
|
```
|
||||||
|
|
||||||
A filter that limits the scope of the update.
|
A filter that limits the scope of the update.
|
||||||
|
|
||||||
|
|||||||
@@ -12,6 +12,8 @@ Write options when creating a Table.
|
|||||||
|
|
||||||
### mode?
|
### mode?
|
||||||
|
|
||||||
> `optional` **mode**: [`WriteMode`](../enumerations/WriteMode.md)
|
```ts
|
||||||
|
optional mode: WriteMode;
|
||||||
|
```
|
||||||
|
|
||||||
Write mode for writing to a table.
|
Write mode for writing to a table.
|
||||||
|
|||||||
@@ -12,16 +12,12 @@
|
|||||||
|
|
||||||
- [EmbeddingFunction](classes/EmbeddingFunction.md)
|
- [EmbeddingFunction](classes/EmbeddingFunction.md)
|
||||||
- [EmbeddingFunctionRegistry](classes/EmbeddingFunctionRegistry.md)
|
- [EmbeddingFunctionRegistry](classes/EmbeddingFunctionRegistry.md)
|
||||||
- [OpenAIEmbeddingFunction](classes/OpenAIEmbeddingFunction.md)
|
- [TextEmbeddingFunction](classes/TextEmbeddingFunction.md)
|
||||||
|
|
||||||
### Interfaces
|
### Interfaces
|
||||||
|
|
||||||
- [EmbeddingFunctionConfig](interfaces/EmbeddingFunctionConfig.md)
|
- [EmbeddingFunctionConfig](interfaces/EmbeddingFunctionConfig.md)
|
||||||
|
|
||||||
### Type Aliases
|
|
||||||
|
|
||||||
- [OpenAIOptions](type-aliases/OpenAIOptions.md)
|
|
||||||
|
|
||||||
### Functions
|
### Functions
|
||||||
|
|
||||||
- [LanceSchema](functions/LanceSchema.md)
|
- [LanceSchema](functions/LanceSchema.md)
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ An embedding function that automatically creates vector representation for a giv
|
|||||||
|
|
||||||
## Extended by
|
## Extended by
|
||||||
|
|
||||||
- [`OpenAIEmbeddingFunction`](OpenAIEmbeddingFunction.md)
|
- [`TextEmbeddingFunction`](TextEmbeddingFunction.md)
|
||||||
|
|
||||||
## Type Parameters
|
## Type Parameters
|
||||||
|
|
||||||
@@ -22,7 +22,9 @@ An embedding function that automatically creates vector representation for a giv
|
|||||||
|
|
||||||
### new EmbeddingFunction()
|
### new EmbeddingFunction()
|
||||||
|
|
||||||
> **new EmbeddingFunction**<`T`, `M`>(): [`EmbeddingFunction`](EmbeddingFunction.md)<`T`, `M`>
|
```ts
|
||||||
|
new EmbeddingFunction<T, M>(): EmbeddingFunction<T, M>
|
||||||
|
```
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -32,13 +34,15 @@ An embedding function that automatically creates vector representation for a giv
|
|||||||
|
|
||||||
### computeQueryEmbeddings()
|
### computeQueryEmbeddings()
|
||||||
|
|
||||||
> **computeQueryEmbeddings**(`data`): `Promise`<`number`[] \| `Float32Array` \| `Float64Array`>
|
```ts
|
||||||
|
computeQueryEmbeddings(data): Promise<number[] | Float32Array | Float64Array>
|
||||||
|
```
|
||||||
|
|
||||||
Compute the embeddings for a single query
|
Compute the embeddings for a single query
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **data**: `T`
|
* **data**: `T`
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -48,13 +52,15 @@ Compute the embeddings for a single query
|
|||||||
|
|
||||||
### computeSourceEmbeddings()
|
### computeSourceEmbeddings()
|
||||||
|
|
||||||
> `abstract` **computeSourceEmbeddings**(`data`): `Promise`<`number`[][] \| `Float32Array`[] \| `Float64Array`[]>
|
```ts
|
||||||
|
abstract computeSourceEmbeddings(data): Promise<number[][] | Float32Array[] | Float64Array[]>
|
||||||
|
```
|
||||||
|
|
||||||
Creates a vector representation for the given values.
|
Creates a vector representation for the given values.
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **data**: `T`[]
|
* **data**: `T`[]
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -64,7 +70,9 @@ Creates a vector representation for the given values.
|
|||||||
|
|
||||||
### embeddingDataType()
|
### embeddingDataType()
|
||||||
|
|
||||||
> `abstract` **embeddingDataType**(): `Float`<`Floats`>
|
```ts
|
||||||
|
abstract embeddingDataType(): Float<Floats>
|
||||||
|
```
|
||||||
|
|
||||||
The datatype of the embeddings
|
The datatype of the embeddings
|
||||||
|
|
||||||
@@ -74,9 +82,23 @@ The datatype of the embeddings
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### init()?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional init(): Promise<void>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`Promise`<`void`>
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### ndims()
|
### ndims()
|
||||||
|
|
||||||
> **ndims**(): `undefined` \| `number`
|
```ts
|
||||||
|
ndims(): undefined | number
|
||||||
|
```
|
||||||
|
|
||||||
The number of dimensions of the embeddings
|
The number of dimensions of the embeddings
|
||||||
|
|
||||||
@@ -88,15 +110,16 @@ The number of dimensions of the embeddings
|
|||||||
|
|
||||||
### sourceField()
|
### sourceField()
|
||||||
|
|
||||||
> **sourceField**(`optionsOrDatatype`): [`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, `FunctionOptions`>>]
|
```ts
|
||||||
|
sourceField(optionsOrDatatype): [DataType<Type, any>, Map<string, EmbeddingFunction<any, FunctionOptions>>]
|
||||||
|
```
|
||||||
|
|
||||||
sourceField is used in combination with `LanceSchema` to provide a declarative data model
|
sourceField is used in combination with `LanceSchema` to provide a declarative data model
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **optionsOrDatatype**: `DataType`<`Type`, `any`> \| `Partial`<`FieldOptions`<`DataType`<`Type`, `any`>>>
|
* **optionsOrDatatype**: `DataType`<`Type`, `any`> \| `Partial`<`FieldOptions`<`DataType`<`Type`, `any`>>>
|
||||||
|
The options for the field or the datatype
|
||||||
The options for the field or the datatype
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -110,7 +133,9 @@ lancedb.LanceSchema
|
|||||||
|
|
||||||
### toJSON()
|
### toJSON()
|
||||||
|
|
||||||
> `abstract` **toJSON**(): `Partial`<`M`>
|
```ts
|
||||||
|
abstract toJSON(): Partial<M>
|
||||||
|
```
|
||||||
|
|
||||||
Convert the embedding function to a JSON object
|
Convert the embedding function to a JSON object
|
||||||
It is used to serialize the embedding function to the schema
|
It is used to serialize the embedding function to the schema
|
||||||
@@ -145,13 +170,15 @@ class MyEmbeddingFunction extends EmbeddingFunction {
|
|||||||
|
|
||||||
### vectorField()
|
### vectorField()
|
||||||
|
|
||||||
> **vectorField**(`optionsOrDatatype`?): [`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, `FunctionOptions`>>]
|
```ts
|
||||||
|
vectorField(optionsOrDatatype?): [DataType<Type, any>, Map<string, EmbeddingFunction<any, FunctionOptions>>]
|
||||||
|
```
|
||||||
|
|
||||||
vectorField is used in combination with `LanceSchema` to provide a declarative data model
|
vectorField is used in combination with `LanceSchema` to provide a declarative data model
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **optionsOrDatatype?**: `DataType`<`Type`, `any`> \| `Partial`<`FieldOptions`<`DataType`<`Type`, `any`>>>
|
* **optionsOrDatatype?**: `DataType`<`Type`, `any`> \| `Partial`<`FieldOptions`<`DataType`<`Type`, `any`>>>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
|
|||||||
@@ -15,7 +15,9 @@ or TextEmbeddingFunction and registering it with the registry
|
|||||||
|
|
||||||
### new EmbeddingFunctionRegistry()
|
### new EmbeddingFunctionRegistry()
|
||||||
|
|
||||||
> **new EmbeddingFunctionRegistry**(): [`EmbeddingFunctionRegistry`](EmbeddingFunctionRegistry.md)
|
```ts
|
||||||
|
new EmbeddingFunctionRegistry(): EmbeddingFunctionRegistry
|
||||||
|
```
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -25,11 +27,13 @@ or TextEmbeddingFunction and registering it with the registry
|
|||||||
|
|
||||||
### functionToMetadata()
|
### functionToMetadata()
|
||||||
|
|
||||||
> **functionToMetadata**(`conf`): `Record`<`string`, `any`>
|
```ts
|
||||||
|
functionToMetadata(conf): Record<string, any>
|
||||||
|
```
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **conf**: [`EmbeddingFunctionConfig`](../interfaces/EmbeddingFunctionConfig.md)
|
* **conf**: [`EmbeddingFunctionConfig`](../interfaces/EmbeddingFunctionConfig.md)
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -39,7 +43,9 @@ or TextEmbeddingFunction and registering it with the registry
|
|||||||
|
|
||||||
### get()
|
### get()
|
||||||
|
|
||||||
> **get**<`T`, `Name`>(`name`): `Name` *extends* `"openai"` ? `EmbeddingFunctionCreate`<[`OpenAIEmbeddingFunction`](OpenAIEmbeddingFunction.md)> : `undefined` \| `EmbeddingFunctionCreate`<`T`>
|
```ts
|
||||||
|
get<T>(name): undefined | EmbeddingFunctionCreate<T>
|
||||||
|
```
|
||||||
|
|
||||||
Fetch an embedding function by name
|
Fetch an embedding function by name
|
||||||
|
|
||||||
@@ -47,27 +53,26 @@ Fetch an embedding function by name
|
|||||||
|
|
||||||
• **T** *extends* [`EmbeddingFunction`](EmbeddingFunction.md)<`unknown`, `FunctionOptions`>
|
• **T** *extends* [`EmbeddingFunction`](EmbeddingFunction.md)<`unknown`, `FunctionOptions`>
|
||||||
|
|
||||||
• **Name** *extends* `string` = `""`
|
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **name**: `Name` *extends* `"openai"` ? `"openai"` : `string`
|
* **name**: `string`
|
||||||
|
The name of the function
|
||||||
The name of the function
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`Name` *extends* `"openai"` ? `EmbeddingFunctionCreate`<[`OpenAIEmbeddingFunction`](OpenAIEmbeddingFunction.md)> : `undefined` \| `EmbeddingFunctionCreate`<`T`>
|
`undefined` \| `EmbeddingFunctionCreate`<`T`>
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### getTableMetadata()
|
### getTableMetadata()
|
||||||
|
|
||||||
> **getTableMetadata**(`functions`): `Map`<`string`, `string`>
|
```ts
|
||||||
|
getTableMetadata(functions): Map<string, string>
|
||||||
|
```
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **functions**: [`EmbeddingFunctionConfig`](../interfaces/EmbeddingFunctionConfig.md)[]
|
* **functions**: [`EmbeddingFunctionConfig`](../interfaces/EmbeddingFunctionConfig.md)[]
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -75,9 +80,25 @@ The name of the function
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### length()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
length(): number
|
||||||
|
```
|
||||||
|
|
||||||
|
Get the number of registered functions
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`number`
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### register()
|
### register()
|
||||||
|
|
||||||
> **register**<`T`>(`this`, `alias`?): (`ctor`) => `any`
|
```ts
|
||||||
|
register<T>(this, alias?): (ctor) => any
|
||||||
|
```
|
||||||
|
|
||||||
Register an embedding function
|
Register an embedding function
|
||||||
|
|
||||||
@@ -87,9 +108,9 @@ Register an embedding function
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **this**: [`EmbeddingFunctionRegistry`](EmbeddingFunctionRegistry.md)
|
* **this**: [`EmbeddingFunctionRegistry`](EmbeddingFunctionRegistry.md)
|
||||||
|
|
||||||
• **alias?**: `string`
|
* **alias?**: `string`
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -97,7 +118,7 @@ Register an embedding function
|
|||||||
|
|
||||||
##### Parameters
|
##### Parameters
|
||||||
|
|
||||||
• **ctor**: `T`
|
* **ctor**: `T`
|
||||||
|
|
||||||
##### Returns
|
##### Returns
|
||||||
|
|
||||||
@@ -111,13 +132,15 @@ Error if the function is already registered
|
|||||||
|
|
||||||
### reset()
|
### reset()
|
||||||
|
|
||||||
> **reset**(`this`): `void`
|
```ts
|
||||||
|
reset(this): void
|
||||||
|
```
|
||||||
|
|
||||||
reset the registry to the initial state
|
reset the registry to the initial state
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **this**: [`EmbeddingFunctionRegistry`](EmbeddingFunctionRegistry.md)
|
* **this**: [`EmbeddingFunctionRegistry`](EmbeddingFunctionRegistry.md)
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
|
|||||||
@@ -2,31 +2,33 @@
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
[@lancedb/lancedb](../../../globals.md) / [embedding](../README.md) / OpenAIEmbeddingFunction
|
[@lancedb/lancedb](../../../globals.md) / [embedding](../README.md) / TextEmbeddingFunction
|
||||||
|
|
||||||
# Class: OpenAIEmbeddingFunction
|
# Class: `abstract` TextEmbeddingFunction<M>
|
||||||
|
|
||||||
An embedding function that automatically creates vector representation for a given column.
|
An abstract class for implementing embedding functions that take text as input.
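To make the contract concrete, below is a hedged sketch of a subclass. The class name, the `"char-buckets"` alias, and the toy embedding logic are invented for illustration, the `@lancedb/lancedb/embedding` import path is assumed, and only `generateEmbeddings` and `toJSON` are overridden because the remaining members have inherited implementations:

```ts
import { register, TextEmbeddingFunction } from "@lancedb/lancedb/embedding";

@register("char-buckets") // hypothetical alias
class CharBucketEmbeddings extends TextEmbeddingFunction {
  toJSON() {
    return {}; // nothing to persist for this toy model
  }

  ndims() {
    return 8;
  }

  async generateEmbeddings(texts: string[]): Promise<number[][]> {
    // Toy embedding: spread character codes across 8 fixed buckets.
    return texts.map((text) => {
      const vec = new Array(8).fill(0);
      for (let i = 0; i < text.length; i++) {
        vec[i % 8] += text.charCodeAt(i) / 1000;
      }
      return vec;
    });
  }
}
```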
|
||||||
|
|
||||||
## Extends
|
## Extends
|
||||||
|
|
||||||
- [`EmbeddingFunction`](EmbeddingFunction.md)<`string`, `Partial`<[`OpenAIOptions`](../type-aliases/OpenAIOptions.md)>>
|
- [`EmbeddingFunction`](EmbeddingFunction.md)<`string`, `M`>
|
||||||
|
|
||||||
|
## Type Parameters
|
||||||
|
|
||||||
|
• **M** *extends* `FunctionOptions` = `FunctionOptions`
|
||||||
|
|
||||||
## Constructors
|
## Constructors
|
||||||
|
|
||||||
### new OpenAIEmbeddingFunction()
|
### new TextEmbeddingFunction()
|
||||||
|
|
||||||
> **new OpenAIEmbeddingFunction**(`options`): [`OpenAIEmbeddingFunction`](OpenAIEmbeddingFunction.md)
|
```ts
|
||||||
|
new TextEmbeddingFunction<M>(): TextEmbeddingFunction<M>
|
||||||
#### Parameters
|
```
|
||||||
|
|
||||||
• **options**: `Partial`<[`OpenAIOptions`](../type-aliases/OpenAIOptions.md)> = `...`
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
[`OpenAIEmbeddingFunction`](OpenAIEmbeddingFunction.md)
|
[`TextEmbeddingFunction`](TextEmbeddingFunction.md)<`M`>
|
||||||
|
|
||||||
#### Overrides
|
#### Inherited from
|
||||||
|
|
||||||
[`EmbeddingFunction`](EmbeddingFunction.md).[`constructor`](EmbeddingFunction.md#constructors)
|
[`EmbeddingFunction`](EmbeddingFunction.md).[`constructor`](EmbeddingFunction.md#constructors)
|
||||||
|
|
||||||
@@ -34,17 +36,19 @@ An embedding function that automatically creates vector representation for a giv
|
|||||||
|
|
||||||
### computeQueryEmbeddings()
|
### computeQueryEmbeddings()
|
||||||
|
|
||||||
> **computeQueryEmbeddings**(`data`): `Promise`<`number`[]>
|
```ts
|
||||||
|
computeQueryEmbeddings(data): Promise<number[] | Float32Array | Float64Array>
|
||||||
|
```
|
||||||
|
|
||||||
Compute the embeddings for a single query
|
Compute the embeddings for a single query
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **data**: `string`
|
* **data**: `string`
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`Promise`<`number`[]>
|
`Promise`<`number`[] \| `Float32Array` \| `Float64Array`>
|
||||||
|
|
||||||
#### Overrides
|
#### Overrides
|
||||||
|
|
||||||
@@ -54,17 +58,19 @@ Compute the embeddings for a single query
|
|||||||
|
|
||||||
### computeSourceEmbeddings()
|
### computeSourceEmbeddings()
|
||||||
|
|
||||||
> **computeSourceEmbeddings**(`data`): `Promise`<`number`[][]>
|
```ts
|
||||||
|
computeSourceEmbeddings(data): Promise<number[][] | Float32Array[] | Float64Array[]>
|
||||||
|
```
|
||||||
|
|
||||||
Creates a vector representation for the given values.
|
Creates a vector representation for the given values.
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **data**: `string`[]
|
* **data**: `string`[]
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`Promise`<`number`[][]>
|
`Promise`<`number`[][] \| `Float32Array`[] \| `Float64Array`[]>
|
||||||
|
|
||||||
#### Overrides
|
#### Overrides
|
||||||
|
|
||||||
@@ -74,7 +80,9 @@ Creates a vector representation for the given values.
|
|||||||
|
|
||||||
### embeddingDataType()
|
### embeddingDataType()
|
||||||
|
|
||||||
> **embeddingDataType**(): `Float`<`Floats`>
|
```ts
|
||||||
|
embeddingDataType(): Float<Floats>
|
||||||
|
```
|
||||||
|
|
||||||
The datatype of the embeddings
|
The datatype of the embeddings
|
||||||
|
|
||||||
@@ -88,17 +96,53 @@ The datatype of the embeddings
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### generateEmbeddings()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
abstract generateEmbeddings(texts, ...args): Promise<number[][] | Float32Array[] | Float64Array[]>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **texts**: `string`[]
|
||||||
|
|
||||||
|
* ...**args**: `any`[]
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`Promise`<`number`[][] \| `Float32Array`[] \| `Float64Array`[]>
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### init()?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional init(): Promise<void>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`Promise`<`void`>
|
||||||
|
|
||||||
|
#### Inherited from
|
||||||
|
|
||||||
|
[`EmbeddingFunction`](EmbeddingFunction.md).[`init`](EmbeddingFunction.md#init)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### ndims()
|
### ndims()
|
||||||
|
|
||||||
> **ndims**(): `number`
|
```ts
|
||||||
|
ndims(): undefined | number
|
||||||
|
```
|
||||||
|
|
||||||
The number of dimensions of the embeddings
|
The number of dimensions of the embeddings
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`number`
|
`undefined` \| `number`
|
||||||
|
|
||||||
#### Overrides
|
#### Inherited from
|
||||||
|
|
||||||
[`EmbeddingFunction`](EmbeddingFunction.md).[`ndims`](EmbeddingFunction.md#ndims)
|
[`EmbeddingFunction`](EmbeddingFunction.md).[`ndims`](EmbeddingFunction.md#ndims)
|
||||||
|
|
||||||
@@ -106,16 +150,12 @@ The number of dimensions of the embeddings
|
|||||||
|
|
||||||
### sourceField()
|
### sourceField()
|
||||||
|
|
||||||
> **sourceField**(`optionsOrDatatype`): [`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, `FunctionOptions`>>]
|
```ts
|
||||||
|
sourceField(): [DataType<Type, any>, Map<string, EmbeddingFunction<any, FunctionOptions>>]
|
||||||
|
```
|
||||||
|
|
||||||
sourceField is used in combination with `LanceSchema` to provide a declarative data model
|
sourceField is used in combination with `LanceSchema` to provide a declarative data model
|
||||||
|
|
||||||
#### Parameters
|
|
||||||
|
|
||||||
• **optionsOrDatatype**: `DataType`<`Type`, `any`> \| `Partial`<`FieldOptions`<`DataType`<`Type`, `any`>>>
|
|
||||||
|
|
||||||
The options for the field or the datatype
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
[`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, `FunctionOptions`>>]
|
[`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, `FunctionOptions`>>]
|
||||||
@@ -124,7 +164,7 @@ The options for the field or the datatype
|
|||||||
|
|
||||||
lancedb.LanceSchema
|
lancedb.LanceSchema
|
||||||
|
|
||||||
#### Inherited from
|
#### Overrides
|
||||||
|
|
||||||
[`EmbeddingFunction`](EmbeddingFunction.md).[`sourceField`](EmbeddingFunction.md#sourcefield)
|
[`EmbeddingFunction`](EmbeddingFunction.md).[`sourceField`](EmbeddingFunction.md#sourcefield)
|
||||||
|
|
||||||
@@ -132,7 +172,9 @@ lancedb.LanceSchema
|
|||||||
|
|
||||||
### toJSON()
|
### toJSON()
|
||||||
|
|
||||||
> **toJSON**(): `object`
|
```ts
|
||||||
|
abstract toJSON(): Partial<M>
|
||||||
|
```
|
||||||
|
|
||||||
Convert the embedding function to a JSON object
|
Convert the embedding function to a JSON object
|
||||||
It is used to serialize the embedding function to the schema
|
It is used to serialize the embedding function to the schema
|
||||||
@@ -144,11 +186,7 @@ If it does not, the embedding function will not be able to be recreated, or coul
|
|||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`object`
|
`Partial`<`M`>
|
||||||
|
|
||||||
##### model
|
|
||||||
|
|
||||||
> **model**: `string` & `object` \| `"text-embedding-ada-002"` \| `"text-embedding-3-small"` \| `"text-embedding-3-large"`
|
|
||||||
|
|
||||||
#### Example
|
#### Example
|
||||||
|
|
||||||
@@ -167,7 +205,7 @@ class MyEmbeddingFunction extends EmbeddingFunction {
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Overrides
|
#### Inherited from
|
||||||
|
|
||||||
[`EmbeddingFunction`](EmbeddingFunction.md).[`toJSON`](EmbeddingFunction.md#tojson)
|
[`EmbeddingFunction`](EmbeddingFunction.md).[`toJSON`](EmbeddingFunction.md#tojson)
|
||||||
|
|
||||||
@@ -175,13 +213,15 @@ class MyEmbeddingFunction extends EmbeddingFunction {
|
|||||||
|
|
||||||
### vectorField()
|
### vectorField()
|
||||||
|
|
||||||
> **vectorField**(`optionsOrDatatype`?): [`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, `FunctionOptions`>>]
|
```ts
|
||||||
|
vectorField(optionsOrDatatype?): [DataType<Type, any>, Map<string, EmbeddingFunction<any, FunctionOptions>>]
|
||||||
|
```
|
||||||
|
|
||||||
vectorField is used in combination with `LanceSchema` to provide a declarative data model
|
vectorField is used in combination with `LanceSchema` to provide a declarative data model
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
• **optionsOrDatatype?**: `DataType`<`Type`, `any`> \| `Partial`<`FieldOptions`<`DataType`<`Type`, `any`>>>
|
* **optionsOrDatatype?**: `DataType`<`Type`, `any`> \| `Partial`<`FieldOptions`<`DataType`<`Type`, `any`>>>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -6,13 +6,15 @@
|
|||||||
|
|
||||||
# Function: LanceSchema()
|
# Function: LanceSchema()
|
||||||
|
|
||||||
> **LanceSchema**(`fields`): `Schema`
|
```ts
|
||||||
|
function LanceSchema(fields): Schema
|
||||||
|
```
|
||||||
|
|
||||||
Create a schema with embedding functions.
|
Create a schema with embedding functions.
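A hedged sketch of typical usage follows, pairing `LanceSchema` with an embedding function's `sourceField`/`vectorField` helpers. The `"char-buckets"` alias, the table name, and the import paths are assumptions, as is the `create()` call on the factory returned by the registry:

```ts
import { connect } from "@lancedb/lancedb";
import { LanceSchema, getRegistry } from "@lancedb/lancedb/embedding";
import { Utf8 } from "apache-arrow";

// Assumes an embedding function was registered earlier under this alias.
const func = getRegistry().get("char-buckets")!.create();

const schema = LanceSchema({
  text: func.sourceField(new Utf8()), // column the embeddings are computed from
  vector: func.vectorField(), // column the embeddings are written to
});

const db = await connect("data/sample-lancedb");
const table = await db.createEmptyTable("docs", schema);
```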
|
||||||
|
|
||||||
## Parameters
|
## Parameters
|
||||||
|
|
||||||
• **fields**: `Record`<`string`, `object` \| [`object`, `Map`<`string`, [`EmbeddingFunction`](../classes/EmbeddingFunction.md)<`any`, `FunctionOptions`>>]>
|
* **fields**: `Record`<`string`, `object` \| [`object`, `Map`<`string`, [`EmbeddingFunction`](../classes/EmbeddingFunction.md)<`any`, `FunctionOptions`>>]>
|
||||||
|
|
||||||
## Returns
|
## Returns
|
||||||
|
|
||||||
|
|||||||
@@ -6,7 +6,9 @@
|
|||||||
|
|
||||||
# Function: getRegistry()
|
# Function: getRegistry()
|
||||||
|
|
||||||
> **getRegistry**(): [`EmbeddingFunctionRegistry`](../classes/EmbeddingFunctionRegistry.md)
|
```ts
|
||||||
|
function getRegistry(): EmbeddingFunctionRegistry
|
||||||
|
```
|
||||||
|
|
||||||
Utility function to get the global instance of the registry
|
Utility function to get the global instance of the registry
|
||||||
|
|
||||||
|
|||||||
@@ -6,11 +6,13 @@
|
|||||||
|
|
||||||
# Function: register()
|
# Function: register()
|
||||||
|
|
||||||
> **register**(`name`?): (`ctor`) => `any`
|
```ts
|
||||||
|
function register(name?): (ctor) => any
|
||||||
|
```
|
||||||
|
|
||||||
## Parameters
|
## Parameters
|
||||||
|
|
||||||
• **name?**: `string`
|
* **name?**: `string`
|
||||||
|
|
||||||
## Returns
|
## Returns
|
||||||
|
|
||||||
@@ -18,7 +20,7 @@
|
|||||||
|
|
||||||
### Parameters
|
### Parameters
|
||||||
|
|
||||||
• **ctor**: `EmbeddingFunctionConstructor`<[`EmbeddingFunction`](../classes/EmbeddingFunction.md)<`any`, `FunctionOptions`>>
|
* **ctor**: `EmbeddingFunctionConstructor`<[`EmbeddingFunction`](../classes/EmbeddingFunction.md)<`any`, `FunctionOptions`>>
|
||||||
|
|
||||||
### Returns
|
### Returns
|
||||||
|
|
||||||
|
|||||||
@@ -10,16 +10,22 @@
|
|||||||
|
|
||||||
### function
|
### function
|
||||||
|
|
||||||
> **function**: [`EmbeddingFunction`](../classes/EmbeddingFunction.md)<`any`, `FunctionOptions`>
|
```ts
|
||||||
|
function: EmbeddingFunction<any, FunctionOptions>;
|
||||||
|
```
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### sourceColumn
|
### sourceColumn
|
||||||
|
|
||||||
> **sourceColumn**: `string`
|
```ts
|
||||||
|
sourceColumn: string;
|
||||||
|
```
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### vectorColumn?
|
### vectorColumn?
|
||||||
|
|
||||||
> `optional` **vectorColumn**: `string`
|
```ts
|
||||||
|
optional vectorColumn: string;
|
||||||
|
```
|
||||||
|
|||||||
@@ -1,19 +0,0 @@
|
|||||||
[**@lancedb/lancedb**](../../../README.md) • **Docs**
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
[@lancedb/lancedb](../../../globals.md) / [embedding](../README.md) / OpenAIOptions
|
|
||||||
|
|
||||||
# Type Alias: OpenAIOptions
|
|
||||||
|
|
||||||
> **OpenAIOptions**: `object`
|
|
||||||
|
|
||||||
## Type declaration
|
|
||||||
|
|
||||||
### apiKey
|
|
||||||
|
|
||||||
> **apiKey**: `string`
|
|
||||||
|
|
||||||
### model
|
|
||||||
|
|
||||||
> **model**: `EmbeddingCreateParams`\[`"model"`\]
|
|
||||||
@@ -6,6 +6,8 @@
|
|||||||
|
|
||||||
# Type Alias: Data
|
# Type Alias: Data
|
||||||
|
|
||||||
> **Data**: `Record`<`string`, `unknown`>[] \| `TableLike`
|
```ts
|
||||||
|
type Data: Record<string, unknown>[] | TableLike;
|
||||||
|
```
|
||||||
|
|
||||||
Data type accepted by NodeJS SDK
|
Data type accepted by NodeJS SDK
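For example, a plain array of records satisfies this type; the table name and rows below are made up for illustration:

```ts
import * as lancedb from "@lancedb/lancedb";

const rows: Record<string, unknown>[] = [
  { vector: [3.1, 4.1], item: "foo", price: 10.0 },
  { vector: [5.9, 26.5], item: "bar", price: 20.0 },
];

const db = await lancedb.connect("data/sample-lancedb");
const table = await db.createTable("items", rows); // `rows` is valid `Data`
```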
|
||||||
|
|||||||
@@ -1,81 +1,14 @@
|
|||||||
# Rust-backed Client Migration Guide
|
# Rust-backed Client Migration Guide
|
||||||
|
|
||||||
In an effort to ensure all clients have the same set of capabilities we have begun migrating the
|
In an effort to ensure all clients have the same set of capabilities we have
|
||||||
python and node clients onto a common Rust base library. In python, this new client is part of
|
migrated the Python and Node clients onto a common Rust base library. In Python,
|
||||||
the same lancedb package, exposed as an asynchronous client. Once the asynchronous client has
|
both the synchronous and asynchronous clients are based on this implementation.
|
||||||
reached full functionality we will begin migrating the synchronous library to be a thin wrapper
|
In Node, the new client is available as `@lancedb/lancedb`, which replaces
|
||||||
around the asynchronous client.
|
the existing `vectordb` package.
|
||||||
|
|
||||||
This guide describes the differences between the two APIs and will hopefully assist users
|
This guide describes the differences between the two Node APIs and will hopefully assist users
|
||||||
that would like to migrate to the new API.
|
that would like to migrate to the new API.
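As a minimal sketch of the package swap itself (the package names come from above; the table name and calls are illustrative only):

```ts
// Before: const vectordb = require("vectordb");
// After:
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("data/sample-lancedb");
const tbl = await db.openTable("my_table");
console.log(await tbl.countRows());
```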
|
||||||
|
|
||||||
## Python
|
|
||||||
### Closeable Connections
|
|
||||||
|
|
||||||
The Connection now has a `close` method. You can call this when
|
|
||||||
you are done with the connection to eagerly free resources. Currently
|
|
||||||
this is limited to freeing/closing the HTTP connection for remote
|
|
||||||
connections. In the future we may add caching or other resources to
|
|
||||||
native connections so this is probably a good practice even if you
|
|
||||||
aren't using remote connections.
|
|
||||||
|
|
||||||
In addition, the connection can be used as a context manager which may
|
|
||||||
be a more convenient way to ensure the connection is closed.
|
|
||||||
|
|
||||||
```python
|
|
||||||
import lancedb
|
|
||||||
|
|
||||||
async def my_async_fn():
|
|
||||||
with await lancedb.connect_async("my_uri") as db:
|
|
||||||
print(await db.table_names())
|
|
||||||
```
|
|
||||||
|
|
||||||
It is not mandatory to call the `close` method. If you do not call it
|
|
||||||
then the connection will be closed when the object is garbage collected.
|
|
||||||
|
|
||||||
### Closeable Table
|
|
||||||
|
|
||||||
The Table now also has a `close` method, similar to the connection. This
|
|
||||||
can be used to eagerly free the cache used by a Table object. Similar to
|
|
||||||
the connection, it can be used as a context manager and it is not mandatory
|
|
||||||
to call the `close` method.
|
|
||||||
|
|
||||||
#### Changes to Table APIs
|
|
||||||
|
|
||||||
- Previously `Table.schema` was a property. Now it is an async method.
|
|
||||||
- The method `Table.__len__` was removed and `len(table)` will no longer
|
|
||||||
work. Use `Table.count_rows` instead.
|
|
||||||
|
|
||||||
#### Creating Indices
|
|
||||||
|
|
||||||
The `Table.create_index` method is now used for creating both vector indices
|
|
||||||
and scalar indices. It currently requires a column name to be specified (the
|
|
||||||
column to index). Vector index defaults are now smarter and scale better with
|
|
||||||
the size of the data.
|
|
||||||
|
|
||||||
To specify index configuration details you will need to specify which kind of
|
|
||||||
index you are using.
|
|
||||||
|
|
||||||
#### Querying
|
|
||||||
|
|
||||||
The `Table.search` method has been renamed to `AsyncTable.vector_search` for
|
|
||||||
clarity.
|
|
||||||
|
|
||||||
### Features not yet supported
|
|
||||||
|
|
||||||
The following features are not yet supported by the asynchronous API. However,
|
|
||||||
we plan to support them soon.
|
|
||||||
|
|
||||||
- You cannot specify an embedding function when creating or opening a table.
|
|
||||||
You must calculate embeddings yourself if using the asynchronous API
|
|
||||||
- The merge insert operation is not supported in the asynchronous API
|
|
||||||
- Cleanup / compact / optimize indices are not supported in the asynchronous API
|
|
||||||
- add / alter columns is not supported in the asynchronous API
|
|
||||||
- The asynchronous API does not yet support any full text search or reranking
|
|
||||||
search
|
|
||||||
- Remote connections to LanceDb Cloud are not yet supported.
|
|
||||||
- The method Table.head is not yet supported.
|
|
||||||
|
|
||||||
## TypeScript/JavaScript
|
## TypeScript/JavaScript
|
||||||
|
|
||||||
For JS/TS users, we offer a brand new SDK [@lancedb/lancedb](https://www.npmjs.com/package/@lancedb/lancedb)
|
For JS/TS users, we offer a brand new SDK [@lancedb/lancedb](https://www.npmjs.com/package/@lancedb/lancedb)
|
||||||
@@ -133,7 +66,7 @@ the size of the data.
|
|||||||
|
|
||||||
### Embedding Functions
|
### Embedding Functions
|
||||||
|
|
||||||
The embedding API has been completely reworked, and it now more closely resembles the Python API, including the new [embedding registry](./js/classes/embedding.EmbeddingFunctionRegistry.md)
|
The embedding API has been completely reworked, and it now more closely resembles the Python API, including the new [embedding registry](./js/classes/embedding.EmbeddingFunctionRegistry.md):
|
||||||
|
|
||||||
=== "vectordb (deprecated)"
|
=== "vectordb (deprecated)"
|
||||||
|
|
||||||
|
|||||||
@@ -207,7 +207,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"## The dataset\n",
|
"## The dataset\n",
|
||||||
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retreiver.\n",
|
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retriever.\n",
|
||||||
"The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo\n"
|
"The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo\n"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
|
|||||||
@@ -477,7 +477,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"## Vector Search\n",
|
"## Vector Search\n",
|
||||||
"\n",
|
"\n",
|
||||||
"avg latency - `3.48 ms ± 71.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
|
"Average latency: `3.48 ms ± 71.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -597,7 +597,7 @@
|
|||||||
"`LinearCombinationReranker(weight=0.7)` is used as the default reranker for reranking the hybrid search results if the reranker isn't specified explicitly.\n",
|
"`LinearCombinationReranker(weight=0.7)` is used as the default reranker for reranking the hybrid search results if the reranker isn't specified explicitly.\n",
|
||||||
"The `weight` param controls the weightage provided to vector search score. The weight of `1-weight` is applied to FTS scores when reranking.\n",
|
"The `weight` param controls the weightage provided to vector search score. The weight of `1-weight` is applied to FTS scores when reranking.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Latency - `71 ms ± 25.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
|
"Latency: `71 ms ± 25.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -675,9 +675,9 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"### Cohere Reranker\n",
|
"### Cohere Reranker\n",
|
||||||
"This uses Cohere's Reranking API to re-rank the results. It accepts the reranking model name as a parameter. By Default it uses the english-v3 model but you can easily switch to a multi-lingual model.\n",
|
"This uses Cohere's Reranking API to re-rank the results. It accepts the reranking model name as a parameter. By default it uses the english-v3 model but you can easily switch to a multi-lingual model.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"latency - `605 ms ± 78.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
|
"Latency: `605 ms ± 78.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -1165,7 +1165,7 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"### ColBERT Reranker\n",
|
"### ColBERT Reranker\n",
|
||||||
"Colber Reranker is powered by ColBERT model. It runs locally using the huggingface implementation.\n",
|
"Colbert Reranker is powered by ColBERT model. It runs locally using the huggingface implementation.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Latency - `950 ms ± 5.78 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`\n",
|
"Latency - `950 ms ± 5.78 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -1489,9 +1489,9 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"### Cross Encoder Reranker\n",
|
"### Cross Encoder Reranker\n",
|
||||||
"Uses cross encoder models are rerankers. Uses sentence transformer implemntation locally\n",
|
"Uses cross encoder models are rerankers. Uses sentence transformer implementation locally\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Latency - `1.38 s ± 64.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
|
"Latency: `1.38 s ± 64.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -1771,10 +1771,10 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"### (Experimental) OpenAI Reranker\n",
|
"### (Experimental) OpenAI Reranker\n",
|
||||||
"\n",
|
"\n",
|
||||||
"This prompts chat model to rerank results which is not a dedicated reranker model. This should be treated as experimental. You might run out of token limit so set the search limits based on your token limit.\n",
|
"This prompts a chat model to rerank results and is not a dedicated reranker model. This should be treated as experimental. You might exceed the token limit so set the search limits based on your token limit.\n",
|
||||||
"NOTE: It is recommended to use `gpt-4-turbo-preview`, older models might lead to bad behaviour\n",
|
"NOTE: It is recommended to use `gpt-4-turbo-preview` as older models might lead to bad behaviour\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Latency - `Can take 10s of seconds if using GPT-4 model`"
|
"Latency: `Can take 10s of seconds if using GPT-4 model`"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -1817,7 +1817,7 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"## Use your custom Reranker\n",
|
"## Use your custom Reranker\n",
|
||||||
"Hybrid search in LanceDB is designed to be very flexible. You can easily plug in your own Re-reranking logic. To do so, you simply need to implement the base Reranker class"
|
"Hybrid search in LanceDB is designed to be very flexible. You can easily plug in your own Re-reranking logic. To do so, you simply need to implement the base Reranker class:"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -1849,9 +1849,9 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"### Custom Reranker based on CohereReranker\n",
|
"### Custom Reranker based on CohereReranker\n",
|
||||||
"\n",
|
"\n",
|
||||||
"For the sake of simplicity let's build custom reranker that just enchances the Cohere Reranker by accepting a filter query, and accept other CohereReranker params as kwags.\n",
|
"For the sake of simplicity let's build a custom reranker that enhances the Cohere Reranker by accepting a filter query, and accepts other CohereReranker params as kwargs.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"For this toy example let's say we want to get rid of docs that represent a table of contents, appendix etc. as these are semantically close of representing costs but this isn't something we are interested in because they don't represent the specific reasons why operating costs were high. They simply represent the costs."
|
"For this toy example let's say we want to get rid of docs that represent a table of contents or appendix, as these are semantically close to representing costs but don't represent the specific reasons why operating costs were high."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -1969,7 +1969,7 @@
|
|||||||
"id": "b3b5464a-7252-4eab-aaac-9b0eae37496f"
|
"id": "b3b5464a-7252-4eab-aaac-9b0eae37496f"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"As you can see the document containing the Table of contetnts of spending no longer shows up"
|
"As you can see, the document containing the table of contents no longer shows up."
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
|
|||||||
@@ -49,7 +49,7 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"## What is a retriever\n",
|
"## What is a retriever\n",
|
||||||
"VectorDBs are used as retreivers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retriever is a critical component of Retrieval Augmented Generation (RAG) acrhitectures. In this section, we will discuss how to improve the performance of retrievers.\n",
|
"VectorDBs are used as retrievers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retriever is a critical component of Retrieval Augmented Generation (RAG) acrhitectures. In this section, we will discuss how to improve the performance of retrievers.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"<img src=\"https://llmstack.ai/assets/images/rag-f517f1f834bdbb94a87765e0edd40ff2.png\" />\n",
|
"<img src=\"https://llmstack.ai/assets/images/rag-f517f1f834bdbb94a87765e0edd40ff2.png\" />\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -64,7 +64,7 @@
|
|||||||
"- Fine-tuning the embedding models\n",
|
"- Fine-tuning the embedding models\n",
|
||||||
"- Using different embedding models\n",
|
"- Using different embedding models\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Obviously, the above list is not exhaustive. There are other subtler ways that can improve retrieval performance like experimenting chunking algorithms, using different distance/similarity metrics etc. But for brevity, we'll only cover high level and more impactful techniques here.\n",
|
"Obviously, the above list is not exhaustive. There are other subtler ways that can improve retrieval performance like alternative chunking algorithms, using different distance/similarity metrics, and more. For brevity, we'll only cover high level and more impactful techniques here.\n",
|
||||||
"\n"
|
"\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -77,7 +77,7 @@
|
|||||||
"# LanceDB\n",
|
"# LanceDB\n",
|
||||||
"- Multimodal DB for AI\n",
|
"- Multimodal DB for AI\n",
|
||||||
"- Powered by an innovative & open-source in-house file format\n",
|
"- Powered by an innovative & open-source in-house file format\n",
|
||||||
"- 0 Setup\n",
|
"- Zero setup\n",
|
||||||
"- Scales up on disk storage\n",
|
"- Scales up on disk storage\n",
|
||||||
"- Native support for vector, full-text(BM25) and hybrid search\n",
|
"- Native support for vector, full-text(BM25) and hybrid search\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -92,8 +92,8 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"## The dataset\n",
|
"## The dataset\n",
|
||||||
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retreiver.\n",
|
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retriever.\n",
|
||||||
"The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo\n"
|
"The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo.\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -594,10 +594,10 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"## Ingestion\n",
|
"## Ingestion\n",
|
||||||
"Let us now ingest the contexts in LanceDB\n",
|
"Let us now ingest the contexts in LanceDB. The steps will be:\n",
|
||||||
"\n",
|
"\n",
|
||||||
"- Create a schema (Pydantic or Pyarrow)\n",
|
"- Create a schema (Pydantic or Pyarrow)\n",
|
||||||
"- Select an embedding model from LanceDB Embedding API (Allows automatic vectorization of data)\n",
|
"- Select an embedding model from LanceDB Embedding API (to allow automatic vectorization of data)\n",
|
||||||
"- Ingest the contexts\n"
|
"- Ingest the contexts\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -841,7 +841,7 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"## Different Query types in LanceDB\n",
|
"## Different Query types in LanceDB\n",
|
||||||
"LanceDB allows switching query types with by setting `query_type` argument, which defaults to `vector` when using Embedding API. In this example we'll use `JinaReranker` which is one of many rerankers supported by LanceDB\n",
|
"LanceDB allows switching query types with by setting `query_type` argument, which defaults to `vector` when using Embedding API. In this example we'll use `JinaReranker` which is one of many rerankers supported by LanceDB.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"### Vector search:\n",
|
"### Vector search:\n",
|
||||||
"Vector search\n",
|
"Vector search\n",
|
||||||
@@ -1446,11 +1446,11 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"## Takeaways & Tradeoffs\n",
|
"## Takeaways & Tradeoffs\n",
|
||||||
"\n",
|
"\n",
|
||||||
"* **Easiest method to significantly improve accuracy** Using Hybrid search and/or rerankers can significantly improve retrieval performance without spending any additional time or effort on tuning embedding models, generators, or dissecting the dataset.\n",
|
"* **Rerankers significantly improve accuracy at little cost.** Using Hybrid search and/or rerankers can significantly improve retrieval performance without spending any additional time or effort on tuning embedding models, generators, or dissecting the dataset.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"* **Reranking is an expensive operation.** Depending on the type of reranker you choose, they can incur significant latecy to query times. Although some API-based rerankers can be significantly faster.\n",
|
"* **Reranking is an expensive operation.** Depending on the type of reranker you choose, they can incur significant latecy to query times. Although some API-based rerankers can be significantly faster.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"* When using models locally, having a warmed-up GPU environment will significantly reduce latency. This is specially useful if the application doesn't need to be strcitly realtime. The tradeoff being GPU resources."
|
"* **Pre-warmed GPU environments reduce latency.** When using models locally, having a warmed-up GPU environment will significantly reduce latency. This is especially useful if the application doesn't need to be strictly realtime. Pre-warming comes at the expense of GPU resources."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
File diff suppressed because it is too large
1096
docs/src/notebooks/reproducibility_async.ipynb
Normal file
File diff suppressed because it is too large
@@ -8,54 +8,55 @@ and PyArrow. The sequence of steps in a typical workflow is shown below.
|
|||||||
|
|
||||||
First, we need to connect to a LanceDB database.
|
First, we need to connect to a LanceDB database.
|
||||||
|
|
||||||
```py
|
=== "Sync API"
|
||||||
|
|
||||||
import lancedb
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
db = lancedb.connect("data/sample-lancedb")
|
```python
|
||||||
```
|
--8<-- "python/python/tests/docs/test_python.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb_async"
|
||||||
|
```
|
||||||
|
|
||||||
We can load a Pandas `DataFrame` to LanceDB directly.
|
We can load a Pandas `DataFrame` to LanceDB directly.
|
||||||
|
|
||||||
```py
|
=== "Sync API"
|
||||||
import pandas as pd
|
|
||||||
|
|
||||||
data = pd.DataFrame({
|
```python
|
||||||
"vector": [[3.1, 4.1], [5.9, 26.5]],
|
--8<-- "python/python/tests/docs/test_python.py:import-pandas"
|
||||||
"item": ["foo", "bar"],
|
--8<-- "python/python/tests/docs/test_python.py:create_table_pandas"
|
||||||
"price": [10.0, 20.0]
|
```
|
||||||
})
|
=== "Async API"
|
||||||
table = db.create_table("pd_table", data=data)
|
|
||||||
```
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:import-pandas"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:create_table_pandas_async"
|
||||||
|
```
|
||||||
|
|
||||||
Similar to the [`pyarrow.write_dataset()`](https://arrow.apache.org/docs/python/generated/pyarrow.dataset.write_dataset.html) method, LanceDB's
|
Similar to the [`pyarrow.write_dataset()`](https://arrow.apache.org/docs/python/generated/pyarrow.dataset.write_dataset.html) method, LanceDB's
|
||||||
[`db.create_table()`](python.md/#lancedb.db.DBConnection.create_table) accepts data in a variety of forms.
|
[`db.create_table()`](python.md/#lancedb.db.DBConnection.create_table) accepts data in a variety of forms.
|
||||||
|
|
||||||
If you have a dataset that is larger than memory, you can create a table with `Iterator[pyarrow.RecordBatch]` to lazily load the data:
|
If you have a dataset that is larger than memory, you can create a table with `Iterator[pyarrow.RecordBatch]` to lazily load the data:
|
||||||
|
|
||||||
```py
|
=== "Sync API"
|
||||||
|
|
||||||
from typing import Iterable
|
```python
|
||||||
import pyarrow as pa
|
--8<-- "python/python/tests/docs/test_python.py:import-iterable"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:import-pyarrow"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:make_batches"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:create_table_iterable"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
def make_batches() -> Iterable[pa.RecordBatch]:
|
```python
|
||||||
for i in range(5):
|
--8<-- "python/python/tests/docs/test_python.py:import-iterable"
|
||||||
yield pa.RecordBatch.from_arrays(
|
--8<-- "python/python/tests/docs/test_python.py:import-pyarrow"
|
||||||
[
|
--8<-- "python/python/tests/docs/test_python.py:make_batches"
|
||||||
pa.array([[3.1, 4.1], [5.9, 26.5]]),
|
--8<-- "python/python/tests/docs/test_python.py:create_table_iterable_async"
|
||||||
pa.array(["foo", "bar"]),
|
```
|
||||||
pa.array([10.0, 20.0]),
|
|
||||||
],
|
|
||||||
["vector", "item", "price"])
|
|
||||||
|
|
||||||
schema=pa.schema([
|
|
||||||
pa.field("vector", pa.list_(pa.float32())),
|
|
||||||
pa.field("item", pa.utf8()),
|
|
||||||
pa.field("price", pa.float32()),
|
|
||||||
])
|
|
||||||
|
|
||||||
table = db.create_table("iterable_table", data=make_batches(), schema=schema)
|
|
||||||
```
|
|
||||||
|
|
||||||
You will find detailed instructions for creating a LanceDB dataset in
|
You will find detailed instructions for creating a LanceDB dataset in
|
||||||
[Getting Started](../basic.md#quick-start) and [API](python.md/#lancedb.db.DBConnection.create_table)
|
[Getting Started](../basic.md#quick-start) and [API](python.md/#lancedb.db.DBConnection.create_table)
|
||||||
@@ -65,15 +66,16 @@ sections.
|
|||||||
|
|
||||||
We can now perform similarity search via the LanceDB Python API.
|
We can now perform similarity search via the LanceDB Python API.
|
||||||
|
|
||||||
```py
|
=== "Sync API"
|
||||||
# Open the table previously created.
|
|
||||||
table = db.open_table("pd_table")
|
|
||||||
|
|
||||||
query_vector = [100, 100]
|
```python
|
||||||
# Pandas DataFrame
|
--8<-- "python/python/tests/docs/test_python.py:vector_search"
|
||||||
df = table.search(query_vector).limit(1).to_pandas()
|
```
|
||||||
print(df)
|
=== "Async API"
|
||||||
```
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:vector_search_async"
|
||||||
|
```
|
||||||
|
|
||||||
```
|
```
|
||||||
vector item price _distance
|
vector item price _distance
|
||||||
@@ -83,16 +85,13 @@ print(df)
|
|||||||
If you have a simple filter, it's faster to provide a `where` clause to LanceDB's `search` method.
|
If you have a simple filter, it's faster to provide a `where` clause to LanceDB's `search` method.
|
||||||
For more complex filters or aggregations, you can always resort to using the underlying `DataFrame` methods after performing a search.
|
For more complex filters or aggregations, you can always resort to using the underlying `DataFrame` methods after performing a search.
|
||||||
|
|
||||||
```python
|
=== "Sync API"
|
||||||
|
|
||||||
# Apply the filter via LanceDB
|
```python
|
||||||
results = table.search([100, 100]).where("price < 15").to_pandas()
|
--8<-- "python/python/tests/docs/test_python.py:vector_search_with_filter"
|
||||||
assert len(results) == 1
|
```
|
||||||
assert results["item"].iloc[0] == "foo"
|
=== "Async API"
|
||||||
|
|
||||||
# Apply the filter via Pandas
|
```python
|
||||||
df = results = table.search([100, 100]).to_pandas()
|
--8<-- "python/python/tests/docs/test_python.py:vector_search_with_filter_async"
|
||||||
results = df[df.price < 15]
|
```
|
||||||
assert len(results) == 1
|
|
||||||
assert results["item"].iloc[0] == "foo"
|
|
||||||
```
|
|
||||||
|
|||||||
@@ -2,38 +2,29 @@

 LanceDB supports [Polars](https://github.com/pola-rs/polars), a blazingly fast DataFrame library for Python written in Rust. Just like in Pandas, the Polars integration is enabled by PyArrow under the hood. A deeper integration between Lance Tables and Polars DataFrames is in progress, but at the moment, you can read a Polars DataFrame into LanceDB and output the search results from a query to a Polars DataFrame.


 ## Create & Query LanceDB Table

 ### From Polars DataFrame

 First, we connect to a LanceDB database.

-```py
-import lancedb

-db = lancedb.connect("data/polars-lancedb")
+```py
+--8<-- "python/python/tests/docs/test_python.py:import-lancedb"
+--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb"
 ```

 We can load a Polars `DataFrame` to LanceDB directly.

 ```py
-import polars as pl
-data = pl.DataFrame({
-"vector": [[3.1, 4.1], [5.9, 26.5]],
-"item": ["foo", "bar"],
-"price": [10.0, 20.0]
-})
-table = db.create_table("pl_table", data=data)
+--8<-- "python/python/tests/docs/test_python.py:import-polars"
+--8<-- "python/python/tests/docs/test_python.py:create_table_polars"
 ```

 We can now perform similarity search via the LanceDB Python API.

 ```py
-query = [3.0, 4.0]
-result = table.search(query).limit(1).to_polars()
-print(result)
-print(type(result))
+--8<-- "python/python/tests/docs/test_python.py:vector_search_polars"
 ```

 In addition to the selected columns, LanceDB also returns a vector
@@ -59,33 +50,16 @@ Note that the type of the result from a table search is a Polars DataFrame.
 Alternately, we can create an empty LanceDB Table using a Pydantic schema and populate it with a Polars DataFrame.

 ```py
-import polars as pl
-from lancedb.pydantic import Vector, LanceModel
-class Item(LanceModel):
-vector: Vector(2)
-item: str
-price: float
+--8<-- "python/python/tests/docs/test_python.py:import-polars"
+--8<-- "python/python/tests/docs/test_python.py:import-lancedb-pydantic"
+--8<-- "python/python/tests/docs/test_python.py:class_Item"
+--8<-- "python/python/tests/docs/test_python.py:create_table_pydantic"

-data = {
-"vector": [[3.1, 4.1]],
-"item": "foo",
-"price": 10.0,
-}

-table = db.create_table("test_table", schema=Item)
-df = pl.DataFrame(data)
-# Add Polars DataFrame to table
-table.add(df)
 ```

 The table can now be queried as usual.

 ```py
-result = table.search([3.0, 4.0]).limit(1).to_polars()
-print(result)
-print(type(result))
+--8<-- "python/python/tests/docs/test_python.py:vector_search_polars"
 ```

 ```
@@ -108,8 +82,7 @@ As you iterate on your application, you'll likely need to work with the whole ta
 LanceDB tables can also be converted directly into a polars LazyFrame for further processing.

 ```python
-ldf = table.to_polars()
-print(type(ldf))
+--8<-- "python/python/tests/docs/test_python.py:dump_table_lazyform"
 ```

 Unlike the search result from a query, we can see that the type of the result is a LazyFrame.
@@ -121,7 +94,7 @@ Unlike the search result from a query, we can see that the type of the result is
 We can now work with the LazyFrame as we would in Polars, and collect the first result.

 ```python
-print(ldf.first().collect())
+--8<-- "python/python/tests/docs/test_python.py:print_table_lazyform"
 ```

 ```

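Since the rewritten page now sources all of its code from snippet includes, here is a minimal end-to-end sketch assembled from the removed inline lines above (database path and table name are illustrative):

```python
import lancedb
import polars as pl

db = lancedb.connect("data/polars-lancedb")

# Load a Polars DataFrame straight into a LanceDB table
data = pl.DataFrame(
    {
        "vector": [[3.1, 4.1], [5.9, 26.5]],
        "item": ["foo", "bar"],
        "price": [10.0, 20.0],
    }
)
table = db.create_table("pl_table", data=data, mode="overwrite")

# Vector search returning a Polars DataFrame
result = table.search([3.0, 4.0]).limit(1).to_polars()
print(result)

# Dump the whole table as a Polars LazyFrame and collect the first row
ldf = table.to_polars()
print(ldf.first().collect())
```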
@@ -47,6 +47,8 @@ is also an [asynchronous API client](#connections-asynchronous).

 ::: lancedb.embeddings.registry.EmbeddingFunctionRegistry

+::: lancedb.embeddings.base.EmbeddingFunctionConfig

 ::: lancedb.embeddings.base.EmbeddingFunction

 ::: lancedb.embeddings.base.TextEmbeddingFunction
@@ -127,8 +129,16 @@ lists the indices that LanceDb supports.

 ::: lancedb.index.LabelList

+::: lancedb.index.FTS

 ::: lancedb.index.IvfPq

+::: lancedb.index.HnswPq

+::: lancedb.index.HnswSq

+::: lancedb.index.IvfFlat

 ## Querying (Asynchronous)

 Queries allow you to return data from your database. Basic queries can be

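The newly documented config classes (`FTS`, `HnswPq`, `HnswSq`, `IvfFlat`) are passed to the asynchronous `create_index` call. A rough sketch of that usage is below; the keyword arguments and table/path names are assumptions and should be checked against the API reference being indexed here:

```python
import asyncio

import lancedb
from lancedb.index import FTS, IvfPq


async def build_indices() -> None:
    # Paths and table name are illustrative
    db = await lancedb.connect_async("data/index-demo")
    tbl = await db.open_table("my_table")

    # Vector index; HnswPq, HnswSq and IvfFlat are configured the same way via `config`
    await tbl.create_index("vector", config=IvfPq(distance_type="cosine"))

    # Full-text index on a string column
    await tbl.create_index("text", config=FTS())


asyncio.run(build_indices())
```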
@@ -17,4 +17,8 @@ pip install lancedb
 ## Table

 ::: lancedb.remote.table.RemoteTable
+options:
+filters:
+- "!cleanup_old_versions"
+- "!compact_files"
+- "!optimize"

@@ -2,7 +2,7 @@
 ====================================================================
 Adaptive RAG introduces a RAG technique that combines query analysis with self-corrective RAG.

-For Query Analysis, it uses a small classifier(LLM), to decide the query’s complexity. Query Analysis helps routing smoothly to adjust between different retrieval strategies No retrieval, Single-shot RAG or Iterative RAG.
+For Query Analysis, it uses a small classifier (LLM) to decide the query’s complexity. Query Analysis guides adjustment between different retrieval strategies: No retrieval, Single-shot RAG or Iterative RAG.

 **[Official Paper](https://arxiv.org/pdf/2403.14403)**

@@ -12,9 +12,9 @@ For Query Analysis, it uses a small classifier(LLM), to decide the query’s com
 </figcaption>
 </figure>

-**[Offical Implementation](https://github.com/starsuzi/Adaptive-RAG)**
+**[Official Implementation](https://github.com/starsuzi/Adaptive-RAG)**

-Here’s a code snippet for query analysis
+Here’s a code snippet for query analysis:

 ```python
 from langchain_core.prompts import ChatPromptTemplate
@@ -35,7 +35,7 @@ llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
 structured_llm_router = llm.with_structured_output(RouteQuery)
 ```

-For defining and querying retriever
+The following example defines and queries a retriever:

 ```python
 # add documents in LanceDB

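The `RouteQuery` schema referenced by `with_structured_output` is defined outside the lines shown in this hunk. Below is a sketch of what such a router can look like; the class fields, labels, and prompt wording are illustrative, not the official implementation:

```python
from typing import Literal

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field


class RouteQuery(BaseModel):
    """Route a user question to the retrieval strategy that matches its complexity."""

    strategy: Literal["no_retrieval", "single_shot_rag", "iterative_rag"] = Field(
        description="Retrieval strategy to use for this question."
    )


prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "Classify how much retrieval the user question needs: "
            "no_retrieval, single_shot_rag, or iterative_rag.",
        ),
        ("human", "{question}"),
    ]
)

llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
structured_llm_router = llm.with_structured_output(RouteQuery)
router = prompt | structured_llm_router

print(router.invoke({"question": "Who won the 2022 FIFA World Cup?"}))
```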
@@ -11,7 +11,7 @@ FLARE, stands for Forward-Looking Active REtrieval augmented generation is a gen

 [](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/better-rag-FLAIR/main.ipynb)

-Here’s a code snippet for using FLARE with Langchain
+Here’s a code snippet for using FLARE with Langchain:

 ```python
 from langchain.vectorstores import LanceDB

@@ -11,7 +11,7 @@ HyDE, stands for Hypothetical Document Embeddings is an approach used for precis

 [](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Advance-RAG-with-HyDE/main.ipynb)

-Here’s a code snippet for using HyDE with Langchain
+Here’s a code snippet for using HyDE with Langchain:

 ```python
 from langchain.llms import OpenAI

@@ -1,6 +1,6 @@
 **Agentic RAG 🤖**
 ====================================================================
-Agentic RAG is Agent-based RAG introduces an advanced framework for answering questions by using intelligent agents instead of just relying on large language models. These agents act like expert researchers, handling complex tasks such as detailed planning, multi-step reasoning, and using external tools. They navigate multiple documents, compare information, and generate accurate answers. This system is easily scalable, with each new document set managed by a sub-agent, making it a powerful tool for tackling a wide range of information needs.
+Agentic RAG introduces an advanced framework for answering questions by using intelligent agents instead of just relying on large language models. These agents act like expert researchers, handling complex tasks such as detailed planning, multi-step reasoning, and using external tools. They navigate multiple documents, compare information, and generate accurate answers. This system is easily scalable, with each new document set managed by a sub-agent, making it a powerful tool for tackling a wide range of information needs.

 <figure markdown="span">
 
@@ -9,7 +9,7 @@ Agentic RAG is Agent-based RAG introduces an advanced framework for answering qu

 [](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Agentic_RAG/main.ipynb)

-Here’s a code snippet for defining retriever using Langchain
+Here’s a code snippet for defining a retriever using Langchain:

 ```python
 from langchain.text_splitter import RecursiveCharacterTextSplitter
@@ -41,7 +41,7 @@ retriever = vectorstore.as_retriever()

 ```

-Agent that formulates an improved query for better retrieval results and then grades the retrieved documents
+Here is an agent that formulates an improved query for better retrieval results and then grades the retrieved documents:

 ```python
 def grade_documents(state) -> Literal["generate", "rewrite"]:

@@ -4,7 +4,7 @@
 Corrective-RAG (CRAG) is a strategy for Retrieval-Augmented Generation (RAG) that includes self-reflection and self-grading of retrieved documents. Here’s a simplified breakdown of the steps involved:

 1. **Relevance Check**: If at least one document meets the relevance threshold, the process moves forward to the generation phase.
-2. **Knowledge Refinement**: Before generating an answer, the process refines the knowledge by dividing the document into smaller segments called "knowledge strips."
+2. **Knowledge Refinement**: Before generating an answer, the process refines the knowledge by dividing the document into smaller segments called "knowledge strips".
 3. **Grading and Filtering**: Each "knowledge strip" is graded, and irrelevant ones are filtered out.
 4. **Additional Data Source**: If all documents are below the relevance threshold, or if the system is unsure about their relevance, it will seek additional information by performing a web search to supplement the retrieved data.

@@ -19,11 +19,11 @@ Above steps are mentioned in

 Corrective Retrieval-Augmented Generation (CRAG) is a method that works like a **built-in fact-checker**.

-**[Offical Implementation](https://github.com/HuskyInSalt/CRAG)**
+**[Official Implementation](https://github.com/HuskyInSalt/CRAG)**

 [](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Corrective-RAG-with_Langgraph/CRAG_with_Langgraph.ipynb)

-Here’s a code snippet for defining a table with the [Embedding API](https://lancedb.github.io/lancedb/embeddings/embedding_functions/), and retrieves the relevant documents.
+Here’s a code snippet that defines a table with the [Embedding API](https://lancedb.github.io/lancedb/embeddings/embedding_functions/) and retrieves the relevant documents:

 ```python
 import pandas as pd
@@ -115,6 +115,6 @@ def grade_documents(state):
 }
 ```

-Check Colab for the Implementation of CRAG with Langgraph
+Check Colab for the implementation of CRAG with Langgraph:

 [](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Corrective-RAG-with_Langgraph/CRAG_with_Langgraph.ipynb)

@@ -6,7 +6,7 @@ One of the main benefits of Graph RAG is its ability to capture and represent co

 **[Official Paper](https://arxiv.org/pdf/2404.16130)**

-**[Offical Implementation](https://github.com/microsoft/graphrag)**
+**[Official Implementation](https://github.com/microsoft/graphrag)**

 [Microsoft Research Blog](https://www.microsoft.com/en-us/research/blog/graphrag-unlocking-llm-discovery-on-narrative-private-data/)

@@ -39,13 +39,13 @@ python3 -m graphrag.index --root dataset-dir

 - **Execute Query**

-Global Query Execution gives a broad overview of dataset
+Global Query Execution gives a broad overview of the dataset:

 ```bash
 python3 -m graphrag.query --root dataset-dir --method global "query-question"
 ```

-Local Query Execution gives a detailed and specific answers based on the context of the entities
+Local Query Execution gives detailed and specific answers based on the context of the entities:

 ```bash
 python3 -m graphrag.query --root dataset-dir --method local "query-question"

@@ -15,7 +15,7 @@ MRAG is cost-effective and energy-efficient because it avoids extra LLM queries,

 **[Official Implementation](https://github.com/spcl/MRAG)**

-Here’s a code snippet for defining different embedding spaces with the [Embedding API](https://lancedb.github.io/lancedb/embeddings/embedding_functions/)
+Here’s a code snippet for defining different embedding spaces with the [Embedding API](https://lancedb.github.io/lancedb/embeddings/embedding_functions/):

 ```python
 import lancedb
@@ -44,6 +44,6 @@ class Space3(LanceModel):
 vector: Vector(model3.ndims()) = model3.VectorField()
 ```

-Create different tables using defined embedding spaces, then make queries to each embedding space. Use the resulted closest documents from each embedding space to generate answers.
+Create different tables using defined embedding spaces, then make queries to each embedding space. Use the resulting closest documents from each embedding space to generate answers.

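To make that last step concrete, here is a sketch of creating one table per embedding space and querying each of them; the model names, table names, and documents are illustrative, and only two spaces are shown instead of the three defined above:

```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector

registry = get_registry()
model_a = registry.get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5")
model_b = registry.get("sentence-transformers").create(name="all-MiniLM-L6-v2")


class SpaceA(LanceModel):
    text: str = model_a.SourceField()
    vector: Vector(model_a.ndims()) = model_a.VectorField()


class SpaceB(LanceModel):
    text: str = model_b.SourceField()
    vector: Vector(model_b.ndims()) = model_b.VectorField()


db = lancedb.connect("data/mrag-demo")
docs = [
    {"text": "LanceDB is an embedded vector database."},
    {"text": "Multi-head RAG queries several embedding spaces."},
]

# One table per embedding space
tables = {}
for name, schema in [("space_a", SpaceA), ("space_b", SpaceB)]:
    tbl = db.create_table(name, schema=schema, mode="overwrite")
    tbl.add(docs)
    tables[name] = tbl

# Query every space; the closest documents from each space feed the generation step
query = "What does MRAG do?"
closest = {name: tbl.search(query).limit(2).to_pandas() for name, tbl in tables.items()}
```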
@@ -1,6 +1,6 @@
 **Self RAG 🤳**
 ====================================================================
-Self-RAG is a strategy for Retrieval-Augmented Generation (RAG) to get better retrieved information, generated text, and checking their own work, all without losing their flexibility. Unlike the traditional Retrieval-Augmented Generation (RAG) method, Self-RAG retrieves information as needed, can skip retrieval if not needed, and evaluates its own output while generating text. It also uses a process to pick the best output based on different preferences.
+Self-RAG is a strategy for Retrieval-Augmented Generation (RAG) to get better retrieved information, generated text, and validation, without loss of flexibility. Unlike the traditional Retrieval-Augmented Generation (RAG) method, Self-RAG retrieves information as needed, can skip retrieval if not needed, and evaluates its own output while generating text. It also uses a process to pick the best output based on different preferences.

 **[Official Paper](https://arxiv.org/pdf/2310.11511)**

@@ -10,11 +10,11 @@ Self-RAG is a strategy for Retrieval-Augmented Generation (RAG) to get better re
 </figcaption>
 </figure>

-**[Offical Implementation](https://github.com/AkariAsai/self-rag)**
+**[Official Implementation](https://github.com/AkariAsai/self-rag)**

 Self-RAG starts by generating a response without retrieving extra info if it's not needed. For questions that need more details, it retrieves to get the necessary information.

-Here’s a code snippet for defining retriever using Langchain
+Here’s a code snippet for defining a retriever using Langchain:

 ```python
 from langchain.text_splitter import RecursiveCharacterTextSplitter
@@ -46,7 +46,7 @@ retriever = vectorstore.as_retriever()

 ```

-Functions that grades the retrieved documents and if required formulates an improved query for better retrieval results
+The following functions grade the retrieved documents and formulate an improved query for better retrieval results, if required:

 ```python
 def grade_documents(state) -> Literal["generate", "rewrite"]:

@@ -1,8 +1,8 @@
 **SFR RAG 📑**
 ====================================================================
-Salesforce AI Research introduces SFR-RAG, a 9-billion-parameter language model trained with a significant emphasis on reliable, precise, and faithful contextual generation abilities specific to real-world RAG use cases and relevant agentic tasks. They include precise factual knowledge extraction, distinguishing relevant against distracting contexts, citing appropriate sources along with answers, producing complex and multi-hop reasoning over multiple contexts, consistent format following, as well as refraining from hallucination over unanswerable queries.
+Salesforce AI Research introduced SFR-RAG, a 9-billion-parameter language model trained with a significant emphasis on reliable, precise, and faithful contextual generation abilities specific to real-world RAG use cases and relevant agentic tasks. It targets precise factual knowledge extraction, distinction between relevant and distracting contexts, citation of appropriate sources along with answers, production of complex and multi-hop reasoning over multiple contexts, consistent format following, as well as minimization of hallucination over unanswerable queries.

-**[Offical Implementation](https://github.com/SalesforceAIResearch/SFR-RAG)**
+**[Official Implementation](https://github.com/SalesforceAIResearch/SFR-RAG)**

 <figure markdown="span">
 

@@ -1,7 +1,6 @@
 # AnswersDotAI Rerankers

-This integration allows using answersdotai's rerankers to rerank the search results. [Rerankers](https://github.com/AnswerDotAI/rerankers)
-A lightweight, low-dependency, unified API to use all common reranking and cross-encoder models.
+This integration uses [AnswersDotAI's rerankers](https://github.com/AnswerDotAI/rerankers) to rerank the search results, providing a lightweight, low-dependency, unified API to use all common reranking and cross-encoder models.

 !!! note
 Supported Query Types: Hybrid, Vector, FTS
@@ -45,10 +44,10 @@ Accepted Arguments
 ----------------
 | Argument | Type | Default | Description |
 | --- | --- | --- | --- |
-| `model_type` | `str` | `"colbert"` | The type of model to use. Supported model types can be found here - https://github.com/AnswerDotAI/rerankers |
+| `model_type` | `str` | `"colbert"` | The type of model to use. Supported model types can be found here: https://github.com/AnswerDotAI/rerankers. |
 | `model_name` | `str` | `"answerdotai/answerai-colbert-small-v1"` | The name of the reranker model to use. |
 | `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
-| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
+| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score`. If "all", will return the relevance score along with the vector and/or FTS scores, depending on the query type. |

@@ -58,17 +57,17 @@ You can specify the type of scores you want the reranker to return. The followin
 ### Hybrid Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ❌ Not Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ❌ Not Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`). |

 ### Vector Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returns have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ✅ Supported | Results have vector(`_distance`) along with Hybrid Search score(`_relevance_score`). |

 ### FTS Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returns have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ✅ Supported | Results have FTS(`score`) along with Hybrid Search score(`_relevance_score`). |

@@ -1,6 +1,6 @@
 # Cohere Reranker

-This re-ranker uses the [Cohere](https://cohere.ai/) API to rerank the search results. You can use this re-ranker by passing `CohereReranker()` to the `rerank()` method. Note that you'll either need to set the `COHERE_API_KEY` environment variable or pass the `api_key` argument to use this re-ranker.
+This reranker uses the [Cohere](https://cohere.ai/) API to rerank the search results. You can use this reranker by passing `CohereReranker()` to the `rerank()` method. Note that you'll either need to set the `COHERE_API_KEY` environment variable or pass the `api_key` argument to use this reranker.


 !!! note
@@ -62,17 +62,17 @@ You can specify the type of scores you want the reranker to return. The followin
 ### Hybrid Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ❌ Not Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column |
+| `all` | ❌ Not Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |

 ### Vector Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returns have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column |
+| `all` | ✅ Supported | Results have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |

 ### FTS Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returns have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column |
+| `all` | ✅ Supported | Results have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |

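A minimal usage sketch, following the pattern from the rerankers index page further down in this diff; it assumes an existing table with an embedding function configured and a `text` column, and the database path and table name are illustrative:

```python
import lancedb
from lancedb.rerankers import CohereReranker

db = lancedb.connect("data/rerankers-demo")
table = db.open_table("my_table")

# Reads COHERE_API_KEY from the environment when api_key is not passed explicitly
reranker = CohereReranker()

# Rerank a vector search; FTS and hybrid queries are reranked the same way
results = table.search("hello").rerank(reranker).to_list()
```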
@@ -1,6 +1,6 @@
 # ColBERT Reranker

-This re-ranker uses ColBERT model to rerank the search results. You can use this re-ranker by passing `ColbertReranker()` to the `rerank()` method.
+This reranker uses the ColBERT model to rerank the search results. You can use this reranker by passing `ColbertReranker()` to the `rerank()` method.
 !!! note
 Supported Query Types: Hybrid, Vector, FTS

@@ -46,7 +46,7 @@ Accepted Arguments
 | `model_name` | `str` | `"colbert-ir/colbertv2.0"` | The name of the reranker model to use.|
 | `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
 | `device` | `str` | `None` | The device to use for the cross encoder model. If None, will use "cuda" if available, otherwise "cpu". |
-| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
+| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score`. If "all", will return the relevance score along with the vector and/or FTS scores, depending on the query type. |


 ## Supported Scores for each query type
@@ -55,17 +55,17 @@ You can specify the type of scores you want the reranker to return. The followin
 ### Hybrid Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ❌ Not Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ❌ Not Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`). |

 ### Vector Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returns have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ✅ Supported | Results have vector(`_distance`) along with Hybrid Search score(`_relevance_score`). |

 ### FTS Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returns have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ✅ Supported | Results have FTS(`score`) along with Hybrid Search score(`_relevance_score`). |

@@ -1,6 +1,6 @@
 # Cross Encoder Reranker

-This re-ranker uses Cross Encoder models from sentence-transformers to rerank the search results. You can use this re-ranker by passing `CrossEncoderReranker()` to the `rerank()` method.
+This reranker uses Cross Encoder models from sentence-transformers to rerank the search results. You can use this reranker by passing `CrossEncoderReranker()` to the `rerank()` method.
 !!! note
 Supported Query Types: Hybrid, Vector, FTS

@@ -46,7 +46,7 @@ Accepted Arguments
 | `model_name` | `str` | `""cross-encoder/ms-marco-TinyBERT-L-6"` | The name of the reranker model to use.|
 | `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
 | `device` | `str` | `None` | The device to use for the cross encoder model. If None, will use "cuda" if available, otherwise "cpu". |
-| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
+| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score`. If "all", will return the relevance score along with the vector and/or FTS scores, depending on the query type. |

 ## Supported Scores for each query type
 You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:
@@ -54,17 +54,17 @@ You can specify the type of scores you want the reranker to return. The followin
 ### Hybrid Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ❌ Not Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ❌ Not Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`). |

 ### Vector Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returns have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ✅ Supported | Results have vector(`_distance`) along with Hybrid Search score(`_relevance_score`). |

 ### FTS Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returns have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ✅ Supported | Results have FTS(`score`) along with Hybrid Search score(`_relevance_score`). |

@@ -1,9 +1,10 @@
 ## Building Custom Rerankers
 You can build your own custom reranker by subclassing the `Reranker` class and implementing the `rerank_hybrid()` method. Optionally, you can also implement the `rerank_vector()` and `rerank_fts()` methods if you want to support reranking for vector and FTS search separately.
-Here's an example of a custom reranker that combines the results of semantic and full-text search using a linear combination of the scores.

 The `Reranker` base interface comes with a `merge_results()` method that can be used to combine the results of semantic and full-text search. This is a vanilla merging algorithm that simply concatenates the results and removes the duplicates without taking the scores into consideration. It only keeps the first copy of the row encountered. This works well in cases that don't require the scores of semantic and full-text search to combine the results. If you want to use the scores or want to support `return_score="all"`, you'll need to implement your own merging algorithm.

+Here's an example of a custom reranker that combines the results of semantic and full-text search using a linear combination of the scores:

 ```python

 from lancedb.rerankers import Reranker
@@ -42,7 +43,7 @@ class MyReranker(Reranker):
 ```

 ### Example of a Custom Reranker
-For the sake of simplicity let's build custom reranker that just enchances the Cohere Reranker by accepting a filter query, and accept other CohereReranker params as kwags.
+For the sake of simplicity, let's build a custom reranker that enhances the Cohere Reranker by accepting a filter query, and accepts other CohereReranker params as kwargs.

 ```python

@@ -83,6 +84,6 @@ class ModifiedCohereReranker(CohereReranker):
 ```

 !!! tip
-The `vector_results` and `fts_results` are pyarrow tables. Lean more about pyarrow tables [here](https://arrow.apache.org/docs/python). It can be convered to other data types like pandas dataframe, pydict, pylist etc.
+The `vector_results` and `fts_results` are pyarrow tables. Learn more about pyarrow tables [here](https://arrow.apache.org/docs/python). They can be converted to other data types like pandas dataframe, pydict, pylist, etc.

 For example, You can convert them to pandas dataframes using `to_pandas()` method and perform any operations you want. After you are done, you can convert the dataframe back to pyarrow table using `pa.Table.from_pandas()` method and return it.
@@ -13,7 +13,7 @@ LanceDB comes with some built-in rerankers. Some of the rerankers that are avail


 ## Using a Reranker
-Using rerankers is optional for vector and FTS. However, for hybrid search, rerankers are required. To use a reranker, you need to create an instance of the reranker and pass it to the `rerank` method of the query builder.
+Using rerankers is optional for vector and FTS. However, for hybrid search, rerankers are required. To use a reranker, you need to create an instance of the reranker and pass it to the `rerank` method of the query builder:

 ```python
 import lancedb
@@ -36,14 +36,14 @@ tbl = db.create_table("test", data)
 reranker = CohereReranker(api_key="your_api_key")

 # Run vector search with a reranker
-result = tbl.query("hello").rerank(reranker).to_list()
+result = tbl.search("hello").rerank(reranker).to_list()

 # Run FTS search with a reranker
-result = tbl.query("hello", query_type="fts").rerank(reranker).to_list()
+result = tbl.search("hello", query_type="fts").rerank(reranker).to_list()

 # Run hybrid search with a reranker
 tbl.create_fts_index("text")
-result = tbl.query("hello", query_type="hybrid").rerank(reranker).to_list()
+result = tbl.search("hello", query_type="hybrid").rerank(reranker).to_list()
 ```

 ### Multi-vector reranking
@@ -64,7 +64,7 @@ reranked = reranker.rerank_multivector([res1, res2, res3], deduplicate=True)
 ```

 ## Available Rerankers
-LanceDB comes with some built-in rerankers. Here are some of the rerankers that are available in LanceDB:
+LanceDB comes with the following built-in rerankers:

 - [Cohere Reranker](./cohere.md)
 - [Cross Encoder Reranker](./cross_encoder.md)

@@ -1,6 +1,6 @@
 # Jina Reranker

-This re-ranker uses the [Jina](https://jina.ai/reranker/) API to rerank the search results. You can use this re-ranker by passing `JinaReranker()` to the `rerank()` method. Note that you'll either need to set the `JINA_API_KEY` environment variable or pass the `api_key` argument to use this re-ranker.
+This reranker uses the [Jina](https://jina.ai/reranker/) API to rerank the search results. You can use this reranker by passing `JinaReranker()` to the `rerank()` method. Note that you'll either need to set the `JINA_API_KEY` environment variable or pass the `api_key` argument to use this reranker.


 !!! note
@@ -48,11 +48,11 @@ Accepted Arguments
 ----------------
 | Argument | Type | Default | Description |
 | --- | --- | --- | --- |
-| `model_name` | `str` | `"jina-reranker-v2-base-multilingual"` | The name of the reranker model to use. You can find the list of available models in https://jina.ai/reranker/|
+| `model_name` | `str` | `"jina-reranker-v2-base-multilingual"` | The name of the reranker model to use. You can find the list of available models at https://jina.ai/reranker. |
 | `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
 | `top_n` | `str` | `None` | The number of results to return. If None, will return all results. |
 | `api_key` | `str` | `None` | The API key for the Jina API. If not provided, the `JINA_API_KEY` environment variable is used. |
-| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
+| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score`. If "all", will return the relevance score along with the vector and/or FTS scores, depending on the query type. |


@@ -62,17 +62,17 @@ You can specify the type of scores you want the reranker to return. The followin
 ### Hybrid Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ❌ Not Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ❌ Not Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`). |

 ### Vector Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returns have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ✅ Supported | Results have vector(`_distance`) along with Hybrid Search score(`_relevance_score`). |

 ### FTS Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returns have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ✅ Supported | Results have FTS(`score`) along with Hybrid Search score(`_relevance_score`). |

@@ -1,9 +1,9 @@
 # Linear Combination Reranker

 !!! note
-This is depricated. It is recommended to use the `RRFReranker` instead, if you want to use a score based reranker.
+This is deprecated. It is recommended to use the `RRFReranker` instead, if you want to use a score-based reranker.

-It combines the results of semantic and full-text search using a linear combination of the scores. The weights for the linear combination can be specified. It defaults to 0.7, i.e, 70% weight for semantic search and 30% weight for full-text search.
+The Linear Combination Reranker combines the results of semantic and full-text search using a linear combination of the scores. The weights for the linear combination can be specified, and default to 0.7, i.e., 70% weight for semantic search and 30% weight for full-text search.

 !!! note
 Supported Query Types: Hybrid
@@ -51,5 +51,5 @@ You can specify the type of scores you want the reranker to return. The followin
 ### Hybrid Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_distance`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column |
+| `all` | ✅ Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_distance`) |

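Schematically, the combination works as sketched below. This is only the idea, not the library's internals; in particular, the real reranker has to put the vector distance (lower is better) and the FTS score (higher is better) on a comparable scale before mixing them:

```python
def linear_combination(semantic_score: float, fts_score: float, weight: float = 0.7) -> float:
    """Mix normalized semantic and FTS scores; both assumed in [0, 1], higher is better."""
    return weight * semantic_score + (1.0 - weight) * fts_score


# 70% semantic, 30% full-text by default
print(linear_combination(0.9, 0.4))  # 0.75
```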
@@ -1,11 +1,11 @@
 # OpenAI Reranker (Experimental)

-This re-ranker uses OpenAI chat model to rerank the search results. You can use this re-ranker by passing `OpenAI()` to the `rerank()` method.
+This reranker uses the OpenAI chat model to rerank the search results. You can use this reranker by passing `OpenAI()` to the `rerank()` method.
 !!! note
 Supported Query Types: Hybrid, Vector, FTS

 !!! warning
-This re-ranker is experimental. OpenAI doesn't have a dedicated reranking model, so we are using the chat model for reranking.
+This reranker is experimental. OpenAI doesn't have a dedicated reranking model, so we are using the chat model for reranking.

 ```python
 import numpy
@@ -47,7 +47,7 @@ Accepted Arguments
 | --- | --- | --- | --- |
 | `model_name` | `str` | `"gpt-4-turbo-preview"` | The name of the reranker model to use.|
 | `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
-| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
+| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score`. If "all", will return the relevance score along with the vector and/or FTS scores, depending on the query type. |
 | `api_key` | str | `None` | The API key to use. If None, will use the OPENAI_API_KEY environment variable.


@@ -57,17 +57,17 @@ You can specify the type of scores you want the reranker to return. The followin
 ### Hybrid Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ❌ Not Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ❌ Not Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`). |

 ### Vector Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returns have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ✅ Supported | Results have vector(`_distance`) along with Hybrid Search score(`_relevance_score`). |

 ### FTS Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returns have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
+| `all` | ✅ Supported | Results have FTS(`score`) along with Hybrid Search score(`_relevance_score`). |

@@ -1,6 +1,6 @@
 # Reciprocal Rank Fusion Reranker

-This is the default re-ranker used by LanceDB hybrid search. Reciprocal Rank Fusion (RRF) is an algorithm that evaluates the search scores by leveraging the positions/rank of the documents. The implementation follows this [paper](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf).
+This is the default reranker used by LanceDB hybrid search. Reciprocal Rank Fusion (RRF) is an algorithm that evaluates the search scores by leveraging the positions/rank of the documents. The implementation follows this [paper](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf).


 !!! note
@@ -39,7 +39,7 @@ Accepted Arguments
 ----------------
 | Argument | Type | Default | Description |
 | --- | --- | --- | --- |
-| `K` | `int` | `60` | A constant used in the RRF formula (default is 60). Experiments indicate that k = 60 was near-optimal, but that the choice is not critical |
+| `K` | `int` | `60` | A constant used in the RRF formula (default is 60). Experiments indicate that k = 60 was near-optimal, but that the choice is not critical. |
 | `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score`. If "all", will return all scores from the vector and FTS search along with the relevance score. |

@@ -49,5 +49,5 @@ You can specify the type of scores you want the reranker to return. The followin
 ### Hybrid Search
 |`return_score`| Status | Description |
 | --- | --- | --- |
-| `relevance` | ✅ Supported | Returned rows only have the `_relevance_score` column |
-| `all` | ✅ Supported | Returned rows have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
+| `relevance` | ✅ Supported | Returned rows only have the `_relevance_score` column. |
+| `all` | ✅ Supported | Returned rows have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`). |

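For intuition, RRF gives each document the sum of 1 / (K + rank) over the result lists in which it appears (rank starting at 1), so documents ranked near the top of several lists win. A small sketch of the formula, not the library's implementation:

```python
from collections import defaultdict


def rrf_scores(result_lists: list[list[str]], k: int = 60) -> dict[str, float]:
    """Reciprocal Rank Fusion over several ranked lists of document ids."""
    scores: dict[str, float] = defaultdict(float)
    for ranked in result_lists:
        for rank, doc_id in enumerate(ranked, start=1):
            scores[doc_id] += 1.0 / (k + rank)
    return dict(scores)


vector_hits = ["doc_a", "doc_b", "doc_c"]
fts_hits = ["doc_b", "doc_a", "doc_d"]
print(sorted(rrf_scores([vector_hits, fts_hits]).items(), key=lambda kv: -kv[1]))
```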
Some files were not shown because too many files have changed in this diff.