mirror of
https://github.com/lancedb/lancedb.git
synced 2025-12-23 13:29:57 +00:00
Compare commits
181 Commits
python-v0.
...
python-v0.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a3b45a4d00 | ||
|
|
c316c2f532 | ||
|
|
3966b16b63 | ||
|
|
5661cc15ac | ||
|
|
4e7220400f | ||
|
|
ae4928fe77 | ||
|
|
e80a405dee | ||
|
|
a53e19e386 | ||
|
|
c0097c5f0a | ||
|
|
c199708e64 | ||
|
|
4a47150ae7 | ||
|
|
f86b20a564 | ||
|
|
cc81f3e1a5 | ||
|
|
bc49c4db82 | ||
|
|
d2eec46f17 | ||
|
|
51437bc228 | ||
|
|
fa53cfcfd2 | ||
|
|
374fe0ad95 | ||
|
|
35e5b84ba9 | ||
|
|
7c12d497b0 | ||
|
|
dfe4ba8dad | ||
|
|
fa1b9ad5bd | ||
|
|
8877eb020d | ||
|
|
01e4291d21 | ||
|
|
ab3ea76ad1 | ||
|
|
728ef8657d | ||
|
|
0b13901a16 | ||
|
|
84b110e0ef | ||
|
|
e1836e54e3 | ||
|
|
4ba5326880 | ||
|
|
b036a69300 | ||
|
|
5b12a47119 | ||
|
|
769d483e50 | ||
|
|
9ecb11fe5a | ||
|
|
22bd8329f3 | ||
|
|
a736fad149 | ||
|
|
072adc41aa | ||
|
|
c6f25ef1f0 | ||
|
|
2f0c5baea2 | ||
|
|
a63dd66d41 | ||
|
|
d6b3ccb37b | ||
|
|
c4f99e82e5 | ||
|
|
979a2d3d9d | ||
|
|
7ac5f74c80 | ||
|
|
ecdee4d2b1 | ||
|
|
f391ed828a | ||
|
|
a99a450f2b | ||
|
|
6fa1f37506 | ||
|
|
544382df5e | ||
|
|
784f00ef6d | ||
|
|
96d7446f70 | ||
|
|
99ea78fb55 | ||
|
|
8eef4cdc28 | ||
|
|
0f102f02c3 | ||
|
|
a33a0670f6 | ||
|
|
14c9ff46d1 | ||
|
|
1865f7decf | ||
|
|
a608621476 | ||
|
|
00514999ff | ||
|
|
b3b597fef6 | ||
|
|
bf17144591 | ||
|
|
09e110525f | ||
|
|
40f0dbb64d | ||
|
|
3b19e96ae7 | ||
|
|
78a17ad54c | ||
|
|
a8e6b491e2 | ||
|
|
cea541ca46 | ||
|
|
873ffc1042 | ||
|
|
83273ad997 | ||
|
|
d18d63c69d | ||
|
|
c3e865e8d0 | ||
|
|
a7755cb313 | ||
|
|
3490f3456f | ||
|
|
0a1d0693e1 | ||
|
|
fd330b4b4b | ||
|
|
d4e9fc08e0 | ||
|
|
3626f2f5e1 | ||
|
|
e64712cfa5 | ||
|
|
3e3118f85c | ||
|
|
592598a333 | ||
|
|
5ad21341c9 | ||
|
|
6e08caa091 | ||
|
|
7e259d8b0f | ||
|
|
e84f747464 | ||
|
|
998cd43fe6 | ||
|
|
4bc7eebe61 | ||
|
|
2e3b34e79b | ||
|
|
e7574698eb | ||
|
|
801a9e5f6f | ||
|
|
4e5fbe6c99 | ||
|
|
1a449fa49e | ||
|
|
6bf742c759 | ||
|
|
ef3093bc23 | ||
|
|
16851389ea | ||
|
|
c269524b2f | ||
|
|
f6eef14313 | ||
|
|
32716adaa3 | ||
|
|
5e98b7f4c0 | ||
|
|
3f2589c11f | ||
|
|
e3b99694d6 | ||
|
|
9d42dc349c | ||
|
|
482f1ee1d3 | ||
|
|
2f39274a66 | ||
|
|
2fc174f532 | ||
|
|
dba85f4d6f | ||
|
|
555fa26147 | ||
|
|
e05c0cd87e | ||
|
|
25c17ebf4e | ||
|
|
87b12b57dc | ||
|
|
3dc9b71914 | ||
|
|
2622f34d1a | ||
|
|
a677a4b651 | ||
|
|
e6b4f14c1f | ||
|
|
15f8f4d627 | ||
|
|
6526d6c3b1 | ||
|
|
da4d7e3ca7 | ||
|
|
8fbadca9aa | ||
|
|
29120219cf | ||
|
|
a9897d9d85 | ||
|
|
acda7a4589 | ||
|
|
dac0857745 | ||
|
|
0a9e1eab75 | ||
|
|
d999d72c8d | ||
|
|
de4720993e | ||
|
|
6c14a307e2 | ||
|
|
43747278c8 | ||
|
|
e5f42a850e | ||
|
|
7920ecf66e | ||
|
|
28e1b70e4b | ||
|
|
52b79d2b1e | ||
|
|
c05d45150d | ||
|
|
48ed3bb544 | ||
|
|
bcfc93cc88 | ||
|
|
214d0debf5 | ||
|
|
f059372137 | ||
|
|
3dc1803c07 | ||
|
|
d0501f65f1 | ||
|
|
4703cc6894 | ||
|
|
493f9ce467 | ||
|
|
5c759505b8 | ||
|
|
bb6a39727e | ||
|
|
d57bed90e5 | ||
|
|
648327e90c | ||
|
|
6c7e81ee57 | ||
|
|
905e9d4738 | ||
|
|
38642e349c | ||
|
|
6879861ea8 | ||
|
|
88325e488e | ||
|
|
995bd9bf37 | ||
|
|
36cc06697f | ||
|
|
35da464591 | ||
|
|
31f9c30ffb | ||
|
|
92dcf24b0c | ||
|
|
6b0adba2d9 | ||
|
|
66cbf6b6c5 | ||
|
|
ce9506db71 | ||
|
|
b66cd943a7 | ||
|
|
d8d11f48e7 | ||
|
|
7ec5df3022 | ||
|
|
b17304172c | ||
|
|
fbe5408434 | ||
|
|
3f3f845c5a | ||
|
|
fbffe532a8 | ||
|
|
55ffc96e56 | ||
|
|
998c5f3f74 | ||
|
|
6eacae18c4 | ||
|
|
d3ea75cc2b | ||
|
|
f4afe456e8 | ||
|
|
ea5c2266b8 | ||
|
|
c557e77f09 | ||
|
|
3c0a64be8f | ||
|
|
0e496ed3b5 | ||
|
|
17c9e9afea | ||
|
|
0b45ef93c0 | ||
|
|
b474f98049 | ||
|
|
2c05ffed52 | ||
|
|
8b31540b21 | ||
|
|
ba844318f8 | ||
|
|
f007b76153 | ||
|
|
5d8d258f59 | ||
|
|
4172140f74 |
@@ -1,5 +1,5 @@
|
|||||||
[tool.bumpversion]
|
[tool.bumpversion]
|
||||||
current_version = "0.14.1"
|
current_version = "0.18.0"
|
||||||
parse = """(?x)
|
parse = """(?x)
|
||||||
(?P<major>0|[1-9]\\d*)\\.
|
(?P<major>0|[1-9]\\d*)\\.
|
||||||
(?P<minor>0|[1-9]\\d*)\\.
|
(?P<minor>0|[1-9]\\d*)\\.
|
||||||
|
|||||||
@@ -52,12 +52,7 @@ runs:
|
|||||||
args: ${{ inputs.args }}
|
args: ${{ inputs.args }}
|
||||||
before-script-linux: |
|
before-script-linux: |
|
||||||
set -e
|
set -e
|
||||||
apt install -y unzip
|
yum install -y openssl-devel clang \
|
||||||
if [ $(uname -m) = "x86_64" ]; then
|
&& curl -L https://github.com/protocolbuffers/protobuf/releases/download/v24.4/protoc-24.4-linux-aarch_64.zip > /tmp/protoc.zip \
|
||||||
PROTOC_ARCH="x86_64"
|
|
||||||
else
|
|
||||||
PROTOC_ARCH="aarch_64"
|
|
||||||
fi
|
|
||||||
curl -L https://github.com/protocolbuffers/protobuf/releases/download/v24.4/protoc-24.4-linux-$PROTOC_ARCH.zip > /tmp/protoc.zip \
|
|
||||||
&& unzip /tmp/protoc.zip -d /usr/local \
|
&& unzip /tmp/protoc.zip -d /usr/local \
|
||||||
&& rm /tmp/protoc.zip
|
&& rm /tmp/protoc.zip
|
||||||
|
|||||||
2
.github/workflows/build_mac_wheel/action.yml
vendored
2
.github/workflows/build_mac_wheel/action.yml
vendored
@@ -20,7 +20,7 @@ runs:
|
|||||||
uses: PyO3/maturin-action@v1
|
uses: PyO3/maturin-action@v1
|
||||||
with:
|
with:
|
||||||
command: build
|
command: build
|
||||||
|
# TODO: pass through interpreter
|
||||||
args: ${{ inputs.args }}
|
args: ${{ inputs.args }}
|
||||||
docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
|
docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
|
||||||
working-directory: python
|
working-directory: python
|
||||||
interpreter: 3.${{ inputs.python-minor-version }}
|
|
||||||
|
|||||||
@@ -28,7 +28,7 @@ runs:
|
|||||||
args: ${{ inputs.args }}
|
args: ${{ inputs.args }}
|
||||||
docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
|
docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
|
||||||
working-directory: python
|
working-directory: python
|
||||||
- uses: actions/upload-artifact@v3
|
- uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: windows-wheels
|
name: windows-wheels
|
||||||
path: python\target\wheels
|
path: python\target\wheels
|
||||||
|
|||||||
31
.github/workflows/license-header-check.yml
vendored
Normal file
31
.github/workflows/license-header-check.yml
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
name: Check license headers
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- rust/**
|
||||||
|
- python/**
|
||||||
|
- nodejs/**
|
||||||
|
- java/**
|
||||||
|
- .github/workflows/license-header-check.yml
|
||||||
|
jobs:
|
||||||
|
check-licenses:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Check out code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Install license-header-checker
|
||||||
|
working-directory: /tmp
|
||||||
|
run: |
|
||||||
|
curl -s https://raw.githubusercontent.com/lluissm/license-header-checker/master/install.sh | bash
|
||||||
|
mv /tmp/bin/license-header-checker /usr/local/bin/
|
||||||
|
- name: Check license headers (rust)
|
||||||
|
run: license-header-checker -a -v ./rust/license_header.txt ./ rs && [[ -z `git status -s` ]]
|
||||||
|
- name: Check license headers (python)
|
||||||
|
run: license-header-checker -a -v ./python/license_header.txt python py && [[ -z `git status -s` ]]
|
||||||
|
- name: Check license headers (typescript)
|
||||||
|
run: license-header-checker -a -v ./nodejs/license_header.txt nodejs ts && [[ -z `git status -s` ]]
|
||||||
|
- name: Check license headers (java)
|
||||||
|
run: license-header-checker -a -v ./nodejs/license_header.txt java java && [[ -z `git status -s` ]]
|
||||||
9
.github/workflows/make-release-commit.yml
vendored
9
.github/workflows/make-release-commit.yml
vendored
@@ -43,7 +43,7 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
make-release:
|
make-release:
|
||||||
# Creates tag and GH release. The GH release will trigger the build and release jobs.
|
# Creates tag and GH release. The GH release will trigger the build and release jobs.
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-24.04
|
||||||
permissions:
|
permissions:
|
||||||
contents: write
|
contents: write
|
||||||
steps:
|
steps:
|
||||||
@@ -57,15 +57,14 @@ jobs:
|
|||||||
# trigger any workflows watching for new tags. See:
|
# trigger any workflows watching for new tags. See:
|
||||||
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
|
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
|
||||||
token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
|
token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
|
||||||
|
- name: Validate Lance dependency is at stable version
|
||||||
|
if: ${{ inputs.type == 'stable' }}
|
||||||
|
run: python ci/validate_stable_lance.py
|
||||||
- name: Set git configs for bumpversion
|
- name: Set git configs for bumpversion
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
git config user.name 'Lance Release'
|
git config user.name 'Lance Release'
|
||||||
git config user.email 'lance-dev@lancedb.com'
|
git config user.email 'lance-dev@lancedb.com'
|
||||||
- name: Set up Python 3.11
|
|
||||||
uses: actions/setup-python@v5
|
|
||||||
with:
|
|
||||||
python-version: "3.11"
|
|
||||||
- name: Bump Python version
|
- name: Bump Python version
|
||||||
if: ${{ inputs.python }}
|
if: ${{ inputs.python }}
|
||||||
working-directory: python
|
working-directory: python
|
||||||
|
|||||||
12
.github/workflows/nodejs.yml
vendored
12
.github/workflows/nodejs.yml
vendored
@@ -106,6 +106,18 @@ jobs:
|
|||||||
python ci/mock_openai.py &
|
python ci/mock_openai.py &
|
||||||
cd nodejs/examples
|
cd nodejs/examples
|
||||||
npm test
|
npm test
|
||||||
|
- name: Check docs
|
||||||
|
run: |
|
||||||
|
# We run this as part of the job because the binary needs to be built
|
||||||
|
# first to export the types of the native code.
|
||||||
|
set -e
|
||||||
|
npm ci
|
||||||
|
npm run docs
|
||||||
|
if ! git diff --exit-code; then
|
||||||
|
echo "Docs need to be updated"
|
||||||
|
echo "Run 'npm run docs', fix any warnings, and commit the changes."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
macos:
|
macos:
|
||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
runs-on: "macos-14"
|
runs-on: "macos-14"
|
||||||
|
|||||||
188
.github/workflows/npm-publish.yml
vendored
188
.github/workflows/npm-publish.yml
vendored
@@ -334,51 +334,50 @@ jobs:
|
|||||||
path: |
|
path: |
|
||||||
node/dist/lancedb-vectordb-win32*.tgz
|
node/dist/lancedb-vectordb-win32*.tgz
|
||||||
|
|
||||||
# TODO: https://github.com/lancedb/lancedb/issues/1975
|
node-windows-arm64:
|
||||||
# node-windows-arm64:
|
name: vectordb ${{ matrix.config.arch }}-pc-windows-msvc
|
||||||
# name: vectordb ${{ matrix.config.arch }}-pc-windows-msvc
|
# if: startsWith(github.ref, 'refs/tags/v')
|
||||||
# # if: startsWith(github.ref, 'refs/tags/v')
|
runs-on: ubuntu-latest
|
||||||
# runs-on: ubuntu-latest
|
container: alpine:edge
|
||||||
# container: alpine:edge
|
strategy:
|
||||||
# strategy:
|
fail-fast: false
|
||||||
# fail-fast: false
|
matrix:
|
||||||
# matrix:
|
config:
|
||||||
# config:
|
# - arch: x86_64
|
||||||
# # - arch: x86_64
|
- arch: aarch64
|
||||||
# - arch: aarch64
|
steps:
|
||||||
# steps:
|
- name: Checkout
|
||||||
# - name: Checkout
|
uses: actions/checkout@v4
|
||||||
# uses: actions/checkout@v4
|
- name: Install dependencies
|
||||||
# - name: Install dependencies
|
run: |
|
||||||
# run: |
|
apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
|
||||||
# apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
|
curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
|
||||||
# curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
|
echo "source $HOME/.cargo/env" >> saved_env
|
||||||
# echo "source $HOME/.cargo/env" >> saved_env
|
echo "export CC=clang" >> saved_env
|
||||||
# echo "export CC=clang" >> saved_env
|
echo "export AR=llvm-ar" >> saved_env
|
||||||
# echo "export AR=llvm-ar" >> saved_env
|
source "$HOME/.cargo/env"
|
||||||
# source "$HOME/.cargo/env"
|
rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
|
||||||
# rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
|
(mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
|
||||||
# (mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
|
echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
|
||||||
# echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
|
echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
|
||||||
# echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
|
- name: Configure x86_64 build
|
||||||
# - name: Configure x86_64 build
|
if: ${{ matrix.config.arch == 'x86_64' }}
|
||||||
# if: ${{ matrix.config.arch == 'x86_64' }}
|
run: |
|
||||||
# run: |
|
echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
|
||||||
# echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
|
- name: Configure aarch64 build
|
||||||
# - name: Configure aarch64 build
|
if: ${{ matrix.config.arch == 'aarch64' }}
|
||||||
# if: ${{ matrix.config.arch == 'aarch64' }}
|
run: |
|
||||||
# run: |
|
echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
|
||||||
# echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
|
- name: Build Windows Artifacts
|
||||||
# - name: Build Windows Artifacts
|
run: |
|
||||||
# run: |
|
source ./saved_env
|
||||||
# source ./saved_env
|
bash ci/manylinux_node/build_vectordb.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-pc-windows-msvc
|
||||||
# bash ci/manylinux_node/build_vectordb.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-pc-windows-msvc
|
- name: Upload Windows Artifacts
|
||||||
# - name: Upload Windows Artifacts
|
uses: actions/upload-artifact@v4
|
||||||
# uses: actions/upload-artifact@v4
|
with:
|
||||||
# with:
|
name: node-native-windows-${{ matrix.config.arch }}
|
||||||
# name: node-native-windows-${{ matrix.config.arch }}
|
path: |
|
||||||
# path: |
|
node/dist/lancedb-vectordb-win32*.tgz
|
||||||
# node/dist/lancedb-vectordb-win32*.tgz
|
|
||||||
|
|
||||||
nodejs-windows:
|
nodejs-windows:
|
||||||
name: lancedb ${{ matrix.target }}
|
name: lancedb ${{ matrix.target }}
|
||||||
@@ -414,58 +413,57 @@ jobs:
|
|||||||
path: |
|
path: |
|
||||||
nodejs/dist/*.node
|
nodejs/dist/*.node
|
||||||
|
|
||||||
# TODO: https://github.com/lancedb/lancedb/issues/1975
|
nodejs-windows-arm64:
|
||||||
# nodejs-windows-arm64:
|
name: lancedb ${{ matrix.config.arch }}-pc-windows-msvc
|
||||||
# name: lancedb ${{ matrix.config.arch }}-pc-windows-msvc
|
# Only runs on tags that matches the make-release action
|
||||||
# # Only runs on tags that matches the make-release action
|
# if: startsWith(github.ref, 'refs/tags/v')
|
||||||
# # if: startsWith(github.ref, 'refs/tags/v')
|
runs-on: ubuntu-latest
|
||||||
# runs-on: ubuntu-latest
|
container: alpine:edge
|
||||||
# container: alpine:edge
|
strategy:
|
||||||
# strategy:
|
fail-fast: false
|
||||||
# fail-fast: false
|
matrix:
|
||||||
# matrix:
|
config:
|
||||||
# config:
|
# - arch: x86_64
|
||||||
# # - arch: x86_64
|
- arch: aarch64
|
||||||
# - arch: aarch64
|
steps:
|
||||||
# steps:
|
- name: Checkout
|
||||||
# - name: Checkout
|
uses: actions/checkout@v4
|
||||||
# uses: actions/checkout@v4
|
- name: Install dependencies
|
||||||
# - name: Install dependencies
|
run: |
|
||||||
# run: |
|
apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
|
||||||
# apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
|
curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
|
||||||
# curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
|
echo "source $HOME/.cargo/env" >> saved_env
|
||||||
# echo "source $HOME/.cargo/env" >> saved_env
|
echo "export CC=clang" >> saved_env
|
||||||
# echo "export CC=clang" >> saved_env
|
echo "export AR=llvm-ar" >> saved_env
|
||||||
# echo "export AR=llvm-ar" >> saved_env
|
source "$HOME/.cargo/env"
|
||||||
# source "$HOME/.cargo/env"
|
rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
|
||||||
# rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
|
(mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
|
||||||
# (mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
|
echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
|
||||||
# echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
|
echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
|
||||||
# echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
|
printf '#!/bin/sh\ncargo "$@"' > $HOME/.cargo/bin/cargo-xwin
|
||||||
# printf '#!/bin/sh\ncargo "$@"' > $HOME/.cargo/bin/cargo-xwin
|
chmod u+x $HOME/.cargo/bin/cargo-xwin
|
||||||
# chmod u+x $HOME/.cargo/bin/cargo-xwin
|
- name: Configure x86_64 build
|
||||||
# - name: Configure x86_64 build
|
if: ${{ matrix.config.arch == 'x86_64' }}
|
||||||
# if: ${{ matrix.config.arch == 'x86_64' }}
|
run: |
|
||||||
# run: |
|
echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
|
||||||
# echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
|
- name: Configure aarch64 build
|
||||||
# - name: Configure aarch64 build
|
if: ${{ matrix.config.arch == 'aarch64' }}
|
||||||
# if: ${{ matrix.config.arch == 'aarch64' }}
|
run: |
|
||||||
# run: |
|
echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
|
||||||
# echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
|
- name: Build Windows Artifacts
|
||||||
# - name: Build Windows Artifacts
|
run: |
|
||||||
# run: |
|
source ./saved_env
|
||||||
# source ./saved_env
|
bash ci/manylinux_node/build_lancedb.sh ${{ matrix.config.arch }}
|
||||||
# bash ci/manylinux_node/build_lancedb.sh ${{ matrix.config.arch }}
|
- name: Upload Windows Artifacts
|
||||||
# - name: Upload Windows Artifacts
|
uses: actions/upload-artifact@v4
|
||||||
# uses: actions/upload-artifact@v4
|
with:
|
||||||
# with:
|
name: nodejs-native-windows-${{ matrix.config.arch }}
|
||||||
# name: nodejs-native-windows-${{ matrix.config.arch }}
|
path: |
|
||||||
# path: |
|
nodejs/dist/*.node
|
||||||
# nodejs/dist/*.node
|
|
||||||
|
|
||||||
release:
|
release:
|
||||||
name: vectordb NPM Publish
|
name: vectordb NPM Publish
|
||||||
needs: [node, node-macos, node-linux-gnu, node-linux-musl, node-windows]
|
needs: [node, node-macos, node-linux-gnu, node-linux-musl, node-windows, node-windows-arm64]
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
# Only runs on tags that matches the make-release action
|
# Only runs on tags that matches the make-release action
|
||||||
if: startsWith(github.ref, 'refs/tags/v')
|
if: startsWith(github.ref, 'refs/tags/v')
|
||||||
@@ -505,7 +503,7 @@ jobs:
|
|||||||
|
|
||||||
release-nodejs:
|
release-nodejs:
|
||||||
name: lancedb NPM Publish
|
name: lancedb NPM Publish
|
||||||
needs: [nodejs-macos, nodejs-linux-gnu, nodejs-linux-musl, nodejs-windows]
|
needs: [nodejs-macos, nodejs-linux-gnu, nodejs-linux-musl, nodejs-windows, nodejs-windows-arm64]
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
# Only runs on tags that matches the make-release action
|
# Only runs on tags that matches the make-release action
|
||||||
if: startsWith(github.ref, 'refs/tags/v')
|
if: startsWith(github.ref, 'refs/tags/v')
|
||||||
|
|||||||
14
.github/workflows/pypi-publish.yml
vendored
14
.github/workflows/pypi-publish.yml
vendored
@@ -15,15 +15,21 @@ jobs:
|
|||||||
- platform: x86_64
|
- platform: x86_64
|
||||||
manylinux: "2_17"
|
manylinux: "2_17"
|
||||||
extra_args: ""
|
extra_args: ""
|
||||||
|
runner: ubuntu-22.04
|
||||||
- platform: x86_64
|
- platform: x86_64
|
||||||
manylinux: "2_28"
|
manylinux: "2_28"
|
||||||
extra_args: "--features fp16kernels"
|
extra_args: "--features fp16kernels"
|
||||||
|
runner: ubuntu-22.04
|
||||||
- platform: aarch64
|
- platform: aarch64
|
||||||
manylinux: "2_24"
|
manylinux: "2_17"
|
||||||
extra_args: ""
|
extra_args: ""
|
||||||
# We don't build fp16 kernels for aarch64, because it uses
|
# For successful fat LTO builds, we need a large runner to avoid OOM errors.
|
||||||
# cross compilation image, which doesn't have a new enough compiler.
|
runner: ubuntu-2404-8x-arm64
|
||||||
runs-on: "ubuntu-22.04"
|
- platform: aarch64
|
||||||
|
manylinux: "2_28"
|
||||||
|
extra_args: "--features fp16kernels"
|
||||||
|
runner: ubuntu-2404-8x-arm64
|
||||||
|
runs-on: ${{ matrix.config.runner }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
|
|||||||
48
.github/workflows/python.yml
vendored
48
.github/workflows/python.yml
vendored
@@ -33,13 +33,14 @@ jobs:
|
|||||||
python-version: "3.12"
|
python-version: "3.12"
|
||||||
- name: Install ruff
|
- name: Install ruff
|
||||||
run: |
|
run: |
|
||||||
pip install ruff==0.8.4
|
pip install ruff==0.9.9
|
||||||
- name: Format check
|
- name: Format check
|
||||||
run: ruff format --check .
|
run: ruff format --check .
|
||||||
- name: Lint
|
- name: Lint
|
||||||
run: ruff check .
|
run: ruff check .
|
||||||
doctest:
|
|
||||||
name: "Doctest"
|
type-check:
|
||||||
|
name: "Type Check"
|
||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
runs-on: "ubuntu-22.04"
|
runs-on: "ubuntu-22.04"
|
||||||
defaults:
|
defaults:
|
||||||
@@ -54,7 +55,36 @@ jobs:
|
|||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v5
|
||||||
with:
|
with:
|
||||||
python-version: "3.11"
|
python-version: "3.12"
|
||||||
|
- name: Install protobuf compiler
|
||||||
|
run: |
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y protobuf-compiler
|
||||||
|
pip install toml
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
python ../ci/parse_requirements.py pyproject.toml --extras dev,tests,embeddings > requirements.txt
|
||||||
|
pip install -r requirements.txt
|
||||||
|
- name: Run pyright
|
||||||
|
run: pyright
|
||||||
|
|
||||||
|
doctest:
|
||||||
|
name: "Doctest"
|
||||||
|
timeout-minutes: 30
|
||||||
|
runs-on: "ubuntu-24.04"
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
shell: bash
|
||||||
|
working-directory: python
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
lfs: true
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: "3.12"
|
||||||
cache: "pip"
|
cache: "pip"
|
||||||
- name: Install protobuf
|
- name: Install protobuf
|
||||||
run: |
|
run: |
|
||||||
@@ -75,8 +105,8 @@ jobs:
|
|||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
python-minor-version: ["9", "11"]
|
python-minor-version: ["9", "12"]
|
||||||
runs-on: "ubuntu-22.04"
|
runs-on: "ubuntu-24.04"
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -127,7 +157,7 @@ jobs:
|
|||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v5
|
||||||
with:
|
with:
|
||||||
python-version: "3.11"
|
python-version: "3.12"
|
||||||
- uses: Swatinem/rust-cache@v2
|
- uses: Swatinem/rust-cache@v2
|
||||||
with:
|
with:
|
||||||
workspaces: python
|
workspaces: python
|
||||||
@@ -157,7 +187,7 @@ jobs:
|
|||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v5
|
||||||
with:
|
with:
|
||||||
python-version: "3.11"
|
python-version: "3.12"
|
||||||
- uses: Swatinem/rust-cache@v2
|
- uses: Swatinem/rust-cache@v2
|
||||||
with:
|
with:
|
||||||
workspaces: python
|
workspaces: python
|
||||||
@@ -168,7 +198,7 @@ jobs:
|
|||||||
run: rm -rf target/wheels
|
run: rm -rf target/wheels
|
||||||
pydantic1x:
|
pydantic1x:
|
||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
runs-on: "ubuntu-22.04"
|
runs-on: "ubuntu-24.04"
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: bash
|
shell: bash
|
||||||
|
|||||||
91
.github/workflows/rust.yml
vendored
91
.github/workflows/rust.yml
vendored
@@ -22,6 +22,7 @@ env:
|
|||||||
# "1" means line tables only, which is useful for panic tracebacks.
|
# "1" means line tables only, which is useful for panic tracebacks.
|
||||||
RUSTFLAGS: "-C debuginfo=1"
|
RUSTFLAGS: "-C debuginfo=1"
|
||||||
RUST_BACKTRACE: "1"
|
RUST_BACKTRACE: "1"
|
||||||
|
CARGO_INCREMENTAL: 0
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
lint:
|
lint:
|
||||||
@@ -51,6 +52,33 @@ jobs:
|
|||||||
- name: Run clippy
|
- name: Run clippy
|
||||||
run: cargo clippy --workspace --tests --all-features -- -D warnings
|
run: cargo clippy --workspace --tests --all-features -- -D warnings
|
||||||
|
|
||||||
|
build-no-lock:
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
timeout-minutes: 30
|
||||||
|
env:
|
||||||
|
# Need up-to-date compilers for kernels
|
||||||
|
CC: clang
|
||||||
|
CXX: clang++
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
# Building without a lock file often requires the latest Rust version since downstream
|
||||||
|
# dependencies may have updated their minimum Rust version.
|
||||||
|
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
|
with:
|
||||||
|
toolchain: "stable"
|
||||||
|
# Remove cargo.lock to force a fresh build
|
||||||
|
- name: Remove Cargo.lock
|
||||||
|
run: rm -f Cargo.lock
|
||||||
|
- uses: rui314/setup-mold@v1
|
||||||
|
- uses: Swatinem/rust-cache@v2
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y protobuf-compiler libssl-dev
|
||||||
|
- name: Build all
|
||||||
|
run: |
|
||||||
|
cargo build --benches --all-features --tests
|
||||||
|
|
||||||
linux:
|
linux:
|
||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
# To build all features, we need more disk space than is available
|
# To build all features, we need more disk space than is available
|
||||||
@@ -75,8 +103,11 @@ jobs:
|
|||||||
workspaces: rust
|
workspaces: rust
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
sudo apt update
|
# This shaves 2 minutes off this step in CI. This doesn't seem to be
|
||||||
|
# necessary in standard runners, but it is in the 4x runners.
|
||||||
|
sudo rm /var/lib/man-db/auto-update
|
||||||
sudo apt install -y protobuf-compiler libssl-dev
|
sudo apt install -y protobuf-compiler libssl-dev
|
||||||
|
- uses: rui314/setup-mold@v1
|
||||||
- name: Make Swap
|
- name: Make Swap
|
||||||
run: |
|
run: |
|
||||||
sudo fallocate -l 16G /swapfile
|
sudo fallocate -l 16G /swapfile
|
||||||
@@ -87,11 +118,11 @@ jobs:
|
|||||||
working-directory: .
|
working-directory: .
|
||||||
run: docker compose up --detach --wait
|
run: docker compose up --detach --wait
|
||||||
- name: Build
|
- name: Build
|
||||||
run: cargo build --all-features
|
run: cargo build --all-features --tests --locked --examples
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
run: cargo test --all-features
|
run: cargo test --all-features --locked
|
||||||
- name: Run examples
|
- name: Run examples
|
||||||
run: cargo run --example simple
|
run: cargo run --example simple --locked
|
||||||
|
|
||||||
macos:
|
macos:
|
||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
@@ -115,11 +146,14 @@ jobs:
|
|||||||
workspaces: rust
|
workspaces: rust
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: brew install protobuf
|
run: brew install protobuf
|
||||||
- name: Build
|
|
||||||
run: cargo build --all-features
|
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
# Run with everything except the integration tests.
|
run: |
|
||||||
run: cargo test --features remote,fp16kernels
|
# Don't run the s3 integration tests since docker isn't available
|
||||||
|
# on this image.
|
||||||
|
ALL_FEATURES=`cargo metadata --format-version=1 --no-deps \
|
||||||
|
| jq -r '.packages[] | .features | keys | .[]' \
|
||||||
|
| grep -v s3-test | sort | uniq | paste -s -d "," -`
|
||||||
|
cargo test --features $ALL_FEATURES --locked
|
||||||
|
|
||||||
windows:
|
windows:
|
||||||
runs-on: windows-2022
|
runs-on: windows-2022
|
||||||
@@ -140,8 +174,40 @@ jobs:
|
|||||||
- name: Run tests
|
- name: Run tests
|
||||||
run: |
|
run: |
|
||||||
$env:VCPKG_ROOT = $env:VCPKG_INSTALLATION_ROOT
|
$env:VCPKG_ROOT = $env:VCPKG_INSTALLATION_ROOT
|
||||||
cargo build
|
cargo test --features remote --locked
|
||||||
cargo test
|
|
||||||
|
windows-arm64-cross:
|
||||||
|
# We cross compile in Node releases, so we want to make sure
|
||||||
|
# this can run successfully.
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
container: alpine:edge
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Install dependencies (part 1)
|
||||||
|
run: |
|
||||||
|
set -e
|
||||||
|
apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
|
||||||
|
- name: Install rust
|
||||||
|
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
|
with:
|
||||||
|
target: aarch64-pc-windows-msvc
|
||||||
|
- name: Install dependencies (part 2)
|
||||||
|
run: |
|
||||||
|
set -e
|
||||||
|
mkdir -p sysroot
|
||||||
|
cd sysroot
|
||||||
|
sh ../ci/sysroot-aarch64-pc-windows-msvc.sh
|
||||||
|
- name: Check
|
||||||
|
env:
|
||||||
|
CC: clang
|
||||||
|
AR: llvm-ar
|
||||||
|
C_INCLUDE_PATH: /usr/aarch64-pc-windows-msvc/usr/include
|
||||||
|
CARGO_BUILD_TARGET: aarch64-pc-windows-msvc
|
||||||
|
RUSTFLAGS: -Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib
|
||||||
|
run: |
|
||||||
|
source $HOME/.cargo/env
|
||||||
|
cargo check --features remote --locked
|
||||||
|
|
||||||
windows-arm64:
|
windows-arm64:
|
||||||
runs-on: windows-4x-arm
|
runs-on: windows-4x-arm
|
||||||
@@ -200,7 +266,7 @@ jobs:
|
|||||||
- name: Install Rust
|
- name: Install Rust
|
||||||
run: |
|
run: |
|
||||||
Invoke-WebRequest https://win.rustup.rs/x86_64 -OutFile rustup-init.exe
|
Invoke-WebRequest https://win.rustup.rs/x86_64 -OutFile rustup-init.exe
|
||||||
.\rustup-init.exe -y --default-host aarch64-pc-windows-msvc
|
.\rustup-init.exe -y --default-host aarch64-pc-windows-msvc --default-toolchain 1.83.0
|
||||||
shell: powershell
|
shell: powershell
|
||||||
- name: Add Rust to PATH
|
- name: Add Rust to PATH
|
||||||
run: |
|
run: |
|
||||||
@@ -236,8 +302,7 @@ jobs:
|
|||||||
- name: Run tests
|
- name: Run tests
|
||||||
run: |
|
run: |
|
||||||
$env:VCPKG_ROOT = $env:VCPKG_INSTALLATION_ROOT
|
$env:VCPKG_ROOT = $env:VCPKG_INSTALLATION_ROOT
|
||||||
cargo build --target aarch64-pc-windows-msvc
|
cargo test --target aarch64-pc-windows-msvc --features remote --locked
|
||||||
cargo test --target aarch64-pc-windows-msvc
|
|
||||||
|
|
||||||
msrv:
|
msrv:
|
||||||
# Check the minimum supported Rust version
|
# Check the minimum supported Rust version
|
||||||
|
|||||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -9,7 +9,6 @@ venv
|
|||||||
.vscode
|
.vscode
|
||||||
.zed
|
.zed
|
||||||
rust/target
|
rust/target
|
||||||
rust/Cargo.lock
|
|
||||||
|
|
||||||
site
|
site
|
||||||
|
|
||||||
@@ -42,5 +41,3 @@ dist
|
|||||||
target
|
target
|
||||||
|
|
||||||
**/sccache.log
|
**/sccache.log
|
||||||
|
|
||||||
Cargo.lock
|
|
||||||
|
|||||||
@@ -7,9 +7,15 @@ repos:
|
|||||||
- id: trailing-whitespace
|
- id: trailing-whitespace
|
||||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||||
# Ruff version.
|
# Ruff version.
|
||||||
rev: v0.2.2
|
rev: v0.9.9
|
||||||
hooks:
|
hooks:
|
||||||
- id: ruff
|
- id: ruff
|
||||||
|
# - repo: https://github.com/RobertCraigie/pyright-python
|
||||||
|
# rev: v1.1.395
|
||||||
|
# hooks:
|
||||||
|
# - id: pyright
|
||||||
|
# args: ["--project", "python"]
|
||||||
|
# additional_dependencies: [pyarrow-stubs]
|
||||||
- repo: local
|
- repo: local
|
||||||
hooks:
|
hooks:
|
||||||
- id: local-biome-check
|
- id: local-biome-check
|
||||||
|
|||||||
78
CONTRIBUTING.md
Normal file
78
CONTRIBUTING.md
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
# Contributing to LanceDB
|
||||||
|
|
||||||
|
LanceDB is an open-source project and we welcome contributions from the community.
|
||||||
|
This document outlines the process for contributing to LanceDB.
|
||||||
|
|
||||||
|
## Reporting Issues
|
||||||
|
|
||||||
|
If you encounter a bug or have a feature request, please open an issue on the
|
||||||
|
[GitHub issue tracker](https://github.com/lancedb/lancedb).
|
||||||
|
|
||||||
|
## Picking an issue
|
||||||
|
|
||||||
|
We track issues on the GitHub issue tracker. If you are looking for something to
|
||||||
|
work on, check the [good first issue](https://github.com/lancedb/lancedb/contribute) label. These issues are typically the best described and have the smallest scope.
|
||||||
|
|
||||||
|
If there's an issue you are interested in working on, please leave a comment on the issue. This will help us avoid duplicate work. Additionally, if you have questions about the issue, please ask them in the issue comments. We are happy to provide guidance on how to approach the issue.
|
||||||
|
|
||||||
|
## Configuring Git
|
||||||
|
|
||||||
|
First, fork the repository on GitHub, then clone your fork:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/<username>/lancedb.git
|
||||||
|
cd lancedb
|
||||||
|
```
|
||||||
|
|
||||||
|
Then add the main repository as a remote:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git remote add upstream https://github.com/lancedb/lancedb.git
|
||||||
|
git fetch upstream
|
||||||
|
```
|
||||||
|
|
||||||
|
## Setting up your development environment
|
||||||
|
|
||||||
|
We have development environments for Python, Typescript, and Java. Each environment has its own setup instructions.
|
||||||
|
|
||||||
|
* [Python](python/CONTRIBUTING.md)
|
||||||
|
* [Typescript](nodejs/CONTRIBUTING.md)
|
||||||
|
<!-- TODO: add Java contributing guide -->
|
||||||
|
* [Documentation](docs/README.md)
|
||||||
|
|
||||||
|
|
||||||
|
## Best practices for pull requests
|
||||||
|
|
||||||
|
For the best chance of having your pull request accepted, please follow these guidelines:
|
||||||
|
|
||||||
|
1. Unit test all bug fixes and new features. Your code will not be merged if it
|
||||||
|
doesn't have tests.
|
||||||
|
1. If you change the public API, update the documentation in the `docs` directory.
|
||||||
|
1. Aim to minimize the number of changes in each pull request. Keep to solving
|
||||||
|
one problem at a time, when possible.
|
||||||
|
1. Before marking a pull request ready-for-review, do a self review of your code.
|
||||||
|
Is it clear why you are making the changes? Are the changes easy to understand?
|
||||||
|
1. Use [conventional commit messages](https://www.conventionalcommits.org/en/) as pull request titles. Examples:
|
||||||
|
* New feature: `feat: adding foo API`
|
||||||
|
* Bug fix: `fix: issue with foo API`
|
||||||
|
* Documentation change: `docs: adding foo API documentation`
|
||||||
|
1. If your pull request is a work in progress, leave the pull request as a draft.
|
||||||
|
We will assume the pull request is ready for review when it is opened.
|
||||||
|
1. When writing tests, test the error cases. Make sure they have understandable
|
||||||
|
error messages.
|
||||||
|
|
||||||
|
## Project structure
|
||||||
|
|
||||||
|
The core library is written in Rust. The Python, Typescript, and Java libraries
|
||||||
|
are wrappers around the Rust library.
|
||||||
|
|
||||||
|
* `src/lancedb`: Rust library source code
|
||||||
|
* `python`: Python package source code
|
||||||
|
* `nodejs`: Typescript package source code
|
||||||
|
* `node`: **Deprecated** Typescript package source code
|
||||||
|
* `java`: Java package source code
|
||||||
|
* `docs`: Documentation source code
|
||||||
|
|
||||||
|
## Release process
|
||||||
|
|
||||||
|
For information on the release process, see: [release_process.md](release_process.md)
|
||||||
8202
Cargo.lock
generated
Normal file
8202
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
61
Cargo.toml
61
Cargo.toml
@@ -21,41 +21,52 @@ categories = ["database-implementations"]
|
|||||||
rust-version = "1.78.0"
|
rust-version = "1.78.0"
|
||||||
|
|
||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
lance = { "version" = "=0.21.1", "features" = [
|
lance = { "version" = "=0.24.1", "features" = ["dynamodb"] }
|
||||||
"dynamodb",
|
lance-io = { version = "=0.24.1" }
|
||||||
], git = "https://github.com/lancedb/lance.git", tag = "v0.21.1-beta.2" }
|
lance-index = { version = "=0.24.1" }
|
||||||
lance-io = { version = "=0.21.1", git = "https://github.com/lancedb/lance.git", tag = "v0.21.1-beta.2" }
|
lance-linalg = { version = "=0.24.1" }
|
||||||
lance-index = { version = "=0.21.1", git = "https://github.com/lancedb/lance.git", tag = "v0.21.1-beta.2" }
|
lance-table = { version = "=0.24.1" }
|
||||||
lance-linalg = { version = "=0.21.1", git = "https://github.com/lancedb/lance.git", tag = "v0.21.1-beta.2" }
|
lance-testing = { version = "=0.24.1" }
|
||||||
lance-table = { version = "=0.21.1", git = "https://github.com/lancedb/lance.git", tag = "v0.21.1-beta.2" }
|
lance-datafusion = { version = "=0.24.1" }
|
||||||
lance-testing = { version = "=0.21.1", git = "https://github.com/lancedb/lance.git", tag = "v0.21.1-beta.2" }
|
lance-encoding = { version = "=0.24.1" }
|
||||||
lance-datafusion = { version = "=0.21.1", git = "https://github.com/lancedb/lance.git", tag = "v0.21.1-beta.2" }
|
|
||||||
lance-encoding = { version = "=0.21.1", git = "https://github.com/lancedb/lance.git", tag = "v0.21.1-beta.2" }
|
|
||||||
# Note that this one does not include pyarrow
|
# Note that this one does not include pyarrow
|
||||||
arrow = { version = "53.2", optional = false }
|
arrow = { version = "54.1", optional = false }
|
||||||
arrow-array = "53.2"
|
arrow-array = "54.1"
|
||||||
arrow-data = "53.2"
|
arrow-data = "54.1"
|
||||||
arrow-ipc = "53.2"
|
arrow-ipc = "54.1"
|
||||||
arrow-ord = "53.2"
|
arrow-ord = "54.1"
|
||||||
arrow-schema = "53.2"
|
arrow-schema = "54.1"
|
||||||
arrow-arith = "53.2"
|
arrow-arith = "54.1"
|
||||||
arrow-cast = "53.2"
|
arrow-cast = "54.1"
|
||||||
async-trait = "0"
|
async-trait = "0"
|
||||||
chrono = "0.4.35"
|
datafusion = { version = "45.0", default-features = false }
|
||||||
datafusion-common = "42.0"
|
datafusion-catalog = "45.0"
|
||||||
datafusion-physical-plan = "42.0"
|
datafusion-common = { version = "45.0", default-features = false }
|
||||||
env_logger = "0.10"
|
datafusion-execution = "45.0"
|
||||||
|
datafusion-expr = "45.0"
|
||||||
|
datafusion-physical-plan = "45.0"
|
||||||
|
env_logger = "0.11"
|
||||||
half = { "version" = "=2.4.1", default-features = false, features = [
|
half = { "version" = "=2.4.1", default-features = false, features = [
|
||||||
"num-traits",
|
"num-traits",
|
||||||
] }
|
] }
|
||||||
futures = "0"
|
futures = "0"
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
moka = { version = "0.11", features = ["future"] }
|
moka = { version = "0.12", features = ["future"] }
|
||||||
object_store = "0.10.2"
|
object_store = "0.11.0"
|
||||||
pin-project = "1.0.7"
|
pin-project = "1.0.7"
|
||||||
snafu = "0.7.4"
|
snafu = "0.8"
|
||||||
url = "2"
|
url = "2"
|
||||||
num-traits = "0.2"
|
num-traits = "0.2"
|
||||||
rand = "0.8"
|
rand = "0.8"
|
||||||
regex = "1.10"
|
regex = "1.10"
|
||||||
lazy_static = "1"
|
lazy_static = "1"
|
||||||
|
semver = "1.0.25"
|
||||||
|
|
||||||
|
# Temporary pins to work around downstream issues
|
||||||
|
# https://github.com/apache/arrow-rs/commit/2fddf85afcd20110ce783ed5b4cdeb82293da30b
|
||||||
|
chrono = "=0.4.39"
|
||||||
|
# https://github.com/RustCrypto/formats/issues/1684
|
||||||
|
base64ct = "=1.6.0"
|
||||||
|
|
||||||
|
# Workaround for: https://github.com/eira-fransham/crunchy/issues/13
|
||||||
|
crunchy = "=0.2.2"
|
||||||
|
|||||||
41
ci/parse_requirements.py
Normal file
41
ci/parse_requirements.py
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
import argparse
|
||||||
|
import toml
|
||||||
|
|
||||||
|
|
||||||
|
def parse_dependencies(pyproject_path, extras=None):
|
||||||
|
with open(pyproject_path, "r") as file:
|
||||||
|
pyproject = toml.load(file)
|
||||||
|
|
||||||
|
dependencies = pyproject.get("project", {}).get("dependencies", [])
|
||||||
|
for dependency in dependencies:
|
||||||
|
print(dependency)
|
||||||
|
|
||||||
|
optional_dependencies = pyproject.get("project", {}).get(
|
||||||
|
"optional-dependencies", {}
|
||||||
|
)
|
||||||
|
|
||||||
|
if extras:
|
||||||
|
for extra in extras.split(","):
|
||||||
|
for dep in optional_dependencies.get(extra, []):
|
||||||
|
print(dep)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Generate requirements.txt from pyproject.toml"
|
||||||
|
)
|
||||||
|
parser.add_argument("path", type=str, help="Path to pyproject.toml")
|
||||||
|
parser.add_argument(
|
||||||
|
"--extras",
|
||||||
|
type=str,
|
||||||
|
help="Comma-separated list of extras to include",
|
||||||
|
default="",
|
||||||
|
)
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
parse_dependencies(args.path, args.extras)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
@@ -53,7 +53,7 @@ curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-42
|
|||||||
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/149578fb3b621cdb61ee1813b9b3e791/463ad1b0783ebda908fd6c16a4abfe93.cab
|
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/149578fb3b621cdb61ee1813b9b3e791/463ad1b0783ebda908fd6c16a4abfe93.cab
|
||||||
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/5c986c4f393c6b09d5aec3b539e9fb4a/5a22e5cde814b041749fb271547f4dd5.cab
|
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/5c986c4f393c6b09d5aec3b539e9fb4a/5a22e5cde814b041749fb271547f4dd5.cab
|
||||||
|
|
||||||
# fwpuclnt.lib arm64rt.lib
|
# dbghelp.lib fwpuclnt.lib arm64rt.lib
|
||||||
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/7a332420d812f7c1d41da865ae5a7c52/windows%20sdk%20desktop%20libs%20arm64-x86_en-us.msi
|
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/7a332420d812f7c1d41da865ae5a7c52/windows%20sdk%20desktop%20libs%20arm64-x86_en-us.msi
|
||||||
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/19de98ed4a79938d0045d19c047936b3/3e2f7be479e3679d700ce0782e4cc318.cab
|
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/19de98ed4a79938d0045d19c047936b3/3e2f7be479e3679d700ce0782e4cc318.cab
|
||||||
|
|
||||||
@@ -98,7 +98,7 @@ find /usr/aarch64-pc-windows-msvc/usr/include -type f -exec sed -i -E 's/(#inclu
|
|||||||
# reason: https://developercommunity.visualstudio.com/t/libucrtlibstreamobj-error-lnk2001-unresolved-exter/1544787#T-ND1599818
|
# reason: https://developercommunity.visualstudio.com/t/libucrtlibstreamobj-error-lnk2001-unresolved-exter/1544787#T-ND1599818
|
||||||
# I don't understand the 'correct' fix for this, arm64rt.lib is supposed to be the workaround
|
# I don't understand the 'correct' fix for this, arm64rt.lib is supposed to be the workaround
|
||||||
|
|
||||||
(cd 'program files/windows kits/10/lib/10.0.26100.0/um/arm64' && cp advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib runtimeobject.lib fwpuclnt.lib arm64rt.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)
|
(cd 'program files/windows kits/10/lib/10.0.26100.0/um/arm64' && cp advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib runtimeobject.lib dbghelp.lib fwpuclnt.lib arm64rt.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)
|
||||||
|
|
||||||
(cd 'contents/vc/tools/msvc/14.16.27023/lib/arm64' && cp libcmt.lib libvcruntime.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)
|
(cd 'contents/vc/tools/msvc/14.16.27023/lib/arm64' && cp libcmt.lib libvcruntime.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)
|
||||||
|
|
||||||
|
|||||||
34
ci/validate_stable_lance.py
Normal file
34
ci/validate_stable_lance.py
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
import tomllib
|
||||||
|
|
||||||
|
found_preview_lance = False
|
||||||
|
|
||||||
|
with open("Cargo.toml", "rb") as f:
|
||||||
|
cargo_data = tomllib.load(f)
|
||||||
|
|
||||||
|
for name, dep in cargo_data["workspace"]["dependencies"].items():
|
||||||
|
if name == "lance" or name.startswith("lance-"):
|
||||||
|
if isinstance(dep, str):
|
||||||
|
version = dep
|
||||||
|
elif isinstance(dep, dict):
|
||||||
|
# Version doesn't have the beta tag in it, so we instead look
|
||||||
|
# at the git tag.
|
||||||
|
version = dep.get('tag', dep.get('version'))
|
||||||
|
else:
|
||||||
|
raise ValueError("Unexpected type for dependency: " + str(dep))
|
||||||
|
|
||||||
|
if "beta" in version:
|
||||||
|
found_preview_lance = True
|
||||||
|
print(f"Dependency '{name}' is a preview version: {version}")
|
||||||
|
|
||||||
|
with open("python/pyproject.toml", "rb") as f:
|
||||||
|
py_proj_data = tomllib.load(f)
|
||||||
|
|
||||||
|
for dep in py_proj_data["project"]["dependencies"]:
|
||||||
|
if dep.startswith("pylance"):
|
||||||
|
if "b" in dep:
|
||||||
|
found_preview_lance = True
|
||||||
|
print(f"Dependency '{dep}' is a preview version")
|
||||||
|
break # Only one pylance dependency
|
||||||
|
|
||||||
|
if found_preview_lance:
|
||||||
|
raise ValueError("Found preview version of Lance in dependencies")
|
||||||
@@ -9,36 +9,81 @@ unreleased features.
|
|||||||
## Building the docs
|
## Building the docs
|
||||||
|
|
||||||
### Setup
|
### Setup
|
||||||
1. Install LanceDB. From LanceDB repo root: `pip install -e python`
|
1. Install LanceDB Python. See setup in [Python contributing guide](../python/CONTRIBUTING.md).
|
||||||
2. Install dependencies. From LanceDB repo root: `pip install -r docs/requirements.txt`
|
Run `make develop` to install the Python package.
|
||||||
3. Make sure you have node and npm setup
|
2. Install documentation dependencies. From LanceDB repo root: `pip install -r docs/requirements.txt`
|
||||||
4. Make sure protobuf and libssl are installed
|
|
||||||
|
|
||||||
### Building node module and create markdown files
|
### Preview the docs
|
||||||
|
|
||||||
See [Javascript docs README](./src/javascript/README.md)
|
```shell
|
||||||
|
|
||||||
### Build docs
|
|
||||||
From LanceDB repo root:
|
|
||||||
|
|
||||||
Run: `PYTHONPATH=. mkdocs build -f docs/mkdocs.yml`
|
|
||||||
|
|
||||||
If successful, you should see a `docs/site` directory that you can verify locally.
|
|
||||||
|
|
||||||
### Run local server
|
|
||||||
|
|
||||||
You can run a local server to test the docs prior to deployment by navigating to the `docs` directory and running the following command:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cd docs
|
cd docs
|
||||||
mkdocs serve
|
mkdocs serve
|
||||||
```
|
```
|
||||||
|
|
||||||
### Run doctest for typescript example
|
If you want to just generate the HTML files:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
cd lancedb/docs
|
PYTHONPATH=. mkdocs build -f docs/mkdocs.yml
|
||||||
npm i
|
```
|
||||||
npm run build
|
|
||||||
npm run all
|
If successful, you should see a `docs/site` directory that you can verify locally.
|
||||||
|
|
||||||
|
## Adding examples
|
||||||
|
|
||||||
|
To make sure examples are correct, we put examples in test files so they can be
|
||||||
|
run as part of our test suites.
|
||||||
|
|
||||||
|
You can see the tests are at:
|
||||||
|
|
||||||
|
* Python: `python/python/tests/docs`
|
||||||
|
* Typescript: `nodejs/examples/`
|
||||||
|
|
||||||
|
### Checking python examples
|
||||||
|
|
||||||
|
```shell
|
||||||
|
cd python
|
||||||
|
pytest -vv python/tests/docs
|
||||||
|
```
|
||||||
|
|
||||||
|
### Checking typescript examples
|
||||||
|
|
||||||
|
The `@lancedb/lancedb` package must be built before running the tests:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
pushd nodejs
|
||||||
|
npm ci
|
||||||
|
npm run build
|
||||||
|
popd
|
||||||
|
```
|
||||||
|
|
||||||
|
Then you can run the examples by going to the `nodejs/examples` directory and
|
||||||
|
running the tests like a normal npm package:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
pushd nodejs/examples
|
||||||
|
npm ci
|
||||||
|
npm test
|
||||||
|
popd
|
||||||
|
```
|
||||||
|
|
||||||
|
## API documentation
|
||||||
|
|
||||||
|
### Python
|
||||||
|
|
||||||
|
The Python API documentation is organized based on the file `docs/src/python/python.md`.
|
||||||
|
We manually add entries there so we can control the organization of the reference page.
|
||||||
|
**However, this means any new types must be manually added to the file.** No additional
|
||||||
|
steps are needed to generate the API documentation.
|
||||||
|
|
||||||
|
### Typescript
|
||||||
|
|
||||||
|
The typescript API documentation is generated from the typescript source code using [typedoc](https://typedoc.org/).
|
||||||
|
|
||||||
|
When new APIs are added, you must manually re-run the typedoc command to update the API documentation.
|
||||||
|
The new files should be checked into the repository.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
pushd nodejs
|
||||||
|
npm run docs
|
||||||
|
popd
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -4,6 +4,9 @@ repo_url: https://github.com/lancedb/lancedb
|
|||||||
edit_uri: https://github.com/lancedb/lancedb/tree/main/docs/src
|
edit_uri: https://github.com/lancedb/lancedb/tree/main/docs/src
|
||||||
repo_name: lancedb/lancedb
|
repo_name: lancedb/lancedb
|
||||||
docs_dir: src
|
docs_dir: src
|
||||||
|
watch:
|
||||||
|
- src
|
||||||
|
- ../python/python
|
||||||
|
|
||||||
theme:
|
theme:
|
||||||
name: "material"
|
name: "material"
|
||||||
@@ -63,6 +66,7 @@ plugins:
|
|||||||
- https://arrow.apache.org/docs/objects.inv
|
- https://arrow.apache.org/docs/objects.inv
|
||||||
- https://pandas.pydata.org/docs/objects.inv
|
- https://pandas.pydata.org/docs/objects.inv
|
||||||
- https://lancedb.github.io/lance/objects.inv
|
- https://lancedb.github.io/lance/objects.inv
|
||||||
|
- https://docs.pydantic.dev/latest/objects.inv
|
||||||
- mkdocs-jupyter
|
- mkdocs-jupyter
|
||||||
- render_swagger:
|
- render_swagger:
|
||||||
allow_arbitrary_locations: true
|
allow_arbitrary_locations: true
|
||||||
@@ -146,7 +150,9 @@ nav:
|
|||||||
- Building Custom Rerankers: reranking/custom_reranker.md
|
- Building Custom Rerankers: reranking/custom_reranker.md
|
||||||
- Example: notebooks/lancedb_reranking.ipynb
|
- Example: notebooks/lancedb_reranking.ipynb
|
||||||
- Filtering: sql.md
|
- Filtering: sql.md
|
||||||
- Versioning & Reproducibility: notebooks/reproducibility.ipynb
|
- Versioning & Reproducibility:
|
||||||
|
- sync API: notebooks/reproducibility.ipynb
|
||||||
|
- async API: notebooks/reproducibility_async.ipynb
|
||||||
- Configuring Storage: guides/storage.md
|
- Configuring Storage: guides/storage.md
|
||||||
- Migration Guide: migration.md
|
- Migration Guide: migration.md
|
||||||
- Tuning retrieval performance:
|
- Tuning retrieval performance:
|
||||||
@@ -176,6 +182,7 @@ nav:
|
|||||||
- Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md
|
- Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md
|
||||||
- Jina Embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md
|
- Jina Embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md
|
||||||
- User-defined embedding functions: embeddings/custom_embedding_function.md
|
- User-defined embedding functions: embeddings/custom_embedding_function.md
|
||||||
|
- Variables and secrets: embeddings/variables_and_secrets.md
|
||||||
- "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
|
- "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
|
||||||
- "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
|
- "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
|
||||||
- 🔌 Integrations:
|
- 🔌 Integrations:
|
||||||
@@ -278,7 +285,9 @@ nav:
|
|||||||
- Building Custom Rerankers: reranking/custom_reranker.md
|
- Building Custom Rerankers: reranking/custom_reranker.md
|
||||||
- Example: notebooks/lancedb_reranking.ipynb
|
- Example: notebooks/lancedb_reranking.ipynb
|
||||||
- Filtering: sql.md
|
- Filtering: sql.md
|
||||||
- Versioning & Reproducibility: notebooks/reproducibility.ipynb
|
- Versioning & Reproducibility:
|
||||||
|
- sync API: notebooks/reproducibility.ipynb
|
||||||
|
- async API: notebooks/reproducibility_async.ipynb
|
||||||
- Configuring Storage: guides/storage.md
|
- Configuring Storage: guides/storage.md
|
||||||
- Migration Guide: migration.md
|
- Migration Guide: migration.md
|
||||||
- Tuning retrieval performance:
|
- Tuning retrieval performance:
|
||||||
@@ -307,6 +316,7 @@ nav:
|
|||||||
- Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md
|
- Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md
|
||||||
- Jina Embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md
|
- Jina Embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md
|
||||||
- User-defined embedding functions: embeddings/custom_embedding_function.md
|
- User-defined embedding functions: embeddings/custom_embedding_function.md
|
||||||
|
- Variables and secrets: embeddings/variables_and_secrets.md
|
||||||
- "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
|
- "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
|
||||||
- "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
|
- "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
|
||||||
- Integrations:
|
- Integrations:
|
||||||
@@ -367,6 +377,7 @@ extra_css:
|
|||||||
|
|
||||||
extra_javascript:
|
extra_javascript:
|
||||||
- "extra_js/init_ask_ai_widget.js"
|
- "extra_js/init_ask_ai_widget.js"
|
||||||
|
- "extra_js/reo.js"
|
||||||
|
|
||||||
extra:
|
extra:
|
||||||
analytics:
|
analytics:
|
||||||
|
|||||||
@@ -38,6 +38,13 @@ components:
|
|||||||
required: true
|
required: true
|
||||||
schema:
|
schema:
|
||||||
type: string
|
type: string
|
||||||
|
index_name:
|
||||||
|
name: index_name
|
||||||
|
in: path
|
||||||
|
description: name of the index
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
responses:
|
responses:
|
||||||
invalid_request:
|
invalid_request:
|
||||||
description: Invalid request
|
description: Invalid request
|
||||||
@@ -485,3 +492,22 @@ paths:
|
|||||||
$ref: "#/components/responses/unauthorized"
|
$ref: "#/components/responses/unauthorized"
|
||||||
"404":
|
"404":
|
||||||
$ref: "#/components/responses/not_found"
|
$ref: "#/components/responses/not_found"
|
||||||
|
/v1/table/{name}/index/{index_name}/drop/:
|
||||||
|
post:
|
||||||
|
description: Drop an index from the table
|
||||||
|
tags:
|
||||||
|
- Tables
|
||||||
|
summary: Drop an index from the table
|
||||||
|
operationId: dropIndex
|
||||||
|
parameters:
|
||||||
|
- $ref: "#/components/parameters/table_name"
|
||||||
|
- $ref: "#/components/parameters/index_name"
|
||||||
|
responses:
|
||||||
|
"200":
|
||||||
|
description: Index successfully dropped
|
||||||
|
"400":
|
||||||
|
$ref: "#/components/responses/invalid_request"
|
||||||
|
"401":
|
||||||
|
$ref: "#/components/responses/unauthorized"
|
||||||
|
"404":
|
||||||
|
$ref: "#/components/responses/not_found"
|
||||||
@@ -18,24 +18,23 @@ See the [indexing](concepts/index_ivfpq.md) concepts guide for more information
|
|||||||
Lance supports `IVF_PQ` index type by default.
|
Lance supports `IVF_PQ` index type by default.
|
||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method.
|
Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||||
import numpy as np
|
--8<-- "python/python/tests/docs/test_guide_index.py:import-numpy"
|
||||||
uri = "data/sample-lancedb"
|
--8<-- "python/python/tests/docs/test_guide_index.py:create_ann_index"
|
||||||
db = lancedb.connect(uri)
|
```
|
||||||
|
=== "Async API"
|
||||||
|
Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method.
|
||||||
|
|
||||||
# Create 10,000 sample vectors
|
```python
|
||||||
data = [{"vector": row, "item": f"item {i}"}
|
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||||
for i, row in enumerate(np.random.random((10_000, 1536)).astype('float32'))]
|
--8<-- "python/python/tests/docs/test_guide_index.py:import-numpy"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-ivfpq"
|
||||||
# Add the vectors to a table
|
--8<-- "python/python/tests/docs/test_guide_index.py:create_ann_index_async"
|
||||||
tbl = db.create_table("my_vectors", data=data)
|
|
||||||
|
|
||||||
# Create and train the index - you need to have enough data in the table for an effective training step
|
|
||||||
tbl.create_index(num_partitions=256, num_sub_vectors=96)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "TypeScript"
|
=== "TypeScript"
|
||||||
@@ -127,6 +126,8 @@ You can specify the GPU device to train IVF partitions via
|
|||||||
accelerator="mps"
|
accelerator="mps"
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
!!! note
|
||||||
|
GPU based indexing is not yet supported with our asynchronous client.
|
||||||
|
|
||||||
Troubleshooting:
|
Troubleshooting:
|
||||||
|
|
||||||
@@ -152,13 +153,15 @@ There are a couple of parameters that can be used to fine-tune the search:
|
|||||||
|
|
||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
tbl.search(np.random.random((1536))) \
|
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search"
|
||||||
.limit(2) \
|
```
|
||||||
.nprobes(20) \
|
=== "Async API"
|
||||||
.refine_factor(10) \
|
|
||||||
.to_pandas()
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
```text
|
```text
|
||||||
@@ -196,9 +199,15 @@ The search will return the data requested in addition to the distance of each it
|
|||||||
You can further filter the elements returned by a search using a where clause.
|
You can further filter the elements returned by a search using a where clause.
|
||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
tbl.search(np.random.random((1536))).where("item != 'item 1141'").to_pandas()
|
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_filter"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async_with_filter"
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "TypeScript"
|
=== "TypeScript"
|
||||||
@@ -221,10 +230,16 @@ You can select the columns returned by the query using a select clause.
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
```python
|
=== "Sync API"
|
||||||
tbl.search(np.random.random((1536))).select(["vector"]).to_pandas()
|
|
||||||
```
|
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_select"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async_with_select"
|
||||||
|
```
|
||||||
|
|
||||||
```text
|
```text
|
||||||
vector _distance
|
vector _distance
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ import * as vectordb from "vectordb";
|
|||||||
// --8<-- [end:import]
|
// --8<-- [end:import]
|
||||||
|
|
||||||
(async () => {
|
(async () => {
|
||||||
|
console.log("ann_indexes.ts: start");
|
||||||
// --8<-- [start:ingest]
|
// --8<-- [start:ingest]
|
||||||
const db = await vectordb.connect("data/sample-lancedb");
|
const db = await vectordb.connect("data/sample-lancedb");
|
||||||
|
|
||||||
@@ -49,5 +50,5 @@ import * as vectordb from "vectordb";
|
|||||||
.execute();
|
.execute();
|
||||||
// --8<-- [end:search3]
|
// --8<-- [end:search3]
|
||||||
|
|
||||||
console.log("Ann indexes: done");
|
console.log("ann_indexes.ts: done");
|
||||||
})();
|
})();
|
||||||
|
|||||||
BIN
docs/src/assets/maxsim.png
Normal file
BIN
docs/src/assets/maxsim.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 10 KiB |
@@ -133,11 +133,20 @@ recommend switching to stable releases.
|
|||||||
## Connect to a database
|
## Connect to a database
|
||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:imports"
|
--8<-- "python/python/tests/docs/test_basic.py:imports"
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:connect"
|
|
||||||
|
|
||||||
|
--8<-- "python/python/tests/docs/test_basic.py:set_uri"
|
||||||
|
--8<-- "python/python/tests/docs/test_basic.py:connect"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_basic.py:imports"
|
||||||
|
|
||||||
|
--8<-- "python/python/tests/docs/test_basic.py:set_uri"
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:connect_async"
|
--8<-- "python/python/tests/docs/test_basic.py:connect_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -183,19 +192,31 @@ table.
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
```python
|
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:create_table"
|
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:create_table_async"
|
|
||||||
```
|
|
||||||
|
|
||||||
If the table already exists, LanceDB will raise an error by default.
|
If the table already exists, LanceDB will raise an error by default.
|
||||||
If you want to overwrite the table, you can pass in `mode="overwrite"`
|
If you want to overwrite the table, you can pass in `mode="overwrite"`
|
||||||
to the `create_table` method.
|
to the `create_table` method.
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_basic.py:create_table"
|
||||||
|
```
|
||||||
|
|
||||||
You can also pass in a pandas DataFrame directly:
|
You can also pass in a pandas DataFrame directly:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:create_table_pandas"
|
--8<-- "python/python/tests/docs/test_basic.py:create_table_pandas"
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_basic.py:create_table_async"
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also pass in a pandas DataFrame directly:
|
||||||
|
|
||||||
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:create_table_async_pandas"
|
--8<-- "python/python/tests/docs/test_basic.py:create_table_async_pandas"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -247,8 +268,14 @@ similar to a `CREATE TABLE` statement in SQL.
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:create_empty_table"
|
--8<-- "python/python/tests/docs/test_basic.py:create_empty_table"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:create_empty_table_async"
|
--8<-- "python/python/tests/docs/test_basic.py:create_empty_table_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -281,8 +308,14 @@ Once created, you can open a table as follows:
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:open_table"
|
--8<-- "python/python/tests/docs/test_basic.py:open_table"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:open_table_async"
|
--8<-- "python/python/tests/docs/test_basic.py:open_table_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -310,8 +343,14 @@ If you forget the name of your table, you can always get a listing of all table
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:table_names"
|
--8<-- "python/python/tests/docs/test_basic.py:table_names"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:table_names_async"
|
--8<-- "python/python/tests/docs/test_basic.py:table_names_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -340,8 +379,14 @@ After a table has been created, you can always add more data to it as follows:
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:add_data"
|
--8<-- "python/python/tests/docs/test_basic.py:add_data"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:add_data_async"
|
--8<-- "python/python/tests/docs/test_basic.py:add_data_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -370,8 +415,14 @@ Once you've embedded the query, you can find its nearest neighbors as follows:
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:vector_search"
|
--8<-- "python/python/tests/docs/test_basic.py:vector_search"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:vector_search_async"
|
--8<-- "python/python/tests/docs/test_basic.py:vector_search_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -412,8 +463,14 @@ LanceDB allows you to create an ANN index on a table as follows:
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
```py
|
=== "Sync API"
|
||||||
|
|
||||||
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:create_index"
|
--8<-- "python/python/tests/docs/test_basic.py:create_index"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:create_index_async"
|
--8<-- "python/python/tests/docs/test_basic.py:create_index_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -451,8 +508,14 @@ This can delete any number of rows that match the filter.
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:delete_rows"
|
--8<-- "python/python/tests/docs/test_basic.py:delete_rows"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:delete_rows_async"
|
--8<-- "python/python/tests/docs/test_basic.py:delete_rows_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -483,7 +546,10 @@ simple or complex as needed. To see what expressions are supported, see the
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
Read more: [lancedb.table.Table.delete][]
|
Read more: [lancedb.table.Table.delete][]
|
||||||
|
=== "Async API"
|
||||||
|
Read more: [lancedb.table.AsyncTable.delete][]
|
||||||
|
|
||||||
=== "Typescript[^1]"
|
=== "Typescript[^1]"
|
||||||
|
|
||||||
@@ -505,8 +571,14 @@ Use the `drop_table()` method on the database to remove a table.
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:drop_table"
|
--8<-- "python/python/tests/docs/test_basic.py:drop_table"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
|
--8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -543,10 +615,17 @@ You can use the embedding API when working with embedding models. It automatical
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_embeddings_optional.py:imports"
|
--8<-- "python/python/tests/docs/test_embeddings_optional.py:imports"
|
||||||
|
|
||||||
--8<-- "python/python/tests/docs/test_embeddings_optional.py:openai_embeddings"
|
--8<-- "python/python/tests/docs/test_embeddings_optional.py:openai_embeddings"
|
||||||
```
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
Coming soon to the async API.
|
||||||
|
https://github.com/lancedb/lancedb/issues/1938
|
||||||
|
|
||||||
=== "Typescript[^1]"
|
=== "Typescript[^1]"
|
||||||
|
|
||||||
|
|||||||
@@ -107,7 +107,6 @@ const example = async () => {
|
|||||||
// --8<-- [start:search]
|
// --8<-- [start:search]
|
||||||
const query = await tbl.search([100, 100]).limit(2).execute();
|
const query = await tbl.search([100, 100]).limit(2).execute();
|
||||||
// --8<-- [end:search]
|
// --8<-- [end:search]
|
||||||
console.log(query);
|
|
||||||
|
|
||||||
// --8<-- [start:delete]
|
// --8<-- [start:delete]
|
||||||
await tbl.delete('item = "fizz"');
|
await tbl.delete('item = "fizz"');
|
||||||
@@ -119,8 +118,9 @@ const example = async () => {
|
|||||||
};
|
};
|
||||||
|
|
||||||
async function main() {
|
async function main() {
|
||||||
|
console.log("basic_legacy.ts: start");
|
||||||
await example();
|
await example();
|
||||||
console.log("Basic example: done");
|
console.log("basic_legacy.ts: done");
|
||||||
}
|
}
|
||||||
|
|
||||||
main();
|
main();
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ Approximate Nearest Neighbor (ANN) search is a method for finding data points ne
|
|||||||
There are three main types of ANN search algorithms:
|
There are three main types of ANN search algorithms:
|
||||||
|
|
||||||
* **Tree-based search algorithms**: Use a tree structure to organize and store data points.
|
* **Tree-based search algorithms**: Use a tree structure to organize and store data points.
|
||||||
* * **Hash-based search algorithms**: Use a specialized geometric hash table to store and manage data points. These algorithms typically focus on theoretical guarantees, and don't usually perform as well as the other approaches in practice.
|
* **Hash-based search algorithms**: Use a specialized geometric hash table to store and manage data points. These algorithms typically focus on theoretical guarantees, and don't usually perform as well as the other approaches in practice.
|
||||||
* **Graph-based search algorithms**: Use a graph structure to store data points, which can be a bit complex.
|
* **Graph-based search algorithms**: Use a graph structure to store data points, which can be a bit complex.
|
||||||
|
|
||||||
HNSW is a graph-based algorithm. All graph-based search algorithms rely on the idea of a k-nearest neighbor (or k-approximate nearest neighbor) graph, which we outline below.
|
HNSW is a graph-based algorithm. All graph-based search algorithms rely on the idea of a k-nearest neighbor (or k-approximate nearest neighbor) graph, which we outline below.
|
||||||
|
|||||||
@@ -55,6 +55,14 @@ Let's implement `SentenceTransformerEmbeddings` class. All you need to do is imp
|
|||||||
|
|
||||||
This is a stripped down version of our implementation of `SentenceTransformerEmbeddings` that removes certain optimizations and default settings.
|
This is a stripped down version of our implementation of `SentenceTransformerEmbeddings` that removes certain optimizations and default settings.
|
||||||
|
|
||||||
|
!!! danger "Use sensitive keys to prevent leaking secrets"
|
||||||
|
To prevent leaking secrets, such as API keys, you should add any sensitive
|
||||||
|
parameters of an embedding function to the output of the
|
||||||
|
[sensitive_keys()][lancedb.embeddings.base.EmbeddingFunction.sensitive_keys] /
|
||||||
|
[getSensitiveKeys()](../../js/namespaces/embedding/classes/EmbeddingFunction/#getsensitivekeys)
|
||||||
|
method. This prevents users from accidentally instantiating the embedding
|
||||||
|
function with hard-coded secrets.
|
||||||
|
|
||||||
Now you can use this embedding function to create your table schema and that's it! you can then ingest data and run queries without manually vectorizing the inputs.
|
Now you can use this embedding function to create your table schema and that's it! you can then ingest data and run queries without manually vectorizing the inputs.
|
||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|||||||
53
docs/src/embeddings/variables_and_secrets.md
Normal file
53
docs/src/embeddings/variables_and_secrets.md
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
# Variable and Secrets
|
||||||
|
|
||||||
|
Most embedding configuration options are saved in the table's metadata. However,
|
||||||
|
this isn't always appropriate. For example, API keys should never be stored in the
|
||||||
|
metadata. Additionally, other configuration options might be best set at runtime,
|
||||||
|
such as the `device` configuration that controls whether to use GPU or CPU for
|
||||||
|
inference. If you hardcoded this to GPU, you wouldn't be able to run the code on
|
||||||
|
a server without one.
|
||||||
|
|
||||||
|
To handle these cases, you can set variables on the embedding registry and
|
||||||
|
reference them in the embedding configuration. These variables will be available
|
||||||
|
during the runtime of your program, but not saved in the table's metadata. When
|
||||||
|
the table is loaded from a different process, the variables must be set again.
|
||||||
|
|
||||||
|
To set a variable, use the `set_var()` / `setVar()` method on the embedding registry.
|
||||||
|
To reference a variable, use the syntax `$env:VARIABLE_NAME`. If there is a default
|
||||||
|
value, you can use the syntax `$env:VARIABLE_NAME:DEFAULT_VALUE`.
|
||||||
|
|
||||||
|
## Using variables to set secrets
|
||||||
|
|
||||||
|
Sensitive configuration, such as API keys, must either be set as environment
|
||||||
|
variables or using variables on the embedding registry. If you pass in a hardcoded
|
||||||
|
value, LanceDB will raise an error. Instead, if you want to set an API key via
|
||||||
|
configuration, use a variable:
|
||||||
|
|
||||||
|
=== "Python"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_embeddings_optional.py:register_secret"
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Typescript"
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
--8<-- "nodejs/examples/embedding.test.ts:register_secret"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Using variables to set the device parameter
|
||||||
|
|
||||||
|
Many embedding functions that run locally have a `device` parameter that controls
|
||||||
|
whether to use GPU or CPU for inference. Because not all computers have a GPU,
|
||||||
|
it's helpful to be able to set the `device` parameter at runtime, rather than
|
||||||
|
have it hard coded in the embedding configuration. To make it work even if the
|
||||||
|
variable isn't set, you could provide a default value of `cpu` in the embedding
|
||||||
|
configuration.
|
||||||
|
|
||||||
|
Some embedding libraries even have a method to detect which devices are available,
|
||||||
|
which could be used to dynamically set the device at runtime. For example, in Python
|
||||||
|
you can check if a CUDA GPU is available using `torch.cuda.is_available()`.
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_embeddings_optional.py:register_device"
|
||||||
|
```
|
||||||
1
docs/src/extra_js/reo.js
Normal file
1
docs/src/extra_js/reo.js
Normal file
@@ -0,0 +1 @@
|
|||||||
|
!function(){var e,t,n;e="9627b71b382d201",t=function(){Reo.init({clientID:"9627b71b382d201"})},(n=document.createElement("script")).src="https://static.reo.dev/"+e+"/reo.js",n.defer=!0,n.onload=t,document.head.appendChild(n)}();
|
||||||
@@ -10,27 +10,19 @@ LanceDB provides support for full-text search via Lance, allowing you to incorpo
|
|||||||
Consider that we have a LanceDB table named `my_table`, whose string column `text` we want to index and query via keyword search, the FTS index must be created before you can search via keywords.
|
Consider that we have a LanceDB table named `my_table`, whose string column `text` we want to index and query via keyword search, the FTS index must be created before you can search via keywords.
|
||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:basic_fts"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
uri = "data/sample-lancedb"
|
```python
|
||||||
db = lancedb.connect(uri)
|
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
|
||||||
table = db.create_table(
|
--8<-- "python/python/tests/docs/test_search.py:basic_fts_async"
|
||||||
"my_table",
|
|
||||||
data=[
|
|
||||||
{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"},
|
|
||||||
{"vector": [5.9, 26.5], "text": "There are several kittens playing"},
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
||||||
# passing `use_tantivy=False` to use lance FTS index
|
|
||||||
# `use_tantivy=True` by default
|
|
||||||
table.create_fts_index("text", use_tantivy=False)
|
|
||||||
table.search("puppy").limit(10).select(["text"]).to_list()
|
|
||||||
# [{'text': 'Frodo was a happy puppy', '_score': 0.6931471824645996}]
|
|
||||||
# ...
|
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "TypeScript"
|
=== "TypeScript"
|
||||||
@@ -93,8 +85,15 @@ By default the text is tokenized by splitting on punctuation and whitespaces, an
|
|||||||
Stemming is useful for improving search results by reducing words to their root form, e.g. "running" to "run". LanceDB supports stemming for multiple languages, you can specify the tokenizer name to enable stemming by the pattern `tokenizer_name="{language_code}_stem"`, e.g. `en_stem` for English.
|
Stemming is useful for improving search results by reducing words to their root form, e.g. "running" to "run". LanceDB supports stemming for multiple languages, you can specify the tokenizer name to enable stemming by the pattern `tokenizer_name="{language_code}_stem"`, e.g. `en_stem` for English.
|
||||||
|
|
||||||
For example, to enable stemming for English:
|
For example, to enable stemming for English:
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
table.create_fts_index("text", use_tantivy=True, tokenizer_name="en_stem")
|
--8<-- "python/python/tests/docs/test_search.py:fts_config_stem"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:fts_config_stem_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
the following [languages](https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html) are currently supported.
|
the following [languages](https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html) are currently supported.
|
||||||
@@ -102,12 +101,15 @@ the following [languages](https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.
|
|||||||
The tokenizer is customizable, you can specify how the tokenizer splits the text, and how it filters out words, etc.
|
The tokenizer is customizable, you can specify how the tokenizer splits the text, and how it filters out words, etc.
|
||||||
|
|
||||||
For example, for language with accents, you can specify the tokenizer to use `ascii_folding` to remove accents, e.g. 'é' to 'e':
|
For example, for language with accents, you can specify the tokenizer to use `ascii_folding` to remove accents, e.g. 'é' to 'e':
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
table.create_fts_index("text",
|
--8<-- "python/python/tests/docs/test_search.py:fts_config_folding"
|
||||||
use_tantivy=False,
|
```
|
||||||
language="French",
|
=== "Async API"
|
||||||
stem=True,
|
|
||||||
ascii_folding=True)
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:fts_config_folding_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
## Filtering
|
## Filtering
|
||||||
@@ -119,8 +121,15 @@ This can be invoked via the familiar `where` syntax.
|
|||||||
With pre-filtering:
|
With pre-filtering:
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
table.search("puppy").limit(10).where("meta='foo'", prefilte=True).to_list()
|
--8<-- "python/python/tests/docs/test_search.py:fts_prefiltering"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:fts_prefiltering_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "TypeScript"
|
=== "TypeScript"
|
||||||
@@ -151,8 +160,15 @@ With pre-filtering:
|
|||||||
With post-filtering:
|
With post-filtering:
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
table.search("puppy").limit(10).where("meta='foo'", prefilte=False).to_list()
|
--8<-- "python/python/tests/docs/test_search.py:fts_postfiltering"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:fts_postfiltering_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "TypeScript"
|
=== "TypeScript"
|
||||||
@@ -191,8 +207,15 @@ or a **terms** search query like `old man sea`. For more details on the terms
|
|||||||
query syntax, see Tantivy's [query parser rules](https://docs.rs/tantivy/latest/tantivy/query/struct.QueryParser.html).
|
query syntax, see Tantivy's [query parser rules](https://docs.rs/tantivy/latest/tantivy/query/struct.QueryParser.html).
|
||||||
|
|
||||||
To search for a phrase, the index must be created with `with_position=True`:
|
To search for a phrase, the index must be created with `with_position=True`:
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
table.create_fts_index("text", use_tantivy=False, with_position=True)
|
--8<-- "python/python/tests/docs/test_search.py:fts_with_position"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:fts_with_position_async"
|
||||||
```
|
```
|
||||||
This will allow you to search for phrases, but it will also significantly increase the index size and indexing time.
|
This will allow you to search for phrases, but it will also significantly increase the index size and indexing time.
|
||||||
|
|
||||||
@@ -205,9 +228,15 @@ This can make the query more efficient, especially when the table is large and t
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
table.add([{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"}])
|
--8<-- "python/python/tests/docs/test_search.py:fts_incremental_index"
|
||||||
table.optimize()
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:fts_incremental_index_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "TypeScript"
|
=== "TypeScript"
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
LanceDB also provides support for full-text search via [Tantivy](https://github.com/quickwit-oss/tantivy), allowing you to incorporate keyword-based search (based on BM25) in your retrieval solutions.
|
LanceDB also provides support for full-text search via [Tantivy](https://github.com/quickwit-oss/tantivy), allowing you to incorporate keyword-based search (based on BM25) in your retrieval solutions.
|
||||||
|
|
||||||
The tantivy-based FTS is only available in Python and does not support building indexes on object storage or incremental indexing. If you need these features, try native FTS [native FTS](fts.md).
|
The tantivy-based FTS is only available in Python synchronous APIs and does not support building indexes on object storage or incremental indexing. If you need these features, try native FTS [native FTS](fts.md).
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
|
|||||||
@@ -32,18 +32,19 @@ over scalar columns.
|
|||||||
### Create a scalar index
|
### Create a scalar index
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
```python
|
=== "Sync API"
|
||||||
import lancedb
|
|
||||||
books = [
|
|
||||||
{"book_id": 1, "publisher": "plenty of books", "tags": ["fantasy", "adventure"]},
|
|
||||||
{"book_id": 2, "publisher": "book town", "tags": ["non-fiction"]},
|
|
||||||
{"book_id": 3, "publisher": "oreilly", "tags": ["textbook"]}
|
|
||||||
]
|
|
||||||
|
|
||||||
db = lancedb.connect("./db")
|
```python
|
||||||
table = db.create_table("books", books)
|
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||||
table.create_scalar_index("book_id") # BTree by default
|
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-btree-bitmap"
|
||||||
table.create_scalar_index("publisher", index_type="BITMAP")
|
--8<-- "python/python/tests/docs/test_guide_index.py:basic_scalar_index"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-btree-bitmap"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_index.py:basic_scalar_index_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "Typescript"
|
=== "Typescript"
|
||||||
@@ -62,11 +63,17 @@ The following scan will be faster if the column `book_id` has a scalar index:
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
```python
|
=== "Sync API"
|
||||||
import lancedb
|
|
||||||
|
|
||||||
table = db.open_table("books")
|
```python
|
||||||
my_df = table.search().where("book_id = 2").to_pandas()
|
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_index.py:search_with_scalar_index"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_index.py:search_with_scalar_index_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "Typescript"
|
=== "Typescript"
|
||||||
@@ -88,21 +95,17 @@ Scalar indices can also speed up scans containing a vector search or full text s
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_scalar_index"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
data = [
|
```python
|
||||||
{"book_id": 1, "vector": [1, 2]},
|
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||||
{"book_id": 2, "vector": [3, 4]},
|
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_scalar_index_async"
|
||||||
{"book_id": 3, "vector": [5, 6]}
|
|
||||||
]
|
|
||||||
table = db.create_table("book_with_embeddings", data)
|
|
||||||
|
|
||||||
(
|
|
||||||
table.search([1, 2])
|
|
||||||
.where("book_id != 3", prefilter=True)
|
|
||||||
.to_pandas()
|
|
||||||
)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "Typescript"
|
=== "Typescript"
|
||||||
@@ -122,9 +125,15 @@ Scalar indices can also speed up scans containing a vector search or full text s
|
|||||||
Updating the table data (adding, deleting, or modifying records) requires that you also update the scalar index. This can be done by calling `optimize`, which will trigger an update to the existing scalar index.
|
Updating the table data (adding, deleting, or modifying records) requires that you also update the scalar index. This can be done by calling `optimize`, which will trigger an update to the existing scalar index.
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
table.add([{"vector": [7, 8], "book_id": 4}])
|
--8<-- "python/python/tests/docs/test_guide_index.py:update_scalar_index"
|
||||||
table.optimize()
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_index.py:update_scalar_index_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "TypeScript"
|
=== "TypeScript"
|
||||||
|
|||||||
@@ -12,26 +12,50 @@ LanceDB OSS supports object stores such as AWS S3 (and compatible stores), Azure
|
|||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
AWS S3:
|
AWS S3:
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
import lancedb
|
||||||
db = lancedb.connect("s3://bucket/path")
|
db = lancedb.connect("s3://bucket/path")
|
||||||
```
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
async_db = await lancedb.connect_async("s3://bucket/path")
|
||||||
|
```
|
||||||
|
|
||||||
Google Cloud Storage:
|
Google Cloud Storage:
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
import lancedb
|
||||||
db = lancedb.connect("gs://bucket/path")
|
db = lancedb.connect("gs://bucket/path")
|
||||||
```
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
async_db = await lancedb.connect_async("gs://bucket/path")
|
||||||
|
```
|
||||||
|
|
||||||
Azure Blob Storage:
|
Azure Blob Storage:
|
||||||
|
|
||||||
<!-- skip-test -->
|
<!-- skip-test -->
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
import lancedb
|
||||||
db = lancedb.connect("az://bucket/path")
|
db = lancedb.connect("az://bucket/path")
|
||||||
```
|
```
|
||||||
|
<!-- skip-test -->
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
async_db = await lancedb.connect_async("az://bucket/path")
|
||||||
|
```
|
||||||
Note that for Azure, storage credentials must be configured. See [below](#azure-blob-storage) for more details.
|
Note that for Azure, storage credentials must be configured. See [below](#azure-blob-storage) for more details.
|
||||||
|
|
||||||
|
|
||||||
@@ -94,9 +118,20 @@ If you only want this to apply to one particular connection, you can pass the `s
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
import lancedb
|
||||||
db = await lancedb.connect_async(
|
db = lancedb.connect(
|
||||||
|
"s3://bucket/path",
|
||||||
|
storage_options={"timeout": "60s"}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
async_db = await lancedb.connect_async(
|
||||||
"s3://bucket/path",
|
"s3://bucket/path",
|
||||||
storage_options={"timeout": "60s"}
|
storage_options={"timeout": "60s"}
|
||||||
)
|
)
|
||||||
@@ -128,10 +163,24 @@ Getting even more specific, you can set the `timeout` for only a particular tabl
|
|||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
<!-- skip-test -->
|
<!-- skip-test -->
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
import lancedb
|
||||||
db = await lancedb.connect_async("s3://bucket/path")
|
db = lancedb.connect("s3://bucket/path")
|
||||||
table = await db.create_table(
|
table = db.create_table(
|
||||||
|
"table",
|
||||||
|
[{"a": 1, "b": 2}],
|
||||||
|
storage_options={"timeout": "60s"}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
<!-- skip-test -->
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
async_db = await lancedb.connect_async("s3://bucket/path")
|
||||||
|
async_table = await async_db.create_table(
|
||||||
"table",
|
"table",
|
||||||
[{"a": 1, "b": 2}],
|
[{"a": 1, "b": 2}],
|
||||||
storage_options={"timeout": "60s"}
|
storage_options={"timeout": "60s"}
|
||||||
@@ -194,9 +243,24 @@ These can be set as environment variables or passed in the `storage_options` par
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
import lancedb
|
||||||
db = await lancedb.connect_async(
|
db = lancedb.connect(
|
||||||
|
"s3://bucket/path",
|
||||||
|
storage_options={
|
||||||
|
"aws_access_key_id": "my-access-key",
|
||||||
|
"aws_secret_access_key": "my-secret-key",
|
||||||
|
"aws_session_token": "my-session-token",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
async_db = await lancedb.connect_async(
|
||||||
"s3://bucket/path",
|
"s3://bucket/path",
|
||||||
storage_options={
|
storage_options={
|
||||||
"aws_access_key_id": "my-access-key",
|
"aws_access_key_id": "my-access-key",
|
||||||
@@ -348,9 +412,19 @@ name of the table to use.
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
import lancedb
|
||||||
db = await lancedb.connect_async(
|
db = lancedb.connect(
|
||||||
|
"s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
|
||||||
|
)
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
async_db = await lancedb.connect_async(
|
||||||
"s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
|
"s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
@@ -441,9 +515,23 @@ LanceDB can also connect to S3-compatible stores, such as MinIO. To do so, you m
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
import lancedb
|
||||||
db = await lancedb.connect_async(
|
db = lancedb.connect(
|
||||||
|
"s3://bucket/path",
|
||||||
|
storage_options={
|
||||||
|
"region": "us-east-1",
|
||||||
|
"endpoint": "http://minio:9000",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
async_db = await lancedb.connect_async(
|
||||||
"s3://bucket/path",
|
"s3://bucket/path",
|
||||||
storage_options={
|
storage_options={
|
||||||
"region": "us-east-1",
|
"region": "us-east-1",
|
||||||
@@ -502,9 +590,23 @@ To configure LanceDB to use an S3 Express endpoint, you must set the storage opt
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
import lancedb
|
||||||
db = await lancedb.connect_async(
|
db = lancedb.connect(
|
||||||
|
"s3://my-bucket--use1-az4--x-s3/path",
|
||||||
|
storage_options={
|
||||||
|
"region": "us-east-1",
|
||||||
|
"s3_express": "true",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
async_db = await lancedb.connect_async(
|
||||||
"s3://my-bucket--use1-az4--x-s3/path",
|
"s3://my-bucket--use1-az4--x-s3/path",
|
||||||
storage_options={
|
storage_options={
|
||||||
"region": "us-east-1",
|
"region": "us-east-1",
|
||||||
@@ -552,9 +654,23 @@ GCS credentials are configured by setting the `GOOGLE_SERVICE_ACCOUNT` environme
|
|||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
<!-- skip-test -->
|
<!-- skip-test -->
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
import lancedb
|
||||||
db = await lancedb.connect_async(
|
db = lancedb.connect(
|
||||||
|
"gs://my-bucket/my-database",
|
||||||
|
storage_options={
|
||||||
|
"service_account": "path/to/service-account.json",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
<!-- skip-test -->
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
async_db = await lancedb.connect_async(
|
||||||
"gs://my-bucket/my-database",
|
"gs://my-bucket/my-database",
|
||||||
storage_options={
|
storage_options={
|
||||||
"service_account": "path/to/service-account.json",
|
"service_account": "path/to/service-account.json",
|
||||||
@@ -612,9 +728,24 @@ Azure Blob Storage credentials can be configured by setting the `AZURE_STORAGE_A
|
|||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
<!-- skip-test -->
|
<!-- skip-test -->
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
import lancedb
|
||||||
db = await lancedb.connect_async(
|
db = lancedb.connect(
|
||||||
|
"az://my-container/my-database",
|
||||||
|
storage_options={
|
||||||
|
account_name: "some-account",
|
||||||
|
account_key: "some-key",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
<!-- skip-test -->
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
async_db = await lancedb.connect_async(
|
||||||
"az://my-container/my-database",
|
"az://my-container/my-database",
|
||||||
storage_options={
|
storage_options={
|
||||||
account_name: "some-account",
|
account_name: "some-account",
|
||||||
|
|||||||
@@ -12,9 +12,17 @@ Initialize a LanceDB connection and create a table
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||||
db = lancedb.connect("./.lancedb")
|
--8<-- "python/python/tests/docs/test_guide_tables.py:connect"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:connect_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
LanceDB allows ingesting data from various sources - `dict`, `list[dict]`, `pd.DataFrame`, `pa.Table` or a `Iterator[pa.RecordBatch]`. Let's take a look at some of the these.
|
LanceDB allows ingesting data from various sources - `dict`, `list[dict]`, `pd.DataFrame`, `pa.Table` or a `Iterator[pa.RecordBatch]`. Let's take a look at some of the these.
|
||||||
@@ -47,17 +55,15 @@ Initialize a LanceDB connection and create a table
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
db = lancedb.connect("./.lancedb")
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async"
|
||||||
data = [{"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7},
|
|
||||||
{"vector": [0.2, 1.8], "lat": 40.1, "long": -74.1}]
|
|
||||||
|
|
||||||
db.create_table("my_table", data)
|
|
||||||
|
|
||||||
db["my_table"].head()
|
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! info "Note"
|
!!! info "Note"
|
||||||
@@ -67,15 +73,29 @@ Initialize a LanceDB connection and create a table
|
|||||||
and the table exists, then it simply opens the existing table. The data you
|
and the table exists, then it simply opens the existing table. The data you
|
||||||
passed in will NOT be appended to the table in that case.
|
passed in will NOT be appended to the table in that case.
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
db.create_table("name", data, exist_ok=True)
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_exist_ok"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_exist_ok"
|
||||||
```
|
```
|
||||||
|
|
||||||
Sometimes you want to make sure that you start fresh. If you want to
|
Sometimes you want to make sure that you start fresh. If you want to
|
||||||
overwrite the table, you can pass in mode="overwrite" to the createTable function.
|
overwrite the table, you can pass in mode="overwrite" to the createTable function.
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
db.create_table("name", data, mode="overwrite")
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_overwrite"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_overwrite"
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "Typescript[^1]"
|
=== "Typescript[^1]"
|
||||||
@@ -146,18 +166,18 @@ Initialize a LanceDB connection and create a table
|
|||||||
|
|
||||||
### From a Pandas DataFrame
|
### From a Pandas DataFrame
|
||||||
|
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import pandas as pd
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_pandas"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
data = pd.DataFrame({
|
```python
|
||||||
"vector": [[1.1, 1.2, 1.3, 1.4], [0.2, 1.8, 0.4, 3.6]],
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
|
||||||
"lat": [45.5, 40.1],
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_pandas"
|
||||||
"long": [-122.7, -74.1]
|
|
||||||
})
|
|
||||||
|
|
||||||
db.create_table("my_table", data)
|
|
||||||
|
|
||||||
db["my_table"].head()
|
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! info "Note"
|
!!! info "Note"
|
||||||
@@ -165,14 +185,17 @@ db["my_table"].head()
|
|||||||
|
|
||||||
The **`vector`** column needs to be a [Vector](../python/pydantic.md#vector-field) (defined as [pyarrow.FixedSizeList](https://arrow.apache.org/docs/python/generated/pyarrow.list_.html)) type.
|
The **`vector`** column needs to be a [Vector](../python/pydantic.md#vector-field) (defined as [pyarrow.FixedSizeList](https://arrow.apache.org/docs/python/generated/pyarrow.list_.html)) type.
|
||||||
|
|
||||||
```python
|
=== "Sync API"
|
||||||
custom_schema = pa.schema([
|
|
||||||
pa.field("vector", pa.list_(pa.float32(), 4)),
|
|
||||||
pa.field("lat", pa.float32()),
|
|
||||||
pa.field("long", pa.float32())
|
|
||||||
])
|
|
||||||
|
|
||||||
table = db.create_table("my_table", data, schema=custom_schema)
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_custom_schema"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_custom_schema"
|
||||||
```
|
```
|
||||||
|
|
||||||
### From a Polars DataFrame
|
### From a Polars DataFrame
|
||||||
@@ -182,15 +205,17 @@ written in Rust. Just like in Pandas, the Polars integration is enabled by PyArr
|
|||||||
under the hood. A deeper integration between LanceDB Tables and Polars DataFrames
|
under the hood. A deeper integration between LanceDB Tables and Polars DataFrames
|
||||||
is on the way.
|
is on the way.
|
||||||
|
|
||||||
```python
|
=== "Sync API"
|
||||||
import polars as pl
|
|
||||||
|
|
||||||
data = pl.DataFrame({
|
```python
|
||||||
"vector": [[3.1, 4.1], [5.9, 26.5]],
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-polars"
|
||||||
"item": ["foo", "bar"],
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_polars"
|
||||||
"price": [10.0, 20.0]
|
```
|
||||||
})
|
=== "Async API"
|
||||||
table = db.create_table("pl_table", data=data)
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-polars"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_polars"
|
||||||
```
|
```
|
||||||
|
|
||||||
### From an Arrow Table
|
### From an Arrow Table
|
||||||
@@ -198,28 +223,19 @@ You can also create LanceDB tables directly from Arrow tables.
|
|||||||
LanceDB supports float16 data type!
|
LanceDB supports float16 data type!
|
||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import pyarrows as pa
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||||
import numpy as np
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-numpy"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_arrow_table"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
dim = 16
|
```python
|
||||||
total = 2
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-polars"
|
||||||
schema = pa.schema(
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-numpy"
|
||||||
[
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_arrow_table"
|
||||||
pa.field("vector", pa.list_(pa.float16(), dim)),
|
|
||||||
pa.field("text", pa.string())
|
|
||||||
]
|
|
||||||
)
|
|
||||||
data = pa.Table.from_arrays(
|
|
||||||
[
|
|
||||||
pa.array([np.random.randn(dim).astype(np.float16) for _ in range(total)],
|
|
||||||
pa.list_(pa.float16(), dim)),
|
|
||||||
pa.array(["foo", "bar"])
|
|
||||||
],
|
|
||||||
["vector", "text"],
|
|
||||||
)
|
|
||||||
tbl = db.create_table("f16_tbl", data, schema=schema)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "Typescript[^1]"
|
=== "Typescript[^1]"
|
||||||
@@ -250,24 +266,21 @@ can be configured with the vector dimensions. It is also important to note that
|
|||||||
LanceDB only understands subclasses of `lancedb.pydantic.LanceModel`
|
LanceDB only understands subclasses of `lancedb.pydantic.LanceModel`
|
||||||
(which itself derives from `pydantic.BaseModel`).
|
(which itself derives from `pydantic.BaseModel`).
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from lancedb.pydantic import Vector, LanceModel
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:class-Content"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_pydantic"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
class Content(LanceModel):
|
```python
|
||||||
movie_id: int
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
|
||||||
vector: Vector(128)
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||||
genres: str
|
--8<-- "python/python/tests/docs/test_guide_tables.py:class-Content"
|
||||||
title: str
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_pydantic"
|
||||||
imdb_id: int
|
|
||||||
|
|
||||||
@property
|
|
||||||
def imdb_url(self) -> str:
|
|
||||||
return f"https://www.imdb.com/title/tt{self.imdb_id}"
|
|
||||||
|
|
||||||
import pyarrow as pa
|
|
||||||
db = lancedb.connect("~/.lancedb")
|
|
||||||
table_name = "movielens_small"
|
|
||||||
table = db.create_table(table_name, schema=Content)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Nested schemas
|
#### Nested schemas
|
||||||
@@ -277,22 +290,24 @@ For example, you may want to store the document string
|
|||||||
and the document source name as a nested Document object:
|
and the document source name as a nested Document object:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class Document(BaseModel):
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pydantic-basemodel"
|
||||||
content: str
|
--8<-- "python/python/tests/docs/test_guide_tables.py:class-Document"
|
||||||
source: str
|
|
||||||
```
|
```
|
||||||
|
|
||||||
This can be used as the type of a LanceDB table column:
|
This can be used as the type of a LanceDB table column:
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class NestedSchema(LanceModel):
|
--8<-- "python/python/tests/docs/test_guide_tables.py:class-NestedSchema"
|
||||||
id: str
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_nested_schema"
|
||||||
vector: Vector(1536)
|
|
||||||
document: Document
|
|
||||||
|
|
||||||
tbl = db.create_table("nested_table", schema=NestedSchema, mode="overwrite")
|
|
||||||
```
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:class-NestedSchema"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_nested_schema"
|
||||||
|
```
|
||||||
This creates a struct column called "document" that has two subfields
|
This creates a struct column called "document" that has two subfields
|
||||||
called "content" and "source":
|
called "content" and "source":
|
||||||
|
|
||||||
@@ -356,28 +371,19 @@ LanceDB additionally supports PyArrow's `RecordBatch` Iterators or other generat
|
|||||||
|
|
||||||
Here's an example using using `RecordBatch` iterator for creating tables.
|
Here's an example using using `RecordBatch` iterator for creating tables.
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import pyarrow as pa
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:make_batches"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_batch"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
def make_batches():
|
```python
|
||||||
for i in range(5):
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||||
yield pa.RecordBatch.from_arrays(
|
--8<-- "python/python/tests/docs/test_guide_tables.py:make_batches"
|
||||||
[
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_batch"
|
||||||
pa.array([[3.1, 4.1, 5.1, 6.1], [5.9, 26.5, 4.7, 32.8]],
|
|
||||||
pa.list_(pa.float32(), 4)),
|
|
||||||
pa.array(["foo", "bar"]),
|
|
||||||
pa.array([10.0, 20.0]),
|
|
||||||
],
|
|
||||||
["vector", "item", "price"],
|
|
||||||
)
|
|
||||||
|
|
||||||
schema = pa.schema([
|
|
||||||
pa.field("vector", pa.list_(pa.float32(), 4)),
|
|
||||||
pa.field("item", pa.utf8()),
|
|
||||||
pa.field("price", pa.float32()),
|
|
||||||
])
|
|
||||||
|
|
||||||
db.create_table("batched_tale", make_batches(), schema=schema)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
You can also use iterators of other types like Pandas DataFrame or Pylists directly in the above example.
|
You can also use iterators of other types like Pandas DataFrame or Pylists directly in the above example.
|
||||||
@@ -387,14 +393,28 @@ You can also use iterators of other types like Pandas DataFrame or Pylists direc
|
|||||||
=== "Python"
|
=== "Python"
|
||||||
If you forget the name of your table, you can always get a listing of all table names.
|
If you forget the name of your table, you can always get a listing of all table names.
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
print(db.table_names())
|
--8<-- "python/python/tests/docs/test_guide_tables.py:list_tables"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:list_tables_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
Then, you can open any existing tables.
|
Then, you can open any existing tables.
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
tbl = db.open_table("my_table")
|
--8<-- "python/python/tests/docs/test_guide_tables.py:open_table"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:open_table_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "Typescript[^1]"
|
=== "Typescript[^1]"
|
||||||
@@ -418,34 +438,40 @@ You can create an empty table for scenarios where you want to add data to the ta
|
|||||||
|
|
||||||
|
|
||||||
An empty table can be initialized via a PyArrow schema.
|
An empty table can be initialized via a PyArrow schema.
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||||
import pyarrow as pa
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
schema = pa.schema(
|
```python
|
||||||
[
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||||
pa.field("vector", pa.list_(pa.float32(), 2)),
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||||
pa.field("item", pa.string()),
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table_async"
|
||||||
pa.field("price", pa.float32()),
|
|
||||||
])
|
|
||||||
tbl = db.create_table("empty_table_add", schema=schema)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Alternatively, you can also use Pydantic to specify the schema for the empty table. Note that we do not
|
Alternatively, you can also use Pydantic to specify the schema for the empty table. Note that we do not
|
||||||
directly import `pydantic` but instead use `lancedb.pydantic` which is a subclass of `pydantic.BaseModel`
|
directly import `pydantic` but instead use `lancedb.pydantic` which is a subclass of `pydantic.BaseModel`
|
||||||
that has been extended to support LanceDB specific types like `Vector`.
|
that has been extended to support LanceDB specific types like `Vector`.
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||||
from lancedb.pydantic import LanceModel, vector
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:class-Item"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table_pydantic"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
class Item(LanceModel):
|
```python
|
||||||
vector: Vector(2)
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||||
item: str
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
|
||||||
price: float
|
--8<-- "python/python/tests/docs/test_guide_tables.py:class-Item"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table_async_pydantic"
|
||||||
tbl = db.create_table("empty_table_add", schema=Item.to_arrow_schema())
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Once the empty table has been created, you can add data to it via the various methods listed in the [Adding to a table](#adding-to-a-table) section.
|
Once the empty table has been created, you can add data to it via the various methods listed in the [Adding to a table](#adding-to-a-table) section.
|
||||||
@@ -473,85 +499,95 @@ After a table has been created, you can always add more data to it using the `ad
|
|||||||
|
|
||||||
### Add a Pandas DataFrame
|
### Add a Pandas DataFrame
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
df = pd.DataFrame({
|
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_pandas"
|
||||||
"vector": [[1.3, 1.4], [9.5, 56.2]], "item": ["banana", "apple"], "price": [5.0, 7.0]
|
```
|
||||||
})
|
=== "Async API"
|
||||||
tbl.add(df)
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_pandas"
|
||||||
```
|
```
|
||||||
|
|
||||||
### Add a Polars DataFrame
|
### Add a Polars DataFrame
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
df = pl.DataFrame({
|
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_polars"
|
||||||
"vector": [[1.3, 1.4], [9.5, 56.2]], "item": ["banana", "apple"], "price": [5.0, 7.0]
|
```
|
||||||
})
|
=== "Async API"
|
||||||
tbl.add(df)
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_polars"
|
||||||
```
|
```
|
||||||
|
|
||||||
### Add an Iterator
|
### Add an Iterator
|
||||||
|
|
||||||
You can also add a large dataset batch in one go using Iterator of any supported data types.
|
You can also add a large dataset batch in one go using Iterator of any supported data types.
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def make_batches():
|
--8<-- "python/python/tests/docs/test_guide_tables.py:make_batches_for_add"
|
||||||
for i in range(5):
|
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_batch"
|
||||||
yield [
|
```
|
||||||
{"vector": [3.1, 4.1], "item": "peach", "price": 6.0},
|
=== "Async API"
|
||||||
{"vector": [5.9, 26.5], "item": "pear", "price": 5.0}
|
|
||||||
]
|
```python
|
||||||
tbl.add(make_batches())
|
--8<-- "python/python/tests/docs/test_guide_tables.py:make_batches_for_add"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_batch"
|
||||||
```
|
```
|
||||||
|
|
||||||
### Add a PyArrow table
|
### Add a PyArrow table
|
||||||
|
|
||||||
If you have data coming in as a PyArrow table, you can add it directly to the LanceDB table.
|
If you have data coming in as a PyArrow table, you can add it directly to the LanceDB table.
|
||||||
|
|
||||||
```python
|
=== "Sync API"
|
||||||
pa_table = pa.Table.from_arrays(
|
|
||||||
[
|
|
||||||
pa.array([[9.1, 6.7], [9.9, 31.2]],
|
|
||||||
pa.list_(pa.float32(), 2)),
|
|
||||||
pa.array(["mango", "orange"]),
|
|
||||||
pa.array([7.0, 4.0]),
|
|
||||||
],
|
|
||||||
["vector", "item", "price"],
|
|
||||||
)
|
|
||||||
|
|
||||||
tbl.add(pa_table)
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_pyarrow"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_pyarrow"
|
||||||
```
|
```
|
||||||
|
|
||||||
### Add a Pydantic Model
|
### Add a Pydantic Model
|
||||||
|
|
||||||
Assuming that a table has been created with the correct schema as shown [above](#creating-empty-table), you can add data items that are valid Pydantic models to the table.
|
Assuming that a table has been created with the correct schema as shown [above](#creating-empty-table), you can add data items that are valid Pydantic models to the table.
|
||||||
|
|
||||||
```python
|
=== "Sync API"
|
||||||
pydantic_model_items = [
|
|
||||||
Item(vector=[8.1, 4.7], item="pineapple", price=10.0),
|
|
||||||
Item(vector=[6.9, 9.3], item="avocado", price=9.0)
|
|
||||||
]
|
|
||||||
|
|
||||||
tbl.add(pydantic_model_items)
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_pydantic"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_pydantic"
|
||||||
```
|
```
|
||||||
|
|
||||||
??? "Ingesting Pydantic models with LanceDB embedding API"
|
??? "Ingesting Pydantic models with LanceDB embedding API"
|
||||||
When using LanceDB's embedding API, you can add Pydantic models directly to the table. LanceDB will automatically convert the `vector` field to a vector before adding it to the table. You need to specify the default value of `vector` field as None to allow LanceDB to automatically vectorize the data.
|
When using LanceDB's embedding API, you can add Pydantic models directly to the table. LanceDB will automatically convert the `vector` field to a vector before adding it to the table. You need to specify the default value of `vector` field as None to allow LanceDB to automatically vectorize the data.
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
|
||||||
from lancedb.embeddings import get_registry
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-embeddings"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_with_embedding"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
db = lancedb.connect("~/tmp")
|
```python
|
||||||
embed_fcn = get_registry().get("huggingface").create(name="BAAI/bge-small-en-v1.5")
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
|
||||||
class Schema(LanceModel):
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-embeddings"
|
||||||
text: str = embed_fcn.SourceField()
|
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_with_embedding"
|
||||||
vector: Vector(embed_fcn.ndims()) = embed_fcn.VectorField(default=None)
|
|
||||||
|
|
||||||
tbl = db.create_table("my_table", schema=Schema, mode="overwrite")
|
|
||||||
models = [Schema(text="hello"), Schema(text="world")]
|
|
||||||
tbl.add(models)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "Typescript[^1]"
|
=== "Typescript[^1]"
|
||||||
@@ -565,49 +601,78 @@ After a table has been created, you can always add more data to it using the `ad
|
|||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Upserting into a table
|
||||||
|
|
||||||
|
Upserting lets you insert new rows or update existing rows in a table. To upsert
|
||||||
|
in LanceDB, use the merge insert API.
|
||||||
|
|
||||||
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_merge_insert.py:upsert_basic"
|
||||||
|
```
|
||||||
|
**API Reference**: [lancedb.table.Table.merge_insert][]
|
||||||
|
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_merge_insert.py:upsert_basic_async"
|
||||||
|
```
|
||||||
|
**API Reference**: [lancedb.table.AsyncTable.merge_insert][]
|
||||||
|
|
||||||
|
=== "Typescript[^1]"
|
||||||
|
|
||||||
|
=== "@lancedb/lancedb"
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
--8<-- "nodejs/examples/merge_insert.test.ts:upsert_basic"
|
||||||
|
```
|
||||||
|
**API Reference**: [lancedb.Table.mergeInsert](../js/classes/Table.md/#mergeInsert)
|
||||||
|
|
||||||
|
Read more in the guide on [merge insert](tables/merge_insert.md).
|
||||||
|
|
||||||
## Deleting from a table
|
## Deleting from a table
|
||||||
|
|
||||||
Use the `delete()` method on tables to delete rows from a table. To choose which rows to delete, provide a filter that matches on the metadata columns. This can delete any number of rows that match the filter.
|
Use the `delete()` method on tables to delete rows from a table. To choose which rows to delete, provide a filter that matches on the metadata columns. This can delete any number of rows that match the filter.
|
||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
tbl.delete('item = "fizz"')
|
--8<-- "python/python/tests/docs/test_guide_tables.py:delete_row"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:delete_row_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
### Deleting row with specific column value
|
### Deleting row with specific column value
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
--8<-- "python/python/tests/docs/test_guide_tables.py:delete_specific_row"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
data = [{"x": 1, "vector": [1, 2]},
|
```python
|
||||||
{"x": 2, "vector": [3, 4]},
|
--8<-- "python/python/tests/docs/test_guide_tables.py:delete_specific_row_async"
|
||||||
{"x": 3, "vector": [5, 6]}]
|
|
||||||
db = lancedb.connect("./.lancedb")
|
|
||||||
table = db.create_table("my_table", data)
|
|
||||||
table.to_pandas()
|
|
||||||
# x vector
|
|
||||||
# 0 1 [1.0, 2.0]
|
|
||||||
# 1 2 [3.0, 4.0]
|
|
||||||
# 2 3 [5.0, 6.0]
|
|
||||||
|
|
||||||
table.delete("x = 2")
|
|
||||||
table.to_pandas()
|
|
||||||
# x vector
|
|
||||||
# 0 1 [1.0, 2.0]
|
|
||||||
# 1 3 [5.0, 6.0]
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Delete from a list of values
|
### Delete from a list of values
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
to_remove = [1, 5]
|
--8<-- "python/python/tests/docs/test_guide_tables.py:delete_list_values"
|
||||||
to_remove = ", ".join(str(v) for v in to_remove)
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
table.delete(f"x IN ({to_remove})")
|
```python
|
||||||
table.to_pandas()
|
--8<-- "python/python/tests/docs/test_guide_tables.py:delete_list_values_async"
|
||||||
# x vector
|
|
||||||
# 0 3 [5.0, 6.0]
|
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "Typescript[^1]"
|
=== "Typescript[^1]"
|
||||||
@@ -659,26 +724,19 @@ This can be used to update zero to all rows depending on how many rows match the
|
|||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
API Reference: [lancedb.table.Table.update][]
|
API Reference: [lancedb.table.Table.update][]
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||||
import pandas as pd
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:update_table"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
# Create a lancedb connection
|
```python
|
||||||
db = lancedb.connect("./.lancedb")
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
|
||||||
# Create a table from a pandas DataFrame
|
--8<-- "python/python/tests/docs/test_guide_tables.py:update_table_async"
|
||||||
data = pd.DataFrame({"x": [1, 2, 3], "vector": [[1, 2], [3, 4], [5, 6]]})
|
|
||||||
table = db.create_table("my_table", data)
|
|
||||||
|
|
||||||
# Update the table where x = 2
|
|
||||||
table.update(where="x = 2", values={"vector": [10, 10]})
|
|
||||||
|
|
||||||
# Get the updated table as a pandas DataFrame
|
|
||||||
df = table.to_pandas()
|
|
||||||
|
|
||||||
# Print the DataFrame
|
|
||||||
print(df)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Output
|
Output
|
||||||
@@ -734,12 +792,15 @@ This can be used to update zero to all rows depending on how many rows match the
|
|||||||
The `values` parameter is used to provide the new values for the columns as literal values. You can also use the `values_sql` / `valuesSql` parameter to provide SQL expressions for the new values. For example, you can use `values_sql="x + 1"` to increment the value of the `x` column by 1.
|
The `values` parameter is used to provide the new values for the columns as literal values. You can also use the `values_sql` / `valuesSql` parameter to provide SQL expressions for the new values. For example, you can use `values_sql="x + 1"` to increment the value of the `x` column by 1.
|
||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
# Update the table where x = 2
|
--8<-- "python/python/tests/docs/test_guide_tables.py:update_table_sql"
|
||||||
table.update(values_sql={"x": "x + 1"})
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
print(table.to_pandas())
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:update_table_sql_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
Output
|
Output
|
||||||
@@ -771,9 +832,14 @@ This can be used to update zero to all rows depending on how many rows match the
|
|||||||
Use the `drop_table()` method on the database to remove a table.
|
Use the `drop_table()` method on the database to remove a table.
|
||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:drop_table"
|
--8<-- "python/python/tests/docs/test_basic.py:drop_table"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
|
--8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -809,9 +875,16 @@ data type for it.
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:add_columns"
|
--8<-- "python/python/tests/docs/test_basic.py:add_columns"
|
||||||
```
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_basic.py:add_columns_async"
|
||||||
|
```
|
||||||
**API Reference:** [lancedb.table.Table.add_columns][]
|
**API Reference:** [lancedb.table.Table.add_columns][]
|
||||||
|
|
||||||
=== "Typescript"
|
=== "Typescript"
|
||||||
@@ -848,10 +921,18 @@ rewriting the column, which can be a heavy operation.
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import pyarrow as pa
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:alter_columns"
|
--8<-- "python/python/tests/docs/test_basic.py:alter_columns"
|
||||||
```
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||||
|
--8<-- "python/python/tests/docs/test_basic.py:alter_columns_async"
|
||||||
|
```
|
||||||
**API Reference:** [lancedb.table.Table.alter_columns][]
|
**API Reference:** [lancedb.table.Table.alter_columns][]
|
||||||
|
|
||||||
=== "Typescript"
|
=== "Typescript"
|
||||||
@@ -872,9 +953,16 @@ will remove the column from the schema.
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
--8<-- "python/python/tests/docs/test_basic.py:drop_columns"
|
--8<-- "python/python/tests/docs/test_basic.py:drop_columns"
|
||||||
```
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_basic.py:drop_columns_async"
|
||||||
|
```
|
||||||
**API Reference:** [lancedb.table.Table.drop_columns][]
|
**API Reference:** [lancedb.table.Table.drop_columns][]
|
||||||
|
|
||||||
=== "Typescript"
|
=== "Typescript"
|
||||||
@@ -925,30 +1013,45 @@ There are three possible settings for `read_consistency_interval`:
|
|||||||
|
|
||||||
To set strong consistency, use `timedelta(0)`:
|
To set strong consistency, use `timedelta(0)`:
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from datetime import timedelta
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
|
||||||
db = lancedb.connect("./.lancedb", read_consistency_interval=timedelta(0))
|
--8<-- "python/python/tests/docs/test_guide_tables.py:table_strong_consistency"
|
||||||
table = db.open_table("my_table")
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:table_async_strong_consistency"
|
||||||
```
|
```
|
||||||
|
|
||||||
For eventual consistency, use a custom `timedelta`:
|
For eventual consistency, use a custom `timedelta`:
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from datetime import timedelta
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
|
||||||
db = lancedb.connect("./.lancedb", read_consistency_interval=timedelta(seconds=5))
|
--8<-- "python/python/tests/docs/test_guide_tables.py:table_eventual_consistency"
|
||||||
table = db.open_table("my_table")
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:table_async_eventual_consistency"
|
||||||
```
|
```
|
||||||
|
|
||||||
By default, a `Table` will never check for updates from other writers. To manually check for updates you can use `checkout_latest`:
|
By default, a `Table` will never check for updates from other writers. To manually check for updates you can use `checkout_latest`:
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
db = lancedb.connect("./.lancedb")
|
--8<-- "python/python/tests/docs/test_guide_tables.py:table_checkout_latest"
|
||||||
table = db.open_table("my_table")
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
# (Other writes happen to my_table from another process)
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_guide_tables.py:table_async_checkout_latest"
|
||||||
# Check for updates
|
|
||||||
table.checkout_latest()
|
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "Typescript[^1]"
|
=== "Typescript[^1]"
|
||||||
@@ -957,14 +1060,14 @@ There are three possible settings for `read_consistency_interval`:
|
|||||||
|
|
||||||
```ts
|
```ts
|
||||||
const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 0 });
|
const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 0 });
|
||||||
const table = await db.openTable("my_table");
|
const tbl = await db.openTable("my_table");
|
||||||
```
|
```
|
||||||
|
|
||||||
For eventual consistency, specify the update interval as seconds:
|
For eventual consistency, specify the update interval as seconds:
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 5 });
|
const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 5 });
|
||||||
const table = await db.openTable("my_table");
|
const tbl = await db.openTable("my_table");
|
||||||
```
|
```
|
||||||
|
|
||||||
<!-- Node doesn't yet support the version time travel: https://github.com/lancedb/lancedb/issues/1007
|
<!-- Node doesn't yet support the version time travel: https://github.com/lancedb/lancedb/issues/1007
|
||||||
|
|||||||
135
docs/src/guides/tables/merge_insert.md
Normal file
135
docs/src/guides/tables/merge_insert.md
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
The merge insert command is a flexible API that can be used to perform:
|
||||||
|
|
||||||
|
1. Upsert
|
||||||
|
2. Insert-if-not-exists
|
||||||
|
3. Replace range
|
||||||
|
|
||||||
|
It works by joining the input data with the target table on a key you provide.
|
||||||
|
Often this key is a unique row id key. You can then specify what to do when
|
||||||
|
there is a match and when there is not a match. For example, for upsert you want
|
||||||
|
to update if the row has a match and insert if the row doesn't have a match.
|
||||||
|
Whereas for insert-if-not-exists you only want to insert if the row doesn't have
|
||||||
|
a match.
|
||||||
|
|
||||||
|
You can also read more in the API reference:
|
||||||
|
|
||||||
|
* Python
|
||||||
|
* Sync: [lancedb.table.Table.merge_insert][]
|
||||||
|
* Async: [lancedb.table.AsyncTable.merge_insert][]
|
||||||
|
* Typescript: [lancedb.Table.mergeInsert](../../js/classes/Table.md/#mergeinsert)
|
||||||
|
|
||||||
|
!!! tip "Use scalar indices to speed up merge insert"
|
||||||
|
|
||||||
|
The merge insert command needs to perform a join between the input data and the
|
||||||
|
target table on the `on` key you provide. This requires scanning that entire
|
||||||
|
column, which can be expensive for large tables. To speed up this operation,
|
||||||
|
you can create a scalar index on the `on` column, which will allow LanceDB to
|
||||||
|
    find matches without having to scan the whole table.
|
||||||
|
|
||||||
|
Read more about scalar indices in [Building a Scalar Index](../scalar_index.md)
|
||||||
|
guide.
|
||||||
|
|
||||||
|
!!! info "Embedding Functions"
|
||||||
|
|
||||||
|
Like the create table and add APIs, the merge insert API will automatically
|
||||||
|
    compute embeddings if the table has an embedding definition in its schema.
|
||||||
|
If the input data doesn't contain the source column, or the vector column
|
||||||
|
is already filled, then the embeddings won't be computed. See the
|
||||||
|
[Embedding Functions](../../embeddings/embedding_functions.md) guide for more
|
||||||
|
information.
|
||||||
|
|
||||||
|
## Upsert
|
||||||
|
|
||||||
|
Upsert updates rows if they exist and inserts them if they don't. To do this
|
||||||
|
with merge insert, enable both `when_matched_update_all()` and
|
||||||
|
`when_not_matched_insert_all()`.
|
||||||
|
|
||||||
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_merge_insert.py:upsert_basic"
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_merge_insert.py:upsert_basic_async"
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Typescript"
|
||||||
|
|
||||||
|
=== "@lancedb/lancedb"
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
--8<-- "nodejs/examples/merge_insert.test.ts:upsert_basic"
|
||||||
|
```
|
||||||
|
|
||||||
|
!!! note "Providing subsets of columns"
|
||||||
|
|
||||||
|
If a column is nullable, it can be omitted from input data and it will be
|
||||||
|
considered `null`. Columns can also be provided in any order.
|
||||||
|
|
||||||
|
## Insert-if-not-exists
|
||||||
|
|
||||||
|
To avoid inserting duplicate rows, you can use the insert-if-not-exists command.
|
||||||
|
This will only insert rows that do not have a match in the target table. To do
|
||||||
|
this with merge insert, enable just `when_not_matched_insert_all()`.
|
||||||
|
|
||||||
|
|
||||||
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_merge_insert.py:insert_if_not_exists"
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_merge_insert.py:insert_if_not_exists_async"
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Typescript"
|
||||||
|
|
||||||
|
=== "@lancedb/lancedb"
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
--8<-- "nodejs/examples/merge_insert.test.ts:insert_if_not_exists"
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Replace range
|
||||||
|
|
||||||
|
You can also replace a range of rows in the target table with the input data.
|
||||||
|
For example, if you have a table of document chunks, where each chunk has
|
||||||
|
both a `doc_id` and a `chunk_id`, you can replace all chunks for a given
|
||||||
|
`doc_id` with updated chunks. This can be tricky otherwise because if you
|
||||||
|
try to use upsert when the new data has fewer chunks you will end up with
|
||||||
|
extra chunks. To avoid this, add another clause to delete any chunks for
|
||||||
|
the document that are not in the new data, with
|
||||||
|
`when_not_matched_by_source_delete`.
|
||||||
|
|
||||||
|
=== "Python"
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_merge_insert.py:replace_range"
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_merge_insert.py:replace_range_async"
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Typescript"
|
||||||
|
|
||||||
|
=== "@lancedb/lancedb"
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
--8<-- "nodejs/examples/merge_insert.test.ts:replace_range"
|
||||||
|
```
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
## Improving retriever performance
|
## Improving retriever performance
|
||||||
|
|
||||||
Try it yourself - <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
||||||
|
|
||||||
VectorDBs are used as retreivers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retriever is a critical component of Retrieval Augmented Generation (RAG) acrhitectures. In this section, we will discuss how to improve the performance of retrievers.
|
VectorDBs are used as retrievers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retrievers are a critical component of Retrieval Augmented Generation (RAG) architectures. In this section, we will discuss how to improve the performance of retrievers.
|
||||||
|
|
||||||
There are several ways to improve the performance of retrievers. Some of the common techniques are:
|
There are several ways to improve the performance of retrievers. Some of the common techniques are:
|
||||||
|
|
||||||
@@ -19,7 +19,7 @@ Using different embedding models is something that's very specific to the use ca
|
|||||||
|
|
||||||
|
|
||||||
## The dataset
|
## The dataset
|
||||||
We'll be using a QA dataset generated using a LLama2 review paper. The dataset contains 221 query, context and answer triplets. The queries and answers are generated using GPT-4 based on a given query. Full script used to generate the dataset can be found on this [repo](https://github.com/lancedb/ragged). It can be downloaded from [here](https://github.com/AyushExel/assets/blob/main/data_qa.csv)
|
We'll be using a QA dataset generated using a LLama2 review paper. The dataset contains 221 query, context and answer triplets. The queries and answers are generated using GPT-4 based on a given query. Full script used to generate the dataset can be found on this [repo](https://github.com/lancedb/ragged). It can be downloaded from [here](https://github.com/AyushExel/assets/blob/main/data_qa.csv).
|
||||||
|
|
||||||
### Using different query types
|
### Using different query types
|
||||||
Let's setup the embeddings and the dataset first. We'll use the LanceDB's `huggingface` embeddings integration for this guide.
|
Let's setup the embeddings and the dataset first. We'll use the LanceDB's `huggingface` embeddings integration for this guide.
|
||||||
@@ -45,14 +45,14 @@ table.add(df[["context"]].to_dict(orient="records"))
|
|||||||
queries = df["query"].tolist()
|
queries = df["query"].tolist()
|
||||||
```
|
```
|
||||||
|
|
||||||
Now that we have the dataset and embeddings table set up, here's how you can run different query types on the dataset.
|
Now that we have the dataset and embeddings table set up, here's how you can run different query types on the dataset:
|
||||||
|
|
||||||
* <b> Vector Search: </b>
|
* <b> Vector Search: </b>
|
||||||
|
|
||||||
```python
|
```python
|
||||||
table.search(queries[0], query_type="vector").limit(5).to_pandas()
|
table.search(queries[0], query_type="vector").limit(5).to_pandas()
|
||||||
```
|
```
|
||||||
By default, LanceDB uses vector search query type for searching and it automatically converts the input query to a vector before searching when using embedding API. So, the following statement is equivalent to the above statement.
|
By default, LanceDB uses vector search query type for searching and it automatically converts the input query to a vector before searching when using embedding API. So, the following statement is equivalent to the above statement:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
table.search(queries[0]).limit(5).to_pandas()
|
table.search(queries[0]).limit(5).to_pandas()
|
||||||
@@ -77,7 +77,7 @@ Now that we have the dataset and embeddings table set up, here's how you can run
|
|||||||
|
|
||||||
* <b> Hybrid Search: </b>
|
* <b> Hybrid Search: </b>
|
||||||
|
|
||||||
Hybrid search is a combination of vector and full-text search. Here's how you can run a hybrid search query on the dataset.
|
Hybrid search is a combination of vector and full-text search. Here's how you can run a hybrid search query on the dataset:
|
||||||
```python
|
```python
|
||||||
table.search(queries[0], query_type="hybrid").limit(5).to_pandas()
|
table.search(queries[0], query_type="hybrid").limit(5).to_pandas()
|
||||||
```
|
```
|
||||||
@@ -87,7 +87,7 @@ Now that we have the dataset and embeddings table set up, here's how you can run
|
|||||||
|
|
||||||
!!! note "Note"
|
!!! note "Note"
|
||||||
By default, it uses `LinearCombinationReranker` that combines the scores from vector and full-text search using a weighted linear combination. It is the simplest reranker implementation available in LanceDB. You can also use other rerankers like `CrossEncoderReranker` or `CohereReranker` for reranking the results.
|
By default, it uses `LinearCombinationReranker` that combines the scores from vector and full-text search using a weighted linear combination. It is the simplest reranker implementation available in LanceDB. You can also use other rerankers like `CrossEncoderReranker` or `CohereReranker` for reranking the results.
|
||||||
Learn more about rerankers [here](https://lancedb.github.io/lancedb/reranking/)
|
Learn more about rerankers [here](https://lancedb.github.io/lancedb/reranking/).
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
Continuing from the previous section, we can now rerank the results using more complex rerankers.
|
Continuing from the previous section, we can now rerank the results using more complex rerankers.
|
||||||
|
|
||||||
Try it yourself - <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
||||||
|
|
||||||
## Reranking search results
|
## Reranking search results
|
||||||
You can rerank any search results using a reranker. The syntax for reranking is as follows:
|
You can rerank any search results using a reranker. The syntax for reranking is as follows:
|
||||||
@@ -62,9 +62,6 @@ Let us take a look at the same datasets from the previous sections, using the sa
|
|||||||
| Reranked fts | 0.672 |
|
| Reranked fts | 0.672 |
|
||||||
| Hybrid | 0.759 |
|
| Hybrid | 0.759 |
|
||||||
|
|
||||||
### SQuAD Dataset
|
|
||||||
|
|
||||||
|
|
||||||
### Uber10K sec filing Dataset
|
### Uber10K sec filing Dataset
|
||||||
|
|
||||||
| Query Type | Hit-rate@5 |
|
| Query Type | Hit-rate@5 |
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
## Finetuning the Embedding Model
|
## Finetuning the Embedding Model
|
||||||
Try it yourself - <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/embedding_tuner.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/embedding_tuner.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
||||||
|
|
||||||
Another way to improve retriever performance is to fine-tune the embedding model itself. Fine-tuning the embedding model can help in learning better representations for the documents and queries in the dataset. This can be particularly useful when the dataset is very different from the pre-trained data used to train the embedding model.
|
Another way to improve retriever performance is to fine-tune the embedding model itself. Fine-tuning the embedding model can help in learning better representations for the documents and queries in the dataset. This can be particularly useful when the dataset is very different from the pre-trained data used to train the embedding model.
|
||||||
|
|
||||||
@@ -16,7 +16,7 @@ validation_df.to_csv("data_val.csv", index=False)
|
|||||||
You can use any tuning API to fine-tune embedding models. In this example, we'll utilise Llama-index as it also comes with utilities for synthetic data generation and training the model.
|
You can use any tuning API to fine-tune embedding models. In this example, we'll utilise Llama-index as it also comes with utilities for synthetic data generation and training the model.
|
||||||
|
|
||||||
|
|
||||||
Then parse the dataset as llama-index text nodes and generate synthetic QA pairs from each node.
|
We parse the dataset as llama-index text nodes and generate synthetic QA pairs from each node:
|
||||||
```python
|
```python
|
||||||
from llama_index.core.node_parser import SentenceSplitter
|
from llama_index.core.node_parser import SentenceSplitter
|
||||||
from llama_index.readers.file import PagedCSVReader
|
from llama_index.readers.file import PagedCSVReader
|
||||||
@@ -43,7 +43,7 @@ val_dataset = generate_qa_embedding_pairs(
|
|||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
Now we'll use `SentenceTransformersFinetuneEngine` engine to fine-tune the model. You can also use `sentence-transformers` or `transformers` library to fine-tune the model.
|
Now we'll use `SentenceTransformersFinetuneEngine` engine to fine-tune the model. You can also use `sentence-transformers` or `transformers` library to fine-tune the model:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from llama_index.finetuning import SentenceTransformersFinetuneEngine
|
from llama_index.finetuning import SentenceTransformersFinetuneEngine
|
||||||
@@ -57,7 +57,7 @@ finetune_engine = SentenceTransformersFinetuneEngine(
|
|||||||
finetune_engine.finetune()
|
finetune_engine.finetune()
|
||||||
embed_model = finetune_engine.get_finetuned_model()
|
embed_model = finetune_engine.get_finetuned_model()
|
||||||
```
|
```
|
||||||
This saves the fine tuned embedding model in `tuned_model` folder. This al
|
This saves the fine tuned embedding model in `tuned_model` folder.
|
||||||
|
|
||||||
# Evaluation results
|
# Evaluation results
|
||||||
In order to eval the retriever, you can either use this model to ingest the data into LanceDB directly or llama-index's LanceDB integration to create a `VectorStoreIndex` and use it as a retriever.
|
In order to eval the retriever, you can either use this model to ingest the data into LanceDB directly or llama-index's LanceDB integration to create a `VectorStoreIndex` and use it as a retriever.
|
||||||
|
|||||||
@@ -3,22 +3,22 @@
|
|||||||
Hybrid Search is a broad (often misused) term. It can mean anything from combining multiple methods for searching, to applying ranking methods to better sort the results. In this blog, we use the definition of "hybrid search" to mean using a combination of keyword-based and vector search.
|
Hybrid Search is a broad (often misused) term. It can mean anything from combining multiple methods for searching, to applying ranking methods to better sort the results. In this blog, we use the definition of "hybrid search" to mean using a combination of keyword-based and vector search.
|
||||||
|
|
||||||
## The challenge of (re)ranking search results
|
## The challenge of (re)ranking search results
|
||||||
Once you have a group of the most relevant search results from multiple search sources, you'd likely standardize the score and rank them accordingly. This process can also be seen as another independent step - reranking.
|
Once you have a group of the most relevant search results from multiple search sources, you'd likely standardize the score and rank them accordingly. This process can also be seen as another independent step: reranking.
|
||||||
There are two approaches for reranking search results from multiple sources.
|
There are two approaches for reranking search results from multiple sources.
|
||||||
|
|
||||||
* <b>Score-based</b>: Calculate final relevance scores based on a weighted linear combination of individual search algorithm scores. Example - Weighted linear combination of semantic search & keyword-based search results.
|
* <b>Score-based</b>: Calculate final relevance scores based on a weighted linear combination of individual search algorithm scores. Example: Weighted linear combination of semantic search & keyword-based search results.
|
||||||
|
|
||||||
* <b>Relevance-based</b>: Discards the existing scores and calculates the relevance of each search result - query pair. Example - Cross Encoder models
|
* <b>Relevance-based</b>: Discards the existing scores and calculates the relevance of each search result-query pair. Example: Cross Encoder models
|
||||||
|
|
||||||
Even though there are many strategies for reranking search results, none works for all cases. Moreover, evaluating them itself is a challenge. Also, reranking can be dataset, application specific so it's hard to generalize.
|
Even though there are many strategies for reranking search results, none works for all cases. Moreover, evaluating them itself is a challenge. Also, reranking can be dataset or application specific so it's hard to generalize.
|
||||||
|
|
||||||
### Example evaluation of hybrid search with Reranking
|
### Example evaluation of hybrid search with Reranking
|
||||||
|
|
||||||
Here's some evaluation numbers from experiment comparing these re-rankers on about 800 queries. It is modified version of an evaluation script from [llama-index](https://github.com/run-llama/finetune-embedding/blob/main/evaluate.ipynb) that measures hit-rate at top-k.
|
Here are some evaluation numbers from an experiment comparing these rerankers on about 800 queries. It is a modified version of an evaluation script from [llama-index](https://github.com/run-llama/finetune-embedding/blob/main/evaluate.ipynb) that measures hit-rate at top-k.
|
||||||
|
|
||||||
<b> With OpenAI ada2 embedding </b>
|
<b> With OpenAI ada2 embedding </b>
|
||||||
|
|
||||||
Vector Search baseline - `0.64`
|
Vector Search baseline: `0.64`
|
||||||
|
|
||||||
| Reranker | Top-3 | Top-5 | Top-10 |
|
| Reranker | Top-3 | Top-5 | Top-10 |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- | --- | --- |
|
||||||
@@ -33,7 +33,7 @@ Vector Search baseline - `0.64`
|
|||||||
|
|
||||||
<b> With OpenAI embedding-v3-small </b>
|
<b> With OpenAI embedding-v3-small </b>
|
||||||
|
|
||||||
Vector Search baseline - `0.59`
|
Vector Search baseline: `0.59`
|
||||||
|
|
||||||
| Reranker | Top-3 | Top-5 | Top-10 |
|
| Reranker | Top-3 | Top-5 | Top-10 |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- | --- | --- |
|
||||||
|
|||||||
@@ -5,56 +5,45 @@ LanceDB supports both semantic and keyword-based search (also termed full-text s
|
|||||||
## Hybrid search in LanceDB
|
## Hybrid search in LanceDB
|
||||||
You can perform hybrid search in LanceDB by combining the results of semantic and full-text search via a reranking algorithm of your choice. LanceDB provides multiple rerankers out of the box. However, you can always write a custom reranker if your use case needs more sophisticated logic.
|
You can perform hybrid search in LanceDB by combining the results of semantic and full-text search via a reranking algorithm of your choice. LanceDB provides multiple rerankers out of the box. However, you can always write a custom reranker if your use case needs more sophisticated logic.
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import os
|
--8<-- "python/python/tests/docs/test_search.py:import-os"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-openai"
|
||||||
import lancedb
|
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
|
||||||
import openai
|
--8<-- "python/python/tests/docs/test_search.py:import-embeddings"
|
||||||
from lancedb.embeddings import get_registry
|
--8<-- "python/python/tests/docs/test_search.py:import-pydantic"
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-openai-embeddings"
|
||||||
db = lancedb.connect("~/.lancedb")
|
--8<-- "python/python/tests/docs/test_search.py:class-Documents"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:basic_hybrid_search"
|
||||||
# Ingest embedding function in LanceDB table
|
|
||||||
# Configuring the environment variable OPENAI_API_KEY
|
|
||||||
if "OPENAI_API_KEY" not in os.environ:
|
|
||||||
# OR set the key here as a variable
|
|
||||||
openai.api_key = "sk-..."
|
|
||||||
embeddings = get_registry().get("openai").create()
|
|
||||||
|
|
||||||
class Documents(LanceModel):
|
|
||||||
vector: Vector(embeddings.ndims()) = embeddings.VectorField()
|
|
||||||
text: str = embeddings.SourceField()
|
|
||||||
|
|
||||||
table = db.create_table("documents", schema=Documents)
|
|
||||||
|
|
||||||
data = [
|
|
||||||
{ "text": "rebel spaceships striking from a hidden base"},
|
|
||||||
{ "text": "have won their first victory against the evil Galactic Empire"},
|
|
||||||
{ "text": "during the battle rebel spies managed to steal secret plans"},
|
|
||||||
{ "text": "to the Empire's ultimate weapon the Death Star"}
|
|
||||||
]
|
|
||||||
|
|
||||||
# ingest docs with auto-vectorization
|
|
||||||
table.add(data)
|
|
||||||
|
|
||||||
# Create a fts index before the hybrid search
|
|
||||||
table.create_fts_index("text")
|
|
||||||
# hybrid search with default re-ranker
|
|
||||||
results = table.search("flower moon", query_type="hybrid").to_pandas()
|
|
||||||
```
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-os"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-openai"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-embeddings"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-pydantic"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:import-openai-embeddings"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:class-Documents"
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:basic_hybrid_search_async"
|
||||||
|
```
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
You can also pass the vector and text query manually. This is useful if you're not using the embedding API or if you're using a separate embedder service.
|
You can also pass the vector and text query manually. This is useful if you're not using the embedding API or if you're using a separate embedder service.
|
||||||
### Explicitly passing the vector and text query
|
### Explicitly passing the vector and text query
|
||||||
```python
|
=== "Sync API"
|
||||||
vector_query = [0.1, 0.2, 0.3, 0.4, 0.5]
|
|
||||||
text_query = "flower moon"
|
|
||||||
results = table.search(query_type="hybrid")
|
|
||||||
.vector(vector_query)
|
|
||||||
.text(text_query)
|
|
||||||
.limit(5)
|
|
||||||
.to_pandas()
|
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:hybrid_search_pass_vector_text"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_search.py:hybrid_search_pass_vector_text_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
By default, LanceDB uses `RRFReranker()`, which uses reciprocal rank fusion score, to combine and rerank the results of semantic and full-text search. You can customize the hyperparameters as needed or write your own custom reranker. Here's how you can use any of the available rerankers:
|
By default, LanceDB uses `RRFReranker()`, which uses reciprocal rank fusion score, to combine and rerank the results of semantic and full-text search. You can customize the hyperparameters as needed or write your own custom reranker. Here's how you can use any of the available rerankers:
|
||||||
@@ -68,7 +57,7 @@ By default, LanceDB uses `RRFReranker()`, which uses reciprocal rank fusion scor
|
|||||||
|
|
||||||
|
|
||||||
## Available Rerankers
|
## Available Rerankers
|
||||||
LanceDB provides a number of re-rankers out of the box. You can use any of these re-rankers by passing them to the `rerank()` method.
|
LanceDB provides a number of rerankers out of the box. You can use any of these rerankers by passing them to the `rerank()` method.
|
||||||
Go to [Rerankers](../reranking/index.md) to learn more about using the available rerankers and implementing custom rerankers.
|
Go to [Rerankers](../reranking/index.md) to learn more about using the available rerankers and implementing custom rerankers.
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -36,41 +36,8 @@ const results = await table.vectorSearch([0.1, 0.3]).limit(20).toArray();
|
|||||||
console.log(results);
|
console.log(results);
|
||||||
```
|
```
|
||||||
|
|
||||||
The [quickstart](../basic.md) contains a more complete example.
|
The [quickstart](https://lancedb.github.io/lancedb/basic/) contains a more complete example.
|
||||||
|
|
||||||
## Development
|
## Development
|
||||||
|
|
||||||
```sh
|
See [CONTRIBUTING.md](_media/CONTRIBUTING.md) for information on how to contribute to LanceDB.
|
||||||
npm run build
|
|
||||||
npm run test
|
|
||||||
```
|
|
||||||
|
|
||||||
### Running lint / format
|
|
||||||
|
|
||||||
LanceDb uses [biome](https://biomejs.dev/) for linting and formatting. if you are using VSCode you will need to install the official [Biome](https://marketplace.visualstudio.com/items?itemName=biomejs.biome) extension.
|
|
||||||
To manually lint your code you can run:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
npm run lint
|
|
||||||
```
|
|
||||||
|
|
||||||
to automatically fix all fixable issues:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
npm run lint-fix
|
|
||||||
```
|
|
||||||
|
|
||||||
If you do not have your workspace root set to the `nodejs` directory, unfortunately the extension will not work. You can still run the linting and formatting commands manually.
|
|
||||||
|
|
||||||
### Generating docs
|
|
||||||
|
|
||||||
```sh
|
|
||||||
npm run docs
|
|
||||||
|
|
||||||
cd ../docs
|
|
||||||
# Assume the virtual environment was created
|
|
||||||
# python3 -m venv venv
|
|
||||||
# pip install -r requirements.txt
|
|
||||||
. ./venv/bin/activate
|
|
||||||
mkdocs build
|
|
||||||
```
|
|
||||||
|
|||||||
76
docs/src/js/_media/CONTRIBUTING.md
Normal file
76
docs/src/js/_media/CONTRIBUTING.md
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
# Contributing to LanceDB Typescript
|
||||||
|
|
||||||
|
This document outlines the process for contributing to LanceDB Typescript.
|
||||||
|
For general contribution guidelines, see [CONTRIBUTING.md](../CONTRIBUTING.md).
|
||||||
|
|
||||||
|
## Project layout
|
||||||
|
|
||||||
|
The Typescript package is a wrapper around the Rust library, `lancedb`. We use
|
||||||
|
the [napi-rs](https://napi.rs/) library to create the bindings between Rust and
|
||||||
|
Typescript.
|
||||||
|
|
||||||
|
* `src/`: Rust bindings source code
|
||||||
|
* `lancedb/`: Typescript package source code
|
||||||
|
* `__test__/`: Unit tests
|
||||||
|
* `examples/`: An npm package with the examples shown in the documentation
|
||||||
|
|
||||||
|
## Development environment
|
||||||
|
|
||||||
|
To set up your development environment, you will need to install the following:
|
||||||
|
|
||||||
|
1. Node.js 14 or later
|
||||||
|
2. Rust's package manager, Cargo. Use [rustup](https://rustup.rs/) to install.
|
||||||
|
3. [protoc](https://grpc.io/docs/protoc-installation/) (Protocol Buffers compiler)
|
||||||
|
|
||||||
|
Initial setup:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
npm install
|
||||||
|
```
|
||||||
|
|
||||||
|
### Commit Hooks
|
||||||
|
|
||||||
|
It is **highly recommended** to install the [pre-commit](https://pre-commit.com/) hooks to ensure that your
|
||||||
|
code is formatted correctly and passes basic checks before committing:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
pre-commit install
|
||||||
|
```
|
||||||
|
|
||||||
|
## Development
|
||||||
|
|
||||||
|
Most common development commands can be run using the npm scripts.
|
||||||
|
|
||||||
|
Build the package
|
||||||
|
|
||||||
|
```shell
|
||||||
|
npm install
|
||||||
|
npm run build
|
||||||
|
```
|
||||||
|
|
||||||
|
Lint:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
npm run lint
|
||||||
|
```
|
||||||
|
|
||||||
|
Format and fix lints:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
npm run lint-fix
|
||||||
|
```
|
||||||
|
|
||||||
|
Run tests:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
npm test
|
||||||
|
```
|
||||||
|
|
||||||
|
To run a single test:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
# Single file: table.test.ts
|
||||||
|
npm test -- table.test.ts
|
||||||
|
# Single test: 'merge insert' in table.test.ts
|
||||||
|
npm test -- table.test.ts --testNamePattern=merge\ insert
|
||||||
|
```
|
||||||
@@ -23,18 +23,6 @@ be closed when they are garbage collected.
|
|||||||
Any created tables are independent and will continue to work even if
|
Any created tables are independent and will continue to work even if
|
||||||
the underlying connection has been closed.
|
the underlying connection has been closed.
|
||||||
|
|
||||||
## Constructors
|
|
||||||
|
|
||||||
### new Connection()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
new Connection(): Connection
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
[`Connection`](Connection.md)
|
|
||||||
|
|
||||||
## Methods
|
## Methods
|
||||||
|
|
||||||
### close()
|
### close()
|
||||||
@@ -71,7 +59,7 @@ Creates a new empty Table
|
|||||||
* **name**: `string`
|
* **name**: `string`
|
||||||
The name of the table.
|
The name of the table.
|
||||||
|
|
||||||
* **schema**: `SchemaLike`
|
* **schema**: [`SchemaLike`](../type-aliases/SchemaLike.md)
|
||||||
The schema of the table
|
The schema of the table
|
||||||
|
|
||||||
* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
|
* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
|
||||||
@@ -117,7 +105,7 @@ Creates a new Table and initialize it with new data.
|
|||||||
* **name**: `string`
|
* **name**: `string`
|
||||||
The name of the table.
|
The name of the table.
|
||||||
|
|
||||||
* **data**: `TableLike` \| `Record`<`string`, `unknown`>[]
|
* **data**: [`TableLike`](../type-aliases/TableLike.md) \| `Record`<`string`, `unknown`>[]
|
||||||
Non-empty Array of Records
|
Non-empty Array of Records
|
||||||
to be inserted into the table
|
to be inserted into the table
|
||||||
|
|
||||||
@@ -143,6 +131,20 @@ Return a brief description of the connection
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### dropAllTables()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
abstract dropAllTables(): Promise<void>
|
||||||
|
```
|
||||||
|
|
||||||
|
Drop all tables in the database.
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`Promise`<`void`>
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### dropTable()
|
### dropTable()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -189,7 +191,7 @@ Open a table in the database.
|
|||||||
* **name**: `string`
|
* **name**: `string`
|
||||||
The name of the table
|
The name of the table
|
||||||
|
|
||||||
* **options?**: `Partial`<`OpenTableOptions`>
|
* **options?**: `Partial`<[`OpenTableOptions`](../interfaces/OpenTableOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
|
|||||||
@@ -72,11 +72,9 @@ The results of a full text search are ordered by relevance measured by BM25.
|
|||||||
|
|
||||||
You can combine filters with full text search.
|
You can combine filters with full text search.
|
||||||
|
|
||||||
For now, the full text search index only supports English, and doesn't support phrase search.
|
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **options?**: `Partial`<`FtsOptions`>
|
* **options?**: `Partial`<[`FtsOptions`](../interfaces/FtsOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -98,7 +96,7 @@ the vectors.
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **options?**: `Partial`<`HnswPqOptions`>
|
* **options?**: `Partial`<[`HnswPqOptions`](../interfaces/HnswPqOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -120,7 +118,7 @@ the vectors.
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **options?**: `Partial`<`HnswSqOptions`>
|
* **options?**: `Partial`<[`HnswSqOptions`](../interfaces/HnswSqOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
|
|||||||
126
docs/src/js/classes/MergeInsertBuilder.md
Normal file
126
docs/src/js/classes/MergeInsertBuilder.md
Normal file
@@ -0,0 +1,126 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / MergeInsertBuilder
|
||||||
|
|
||||||
|
# Class: MergeInsertBuilder
|
||||||
|
|
||||||
|
A builder used to create and run a merge insert operation
|
||||||
|
|
||||||
|
## Constructors
|
||||||
|
|
||||||
|
### new MergeInsertBuilder()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
new MergeInsertBuilder(native, schema): MergeInsertBuilder
|
||||||
|
```
|
||||||
|
|
||||||
|
Construct a MergeInsertBuilder. __Internal use only.__
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **native**: `NativeMergeInsertBuilder`
|
||||||
|
|
||||||
|
* **schema**: `Schema`<`any`> \| `Promise`<`Schema`<`any`>>
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
[`MergeInsertBuilder`](MergeInsertBuilder.md)
|
||||||
|
|
||||||
|
## Methods
|
||||||
|
|
||||||
|
### execute()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
execute(data): Promise<void>
|
||||||
|
```
|
||||||
|
|
||||||
|
Executes the merge insert operation
|
||||||
|
|
||||||
|
Nothing is returned but the `Table` is updated
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **data**: [`Data`](../type-aliases/Data.md)
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`Promise`<`void`>
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### whenMatchedUpdateAll()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
whenMatchedUpdateAll(options?): MergeInsertBuilder
|
||||||
|
```
|
||||||
|
|
||||||
|
Rows that exist in both the source table (new data) and
|
||||||
|
the target table (old data) will be updated, replacing
|
||||||
|
the old row with the corresponding matching row.
|
||||||
|
|
||||||
|
If there are multiple matches then the behavior is undefined.
|
||||||
|
Currently this causes multiple copies of the row to be created
|
||||||
|
but that behavior is subject to change.
|
||||||
|
|
||||||
|
An optional condition may be specified. If it is, then only
|
||||||
|
matched rows that satisfy the condition will be updated. Any
|
||||||
|
rows that do not satisfy the condition will be left as they
|
||||||
|
are. Failing to satisfy the condition does not cause a
|
||||||
|
"matched row" to become a "not matched" row.
|
||||||
|
|
||||||
|
The condition should be an SQL string. Use the prefix
|
||||||
|
`target.` to refer to rows in the target table (old data)
|
||||||
|
and the prefix `source.` to refer to rows in the source
|
||||||
|
table (new data).
|
||||||
|
|
||||||
|
For example, "target.last_update < source.last_update"
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **options?**
|
||||||
|
|
||||||
|
* **options.where?**: `string`
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
[`MergeInsertBuilder`](MergeInsertBuilder.md)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### whenNotMatchedBySourceDelete()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
whenNotMatchedBySourceDelete(options?): MergeInsertBuilder
|
||||||
|
```
|
||||||
|
|
||||||
|
Rows that exist only in the target table (old data) will be
|
||||||
|
deleted. An optional condition can be provided to limit what
|
||||||
|
data is deleted.
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **options?**
|
||||||
|
|
||||||
|
* **options.where?**: `string`
|
||||||
|
An optional condition to limit what data is deleted
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
[`MergeInsertBuilder`](MergeInsertBuilder.md)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### whenNotMatchedInsertAll()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
whenNotMatchedInsertAll(): MergeInsertBuilder
|
||||||
|
```
|
||||||
|
|
||||||
|
Rows that exist only in the source table (new data) should
|
||||||
|
be inserted into the target table.
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
[`MergeInsertBuilder`](MergeInsertBuilder.md)
|
||||||
@@ -8,30 +8,14 @@
|
|||||||
|
|
||||||
A builder for LanceDB queries.
|
A builder for LanceDB queries.
|
||||||
|
|
||||||
|
## See
|
||||||
|
|
||||||
|
[Table#query](Table.md#query), [Table#search](Table.md#search)
|
||||||
|
|
||||||
## Extends
|
## Extends
|
||||||
|
|
||||||
- [`QueryBase`](QueryBase.md)<`NativeQuery`>
|
- [`QueryBase`](QueryBase.md)<`NativeQuery`>
|
||||||
|
|
||||||
## Constructors
|
|
||||||
|
|
||||||
### new Query()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
new Query(tbl): Query
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Parameters
|
|
||||||
|
|
||||||
* **tbl**: `Table`
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
[`Query`](Query.md)
|
|
||||||
|
|
||||||
#### Overrides
|
|
||||||
|
|
||||||
[`QueryBase`](QueryBase.md).[`constructor`](QueryBase.md#constructors)
|
|
||||||
|
|
||||||
## Properties
|
## Properties
|
||||||
|
|
||||||
### inner
|
### inner
|
||||||
@@ -46,42 +30,6 @@ protected inner: Query | Promise<Query>;
|
|||||||
|
|
||||||
## Methods
|
## Methods
|
||||||
|
|
||||||
### \[asyncIterator\]()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
asyncIterator: AsyncIterator<RecordBatch<any>, any, undefined>
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
`AsyncIterator`<`RecordBatch`<`any`>, `any`, `undefined`>
|
|
||||||
|
|
||||||
#### Inherited from
|
|
||||||
|
|
||||||
[`QueryBase`](QueryBase.md).[`[asyncIterator]`](QueryBase.md#%5Basynciterator%5D)
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### doCall()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
protected doCall(fn): void
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Parameters
|
|
||||||
|
|
||||||
* **fn**
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
`void`
|
|
||||||
|
|
||||||
#### Inherited from
|
|
||||||
|
|
||||||
[`QueryBase`](QueryBase.md).[`doCall`](QueryBase.md#docall)
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### execute()
|
### execute()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -92,7 +40,7 @@ Execute the query and return the results as an
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -161,7 +109,7 @@ fastSearch(): this
|
|||||||
Skip searching un-indexed data. This can make search faster, but will miss
|
Skip searching un-indexed data. This can make search faster, but will miss
|
||||||
any data that is not yet indexed.
|
any data that is not yet indexed.
|
||||||
|
|
||||||
Use lancedb.Table#optimize to index all un-indexed data.
|
Use [Table#optimize](Table.md#optimize) to index all un-indexed data.
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -189,7 +137,7 @@ A filter statement to be applied to this query.
|
|||||||
|
|
||||||
`this`
|
`this`
|
||||||
|
|
||||||
#### Alias
|
#### See
|
||||||
|
|
||||||
where
|
where
|
||||||
|
|
||||||
@@ -213,7 +161,7 @@ fullTextSearch(query, options?): this
|
|||||||
|
|
||||||
* **query**: `string`
|
* **query**: `string`
|
||||||
|
|
||||||
* **options?**: `Partial`<`FullTextSearchOptions`>
|
* **options?**: `Partial`<[`FullTextSearchOptions`](../interfaces/FullTextSearchOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -250,26 +198,6 @@ called then every valid row from the table will be returned.
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### nativeExecute()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
protected nativeExecute(options?): Promise<RecordBatchIterator>
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Parameters
|
|
||||||
|
|
||||||
* **options?**: `Partial`<`QueryExecutionOptions`>
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
`Promise`<`RecordBatchIterator`>
|
|
||||||
|
|
||||||
#### Inherited from
|
|
||||||
|
|
||||||
[`QueryBase`](QueryBase.md).[`nativeExecute`](QueryBase.md#nativeexecute)
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### nearestTo()
|
### nearestTo()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -294,7 +222,7 @@ If there is more than one vector column you must use
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **vector**: `IntoVector`
|
* **vector**: [`IntoVector`](../type-aliases/IntoVector.md)
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -427,7 +355,7 @@ Collect the results as an array of objects.
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -449,7 +377,7 @@ Collect the results as an Arrow
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
|
|||||||
@@ -8,6 +8,11 @@
|
|||||||
|
|
||||||
Common methods supported by all query types
|
Common methods supported by all query types
|
||||||
|
|
||||||
|
## See
|
||||||
|
|
||||||
|
- [Query](Query.md)
|
||||||
|
- [VectorQuery](VectorQuery.md)
|
||||||
|
|
||||||
## Extended by
|
## Extended by
|
||||||
|
|
||||||
- [`Query`](Query.md)
|
- [`Query`](Query.md)
|
||||||
@@ -21,22 +26,6 @@ Common methods supported by all query types
|
|||||||
|
|
||||||
- `AsyncIterable`<`RecordBatch`>
|
- `AsyncIterable`<`RecordBatch`>
|
||||||
|
|
||||||
## Constructors
|
|
||||||
|
|
||||||
### new QueryBase()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
protected new QueryBase<NativeQueryType>(inner): QueryBase<NativeQueryType>
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Parameters
|
|
||||||
|
|
||||||
* **inner**: `NativeQueryType` \| `Promise`<`NativeQueryType`>
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
[`QueryBase`](QueryBase.md)<`NativeQueryType`>
|
|
||||||
|
|
||||||
## Properties
|
## Properties
|
||||||
|
|
||||||
### inner
|
### inner
|
||||||
@@ -47,38 +36,6 @@ protected inner: NativeQueryType | Promise<NativeQueryType>;
|
|||||||
|
|
||||||
## Methods
|
## Methods
|
||||||
|
|
||||||
### \[asyncIterator\]()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
asyncIterator: AsyncIterator<RecordBatch<any>, any, undefined>
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
`AsyncIterator`<`RecordBatch`<`any`>, `any`, `undefined`>
|
|
||||||
|
|
||||||
#### Implementation of
|
|
||||||
|
|
||||||
`AsyncIterable.[asyncIterator]`
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### doCall()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
protected doCall(fn): void
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Parameters
|
|
||||||
|
|
||||||
* **fn**
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
`void`
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### execute()
|
### execute()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -89,7 +46,7 @@ Execute the query and return the results as an
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -150,7 +107,7 @@ fastSearch(): this
|
|||||||
Skip searching un-indexed data. This can make search faster, but will miss
|
Skip searching un-indexed data. This can make search faster, but will miss
|
||||||
any data that is not yet indexed.
|
any data that is not yet indexed.
|
||||||
|
|
||||||
Use lancedb.Table#optimize to index all un-indexed data.
|
Use [Table#optimize](Table.md#optimize) to index all un-indexed data.
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -174,7 +131,7 @@ A filter statement to be applied to this query.
|
|||||||
|
|
||||||
`this`
|
`this`
|
||||||
|
|
||||||
#### Alias
|
#### See
|
||||||
|
|
||||||
where
|
where
|
||||||
|
|
||||||
@@ -194,7 +151,7 @@ fullTextSearch(query, options?): this
|
|||||||
|
|
||||||
* **query**: `string`
|
* **query**: `string`
|
||||||
|
|
||||||
* **options?**: `Partial`<`FullTextSearchOptions`>
|
* **options?**: `Partial`<[`FullTextSearchOptions`](../interfaces/FullTextSearchOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -223,22 +180,6 @@ called then every valid row from the table will be returned.
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### nativeExecute()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
protected nativeExecute(options?): Promise<RecordBatchIterator>
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Parameters
|
|
||||||
|
|
||||||
* **options?**: `Partial`<`QueryExecutionOptions`>
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
`Promise`<`RecordBatchIterator`>
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### offset()
|
### offset()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -314,7 +255,7 @@ Collect the results as an array of objects.
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -332,7 +273,7 @@ Collect the results as an Arrow
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
|
|||||||
@@ -14,21 +14,13 @@ will be freed when the Table is garbage collected. To eagerly free the cache yo
|
|||||||
can call the `close` method. Once the Table is closed, it cannot be used for any
|
can call the `close` method. Once the Table is closed, it cannot be used for any
|
||||||
further operations.
|
further operations.
|
||||||
|
|
||||||
|
Tables are created using the methods [Connection#createTable](Connection.md#createtable)
|
||||||
|
and [Connection#createEmptyTable](Connection.md#createemptytable). Existing tables are opened
|
||||||
|
using [Connection#openTable](Connection.md#opentable).
|
||||||
|
|
||||||
Closing a table is optional. If not closed, it will be closed when it is garbage
|
Closing a table is optional. If not closed, it will be closed when it is garbage
|
||||||
collected.
|
collected.
|
||||||
|
|
||||||
## Constructors
|
|
||||||
|
|
||||||
### new Table()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
new Table(): Table
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
[`Table`](Table.md)
|
|
||||||
|
|
||||||
## Accessors
|
## Accessors
|
||||||
|
|
||||||
### name
|
### name
|
||||||
@@ -216,6 +208,9 @@ Indices on vector columns will speed up vector searches.
|
|||||||
Indices on scalar columns will speed up filtering (in both
|
Indices on scalar columns will speed up filtering (in both
|
||||||
vector and non-vector searches)
|
vector and non-vector searches)
|
||||||
|
|
||||||
|
We currently don't support custom named indexes.
|
||||||
|
The index name will always be `${column}_idx`.
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **column**: `string`
|
* **column**: `string`
|
||||||
@@ -226,11 +221,6 @@ vector and non-vector searches)
|
|||||||
|
|
||||||
`Promise`<`void`>
|
`Promise`<`void`>
|
||||||
|
|
||||||
#### Note
|
|
||||||
|
|
||||||
We currently don't support custom named indexes,
|
|
||||||
The index name will always be `${column}_idx`
|
|
||||||
|
|
||||||
#### Examples
|
#### Examples
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -317,6 +307,28 @@ then call ``cleanup_files`` to remove the old files.
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### dropIndex()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
abstract dropIndex(name): Promise<void>
|
||||||
|
```
|
||||||
|
|
||||||
|
Drop an index from the table.
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **name**: `string`
|
||||||
|
The name of the index.
|
||||||
|
This does not delete the index from disk, it just removes it from the table.
|
||||||
|
To delete the index, run [Table#optimize](Table.md#optimize) after dropping the index.
|
||||||
|
Use [Table.listIndices](Table.md#listindices) to find the names of the indices.
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`Promise`<`void`>
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### indexStats()
|
### indexStats()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -336,6 +348,8 @@ List all the stats of a specified index
|
|||||||
|
|
||||||
The stats of the index. If the index does not exist, it will return undefined
|
The stats of the index. If the index does not exist, it will return undefined
|
||||||
|
|
||||||
|
Use [Table.listIndices](Table.md#listindices) to find the names of the indices.
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### isOpen()
|
### isOpen()
|
||||||
@@ -376,7 +390,7 @@ List all the versions of the table
|
|||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`Promise`<`Version`[]>
|
`Promise`<[`Version`](../interfaces/Version.md)[]>
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
@@ -392,7 +406,7 @@ abstract mergeInsert(on): MergeInsertBuilder
|
|||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`MergeInsertBuilder`
|
[`MergeInsertBuilder`](MergeInsertBuilder.md)
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
@@ -436,7 +450,7 @@ Modeled after ``VACUUM`` in PostgreSQL.
|
|||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`Promise`<`OptimizeStats`>
|
`Promise`<[`OptimizeStats`](../interfaces/OptimizeStats.md)>
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
@@ -553,7 +567,7 @@ Get the schema of the table.
|
|||||||
abstract search(
|
abstract search(
|
||||||
query,
|
query,
|
||||||
queryType?,
|
queryType?,
|
||||||
ftsColumns?): VectorQuery | Query
|
ftsColumns?): Query | VectorQuery
|
||||||
```
|
```
|
||||||
|
|
||||||
Create a search query to find the nearest neighbors
|
Create a search query to find the nearest neighbors
|
||||||
@@ -561,7 +575,7 @@ of the given query
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **query**: `string` \| `IntoVector`
|
* **query**: `string` \| [`IntoVector`](../type-aliases/IntoVector.md)
|
||||||
the query, a vector or string
|
the query, a vector or string
|
||||||
|
|
||||||
* **queryType?**: `string`
|
* **queryType?**: `string`
|
||||||
@@ -575,7 +589,7 @@ of the given query
|
|||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
[`VectorQuery`](VectorQuery.md) \| [`Query`](Query.md)
|
[`Query`](Query.md) \| [`VectorQuery`](VectorQuery.md)
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
@@ -694,7 +708,7 @@ by `query`.
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **vector**: `IntoVector`
|
* **vector**: [`IntoVector`](../type-aliases/IntoVector.md)
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -717,38 +731,3 @@ Retrieve the version of the table
|
|||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`Promise`<`number`>
|
`Promise`<`number`>
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### parseTableData()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
static parseTableData(
|
|
||||||
data,
|
|
||||||
options?,
|
|
||||||
streaming?): Promise<object>
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Parameters
|
|
||||||
|
|
||||||
* **data**: `TableLike` \| `Record`<`string`, `unknown`>[]
|
|
||||||
|
|
||||||
* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
|
|
||||||
|
|
||||||
* **streaming?**: `boolean` = `false`
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
`Promise`<`object`>
|
|
||||||
|
|
||||||
##### buf
|
|
||||||
|
|
||||||
```ts
|
|
||||||
buf: Buffer;
|
|
||||||
```
|
|
||||||
|
|
||||||
##### mode
|
|
||||||
|
|
||||||
```ts
|
|
||||||
mode: string;
|
|
||||||
```
|
|
||||||
|
|||||||
@@ -10,30 +10,14 @@ A builder used to construct a vector search
|
|||||||
|
|
||||||
This builder can be reused to execute the query many times.
|
This builder can be reused to execute the query many times.
|
||||||
|
|
||||||
|
## See
|
||||||
|
|
||||||
|
[Query#nearestTo](Query.md#nearestto)
|
||||||
|
|
||||||
## Extends
|
## Extends
|
||||||
|
|
||||||
- [`QueryBase`](QueryBase.md)<`NativeVectorQuery`>
|
- [`QueryBase`](QueryBase.md)<`NativeVectorQuery`>
|
||||||
|
|
||||||
## Constructors
|
|
||||||
|
|
||||||
### new VectorQuery()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
new VectorQuery(inner): VectorQuery
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Parameters
|
|
||||||
|
|
||||||
* **inner**: `VectorQuery` \| `Promise`<`VectorQuery`>
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
[`VectorQuery`](VectorQuery.md)
|
|
||||||
|
|
||||||
#### Overrides
|
|
||||||
|
|
||||||
[`QueryBase`](QueryBase.md).[`constructor`](QueryBase.md#constructors)
|
|
||||||
|
|
||||||
## Properties
|
## Properties
|
||||||
|
|
||||||
### inner
|
### inner
|
||||||
@@ -48,22 +32,6 @@ protected inner: VectorQuery | Promise<VectorQuery>;
|
|||||||
|
|
||||||
## Methods
|
## Methods
|
||||||
|
|
||||||
### \[asyncIterator\]()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
asyncIterator: AsyncIterator<RecordBatch<any>, any, undefined>
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
`AsyncIterator`<`RecordBatch`<`any`>, `any`, `undefined`>
|
|
||||||
|
|
||||||
#### Inherited from
|
|
||||||
|
|
||||||
[`QueryBase`](QueryBase.md).[`[asyncIterator]`](QueryBase.md#%5Basynciterator%5D)
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### addQueryVector()
|
### addQueryVector()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -72,7 +40,7 @@ addQueryVector(vector): VectorQuery
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **vector**: `IntoVector`
|
* **vector**: [`IntoVector`](../type-aliases/IntoVector.md)
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -128,6 +96,24 @@ whose data type is a fixed-size-list of floats.
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### distanceRange()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
distanceRange(lowerBound?, upperBound?): VectorQuery
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **lowerBound?**: `number`
|
||||||
|
|
||||||
|
* **upperBound?**: `number`
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
[`VectorQuery`](VectorQuery.md)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### distanceType()
|
### distanceType()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -161,26 +147,6 @@ By default "l2" is used.
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### doCall()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
protected doCall(fn): void
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Parameters
|
|
||||||
|
|
||||||
* **fn**
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
`void`
|
|
||||||
|
|
||||||
#### Inherited from
|
|
||||||
|
|
||||||
[`QueryBase`](QueryBase.md).[`doCall`](QueryBase.md#docall)
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### ef()
|
### ef()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -215,7 +181,7 @@ Execute the query and return the results as an
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -284,7 +250,7 @@ fastSearch(): this
|
|||||||
Skip searching un-indexed data. This can make search faster, but will miss
|
Skip searching un-indexed data. This can make search faster, but will miss
|
||||||
any data that is not yet indexed.
|
any data that is not yet indexed.
|
||||||
|
|
||||||
Use lancedb.Table#optimize to index all un-indexed data.
|
Use [Table#optimize](Table.md#optimize) to index all un-indexed data.
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -312,7 +278,7 @@ A filter statement to be applied to this query.
|
|||||||
|
|
||||||
`this`
|
`this`
|
||||||
|
|
||||||
#### Alias
|
#### See
|
||||||
|
|
||||||
where
|
where
|
||||||
|
|
||||||
@@ -336,7 +302,7 @@ fullTextSearch(query, options?): this
|
|||||||
|
|
||||||
* **query**: `string`
|
* **query**: `string`
|
||||||
|
|
||||||
* **options?**: `Partial`<`FullTextSearchOptions`>
|
* **options?**: `Partial`<[`FullTextSearchOptions`](../interfaces/FullTextSearchOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -373,26 +339,6 @@ called then every valid row from the table will be returned.
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### nativeExecute()
|
|
||||||
|
|
||||||
```ts
|
|
||||||
protected nativeExecute(options?): Promise<RecordBatchIterator>
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Parameters
|
|
||||||
|
|
||||||
* **options?**: `Partial`<`QueryExecutionOptions`>
|
|
||||||
|
|
||||||
#### Returns
|
|
||||||
|
|
||||||
`Promise`<`RecordBatchIterator`>
|
|
||||||
|
|
||||||
#### Inherited from
|
|
||||||
|
|
||||||
[`QueryBase`](QueryBase.md).[`nativeExecute`](QueryBase.md#nativeexecute)
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### nprobes()
|
### nprobes()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -528,6 +474,22 @@ distance between the query vector and the actual uncompressed vector.
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### rerank()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
rerank(reranker): VectorQuery
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **reranker**: [`Reranker`](../namespaces/rerankers/interfaces/Reranker.md)
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
[`VectorQuery`](VectorQuery.md)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### select()
|
### select()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -591,7 +553,7 @@ Collect the results as an array of objects.
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
@@ -613,7 +575,7 @@ Collect the results as an Arrow
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **options?**: `Partial`<`QueryExecutionOptions`>
|
* **options?**: `Partial`<[`QueryExecutionOptions`](../interfaces/QueryExecutionOptions.md)>
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
|
|||||||
@@ -1,33 +0,0 @@
|
|||||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
[@lancedb/lancedb](../globals.md) / WriteMode
|
|
||||||
|
|
||||||
# Enumeration: WriteMode
|
|
||||||
|
|
||||||
Write mode for writing a table.
|
|
||||||
|
|
||||||
## Enumeration Members
|
|
||||||
|
|
||||||
### Append
|
|
||||||
|
|
||||||
```ts
|
|
||||||
Append: "Append";
|
|
||||||
```
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### Create
|
|
||||||
|
|
||||||
```ts
|
|
||||||
Create: "Create";
|
|
||||||
```
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### Overwrite
|
|
||||||
|
|
||||||
```ts
|
|
||||||
Overwrite: "Overwrite";
|
|
||||||
```
|
|
||||||
@@ -6,10 +6,10 @@
|
|||||||
|
|
||||||
# Function: connect()
|
# Function: connect()
|
||||||
|
|
||||||
## connect(uri, opts)
|
## connect(uri, options)
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
function connect(uri, opts?): Promise<Connection>
|
function connect(uri, options?): Promise<Connection>
|
||||||
```
|
```
|
||||||
|
|
||||||
Connect to a LanceDB instance at the given URI.
|
Connect to a LanceDB instance at the given URI.
|
||||||
@@ -26,7 +26,8 @@ Accepted formats:
|
|||||||
The uri of the database. If the database uri starts
|
The uri of the database. If the database uri starts
|
||||||
with `db://` then it connects to a remote database.
|
with `db://` then it connects to a remote database.
|
||||||
|
|
||||||
* **opts?**: `Partial`<[`ConnectionOptions`](../interfaces/ConnectionOptions.md)>
|
* **options?**: `Partial`<[`ConnectionOptions`](../interfaces/ConnectionOptions.md)>
|
||||||
|
The options to use when connecting to the database
|
||||||
|
|
||||||
### Returns
|
### Returns
|
||||||
|
|
||||||
@@ -49,10 +50,10 @@ const conn = await connect(
|
|||||||
});
|
});
|
||||||
```
|
```
|
||||||
|
|
||||||
## connect(opts)
|
## connect(options)
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
function connect(opts): Promise<Connection>
|
function connect(options): Promise<Connection>
|
||||||
```
|
```
|
||||||
|
|
||||||
Connect to a LanceDB instance at the given URI.
|
Connect to a LanceDB instance at the given URI.
|
||||||
@@ -65,7 +66,8 @@ Accepted formats:
|
|||||||
|
|
||||||
### Parameters
|
### Parameters
|
||||||
|
|
||||||
* **opts**: `Partial`<[`ConnectionOptions`](../interfaces/ConnectionOptions.md)> & `object`
|
* **options**: `Partial`<[`ConnectionOptions`](../interfaces/ConnectionOptions.md)> & `object`
|
||||||
|
The options to use when connecting to the database
|
||||||
|
|
||||||
### Returns
|
### Returns
|
||||||
|
|
||||||
|
|||||||
@@ -22,8 +22,6 @@ when creating a table or adding data to it)
|
|||||||
This function converts an array of Record<String, any> (row-major JS objects)
|
This function converts an array of Record<String, any> (row-major JS objects)
|
||||||
to an Arrow Table (a columnar structure)
|
to an Arrow Table (a columnar structure)
|
||||||
|
|
||||||
Note that it currently does not support nulls.
|
|
||||||
|
|
||||||
If a schema is provided then it will be used to determine the resulting array
|
If a schema is provided then it will be used to determine the resulting array
|
||||||
types. Fields will also be reordered to fit the order defined by the schema.
|
types. Fields will also be reordered to fit the order defined by the schema.
|
||||||
|
|
||||||
@@ -31,6 +29,9 @@ If a schema is not provided then the types will be inferred and the field order
|
|||||||
will be controlled by the order of properties in the first record. If a type
|
will be controlled by the order of properties in the first record. If a type
|
||||||
is inferred it will always be nullable.
|
is inferred it will always be nullable.
|
||||||
|
|
||||||
|
If not all fields are found in the data, then a subset of the schema will be
|
||||||
|
returned.
|
||||||
|
|
||||||
If the input is empty then a schema must be provided to create an empty table.
|
If the input is empty then a schema must be provided to create an empty table.
|
||||||
|
|
||||||
When a schema is not specified then data types will be inferred. The inference
|
When a schema is not specified then data types will be inferred. The inference
|
||||||
@@ -38,6 +39,7 @@ rules are as follows:
|
|||||||
|
|
||||||
- boolean => Bool
|
- boolean => Bool
|
||||||
- number => Float64
|
- number => Float64
|
||||||
|
- bigint => Int64
|
||||||
- String => Utf8
|
- String => Utf8
|
||||||
- Buffer => Binary
|
- Buffer => Binary
|
||||||
- Record<String, any> => Struct
|
- Record<String, any> => Struct
|
||||||
@@ -57,6 +59,7 @@ rules are as follows:
|
|||||||
|
|
||||||
## Example
|
## Example
|
||||||
|
|
||||||
|
```ts
|
||||||
import { fromTableToBuffer, makeArrowTable } from "../arrow";
|
import { fromTableToBuffer, makeArrowTable } from "../arrow";
|
||||||
import { Field, FixedSizeList, Float16, Float32, Int32, Schema } from "apache-arrow";
|
import { Field, FixedSizeList, Float16, Float32, Int32, Schema } from "apache-arrow";
|
||||||
|
|
||||||
@@ -78,7 +81,6 @@ The `vectorColumns` option can be used to support other vector column
|
|||||||
names and data types.
|
names and data types.
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
|
|
||||||
const schema = new Schema([
|
const schema = new Schema([
|
||||||
new Field("a", new Float64()),
|
new Field("a", new Float64()),
|
||||||
new Field("b", new Float64()),
|
new Field("b", new Float64()),
|
||||||
@@ -97,8 +99,7 @@ const schema = new Schema([
|
|||||||
|
|
||||||
You can specify the vector column types and names using the options as well
|
You can specify the vector column types and names using the options as well
|
||||||
|
|
||||||
```typescript
|
```ts
|
||||||
|
|
||||||
const schema = new Schema([
|
const schema = new Schema([
|
||||||
new Field('a', new Float64()),
|
new Field('a', new Float64()),
|
||||||
new Field('b', new Float64()),
|
new Field('b', new Float64()),
|
||||||
|
|||||||
@@ -7,16 +7,14 @@
|
|||||||
## Namespaces
|
## Namespaces
|
||||||
|
|
||||||
- [embedding](namespaces/embedding/README.md)
|
- [embedding](namespaces/embedding/README.md)
|
||||||
|
- [rerankers](namespaces/rerankers/README.md)
|
||||||
## Enumerations
|
|
||||||
|
|
||||||
- [WriteMode](enumerations/WriteMode.md)
|
|
||||||
|
|
||||||
## Classes
|
## Classes
|
||||||
|
|
||||||
- [Connection](classes/Connection.md)
|
- [Connection](classes/Connection.md)
|
||||||
- [Index](classes/Index.md)
|
- [Index](classes/Index.md)
|
||||||
- [MakeArrowTableOptions](classes/MakeArrowTableOptions.md)
|
- [MakeArrowTableOptions](classes/MakeArrowTableOptions.md)
|
||||||
|
- [MergeInsertBuilder](classes/MergeInsertBuilder.md)
|
||||||
- [Query](classes/Query.md)
|
- [Query](classes/Query.md)
|
||||||
- [QueryBase](classes/QueryBase.md)
|
- [QueryBase](classes/QueryBase.md)
|
||||||
- [RecordBatchIterator](classes/RecordBatchIterator.md)
|
- [RecordBatchIterator](classes/RecordBatchIterator.md)
|
||||||
@@ -30,23 +28,39 @@
|
|||||||
- [AddDataOptions](interfaces/AddDataOptions.md)
|
- [AddDataOptions](interfaces/AddDataOptions.md)
|
||||||
- [ClientConfig](interfaces/ClientConfig.md)
|
- [ClientConfig](interfaces/ClientConfig.md)
|
||||||
- [ColumnAlteration](interfaces/ColumnAlteration.md)
|
- [ColumnAlteration](interfaces/ColumnAlteration.md)
|
||||||
|
- [CompactionStats](interfaces/CompactionStats.md)
|
||||||
- [ConnectionOptions](interfaces/ConnectionOptions.md)
|
- [ConnectionOptions](interfaces/ConnectionOptions.md)
|
||||||
- [CreateTableOptions](interfaces/CreateTableOptions.md)
|
- [CreateTableOptions](interfaces/CreateTableOptions.md)
|
||||||
- [ExecutableQuery](interfaces/ExecutableQuery.md)
|
- [ExecutableQuery](interfaces/ExecutableQuery.md)
|
||||||
|
- [FtsOptions](interfaces/FtsOptions.md)
|
||||||
|
- [FullTextSearchOptions](interfaces/FullTextSearchOptions.md)
|
||||||
|
- [HnswPqOptions](interfaces/HnswPqOptions.md)
|
||||||
|
- [HnswSqOptions](interfaces/HnswSqOptions.md)
|
||||||
- [IndexConfig](interfaces/IndexConfig.md)
|
- [IndexConfig](interfaces/IndexConfig.md)
|
||||||
- [IndexOptions](interfaces/IndexOptions.md)
|
- [IndexOptions](interfaces/IndexOptions.md)
|
||||||
- [IndexStatistics](interfaces/IndexStatistics.md)
|
- [IndexStatistics](interfaces/IndexStatistics.md)
|
||||||
- [IvfPqOptions](interfaces/IvfPqOptions.md)
|
- [IvfPqOptions](interfaces/IvfPqOptions.md)
|
||||||
|
- [OpenTableOptions](interfaces/OpenTableOptions.md)
|
||||||
- [OptimizeOptions](interfaces/OptimizeOptions.md)
|
- [OptimizeOptions](interfaces/OptimizeOptions.md)
|
||||||
|
- [OptimizeStats](interfaces/OptimizeStats.md)
|
||||||
|
- [QueryExecutionOptions](interfaces/QueryExecutionOptions.md)
|
||||||
|
- [RemovalStats](interfaces/RemovalStats.md)
|
||||||
- [RetryConfig](interfaces/RetryConfig.md)
|
- [RetryConfig](interfaces/RetryConfig.md)
|
||||||
- [TableNamesOptions](interfaces/TableNamesOptions.md)
|
- [TableNamesOptions](interfaces/TableNamesOptions.md)
|
||||||
- [TimeoutConfig](interfaces/TimeoutConfig.md)
|
- [TimeoutConfig](interfaces/TimeoutConfig.md)
|
||||||
- [UpdateOptions](interfaces/UpdateOptions.md)
|
- [UpdateOptions](interfaces/UpdateOptions.md)
|
||||||
- [WriteOptions](interfaces/WriteOptions.md)
|
- [Version](interfaces/Version.md)
|
||||||
|
|
||||||
## Type Aliases
|
## Type Aliases
|
||||||
|
|
||||||
- [Data](type-aliases/Data.md)
|
- [Data](type-aliases/Data.md)
|
||||||
|
- [DataLike](type-aliases/DataLike.md)
|
||||||
|
- [FieldLike](type-aliases/FieldLike.md)
|
||||||
|
- [IntoSql](type-aliases/IntoSql.md)
|
||||||
|
- [IntoVector](type-aliases/IntoVector.md)
|
||||||
|
- [RecordBatchLike](type-aliases/RecordBatchLike.md)
|
||||||
|
- [SchemaLike](type-aliases/SchemaLike.md)
|
||||||
|
- [TableLike](type-aliases/TableLike.md)
|
||||||
|
|
||||||
## Functions
|
## Functions
|
||||||
|
|
||||||
|
|||||||
@@ -8,6 +8,14 @@
|
|||||||
|
|
||||||
## Properties
|
## Properties
|
||||||
|
|
||||||
|
### extraHeaders?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional extraHeaders: Record<string, string>;
|
||||||
|
```
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### retryConfig?
|
### retryConfig?
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
|
|||||||
49
docs/src/js/interfaces/CompactionStats.md
Normal file
49
docs/src/js/interfaces/CompactionStats.md
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / CompactionStats
|
||||||
|
|
||||||
|
# Interface: CompactionStats
|
||||||
|
|
||||||
|
Statistics about a compaction operation.
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### filesAdded
|
||||||
|
|
||||||
|
```ts
|
||||||
|
filesAdded: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The number of new, compacted data files added
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### filesRemoved
|
||||||
|
|
||||||
|
```ts
|
||||||
|
filesRemoved: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The number of data files removed
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### fragmentsAdded
|
||||||
|
|
||||||
|
```ts
|
||||||
|
fragmentsAdded: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The number of new, compacted fragments added
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### fragmentsRemoved
|
||||||
|
|
||||||
|
```ts
|
||||||
|
fragmentsRemoved: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The number of fragments removed
|
||||||
@@ -8,7 +8,7 @@
|
|||||||
|
|
||||||
## Properties
|
## Properties
|
||||||
|
|
||||||
### dataStorageVersion?
|
### ~~dataStorageVersion?~~
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
optional dataStorageVersion: string;
|
optional dataStorageVersion: string;
|
||||||
@@ -19,6 +19,10 @@ The version of the data storage format to use.
|
|||||||
The default is `stable`.
|
The default is `stable`.
|
||||||
Set to "legacy" to use the old format.
|
Set to "legacy" to use the old format.
|
||||||
|
|
||||||
|
#### Deprecated
|
||||||
|
|
||||||
|
Pass `new_table_data_storage_version` to storageOptions instead.
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### embeddingFunction?
|
### embeddingFunction?
|
||||||
@@ -29,7 +33,7 @@ optional embeddingFunction: EmbeddingFunctionConfig;
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### enableV2ManifestPaths?
|
### ~~enableV2ManifestPaths?~~
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
optional enableV2ManifestPaths: boolean;
|
optional enableV2ManifestPaths: boolean;
|
||||||
@@ -41,6 +45,10 @@ turning this on will make the dataset unreadable for older versions
|
|||||||
of LanceDB (prior to 0.10.0). To migrate an existing dataset, instead
|
of LanceDB (prior to 0.10.0). To migrate an existing dataset, instead
|
||||||
use the LocalTable#migrateManifestPathsV2 method.
|
use the LocalTable#migrateManifestPathsV2 method.
|
||||||
|
|
||||||
|
#### Deprecated
|
||||||
|
|
||||||
|
Pass `new_table_enable_v2_manifest_paths` to storageOptions instead.
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### existOk
|
### existOk
|
||||||
@@ -90,17 +98,3 @@ Options already set on the connection will be inherited by the table,
|
|||||||
but can be overridden here.
|
but can be overridden here.
|
||||||
|
|
||||||
The available options are described at https://lancedb.github.io/lancedb/guides/storage/
|
The available options are described at https://lancedb.github.io/lancedb/guides/storage/
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### useLegacyFormat?
|
|
||||||
|
|
||||||
```ts
|
|
||||||
optional useLegacyFormat: boolean;
|
|
||||||
```
|
|
||||||
|
|
||||||
If true then data files will be written with the legacy format
|
|
||||||
|
|
||||||
The default is false.
|
|
||||||
|
|
||||||
Deprecated. Use data storage version instead.
|
|
||||||
|
|||||||
103
docs/src/js/interfaces/FtsOptions.md
Normal file
103
docs/src/js/interfaces/FtsOptions.md
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / FtsOptions
|
||||||
|
|
||||||
|
# Interface: FtsOptions
|
||||||
|
|
||||||
|
Options to create a full text search index
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### asciiFolding?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional asciiFolding: boolean;
|
||||||
|
```
|
||||||
|
|
||||||
|
whether to remove punctuation
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### baseTokenizer?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional baseTokenizer: "raw" | "simple" | "whitespace";
|
||||||
|
```
|
||||||
|
|
||||||
|
The tokenizer to use when building the index.
|
||||||
|
The default is "simple".
|
||||||
|
|
||||||
|
The following tokenizers are available:
|
||||||
|
|
||||||
|
"simple" - Simple tokenizer. This tokenizer splits the text into tokens using whitespace and punctuation as a delimiter.
|
||||||
|
|
||||||
|
"whitespace" - Whitespace tokenizer. This tokenizer splits the text into tokens using whitespace as a delimiter.
|
||||||
|
|
||||||
|
"raw" - Raw tokenizer. This tokenizer does not split the text into tokens and indexes the entire text as a single token.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### language?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional language: string;
|
||||||
|
```
|
||||||
|
|
||||||
|
language for stemming and stop words
|
||||||
|
this is only used when `stem` or `remove_stop_words` is true
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### lowercase?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional lowercase: boolean;
|
||||||
|
```
|
||||||
|
|
||||||
|
whether to lowercase tokens
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### maxTokenLength?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional maxTokenLength: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
maximum token length
|
||||||
|
tokens longer than this length will be ignored
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### removeStopWords?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional removeStopWords: boolean;
|
||||||
|
```
|
||||||
|
|
||||||
|
whether to remove stop words
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### stem?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional stem: boolean;
|
||||||
|
```
|
||||||
|
|
||||||
|
whether to stem tokens
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### withPosition?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional withPosition: boolean;
|
||||||
|
```
|
||||||
|
|
||||||
|
Whether to build the index with positions.
|
||||||
|
True by default.
|
||||||
|
If set to false, the index will not store the positions of the tokens in the text,
|
||||||
|
which will make the index smaller and faster to build, but will not support phrase queries.
|
||||||
22
docs/src/js/interfaces/FullTextSearchOptions.md
Normal file
22
docs/src/js/interfaces/FullTextSearchOptions.md
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / FullTextSearchOptions
|
||||||
|
|
||||||
|
# Interface: FullTextSearchOptions
|
||||||
|
|
||||||
|
Options that control the behavior of a full text search
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### columns?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional columns: string | string[];
|
||||||
|
```
|
||||||
|
|
||||||
|
The columns to search
|
||||||
|
|
||||||
|
If not specified, all indexed columns will be searched.
|
||||||
|
For now, only one column can be searched.
|
||||||
149
docs/src/js/interfaces/HnswPqOptions.md
Normal file
149
docs/src/js/interfaces/HnswPqOptions.md
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / HnswPqOptions
|
||||||
|
|
||||||
|
# Interface: HnswPqOptions
|
||||||
|
|
||||||
|
Options to create an `HNSW_PQ` index
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### distanceType?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional distanceType: "l2" | "cosine" | "dot";
|
||||||
|
```
|
||||||
|
|
||||||
|
The distance metric used to train the index.
|
||||||
|
|
||||||
|
Default value is "l2".
|
||||||
|
|
||||||
|
The following distance types are available:
|
||||||
|
|
||||||
|
"l2" - Euclidean distance. This is a very common distance metric that
|
||||||
|
accounts for both magnitude and direction when determining the distance
|
||||||
|
between vectors. L2 distance has a range of [0, ∞).
|
||||||
|
|
||||||
|
"cosine" - Cosine distance. Cosine distance is a distance metric
|
||||||
|
calculated from the cosine similarity between two vectors. Cosine
|
||||||
|
similarity is a measure of similarity between two non-zero vectors of an
|
||||||
|
inner product space. It is defined to equal the cosine of the angle
|
||||||
|
between them. Unlike L2, the cosine distance is not affected by the
|
||||||
|
magnitude of the vectors. Cosine distance has a range of [0, 2].
|
||||||
|
|
||||||
|
"dot" - Dot product. Dot distance is the dot product of two vectors. Dot
|
||||||
|
distance has a range of (-∞, ∞). If the vectors are normalized (i.e. their
|
||||||
|
L2 norm is 1), then dot distance is equivalent to the cosine distance.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### efConstruction?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional efConstruction: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The number of candidates to evaluate during the construction of the HNSW graph.
|
||||||
|
|
||||||
|
The default value is 300.
|
||||||
|
|
||||||
|
This value controls the tradeoff between build speed and accuracy.
|
||||||
|
The higher the value the more accurate the build but the slower it will be.
|
||||||
|
150 to 300 is the typical range. 100 is a minimum for good quality search
|
||||||
|
results. In most cases, there is no benefit to setting this higher than 500.
|
||||||
|
This value should be set to a value that is not less than `ef` in the search phase.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### m?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional m: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The number of neighbors to select for each vector in the HNSW graph.
|
||||||
|
|
||||||
|
The default value is 20.
|
||||||
|
|
||||||
|
This value controls the tradeoff between search speed and accuracy.
|
||||||
|
The higher the value the more accurate the search but the slower it will be.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### maxIterations?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional maxIterations: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
Max iterations to train kmeans.
|
||||||
|
|
||||||
|
The default value is 50.
|
||||||
|
|
||||||
|
When training an IVF index we use kmeans to calculate the partitions. This parameter
|
||||||
|
controls how many iterations of kmeans to run.
|
||||||
|
|
||||||
|
Increasing this might improve the quality of the index but in most cases the parameter
|
||||||
|
is unused because kmeans will converge with fewer iterations. The parameter is only
|
||||||
|
used in cases where kmeans does not appear to converge. In those cases it is unlikely
|
||||||
|
that setting this larger will lead to the index converging anyways.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### numPartitions?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional numPartitions: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The number of IVF partitions to create.
|
||||||
|
|
||||||
|
For HNSW, we recommend a small number of partitions. Setting this to 1 works
|
||||||
|
well for most tables. For very large tables, training just one HNSW graph
|
||||||
|
will require too much memory. Each partition becomes its own HNSW graph, so
|
||||||
|
setting this value higher reduces the peak memory use of training.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### numSubVectors?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional numSubVectors: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
Number of sub-vectors of PQ.
|
||||||
|
|
||||||
|
This value controls how much the vector is compressed during the quantization step.
|
||||||
|
The more sub vectors there are the less the vector is compressed. The default is
|
||||||
|
the dimension of the vector divided by 16. If the dimension is not evenly divisible
|
||||||
|
by 16 we use the dimension divded by 8.
|
||||||
|
|
||||||
|
The above two cases are highly preferred. Having 8 or 16 values per subvector allows
|
||||||
|
us to use efficient SIMD instructions.
|
||||||
|
|
||||||
|
If the dimension is not visible by 8 then we use 1 subvector. This is not ideal and
|
||||||
|
will likely result in poor performance.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### sampleRate?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional sampleRate: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The rate used to calculate the number of training vectors for kmeans.
|
||||||
|
|
||||||
|
Default value is 256.
|
||||||
|
|
||||||
|
When an IVF index is trained, we need to calculate partitions. These are groups
|
||||||
|
of vectors that are similar to each other. To do this we use an algorithm called kmeans.
|
||||||
|
|
||||||
|
Running kmeans on a large dataset can be slow. To speed this up we run kmeans on a
|
||||||
|
random sample of the data. This parameter controls the size of the sample. The total
|
||||||
|
number of vectors used to train the index is `sample_rate * num_partitions`.
|
||||||
|
|
||||||
|
Increasing this value might improve the quality of the index but in most cases the
|
||||||
|
default should be sufficient.
|
||||||
128
docs/src/js/interfaces/HnswSqOptions.md
Normal file
128
docs/src/js/interfaces/HnswSqOptions.md
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / HnswSqOptions
|
||||||
|
|
||||||
|
# Interface: HnswSqOptions
|
||||||
|
|
||||||
|
Options to create an `HNSW_SQ` index
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### distanceType?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional distanceType: "l2" | "cosine" | "dot";
|
||||||
|
```
|
||||||
|
|
||||||
|
The distance metric used to train the index.
|
||||||
|
|
||||||
|
Default value is "l2".
|
||||||
|
|
||||||
|
The following distance types are available:
|
||||||
|
|
||||||
|
"l2" - Euclidean distance. This is a very common distance metric that
|
||||||
|
accounts for both magnitude and direction when determining the distance
|
||||||
|
between vectors. L2 distance has a range of [0, ∞).
|
||||||
|
|
||||||
|
"cosine" - Cosine distance. Cosine distance is a distance metric
|
||||||
|
calculated from the cosine similarity between two vectors. Cosine
|
||||||
|
similarity is a measure of similarity between two non-zero vectors of an
|
||||||
|
inner product space. It is defined to equal the cosine of the angle
|
||||||
|
between them. Unlike L2, the cosine distance is not affected by the
|
||||||
|
magnitude of the vectors. Cosine distance has a range of [0, 2].
|
||||||
|
|
||||||
|
"dot" - Dot product. Dot distance is the dot product of two vectors. Dot
|
||||||
|
distance has a range of (-∞, ∞). If the vectors are normalized (i.e. their
|
||||||
|
L2 norm is 1), then dot distance is equivalent to the cosine distance.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### efConstruction?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional efConstruction: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The number of candidates to evaluate during the construction of the HNSW graph.
|
||||||
|
|
||||||
|
The default value is 300.
|
||||||
|
|
||||||
|
This value controls the tradeoff between build speed and accuracy.
|
||||||
|
The higher the value the more accurate the build but the slower it will be.
|
||||||
|
150 to 300 is the typical range. 100 is a minimum for good quality search
|
||||||
|
results. In most cases, there is no benefit to setting this higher than 500.
|
||||||
|
This value should be set to a value that is not less than `ef` in the search phase.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### m?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional m: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The number of neighbors to select for each vector in the HNSW graph.
|
||||||
|
|
||||||
|
The default value is 20.
|
||||||
|
|
||||||
|
This value controls the tradeoff between search speed and accuracy.
|
||||||
|
The higher the value the more accurate the search but the slower it will be.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### maxIterations?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional maxIterations: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
Max iterations to train kmeans.
|
||||||
|
|
||||||
|
The default value is 50.
|
||||||
|
|
||||||
|
When training an IVF index we use kmeans to calculate the partitions. This parameter
|
||||||
|
controls how many iterations of kmeans to run.
|
||||||
|
|
||||||
|
Increasing this might improve the quality of the index but in most cases the parameter
|
||||||
|
is unused because kmeans will converge with fewer iterations. The parameter is only
|
||||||
|
used in cases where kmeans does not appear to converge. In those cases it is unlikely
|
||||||
|
that setting this larger will lead to the index converging anyways.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### numPartitions?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional numPartitions: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The number of IVF partitions to create.
|
||||||
|
|
||||||
|
For HNSW, we recommend a small number of partitions. Setting this to 1 works
|
||||||
|
well for most tables. For very large tables, training just one HNSW graph
|
||||||
|
will require too much memory. Each partition becomes its own HNSW graph, so
|
||||||
|
setting this value higher reduces the peak memory use of training.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### sampleRate?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional sampleRate: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The rate used to calculate the number of training vectors for kmeans.
|
||||||
|
|
||||||
|
Default value is 256.
|
||||||
|
|
||||||
|
When an IVF index is trained, we need to calculate partitions. These are groups
|
||||||
|
of vectors that are similar to each other. To do this we use an algorithm called kmeans.
|
||||||
|
|
||||||
|
Running kmeans on a large dataset can be slow. To speed this up we run kmeans on a
|
||||||
|
random sample of the data. This parameter controls the size of the sample. The total
|
||||||
|
number of vectors used to train the index is `sample_rate * num_partitions`.
|
||||||
|
|
||||||
|
Increasing this value might improve the quality of the index but in most cases the
|
||||||
|
default should be sufficient.
|
||||||
@@ -68,6 +68,21 @@ The default value is 50.
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### numBits?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional numBits: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
Number of bits per sub-vector.
|
||||||
|
|
||||||
|
This value controls how much each subvector is compressed. The more bits the more
|
||||||
|
accurate the index will be but the slower search. The default is 8 bits.
|
||||||
|
|
||||||
|
The number of bits must be 4 or 8.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### numPartitions?
|
### numPartitions?
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
|
|||||||
40
docs/src/js/interfaces/OpenTableOptions.md
Normal file
40
docs/src/js/interfaces/OpenTableOptions.md
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / OpenTableOptions
|
||||||
|
|
||||||
|
# Interface: OpenTableOptions
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### indexCacheSize?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional indexCacheSize: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
Set the size of the index cache, specified as a number of entries
|
||||||
|
|
||||||
|
The exact meaning of an "entry" will depend on the type of index:
|
||||||
|
- IVF: there is one entry for each IVF partition
|
||||||
|
- BTREE: there is one entry for the entire index
|
||||||
|
|
||||||
|
This cache applies to the entire opened table, across all indices.
|
||||||
|
Setting this value higher will increase performance on larger datasets
|
||||||
|
at the expense of more RAM
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### storageOptions?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional storageOptions: Record<string, string>;
|
||||||
|
```
|
||||||
|
|
||||||
|
Configuration for object storage.
|
||||||
|
|
||||||
|
Options already set on the connection will be inherited by the table,
|
||||||
|
but can be overridden here.
|
||||||
|
|
||||||
|
The available options are described at https://lancedb.github.io/lancedb/guides/storage/
|
||||||
29
docs/src/js/interfaces/OptimizeStats.md
Normal file
29
docs/src/js/interfaces/OptimizeStats.md
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / OptimizeStats
|
||||||
|
|
||||||
|
# Interface: OptimizeStats
|
||||||
|
|
||||||
|
Statistics about an optimize operation
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### compaction
|
||||||
|
|
||||||
|
```ts
|
||||||
|
compaction: CompactionStats;
|
||||||
|
```
|
||||||
|
|
||||||
|
Statistics about the compaction operation
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### prune
|
||||||
|
|
||||||
|
```ts
|
||||||
|
prune: RemovalStats;
|
||||||
|
```
|
||||||
|
|
||||||
|
Statistics about the removal operation
|
||||||
22
docs/src/js/interfaces/QueryExecutionOptions.md
Normal file
22
docs/src/js/interfaces/QueryExecutionOptions.md
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / QueryExecutionOptions
|
||||||
|
|
||||||
|
# Interface: QueryExecutionOptions
|
||||||
|
|
||||||
|
Options that control the behavior of a particular query execution
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### maxBatchLength?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional maxBatchLength: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The maximum number of rows to return in a single batch
|
||||||
|
|
||||||
|
Batches may have fewer rows if the underlying data is stored
|
||||||
|
in smaller chunks.
|
||||||
29
docs/src/js/interfaces/RemovalStats.md
Normal file
29
docs/src/js/interfaces/RemovalStats.md
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / RemovalStats
|
||||||
|
|
||||||
|
# Interface: RemovalStats
|
||||||
|
|
||||||
|
Statistics about a cleanup operation
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### bytesRemoved
|
||||||
|
|
||||||
|
```ts
|
||||||
|
bytesRemoved: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The number of bytes removed
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### oldVersionsRemoved
|
||||||
|
|
||||||
|
```ts
|
||||||
|
oldVersionsRemoved: number;
|
||||||
|
```
|
||||||
|
|
||||||
|
The number of old versions removed
|
||||||
31
docs/src/js/interfaces/Version.md
Normal file
31
docs/src/js/interfaces/Version.md
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / Version
|
||||||
|
|
||||||
|
# Interface: Version
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### metadata
|
||||||
|
|
||||||
|
```ts
|
||||||
|
metadata: Record<string, string>;
|
||||||
|
```
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### timestamp
|
||||||
|
|
||||||
|
```ts
|
||||||
|
timestamp: Date;
|
||||||
|
```
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### version
|
||||||
|
|
||||||
|
```ts
|
||||||
|
version: number;
|
||||||
|
```
|
||||||
@@ -1,19 +0,0 @@
|
|||||||
[**@lancedb/lancedb**](../README.md) • **Docs**
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
[@lancedb/lancedb](../globals.md) / WriteOptions
|
|
||||||
|
|
||||||
# Interface: WriteOptions
|
|
||||||
|
|
||||||
Write options when creating a Table.
|
|
||||||
|
|
||||||
## Properties
|
|
||||||
|
|
||||||
### mode?
|
|
||||||
|
|
||||||
```ts
|
|
||||||
optional mode: WriteMode;
|
|
||||||
```
|
|
||||||
|
|
||||||
Write mode for writing to a table.
|
|
||||||
@@ -17,6 +17,14 @@
|
|||||||
### Interfaces
|
### Interfaces
|
||||||
|
|
||||||
- [EmbeddingFunctionConfig](interfaces/EmbeddingFunctionConfig.md)
|
- [EmbeddingFunctionConfig](interfaces/EmbeddingFunctionConfig.md)
|
||||||
|
- [EmbeddingFunctionConstructor](interfaces/EmbeddingFunctionConstructor.md)
|
||||||
|
- [EmbeddingFunctionCreate](interfaces/EmbeddingFunctionCreate.md)
|
||||||
|
- [FieldOptions](interfaces/FieldOptions.md)
|
||||||
|
- [FunctionOptions](interfaces/FunctionOptions.md)
|
||||||
|
|
||||||
|
### Type Aliases
|
||||||
|
|
||||||
|
- [CreateReturnType](type-aliases/CreateReturnType.md)
|
||||||
|
|
||||||
### Functions
|
### Functions
|
||||||
|
|
||||||
|
|||||||
@@ -8,6 +8,23 @@
|
|||||||
|
|
||||||
An embedding function that automatically creates vector representation for a given column.
|
An embedding function that automatically creates vector representation for a given column.
|
||||||
|
|
||||||
|
It's important subclasses pass the **original** options to the super constructor
|
||||||
|
and then pass those options to `resolveVariables` to resolve any variables before
|
||||||
|
using them.
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
```ts
|
||||||
|
class MyEmbeddingFunction extends EmbeddingFunction {
|
||||||
|
constructor(options: {model: string, timeout: number}) {
|
||||||
|
super(optionsRaw);
|
||||||
|
const options = this.resolveVariables(optionsRaw);
|
||||||
|
this.model = options.model;
|
||||||
|
this.timeout = options.timeout;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
## Extended by
|
## Extended by
|
||||||
|
|
||||||
- [`TextEmbeddingFunction`](TextEmbeddingFunction.md)
|
- [`TextEmbeddingFunction`](TextEmbeddingFunction.md)
|
||||||
@@ -16,7 +33,7 @@ An embedding function that automatically creates vector representation for a giv
|
|||||||
|
|
||||||
• **T** = `any`
|
• **T** = `any`
|
||||||
|
|
||||||
• **M** *extends* `FunctionOptions` = `FunctionOptions`
|
• **M** *extends* [`FunctionOptions`](../interfaces/FunctionOptions.md) = [`FunctionOptions`](../interfaces/FunctionOptions.md)
|
||||||
|
|
||||||
## Constructors
|
## Constructors
|
||||||
|
|
||||||
@@ -82,12 +99,33 @@ The datatype of the embeddings
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### getSensitiveKeys()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
protected getSensitiveKeys(): string[]
|
||||||
|
```
|
||||||
|
|
||||||
|
Provide a list of keys in the function options that should be treated as
|
||||||
|
sensitive. If users pass raw values for these keys, they will be rejected.
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`string`[]
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### init()?
|
### init()?
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
optional init(): Promise<void>
|
optional init(): Promise<void>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Optionally load any resources needed for the embedding function.
|
||||||
|
|
||||||
|
This method is called after the embedding function has been initialized
|
||||||
|
but before any embeddings are computed. It is useful for loading local models
|
||||||
|
or other resources that are needed for the embedding function to work.
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`Promise`<`void`>
|
`Promise`<`void`>
|
||||||
@@ -108,6 +146,24 @@ The number of dimensions of the embeddings
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### resolveVariables()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
protected resolveVariables(config): Partial<M>
|
||||||
|
```
|
||||||
|
|
||||||
|
Apply variables to the config.
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **config**: `Partial`<`M`>
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`Partial`<`M`>
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### sourceField()
|
### sourceField()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -118,53 +174,31 @@ sourceField is used in combination with `LanceSchema` to provide a declarative d
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **optionsOrDatatype**: `DataType`<`Type`, `any`> \| `Partial`<`FieldOptions`<`DataType`<`Type`, `any`>>>
|
* **optionsOrDatatype**: `DataType`<`Type`, `any`> \| `Partial`<[`FieldOptions`](../interfaces/FieldOptions.md)<`DataType`<`Type`, `any`>>>
|
||||||
The options for the field or the datatype
|
The options for the field or the datatype
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
[`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, `FunctionOptions`>>]
|
[`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)>>]
|
||||||
|
|
||||||
#### See
|
#### See
|
||||||
|
|
||||||
lancedb.LanceSchema
|
[LanceSchema](../functions/LanceSchema.md)
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
### toJSON()
|
### toJSON()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
abstract toJSON(): Partial<M>
|
toJSON(): Record<string, any>
|
||||||
```
|
```
|
||||||
|
|
||||||
Convert the embedding function to a JSON object
|
Get the original arguments to the constructor, to serialize them so they
|
||||||
It is used to serialize the embedding function to the schema
|
can be used to recreate the embedding function later.
|
||||||
It's important that any object returned by this method contains all the necessary
|
|
||||||
information to recreate the embedding function
|
|
||||||
|
|
||||||
It should return the same object that was passed to the constructor
|
|
||||||
If it does not, the embedding function will not be able to be recreated, or could be recreated incorrectly
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`Partial`<`M`>
|
`Record`<`string`, `any`>
|
||||||
|
|
||||||
#### Example
|
|
||||||
|
|
||||||
```ts
|
|
||||||
class MyEmbeddingFunction extends EmbeddingFunction {
|
|
||||||
constructor(options: {model: string, timeout: number}) {
|
|
||||||
super();
|
|
||||||
this.model = options.model;
|
|
||||||
this.timeout = options.timeout;
|
|
||||||
}
|
|
||||||
toJSON() {
|
|
||||||
return {
|
|
||||||
model: this.model,
|
|
||||||
timeout: this.timeout,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
@@ -178,12 +212,13 @@ vectorField is used in combination with `LanceSchema` to provide a declarative d
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **optionsOrDatatype?**: `DataType`<`Type`, `any`> \| `Partial`<`FieldOptions`<`DataType`<`Type`, `any`>>>
|
* **optionsOrDatatype?**: `DataType`<`Type`, `any`> \| `Partial`<[`FieldOptions`](../interfaces/FieldOptions.md)<`DataType`<`Type`, `any`>>>
|
||||||
|
The options for the field
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
[`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, `FunctionOptions`>>]
|
[`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)>>]
|
||||||
|
|
||||||
#### See
|
#### See
|
||||||
|
|
||||||
lancedb.LanceSchema
|
[LanceSchema](../functions/LanceSchema.md)
|
||||||
|
|||||||
@@ -51,7 +51,7 @@ Fetch an embedding function by name
|
|||||||
|
|
||||||
#### Type Parameters
|
#### Type Parameters
|
||||||
|
|
||||||
• **T** *extends* [`EmbeddingFunction`](EmbeddingFunction.md)<`unknown`, `FunctionOptions`>
|
• **T** *extends* [`EmbeddingFunction`](EmbeddingFunction.md)<`unknown`, [`FunctionOptions`](../interfaces/FunctionOptions.md)>
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
@@ -60,7 +60,7 @@ Fetch an embedding function by name
|
|||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`undefined` \| `EmbeddingFunctionCreate`<`T`>
|
`undefined` \| [`EmbeddingFunctionCreate`](../interfaces/EmbeddingFunctionCreate.md)<`T`>
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
@@ -80,6 +80,28 @@ getTableMetadata(functions): Map<string, string>
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### getVar()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
getVar(name): undefined | string
|
||||||
|
```
|
||||||
|
|
||||||
|
Get a variable.
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **name**: `string`
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`undefined` \| `string`
|
||||||
|
|
||||||
|
#### See
|
||||||
|
|
||||||
|
[setVar](EmbeddingFunctionRegistry.md#setvar)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### length()
|
### length()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -104,7 +126,7 @@ Register an embedding function
|
|||||||
|
|
||||||
#### Type Parameters
|
#### Type Parameters
|
||||||
|
|
||||||
• **T** *extends* `EmbeddingFunctionConstructor`<[`EmbeddingFunction`](EmbeddingFunction.md)<`any`, `FunctionOptions`>> = `EmbeddingFunctionConstructor`<[`EmbeddingFunction`](EmbeddingFunction.md)<`any`, `FunctionOptions`>>
|
• **T** *extends* [`EmbeddingFunctionConstructor`](../interfaces/EmbeddingFunctionConstructor.md)<[`EmbeddingFunction`](EmbeddingFunction.md)<`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)>> = [`EmbeddingFunctionConstructor`](../interfaces/EmbeddingFunctionConstructor.md)<[`EmbeddingFunction`](EmbeddingFunction.md)<`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)>>
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
@@ -145,3 +167,31 @@ reset the registry to the initial state
|
|||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`void`
|
`void`
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### setVar()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
setVar(name, value): void
|
||||||
|
```
|
||||||
|
|
||||||
|
Set a variable. These can be accessed in the embedding function
|
||||||
|
configuration using the syntax `$var:variable_name`. If they are not
|
||||||
|
set, an error will be thrown letting you know which key is unset. If you
|
||||||
|
want to supply a default value, you can add an additional part in the
|
||||||
|
configuration like so: `$var:variable_name:default_value`. Default values
|
||||||
|
can be used for runtime configurations that are not sensitive, such as
|
||||||
|
whether to use a GPU for inference.
|
||||||
|
|
||||||
|
The name must not contain colons. The default value can contain colons.
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **name**: `string`
|
||||||
|
|
||||||
|
* **value**: `string`
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`void`
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ an abstract class for implementing embedding functions that take text as input
|
|||||||
|
|
||||||
## Type Parameters
|
## Type Parameters
|
||||||
|
|
||||||
• **M** *extends* `FunctionOptions` = `FunctionOptions`
|
• **M** *extends* [`FunctionOptions`](../interfaces/FunctionOptions.md) = [`FunctionOptions`](../interfaces/FunctionOptions.md)
|
||||||
|
|
||||||
## Constructors
|
## Constructors
|
||||||
|
|
||||||
@@ -114,12 +114,37 @@ abstract generateEmbeddings(texts, ...args): Promise<number[][] | Float32Array[]
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### getSensitiveKeys()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
protected getSensitiveKeys(): string[]
|
||||||
|
```
|
||||||
|
|
||||||
|
Provide a list of keys in the function options that should be treated as
|
||||||
|
sensitive. If users pass raw values for these keys, they will be rejected.
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`string`[]
|
||||||
|
|
||||||
|
#### Inherited from
|
||||||
|
|
||||||
|
[`EmbeddingFunction`](EmbeddingFunction.md).[`getSensitiveKeys`](EmbeddingFunction.md#getsensitivekeys)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### init()?
|
### init()?
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
optional init(): Promise<void>
|
optional init(): Promise<void>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Optionally load any resources needed for the embedding function.
|
||||||
|
|
||||||
|
This method is called after the embedding function has been initialized
|
||||||
|
but before any embeddings are computed. It is useful for loading local models
|
||||||
|
or other resources that are needed for the embedding function to work.
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`Promise`<`void`>
|
`Promise`<`void`>
|
||||||
@@ -148,6 +173,28 @@ The number of dimensions of the embeddings
|
|||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
|
### resolveVariables()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
protected resolveVariables(config): Partial<M>
|
||||||
|
```
|
||||||
|
|
||||||
|
Apply variables to the config.
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **config**: `Partial`<`M`>
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`Partial`<`M`>
|
||||||
|
|
||||||
|
#### Inherited from
|
||||||
|
|
||||||
|
[`EmbeddingFunction`](EmbeddingFunction.md).[`resolveVariables`](EmbeddingFunction.md#resolvevariables)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
### sourceField()
|
### sourceField()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
@@ -158,11 +205,11 @@ sourceField is used in combination with `LanceSchema` to provide a declarative d
|
|||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
[`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, `FunctionOptions`>>]
|
[`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)>>]
|
||||||
|
|
||||||
#### See
|
#### See
|
||||||
|
|
||||||
lancedb.LanceSchema
|
[LanceSchema](../functions/LanceSchema.md)
|
||||||
|
|
||||||
#### Overrides
|
#### Overrides
|
||||||
|
|
||||||
@@ -173,37 +220,15 @@ lancedb.LanceSchema
|
|||||||
### toJSON()
|
### toJSON()
|
||||||
|
|
||||||
```ts
|
```ts
|
||||||
abstract toJSON(): Partial<M>
|
toJSON(): Record<string, any>
|
||||||
```
|
```
|
||||||
|
|
||||||
Convert the embedding function to a JSON object
|
Get the original arguments to the constructor, to serialize them so they
|
||||||
It is used to serialize the embedding function to the schema
|
can be used to recreate the embedding function later.
|
||||||
It's important that any object returned by this method contains all the necessary
|
|
||||||
information to recreate the embedding function
|
|
||||||
|
|
||||||
It should return the same object that was passed to the constructor
|
|
||||||
If it does not, the embedding function will not be able to be recreated, or could be recreated incorrectly
|
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
`Partial`<`M`>
|
`Record`<`string`, `any`>
|
||||||
|
|
||||||
#### Example
|
|
||||||
|
|
||||||
```ts
|
|
||||||
class MyEmbeddingFunction extends EmbeddingFunction {
|
|
||||||
constructor(options: {model: string, timeout: number}) {
|
|
||||||
super();
|
|
||||||
this.model = options.model;
|
|
||||||
this.timeout = options.timeout;
|
|
||||||
}
|
|
||||||
toJSON() {
|
|
||||||
return {
|
|
||||||
model: this.model,
|
|
||||||
timeout: this.timeout,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Inherited from
|
#### Inherited from
|
||||||
|
|
||||||
@@ -221,15 +246,16 @@ vectorField is used in combination with `LanceSchema` to provide a declarative d
|
|||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
|
|
||||||
* **optionsOrDatatype?**: `DataType`<`Type`, `any`> \| `Partial`<`FieldOptions`<`DataType`<`Type`, `any`>>>
|
* **optionsOrDatatype?**: `DataType`<`Type`, `any`> \| `Partial`<[`FieldOptions`](../interfaces/FieldOptions.md)<`DataType`<`Type`, `any`>>>
|
||||||
|
The options for the field
|
||||||
|
|
||||||
#### Returns
|
#### Returns
|
||||||
|
|
||||||
[`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, `FunctionOptions`>>]
|
[`DataType`<`Type`, `any`>, `Map`<`string`, [`EmbeddingFunction`](EmbeddingFunction.md)<`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)>>]
|
||||||
|
|
||||||
#### See
|
#### See
|
||||||
|
|
||||||
lancedb.LanceSchema
|
[LanceSchema](../functions/LanceSchema.md)
|
||||||
|
|
||||||
#### Inherited from
|
#### Inherited from
|
||||||
|
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ Create a schema with embedding functions.
|
|||||||
|
|
||||||
## Parameters
|
## Parameters
|
||||||
|
|
||||||
* **fields**: `Record`<`string`, `object` \| [`object`, `Map`<`string`, [`EmbeddingFunction`](../classes/EmbeddingFunction.md)<`any`, `FunctionOptions`>>]>
|
* **fields**: `Record`<`string`, `object` \| [`object`, `Map`<`string`, [`EmbeddingFunction`](../classes/EmbeddingFunction.md)<`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)>>]>
|
||||||
|
|
||||||
## Returns
|
## Returns
|
||||||
|
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ function register(name?): (ctor) => any
|
|||||||
|
|
||||||
### Parameters
|
### Parameters
|
||||||
|
|
||||||
* **ctor**: `EmbeddingFunctionConstructor`<[`EmbeddingFunction`](../classes/EmbeddingFunction.md)<`any`, `FunctionOptions`>>
|
* **ctor**: [`EmbeddingFunctionConstructor`](../interfaces/EmbeddingFunctionConstructor.md)<[`EmbeddingFunction`](../classes/EmbeddingFunction.md)<`any`, [`FunctionOptions`](../interfaces/FunctionOptions.md)>>
|
||||||
|
|
||||||
### Returns
|
### Returns
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,27 @@
|
|||||||
|
[**@lancedb/lancedb**](../../../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../../../globals.md) / [embedding](../README.md) / EmbeddingFunctionConstructor
|
||||||
|
|
||||||
|
# Interface: EmbeddingFunctionConstructor<T>
|
||||||
|
|
||||||
|
## Type Parameters
|
||||||
|
|
||||||
|
• **T** *extends* [`EmbeddingFunction`](../classes/EmbeddingFunction.md) = [`EmbeddingFunction`](../classes/EmbeddingFunction.md)
|
||||||
|
|
||||||
|
## Constructors
|
||||||
|
|
||||||
|
### new EmbeddingFunctionConstructor()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
new EmbeddingFunctionConstructor(modelOptions?): T
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **modelOptions?**: `T`\[`"TOptions"`\]
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`T`
|
||||||
@@ -0,0 +1,27 @@
|
|||||||
|
[**@lancedb/lancedb**](../../../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../../../globals.md) / [embedding](../README.md) / EmbeddingFunctionCreate
|
||||||
|
|
||||||
|
# Interface: EmbeddingFunctionCreate<T>
|
||||||
|
|
||||||
|
## Type Parameters
|
||||||
|
|
||||||
|
• **T** *extends* [`EmbeddingFunction`](../classes/EmbeddingFunction.md)
|
||||||
|
|
||||||
|
## Methods
|
||||||
|
|
||||||
|
### create()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
create(options?): CreateReturnType<T>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **options?**: `T`\[`"TOptions"`\]
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
[`CreateReturnType`](../type-aliases/CreateReturnType.md)<`T`>
|
||||||
27
docs/src/js/namespaces/embedding/interfaces/FieldOptions.md
Normal file
27
docs/src/js/namespaces/embedding/interfaces/FieldOptions.md
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
[**@lancedb/lancedb**](../../../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../../../globals.md) / [embedding](../README.md) / FieldOptions
|
||||||
|
|
||||||
|
# Interface: FieldOptions<T>
|
||||||
|
|
||||||
|
## Type Parameters
|
||||||
|
|
||||||
|
• **T** *extends* `DataType` = `DataType`
|
||||||
|
|
||||||
|
## Properties
|
||||||
|
|
||||||
|
### datatype
|
||||||
|
|
||||||
|
```ts
|
||||||
|
datatype: T;
|
||||||
|
```
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### dims?
|
||||||
|
|
||||||
|
```ts
|
||||||
|
optional dims: number;
|
||||||
|
```
|
||||||
@@ -0,0 +1,13 @@
|
|||||||
|
[**@lancedb/lancedb**](../../../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../../../globals.md) / [embedding](../README.md) / FunctionOptions
|
||||||
|
|
||||||
|
# Interface: FunctionOptions
|
||||||
|
|
||||||
|
Options for a given embedding function
|
||||||
|
|
||||||
|
## Indexable
|
||||||
|
|
||||||
|
\[`key`: `string`\]: `any`
|
||||||
@@ -0,0 +1,15 @@
|
|||||||
|
[**@lancedb/lancedb**](../../../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../../../globals.md) / [embedding](../README.md) / CreateReturnType
|
||||||
|
|
||||||
|
# Type Alias: CreateReturnType<T>
|
||||||
|
|
||||||
|
```ts
|
||||||
|
type CreateReturnType<T>: T extends object ? Promise<T> : T;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Type Parameters
|
||||||
|
|
||||||
|
• **T**
|
||||||
17
docs/src/js/namespaces/rerankers/README.md
Normal file
17
docs/src/js/namespaces/rerankers/README.md
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
[**@lancedb/lancedb**](../../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../../globals.md) / rerankers
|
||||||
|
|
||||||
|
# rerankers
|
||||||
|
|
||||||
|
## Index
|
||||||
|
|
||||||
|
### Classes
|
||||||
|
|
||||||
|
- [RRFReranker](classes/RRFReranker.md)
|
||||||
|
|
||||||
|
### Interfaces
|
||||||
|
|
||||||
|
- [Reranker](interfaces/Reranker.md)
|
||||||
48
docs/src/js/namespaces/rerankers/classes/RRFReranker.md
Normal file
48
docs/src/js/namespaces/rerankers/classes/RRFReranker.md
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
[**@lancedb/lancedb**](../../../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../../../globals.md) / [rerankers](../README.md) / RRFReranker
|
||||||
|
|
||||||
|
# Class: RRFReranker
|
||||||
|
|
||||||
|
Reranks the results using the Reciprocal Rank Fusion (RRF) algorithm.
|
||||||
|
|
||||||
|
## Methods
|
||||||
|
|
||||||
|
### rerankHybrid()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
rerankHybrid(
|
||||||
|
query,
|
||||||
|
vecResults,
|
||||||
|
ftsResults): Promise<RecordBatch<any>>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **query**: `string`
|
||||||
|
|
||||||
|
* **vecResults**: `RecordBatch`<`any`>
|
||||||
|
|
||||||
|
* **ftsResults**: `RecordBatch`<`any`>
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`Promise`<`RecordBatch`<`any`>>
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
### create()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
static create(k): Promise<RRFReranker>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **k**: `number` = `60`
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`Promise`<[`RRFReranker`](RRFReranker.md)>
|
||||||
30
docs/src/js/namespaces/rerankers/interfaces/Reranker.md
Normal file
30
docs/src/js/namespaces/rerankers/interfaces/Reranker.md
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
[**@lancedb/lancedb**](../../../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../../../globals.md) / [rerankers](../README.md) / Reranker
|
||||||
|
|
||||||
|
# Interface: Reranker
|
||||||
|
|
||||||
|
## Methods
|
||||||
|
|
||||||
|
### rerankHybrid()
|
||||||
|
|
||||||
|
```ts
|
||||||
|
rerankHybrid(
|
||||||
|
query,
|
||||||
|
vecResults,
|
||||||
|
ftsResults): Promise<RecordBatch<any>>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
* **query**: `string`
|
||||||
|
|
||||||
|
* **vecResults**: `RecordBatch`<`any`>
|
||||||
|
|
||||||
|
* **ftsResults**: `RecordBatch`<`any`>
|
||||||
|
|
||||||
|
#### Returns
|
||||||
|
|
||||||
|
`Promise`<`RecordBatch`<`any`>>
|
||||||
11
docs/src/js/type-aliases/DataLike.md
Normal file
11
docs/src/js/type-aliases/DataLike.md
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / DataLike
|
||||||
|
|
||||||
|
# Type Alias: DataLike
|
||||||
|
|
||||||
|
```ts
|
||||||
|
type DataLike: Data | object;
|
||||||
|
```
|
||||||
11
docs/src/js/type-aliases/FieldLike.md
Normal file
11
docs/src/js/type-aliases/FieldLike.md
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / FieldLike
|
||||||
|
|
||||||
|
# Type Alias: FieldLike
|
||||||
|
|
||||||
|
```ts
|
||||||
|
type FieldLike: Field | object;
|
||||||
|
```
|
||||||
19
docs/src/js/type-aliases/IntoSql.md
Normal file
19
docs/src/js/type-aliases/IntoSql.md
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / IntoSql
|
||||||
|
|
||||||
|
# Type Alias: IntoSql
|
||||||
|
|
||||||
|
```ts
|
||||||
|
type IntoSql:
|
||||||
|
| string
|
||||||
|
| number
|
||||||
|
| boolean
|
||||||
|
| null
|
||||||
|
| Date
|
||||||
|
| ArrayBufferLike
|
||||||
|
| Buffer
|
||||||
|
| IntoSql[];
|
||||||
|
```
|
||||||
11
docs/src/js/type-aliases/IntoVector.md
Normal file
11
docs/src/js/type-aliases/IntoVector.md
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / IntoVector
|
||||||
|
|
||||||
|
# Type Alias: IntoVector
|
||||||
|
|
||||||
|
```ts
|
||||||
|
type IntoVector: Float32Array | Float64Array | number[] | Promise<Float32Array | Float64Array | number[]>;
|
||||||
|
```
|
||||||
11
docs/src/js/type-aliases/RecordBatchLike.md
Normal file
11
docs/src/js/type-aliases/RecordBatchLike.md
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / RecordBatchLike
|
||||||
|
|
||||||
|
# Type Alias: RecordBatchLike
|
||||||
|
|
||||||
|
```ts
|
||||||
|
type RecordBatchLike: RecordBatch | object;
|
||||||
|
```
|
||||||
11
docs/src/js/type-aliases/SchemaLike.md
Normal file
11
docs/src/js/type-aliases/SchemaLike.md
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / SchemaLike
|
||||||
|
|
||||||
|
# Type Alias: SchemaLike
|
||||||
|
|
||||||
|
```ts
|
||||||
|
type SchemaLike: Schema | object;
|
||||||
|
```
|
||||||
11
docs/src/js/type-aliases/TableLike.md
Normal file
11
docs/src/js/type-aliases/TableLike.md
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
[**@lancedb/lancedb**](../README.md) • **Docs**
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
[@lancedb/lancedb](../globals.md) / TableLike
|
||||||
|
|
||||||
|
# Type Alias: TableLike
|
||||||
|
|
||||||
|
```ts
|
||||||
|
type TableLike: ArrowTable | object;
|
||||||
|
```
|
||||||
@@ -66,7 +66,7 @@ the size of the data.
|
|||||||
|
|
||||||
### Embedding Functions
|
### Embedding Functions
|
||||||
|
|
||||||
The embedding API has been completely reworked, and it now more closely resembles the Python API, including the new [embedding registry](./js/classes/embedding.EmbeddingFunctionRegistry.md)
|
The embedding API has been completely reworked, and it now more closely resembles the Python API, including the new [embedding registry](./js/classes/embedding.EmbeddingFunctionRegistry.md):
|
||||||
|
|
||||||
=== "vectordb (deprecated)"
|
=== "vectordb (deprecated)"
|
||||||
|
|
||||||
|
|||||||
@@ -1,17 +1,6 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
#
|
#
|
||||||
# Copyright 2023 LanceDB Developers
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
"""Dataset hf://poloclub/diffusiondb
|
"""Dataset hf://poloclub/diffusiondb
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -207,7 +207,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"## The dataset\n",
|
"## The dataset\n",
|
||||||
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retreiver.\n",
|
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retriever.\n",
|
||||||
"The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo\n"
|
"The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo\n"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
|
|||||||
@@ -477,7 +477,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"## Vector Search\n",
|
"## Vector Search\n",
|
||||||
"\n",
|
"\n",
|
||||||
"avg latency - `3.48 ms ± 71.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
|
"Average latency: `3.48 ms ± 71.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -597,7 +597,7 @@
|
|||||||
"`LinearCombinationReranker(weight=0.7)` is used as the default reranker for reranking the hybrid search results if the reranker isn't specified explicitly.\n",
|
"`LinearCombinationReranker(weight=0.7)` is used as the default reranker for reranking the hybrid search results if the reranker isn't specified explicitly.\n",
|
||||||
"The `weight` param controls the weightage provided to vector search score. The weight of `1-weight` is applied to FTS scores when reranking.\n",
|
"The `weight` param controls the weightage provided to vector search score. The weight of `1-weight` is applied to FTS scores when reranking.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Latency - `71 ms ± 25.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
|
"Latency: `71 ms ± 25.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -675,9 +675,9 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"### Cohere Reranker\n",
|
"### Cohere Reranker\n",
|
||||||
"This uses Cohere's Reranking API to re-rank the results. It accepts the reranking model name as a parameter. By Default it uses the english-v3 model but you can easily switch to a multi-lingual model.\n",
|
"This uses Cohere's Reranking API to re-rank the results. It accepts the reranking model name as a parameter. By default it uses the english-v3 model but you can easily switch to a multi-lingual model.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"latency - `605 ms ± 78.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
|
"Latency: `605 ms ± 78.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -1165,7 +1165,7 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"### ColBERT Reranker\n",
|
"### ColBERT Reranker\n",
|
||||||
"Colber Reranker is powered by ColBERT model. It runs locally using the huggingface implementation.\n",
|
"Colbert Reranker is powered by ColBERT model. It runs locally using the huggingface implementation.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Latency - `950 ms ± 5.78 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`\n",
|
"Latency - `950 ms ± 5.78 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -1489,9 +1489,9 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"### Cross Encoder Reranker\n",
|
"### Cross Encoder Reranker\n",
|
||||||
"Uses cross encoder models are rerankers. Uses sentence transformer implemntation locally\n",
|
"Uses cross encoder models are rerankers. Uses sentence transformer implementation locally\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Latency - `1.38 s ± 64.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
|
"Latency: `1.38 s ± 64.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -1771,10 +1771,10 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"### (Experimental) OpenAI Reranker\n",
|
"### (Experimental) OpenAI Reranker\n",
|
||||||
"\n",
|
"\n",
|
||||||
"This prompts chat model to rerank results which is not a dedicated reranker model. This should be treated as experimental. You might run out of token limit so set the search limits based on your token limit.\n",
|
"This prompts a chat model to rerank results and is not a dedicated reranker model. This should be treated as experimental. You might exceed the token limit so set the search limits based on your token limit.\n",
|
||||||
"NOTE: It is recommended to use `gpt-4-turbo-preview`, older models might lead to bad behaviour\n",
|
"NOTE: It is recommended to use `gpt-4-turbo-preview` as older models might lead to bad behaviour\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Latency - `Can take 10s of seconds if using GPT-4 model`"
|
"Latency: `Can take 10s of seconds if using GPT-4 model`"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -1817,7 +1817,7 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"## Use your custom Reranker\n",
|
"## Use your custom Reranker\n",
|
||||||
"Hybrid search in LanceDB is designed to be very flexible. You can easily plug in your own Re-reranking logic. To do so, you simply need to implement the base Reranker class"
|
"Hybrid search in LanceDB is designed to be very flexible. You can easily plug in your own Re-reranking logic. To do so, you simply need to implement the base Reranker class:"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -1849,9 +1849,9 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"### Custom Reranker based on CohereReranker\n",
|
"### Custom Reranker based on CohereReranker\n",
|
||||||
"\n",
|
"\n",
|
||||||
"For the sake of simplicity let's build custom reranker that just enchances the Cohere Reranker by accepting a filter query, and accept other CohereReranker params as kwags.\n",
|
"For the sake of simplicity let's build a custom reranker that enhances the Cohere Reranker by accepting a filter query, and accepts other CohereReranker params as kwargs.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"For this toy example let's say we want to get rid of docs that represent a table of contents, appendix etc. as these are semantically close of representing costs but this isn't something we are interested in because they don't represent the specific reasons why operating costs were high. They simply represent the costs."
|
"For this toy example let's say we want to get rid of docs that represent a table of contents or appendix, as these are semantically close to representing costs but don't represent the specific reasons why operating costs were high."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -1969,7 +1969,7 @@
|
|||||||
"id": "b3b5464a-7252-4eab-aaac-9b0eae37496f"
|
"id": "b3b5464a-7252-4eab-aaac-9b0eae37496f"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"As you can see the document containing the Table of contetnts of spending no longer shows up"
|
"As you can see, the document containing the table of contents no longer shows up."
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
|
|||||||
@@ -49,7 +49,7 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"## What is a retriever\n",
|
"## What is a retriever\n",
|
||||||
"VectorDBs are used as retreivers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retriever is a critical component of Retrieval Augmented Generation (RAG) acrhitectures. In this section, we will discuss how to improve the performance of retrievers.\n",
|
"VectorDBs are used as retrievers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retriever is a critical component of Retrieval Augmented Generation (RAG) acrhitectures. In this section, we will discuss how to improve the performance of retrievers.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"<img src=\"https://llmstack.ai/assets/images/rag-f517f1f834bdbb94a87765e0edd40ff2.png\" />\n",
|
"<img src=\"https://llmstack.ai/assets/images/rag-f517f1f834bdbb94a87765e0edd40ff2.png\" />\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -64,7 +64,7 @@
|
|||||||
"- Fine-tuning the embedding models\n",
|
"- Fine-tuning the embedding models\n",
|
||||||
"- Using different embedding models\n",
|
"- Using different embedding models\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Obviously, the above list is not exhaustive. There are other subtler ways that can improve retrieval performance like experimenting chunking algorithms, using different distance/similarity metrics etc. But for brevity, we'll only cover high level and more impactful techniques here.\n",
|
"Obviously, the above list is not exhaustive. There are other subtler ways that can improve retrieval performance like alternative chunking algorithms, using different distance/similarity metrics, and more. For brevity, we'll only cover high level and more impactful techniques here.\n",
|
||||||
"\n"
|
"\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -77,7 +77,7 @@
|
|||||||
"# LanceDB\n",
|
"# LanceDB\n",
|
||||||
"- Multimodal DB for AI\n",
|
"- Multimodal DB for AI\n",
|
||||||
"- Powered by an innovative & open-source in-house file format\n",
|
"- Powered by an innovative & open-source in-house file format\n",
|
||||||
"- 0 Setup\n",
|
"- Zero setup\n",
|
||||||
"- Scales up on disk storage\n",
|
"- Scales up on disk storage\n",
|
||||||
"- Native support for vector, full-text(BM25) and hybrid search\n",
|
"- Native support for vector, full-text(BM25) and hybrid search\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -92,8 +92,8 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"## The dataset\n",
|
"## The dataset\n",
|
||||||
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retreiver.\n",
|
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retriever.\n",
|
||||||
"The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo\n"
|
"The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo.\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -594,10 +594,10 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"## Ingestion\n",
|
"## Ingestion\n",
|
||||||
"Let us now ingest the contexts in LanceDB\n",
|
"Let us now ingest the contexts in LanceDB. The steps will be:\n",
|
||||||
"\n",
|
"\n",
|
||||||
"- Create a schema (Pydantic or Pyarrow)\n",
|
"- Create a schema (Pydantic or Pyarrow)\n",
|
||||||
"- Select an embedding model from LanceDB Embedding API (Allows automatic vectorization of data)\n",
|
"- Select an embedding model from LanceDB Embedding API (to allow automatic vectorization of data)\n",
|
||||||
"- Ingest the contexts\n"
|
"- Ingest the contexts\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -841,7 +841,7 @@
|
|||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"## Different Query types in LanceDB\n",
|
"## Different Query types in LanceDB\n",
|
||||||
"LanceDB allows switching query types with by setting `query_type` argument, which defaults to `vector` when using Embedding API. In this example we'll use `JinaReranker` which is one of many rerankers supported by LanceDB\n",
|
"LanceDB allows switching query types with by setting `query_type` argument, which defaults to `vector` when using Embedding API. In this example we'll use `JinaReranker` which is one of many rerankers supported by LanceDB.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"### Vector search:\n",
|
"### Vector search:\n",
|
||||||
"Vector search\n",
|
"Vector search\n",
|
||||||
@@ -1446,11 +1446,11 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"## Takeaways & Tradeoffs\n",
|
"## Takeaways & Tradeoffs\n",
|
||||||
"\n",
|
"\n",
|
||||||
"* **Easiest method to significantly improve accuracy** Using Hybrid search and/or rerankers can significantly improve retrieval performance without spending any additional time or effort on tuning embedding models, generators, or dissecting the dataset.\n",
|
"* **Rerankers significantly improve accuracy at little cost.** Using Hybrid search and/or rerankers can significantly improve retrieval performance without spending any additional time or effort on tuning embedding models, generators, or dissecting the dataset.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"* **Reranking is an expensive operation.** Depending on the type of reranker you choose, they can incur significant latecy to query times. Although some API-based rerankers can be significantly faster.\n",
|
"* **Reranking is an expensive operation.** Depending on the type of reranker you choose, they can incur significant latecy to query times. Although some API-based rerankers can be significantly faster.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"* When using models locally, having a warmed-up GPU environment will significantly reduce latency. This is specially useful if the application doesn't need to be strcitly realtime. The tradeoff being GPU resources."
|
"* **Pre-warmed GPU environments reduce latency.** When using models locally, having a warmed-up GPU environment will significantly reduce latency. This is especially useful if the application doesn't need to be strictly realtime. Pre-warming comes at the expense of GPU resources."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
1096
docs/src/notebooks/reproducibility_async.ipynb
Normal file
1096
docs/src/notebooks/reproducibility_async.ipynb
Normal file
File diff suppressed because it is too large
Load Diff
@@ -114,14 +114,17 @@
|
|||||||
}
|
}
|
||||||
],
|
],
|
||||||
"source": [
|
"source": [
|
||||||
"data = [\n",
|
"import pandas as pd\n",
|
||||||
" {\"vector\": [1.1, 1.2], \"lat\": 45.5, \"long\": -122.7},\n",
|
|
||||||
" {\"vector\": [0.2, 1.8], \"lat\": 40.1, \"long\": -74.1},\n",
|
|
||||||
"]\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"db.create_table(\"table2\", data)\n",
|
"data = pd.DataFrame(\n",
|
||||||
"\n",
|
" {\n",
|
||||||
"db[\"table2\"].head() "
|
" \"vector\": [[1.1, 1.2, 1.3, 1.4], [0.2, 1.8, 0.4, 3.6]],\n",
|
||||||
|
" \"lat\": [45.5, 40.1],\n",
|
||||||
|
" \"long\": [-122.7, -74.1],\n",
|
||||||
|
" }\n",
|
||||||
|
")\n",
|
||||||
|
"db.create_table(\"my_table_pandas\", data)\n",
|
||||||
|
"db[\"my_table_pandas\"].head()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -164,7 +167,7 @@
|
|||||||
"import pyarrow as pa\n",
|
"import pyarrow as pa\n",
|
||||||
"\n",
|
"\n",
|
||||||
"custom_schema = pa.schema([\n",
|
"custom_schema = pa.schema([\n",
|
||||||
"pa.field(\"vector\", pa.list_(pa.float32(), 2)),\n",
|
"pa.field(\"vector\", pa.list_(pa.float32(), 4)),\n",
|
||||||
"pa.field(\"lat\", pa.float32()),\n",
|
"pa.field(\"lat\", pa.float32()),\n",
|
||||||
"pa.field(\"long\", pa.float32())\n",
|
"pa.field(\"long\", pa.float32())\n",
|
||||||
"])\n",
|
"])\n",
|
||||||
|
|||||||
@@ -8,24 +8,32 @@ and PyArrow. The sequence of steps in a typical workflow is shown below.
|
|||||||
|
|
||||||
First, we need to connect to a LanceDB database.
|
First, we need to connect to a LanceDB database.
|
||||||
|
|
||||||
```py
|
=== "Sync API"
|
||||||
|
|
||||||
import lancedb
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
db = lancedb.connect("data/sample-lancedb")
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
We can load a Pandas `DataFrame` to LanceDB directly.
|
We can load a Pandas `DataFrame` to LanceDB directly.
|
||||||
|
|
||||||
```py
|
=== "Sync API"
|
||||||
import pandas as pd
|
|
||||||
|
|
||||||
data = pd.DataFrame({
|
```python
|
||||||
"vector": [[3.1, 4.1], [5.9, 26.5]],
|
--8<-- "python/python/tests/docs/test_python.py:import-pandas"
|
||||||
"item": ["foo", "bar"],
|
--8<-- "python/python/tests/docs/test_python.py:create_table_pandas"
|
||||||
"price": [10.0, 20.0]
|
```
|
||||||
})
|
=== "Async API"
|
||||||
table = db.create_table("pd_table", data=data)
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:import-pandas"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:create_table_pandas_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
Similar to the [`pyarrow.write_dataset()`](https://arrow.apache.org/docs/python/generated/pyarrow.dataset.write_dataset.html) method, LanceDB's
|
Similar to the [`pyarrow.write_dataset()`](https://arrow.apache.org/docs/python/generated/pyarrow.dataset.write_dataset.html) method, LanceDB's
|
||||||
@@ -33,28 +41,21 @@ Similar to the [`pyarrow.write_dataset()`](https://arrow.apache.org/docs/python/
|
|||||||
|
|
||||||
If you have a dataset that is larger than memory, you can create a table with `Iterator[pyarrow.RecordBatch]` to lazily load the data:
|
If you have a dataset that is larger than memory, you can create a table with `Iterator[pyarrow.RecordBatch]` to lazily load the data:
|
||||||
|
|
||||||
```py
|
=== "Sync API"
|
||||||
|
|
||||||
from typing import Iterable
|
```python
|
||||||
import pyarrow as pa
|
--8<-- "python/python/tests/docs/test_python.py:import-iterable"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:import-pyarrow"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:make_batches"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:create_table_iterable"
|
||||||
|
```
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
def make_batches() -> Iterable[pa.RecordBatch]:
|
```python
|
||||||
for i in range(5):
|
--8<-- "python/python/tests/docs/test_python.py:import-iterable"
|
||||||
yield pa.RecordBatch.from_arrays(
|
--8<-- "python/python/tests/docs/test_python.py:import-pyarrow"
|
||||||
[
|
--8<-- "python/python/tests/docs/test_python.py:make_batches"
|
||||||
pa.array([[3.1, 4.1], [5.9, 26.5]]),
|
--8<-- "python/python/tests/docs/test_python.py:create_table_iterable_async"
|
||||||
pa.array(["foo", "bar"]),
|
|
||||||
pa.array([10.0, 20.0]),
|
|
||||||
],
|
|
||||||
["vector", "item", "price"])
|
|
||||||
|
|
||||||
schema=pa.schema([
|
|
||||||
pa.field("vector", pa.list_(pa.float32())),
|
|
||||||
pa.field("item", pa.utf8()),
|
|
||||||
pa.field("price", pa.float32()),
|
|
||||||
])
|
|
||||||
|
|
||||||
table = db.create_table("iterable_table", data=make_batches(), schema=schema)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
You will find detailed instructions of creating a LanceDB dataset in
|
You will find detailed instructions of creating a LanceDB dataset in
|
||||||
@@ -65,14 +66,15 @@ sections.
|
|||||||
|
|
||||||
We can now perform similarity search via the LanceDB Python API.
|
We can now perform similarity search via the LanceDB Python API.
|
||||||
|
|
||||||
```py
|
=== "Sync API"
|
||||||
# Open the table previously created.
|
|
||||||
table = db.open_table("pd_table")
|
|
||||||
|
|
||||||
query_vector = [100, 100]
|
```python
|
||||||
# Pandas DataFrame
|
--8<-- "python/python/tests/docs/test_python.py:vector_search"
|
||||||
df = table.search(query_vector).limit(1).to_pandas()
|
```
|
||||||
print(df)
|
=== "Async API"
|
||||||
|
|
||||||
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:vector_search_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
```
|
```
|
||||||
@@ -83,16 +85,13 @@ print(df)
|
|||||||
If you have a simple filter, it's faster to provide a `where` clause to LanceDB's `search` method.
|
If you have a simple filter, it's faster to provide a `where` clause to LanceDB's `search` method.
|
||||||
For more complex filters or aggregations, you can always resort to using the underlying `DataFrame` methods after performing a search.
|
For more complex filters or aggregations, you can always resort to using the underlying `DataFrame` methods after performing a search.
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:vector_search_with_filter"
|
||||||
# Apply the filter via LanceDB
|
```
|
||||||
results = table.search([100, 100]).where("price < 15").to_pandas()
|
=== "Async API"
|
||||||
assert len(results) == 1
|
|
||||||
assert results["item"].iloc[0] == "foo"
|
```python
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:vector_search_with_filter_async"
|
||||||
# Apply the filter via Pandas
|
|
||||||
df = results = table.search([100, 100]).to_pandas()
|
|
||||||
results = df[df.price < 15]
|
|
||||||
assert len(results) == 1
|
|
||||||
assert results["item"].iloc[0] == "foo"
|
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -2,38 +2,56 @@
|
|||||||
|
|
||||||
LanceDB supports [Polars](https://github.com/pola-rs/polars), a blazingly fast DataFrame library for Python written in Rust. Just like in Pandas, the Polars integration is enabled by PyArrow under the hood. A deeper integration between Lance Tables and Polars DataFrames is in progress, but at the moment, you can read a Polars DataFrame into LanceDB and output the search results from a query to a Polars DataFrame.
|
LanceDB supports [Polars](https://github.com/pola-rs/polars), a blazingly fast DataFrame library for Python written in Rust. Just like in Pandas, the Polars integration is enabled by PyArrow under the hood. A deeper integration between Lance Tables and Polars DataFrames is in progress, but at the moment, you can read a Polars DataFrame into LanceDB and output the search results from a query to a Polars DataFrame.
|
||||||
|
|
||||||
|
|
||||||
## Create & Query LanceDB Table
|
## Create & Query LanceDB Table
|
||||||
|
|
||||||
### From Polars DataFrame
|
### From Polars DataFrame
|
||||||
|
|
||||||
First, we connect to a LanceDB database.
|
First, we connect to a LanceDB database.
|
||||||
|
|
||||||
```py
|
=== "Sync API"
|
||||||
import lancedb
|
|
||||||
|
|
||||||
db = lancedb.connect("data/polars-lancedb")
|
```py
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
=== "Async API"
|
||||||
|
|
||||||
|
```py
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:import-lancedb"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb_async"
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
We can load a Polars `DataFrame` to LanceDB directly.
|
We can load a Polars `DataFrame` to LanceDB directly.
|
||||||
|
|
||||||
```py
|
=== "Sync API"
|
||||||
import polars as pl
|
|
||||||
|
|
||||||
data = pl.DataFrame({
|
```py
|
||||||
"vector": [[3.1, 4.1], [5.9, 26.5]],
|
--8<-- "python/python/tests/docs/test_python.py:import-polars"
|
||||||
"item": ["foo", "bar"],
|
--8<-- "python/python/tests/docs/test_python.py:create_table_polars"
|
||||||
"price": [10.0, 20.0]
|
```
|
||||||
})
|
|
||||||
table = db.create_table("pl_table", data=data)
|
=== "Async API"
|
||||||
|
|
||||||
|
```py
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:import-polars"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:create_table_polars_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
We can now perform similarity search via the LanceDB Python API.
|
We can now perform similarity search via the LanceDB Python API.
|
||||||
|
|
||||||
|
=== "Sync API"
|
||||||
|
|
||||||
```py
|
```py
|
||||||
query = [3.0, 4.0]
|
--8<-- "python/python/tests/docs/test_python.py:vector_search_polars"
|
||||||
result = table.search(query).limit(1).to_polars()
|
```
|
||||||
print(result)
|
|
||||||
print(type(result))
|
=== "Async API"
|
||||||
|
|
||||||
|
```py
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:vector_search_polars_async"
|
||||||
```
|
```
|
||||||
|
|
||||||
In addition to the selected columns, LanceDB also returns a vector
|
In addition to the selected columns, LanceDB also returns a vector
|
||||||
@@ -59,33 +77,16 @@ Note that the type of the result from a table search is a Polars DataFrame.
|
|||||||
Alternately, we can create an empty LanceDB Table using a Pydantic schema and populate it with a Polars DataFrame.
|
Alternately, we can create an empty LanceDB Table using a Pydantic schema and populate it with a Polars DataFrame.
|
||||||
|
|
||||||
```py
|
```py
|
||||||
import polars as pl
|
--8<-- "python/python/tests/docs/test_python.py:import-polars"
|
||||||
from lancedb.pydantic import Vector, LanceModel
|
--8<-- "python/python/tests/docs/test_python.py:import-lancedb-pydantic"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:class_Item"
|
||||||
|
--8<-- "python/python/tests/docs/test_python.py:create_table_pydantic"
|
||||||
class Item(LanceModel):
|
|
||||||
vector: Vector(2)
|
|
||||||
item: str
|
|
||||||
price: float
|
|
||||||
|
|
||||||
data = {
|
|
||||||
"vector": [[3.1, 4.1]],
|
|
||||||
"item": "foo",
|
|
||||||
"price": 10.0,
|
|
||||||
}
|
|
||||||
|
|
||||||
table = db.create_table("test_table", schema=Item)
|
|
||||||
df = pl.DataFrame(data)
|
|
||||||
# Add Polars DataFrame to table
|
|
||||||
table.add(df)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
The table can now be queried as usual.
|
The table can now be queried as usual.
|
||||||
|
|
||||||
```py
|
```py
|
||||||
result = table.search([3.0, 4.0]).limit(1).to_polars()
|
--8<-- "python/python/tests/docs/test_python.py:vector_search_polars"
|
||||||
print(result)
|
|
||||||
print(type(result))
|
|
||||||
```
|
```
|
||||||
|
|
||||||
```
|
```
|
||||||
@@ -108,8 +109,7 @@ As you iterate on your application, you'll likely need to work with the whole ta
|
|||||||
LanceDB tables can also be converted directly into a polars LazyFrame for further processing.
|
LanceDB tables can also be converted directly into a polars LazyFrame for further processing.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
ldf = table.to_polars()
|
--8<-- "python/python/tests/docs/test_python.py:dump_table_lazyform"
|
||||||
print(type(ldf))
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Unlike the search result from a query, we can see that the type of the result is a LazyFrame.
|
Unlike the search result from a query, we can see that the type of the result is a LazyFrame.
|
||||||
@@ -121,7 +121,7 @@ Unlike the search result from a query, we can see that the type of the result is
|
|||||||
We can now work with the LazyFrame as we would in Polars, and collect the first result.
|
We can now work with the LazyFrame as we would in Polars, and collect the first result.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
print(ldf.first().collect())
|
--8<-- "python/python/tests/docs/test_python.py:print_table_lazyform"
|
||||||
```
|
```
|
||||||
|
|
||||||
```
|
```
|
||||||
@@ -139,4 +139,3 @@ The reason it's beneficial to not convert the LanceDB Table
|
|||||||
to a DataFrame is because the table can potentially be way larger
|
to a DataFrame is because the table can potentially be way larger
|
||||||
than memory, and Polars LazyFrames allow us to work with such
|
than memory, and Polars LazyFrames allow us to work with such
|
||||||
larger-than-memory datasets by not loading it into memory all at once.
|
larger-than-memory datasets by not loading it into memory all at once.
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user