mirror of
https://github.com/lancedb/lancedb.git
synced 2025-12-23 05:19:58 +00:00
Compare commits
19 Commits
python-v0.
...
python-v0.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
fbffe532a8 | ||
|
|
55ffc96e56 | ||
|
|
998c5f3f74 | ||
|
|
6eacae18c4 | ||
|
|
d3ea75cc2b | ||
|
|
f4afe456e8 | ||
|
|
ea5c2266b8 | ||
|
|
c557e77f09 | ||
|
|
3c0a64be8f | ||
|
|
0e496ed3b5 | ||
|
|
17c9e9afea | ||
|
|
0b45ef93c0 | ||
|
|
b474f98049 | ||
|
|
2c05ffed52 | ||
|
|
8b31540b21 | ||
|
|
ba844318f8 | ||
|
|
f007b76153 | ||
|
|
5d8d258f59 | ||
|
|
4172140f74 |
@@ -1,5 +1,5 @@
|
||||
[tool.bumpversion]
|
||||
current_version = "0.14.1"
|
||||
current_version = "0.14.2-beta.0"
|
||||
parse = """(?x)
|
||||
(?P<major>0|[1-9]\\d*)\\.
|
||||
(?P<minor>0|[1-9]\\d*)\\.
|
||||
|
||||
2
.github/workflows/build_mac_wheel/action.yml
vendored
2
.github/workflows/build_mac_wheel/action.yml
vendored
@@ -20,7 +20,7 @@ runs:
|
||||
uses: PyO3/maturin-action@v1
|
||||
with:
|
||||
command: build
|
||||
# TODO: pass through interpreter
|
||||
args: ${{ inputs.args }}
|
||||
docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
|
||||
working-directory: python
|
||||
interpreter: 3.${{ inputs.python-minor-version }}
|
||||
|
||||
@@ -28,7 +28,7 @@ runs:
|
||||
args: ${{ inputs.args }}
|
||||
docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
|
||||
working-directory: python
|
||||
- uses: actions/upload-artifact@v3
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: windows-wheels
|
||||
path: python\target\wheels
|
||||
|
||||
9
.github/workflows/make-release-commit.yml
vendored
9
.github/workflows/make-release-commit.yml
vendored
@@ -43,7 +43,7 @@ on:
|
||||
jobs:
|
||||
make-release:
|
||||
# Creates tag and GH release. The GH release will trigger the build and release jobs.
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
@@ -57,15 +57,14 @@ jobs:
|
||||
# trigger any workflows watching for new tags. See:
|
||||
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
|
||||
token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
|
||||
- name: Validate Lance dependency is at stable version
|
||||
if: ${{ inputs.type == 'stable' }}
|
||||
run: python ci/validate_stable_lance.py
|
||||
- name: Set git configs for bumpversion
|
||||
shell: bash
|
||||
run: |
|
||||
git config user.name 'Lance Release'
|
||||
git config user.email 'lance-dev@lancedb.com'
|
||||
- name: Set up Python 3.11
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
- name: Bump Python version
|
||||
if: ${{ inputs.python }}
|
||||
working-directory: python
|
||||
|
||||
188
.github/workflows/npm-publish.yml
vendored
188
.github/workflows/npm-publish.yml
vendored
@@ -334,51 +334,50 @@ jobs:
|
||||
path: |
|
||||
node/dist/lancedb-vectordb-win32*.tgz
|
||||
|
||||
# TODO: https://github.com/lancedb/lancedb/issues/1975
|
||||
# node-windows-arm64:
|
||||
# name: vectordb ${{ matrix.config.arch }}-pc-windows-msvc
|
||||
# # if: startsWith(github.ref, 'refs/tags/v')
|
||||
# runs-on: ubuntu-latest
|
||||
# container: alpine:edge
|
||||
# strategy:
|
||||
# fail-fast: false
|
||||
# matrix:
|
||||
# config:
|
||||
# # - arch: x86_64
|
||||
# - arch: aarch64
|
||||
# steps:
|
||||
# - name: Checkout
|
||||
# uses: actions/checkout@v4
|
||||
# - name: Install dependencies
|
||||
# run: |
|
||||
# apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
|
||||
# curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
|
||||
# echo "source $HOME/.cargo/env" >> saved_env
|
||||
# echo "export CC=clang" >> saved_env
|
||||
# echo "export AR=llvm-ar" >> saved_env
|
||||
# source "$HOME/.cargo/env"
|
||||
# rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
|
||||
# (mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
|
||||
# echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
|
||||
# echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
|
||||
# - name: Configure x86_64 build
|
||||
# if: ${{ matrix.config.arch == 'x86_64' }}
|
||||
# run: |
|
||||
# echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
|
||||
# - name: Configure aarch64 build
|
||||
# if: ${{ matrix.config.arch == 'aarch64' }}
|
||||
# run: |
|
||||
# echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
|
||||
# - name: Build Windows Artifacts
|
||||
# run: |
|
||||
# source ./saved_env
|
||||
# bash ci/manylinux_node/build_vectordb.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-pc-windows-msvc
|
||||
# - name: Upload Windows Artifacts
|
||||
# uses: actions/upload-artifact@v4
|
||||
# with:
|
||||
# name: node-native-windows-${{ matrix.config.arch }}
|
||||
# path: |
|
||||
# node/dist/lancedb-vectordb-win32*.tgz
|
||||
node-windows-arm64:
|
||||
name: vectordb ${{ matrix.config.arch }}-pc-windows-msvc
|
||||
# if: startsWith(github.ref, 'refs/tags/v')
|
||||
runs-on: ubuntu-latest
|
||||
container: alpine:edge
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
# - arch: x86_64
|
||||
- arch: aarch64
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
|
||||
curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
|
||||
echo "source $HOME/.cargo/env" >> saved_env
|
||||
echo "export CC=clang" >> saved_env
|
||||
echo "export AR=llvm-ar" >> saved_env
|
||||
source "$HOME/.cargo/env"
|
||||
rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
|
||||
(mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
|
||||
echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
|
||||
echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
|
||||
- name: Configure x86_64 build
|
||||
if: ${{ matrix.config.arch == 'x86_64' }}
|
||||
run: |
|
||||
echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
|
||||
- name: Configure aarch64 build
|
||||
if: ${{ matrix.config.arch == 'aarch64' }}
|
||||
run: |
|
||||
echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
|
||||
- name: Build Windows Artifacts
|
||||
run: |
|
||||
source ./saved_env
|
||||
bash ci/manylinux_node/build_vectordb.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-pc-windows-msvc
|
||||
- name: Upload Windows Artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: node-native-windows-${{ matrix.config.arch }}
|
||||
path: |
|
||||
node/dist/lancedb-vectordb-win32*.tgz
|
||||
|
||||
nodejs-windows:
|
||||
name: lancedb ${{ matrix.target }}
|
||||
@@ -414,58 +413,57 @@ jobs:
|
||||
path: |
|
||||
nodejs/dist/*.node
|
||||
|
||||
# TODO: https://github.com/lancedb/lancedb/issues/1975
|
||||
# nodejs-windows-arm64:
|
||||
# name: lancedb ${{ matrix.config.arch }}-pc-windows-msvc
|
||||
# # Only runs on tags that matches the make-release action
|
||||
# # if: startsWith(github.ref, 'refs/tags/v')
|
||||
# runs-on: ubuntu-latest
|
||||
# container: alpine:edge
|
||||
# strategy:
|
||||
# fail-fast: false
|
||||
# matrix:
|
||||
# config:
|
||||
# # - arch: x86_64
|
||||
# - arch: aarch64
|
||||
# steps:
|
||||
# - name: Checkout
|
||||
# uses: actions/checkout@v4
|
||||
# - name: Install dependencies
|
||||
# run: |
|
||||
# apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
|
||||
# curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
|
||||
# echo "source $HOME/.cargo/env" >> saved_env
|
||||
# echo "export CC=clang" >> saved_env
|
||||
# echo "export AR=llvm-ar" >> saved_env
|
||||
# source "$HOME/.cargo/env"
|
||||
# rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
|
||||
# (mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
|
||||
# echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
|
||||
# echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
|
||||
# printf '#!/bin/sh\ncargo "$@"' > $HOME/.cargo/bin/cargo-xwin
|
||||
# chmod u+x $HOME/.cargo/bin/cargo-xwin
|
||||
# - name: Configure x86_64 build
|
||||
# if: ${{ matrix.config.arch == 'x86_64' }}
|
||||
# run: |
|
||||
# echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
|
||||
# - name: Configure aarch64 build
|
||||
# if: ${{ matrix.config.arch == 'aarch64' }}
|
||||
# run: |
|
||||
# echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
|
||||
# - name: Build Windows Artifacts
|
||||
# run: |
|
||||
# source ./saved_env
|
||||
# bash ci/manylinux_node/build_lancedb.sh ${{ matrix.config.arch }}
|
||||
# - name: Upload Windows Artifacts
|
||||
# uses: actions/upload-artifact@v4
|
||||
# with:
|
||||
# name: nodejs-native-windows-${{ matrix.config.arch }}
|
||||
# path: |
|
||||
# nodejs/dist/*.node
|
||||
nodejs-windows-arm64:
|
||||
name: lancedb ${{ matrix.config.arch }}-pc-windows-msvc
|
||||
# Only runs on tags that matches the make-release action
|
||||
# if: startsWith(github.ref, 'refs/tags/v')
|
||||
runs-on: ubuntu-latest
|
||||
container: alpine:edge
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
# - arch: x86_64
|
||||
- arch: aarch64
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
apk add protobuf-dev curl clang lld llvm19 grep npm bash msitools sed
|
||||
curl --proto '=https' --tlsv1.3 -sSf https://raw.githubusercontent.com/rust-lang/rustup/refs/heads/master/rustup-init.sh | sh -s -- -y
|
||||
echo "source $HOME/.cargo/env" >> saved_env
|
||||
echo "export CC=clang" >> saved_env
|
||||
echo "export AR=llvm-ar" >> saved_env
|
||||
source "$HOME/.cargo/env"
|
||||
rustup target add ${{ matrix.config.arch }}-pc-windows-msvc
|
||||
(mkdir -p sysroot && cd sysroot && sh ../ci/sysroot-${{ matrix.config.arch }}-pc-windows-msvc.sh)
|
||||
echo "export C_INCLUDE_PATH=/usr/${{ matrix.config.arch }}-pc-windows-msvc/usr/include" >> saved_env
|
||||
echo "export CARGO_BUILD_TARGET=${{ matrix.config.arch }}-pc-windows-msvc" >> saved_env
|
||||
printf '#!/bin/sh\ncargo "$@"' > $HOME/.cargo/bin/cargo-xwin
|
||||
chmod u+x $HOME/.cargo/bin/cargo-xwin
|
||||
- name: Configure x86_64 build
|
||||
if: ${{ matrix.config.arch == 'x86_64' }}
|
||||
run: |
|
||||
echo "export RUSTFLAGS='-Ctarget-cpu=haswell -Ctarget-feature=+crt-static,+avx2,+fma,+f16c -Clinker=lld -Clink-arg=/LIBPATH:/usr/x86_64-pc-windows-msvc/usr/lib'" >> saved_env
|
||||
- name: Configure aarch64 build
|
||||
if: ${{ matrix.config.arch == 'aarch64' }}
|
||||
run: |
|
||||
echo "export RUSTFLAGS='-Ctarget-feature=+crt-static,+neon,+fp16,+fhm,+dotprod -Clinker=lld -Clink-arg=/LIBPATH:/usr/aarch64-pc-windows-msvc/usr/lib -Clink-arg=arm64rt.lib'" >> saved_env
|
||||
- name: Build Windows Artifacts
|
||||
run: |
|
||||
source ./saved_env
|
||||
bash ci/manylinux_node/build_lancedb.sh ${{ matrix.config.arch }}
|
||||
- name: Upload Windows Artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: nodejs-native-windows-${{ matrix.config.arch }}
|
||||
path: |
|
||||
nodejs/dist/*.node
|
||||
|
||||
release:
|
||||
name: vectordb NPM Publish
|
||||
needs: [node, node-macos, node-linux-gnu, node-linux-musl, node-windows]
|
||||
needs: [node, node-macos, node-linux-gnu, node-linux-musl, node-windows, node-windows-arm64]
|
||||
runs-on: ubuntu-latest
|
||||
# Only runs on tags that matches the make-release action
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
@@ -505,7 +503,7 @@ jobs:
|
||||
|
||||
release-nodejs:
|
||||
name: lancedb NPM Publish
|
||||
needs: [nodejs-macos, nodejs-linux-gnu, nodejs-linux-musl, nodejs-windows]
|
||||
needs: [nodejs-macos, nodejs-linux-gnu, nodejs-linux-musl, nodejs-windows, nodejs-windows-arm64]
|
||||
runs-on: ubuntu-latest
|
||||
# Only runs on tags that matches the make-release action
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
|
||||
78
CONTRIBUTING.md
Normal file
78
CONTRIBUTING.md
Normal file
@@ -0,0 +1,78 @@
|
||||
# Contributing to LanceDB
|
||||
|
||||
LanceDB is an open-source project and we welcome contributions from the community.
|
||||
This document outlines the process for contributing to LanceDB.
|
||||
|
||||
## Reporting Issues
|
||||
|
||||
If you encounter a bug or have a feature request, please open an issue on the
|
||||
[GitHub issue tracker](https://github.com/lancedb/lancedb).
|
||||
|
||||
## Picking an issue
|
||||
|
||||
We track issues on the GitHub issue tracker. If you are looking for something to
|
||||
work on, check the [good first issue](https://github.com/lancedb/lancedb/contribute) label. These issues are typically the best described and have the smallest scope.
|
||||
|
||||
If there's an issue you are interested in working on, please leave a comment on the issue. This will help us avoid duplicate work. Additionally, if you have questions about the issue, please ask them in the issue comments. We are happy to provide guidance on how to approach the issue.
|
||||
|
||||
## Configuring Git
|
||||
|
||||
First, fork the repository on GitHub, then clone your fork:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/<username>/lancedb.git
|
||||
cd lancedb
|
||||
```
|
||||
|
||||
Then add the main repository as a remote:
|
||||
|
||||
```bash
|
||||
git remote add upstream https://github.com/lancedb/lancedb.git
|
||||
git fetch upstream
|
||||
```
|
||||
|
||||
## Setting up your development environment
|
||||
|
||||
We have development environments for Python, Typescript, and Java. Each environment has its own setup instructions.
|
||||
|
||||
* [Python](python/CONTRIBUTING.md)
|
||||
* [Typescript](nodejs/CONTRIBUTING.md)
|
||||
<!-- TODO: add Java contributing guide -->
|
||||
* [Documentation](docs/README.md)
|
||||
|
||||
|
||||
## Best practices for pull requests
|
||||
|
||||
For the best chance of having your pull request accepted, please follow these guidelines:
|
||||
|
||||
1. Unit test all bug fixes and new features. Your code will not be merged if it
|
||||
doesn't have tests.
|
||||
1. If you change the public API, update the documentation in the `docs` directory.
|
||||
1. Aim to minimize the number of changes in each pull request. Keep to solving
|
||||
one problem at a time, when possible.
|
||||
1. Before marking a pull request ready-for-review, do a self review of your code.
|
||||
Is it clear why you are making the changes? Are the changes easy to understand?
|
||||
1. Use [conventional commit messages](https://www.conventionalcommits.org/en/) as pull request titles. Examples:
|
||||
* New feature: `feat: adding foo API`
|
||||
* Bug fix: `fix: issue with foo API`
|
||||
* Documentation change: `docs: adding foo API documentation`
|
||||
1. If your pull request is a work in progress, leave the pull request as a draft.
|
||||
We will assume the pull request is ready for review when it is opened.
|
||||
1. When writing tests, test the error cases. Make sure they have understandable
|
||||
error messages.
|
||||
|
||||
## Project structure
|
||||
|
||||
The core library is written in Rust. The Python, Typescript, and Java libraries
|
||||
are wrappers around the Rust library.
|
||||
|
||||
* `src/lancedb`: Rust library source code
|
||||
* `python`: Python package source code
|
||||
* `nodejs`: Typescript package source code
|
||||
* `node`: **Deprecated** Typescript package source code
|
||||
* `java`: Java package source code
|
||||
* `docs`: Documentation source code
|
||||
|
||||
## Release process
|
||||
|
||||
For information on the release process, see: [release_process.md](release_process.md)
|
||||
@@ -53,7 +53,7 @@ curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-42
|
||||
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/149578fb3b621cdb61ee1813b9b3e791/463ad1b0783ebda908fd6c16a4abfe93.cab
|
||||
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/5c986c4f393c6b09d5aec3b539e9fb4a/5a22e5cde814b041749fb271547f4dd5.cab
|
||||
|
||||
# fwpuclnt.lib arm64rt.lib
|
||||
# dbghelp.lib fwpuclnt.lib arm64rt.lib
|
||||
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/7a332420d812f7c1d41da865ae5a7c52/windows%20sdk%20desktop%20libs%20arm64-x86_en-us.msi
|
||||
curl -O https://download.visualstudio.microsoft.com/download/pr/32863b8d-a46d-4231-8e84-0888519d20a9/19de98ed4a79938d0045d19c047936b3/3e2f7be479e3679d700ce0782e4cc318.cab
|
||||
|
||||
@@ -98,7 +98,7 @@ find /usr/aarch64-pc-windows-msvc/usr/include -type f -exec sed -i -E 's/(#inclu
|
||||
# reason: https://developercommunity.visualstudio.com/t/libucrtlibstreamobj-error-lnk2001-unresolved-exter/1544787#T-ND1599818
|
||||
# I don't understand the 'correct' fix for this, arm64rt.lib is supposed to be the workaround
|
||||
|
||||
(cd 'program files/windows kits/10/lib/10.0.26100.0/um/arm64' && cp advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib runtimeobject.lib fwpuclnt.lib arm64rt.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)
|
||||
(cd 'program files/windows kits/10/lib/10.0.26100.0/um/arm64' && cp advapi32.lib bcrypt.lib kernel32.lib ntdll.lib user32.lib uuid.lib ws2_32.lib userenv.lib cfgmgr32.lib runtimeobject.lib dbghelp.lib fwpuclnt.lib arm64rt.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)
|
||||
|
||||
(cd 'contents/vc/tools/msvc/14.16.27023/lib/arm64' && cp libcmt.lib libvcruntime.lib -t /usr/aarch64-pc-windows-msvc/usr/lib)
|
||||
|
||||
|
||||
34
ci/validate_stable_lance.py
Normal file
34
ci/validate_stable_lance.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import tomllib
|
||||
|
||||
found_preview_lance = False
|
||||
|
||||
with open("Cargo.toml", "rb") as f:
|
||||
cargo_data = tomllib.load(f)
|
||||
|
||||
for name, dep in cargo_data["workspace"]["dependencies"].items():
|
||||
if name == "lance" or name.startswith("lance-"):
|
||||
if isinstance(dep, str):
|
||||
version = dep
|
||||
elif isinstance(dep, dict):
|
||||
# Version doesn't have the beta tag in it, so we instead look
|
||||
# at the git tag.
|
||||
version = dep["tag"]
|
||||
else:
|
||||
raise ValueError("Unexpected type for dependency: " + str(dep))
|
||||
|
||||
if "beta" in version:
|
||||
found_preview_lance = True
|
||||
print(f"Dependency '{name}' is a preview version: {version}")
|
||||
|
||||
with open("python/pyproject.toml", "rb") as f:
|
||||
py_proj_data = tomllib.load(f)
|
||||
|
||||
for dep in py_proj_data["project"]["dependencies"]:
|
||||
if dep.startswith("pylance"):
|
||||
if "b" in dep:
|
||||
found_preview_lance = True
|
||||
print(f"Dependency '{dep}' is a preview version")
|
||||
break # Only one pylance dependency
|
||||
|
||||
if found_preview_lance:
|
||||
raise ValueError("Found preview version of Lance in dependencies")
|
||||
@@ -9,36 +9,81 @@ unreleased features.
|
||||
## Building the docs
|
||||
|
||||
### Setup
|
||||
1. Install LanceDB. From LanceDB repo root: `pip install -e python`
|
||||
2. Install dependencies. From LanceDB repo root: `pip install -r docs/requirements.txt`
|
||||
3. Make sure you have node and npm setup
|
||||
4. Make sure protobuf and libssl are installed
|
||||
1. Install LanceDB Python. See setup in [Python contributing guide](../python/CONTRIBUTING.md).
|
||||
Run `make develop` to install the Python package.
|
||||
2. Install documentation dependencies. From LanceDB repo root: `pip install -r docs/requirements.txt`
|
||||
|
||||
### Building node module and create markdown files
|
||||
### Preview the docs
|
||||
|
||||
See [Javascript docs README](./src/javascript/README.md)
|
||||
|
||||
### Build docs
|
||||
From LanceDB repo root:
|
||||
|
||||
Run: `PYTHONPATH=. mkdocs build -f docs/mkdocs.yml`
|
||||
|
||||
If successful, you should see a `docs/site` directory that you can verify locally.
|
||||
|
||||
### Run local server
|
||||
|
||||
You can run a local server to test the docs prior to deployment by navigating to the `docs` directory and running the following command:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
cd docs
|
||||
mkdocs serve
|
||||
```
|
||||
|
||||
### Run doctest for typescript example
|
||||
If you want to just generate the HTML files:
|
||||
|
||||
```bash
|
||||
cd lancedb/docs
|
||||
npm i
|
||||
npm run build
|
||||
npm run all
|
||||
```shell
|
||||
PYTHONPATH=. mkdocs build -f docs/mkdocs.yml
|
||||
```
|
||||
|
||||
If successful, you should see a `docs/site` directory that you can verify locally.
|
||||
|
||||
## Adding examples
|
||||
|
||||
To make sure examples are correct, we put examples in test files so they can be
|
||||
run as part of our test suites.
|
||||
|
||||
You can see the tests are at:
|
||||
|
||||
* Python: `python/python/tests/docs`
|
||||
* Typescript: `nodejs/examples/`
|
||||
|
||||
### Checking python examples
|
||||
|
||||
```shell
|
||||
cd python
|
||||
pytest -vv python/tests/docs
|
||||
```
|
||||
|
||||
### Checking typescript examples
|
||||
|
||||
The `@lancedb/lancedb` package must be built before running the tests:
|
||||
|
||||
```shell
|
||||
pushd nodejs
|
||||
npm ci
|
||||
npm run build
|
||||
popd
|
||||
```
|
||||
|
||||
Then you can run the examples by going to the `nodejs/examples` directory and
|
||||
running the tests like a normal npm package:
|
||||
|
||||
```shell
|
||||
pushd nodejs/examples
|
||||
npm ci
|
||||
npm test
|
||||
popd
|
||||
```
|
||||
|
||||
## API documentation
|
||||
|
||||
### Python
|
||||
|
||||
The Python API documentation is organized based on the file `docs/src/python/python.md`.
|
||||
We manually add entries there so we can control the organization of the reference page.
|
||||
**However, this means any new types must be manually added to the file.** No additional
|
||||
steps are needed to generate the API documentation.
|
||||
|
||||
### Typescript
|
||||
|
||||
The typescript API documentation is generated from the typescript source code using [typedoc](https://typedoc.org/).
|
||||
|
||||
When new APIs are added, you must manually re-run the typedoc command to update the API documentation.
|
||||
The new files should be checked into the repository.
|
||||
|
||||
```shell
|
||||
pushd nodejs
|
||||
npm run docs
|
||||
popd
|
||||
```
|
||||
|
||||
@@ -146,7 +146,9 @@ nav:
|
||||
- Building Custom Rerankers: reranking/custom_reranker.md
|
||||
- Example: notebooks/lancedb_reranking.ipynb
|
||||
- Filtering: sql.md
|
||||
- Versioning & Reproducibility: notebooks/reproducibility.ipynb
|
||||
- Versioning & Reproducibility:
|
||||
- sync API: notebooks/reproducibility.ipynb
|
||||
- async API: notebooks/reproducibility_async.ipynb
|
||||
- Configuring Storage: guides/storage.md
|
||||
- Migration Guide: migration.md
|
||||
- Tuning retrieval performance:
|
||||
@@ -278,7 +280,9 @@ nav:
|
||||
- Building Custom Rerankers: reranking/custom_reranker.md
|
||||
- Example: notebooks/lancedb_reranking.ipynb
|
||||
- Filtering: sql.md
|
||||
- Versioning & Reproducibility: notebooks/reproducibility.ipynb
|
||||
- Versioning & Reproducibility:
|
||||
- sync API: notebooks/reproducibility.ipynb
|
||||
- async API: notebooks/reproducibility_async.ipynb
|
||||
- Configuring Storage: guides/storage.md
|
||||
- Migration Guide: migration.md
|
||||
- Tuning retrieval performance:
|
||||
|
||||
@@ -18,25 +18,24 @@ See the [indexing](concepts/index_ivfpq.md) concepts guide for more information
|
||||
Lance supports `IVF_PQ` index type by default.
|
||||
|
||||
=== "Python"
|
||||
=== "Sync API"
|
||||
|
||||
Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method.
|
||||
Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method.
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
import numpy as np
|
||||
uri = "data/sample-lancedb"
|
||||
db = lancedb.connect(uri)
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:import-numpy"
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:create_ann_index"
|
||||
```
|
||||
=== "Async API"
|
||||
Creating indexes is done via the [create_index](https://lancedb.github.io/lancedb/python/#lancedb.table.LanceTable.create_index) method.
|
||||
|
||||
# Create 10,000 sample vectors
|
||||
data = [{"vector": row, "item": f"item {i}"}
|
||||
for i, row in enumerate(np.random.random((10_000, 1536)).astype('float32'))]
|
||||
|
||||
# Add the vectors to a table
|
||||
tbl = db.create_table("my_vectors", data=data)
|
||||
|
||||
# Create and train the index - you need to have enough data in the table for an effective training step
|
||||
tbl.create_index(num_partitions=256, num_sub_vectors=96)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:import-numpy"
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-ivfpq"
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:create_ann_index_async"
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -127,7 +126,9 @@ You can specify the GPU device to train IVF partitions via
|
||||
accelerator="mps"
|
||||
)
|
||||
```
|
||||
|
||||
!!! note
|
||||
GPU based indexing is not yet supported with our asynchronous client.
|
||||
|
||||
Troubleshooting:
|
||||
|
||||
If you see `AssertionError: Torch not compiled with CUDA enabled`, you need to [install
|
||||
@@ -152,14 +153,16 @@ There are a couple of parameters that can be used to fine-tune the search:
|
||||
|
||||
|
||||
=== "Python"
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
tbl.search(np.random.random((1536))) \
|
||||
.limit(2) \
|
||||
.nprobes(20) \
|
||||
.refine_factor(10) \
|
||||
.to_pandas()
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async"
|
||||
```
|
||||
|
||||
```text
|
||||
vector item _distance
|
||||
@@ -196,10 +199,16 @@ The search will return the data requested in addition to the distance of each it
|
||||
You can further filter the elements returned by a search using a where clause.
|
||||
|
||||
=== "Python"
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
tbl.search(np.random.random((1536))).where("item != 'item 1141'").to_pandas()
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_filter"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async_with_filter"
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -221,10 +230,16 @@ You can select the columns returned by the query using a select clause.
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
tbl.search(np.random.random((1536))).select(["vector"]).to_pandas()
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_select"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_async_with_select"
|
||||
```
|
||||
|
||||
```text
|
||||
vector _distance
|
||||
|
||||
115
docs/src/fts.md
115
docs/src/fts.md
@@ -10,28 +10,20 @@ LanceDB provides support for full-text search via Lance, allowing you to incorpo
|
||||
Consider that we have a LanceDB table named `my_table`, whose string column `text` we want to index and query via keyword search, the FTS index must be created before you can search via keywords.
|
||||
|
||||
=== "Python"
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
|
||||
--8<-- "python/python/tests/docs/test_search.py:basic_fts"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
uri = "data/sample-lancedb"
|
||||
db = lancedb.connect(uri)
|
||||
|
||||
table = db.create_table(
|
||||
"my_table",
|
||||
data=[
|
||||
{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"},
|
||||
{"vector": [5.9, 26.5], "text": "There are several kittens playing"},
|
||||
],
|
||||
)
|
||||
|
||||
# passing `use_tantivy=False` to use lance FTS index
|
||||
# `use_tantivy=True` by default
|
||||
table.create_fts_index("text", use_tantivy=False)
|
||||
table.search("puppy").limit(10).select(["text"]).to_list()
|
||||
# [{'text': 'Frodo was a happy puppy', '_score': 0.6931471824645996}]
|
||||
# ...
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
|
||||
--8<-- "python/python/tests/docs/test_search.py:basic_fts_async"
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -93,22 +85,32 @@ By default the text is tokenized by splitting on punctuation and whitespaces, an
|
||||
Stemming is useful for improving search results by reducing words to their root form, e.g. "running" to "run". LanceDB supports stemming for multiple languages, you can specify the tokenizer name to enable stemming by the pattern `tokenizer_name="{language_code}_stem"`, e.g. `en_stem` for English.
|
||||
|
||||
For example, to enable stemming for English:
|
||||
```python
|
||||
table.create_fts_index("text", use_tantivy=True, tokenizer_name="en_stem")
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:fts_config_stem"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:fts_config_stem_async"
|
||||
```
|
||||
|
||||
the following [languages](https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html) are currently supported.
|
||||
|
||||
The tokenizer is customizable, you can specify how the tokenizer splits the text, and how it filters out words, etc.
|
||||
|
||||
For example, for language with accents, you can specify the tokenizer to use `ascii_folding` to remove accents, e.g. 'é' to 'e':
|
||||
```python
|
||||
table.create_fts_index("text",
|
||||
use_tantivy=False,
|
||||
language="French",
|
||||
stem=True,
|
||||
ascii_folding=True)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:fts_config_folding"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:fts_config_folding_async"
|
||||
```
|
||||
|
||||
## Filtering
|
||||
|
||||
@@ -119,9 +121,16 @@ This can be invoked via the familiar `where` syntax.
|
||||
With pre-filtering:
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
table.search("puppy").limit(10).where("meta='foo'", prefilter=True).to_list()
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:fts_prefiltering"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:fts_prefiltering_async"
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -151,9 +160,16 @@ With pre-filtering:
|
||||
With post-filtering:
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
table.search("puppy").limit(10).where("meta='foo'", prefilter=False).to_list()
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:fts_postfiltering"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:fts_postfiltering_async"
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -191,9 +207,16 @@ or a **terms** search query like `old man sea`. For more details on the terms
|
||||
query syntax, see Tantivy's [query parser rules](https://docs.rs/tantivy/latest/tantivy/query/struct.QueryParser.html).
|
||||
|
||||
To search for a phrase, the index must be created with `with_position=True`:
|
||||
```python
|
||||
table.create_fts_index("text", use_tantivy=False, with_position=True)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:fts_with_position"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:fts_with_position_async"
|
||||
```
|
||||
This will allow you to search for phrases, but it will also significantly increase the index size and indexing time.
|
||||
|
||||
|
||||
@@ -205,10 +228,16 @@ This can make the query more efficient, especially when the table is large and t
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
table.add([{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"}])
|
||||
table.optimize()
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:fts_incremental_index"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:fts_incremental_index_async"
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
LanceDB also provides support for full-text search via [Tantivy](https://github.com/quickwit-oss/tantivy), allowing you to incorporate keyword-based search (based on BM25) in your retrieval solutions.
|
||||
|
||||
The tantivy-based FTS is only available in Python and does not support building indexes on object storage or incremental indexing. If you need these features, try native FTS [native FTS](fts.md).
|
||||
The tantivy-based FTS is only available in Python synchronous APIs and does not support building indexes on object storage or incremental indexing. If you need these features, try the [native FTS](fts.md).
|
||||
|
||||
## Installation
|
||||
|
||||
|
||||
@@ -32,19 +32,20 @@ over scalar columns.
|
||||
### Create a scalar index
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
books = [
|
||||
{"book_id": 1, "publisher": "plenty of books", "tags": ["fantasy", "adventure"]},
|
||||
{"book_id": 2, "publisher": "book town", "tags": ["non-fiction"]},
|
||||
{"book_id": 3, "publisher": "oreilly", "tags": ["textbook"]}
|
||||
]
|
||||
=== "Sync API"
|
||||
|
||||
db = lancedb.connect("./db")
|
||||
table = db.create_table("books", books)
|
||||
table.create_scalar_index("book_id") # BTree by default
|
||||
table.create_scalar_index("publisher", index_type="BITMAP")
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-btree-bitmap"
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:basic_scalar_index"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb-btree-bitmap"
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:basic_scalar_index_async"
|
||||
```
|
||||
|
||||
=== "Typescript"
|
||||
|
||||
@@ -62,12 +63,18 @@ The following scan will be faster if the column `book_id` has a scalar index:
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
=== "Sync API"
|
||||
|
||||
table = db.open_table("books")
|
||||
my_df = table.search().where("book_id = 2").to_pandas()
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:search_with_scalar_index"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:search_with_scalar_index_async"
|
||||
```
|
||||
|
||||
=== "Typescript"
|
||||
|
||||
@@ -88,22 +95,18 @@ Scalar indices can also speed up scans containing a vector search or full text s
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
=== "Sync API"
|
||||
|
||||
data = [
|
||||
{"book_id": 1, "vector": [1, 2]},
|
||||
{"book_id": 2, "vector": [3, 4]},
|
||||
{"book_id": 3, "vector": [5, 6]}
|
||||
]
|
||||
table = db.create_table("book_with_embeddings", data)
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_scalar_index"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
(
|
||||
table.search([1, 2])
|
||||
.where("book_id != 3", prefilter=True)
|
||||
.to_pandas()
|
||||
)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:vector_search_with_scalar_index_async"
|
||||
```
|
||||
|
||||
=== "Typescript"
|
||||
|
||||
@@ -122,10 +125,16 @@ Scalar indices can also speed up scans containing a vector search or full text s
|
||||
Updating the table data (adding, deleting, or modifying records) requires that you also update the scalar index. This can be done by calling `optimize`, which will trigger an update to the existing scalar index.
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
table.add([{"vector": [7, 8], "book_id": 4}])
|
||||
table.optimize()
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:update_scalar_index"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_index.py:update_scalar_index_async"
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
|
||||
@@ -12,26 +12,50 @@ LanceDB OSS supports object stores such as AWS S3 (and compatible stores), Azure
|
||||
=== "Python"
|
||||
|
||||
AWS S3:
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect("s3://bucket/path")
|
||||
```
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect("s3://bucket/path")
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
async_db = await lancedb.connect_async("s3://bucket/path")
|
||||
```
|
||||
|
||||
Google Cloud Storage:
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect("gs://bucket/path")
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect("gs://bucket/path")
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
async_db = await lancedb.connect_async("gs://bucket/path")
|
||||
```
|
||||
|
||||
Azure Blob Storage:
|
||||
|
||||
<!-- skip-test -->
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect("az://bucket/path")
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect("az://bucket/path")
|
||||
```
|
||||
<!-- skip-test -->
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
async_db = await lancedb.connect_async("az://bucket/path")
|
||||
```
|
||||
Note that for Azure, storage credentials must be configured. See [below](#azure-blob-storage) for more details.
|
||||
|
||||
|
||||
@@ -94,13 +118,24 @@ If you only want this to apply to one particular connection, you can pass the `s
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = await lancedb.connect_async(
|
||||
"s3://bucket/path",
|
||||
storage_options={"timeout": "60s"}
|
||||
)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect(
|
||||
"s3://bucket/path",
|
||||
storage_options={"timeout": "60s"}
|
||||
)
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
async_db = await lancedb.connect_async(
|
||||
"s3://bucket/path",
|
||||
storage_options={"timeout": "60s"}
|
||||
)
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -128,15 +163,29 @@ Getting even more specific, you can set the `timeout` for only a particular tabl
|
||||
=== "Python"
|
||||
|
||||
<!-- skip-test -->
|
||||
```python
|
||||
import lancedb
|
||||
db = await lancedb.connect_async("s3://bucket/path")
|
||||
table = await db.create_table(
|
||||
"table",
|
||||
[{"a": 1, "b": 2}],
|
||||
storage_options={"timeout": "60s"}
|
||||
)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect("s3://bucket/path")
|
||||
table = db.create_table(
|
||||
"table",
|
||||
[{"a": 1, "b": 2}],
|
||||
storage_options={"timeout": "60s"}
|
||||
)
|
||||
```
|
||||
<!-- skip-test -->
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
async_db = await lancedb.connect_async("s3://bucket/path")
|
||||
async_table = await async_db.create_table(
|
||||
"table",
|
||||
[{"a": 1, "b": 2}],
|
||||
storage_options={"timeout": "60s"}
|
||||
)
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -194,17 +243,32 @@ These can be set as environment variables or passed in the `storage_options` par
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = await lancedb.connect_async(
|
||||
"s3://bucket/path",
|
||||
storage_options={
|
||||
"aws_access_key_id": "my-access-key",
|
||||
"aws_secret_access_key": "my-secret-key",
|
||||
"aws_session_token": "my-session-token",
|
||||
}
|
||||
)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect(
|
||||
"s3://bucket/path",
|
||||
storage_options={
|
||||
"aws_access_key_id": "my-access-key",
|
||||
"aws_secret_access_key": "my-secret-key",
|
||||
"aws_session_token": "my-session-token",
|
||||
}
|
||||
)
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
async_db = await lancedb.connect_async(
|
||||
"s3://bucket/path",
|
||||
storage_options={
|
||||
"aws_access_key_id": "my-access-key",
|
||||
"aws_secret_access_key": "my-secret-key",
|
||||
"aws_session_token": "my-session-token",
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -348,12 +412,22 @@ name of the table to use.
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = await lancedb.connect_async(
|
||||
"s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
|
||||
)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect(
|
||||
"s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
|
||||
)
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
async_db = await lancedb.connect_async(
|
||||
"s3+ddb://bucket/path?ddbTableName=my-dynamodb-table",
|
||||
)
|
||||
```
|
||||
|
||||
=== "JavaScript"
|
||||
|
||||
@@ -441,16 +515,30 @@ LanceDB can also connect to S3-compatible stores, such as MinIO. To do so, you m
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = await lancedb.connect_async(
|
||||
"s3://bucket/path",
|
||||
storage_options={
|
||||
"region": "us-east-1",
|
||||
"endpoint": "http://minio:9000",
|
||||
}
|
||||
)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect(
|
||||
"s3://bucket/path",
|
||||
storage_options={
|
||||
"region": "us-east-1",
|
||||
"endpoint": "http://minio:9000",
|
||||
}
|
||||
)
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
async_db = await lancedb.connect_async(
|
||||
"s3://bucket/path",
|
||||
storage_options={
|
||||
"region": "us-east-1",
|
||||
"endpoint": "http://minio:9000",
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -502,16 +590,30 @@ To configure LanceDB to use an S3 Express endpoint, you must set the storage opt
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = await lancedb.connect_async(
|
||||
"s3://my-bucket--use1-az4--x-s3/path",
|
||||
storage_options={
|
||||
"region": "us-east-1",
|
||||
"s3_express": "true",
|
||||
}
|
||||
)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect(
|
||||
"s3://my-bucket--use1-az4--x-s3/path",
|
||||
storage_options={
|
||||
"region": "us-east-1",
|
||||
"s3_express": "true",
|
||||
}
|
||||
)
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
async_db = await lancedb.connect_async(
|
||||
"s3://my-bucket--use1-az4--x-s3/path",
|
||||
storage_options={
|
||||
"region": "us-east-1",
|
||||
"s3_express": "true",
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -552,15 +654,29 @@ GCS credentials are configured by setting the `GOOGLE_SERVICE_ACCOUNT` environme
|
||||
=== "Python"
|
||||
|
||||
<!-- skip-test -->
|
||||
```python
|
||||
import lancedb
|
||||
db = await lancedb.connect_async(
|
||||
"gs://my-bucket/my-database",
|
||||
storage_options={
|
||||
"service_account": "path/to/service-account.json",
|
||||
}
|
||||
)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect(
|
||||
"gs://my-bucket/my-database",
|
||||
storage_options={
|
||||
"service_account": "path/to/service-account.json",
|
||||
}
|
||||
)
|
||||
```
|
||||
<!-- skip-test -->
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
async_db = await lancedb.connect_async(
|
||||
"gs://my-bucket/my-database",
|
||||
storage_options={
|
||||
"service_account": "path/to/service-account.json",
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -612,16 +728,31 @@ Azure Blob Storage credentials can be configured by setting the `AZURE_STORAGE_A
|
||||
=== "Python"
|
||||
|
||||
<!-- skip-test -->
|
||||
```python
|
||||
import lancedb
|
||||
db = await lancedb.connect_async(
|
||||
"az://my-container/my-database",
|
||||
storage_options={
|
||||
account_name: "some-account",
|
||||
account_key: "some-key",
|
||||
}
|
||||
)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect(
|
||||
"az://my-container/my-database",
|
||||
storage_options={
|
||||
"account_name": "some-account",
|
||||
"account_key": "some-key",
|
||||
}
|
||||
)
|
||||
```
|
||||
<!-- skip-test -->
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
async_db = await lancedb.connect_async(
|
||||
"az://my-container/my-database",
|
||||
storage_options={
|
||||
"account_name": "some-account",
|
||||
"account_key": "some-key",
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
|
||||
@@ -12,10 +12,18 @@ Initialize a LanceDB connection and create a table
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
db = lancedb.connect("./.lancedb")
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:connect"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:connect_async"
|
||||
```
|
||||
|
||||
LanceDB allows ingesting data from various sources - `dict`, `list[dict]`, `pd.DataFrame`, `pa.Table` or an `Iterator[pa.RecordBatch]`. Let's take a look at some of these.
|
||||
|
||||
@@ -47,18 +55,16 @@ Initialize a LanceDB connection and create a table
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
=== "Sync API"
|
||||
|
||||
db = lancedb.connect("./.lancedb")
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
data = [{"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7},
|
||||
{"vector": [0.2, 1.8], "lat": 40.1, "long": -74.1}]
|
||||
|
||||
db.create_table("my_table", data)
|
||||
|
||||
db["my_table"].head()
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async"
|
||||
```
|
||||
|
||||
!!! info "Note"
|
||||
If the table already exists, LanceDB will raise an error by default.
|
||||
@@ -67,16 +73,30 @@ Initialize a LanceDB connection and create a table
|
||||
and the table exists, then it simply opens the existing table. The data you
|
||||
passed in will NOT be appended to the table in that case.
|
||||
|
||||
```python
|
||||
db.create_table("name", data, exist_ok=True)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_exist_ok"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_exist_ok"
|
||||
```
|
||||
|
||||
Sometimes you want to make sure that you start fresh. If you want to
|
||||
overwrite the table, you can pass in mode="overwrite" to the createTable function.
|
||||
|
||||
```python
|
||||
db.create_table("name", data, mode="overwrite")
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_overwrite"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_overwrite"
|
||||
```
|
||||
|
||||
=== "Typescript[^1]"
|
||||
You can create a LanceDB table in JavaScript using an array of records as follows.
|
||||
@@ -146,34 +166,37 @@ Initialize a LanceDB connection and create a table
|
||||
|
||||
### From a Pandas DataFrame
|
||||
|
||||
```python
|
||||
import pandas as pd
|
||||
|
||||
data = pd.DataFrame({
|
||||
"vector": [[1.1, 1.2, 1.3, 1.4], [0.2, 1.8, 0.4, 3.6]],
|
||||
"lat": [45.5, 40.1],
|
||||
"long": [-122.7, -74.1]
|
||||
})
|
||||
=== "Sync API"
|
||||
|
||||
db.create_table("my_table", data)
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_pandas"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
db["my_table"].head()
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_pandas"
|
||||
```
|
||||
|
||||
!!! info "Note"
|
||||
Data is converted to Arrow before being written to disk. For maximum control over how data is saved, either provide the PyArrow schema to convert to or else provide a PyArrow Table directly.
|
||||
|
||||
The **`vector`** column needs to be a [Vector](../python/pydantic.md#vector-field) (defined as [pyarrow.FixedSizeList](https://arrow.apache.org/docs/python/generated/pyarrow.list_.html)) type.
|
||||
|
||||
```python
|
||||
custom_schema = pa.schema([
|
||||
pa.field("vector", pa.list_(pa.float32(), 4)),
|
||||
pa.field("lat", pa.float32()),
|
||||
pa.field("long", pa.float32())
|
||||
])
|
||||
=== "Sync API"
|
||||
|
||||
table = db.create_table("my_table", data, schema=custom_schema)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_custom_schema"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_custom_schema"
|
||||
```
|
||||
|
||||
### From a Polars DataFrame
|
||||
|
||||
@@ -182,45 +205,38 @@ written in Rust. Just like in Pandas, the Polars integration is enabled by PyArr
|
||||
under the hood. A deeper integration between LanceDB Tables and Polars DataFrames
|
||||
is on the way.
|
||||
|
||||
```python
|
||||
import polars as pl
|
||||
=== "Sync API"
|
||||
|
||||
data = pl.DataFrame({
|
||||
"vector": [[3.1, 4.1], [5.9, 26.5]],
|
||||
"item": ["foo", "bar"],
|
||||
"price": [10.0, 20.0]
|
||||
})
|
||||
table = db.create_table("pl_table", data=data)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-polars"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_polars"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-polars"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_polars"
|
||||
```
|
||||
|
||||
### From an Arrow Table
|
||||
You can also create LanceDB tables directly from Arrow tables.
|
||||
LanceDB supports float16 data type!
|
||||
|
||||
=== "Python"
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import pyarrow as pa
|
||||
import numpy as np
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-numpy"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_arrow_table"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
dim = 16
|
||||
total = 2
|
||||
schema = pa.schema(
|
||||
[
|
||||
pa.field("vector", pa.list_(pa.float16(), dim)),
|
||||
pa.field("text", pa.string())
|
||||
]
|
||||
)
|
||||
data = pa.Table.from_arrays(
|
||||
[
|
||||
pa.array([np.random.randn(dim).astype(np.float16) for _ in range(total)],
|
||||
pa.list_(pa.float16(), dim)),
|
||||
pa.array(["foo", "bar"])
|
||||
],
|
||||
["vector", "text"],
|
||||
)
|
||||
tbl = db.create_table("f16_tbl", data, schema=schema)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-polars"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-numpy"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_arrow_table"
|
||||
```
|
||||
|
||||
=== "Typescript[^1]"
|
||||
|
||||
@@ -250,25 +266,22 @@ can be configured with the vector dimensions. It is also important to note that
|
||||
LanceDB only understands subclasses of `lancedb.pydantic.LanceModel`
|
||||
(which itself derives from `pydantic.BaseModel`).
|
||||
|
||||
```python
|
||||
from lancedb.pydantic import Vector, LanceModel
|
||||
=== "Sync API"
|
||||
|
||||
class Content(LanceModel):
|
||||
movie_id: int
|
||||
vector: Vector(128)
|
||||
genres: str
|
||||
title: str
|
||||
imdb_id: int
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:class-Content"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_pydantic"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
@property
|
||||
def imdb_url(self) -> str:
|
||||
return f"https://www.imdb.com/title/tt{self.imdb_id}"
|
||||
|
||||
import pyarrow as pa
|
||||
db = lancedb.connect("~/.lancedb")
|
||||
table_name = "movielens_small"
|
||||
table = db.create_table(table_name, schema=Content)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:class-Content"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_pydantic"
|
||||
```
|
||||
|
||||
#### Nested schemas
|
||||
|
||||
@@ -277,22 +290,24 @@ For example, you may want to store the document string
|
||||
and the document source name as a nested Document object:
|
||||
|
||||
```python
|
||||
class Document(BaseModel):
|
||||
content: str
|
||||
source: str
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pydantic-basemodel"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:class-Document"
|
||||
```
|
||||
|
||||
This can be used as the type of a LanceDB table column:
|
||||
|
||||
```python
|
||||
class NestedSchema(LanceModel):
|
||||
id: str
|
||||
vector: Vector(1536)
|
||||
document: Document
|
||||
=== "Sync API"
|
||||
|
||||
tbl = db.create_table("nested_table", schema=NestedSchema, mode="overwrite")
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:class-NestedSchema"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_nested_schema"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:class-NestedSchema"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_nested_schema"
|
||||
```
|
||||
This creates a struct column called "document" that has two subfields
|
||||
called "content" and "source":
|
||||
|
||||
@@ -356,29 +371,20 @@ LanceDB additionally supports PyArrow's `RecordBatch` Iterators or other generat
|
||||
|
||||
Here's an example using a `RecordBatch` iterator for creating tables.
|
||||
|
||||
```python
|
||||
import pyarrow as pa
|
||||
=== "Sync API"
|
||||
|
||||
def make_batches():
|
||||
for i in range(5):
|
||||
yield pa.RecordBatch.from_arrays(
|
||||
[
|
||||
pa.array([[3.1, 4.1, 5.1, 6.1], [5.9, 26.5, 4.7, 32.8]],
|
||||
pa.list_(pa.float32(), 4)),
|
||||
pa.array(["foo", "bar"]),
|
||||
pa.array([10.0, 20.0]),
|
||||
],
|
||||
["vector", "item", "price"],
|
||||
)
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:make_batches"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_from_batch"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
schema = pa.schema([
|
||||
pa.field("vector", pa.list_(pa.float32(), 4)),
|
||||
pa.field("item", pa.utf8()),
|
||||
pa.field("price", pa.float32()),
|
||||
])
|
||||
|
||||
db.create_table("batched_tale", make_batches(), schema=schema)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:make_batches"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_from_batch"
|
||||
```
|
||||
|
||||
You can also use iterators of other types like Pandas DataFrame or Pylists directly in the above example.
|
||||
|
||||
@@ -387,15 +393,29 @@ You can also use iterators of other types like Pandas DataFrame or Pylists direc
|
||||
=== "Python"
|
||||
If you forget the name of your table, you can always get a listing of all table names.
|
||||
|
||||
```python
|
||||
print(db.table_names())
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:list_tables"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:list_tables_async"
|
||||
```
|
||||
|
||||
Then, you can open any existing tables.
|
||||
|
||||
```python
|
||||
tbl = db.open_table("my_table")
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:open_table"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:open_table_async"
|
||||
```
|
||||
|
||||
=== "Typescript[^1]"
|
||||
|
||||
@@ -418,35 +438,41 @@ You can create an empty table for scenarios where you want to add data to the ta
|
||||
|
||||
|
||||
An empty table can be initialized via a PyArrow schema.
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
import pyarrow as pa
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
schema = pa.schema(
|
||||
[
|
||||
pa.field("vector", pa.list_(pa.float32(), 2)),
|
||||
pa.field("item", pa.string()),
|
||||
pa.field("price", pa.float32()),
|
||||
])
|
||||
tbl = db.create_table("empty_table_add", schema=schema)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table_async"
|
||||
```
|
||||
|
||||
Alternatively, you can also use Pydantic to specify the schema for the empty table. Note that we do not
|
||||
directly import `pydantic` but instead use `lancedb.pydantic` which is a subclass of `pydantic.BaseModel`
|
||||
that has been extended to support LanceDB specific types like `Vector`.
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
from lancedb.pydantic import LanceModel, Vector
|
||||
=== "Sync API"
|
||||
|
||||
class Item(LanceModel):
|
||||
vector: Vector(2)
|
||||
item: str
|
||||
price: float
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:class-Item"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table_pydantic"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
tbl = db.create_table("empty_table_add", schema=Item.to_arrow_schema())
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:class-Item"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_empty_table_async_pydantic"
|
||||
```
|
||||
|
||||
Once the empty table has been created, you can add data to it via the various methods listed in the [Adding to a table](#adding-to-a-table) section.
|
||||
|
||||
@@ -473,86 +499,96 @@ After a table has been created, you can always add more data to it using the `ad
|
||||
|
||||
### Add a Pandas DataFrame
|
||||
|
||||
```python
|
||||
df = pd.DataFrame({
|
||||
"vector": [[1.3, 1.4], [9.5, 56.2]], "item": ["banana", "apple"], "price": [5.0, 7.0]
|
||||
})
|
||||
tbl.add(df)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_pandas"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_pandas"
|
||||
```
|
||||
|
||||
### Add a Polars DataFrame
|
||||
|
||||
```python
|
||||
df = pl.DataFrame({
|
||||
"vector": [[1.3, 1.4], [9.5, 56.2]], "item": ["banana", "apple"], "price": [5.0, 7.0]
|
||||
})
|
||||
tbl.add(df)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_polars"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_polars"
|
||||
```
|
||||
|
||||
### Add an Iterator
|
||||
|
||||
You can also add a large dataset batch in one go using Iterator of any supported data types.
|
||||
|
||||
```python
|
||||
def make_batches():
|
||||
for i in range(5):
|
||||
yield [
|
||||
{"vector": [3.1, 4.1], "item": "peach", "price": 6.0},
|
||||
{"vector": [5.9, 26.5], "item": "pear", "price": 5.0}
|
||||
]
|
||||
tbl.add(make_batches())
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:make_batches_for_add"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_batch"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:make_batches_for_add"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_batch"
|
||||
```
|
||||
|
||||
### Add a PyArrow table
|
||||
|
||||
If you have data coming in as a PyArrow table, you can add it directly to the LanceDB table.
|
||||
|
||||
```python
|
||||
pa_table = pa.Table.from_arrays(
|
||||
[
|
||||
pa.array([[9.1, 6.7], [9.9, 31.2]],
|
||||
pa.list_(pa.float32(), 2)),
|
||||
pa.array(["mango", "orange"]),
|
||||
pa.array([7.0, 4.0]),
|
||||
],
|
||||
["vector", "item", "price"],
|
||||
)
|
||||
=== "Sync API"
|
||||
|
||||
tbl.add(pa_table)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_pyarrow"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_pyarrow"
|
||||
```
|
||||
|
||||
### Add a Pydantic Model
|
||||
|
||||
Assuming that a table has been created with the correct schema as shown [above](#creating-empty-table), you can add data items that are valid Pydantic models to the table.
|
||||
|
||||
```python
|
||||
pydantic_model_items = [
|
||||
Item(vector=[8.1, 4.7], item="pineapple", price=10.0),
|
||||
Item(vector=[6.9, 9.3], item="avocado", price=9.0)
|
||||
]
|
||||
=== "Sync API"
|
||||
|
||||
tbl.add(pydantic_model_items)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_from_pydantic"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:add_table_async_from_pydantic"
|
||||
```
|
||||
|
||||
??? "Ingesting Pydantic models with LanceDB embedding API"
|
||||
When using LanceDB's embedding API, you can add Pydantic models directly to the table. LanceDB will automatically convert the `vector` field to a vector before adding it to the table. You need to specify the default value of `vector` field as None to allow LanceDB to automatically vectorize the data.
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
from lancedb.pydantic import LanceModel, Vector
|
||||
from lancedb.embeddings import get_registry
|
||||
=== "Sync API"
|
||||
|
||||
db = lancedb.connect("~/tmp")
|
||||
embed_fcn = get_registry().get("huggingface").create(name="BAAI/bge-small-en-v1.5")
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-embeddings"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_with_embedding"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
class Schema(LanceModel):
|
||||
text: str = embed_fcn.SourceField()
|
||||
vector: Vector(embed_fcn.ndims()) = embed_fcn.VectorField(default=None)
|
||||
|
||||
tbl = db.create_table("my_table", schema=Schema, mode="overwrite")
|
||||
models = [Schema(text="hello"), Schema(text="world")]
|
||||
tbl.add(models)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb-pydantic"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-embeddings"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:create_table_async_with_embedding"
|
||||
```
|
||||
|
||||
=== "Typescript[^1]"
|
||||
|
||||
@@ -571,44 +607,41 @@ Use the `delete()` method on tables to delete rows from a table. To choose which
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
tbl.delete('item = "fizz"')
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:delete_row"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:delete_row_async"
|
||||
```
|
||||
|
||||
### Deleting row with specific column value
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
=== "Sync API"
|
||||
|
||||
data = [{"x": 1, "vector": [1, 2]},
|
||||
{"x": 2, "vector": [3, 4]},
|
||||
{"x": 3, "vector": [5, 6]}]
|
||||
db = lancedb.connect("./.lancedb")
|
||||
table = db.create_table("my_table", data)
|
||||
table.to_pandas()
|
||||
# x vector
|
||||
# 0 1 [1.0, 2.0]
|
||||
# 1 2 [3.0, 4.0]
|
||||
# 2 3 [5.0, 6.0]
|
||||
|
||||
table.delete("x = 2")
|
||||
table.to_pandas()
|
||||
# x vector
|
||||
# 0 1 [1.0, 2.0]
|
||||
# 1 3 [5.0, 6.0]
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:delete_specific_row"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:delete_specific_row_async"
|
||||
```
|
||||
|
||||
### Delete from a list of values
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
to_remove = [1, 5]
|
||||
to_remove = ", ".join(str(v) for v in to_remove)
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:delete_list_values"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
table.delete(f"x IN ({to_remove})")
|
||||
table.to_pandas()
|
||||
# x vector
|
||||
# 0 3 [5.0, 6.0]
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:delete_list_values_async"
|
||||
```
|
||||
|
||||
=== "Typescript[^1]"
|
||||
|
||||
@@ -659,27 +692,20 @@ This can be used to update zero to all rows depending on how many rows match the
|
||||
=== "Python"
|
||||
|
||||
API Reference: [lancedb.table.Table.update][]
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
import pandas as pd
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:update_table"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
# Create a lancedb connection
|
||||
db = lancedb.connect("./.lancedb")
|
||||
|
||||
# Create a table from a pandas DataFrame
|
||||
data = pd.DataFrame({"x": [1, 2, 3], "vector": [[1, 2], [3, 4], [5, 6]]})
|
||||
table = db.create_table("my_table", data)
|
||||
|
||||
# Update the table where x = 2
|
||||
table.update(where="x = 2", values={"vector": [10, 10]})
|
||||
|
||||
# Get the updated table as a pandas DataFrame
|
||||
df = table.to_pandas()
|
||||
|
||||
# Print the DataFrame
|
||||
print(df)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pandas"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:update_table_async"
|
||||
```
|
||||
|
||||
Output
|
||||
```shell
|
||||
@@ -734,13 +760,16 @@ This can be used to update zero to all rows depending on how many rows match the
|
||||
The `values` parameter is used to provide the new values for the columns as literal values. You can also use the `values_sql` / `valuesSql` parameter to provide SQL expressions for the new values. For example, you can use `values_sql="x + 1"` to increment the value of the `x` column by 1.
|
||||
|
||||
=== "Python"
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
# Update the table where x = 2
|
||||
table.update(valuesSql={"x": "x + 1"})
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:update_table_sql"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
print(table.to_pandas())
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:update_table_sql_async"
|
||||
```
|
||||
|
||||
Output
|
||||
```shell
|
||||
@@ -771,11 +800,16 @@ This can be used to update zero to all rows depending on how many rows match the
|
||||
Use the `drop_table()` method on the database to remove a table.
|
||||
|
||||
=== "Python"
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_basic.py:drop_table"
|
||||
--8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_basic.py:drop_table"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_basic.py:drop_table_async"
|
||||
```
|
||||
|
||||
This permanently removes the table and is not recoverable, unlike deleting rows.
|
||||
By default, if the table does not exist an exception is raised. To suppress this,
|
||||
@@ -809,9 +843,16 @@ data type for it.
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_basic.py:add_columns"
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_basic.py:add_columns"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_basic.py:add_columns_async"
|
||||
```
|
||||
**API Reference:** [lancedb.table.Table.add_columns][]
|
||||
|
||||
=== "Typescript"
|
||||
@@ -848,10 +889,18 @@ rewriting the column, which can be a heavy operation.
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
import pyarrow as pa
|
||||
--8<-- "python/python/tests/docs/test_basic.py:alter_columns"
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||
--8<-- "python/python/tests/docs/test_basic.py:alter_columns"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-pyarrow"
|
||||
--8<-- "python/python/tests/docs/test_basic.py:alter_columns_async"
|
||||
```
|
||||
**API Reference:** [lancedb.table.Table.alter_columns][]
|
||||
|
||||
=== "Typescript"
|
||||
@@ -872,9 +921,16 @@ will remove the column from the schema.
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_basic.py:drop_columns"
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_basic.py:drop_columns"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_basic.py:drop_columns_async"
|
||||
```
|
||||
**API Reference:** [lancedb.table.Table.drop_columns][]
|
||||
|
||||
=== "Typescript"
|
||||
@@ -925,31 +981,46 @@ There are three possible settings for `read_consistency_interval`:
|
||||
|
||||
To set strong consistency, use `timedelta(0)`:
|
||||
|
||||
```python
|
||||
from datetime import timedelta
|
||||
db = lancedb.connect("./.lancedb", read_consistency_interval=timedelta(0))
|
||||
table = db.open_table("my_table")
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:table_strong_consistency"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:table_async_strong_consistency"
|
||||
```
|
||||
|
||||
For eventual consistency, use a custom `timedelta`:
|
||||
|
||||
```python
|
||||
from datetime import timedelta
|
||||
db = lancedb.connect("./.lancedb", read_consistency_interval=timedelta(seconds=5))
|
||||
table = db.open_table("my_table")
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:table_eventual_consistency"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:import-datetime"
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:table_async_eventual_consistency"
|
||||
```
|
||||
|
||||
By default, a `Table` will never check for updates from other writers. To manually check for updates you can use `checkout_latest`:
|
||||
|
||||
```python
|
||||
db = lancedb.connect("./.lancedb")
|
||||
table = db.open_table("my_table")
|
||||
=== "Sync API"
|
||||
|
||||
# (Other writes happen to my_table from another process)
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:table_checkout_latest"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
# Check for updates
|
||||
table.checkout_latest()
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_guide_tables.py:table_async_checkout_latest"
|
||||
```
|
||||
|
||||
=== "Typescript[^1]"
|
||||
|
||||
@@ -957,14 +1028,14 @@ There are three possible settings for `read_consistency_interval`:
|
||||
|
||||
```ts
|
||||
const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 0 });
|
||||
const table = await db.openTable("my_table");
|
||||
const tbl = await db.openTable("my_table");
|
||||
```
|
||||
|
||||
For eventual consistency, specify the update interval as seconds:
|
||||
|
||||
```ts
|
||||
const db = await lancedb.connect({ uri: "./.lancedb", readConsistencyInterval: 5 });
|
||||
const table = await db.openTable("my_table");
|
||||
const tbl = await db.openTable("my_table");
|
||||
```
|
||||
|
||||
<!-- Node doesn't yet support the version time travel: https://github.com/lancedb/lancedb/issues/1007
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
## Improving retriever performance
|
||||
|
||||
Try it yourself - <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
||||
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
||||
|
||||
VectorDBs are used as retreivers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retriever is a critical component of Retrieval Augmented Generation (RAG) acrhitectures. In this section, we will discuss how to improve the performance of retrievers.
|
||||
VectorDBs are used as retrievers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retrievers are a critical component of Retrieval Augmented Generation (RAG) architectures. In this section, we will discuss how to improve the performance of retrievers.
|
||||
|
||||
There are several ways to improve the performance of retrievers. Some of the common techniques are:
|
||||
|
||||
@@ -19,7 +19,7 @@ Using different embedding models is something that's very specific to the use ca
|
||||
|
||||
|
||||
## The dataset
|
||||
We'll be using a QA dataset generated using a LLama2 review paper. The dataset contains 221 query, context and answer triplets. The queries and answers are generated using GPT-4 based on a given query. Full script used to generate the dataset can be found on this [repo](https://github.com/lancedb/ragged). It can be downloaded from [here](https://github.com/AyushExel/assets/blob/main/data_qa.csv)
|
||||
We'll be using a QA dataset generated using a LLama2 review paper. The dataset contains 221 query, context and answer triplets. The queries and answers are generated using GPT-4 based on a given query. Full script used to generate the dataset can be found on this [repo](https://github.com/lancedb/ragged). It can be downloaded from [here](https://github.com/AyushExel/assets/blob/main/data_qa.csv).
|
||||
|
||||
### Using different query types
|
||||
Let's set up the embeddings and the dataset first. We'll use LanceDB's `huggingface` embeddings integration for this guide.
|
||||
@@ -45,14 +45,14 @@ table.add(df[["context"]].to_dict(orient="records"))
|
||||
queries = df["query"].tolist()
|
||||
```
|
||||
|
||||
Now that we have the dataset and embeddings table set up, here's how you can run different query types on the dataset.
|
||||
Now that we have the dataset and embeddings table set up, here's how you can run different query types on the dataset:
|
||||
|
||||
* <b> Vector Search: </b>
|
||||
|
||||
```python
|
||||
table.search(queries[0], query_type="vector").limit(5).to_pandas()
|
||||
```
|
||||
By default, LanceDB uses vector search query type for searching and it automatically converts the input query to a vector before searching when using embedding API. So, the following statement is equivalent to the above statement.
|
||||
By default, LanceDB uses vector search query type for searching and it automatically converts the input query to a vector before searching when using embedding API. So, the following statement is equivalent to the above statement:
|
||||
|
||||
```python
|
||||
table.search(queries[0]).limit(5).to_pandas()
|
||||
@@ -77,7 +77,7 @@ Now that we have the dataset and embeddings table set up, here's how you can run
|
||||
|
||||
* <b> Hybrid Search: </b>
|
||||
|
||||
Hybrid search is a combination of vector and full-text search. Here's how you can run a hybrid search query on the dataset.
|
||||
Hybrid search is a combination of vector and full-text search. Here's how you can run a hybrid search query on the dataset:
|
||||
```python
|
||||
table.search(queries[0], query_type="hybrid").limit(5).to_pandas()
|
||||
```
|
||||
@@ -87,7 +87,7 @@ Now that we have the dataset and embeddings table set up, here's how you can run
|
||||
|
||||
!!! note "Note"
|
||||
By default, it uses `LinearCombinationReranker` that combines the scores from vector and full-text search using a weighted linear combination. It is the simplest reranker implementation available in LanceDB. You can also use other rerankers like `CrossEncoderReranker` or `CohereReranker` for reranking the results.
|
||||
Learn more about rerankers [here](https://lancedb.github.io/lancedb/reranking/)
|
||||
Learn more about rerankers [here](https://lancedb.github.io/lancedb/reranking/).
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
Continuing from the previous section, we can now rerank the results using more complex rerankers.
|
||||
|
||||
Try it yourself - <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
||||
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/lancedb_reranking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
||||
|
||||
## Reranking search results
|
||||
You can rerank any search results using a reranker. The syntax for reranking is as follows:
|
||||
@@ -62,9 +62,6 @@ Let us take a look at the same datasets from the previous sections, using the sa
|
||||
| Reranked fts | 0.672 |
|
||||
| Hybrid | 0.759 |
|
||||
|
||||
### SQuAD Dataset
|
||||
|
||||
|
||||
### Uber10K sec filing Dataset
|
||||
|
||||
| Query Type | Hit-rate@5 |
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
## Finetuning the Embedding Model
|
||||
Try it yourself - <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/embedding_tuner.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
||||
Try it yourself: <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/embedding_tuner.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
|
||||
|
||||
Another way to improve retriever performance is to fine-tune the embedding model itself. Fine-tuning the embedding model can help in learning better representations for the documents and queries in the dataset. This can be particularly useful when the dataset is very different from the pre-trained data used to train the embedding model.
|
||||
|
||||
@@ -16,7 +16,7 @@ validation_df.to_csv("data_val.csv", index=False)
|
||||
You can use any tuning API to fine-tune embedding models. In this example, we'll utilise Llama-index as it also comes with utilities for synthetic data generation and training the model.
|
||||
|
||||
|
||||
Then parse the dataset as llama-index text nodes and generate synthetic QA pairs from each node.
|
||||
We parse the dataset as llama-index text nodes and generate synthetic QA pairs from each node:
|
||||
```python
|
||||
from llama_index.core.node_parser import SentenceSplitter
|
||||
from llama_index.readers.file import PagedCSVReader
|
||||
@@ -43,7 +43,7 @@ val_dataset = generate_qa_embedding_pairs(
|
||||
)
|
||||
```
|
||||
|
||||
Now we'll use `SentenceTransformersFinetuneEngine` engine to fine-tune the model. You can also use `sentence-transformers` or `transformers` library to fine-tune the model.
|
||||
Now we'll use `SentenceTransformersFinetuneEngine` engine to fine-tune the model. You can also use `sentence-transformers` or `transformers` library to fine-tune the model:
|
||||
|
||||
```python
|
||||
from llama_index.finetuning import SentenceTransformersFinetuneEngine
|
||||
@@ -57,7 +57,7 @@ finetune_engine = SentenceTransformersFinetuneEngine(
|
||||
finetune_engine.finetune()
|
||||
embed_model = finetune_engine.get_finetuned_model()
|
||||
```
|
||||
This saves the fine tuned embedding model in `tuned_model` folder. This al
|
||||
This saves the fine-tuned embedding model in the `tuned_model` folder.
|
||||
|
||||
# Evaluation results
|
||||
To evaluate the retriever, you can either use this model to ingest the data into LanceDB directly, or use llama-index's LanceDB integration to create a `VectorStoreIndex` and use it as a retriever.
|
||||
|
||||
@@ -3,22 +3,22 @@
|
||||
Hybrid Search is a broad (often misused) term. It can mean anything from combining multiple methods for searching, to applying ranking methods to better sort the results. In this blog, we use the definition of "hybrid search" to mean using a combination of keyword-based and vector search.
|
||||
|
||||
## The challenge of (re)ranking search results
|
||||
Once you have a group of the most relevant search results from multiple search sources, you'd likely standardize the score and rank them accordingly. This process can also be seen as another independent step - reranking.
|
||||
Once you have a group of the most relevant search results from multiple search sources, you'd likely standardize the score and rank them accordingly. This process can also be seen as another independent step: reranking.
|
||||
There are two approaches for reranking search results from multiple sources.
|
||||
|
||||
* <b>Score-based</b>: Calculate final relevance scores based on a weighted linear combination of individual search algorithm scores. Example - Weighted linear combination of semantic search & keyword-based search results.
|
||||
* <b>Score-based</b>: Calculate final relevance scores based on a weighted linear combination of individual search algorithm scores. Example: Weighted linear combination of semantic search & keyword-based search results.
|
||||
|
||||
* <b>Relevance-based</b>: Discards the existing scores and calculates the relevance of each search result - query pair. Example - Cross Encoder models
|
||||
* <b>Relevance-based</b>: Discards the existing scores and calculates the relevance of each search result-query pair. Example: Cross Encoder models
|
||||
|
||||
Even though there are many strategies for reranking search results, none works for all cases. Moreover, evaluating them itself is a challenge. Also, reranking can be dataset, application specific so it's hard to generalize.
|
||||
Even though there are many strategies for reranking search results, none works for all cases. Moreover, evaluating them itself is a challenge. Also, reranking can be dataset or application specific so it's hard to generalize.
|
||||
|
||||
### Example evaluation of hybrid search with Reranking
|
||||
|
||||
Here's some evaluation numbers from experiment comparing these re-rankers on about 800 queries. It is modified version of an evaluation script from [llama-index](https://github.com/run-llama/finetune-embedding/blob/main/evaluate.ipynb) that measures hit-rate at top-k.
|
||||
Here are some evaluation numbers from an experiment comparing these rerankers on about 800 queries. It is a modified version of an evaluation script from [llama-index](https://github.com/run-llama/finetune-embedding/blob/main/evaluate.ipynb) that measures hit-rate at top-k.
|
||||
|
||||
<b> With OpenAI ada2 embedding </b>
|
||||
|
||||
Vector Search baseline - `0.64`
|
||||
Vector Search baseline: `0.64`
|
||||
|
||||
| Reranker | Top-3 | Top-5 | Top-10 |
|
||||
| --- | --- | --- | --- |
|
||||
@@ -33,7 +33,7 @@ Vector Search baseline - `0.64`
|
||||
|
||||
<b> With OpenAI embedding-v3-small </b>
|
||||
|
||||
Vector Search baseline - `0.59`
|
||||
Vector Search baseline: `0.59`
|
||||
|
||||
| Reranker | Top-3 | Top-5 | Top-10 |
|
||||
| --- | --- | --- | --- |
|
||||
|
||||
@@ -5,57 +5,46 @@ LanceDB supports both semantic and keyword-based search (also termed full-text s
|
||||
## Hybrid search in LanceDB
|
||||
You can perform hybrid search in LanceDB by combining the results of semantic and full-text search via a reranking algorithm of your choice. LanceDB provides multiple rerankers out of the box. However, you can always write a custom reranker if your use case needs more sophisticated logic.
|
||||
|
||||
```python
|
||||
import os
|
||||
=== "Sync API"
|
||||
|
||||
import lancedb
|
||||
import openai
|
||||
from lancedb.embeddings import get_registry
|
||||
from lancedb.pydantic import LanceModel, Vector
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-os"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-openai"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-embeddings"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-pydantic"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-openai-embeddings"
|
||||
--8<-- "python/python/tests/docs/test_search.py:class-Documents"
|
||||
--8<-- "python/python/tests/docs/test_search.py:basic_hybrid_search"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
db = lancedb.connect("~/.lancedb")
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-os"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-openai"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-embeddings"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-pydantic"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-fts"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-openai-embeddings"
|
||||
--8<-- "python/python/tests/docs/test_search.py:class-Documents"
|
||||
--8<-- "python/python/tests/docs/test_search.py:basic_hybrid_search_async"
|
||||
```
|
||||
|
||||
# Ingest embedding function in LanceDB table
|
||||
# Configuring the environment variable OPENAI_API_KEY
|
||||
if "OPENAI_API_KEY" not in os.environ:
|
||||
# OR set the key here as a variable
|
||||
openai.api_key = "sk-..."
|
||||
embeddings = get_registry().get("openai").create()
|
||||
|
||||
class Documents(LanceModel):
|
||||
vector: Vector(embeddings.ndims()) = embeddings.VectorField()
|
||||
text: str = embeddings.SourceField()
|
||||
|
||||
table = db.create_table("documents", schema=Documents)
|
||||
|
||||
data = [
|
||||
{ "text": "rebel spaceships striking from a hidden base"},
|
||||
{ "text": "have won their first victory against the evil Galactic Empire"},
|
||||
{ "text": "during the battle rebel spies managed to steal secret plans"},
|
||||
{ "text": "to the Empire's ultimate weapon the Death Star"}
|
||||
]
|
||||
|
||||
# ingest docs with auto-vectorization
|
||||
table.add(data)
|
||||
|
||||
# Create a fts index before the hybrid search
|
||||
table.create_fts_index("text")
|
||||
# hybrid search with default re-ranker
|
||||
results = table.search("flower moon", query_type="hybrid").to_pandas()
|
||||
```
|
||||
!!! Note
|
||||
You can also pass the vector and text query manually. This is useful if you're not using the embedding API or if you're using a separate embedder service.
|
||||
### Explicitly passing the vector and text query
|
||||
```python
|
||||
vector_query = [0.1, 0.2, 0.3, 0.4, 0.5]
|
||||
text_query = "flower moon"
|
||||
results = table.search(query_type="hybrid")
|
||||
.vector(vector_query)
|
||||
.text(text_query)
|
||||
.limit(5)
|
||||
.to_pandas()
|
||||
=== "Sync API"
|
||||
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:hybrid_search_pass_vector_text"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:hybrid_search_pass_vector_text_async"
|
||||
```
|
||||
|
||||
By default, LanceDB uses `RRFReranker()`, which uses reciprocal rank fusion score, to combine and rerank the results of semantic and full-text search. You can customize the hyperparameters as needed or write your own custom reranker. Here's how you can use any of the available rerankers:
|
||||
|
||||
@@ -68,7 +57,7 @@ By default, LanceDB uses `RRFReranker()`, which uses reciprocal rank fusion scor
|
||||
|
||||
|
||||
## Available Rerankers
|
||||
LanceDB provides a number of re-rankers out of the box. You can use any of these re-rankers by passing them to the `rerank()` method.
|
||||
LanceDB provides a number of rerankers out of the box. You can use any of these rerankers by passing them to the `rerank()` method.
|
||||
Go to [Rerankers](../reranking/index.md) to learn more about using the available rerankers and implementing custom rerankers.
|
||||
|
||||
|
||||
|
||||
@@ -66,7 +66,7 @@ the size of the data.
|
||||
|
||||
### Embedding Functions
|
||||
|
||||
The embedding API has been completely reworked, and it now more closely resembles the Python API, including the new [embedding registry](./js/classes/embedding.EmbeddingFunctionRegistry.md)
|
||||
The embedding API has been completely reworked, and it now more closely resembles the Python API, including the new [embedding registry](./js/classes/embedding.EmbeddingFunctionRegistry.md):
|
||||
|
||||
=== "vectordb (deprecated)"
|
||||
|
||||
|
||||
@@ -207,7 +207,7 @@
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## The dataset\n",
|
||||
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retreiver.\n",
|
||||
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retriever.\n",
|
||||
"The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo\n"
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -477,7 +477,7 @@
|
||||
"source": [
|
||||
"## Vector Search\n",
|
||||
"\n",
|
||||
"avg latency - `3.48 ms ± 71.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
|
||||
"Average latency: `3.48 ms ± 71.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -597,7 +597,7 @@
|
||||
"`LinearCombinationReranker(weight=0.7)` is used as the default reranker for reranking the hybrid search results if the reranker isn't specified explicitly.\n",
|
||||
"The `weight` param controls the weightage provided to vector search score. The weight of `1-weight` is applied to FTS scores when reranking.\n",
|
||||
"\n",
|
||||
"Latency - `71 ms ± 25.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
|
||||
"Latency: `71 ms ± 25.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)`"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -675,9 +675,9 @@
|
||||
},
|
||||
"source": [
|
||||
"### Cohere Reranker\n",
|
||||
"This uses Cohere's Reranking API to re-rank the results. It accepts the reranking model name as a parameter. By Default it uses the english-v3 model but you can easily switch to a multi-lingual model.\n",
|
||||
"This uses Cohere's Reranking API to re-rank the results. It accepts the reranking model name as a parameter. By default it uses the english-v3 model but you can easily switch to a multi-lingual model.\n",
|
||||
"\n",
|
||||
"latency - `605 ms ± 78.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
|
||||
"Latency: `605 ms ± 78.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1165,7 +1165,7 @@
|
||||
},
|
||||
"source": [
|
||||
"### ColBERT Reranker\n",
|
||||
"Colber Reranker is powered by ColBERT model. It runs locally using the huggingface implementation.\n",
|
||||
"The Colbert Reranker is powered by the ColBERT model. It runs locally using the huggingface implementation.\n",
|
||||
"\n",
|
||||
"Latency - `950 ms ± 5.78 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`\n",
|
||||
"\n",
|
||||
@@ -1489,9 +1489,9 @@
|
||||
},
|
||||
"source": [
|
||||
"### Cross Encoder Reranker\n",
|
||||
"Uses cross encoder models are rerankers. Uses sentence transformer implemntation locally\n",
|
||||
"Uses cross encoder models as rerankers. Uses the sentence transformer implementation locally.\n",
|
||||
"\n",
|
||||
"Latency - `1.38 s ± 64.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
|
||||
"Latency: `1.38 s ± 64.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)`"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1771,10 +1771,10 @@
|
||||
"source": [
|
||||
"### (Experimental) OpenAI Reranker\n",
|
||||
"\n",
|
||||
"This prompts chat model to rerank results which is not a dedicated reranker model. This should be treated as experimental. You might run out of token limit so set the search limits based on your token limit.\n",
|
||||
"NOTE: It is recommended to use `gpt-4-turbo-preview`, older models might lead to bad behaviour\n",
|
||||
"This prompts a chat model to rerank results and is not a dedicated reranker model. This should be treated as experimental. You might exceed the token limit so set the search limits based on your token limit.\n",
|
||||
"NOTE: It is recommended to use `gpt-4-turbo-preview` as older models might lead to bad behaviour\n",
|
||||
"\n",
|
||||
"Latency - `Can take 10s of seconds if using GPT-4 model`"
|
||||
"Latency: `Can take 10s of seconds if using GPT-4 model`"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1817,7 +1817,7 @@
|
||||
},
|
||||
"source": [
|
||||
"## Use your custom Reranker\n",
|
||||
"Hybrid search in LanceDB is designed to be very flexible. You can easily plug in your own Re-reranking logic. To do so, you simply need to implement the base Reranker class"
|
||||
"Hybrid search in LanceDB is designed to be very flexible. You can easily plug in your own Re-reranking logic. To do so, you simply need to implement the base Reranker class:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1849,9 +1849,9 @@
|
||||
"source": [
|
||||
"### Custom Reranker based on CohereReranker\n",
|
||||
"\n",
|
||||
"For the sake of simplicity let's build custom reranker that just enchances the Cohere Reranker by accepting a filter query, and accept other CohereReranker params as kwags.\n",
|
||||
"For the sake of simplicity let's build a custom reranker that enhances the Cohere Reranker by accepting a filter query, and accepts other CohereReranker params as kwargs.\n",
|
||||
"\n",
|
||||
"For this toy example let's say we want to get rid of docs that represent a table of contents, appendix etc. as these are semantically close of representing costs but this isn't something we are interested in because they don't represent the specific reasons why operating costs were high. They simply represent the costs."
|
||||
"For this toy example let's say we want to get rid of docs that represent a table of contents or appendix, as these are semantically close to representing costs but don't represent the specific reasons why operating costs were high."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1969,7 +1969,7 @@
|
||||
"id": "b3b5464a-7252-4eab-aaac-9b0eae37496f"
|
||||
},
|
||||
"source": [
|
||||
"As you can see the document containing the Table of contetnts of spending no longer shows up"
|
||||
"As you can see, the document containing the table of contents no longer shows up."
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -49,7 +49,7 @@
|
||||
},
|
||||
"source": [
|
||||
"## What is a retriever\n",
|
||||
"VectorDBs are used as retreivers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, retriever is a critical component of Retrieval Augmented Generation (RAG) acrhitectures. In this section, we will discuss how to improve the performance of retrievers.\n",
|
||||
"VectorDBs are used as retrievers in recommender or chatbot-based systems for retrieving relevant data based on user queries. For example, a retriever is a critical component of Retrieval Augmented Generation (RAG) architectures. In this section, we will discuss how to improve the performance of retrievers.\n",
|
||||
"\n",
|
||||
"<img src=\"https://llmstack.ai/assets/images/rag-f517f1f834bdbb94a87765e0edd40ff2.png\" />\n",
|
||||
"\n",
|
||||
@@ -64,7 +64,7 @@
|
||||
"- Fine-tuning the embedding models\n",
|
||||
"- Using different embedding models\n",
|
||||
"\n",
|
||||
"Obviously, the above list is not exhaustive. There are other subtler ways that can improve retrieval performance like experimenting chunking algorithms, using different distance/similarity metrics etc. But for brevity, we'll only cover high level and more impactful techniques here.\n",
|
||||
"Obviously, the above list is not exhaustive. There are other subtler ways that can improve retrieval performance like alternative chunking algorithms, using different distance/similarity metrics, and more. For brevity, we'll only cover high level and more impactful techniques here.\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
@@ -77,7 +77,7 @@
|
||||
"# LanceDB\n",
|
||||
"- Multimodal DB for AI\n",
|
||||
"- Powered by an innovative & open-source in-house file format\n",
|
||||
"- 0 Setup\n",
|
||||
"- Zero setup\n",
|
||||
"- Scales up on disk storage\n",
|
||||
"- Native support for vector, full-text(BM25) and hybrid search\n",
|
||||
"\n",
|
||||
@@ -92,8 +92,8 @@
|
||||
},
|
||||
"source": [
|
||||
"## The dataset\n",
|
||||
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retreiver.\n",
|
||||
"The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo\n"
|
||||
"The dataset we'll use is a synthetic QA dataset generated from LLama2 review paper. The paper was divided into chunks, with each chunk being a unique context. An LLM was prompted to ask questions relevant to the context for testing a retriever.\n",
|
||||
"The exact code and other utility functions for this can be found in [this](https://github.com/lancedb/ragged) repo.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -594,10 +594,10 @@
|
||||
},
|
||||
"source": [
|
||||
"## Ingestion\n",
|
||||
"Let us now ingest the contexts in LanceDB\n",
|
||||
"Let us now ingest the contexts in LanceDB. The steps will be:\n",
|
||||
"\n",
|
||||
"- Create a schema (Pydantic or Pyarrow)\n",
|
||||
"- Select an embedding model from LanceDB Embedding API (Allows automatic vectorization of data)\n",
|
||||
"- Select an embedding model from LanceDB Embedding API (to allow automatic vectorization of data)\n",
|
||||
"- Ingest the contexts\n"
|
||||
]
|
||||
},
|
||||
@@ -841,7 +841,7 @@
|
||||
},
|
||||
"source": [
|
||||
"## Different Query types in LanceDB\n",
|
||||
"LanceDB allows switching query types with by setting `query_type` argument, which defaults to `vector` when using Embedding API. In this example we'll use `JinaReranker` which is one of many rerankers supported by LanceDB\n",
|
||||
"LanceDB allows switching query types by setting the `query_type` argument, which defaults to `vector` when using the Embedding API. In this example we'll use `JinaReranker` which is one of many rerankers supported by LanceDB.\n",
|
||||
"\n",
|
||||
"### Vector search:\n",
|
||||
"Vector search\n",
|
||||
@@ -1446,11 +1446,11 @@
|
||||
"source": [
|
||||
"## Takeaways & Tradeoffs\n",
|
||||
"\n",
|
||||
"* **Easiest method to significantly improve accuracy** Using Hybrid search and/or rerankers can significantly improve retrieval performance without spending any additional time or effort on tuning embedding models, generators, or dissecting the dataset.\n",
|
||||
"* **Rerankers significantly improve accuracy at little cost.** Using Hybrid search and/or rerankers can significantly improve retrieval performance without spending any additional time or effort on tuning embedding models, generators, or dissecting the dataset.\n",
|
||||
"\n",
|
||||
"* **Reranking is an expensive operation.** Depending on the type of reranker you choose, it can add significant latency to query times, although some API-based rerankers can be significantly faster.\n",
|
||||
"\n",
|
||||
"* When using models locally, having a warmed-up GPU environment will significantly reduce latency. This is specially useful if the application doesn't need to be strcitly realtime. The tradeoff being GPU resources."
|
||||
"* **Pre-warmed GPU environments reduce latency.** When using models locally, having a warmed-up GPU environment will significantly reduce latency. This is especially useful if the application doesn't need to be strictly realtime. Pre-warming comes at the expense of GPU resources."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1504,4 +1504,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
1096
docs/src/notebooks/reproducibility_async.ipynb
Normal file
1096
docs/src/notebooks/reproducibility_async.ipynb
Normal file
File diff suppressed because it is too large
Load Diff
@@ -8,54 +8,55 @@ and PyArrow. The sequence of steps in a typical workflow is shown below.
|
||||
|
||||
First, we need to connect to a LanceDB database.
|
||||
|
||||
```py
|
||||
=== "Sync API"
|
||||
|
||||
import lancedb
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_python.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
db = lancedb.connect("data/sample-lancedb")
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_python.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb_async"
|
||||
```
|
||||
|
||||
We can load a Pandas `DataFrame` to LanceDB directly.
|
||||
|
||||
```py
|
||||
import pandas as pd
|
||||
=== "Sync API"
|
||||
|
||||
data = pd.DataFrame({
|
||||
"vector": [[3.1, 4.1], [5.9, 26.5]],
|
||||
"item": ["foo", "bar"],
|
||||
"price": [10.0, 20.0]
|
||||
})
|
||||
table = db.create_table("pd_table", data=data)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_python.py:import-pandas"
|
||||
--8<-- "python/python/tests/docs/test_python.py:create_table_pandas"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_python.py:import-pandas"
|
||||
--8<-- "python/python/tests/docs/test_python.py:create_table_pandas_async"
|
||||
```
|
||||
|
||||
Similar to the [`pyarrow.write_dataset()`](https://arrow.apache.org/docs/python/generated/pyarrow.dataset.write_dataset.html) method, LanceDB's
|
||||
[`db.create_table()`](python.md/#lancedb.db.DBConnection.create_table) accepts data in a variety of forms.
|
||||
|
||||
If you have a dataset that is larger than memory, you can create a table with `Iterator[pyarrow.RecordBatch]` to lazily load the data:
|
||||
|
||||
```py
|
||||
=== "Sync API"
|
||||
|
||||
from typing import Iterable
|
||||
import pyarrow as pa
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_python.py:import-iterable"
|
||||
--8<-- "python/python/tests/docs/test_python.py:import-pyarrow"
|
||||
--8<-- "python/python/tests/docs/test_python.py:make_batches"
|
||||
--8<-- "python/python/tests/docs/test_python.py:create_table_iterable"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
def make_batches() -> Iterable[pa.RecordBatch]:
|
||||
for i in range(5):
|
||||
yield pa.RecordBatch.from_arrays(
|
||||
[
|
||||
pa.array([[3.1, 4.1], [5.9, 26.5]]),
|
||||
pa.array(["foo", "bar"]),
|
||||
pa.array([10.0, 20.0]),
|
||||
],
|
||||
["vector", "item", "price"])
|
||||
|
||||
schema=pa.schema([
|
||||
pa.field("vector", pa.list_(pa.float32())),
|
||||
pa.field("item", pa.utf8()),
|
||||
pa.field("price", pa.float32()),
|
||||
])
|
||||
|
||||
table = db.create_table("iterable_table", data=make_batches(), schema=schema)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_python.py:import-iterable"
|
||||
--8<-- "python/python/tests/docs/test_python.py:import-pyarrow"
|
||||
--8<-- "python/python/tests/docs/test_python.py:make_batches"
|
||||
--8<-- "python/python/tests/docs/test_python.py:create_table_iterable_async"
|
||||
```
|
||||
|
||||
You will find detailed instructions of creating a LanceDB dataset in
|
||||
[Getting Started](../basic.md#quick-start) and [API](python.md/#lancedb.db.DBConnection.create_table)
|
||||
@@ -65,15 +66,16 @@ sections.
|
||||
|
||||
We can now perform similarity search via the LanceDB Python API.
|
||||
|
||||
```py
|
||||
# Open the table previously created.
|
||||
table = db.open_table("pd_table")
|
||||
=== "Sync API"
|
||||
|
||||
query_vector = [100, 100]
|
||||
# Pandas DataFrame
|
||||
df = table.search(query_vector).limit(1).to_pandas()
|
||||
print(df)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_python.py:vector_search"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_python.py:vector_search_async"
|
||||
```
|
||||
|
||||
```
|
||||
vector item price _distance
|
||||
@@ -83,16 +85,13 @@ print(df)
|
||||
If you have a simple filter, it's faster to provide a `where` clause to LanceDB's `search` method.
|
||||
For more complex filters or aggregations, you can always resort to using the underlying `DataFrame` methods after performing a search.
|
||||
|
||||
```python
|
||||
=== "Sync API"
|
||||
|
||||
# Apply the filter via LanceDB
|
||||
results = table.search([100, 100]).where("price < 15").to_pandas()
|
||||
assert len(results) == 1
|
||||
assert results["item"].iloc[0] == "foo"
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_python.py:vector_search_with_filter"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
# Apply the filter via Pandas
|
||||
df = results = table.search([100, 100]).to_pandas()
|
||||
results = df[df.price < 15]
|
||||
assert len(results) == 1
|
||||
assert results["item"].iloc[0] == "foo"
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_python.py:vector_search_with_filter_async"
|
||||
```
|
||||
|
||||
@@ -2,38 +2,29 @@
|
||||
|
||||
LanceDB supports [Polars](https://github.com/pola-rs/polars), a blazingly fast DataFrame library for Python written in Rust. Just like in Pandas, the Polars integration is enabled by PyArrow under the hood. A deeper integration between Lance Tables and Polars DataFrames is in progress, but at the moment, you can read a Polars DataFrame into LanceDB and output the search results from a query to a Polars DataFrame.
|
||||
|
||||
|
||||
## Create & Query LanceDB Table
|
||||
|
||||
### From Polars DataFrame
|
||||
|
||||
First, we connect to a LanceDB database.
|
||||
|
||||
```py
|
||||
import lancedb
|
||||
|
||||
db = lancedb.connect("data/polars-lancedb")
|
||||
```py
|
||||
--8<-- "python/python/tests/docs/test_python.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_python.py:connect_to_lancedb"
|
||||
```
|
||||
|
||||
We can load a Polars `DataFrame` to LanceDB directly.
|
||||
|
||||
```py
|
||||
import polars as pl
|
||||
|
||||
data = pl.DataFrame({
|
||||
"vector": [[3.1, 4.1], [5.9, 26.5]],
|
||||
"item": ["foo", "bar"],
|
||||
"price": [10.0, 20.0]
|
||||
})
|
||||
table = db.create_table("pl_table", data=data)
|
||||
--8<-- "python/python/tests/docs/test_python.py:import-polars"
|
||||
--8<-- "python/python/tests/docs/test_python.py:create_table_polars"
|
||||
```
|
||||
|
||||
We can now perform similarity search via the LanceDB Python API.
|
||||
|
||||
```py
|
||||
query = [3.0, 4.0]
|
||||
result = table.search(query).limit(1).to_polars()
|
||||
print(result)
|
||||
print(type(result))
|
||||
--8<-- "python/python/tests/docs/test_python.py:vector_search_polars"
|
||||
```
|
||||
|
||||
In addition to the selected columns, LanceDB also returns a vector
|
||||
@@ -59,33 +50,16 @@ Note that the type of the result from a table search is a Polars DataFrame.
|
||||
Alternately, we can create an empty LanceDB Table using a Pydantic schema and populate it with a Polars DataFrame.
|
||||
|
||||
```py
|
||||
import polars as pl
|
||||
from lancedb.pydantic import Vector, LanceModel
|
||||
|
||||
|
||||
class Item(LanceModel):
|
||||
vector: Vector(2)
|
||||
item: str
|
||||
price: float
|
||||
|
||||
data = {
|
||||
"vector": [[3.1, 4.1]],
|
||||
"item": "foo",
|
||||
"price": 10.0,
|
||||
}
|
||||
|
||||
table = db.create_table("test_table", schema=Item)
|
||||
df = pl.DataFrame(data)
|
||||
# Add Polars DataFrame to table
|
||||
table.add(df)
|
||||
--8<-- "python/python/tests/docs/test_python.py:import-polars"
|
||||
--8<-- "python/python/tests/docs/test_python.py:import-lancedb-pydantic"
|
||||
--8<-- "python/python/tests/docs/test_python.py:class_Item"
|
||||
--8<-- "python/python/tests/docs/test_python.py:create_table_pydantic"
|
||||
```
|
||||
|
||||
The table can now be queried as usual.
|
||||
|
||||
```py
|
||||
result = table.search([3.0, 4.0]).limit(1).to_polars()
|
||||
print(result)
|
||||
print(type(result))
|
||||
--8<-- "python/python/tests/docs/test_python.py:vector_search_polars"
|
||||
```
|
||||
|
||||
```
|
||||
@@ -108,8 +82,7 @@ As you iterate on your application, you'll likely need to work with the whole ta
|
||||
LanceDB tables can also be converted directly into a polars LazyFrame for further processing.
|
||||
|
||||
```python
|
||||
ldf = table.to_polars()
|
||||
print(type(ldf))
|
||||
--8<-- "python/python/tests/docs/test_python.py:dump_table_lazyform"
|
||||
```
|
||||
|
||||
Unlike the search result from a query, we can see that the type of the result is a LazyFrame.
|
||||
@@ -121,7 +94,7 @@ Unlike the search result from a query, we can see that the type of the result is
|
||||
We can now work with the LazyFrame as we would in Polars, and collect the first result.
|
||||
|
||||
```python
|
||||
print(ldf.first().collect())
|
||||
--8<-- "python/python/tests/docs/test_python.py:print_table_lazyform"
|
||||
```
|
||||
|
||||
```
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
====================================================================
|
||||
Adaptive RAG introduces a RAG technique that combines query analysis with self-corrective RAG.
|
||||
|
||||
For Query Analysis, it uses a small classifier(LLM), to decide the query’s complexity. Query Analysis helps routing smoothly to adjust between different retrieval strategies No retrieval, Single-shot RAG or Iterative RAG.
|
||||
For Query Analysis, it uses a small classifier(LLM), to decide the query’s complexity. Query Analysis guides adjustment between different retrieval strategies: No retrieval, Single-shot RAG or Iterative RAG.
|
||||
|
||||
**[Official Paper](https://arxiv.org/pdf/2403.14403)**
|
||||
|
||||
@@ -12,9 +12,9 @@ For Query Analysis, it uses a small classifier(LLM), to decide the query’s com
|
||||
</figcaption>
|
||||
</figure>
|
||||
|
||||
**[Offical Implementation](https://github.com/starsuzi/Adaptive-RAG)**
|
||||
**[Official Implementation](https://github.com/starsuzi/Adaptive-RAG)**
|
||||
|
||||
Here’s a code snippet for query analysis
|
||||
Here’s a code snippet for query analysis:
|
||||
|
||||
```python
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
@@ -35,7 +35,7 @@ llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
|
||||
structured_llm_router = llm.with_structured_output(RouteQuery)
|
||||
```
|
||||
|
||||
For defining and querying retriever
|
||||
The following example defines and queries a retriever:
|
||||
|
||||
```python
|
||||
# add documents in LanceDB
|
||||
@@ -48,4 +48,4 @@ retriever = vectorstore.as_retriever()
|
||||
# query using defined retriever
|
||||
question = "How adaptive RAG works"
|
||||
docs = retriever.get_relevant_documents(question)
|
||||
```
|
||||
```
|
||||
|
||||
@@ -11,7 +11,7 @@ FLARE, stands for Forward-Looking Active REtrieval augmented generation is a gen
|
||||
|
||||
[](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/better-rag-FLAIR/main.ipynb)
|
||||
|
||||
Here’s a code snippet for using FLARE with Langchain
|
||||
Here’s a code snippet for using FLARE with Langchain:
|
||||
|
||||
```python
|
||||
from langchain.vectorstores import LanceDB
|
||||
@@ -35,4 +35,4 @@ flare = FlareChain.from_llm(llm=llm,retriever=vector_store_retriever,max_generat
|
||||
result = flare.run(input_text)
|
||||
```
|
||||
|
||||
[](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/better-rag-FLAIR/main.ipynb)
|
||||
[](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/better-rag-FLAIR/main.ipynb)
|
||||
|
||||
@@ -11,7 +11,7 @@ HyDE, stands for Hypothetical Document Embeddings is an approach used for precis
|
||||
|
||||
[](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Advance-RAG-with-HyDE/main.ipynb)
|
||||
|
||||
Here’s a code snippet for using HyDE with Langchain
|
||||
Here’s a code snippet for using HyDE with Langchain:
|
||||
|
||||
```python
|
||||
from langchain.llms import OpenAI
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
**Agentic RAG 🤖**
|
||||
====================================================================
|
||||
Agentic RAG is Agent-based RAG introduces an advanced framework for answering questions by using intelligent agents instead of just relying on large language models. These agents act like expert researchers, handling complex tasks such as detailed planning, multi-step reasoning, and using external tools. They navigate multiple documents, compare information, and generate accurate answers. This system is easily scalable, with each new document set managed by a sub-agent, making it a powerful tool for tackling a wide range of information needs.
|
||||
Agentic RAG introduces an advanced framework for answering questions by using intelligent agents instead of just relying on large language models. These agents act like expert researchers, handling complex tasks such as detailed planning, multi-step reasoning, and using external tools. They navigate multiple documents, compare information, and generate accurate answers. This system is easily scalable, with each new document set managed by a sub-agent, making it a powerful tool for tackling a wide range of information needs.
|
||||
|
||||
<figure markdown="span">
|
||||

|
||||
@@ -9,7 +9,7 @@ Agentic RAG is Agent-based RAG introduces an advanced framework for answering qu
|
||||
|
||||
[](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Agentic_RAG/main.ipynb)
|
||||
|
||||
Here’s a code snippet for defining retriever using Langchain
|
||||
Here’s a code snippet for defining retriever using Langchain:
|
||||
|
||||
```python
|
||||
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
||||
@@ -41,7 +41,7 @@ retriever = vectorstore.as_retriever()
|
||||
|
||||
```
|
||||
|
||||
Agent that formulates an improved query for better retrieval results and then grades the retrieved documents
|
||||
Here is an agent that formulates an improved query for better retrieval results and then grades the retrieved documents:
|
||||
|
||||
```python
|
||||
def grade_documents(state) -> Literal["generate", "rewrite"]:
|
||||
@@ -98,4 +98,4 @@ def rewrite(state):
|
||||
return {"messages": [response]}
|
||||
```
|
||||
|
||||
[](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Agentic_RAG/main.ipynb)
|
||||
[](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Agentic_RAG/main.ipynb)
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
Corrective-RAG (CRAG) is a strategy for Retrieval-Augmented Generation (RAG) that includes self-reflection and self-grading of retrieved documents. Here’s a simplified breakdown of the steps involved:
|
||||
|
||||
1. **Relevance Check**: If at least one document meets the relevance threshold, the process moves forward to the generation phase.
|
||||
2. **Knowledge Refinement**: Before generating an answer, the process refines the knowledge by dividing the document into smaller segments called "knowledge strips."
|
||||
2. **Knowledge Refinement**: Before generating an answer, the process refines the knowledge by dividing the document into smaller segments called "knowledge strips".
|
||||
3. **Grading and Filtering**: Each "knowledge strip" is graded, and irrelevant ones are filtered out.
|
||||
4. **Additional Data Source**: If all documents are below the relevance threshold, or if the system is unsure about their relevance, it will seek additional information by performing a web search to supplement the retrieved data.
|
||||
|
||||
@@ -19,11 +19,11 @@ Above steps are mentioned in
|
||||
|
||||
Corrective Retrieval-Augmented Generation (CRAG) is a method that works like a **built-in fact-checker**.
|
||||
|
||||
**[Offical Implementation](https://github.com/HuskyInSalt/CRAG)**
|
||||
**[Official Implementation](https://github.com/HuskyInSalt/CRAG)**
|
||||
|
||||
[](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Corrective-RAG-with_Langgraph/CRAG_with_Langgraph.ipynb)
|
||||
|
||||
Here’s a code snippet for defining a table with the [Embedding API](https://lancedb.github.io/lancedb/embeddings/embedding_functions/), and retrieves the relevant documents.
|
||||
Here’s a code snippet for defining a table with the [Embedding API](https://lancedb.github.io/lancedb/embeddings/embedding_functions/) and retrieving the relevant documents:
|
||||
|
||||
```python
|
||||
import pandas as pd
|
||||
@@ -115,6 +115,6 @@ def grade_documents(state):
|
||||
}
|
||||
```
|
||||
|
||||
Check Colab for the Implementation of CRAG with Langgraph
|
||||
Check Colab for the Implementation of CRAG with Langgraph:
|
||||
|
||||
[](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Corrective-RAG-with_Langgraph/CRAG_with_Langgraph.ipynb)
|
||||
[](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Corrective-RAG-with_Langgraph/CRAG_with_Langgraph.ipynb)
|
||||
|
||||
@@ -6,7 +6,7 @@ One of the main benefits of Graph RAG is its ability to capture and represent co
|
||||
|
||||
**[Official Paper](https://arxiv.org/pdf/2404.16130)**
|
||||
|
||||
**[Offical Implementation](https://github.com/microsoft/graphrag)**
|
||||
**[Official Implementation](https://github.com/microsoft/graphrag)**
|
||||
|
||||
[Microsoft Research Blog](https://www.microsoft.com/en-us/research/blog/graphrag-unlocking-llm-discovery-on-narrative-private-data/)
|
||||
|
||||
@@ -39,16 +39,16 @@ python3 -m graphrag.index --root dataset-dir
|
||||
|
||||
- **Execute Query**
|
||||
|
||||
Global Query Execution gives a broad overview of dataset
|
||||
Global Query Execution gives a broad overview of dataset:
|
||||
|
||||
```bash
|
||||
python3 -m graphrag.query --root dataset-dir --method global "query-question"
|
||||
```
|
||||
|
||||
Local Query Execution gives a detailed and specific answers based on the context of the entities
|
||||
Local Query Execution gives detailed and specific answers based on the context of the entities:
|
||||
|
||||
```bash
|
||||
python3 -m graphrag.query --root dataset-dir --method local "query-question"
|
||||
```
|
||||
|
||||
[](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Graphrag/main.ipynb)
|
||||
[](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Graphrag/main.ipynb)
|
||||
|
||||
@@ -15,7 +15,7 @@ MRAG is cost-effective and energy-efficient because it avoids extra LLM queries,
|
||||
|
||||
**[Official Implementation](https://github.com/spcl/MRAG)**
|
||||
|
||||
Here’s a code snippet for defining different embedding spaces with the [Embedding API](https://lancedb.github.io/lancedb/embeddings/embedding_functions/)
|
||||
Here’s a code snippet for defining different embedding spaces with the [Embedding API](https://lancedb.github.io/lancedb/embeddings/embedding_functions/):
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
@@ -44,6 +44,6 @@ class Space3(LanceModel):
|
||||
vector: Vector(model3.ndims()) = model3.VectorField()
|
||||
```
|
||||
|
||||
Create different tables using defined embedding spaces, then make queries to each embedding space. Use the resulted closest documents from each embedding space to generate answers.
|
||||
Create different tables using defined embedding spaces, then make queries to each embedding space. Use the resulting closest documents from each embedding space to generate answers.
|
||||
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
**Self RAG 🤳**
|
||||
====================================================================
|
||||
Self-RAG is a strategy for Retrieval-Augmented Generation (RAG) to get better retrieved information, generated text, and checking their own work, all without losing their flexibility. Unlike the traditional Retrieval-Augmented Generation (RAG) method, Self-RAG retrieves information as needed, can skip retrieval if not needed, and evaluates its own output while generating text. It also uses a process to pick the best output based on different preferences.
|
||||
Self-RAG is a strategy for Retrieval-Augmented Generation (RAG) to get better retrieved information, generated text, and validation, without loss of flexibility. Unlike the traditional Retrieval-Augmented Generation (RAG) method, Self-RAG retrieves information as needed, can skip retrieval if not needed, and evaluates its own output while generating text. It also uses a process to pick the best output based on different preferences.
|
||||
|
||||
**[Official Paper](https://arxiv.org/pdf/2310.11511)**
|
||||
|
||||
@@ -10,11 +10,11 @@ Self-RAG is a strategy for Retrieval-Augmented Generation (RAG) to get better re
|
||||
</figcaption>
|
||||
</figure>
|
||||
|
||||
**[Offical Implementation](https://github.com/AkariAsai/self-rag)**
|
||||
**[Official Implementation](https://github.com/AkariAsai/self-rag)**
|
||||
|
||||
Self-RAG starts by generating a response without retrieving extra info if it's not needed. For questions that need more details, it retrieves to get the necessary information.
|
||||
|
||||
Here’s a code snippet for defining retriever using Langchain
|
||||
Here’s a code snippet for defining retriever using Langchain:
|
||||
|
||||
```python
|
||||
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
||||
@@ -46,7 +46,7 @@ retriever = vectorstore.as_retriever()
|
||||
|
||||
```
|
||||
|
||||
Functions that grades the retrieved documents and if required formulates an improved query for better retrieval results
|
||||
The following functions grade the retrieved documents and formulate an improved query for better retrieval results, if required:
|
||||
|
||||
```python
|
||||
def grade_documents(state) -> Literal["generate", "rewrite"]:
|
||||
@@ -93,4 +93,4 @@ def rewrite(state):
|
||||
model = ChatOpenAI(temperature=0, model="gpt-4-0125-preview", streaming=True)
|
||||
response = model.invoke(msg)
|
||||
return {"messages": [response]}
|
||||
```
|
||||
```
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
**SFR RAG 📑**
|
||||
====================================================================
|
||||
Salesforce AI Research introduces SFR-RAG, a 9-billion-parameter language model trained with a significant emphasis on reliable, precise, and faithful contextual generation abilities specific to real-world RAG use cases and relevant agentic tasks. They include precise factual knowledge extraction, distinguishing relevant against distracting contexts, citing appropriate sources along with answers, producing complex and multi-hop reasoning over multiple contexts, consistent format following, as well as refraining from hallucination over unanswerable queries.
|
||||
Salesforce AI Research introduced SFR-RAG, a 9-billion-parameter language model trained with a significant emphasis on reliable, precise, and faithful contextual generation abilities specific to real-world RAG use cases and relevant agentic tasks. It targets precise factual knowledge extraction, distinction between relevant and distracting contexts, citation of appropriate sources along with answers, production of complex and multi-hop reasoning over multiple contexts, consistent format following, as well as minimization of hallucination over unanswerable queries.
|
||||
|
||||
**[Offical Implementation](https://github.com/SalesforceAIResearch/SFR-RAG)**
|
||||
**[Official Implementation](https://github.com/SalesforceAIResearch/SFR-RAG)**
|
||||
|
||||
<figure markdown="span">
|
||||

|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# AnswersDotAI Rerankers
|
||||
|
||||
This integration allows using answersdotai's rerankers to rerank the search results. [Rerankers](https://github.com/AnswerDotAI/rerankers)
|
||||
A lightweight, low-dependency, unified API to use all common reranking and cross-encoder models.
|
||||
This integration uses [AnswersDotAI's rerankers](https://github.com/AnswerDotAI/rerankers) to rerank the search results, providing a lightweight, low-dependency, unified API to use all common reranking and cross-encoder models.
|
||||
|
||||
!!! note
|
||||
Supported Query Types: Hybrid, Vector, FTS
|
||||
@@ -45,10 +44,10 @@ Accepted Arguments
|
||||
----------------
|
||||
| Argument | Type | Default | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| `model_type` | `str` | `"colbert"` | The type of model to use. Supported model types can be found here - https://github.com/AnswerDotAI/rerankers |
|
||||
| `model_type` | `str` | `"colbert"` | The type of model to use. Supported model types can be found here: https://github.com/AnswerDotAI/rerankers. |
|
||||
| `model_name` | `str` | `"answerdotai/answerai-colbert-small-v1"` | The name of the reranker model to use. |
|
||||
| `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
|
||||
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
|
||||
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type. |
|
||||
|
||||
|
||||
|
||||
@@ -58,17 +57,17 @@ You can specify the type of scores you want the reranker to return. The followin
|
||||
### Hybrid Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ❌ Not Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ❌ Not Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
### Vector Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returns have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ✅ Supported | Results have vector(`_distance`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
### FTS Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returns have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ✅ Supported | Results have FTS(`score`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Cohere Reranker
|
||||
|
||||
This re-ranker uses the [Cohere](https://cohere.ai/) API to rerank the search results. You can use this re-ranker by passing `CohereReranker()` to the `rerank()` method. Note that you'll either need to set the `COHERE_API_KEY` environment variable or pass the `api_key` argument to use this re-ranker.
|
||||
This reranker uses the [Cohere](https://cohere.ai/) API to rerank the search results. You can use this reranker by passing `CohereReranker()` to the `rerank()` method. Note that you'll either need to set the `COHERE_API_KEY` environment variable or pass the `api_key` argument to use this reranker.
|
||||
|
||||
|
||||
!!! note
|
||||
@@ -62,17 +62,17 @@ You can specify the type of scores you want the reranker to return. The followin
|
||||
### Hybrid Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ❌ Not Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column |
|
||||
| `all` | ❌ Not Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
|
||||
### Vector Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returns have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Results have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
|
||||
|
||||
### FTS Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returns have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Results have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# ColBERT Reranker
|
||||
|
||||
This re-ranker uses ColBERT model to rerank the search results. You can use this re-ranker by passing `ColbertReranker()` to the `rerank()` method.
|
||||
This reranker uses ColBERT model to rerank the search results. You can use this reranker by passing `ColbertReranker()` to the `rerank()` method.
|
||||
!!! note
|
||||
Supported Query Types: Hybrid, Vector, FTS
|
||||
|
||||
@@ -46,7 +46,7 @@ Accepted Arguments
|
||||
| `model_name` | `str` | `"colbert-ir/colbertv2.0"` | The name of the reranker model to use.|
|
||||
| `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
|
||||
| `device` | `str` | `None` | The device to use for the cross encoder model. If None, will use "cuda" if available, otherwise "cpu". |
|
||||
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
|
||||
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type. |
|
||||
|
||||
|
||||
## Supported Scores for each query type
|
||||
@@ -55,17 +55,17 @@ You can specify the type of scores you want the reranker to return. The followin
|
||||
### Hybrid Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ❌ Not Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ❌ Not Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
### Vector Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returns have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ✅ Supported | Results have vector(`_distance`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
### FTS Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returns have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ✅ Supported | Results have FTS(`score`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Cross Encoder Reranker
|
||||
|
||||
This re-ranker uses Cross Encoder models from sentence-transformers to rerank the search results. You can use this re-ranker by passing `CrossEncoderReranker()` to the `rerank()` method.
|
||||
This reranker uses Cross Encoder models from sentence-transformers to rerank the search results. You can use this reranker by passing `CrossEncoderReranker()` to the `rerank()` method.
|
||||
!!! note
|
||||
Supported Query Types: Hybrid, Vector, FTS
|
||||
|
||||
@@ -46,7 +46,7 @@ Accepted Arguments
|
||||
| `model_name` | `str` | `"cross-encoder/ms-marco-TinyBERT-L-6"` | The name of the reranker model to use.|
|
||||
| `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
|
||||
| `device` | `str` | `None` | The device to use for the cross encoder model. If None, will use "cuda" if available, otherwise "cpu". |
|
||||
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
|
||||
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type. |
|
||||
|
||||
## Supported Scores for each query type
|
||||
You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:
|
||||
@@ -54,17 +54,17 @@ You can specify the type of scores you want the reranker to return. The followin
|
||||
### Hybrid Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ❌ Not Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ❌ Not Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
### Vector Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returns have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ✅ Supported | Results have vector(`_distance`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
### FTS Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returns have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ✅ Supported | Results have FTS(`score`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
## Building Custom Rerankers
|
||||
You can build your own custom reranker by subclassing the `Reranker` class and implementing the `rerank_hybrid()` method. Optionally, you can also implement the `rerank_vector()` and `rerank_fts()` methods if you want to support reranking for vector and FTS search separately.
|
||||
Here's an example of a custom reranker that combines the results of semantic and full-text search using a linear combination of the scores.
|
||||
|
||||
The `Reranker` base interface comes with a `merge_results()` method that can be used to combine the results of semantic and full-text search. This is a vanilla merging algorithm that simply concatenates the results and removes the duplicates without taking the scores into consideration. It only keeps the first copy of the row encountered. This works well in cases that don't require the scores of semantic and full-text search to combine the results. If you want to use the scores or want to support `return_score="all"`, you'll need to implement your own merging algorithm.
|
||||
|
||||
Here's an example of a custom reranker that combines the results of semantic and full-text search using a linear combination of the scores:
|
||||
|
||||
```python
|
||||
|
||||
from lancedb.rerankers import Reranker
|
||||
@@ -42,7 +43,7 @@ class MyReranker(Reranker):
|
||||
```
|
||||
|
||||
### Example of a Custom Reranker
|
||||
For the sake of simplicity let's build custom reranker that just enchances the Cohere Reranker by accepting a filter query, and accept other CohereReranker params as kwags.
|
||||
For the sake of simplicity, let's build a custom reranker that enhances the Cohere Reranker by accepting a filter query, and accepts other CohereReranker params as kwargs.
|
||||
|
||||
```python
|
||||
|
||||
@@ -83,6 +84,6 @@ class ModifiedCohereReranker(CohereReranker):
|
||||
```
|
||||
|
||||
!!! tip
|
||||
The `vector_results` and `fts_results` are pyarrow tables. Lean more about pyarrow tables [here](https://arrow.apache.org/docs/python). It can be convered to other data types like pandas dataframe, pydict, pylist etc.
|
||||
The `vector_results` and `fts_results` are pyarrow tables. Learn more about pyarrow tables [here](https://arrow.apache.org/docs/python). They can be converted to other data types like pandas dataframe, pydict, pylist etc.
|
||||
|
||||
For example, You can convert them to pandas dataframes using `to_pandas()` method and perform any operations you want. After you are done, you can convert the dataframe back to pyarrow table using `pa.Table.from_pandas()` method and return it.
|
||||
For example, you can convert them to pandas dataframes using the `to_pandas()` method and perform any operations you want. After you are done, you can convert the dataframe back to a pyarrow table using the `pa.Table.from_pandas()` method and return it.
|
||||
|
||||
@@ -13,7 +13,7 @@ LanceDB comes with some built-in rerankers. Some of the rerankers that are avail
|
||||
|
||||
|
||||
## Using a Reranker
|
||||
Using rerankers is optional for vector and FTS. However, for hybrid search, rerankers are required. To use a reranker, you need to create an instance of the reranker and pass it to the `rerank` method of the query builder.
|
||||
Using rerankers is optional for vector and FTS. However, for hybrid search, rerankers are required. To use a reranker, you need to create an instance of the reranker and pass it to the `rerank` method of the query builder:
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
@@ -36,14 +36,14 @@ tbl = db.create_table("test", data)
|
||||
reranker = CohereReranker(api_key="your_api_key")
|
||||
|
||||
# Run vector search with a reranker
|
||||
result = tbl.query("hello").rerank(reranker).to_list()
|
||||
result = tbl.search("hello").rerank(reranker).to_list()
|
||||
|
||||
# Run FTS search with a reranker
|
||||
result = tbl.query("hello", query_type="fts").rerank(reranker).to_list()
|
||||
result = tbl.search("hello", query_type="fts").rerank(reranker).to_list()
|
||||
|
||||
# Run hybrid search with a reranker
|
||||
tbl.create_fts_index("text")
|
||||
result = tbl.query("hello", query_type="hybrid").rerank(reranker).to_list()
|
||||
result = tbl.search("hello", query_type="hybrid").rerank(reranker).to_list()
|
||||
```
|
||||
|
||||
### Multi-vector reranking
|
||||
@@ -64,7 +64,7 @@ reranked = reranker.rerank_multivector([res1, res2, res3], deduplicate=True)
|
||||
```
|
||||
|
||||
## Available Rerankers
|
||||
LanceDB comes with some built-in rerankers. Here are some of the rerankers that are available in LanceDB:
|
||||
LanceDB comes with the following built-in rerankers:
|
||||
|
||||
- [Cohere Reranker](./cohere.md)
|
||||
- [Cross Encoder Reranker](./cross_encoder.md)
|
||||
@@ -78,4 +78,4 @@ LanceDB comes with some built-in rerankers. Here are some of the rerankers that
|
||||
|
||||
## Creating Custom Rerankers
|
||||
|
||||
LanceDB also you to create custom rerankers by extending the base `Reranker` class. The custom reranker should implement the `rerank` method that takes a list of search results and returns a reranked list of search results. This is covered in more detail in the [Creating Custom Rerankers](./custom_reranker.md) section.
|
||||
LanceDB also allows you to create custom rerankers by extending the base `Reranker` class. The custom reranker should implement the `rerank` method that takes a list of search results and returns a reranked list of search results. This is covered in more detail in the [Creating Custom Rerankers](./custom_reranker.md) section.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Jina Reranker
|
||||
|
||||
This re-ranker uses the [Jina](https://jina.ai/reranker/) API to rerank the search results. You can use this re-ranker by passing `JinaReranker()` to the `rerank()` method. Note that you'll either need to set the `JINA_API_KEY` environment variable or pass the `api_key` argument to use this re-ranker.
|
||||
This reranker uses the [Jina](https://jina.ai/reranker/) API to rerank the search results. You can use this reranker by passing `JinaReranker()` to the `rerank()` method. Note that you'll either need to set the `JINA_API_KEY` environment variable or pass the `api_key` argument to use this reranker.
|
||||
|
||||
|
||||
!!! note
|
||||
@@ -48,11 +48,11 @@ Accepted Arguments
|
||||
----------------
|
||||
| Argument | Type | Default | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| `model_name` | `str` | `"jina-reranker-v2-base-multilingual"` | The name of the reranker model to use. You can find the list of available models in https://jina.ai/reranker/|
|
||||
| `model_name` | `str` | `"jina-reranker-v2-base-multilingual"` | The name of the reranker model to use. You can find the list of available models in https://jina.ai/reranker. |
|
||||
| `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
|
||||
| `top_n` | `str` | `None` | The number of results to return. If None, will return all results. |
|
||||
| `api_key` | `str` | `None` | The API key for the Jina API. If not provided, the `JINA_API_KEY` environment variable is used. |
|
||||
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
|
||||
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type. |
|
||||
|
||||
|
||||
|
||||
@@ -62,17 +62,17 @@ You can specify the type of scores you want the reranker to return. The followin
|
||||
### Hybrid Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ❌ Not Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ❌ Not Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
### Vector Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returns have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ✅ Supported | Results have vector(`_distance`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
### FTS Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returns have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ✅ Supported | Results have FTS(`score`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
# Linear Combination Reranker
|
||||
|
||||
!!! note
|
||||
This is depricated. It is recommended to use the `RRFReranker` instead, if you want to use a score based reranker.
|
||||
This is deprecated. It is recommended to use the `RRFReranker` instead, if you want to use a score-based reranker.
|
||||
|
||||
It combines the results of semantic and full-text search using a linear combination of the scores. The weights for the linear combination can be specified. It defaults to 0.7, i.e, 70% weight for semantic search and 30% weight for full-text search.
|
||||
The Linear Combination Reranker combines the results of semantic and full-text search using a linear combination of the scores. The weights for the linear combination can be specified, and default to 0.7, i.e., 70% weight for semantic search and 30% weight for full-text search.
|
||||
|
||||
!!! note
|
||||
Supported Query Types: Hybrid
|
||||
@@ -51,5 +51,5 @@ You can specify the type of scores you want the reranker to return. The followin
|
||||
### Hybrid Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_distance`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_distance`) |
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# OpenAI Reranker (Experimental)
|
||||
|
||||
This re-ranker uses OpenAI chat model to rerank the search results. You can use this re-ranker by passing `OpenAI()` to the `rerank()` method.
|
||||
This reranker uses OpenAI chat model to rerank the search results. You can use this reranker by passing `OpenAI()` to the `rerank()` method.
|
||||
!!! note
|
||||
Supported Query Types: Hybrid, Vector, FTS
|
||||
|
||||
!!! warning
|
||||
This re-ranker is experimental. OpenAI doesn't have a dedicated reranking model, so we are using the chat model for reranking.
|
||||
This reranker is experimental. OpenAI doesn't have a dedicated reranking model, so we are using the chat model for reranking.
|
||||
|
||||
```python
|
||||
import numpy
|
||||
@@ -47,7 +47,7 @@ Accepted Arguments
|
||||
| --- | --- | --- | --- |
|
||||
| `model_name` | `str` | `"gpt-4-turbo-preview"` | The name of the reranker model to use.|
|
||||
| `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
|
||||
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
|
||||
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type. |
|
||||
| `api_key` | str | `None` | The API key to use. If None, will use the OPENAI_API_KEY environment variable.
|
||||
|
||||
|
||||
@@ -57,17 +57,17 @@ You can specify the type of scores you want the reranker to return. The followin
|
||||
### Hybrid Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ❌ Not Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ❌ Not Supported | Results have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
### Vector Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returns have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ✅ Supported | Results have vector(`_distance`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
### FTS Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returns have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Results only have the `_relevance_score` column. |
|
||||
| `all` | ✅ Supported | Results have FTS(`score`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Reciprocal Rank Fusion Reranker
|
||||
|
||||
This is the default re-ranker used by LanceDB hybrid search. Reciprocal Rank Fusion (RRF) is an algorithm that evaluates the search scores by leveraging the positions/rank of the documents. The implementation follows this [paper](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf).
|
||||
This is the default reranker used by LanceDB hybrid search. Reciprocal Rank Fusion (RRF) is an algorithm that evaluates the search scores by leveraging the positions/rank of the documents. The implementation follows this [paper](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf).
|
||||
|
||||
|
||||
!!! note
|
||||
@@ -39,7 +39,7 @@ Accepted Arguments
|
||||
----------------
|
||||
| Argument | Type | Default | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| `K` | `int` | `60` | A constant used in the RRF formula (default is 60). Experiments indicate that k = 60 was near-optimal, but that the choice is not critical |
|
||||
| `K` | `int` | `60` | A constant used in the RRF formula (default is 60). Experiments indicate that k = 60 was near-optimal, but that the choice is not critical. |
|
||||
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score`. If "all", will return all scores from the vector and FTS search along with the relevance score. |
|
||||
|
||||
|
||||
@@ -49,5 +49,5 @@ You can specify the type of scores you want the reranker to return. The followin
|
||||
### Hybrid Search
|
||||
|`return_score`| Status | Description |
|
||||
| --- | --- | --- |
|
||||
| `relevance` | ✅ Supported | Returned rows only have the `_relevance_score` column |
|
||||
| `all` | ✅ Supported | Returned rows have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||
| `relevance` | ✅ Supported | Returned rows only have the `_relevance_score` column. |
|
||||
| `all` | ✅ Supported | Returned rows have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`). |
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
Voyage AI provides cutting-edge embedding and rerankers.
|
||||
|
||||
This re-ranker uses the [VoyageAI](https://docs.voyageai.com/docs/) API to rerank the search results. You can use this re-ranker by passing `VoyageAIReranker()` to the `rerank()` method. Note that you'll either need to set the `VOYAGE_API_KEY` environment variable or pass the `api_key` argument to use this re-ranker.
|
||||
This reranker uses the [VoyageAI](https://docs.voyageai.com/docs/) API to rerank the search results. You can use this reranker by passing `VoyageAIReranker()` to the `rerank()` method. Note that you'll either need to set the `VOYAGE_API_KEY` environment variable or pass the `api_key` argument to use this reranker.
|
||||
|
||||
|
||||
!!! note
|
||||
|
||||
@@ -44,18 +44,16 @@ db.create_table("my_vectors", data=data)
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
import lancedb
|
||||
import numpy as np
|
||||
=== "Sync API"
|
||||
|
||||
db = lancedb.connect("data/sample-lancedb")
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:exhaustive_search"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
tbl = db.open_table("my_vectors")
|
||||
|
||||
df = tbl.search(np.random.random((1536))) \
|
||||
.limit(10) \
|
||||
.to_list()
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:exhaustive_search_async"
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -81,12 +79,16 @@ By default, `l2` will be used as metric type. You can specify the metric type as
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
df = tbl.search(np.random.random((1536))) \
|
||||
.metric("cosine") \
|
||||
.limit(10) \
|
||||
.to_list()
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:exhaustive_search_cosine"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:exhaustive_search_async_cosine"
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -136,46 +138,67 @@ LanceDB supports binary vectors as a data type, and has the ability to search bi
|
||||
--8<-- "python/python/tests/docs/test_binary_vector.py:async_binary_vector"
|
||||
```
|
||||
|
||||
## Search with distance range
|
||||
|
||||
You can also search for vectors within a specific distance range from the query vector. This is useful when you want to find vectors that are not just the nearest neighbors, but also those that are within a certain distance. This can be done by using the `distance_range` method.
|
||||
|
||||
=== "Python"
|
||||
|
||||
=== "sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_distance_range.py:imports"
|
||||
|
||||
--8<-- "python/python/tests/docs/test_distance_range.py:sync_distance_range"
|
||||
```
|
||||
|
||||
=== "async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_distance_range.py:imports"
|
||||
|
||||
--8<-- "python/python/tests/docs/test_distance_range.py:async_distance_range"
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
=== "@lancedb/lancedb"
|
||||
|
||||
```ts
|
||||
--8<-- "nodejs/examples/search.test.ts:import"
|
||||
|
||||
--8<-- "nodejs/examples/search.test.ts:distance_range"
|
||||
```
|
||||
|
||||
|
||||
## Output search results
|
||||
|
||||
LanceDB returns vector search results via different formats commonly used in python.
|
||||
Let's create a LanceDB table with a nested schema:
|
||||
|
||||
=== "Python"
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-datetime"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-pydantic"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-numpy"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-pydantic-base-model"
|
||||
--8<-- "python/python/tests/docs/test_search.py:class-definition"
|
||||
--8<-- "python/python/tests/docs/test_search.py:create_table_with_nested_schema"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
from datetime import datetime
|
||||
import lancedb
|
||||
from lancedb.pydantic import LanceModel, Vector
|
||||
import numpy as np
|
||||
from pydantic import BaseModel
|
||||
uri = "data/sample-lancedb-nested"
|
||||
|
||||
class Metadata(BaseModel):
|
||||
source: str
|
||||
timestamp: datetime
|
||||
|
||||
class Document(BaseModel):
|
||||
content: str
|
||||
meta: Metadata
|
||||
|
||||
class LanceSchema(LanceModel):
|
||||
id: str
|
||||
vector: Vector(1536)
|
||||
payload: Document
|
||||
|
||||
# Let's add 100 sample rows to our dataset
|
||||
data = [LanceSchema(
|
||||
id=f"id{i}",
|
||||
vector=np.random.randn(1536),
|
||||
payload=Document(
|
||||
content=f"document{i}", meta=Metadata(source=f"source{i % 10}", timestamp=datetime.now())
|
||||
),
|
||||
) for i in range(100)]
|
||||
|
||||
tbl = db.create_table("documents", data=data)
|
||||
```
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-datetime"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-lancedb"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-lancedb-pydantic"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-numpy"
|
||||
--8<-- "python/python/tests/docs/test_search.py:import-pydantic-base-model"
|
||||
--8<-- "python/python/tests/docs/test_search.py:class-definition"
|
||||
--8<-- "python/python/tests/docs/test_search.py:create_table_async_with_nested_schema"
|
||||
```
|
||||
|
||||
### As a PyArrow table
|
||||
|
||||
@@ -184,17 +207,31 @@ Let's create a LanceDB table with a nested schema:
|
||||
the addition of an `_distance` column for vector search or a `score`
|
||||
column for full text search.
|
||||
|
||||
```python
|
||||
tbl.search(np.random.randn(1536)).to_arrow()
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:search_result_as_pyarrow"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:search_result_async_as_pyarrow"
|
||||
```
|
||||
|
||||
### As a Pandas DataFrame
|
||||
|
||||
You can also get the results as a pandas dataframe.
|
||||
|
||||
```python
|
||||
tbl.search(np.random.randn(1536)).to_pandas()
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:search_result_as_pandas"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:search_result_async_as_pandas"
|
||||
```
|
||||
|
||||
While other formats like Arrow/Pydantic/Python dicts have a natural
|
||||
way to handle nested schemas, pandas can only store nested data as a
|
||||
@@ -202,33 +239,50 @@ Let's create a LanceDB table with a nested schema:
|
||||
So for convenience, you can also tell LanceDB to flatten a nested schema
|
||||
when creating the pandas dataframe.
|
||||
|
||||
```python
|
||||
tbl.search(np.random.randn(1536)).to_pandas(flatten=True)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:search_result_as_pandas_flatten_true"
|
||||
```
|
||||
|
||||
If your table has a deeply nested struct, you can control how many levels
|
||||
of nesting to flatten by passing in a positive integer.
|
||||
|
||||
```python
|
||||
tbl.search(np.random.randn(1536)).to_pandas(flatten=1)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:search_result_as_pandas_flatten_1"
|
||||
```
|
||||
!!! note
|
||||
`flatten` is not yet supported with our asynchronous client.
|
||||
|
||||
### As a list of Python dicts
|
||||
|
||||
You can of course return results as a list of python dicts.
|
||||
|
||||
```python
|
||||
tbl.search(np.random.randn(1536)).to_list()
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:search_result_as_list"
|
||||
```
|
||||
=== "Async API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:search_result_async_as_list"
|
||||
```
|
||||
|
||||
### As a list of Pydantic models
|
||||
|
||||
We can add data using Pydantic models, and we can certainly
|
||||
retrieve results as Pydantic models
|
||||
|
||||
```python
|
||||
tbl.search(np.random.randn(1536)).to_pydantic(LanceSchema)
|
||||
```
|
||||
=== "Sync API"
|
||||
|
||||
```python
|
||||
--8<-- "python/python/tests/docs/test_search.py:search_result_as_pydantic"
|
||||
```
|
||||
!!! note
|
||||
`to_pydantic()` is not yet supported with our asynchronous client.
|
||||
|
||||
Note that in this case the extra `_distance` field is discarded since
|
||||
it's not part of the LanceSchema.
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
|
||||
LanceDB supports filtering of query results based on metadata fields. By default, post-filtering is
|
||||
performed on the top-k results returned by the vector search. However, pre-filtering is also an
|
||||
option that performs the filter prior to vector search. This can be useful to narrow down on
|
||||
the search space on a very large dataset to reduce query latency.
|
||||
option that performs the filter prior to vector search. This can be useful to narrow down
|
||||
the search space of a very large dataset to reduce query latency.
|
||||
|
||||
Note that both pre-filtering and post-filtering can yield false positives. For pre-filtering, if the filter is too selective, it might eliminate relevant items that the vector search would have otherwise identified as a good match. In this case, increasing `nprobes` parameter will help reduce such false positives. It is recommended to set `use_index=false` if you know that the filter is highly selective.
|
||||
|
||||
@@ -15,13 +15,18 @@ Similarly, a highly selective post-filter can lead to false positives. Increasin
|
||||
```python
|
||||
import lancedb
|
||||
import numpy as np
|
||||
|
||||
uri = "data/sample-lancedb"
|
||||
db = lancedb.connect(uri)
|
||||
|
||||
data = [{"vector": row, "item": f"item {i}", "id": i}
|
||||
for i, row in enumerate(np.random.random((10_000, 2)).astype('int'))]
|
||||
for i, row in enumerate(np.random.random((10_000, 2)).astype('int'))]
|
||||
|
||||
# Synchronous client
|
||||
db = lancedb.connect(uri)
|
||||
tbl = db.create_table("my_vectors", data=data)
|
||||
|
||||
# Asynchronous client
|
||||
async_db = await lancedb.connect_async(uri)
|
||||
async_tbl = await async_db.create_table("my_vectors_async", data=data)
|
||||
```
|
||||
-->
|
||||
<!-- Setup Code
|
||||
@@ -39,13 +44,11 @@ const tbl = await db.createTable('myVectors', data)
|
||||
|
||||
=== "Python"
|
||||
|
||||
```py
|
||||
result = (
|
||||
tbl.search([0.5, 0.2])
|
||||
.where("id = 10", prefilter=True)
|
||||
.limit(1)
|
||||
.to_arrow()
|
||||
)
|
||||
```python
|
||||
# Synchronous client
|
||||
result = tbl.search([0.5, 0.2]).where("id = 10", prefilter=True).limit(1).to_arrow()
|
||||
# Asynchronous client
|
||||
result = await async_tbl.query().where("id = 10").nearest_to([0.5, 0.2]).limit(1).to_arrow()
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
@@ -63,15 +66,15 @@ const tbl = await db.createTable('myVectors', data)
|
||||
```
|
||||
!!! note
|
||||
|
||||
Creating a [scalar index](guides/scalar_index.md) accelerates filtering
|
||||
Creating a [scalar index](guides/scalar_index.md) accelerates filtering.
|
||||
|
||||
## SQL filters
|
||||
|
||||
Because it's built on top of [DataFusion](https://github.com/apache/arrow-datafusion), LanceDB
|
||||
embraces the utilization of standard SQL expressions as predicates for filtering operations.
|
||||
It can be used during vector search, update, and deletion operations.
|
||||
SQL can be used during vector search, update, and deletion operations.
|
||||
|
||||
Currently, Lance supports a growing list of SQL expressions.
|
||||
LanceDB supports a growing list of SQL expressions:
|
||||
|
||||
- `>`, `>=`, `<`, `<=`, `=`
|
||||
- `AND`, `OR`, `NOT`
|
||||
@@ -88,9 +91,17 @@ For example, the following filter string is acceptable:
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
tbl.search([100, 102]) \
|
||||
.where("(item IN ('item 0', 'item 2')) AND (id > 10)") \
|
||||
.to_arrow()
|
||||
# Synchronous client
|
||||
tbl.search([100, 102]).where(
|
||||
"(item IN ('item 0', 'item 2')) AND (id > 10)"
|
||||
).to_arrow()
|
||||
# Asynchronous client
|
||||
await (
|
||||
async_tbl.query()
|
||||
.where("(item IN ('item 0', 'item 2')) AND (id > 10)")
|
||||
.nearest_to([100, 102])
|
||||
.to_arrow()
|
||||
)
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
@@ -121,7 +132,7 @@ path must be wrapped in backticks.
|
||||
!!!warning "Field names containing periods (`.`) are not supported."
|
||||
|
||||
Literals for dates, timestamps, and decimals can be written by writing the string
|
||||
value after the type name. For example
|
||||
value after the type name. For example:
|
||||
|
||||
=== "SQL"
|
||||
|
||||
@@ -163,12 +174,15 @@ The mapping from SQL types to Arrow types is:
|
||||
|
||||
## Filtering without Vector Search
|
||||
|
||||
You can also filter your data without search.
|
||||
You can also filter your data without search:
|
||||
|
||||
=== "Python"
|
||||
|
||||
```python
|
||||
# Synchronous client
|
||||
tbl.search().where("id = 10").limit(10).to_arrow()
|
||||
# Asynchronous client
|
||||
await async_tbl.query().where("id = 10").limit(10).to_arrow()
|
||||
```
|
||||
|
||||
=== "TypeScript"
|
||||
|
||||
@@ -12,6 +12,8 @@ excluded_globs = [
|
||||
"../src/integrations/*.md",
|
||||
"../src/guides/tables.md",
|
||||
"../src/python/duckdb.md",
|
||||
"../src/python/pandas_and_pyarrow.md",
|
||||
"../src/python/polars_arrow.md",
|
||||
"../src/embeddings/*.md",
|
||||
"../src/concepts/*.md",
|
||||
"../src/ann_indexes.md",
|
||||
@@ -23,9 +25,10 @@ excluded_globs = [
|
||||
"../src/embeddings/available_embedding_models/text_embedding_functions/*.md",
|
||||
"../src/embeddings/available_embedding_models/multimodal_embedding_functions/*.md",
|
||||
"../src/rag/*.md",
|
||||
"../src/rag/advanced_techniques/*.md"
|
||||
|
||||
|
||||
"../src/rag/advanced_techniques/*.md",
|
||||
"../src/guides/scalar_index.md",
|
||||
"../src/guides/storage.md",
|
||||
"../src/search.md"
|
||||
]
|
||||
|
||||
python_prefix = "py"
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
<parent>
|
||||
<groupId>com.lancedb</groupId>
|
||||
<artifactId>lancedb-parent</artifactId>
|
||||
<version>0.14.1-final.0</version>
|
||||
<version>0.14.2-beta.0</version>
|
||||
<relativePath>../pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
<groupId>com.lancedb</groupId>
|
||||
<artifactId>lancedb-parent</artifactId>
|
||||
<version>0.14.1-final.0</version>
|
||||
<version>0.14.2-beta.0</version>
|
||||
<packaging>pom</packaging>
|
||||
|
||||
<name>LanceDB Parent</name>
|
||||
|
||||
62
node/package-lock.json
generated
62
node/package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "vectordb",
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "vectordb",
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"cpu": [
|
||||
"x64",
|
||||
"arm64"
|
||||
@@ -52,14 +52,14 @@
|
||||
"uuid": "^9.0.0"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@lancedb/vectordb-darwin-arm64": "0.14.1",
|
||||
"@lancedb/vectordb-darwin-x64": "0.14.1",
|
||||
"@lancedb/vectordb-linux-arm64-gnu": "0.14.1",
|
||||
"@lancedb/vectordb-linux-arm64-musl": "0.14.1",
|
||||
"@lancedb/vectordb-linux-x64-gnu": "0.14.1",
|
||||
"@lancedb/vectordb-linux-x64-musl": "0.14.1",
|
||||
"@lancedb/vectordb-win32-arm64-msvc": "0.14.1",
|
||||
"@lancedb/vectordb-win32-x64-msvc": "0.14.1"
|
||||
"@lancedb/vectordb-darwin-arm64": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-darwin-x64": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-linux-arm64-gnu": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-linux-arm64-musl": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-linux-x64-gnu": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-linux-x64-musl": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-win32-arm64-msvc": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-win32-x64-msvc": "0.14.2-beta.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@apache-arrow/ts": "^14.0.2",
|
||||
@@ -330,9 +330,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@lancedb/vectordb-darwin-arm64": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.14.1.tgz",
|
||||
"integrity": "sha512-6t7XHR7dBjDmAS/kz5wbe7LPhKW+WkFA16ZPyh0lmuxfnss4VvN3LE6qQBHjzYzB9U6Nu/4ktQ50xZGEPTnc5A==",
|
||||
"version": "0.14.2-beta.0",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.14.2-beta.0.tgz",
|
||||
"integrity": "sha512-nsXOl9M8jhsr/LrfvrVHiuWWj/zX3zU2Aahpw8etjJbnU83nmO1r9agPxN6mD/J60EsLP3gDaiRPaFY66pHScA==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -343,9 +343,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@lancedb/vectordb-darwin-x64": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.14.1.tgz",
|
||||
"integrity": "sha512-8q6Kd6XnNPKN8wqj75pHVQ4KFl6z9BaI6lWDiEaCNcO3bjPZkcLFNosJq4raxZ9iUi50Yl0qFJ6qR0XFVTwnnw==",
|
||||
"version": "0.14.2-beta.0",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.14.2-beta.0.tgz",
|
||||
"integrity": "sha512-E1ouo0EfGaxG26YWnw717vaHGNLulmqzh6eaTQuj45Vd4GaPj07TJygtDyvMFBJdsZjdY5YIc9U8yIem1NfeKQ==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -356,9 +356,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@lancedb/vectordb-linux-arm64-gnu": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.14.1.tgz",
|
||||
"integrity": "sha512-4djEMmeNb+p6nW/C4xb8wdMwnIbWfO8fYAwiplOxzxeOpPaUC9rhwUUDCbrJDCpMa8RP5ED4/jC6yT8epaDMDw==",
|
||||
"version": "0.14.2-beta.0",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.14.2-beta.0.tgz",
|
||||
"integrity": "sha512-SewXZLGccZUkONACHHPCW1Z7xsz8MaXifwpaWMEyIzbQBFAIMq30lPZN63bTt/zNo6BcBPv54yz6n1ZfCv5l+w==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -369,9 +369,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@lancedb/vectordb-linux-arm64-musl": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-musl/-/vectordb-linux-arm64-musl-0.14.1.tgz",
|
||||
"integrity": "sha512-c33hSsp16pnC58plzx1OXuifp9Rachx/MshE/L/OReoutt74fFdrRJwUjE4UCAysyY5QdvTrNm9OhDjopQK2Bw==",
|
||||
"version": "0.14.2-beta.0",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-musl/-/vectordb-linux-arm64-musl-0.14.2-beta.0.tgz",
|
||||
"integrity": "sha512-ppq3P2QYxPHmikY6nbWTwMhDGP+e+feqzm4iXKhpBxzHR2XwoY5CtDKgKDfEHy1FyCoIyvh2yYT2M1TSkrkOBw==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -382,9 +382,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@lancedb/vectordb-linux-x64-gnu": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.14.1.tgz",
|
||||
"integrity": "sha512-psu6cH9iLiSbUEZD1EWbOA4THGYSwJvS2XICO9yN7A6D41AP/ynYMRZNKWo1fpdi2Fjb0xNQwiNhQyqwbi5gzA==",
|
||||
"version": "0.14.2-beta.0",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.14.2-beta.0.tgz",
|
||||
"integrity": "sha512-XgkoarmdS42fLMMqNdHTVja2z7a0/Q4h3X+n14Ph/pkYsb7pmOabV4a7+ej8KJPm1wv2GmDA4GXcFPjF0tFBFA==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -395,9 +395,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@lancedb/vectordb-linux-x64-musl": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-musl/-/vectordb-linux-x64-musl-0.14.1.tgz",
|
||||
"integrity": "sha512-Rg4VWW80HaTFmR7EvNSu+nfRQQM8beO/otBn/Nus5mj5zFw/7cacGRmiEYhDnk5iAn8nauV+Jsi9j2U+C2hp5w==",
|
||||
"version": "0.14.2-beta.0",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-musl/-/vectordb-linux-x64-musl-0.14.2-beta.0.tgz",
|
||||
"integrity": "sha512-vGgUOVb43eccF0oz2YJK+Zionwk4ODelHU7icmGeVsULkkFkoAbf0nO4PY38ZAeLsodnLxHIIu51Bd4Jm9m20w==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -408,9 +408,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@lancedb/vectordb-win32-x64-msvc": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.14.1.tgz",
|
||||
"integrity": "sha512-XbifasmMbQIt3V9P0AtQND6M3XFiIAc1ZIgmjzBjOmxwqw4sQUwHMyJGIGOzKFZTK3fPJIGRHId7jAzXuBgfQg==",
|
||||
"version": "0.14.2-beta.0",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.14.2-beta.0.tgz",
|
||||
"integrity": "sha512-zGLC382V3gE1MHQpf0XTe34yiB+6ZtSIuOFMIDEZVI5PVN5XkXULMY6dlt5fvo4IxhRoscGjpmmaNxJzUwigDg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "vectordb",
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"description": " Serverless, low-latency vector database for AI applications",
|
||||
"private": false,
|
||||
"main": "dist/index.js",
|
||||
@@ -92,13 +92,13 @@
|
||||
}
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@lancedb/vectordb-darwin-x64": "0.14.1",
|
||||
"@lancedb/vectordb-darwin-arm64": "0.14.1",
|
||||
"@lancedb/vectordb-linux-x64-gnu": "0.14.1",
|
||||
"@lancedb/vectordb-linux-arm64-gnu": "0.14.1",
|
||||
"@lancedb/vectordb-linux-x64-musl": "0.14.1",
|
||||
"@lancedb/vectordb-linux-arm64-musl": "0.14.1",
|
||||
"@lancedb/vectordb-win32-x64-msvc": "0.14.1",
|
||||
"@lancedb/vectordb-win32-arm64-msvc": "0.14.1"
|
||||
"@lancedb/vectordb-darwin-x64": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-darwin-arm64": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-linux-x64-gnu": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-linux-arm64-gnu": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-linux-x64-musl": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-linux-arm64-musl": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-win32-x64-msvc": "0.14.2-beta.0",
|
||||
"@lancedb/vectordb-win32-arm64-msvc": "0.14.2-beta.0"
|
||||
}
|
||||
}
|
||||
|
||||
76
nodejs/CONTRIBUTING.md
Normal file
76
nodejs/CONTRIBUTING.md
Normal file
@@ -0,0 +1,76 @@
|
||||
# Contributing to LanceDB Typescript
|
||||
|
||||
This document outlines the process for contributing to LanceDB Typescript.
|
||||
For general contribution guidelines, see [CONTRIBUTING.md](../CONTRIBUTING.md).
|
||||
|
||||
## Project layout
|
||||
|
||||
The Typescript package is a wrapper around the Rust library, `lancedb`. We use
|
||||
the [napi-rs](https://napi.rs/) library to create the bindings between Rust and
|
||||
Typescript.
|
||||
|
||||
* `src/`: Rust bindings source code
|
||||
* `lancedb/`: Typescript package source code
|
||||
* `__test__/`: Unit tests
|
||||
* `examples/`: An npm package with the examples shown in the documentation
|
||||
|
||||
## Development environment
|
||||
|
||||
To set up your development environment, you will need to install the following:
|
||||
|
||||
1. Node.js 14 or later
|
||||
2. Rust's package manager, Cargo. Use [rustup](https://rustup.rs/) to install.
|
||||
3. [protoc](https://grpc.io/docs/protoc-installation/) (Protocol Buffers compiler)
|
||||
|
||||
Initial setup:
|
||||
|
||||
```shell
|
||||
npm install
|
||||
```
|
||||
|
||||
### Commit Hooks
|
||||
|
||||
It is **highly recommended** to install the [pre-commit](https://pre-commit.com/) hooks to ensure that your
|
||||
code is formatted correctly and passes basic checks before committing:
|
||||
|
||||
```shell
|
||||
pre-commit install
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
Most common development commands can be run using the npm scripts.
|
||||
|
||||
Build the package
|
||||
|
||||
```shell
|
||||
npm install
|
||||
npm run build
|
||||
```
|
||||
|
||||
Lint:
|
||||
|
||||
```shell
|
||||
npm run lint
|
||||
```
|
||||
|
||||
Format and fix lints:
|
||||
|
||||
```shell
|
||||
npm run lint-fix
|
||||
```
|
||||
|
||||
Run tests:
|
||||
|
||||
```shell
|
||||
npm test
|
||||
```
|
||||
|
||||
To run a single test:
|
||||
|
||||
```shell
|
||||
# Single file: table.test.ts
|
||||
npm test -- table.test.ts
|
||||
# Single test: 'merge insert' in table.test.ts
|
||||
npm test -- table.test.ts --testNamePattern=merge\ insert
|
||||
```
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "lancedb-nodejs"
|
||||
edition.workspace = true
|
||||
version = "0.14.1"
|
||||
version = "0.14.2-beta.0"
|
||||
license.workspace = true
|
||||
description.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
@@ -36,37 +36,4 @@ The [quickstart](../basic.md) contains a more complete example.
|
||||
|
||||
## Development
|
||||
|
||||
```sh
|
||||
npm run build
|
||||
npm run test
|
||||
```
|
||||
|
||||
### Running lint / format
|
||||
|
||||
LanceDb uses [biome](https://biomejs.dev/) for linting and formatting. if you are using VSCode you will need to install the official [Biome](https://marketplace.visualstudio.com/items?itemName=biomejs.biome) extension.
|
||||
To manually lint your code you can run:
|
||||
|
||||
```sh
|
||||
npm run lint
|
||||
```
|
||||
|
||||
to automatically fix all fixable issues:
|
||||
|
||||
```sh
|
||||
npm run lint-fix
|
||||
```
|
||||
|
||||
If you do not have your workspace root set to the `nodejs` directory, unfortunately the extension will not work. You can still run the linting and formatting commands manually.
|
||||
|
||||
### Generating docs
|
||||
|
||||
```sh
|
||||
npm run docs
|
||||
|
||||
cd ../docs
|
||||
# Asssume the virtual environment was created
|
||||
# python3 -m venv venv
|
||||
# pip install -r requirements.txt
|
||||
. ./venv/bin/activate
|
||||
mkdocs build
|
||||
```
|
||||
See [CONTRIBUTING.md](./CONTRIBUTING.md) for information on how to contribute to LanceDB.
|
||||
|
||||
@@ -475,6 +475,62 @@ describe("When creating an index", () => {
|
||||
expect(rst.numRows).toBe(1);
|
||||
});
|
||||
|
||||
it("should search with distance range", async () => {
|
||||
await tbl.createIndex("vec");
|
||||
|
||||
const rst = await tbl.query().limit(10).nearestTo(queryVec).toArrow();
|
||||
const distanceColumn = rst.getChild("_distance");
|
||||
let minDist = undefined;
|
||||
let maxDist = undefined;
|
||||
if (distanceColumn) {
|
||||
minDist = distanceColumn.get(0);
|
||||
maxDist = distanceColumn.get(9);
|
||||
}
|
||||
|
||||
const rst2 = await tbl
|
||||
.query()
|
||||
.limit(10)
|
||||
.nearestTo(queryVec)
|
||||
.distanceRange(minDist, maxDist)
|
||||
.toArrow();
|
||||
const distanceColumn2 = rst2.getChild("_distance");
|
||||
expect(distanceColumn2).toBeDefined();
|
||||
if (distanceColumn2) {
|
||||
for await (const d of distanceColumn2) {
|
||||
expect(d).toBeGreaterThanOrEqual(minDist);
|
||||
expect(d).toBeLessThan(maxDist);
|
||||
}
|
||||
}
|
||||
|
||||
const rst3 = await tbl
|
||||
.query()
|
||||
.limit(10)
|
||||
.nearestTo(queryVec)
|
||||
.distanceRange(maxDist, undefined)
|
||||
.toArrow();
|
||||
const distanceColumn3 = rst3.getChild("_distance");
|
||||
expect(distanceColumn3).toBeDefined();
|
||||
if (distanceColumn3) {
|
||||
for await (const d of distanceColumn3) {
|
||||
expect(d).toBeGreaterThanOrEqual(maxDist);
|
||||
}
|
||||
}
|
||||
|
||||
const rst4 = await tbl
|
||||
.query()
|
||||
.limit(10)
|
||||
.nearestTo(queryVec)
|
||||
.distanceRange(undefined, minDist)
|
||||
.toArrow();
|
||||
const distanceColumn4 = rst4.getChild("_distance");
|
||||
expect(distanceColumn4).toBeDefined();
|
||||
if (distanceColumn4) {
|
||||
for await (const d of distanceColumn4) {
|
||||
expect(d).toBeLessThan(minDist);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("should create and search IVF_HNSW indices", async () => {
|
||||
await tbl.createIndex("vec", {
|
||||
config: Index.hnswSq(),
|
||||
|
||||
@@ -38,5 +38,19 @@ test("full text search", async () => {
|
||||
.toArray();
|
||||
// --8<-- [end:search2]
|
||||
expect(results2.length).toBe(10);
|
||||
|
||||
// --8<-- [start:distance_range]
|
||||
const results3 = await (
|
||||
tbl.search(Array(128).fill(1.2)) as lancedb.VectorQuery
|
||||
)
|
||||
.distanceType("cosine")
|
||||
.distanceRange(0.1, 0.2)
|
||||
.limit(10)
|
||||
.toArray();
|
||||
// --8<-- [end:distance_range]
|
||||
for (const r of results3) {
|
||||
expect(r.distance).toBeGreaterThanOrEqual(0.1);
|
||||
expect(r.distance).toBeLessThan(0.2);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
@@ -388,6 +388,19 @@ export class VectorQuery extends QueryBase<NativeVectorQuery> {
|
||||
return this;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the distance range to use
|
||||
*
|
||||
* Only rows with distances within range [lower_bound, upper_bound)
|
||||
* will be returned.
|
||||
*
|
||||
* `undefined` means no lower or upper bound.
|
||||
*/
|
||||
distanceRange(lowerBound?: number, upperBound?: number): VectorQuery {
|
||||
super.doCall((inner) => inner.distanceRange(lowerBound, upperBound));
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the number of candidates to consider during the search
|
||||
*
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-darwin-arm64",
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"os": ["darwin"],
|
||||
"cpu": ["arm64"],
|
||||
"main": "lancedb.darwin-arm64.node",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-darwin-x64",
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"os": ["darwin"],
|
||||
"cpu": ["x64"],
|
||||
"main": "lancedb.darwin-x64.node",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-linux-arm64-gnu",
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"os": ["linux"],
|
||||
"cpu": ["arm64"],
|
||||
"main": "lancedb.linux-arm64-gnu.node",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-linux-arm64-musl",
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"os": ["linux"],
|
||||
"cpu": ["arm64"],
|
||||
"main": "lancedb.linux-arm64-musl.node",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-linux-x64-gnu",
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"os": ["linux"],
|
||||
"cpu": ["x64"],
|
||||
"main": "lancedb.linux-x64-gnu.node",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-linux-x64-musl",
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"os": ["linux"],
|
||||
"cpu": ["x64"],
|
||||
"main": "lancedb.linux-x64-musl.node",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-win32-arm64-msvc",
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"os": [
|
||||
"win32"
|
||||
],
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-win32-x64-msvc",
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"os": ["win32"],
|
||||
"cpu": ["x64"],
|
||||
"main": "lancedb.win32-x64-msvc.node",
|
||||
|
||||
151
nodejs/package-lock.json
generated
151
nodejs/package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb",
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@lancedb/lancedb",
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"cpu": [
|
||||
"x64",
|
||||
"arm64"
|
||||
@@ -18,6 +18,7 @@
|
||||
"win32"
|
||||
],
|
||||
"dependencies": {
|
||||
"@lancedb/lancedb": "^0.14.1",
|
||||
"reflect-metadata": "^0.2.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
@@ -4149,6 +4150,152 @@
|
||||
"@jridgewell/sourcemap-codec": "^1.4.14"
|
||||
}
|
||||
},
|
||||
"node_modules/@lancedb/lancedb": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/lancedb/-/lancedb-0.14.1.tgz",
|
||||
"integrity": "sha512-DfJ887t52n/2s8G1JnzE7gAR4i7UnfP1OjDYnJ4yTk0aIcn76CbVOUegYfURYlYjL+QFdI1MrAzUdMgYgsGGcA==",
|
||||
"cpu": [
|
||||
"x64",
|
||||
"arm64"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
"os": [
|
||||
"darwin",
|
||||
"linux",
|
||||
"win32"
|
||||
],
|
||||
"dependencies": {
|
||||
"reflect-metadata": "^0.2.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@lancedb/lancedb-darwin-arm64": "0.14.1",
|
||||
"@lancedb/lancedb-darwin-x64": "0.14.1",
|
||||
"@lancedb/lancedb-linux-arm64-gnu": "0.14.1",
|
||||
"@lancedb/lancedb-linux-arm64-musl": "0.14.1",
|
||||
"@lancedb/lancedb-linux-x64-gnu": "0.14.1",
|
||||
"@lancedb/lancedb-linux-x64-musl": "0.14.1",
|
||||
"@lancedb/lancedb-win32-arm64-msvc": "0.14.1",
|
||||
"@lancedb/lancedb-win32-x64-msvc": "0.14.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"apache-arrow": ">=15.0.0 <=18.1.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@lancedb/lancedb-darwin-arm64": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/lancedb-darwin-arm64/-/lancedb-darwin-arm64-0.14.1.tgz",
|
||||
"integrity": "sha512-eSWV3GydXfyaptPXZ+S3BgXY1YI26oHQDekACaVevRW6/YQD7sS9UhhSZn1mYyDtLTfJu2kOK2XHA9UY8nyuTg==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"darwin"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
}
|
||||
},
|
||||
"node_modules/@lancedb/lancedb-darwin-x64": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/lancedb-darwin-x64/-/lancedb-darwin-x64-0.14.1.tgz",
|
||||
"integrity": "sha512-ecf50ykF9WCWmpwAjs3Mk2mph7d+rMJ9EVJeX0UJ4KHDC874lnTDo6Tfd9iUcbExtNI1KZbu+CFnYsbQU+R0gw==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"darwin"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
}
|
||||
},
|
||||
"node_modules/@lancedb/lancedb-linux-arm64-gnu": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/lancedb-linux-arm64-gnu/-/lancedb-linux-arm64-gnu-0.14.1.tgz",
|
||||
"integrity": "sha512-X7ub1fOm7jZ19KFW/u3nDyFvj5XzDPqEVrp9mmcOgSrst3NJEGGBz1JypkLnTWpg/7IpCBs1UO1G7R7LEsHYOA==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
}
|
||||
},
|
||||
"node_modules/@lancedb/lancedb-linux-arm64-musl": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/lancedb-linux-arm64-musl/-/lancedb-linux-arm64-musl-0.14.1.tgz",
|
||||
"integrity": "sha512-rkiWpsQCXwybwEjcdFXkAeGahiLcK/NQUjZc9WBY6CKk2Y9dICIafYzxZ6MDCY19jeJIgs3JS0mjleUWYr3JFw==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
}
|
||||
},
|
||||
"node_modules/@lancedb/lancedb-linux-x64-gnu": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/lancedb-linux-x64-gnu/-/lancedb-linux-x64-gnu-0.14.1.tgz",
|
||||
"integrity": "sha512-LGp4D58pQJ3+H3GncNxWHkvhIVOKpTzYUBtVfC8he1rwZ6+CiYDyK9Sim/j8o3UJlJ7cP0m3gNUzPfQchQF9WA==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
}
|
||||
},
|
||||
"node_modules/@lancedb/lancedb-linux-x64-musl": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/lancedb-linux-x64-musl/-/lancedb-linux-x64-musl-0.14.1.tgz",
|
||||
"integrity": "sha512-V/TeoyKUESPL/8L1z4WLbMFe5ZEv4gtxc0AFK8ghiduFYN/Hckuj4oTo/Y0ysLiBx1At9FCa91hWDB301ibHBg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
}
|
||||
},
|
||||
"node_modules/@lancedb/lancedb-win32-x64-msvc": {
|
||||
"version": "0.14.1",
|
||||
"resolved": "https://registry.npmjs.org/@lancedb/lancedb-win32-x64-msvc/-/lancedb-win32-x64-msvc-0.14.1.tgz",
|
||||
"integrity": "sha512-4M8D0j8/3WZv4CKo+Z44sISKPCKWN5MWA0dcEEGw4sEXHF2RJLrMIOOgEpT5NF7VW+X4t2JJxUA6j2T3cXaD8w==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"win32"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
}
|
||||
},
|
||||
"node_modules/@napi-rs/cli": {
|
||||
"version": "2.18.3",
|
||||
"resolved": "https://registry.npmjs.org/@napi-rs/cli/-/cli-2.18.3.tgz",
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"ann"
|
||||
],
|
||||
"private": false,
|
||||
"version": "0.14.1",
|
||||
"version": "0.14.2-beta.0",
|
||||
"main": "dist/index.js",
|
||||
"exports": {
|
||||
".": "./dist/index.js",
|
||||
|
||||
@@ -171,6 +171,15 @@ impl VectorQuery {
|
||||
self.inner = self.inner.clone().nprobes(nprobe as usize);
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn distance_range(&mut self, lower_bound: Option<f64>, upper_bound: Option<f64>) {
|
||||
// napi doesn't support f32, so we have to convert to f32
|
||||
self.inner = self
|
||||
.inner
|
||||
.clone()
|
||||
.distance_range(lower_bound.map(|v| v as f32), upper_bound.map(|v| v as f32));
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn ef(&mut self, ef: u32) {
|
||||
self.inner = self.inner.clone().ef(ef as usize);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
[tool.bumpversion]
|
||||
current_version = "0.17.2-beta.2"
|
||||
current_version = "0.18.0-beta.0"
|
||||
parse = """(?x)
|
||||
(?P<major>0|[1-9]\\d*)\\.
|
||||
(?P<minor>0|[1-9]\\d*)\\.
|
||||
|
||||
78
python/CONTRIBUTING.md
Normal file
78
python/CONTRIBUTING.md
Normal file
@@ -0,0 +1,78 @@
|
||||
# Contributing to LanceDB Python
|
||||
|
||||
This document outlines the process for contributing to LanceDB Python.
|
||||
For general contribution guidelines, see [CONTRIBUTING.md](../CONTRIBUTING.md).
|
||||
|
||||
## Project layout
|
||||
|
||||
The Python package is a wrapper around the Rust library, `lancedb`. We use
|
||||
[pyo3](https://pyo3.rs/) to create the bindings between Rust and Python.
|
||||
|
||||
* `src/`: Rust bindings source code
|
||||
* `python/lancedb`: Python package source code
|
||||
* `python/tests`: Unit tests
|
||||
|
||||
## Development environment
|
||||
|
||||
To set up your development environment, you will need to install the following:
|
||||
|
||||
1. Python 3.9 or later
|
||||
2. Cargo (Rust's package manager). Use [rustup](https://rustup.rs/) to install.
|
||||
3. [protoc](https://grpc.io/docs/protoc-installation/) (Protocol Buffers compiler)
|
||||
|
||||
Create a virtual environment to work in:
|
||||
|
||||
```bash
|
||||
python -m venv venv
|
||||
source venv/bin/activate
|
||||
pip install maturin
|
||||
```
|
||||
|
||||
### Commit Hooks
|
||||
|
||||
It is **highly recommended** to install the pre-commit hooks to ensure that your
|
||||
code is formatted correctly and passes basic checks before committing:
|
||||
|
||||
```bash
|
||||
make develop # this will install pre-commit itself
|
||||
pre-commit install
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
Most common development commands can be run using the Makefile.
|
||||
|
||||
Build the package
|
||||
|
||||
```shell
|
||||
make develop
|
||||
```
|
||||
|
||||
Format:
|
||||
|
||||
```shell
|
||||
make format
|
||||
```
|
||||
|
||||
Run tests:
|
||||
|
||||
```shell
|
||||
make test
|
||||
make doctest
|
||||
```
|
||||
|
||||
To run a single test, you can use the `pytest` command directly. Provide the path
|
||||
to the test file, and optionally the test name after `::`.
|
||||
|
||||
```shell
|
||||
# Single file: test_table.py
|
||||
pytest -vv python/tests/test_table.py
|
||||
# Single test: test_basic in test_table.py
|
||||
pytest -vv python/tests/test_table.py::test_basic
|
||||
```
|
||||
|
||||
To see all commands, run:
|
||||
|
||||
```shell
|
||||
make help
|
||||
```
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lancedb-python"
|
||||
version = "0.17.2-beta.2"
|
||||
version = "0.18.0-beta.0"
|
||||
edition.workspace = true
|
||||
description = "Python bindings for LanceDB"
|
||||
license.workspace = true
|
||||
|
||||
32
python/Makefile
Normal file
32
python/Makefile
Normal file
@@ -0,0 +1,32 @@
|
||||
PIP_EXTRA_INDEX_URL ?= https://pypi.fury.io/lancedb/
|
||||
|
||||
help: ## Show this help.
|
||||
@sed -ne '/@sed/!s/## //p' $(MAKEFILE_LIST)
|
||||
|
||||
.PHONY: develop
|
||||
develop: ## Install the package in development mode.
|
||||
PIP_EXTRA_INDEX_URL=$(PIP_EXTRA_INDEX_URL) maturin develop --extras tests,dev,embeddings
|
||||
|
||||
.PHONY: format
|
||||
format: ## Format the code.
|
||||
cargo fmt
|
||||
ruff format python
|
||||
|
||||
.PHONY: check
|
||||
check: ## Check formatting and lints.
|
||||
cargo fmt --check
|
||||
ruff format --check python
|
||||
cargo clippy
|
||||
ruff check python
|
||||
|
||||
.PHONY: fix
|
||||
fix: ## Fix python lints
|
||||
ruff check python --fix
|
||||
|
||||
.PHONY: doctest
|
||||
doctest: ## Run documentation tests.
|
||||
pytest --doctest-modules python/lancedb
|
||||
|
||||
.PHONY: test
|
||||
test: ## Run tests.
|
||||
pytest python/tests -vv --durations=10 -m "not slow"
|
||||
@@ -8,6 +8,15 @@ A Python library for [LanceDB](https://github.com/lancedb/lancedb).
|
||||
pip install lancedb
|
||||
```
|
||||
|
||||
### Preview Releases
|
||||
|
||||
Stable releases are created about every 2 weeks. For the latest features and bug fixes, you can install the preview release. These releases receive the same level of testing as stable releases, but are not guaranteed to be available for more than 6 months after they are released. Once your application is stable, we recommend switching to stable releases.
|
||||
|
||||
|
||||
```bash
|
||||
pip install --pre --extra-index-url https://pypi.fury.io/lancedb/ lancedb
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic Example
|
||||
@@ -20,76 +29,6 @@ results = table.search([0.1, 0.3]).limit(20).to_list()
|
||||
print(results)
|
||||
```
|
||||
|
||||
## Development
|
||||
### Development
|
||||
|
||||
LanceDb is based on the rust crate `lancedb` and is built with maturin. In order to build with maturin
|
||||
you will either need a conda environment or a virtual environment (venv).
|
||||
|
||||
```bash
|
||||
python -m venv venv
|
||||
. ./venv/bin/activate
|
||||
```
|
||||
|
||||
Install the necessary packages:
|
||||
|
||||
```bash
|
||||
python -m pip install .[tests,dev]
|
||||
```
|
||||
|
||||
To build the python package you can use maturin:
|
||||
|
||||
```bash
|
||||
# This will build the rust bindings and place them in the appropriate place
|
||||
# in your venv or conda environment
|
||||
maturin develop
|
||||
```
|
||||
|
||||
To run the unit tests:
|
||||
|
||||
```bash
|
||||
pytest
|
||||
```
|
||||
|
||||
To run the doc tests:
|
||||
|
||||
```bash
|
||||
pytest --doctest-modules python/lancedb
|
||||
```
|
||||
|
||||
To run linter and automatically fix all errors:
|
||||
|
||||
```bash
|
||||
ruff format python
|
||||
ruff --fix python
|
||||
```
|
||||
|
||||
If any packages are missing, install them with:
|
||||
|
||||
```bash
|
||||
pip install <PACKAGE_NAME>
|
||||
```
|
||||
|
||||
___
|
||||
For **Windows** users, there may be errors when installing packages, so these commands may be helpful:
|
||||
|
||||
Activate the virtual environment:
|
||||
|
||||
```bash
|
||||
. .\venv\Scripts\activate
|
||||
```
|
||||
|
||||
You may need to run the installs separately:
|
||||
|
||||
```bash
|
||||
pip install -e .[tests]
|
||||
pip install -e .[dev]
|
||||
```
|
||||
|
||||
`tantivy` requires `rust` to be installed, so install it with `conda`, as it doesn't support windows installation:
|
||||
|
||||
```bash
|
||||
pip install wheel
|
||||
pip install cargo
|
||||
conda install rust
|
||||
pip install tantivy
|
||||
```
|
||||
See [CONTRIBUTING.md](./CONTRIBUTING.md) for information on how to contribute to LanceDB.
|
||||
|
||||
@@ -784,10 +784,6 @@ class AsyncConnection(object):
|
||||
registry = EmbeddingFunctionRegistry.get_instance()
|
||||
metadata = registry.get_table_metadata(embedding_functions)
|
||||
|
||||
data, schema = sanitize_create_table(
|
||||
data, schema, metadata, on_bad_vectors, fill_value
|
||||
)
|
||||
|
||||
# Defining defaults here and not in function prototype. In the future
|
||||
# these defaults will move into rust so better to keep them as None.
|
||||
if on_bad_vectors is None:
|
||||
|
||||
@@ -108,9 +108,14 @@ class EmbeddingFunctionRegistry:
|
||||
An empty dict is returned if input is None or does not
|
||||
contain b"embedding_functions".
|
||||
"""
|
||||
if metadata is None or b"embedding_functions" not in metadata:
|
||||
if metadata is None:
|
||||
return {}
|
||||
# Look at both bytes and string keys, since we might use either
|
||||
serialized = metadata.get(
|
||||
b"embedding_functions", metadata.get("embedding_functions")
|
||||
)
|
||||
if serialized is None:
|
||||
return {}
|
||||
serialized = metadata[b"embedding_functions"]
|
||||
raw_list = json.loads(serialized.decode("utf-8"))
|
||||
return {
|
||||
obj["vector_column"]: EmbeddingFunctionConfig(
|
||||
|
||||
@@ -28,7 +28,7 @@ from .arrow import AsyncRecordBatchReader
|
||||
from .rerankers.base import Reranker
|
||||
from .rerankers.rrf import RRFReranker
|
||||
from .rerankers.util import check_reranker_result
|
||||
from .util import safe_import_pandas
|
||||
from .util import safe_import_pandas, flatten_columns
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import PIL
|
||||
@@ -254,7 +254,7 @@ class LanceQueryBuilder(ABC):
|
||||
self._offset = 0
|
||||
self._columns = None
|
||||
self._where = None
|
||||
self._prefilter = False
|
||||
self._prefilter = True
|
||||
self._with_row_id = False
|
||||
self._vector = None
|
||||
self._text = None
|
||||
@@ -293,24 +293,7 @@ class LanceQueryBuilder(ABC):
|
||||
specified depth.
|
||||
If unspecified, do not flatten the nested columns.
|
||||
"""
|
||||
tbl = self.to_arrow()
|
||||
if flatten is True:
|
||||
while True:
|
||||
tbl = tbl.flatten()
|
||||
# loop through all columns to check if there is any struct column
|
||||
if any(pa.types.is_struct(col.type) for col in tbl.schema):
|
||||
continue
|
||||
else:
|
||||
break
|
||||
elif isinstance(flatten, int):
|
||||
if flatten <= 0:
|
||||
raise ValueError(
|
||||
"Please specify a positive integer for flatten or the boolean "
|
||||
"value `True`"
|
||||
)
|
||||
while flatten > 0:
|
||||
tbl = tbl.flatten()
|
||||
flatten -= 1
|
||||
tbl = flatten_columns(self.to_arrow(), flatten)
|
||||
return tbl.to_pandas()
|
||||
|
||||
@abstractmethod
|
||||
@@ -442,7 +425,7 @@ class LanceQueryBuilder(ABC):
|
||||
raise ValueError("columns must be a list or a dictionary")
|
||||
return self
|
||||
|
||||
def where(self, where: str, prefilter: bool = False) -> LanceQueryBuilder:
|
||||
def where(self, where: str, prefilter: bool = True) -> LanceQueryBuilder:
|
||||
"""Set the where clause.
|
||||
|
||||
Parameters
|
||||
@@ -451,7 +434,7 @@ class LanceQueryBuilder(ABC):
|
||||
The where clause which is a valid SQL where clause. See
|
||||
`Lance filter pushdown <https://lancedb.github.io/lance/read_and_write.html#filter-push-down>`_
|
||||
for valid SQL expressions.
|
||||
prefilter: bool, default False
|
||||
prefilter: bool, default True
|
||||
If True, apply the filter before vector search, otherwise the
|
||||
filter is applied on the result of vector search.
|
||||
This feature is **EXPERIMENTAL** and may be removed and modified
|
||||
@@ -489,7 +472,7 @@ class LanceQueryBuilder(ABC):
|
||||
--------
|
||||
>>> import lancedb
|
||||
>>> db = lancedb.connect("./.lancedb")
|
||||
>>> table = db.create_table("my_table", [{"vector": [99, 99]}])
|
||||
>>> table = db.create_table("my_table", [{"vector": [99.0, 99]}])
|
||||
>>> query = [100, 100]
|
||||
>>> plan = table.search(query).explain_plan(True)
|
||||
>>> print(plan) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
|
||||
@@ -592,7 +575,8 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
|
||||
... .limit(2)
|
||||
... .to_pandas())
|
||||
b vector _distance
|
||||
0 6 [0.4, 0.4] 0.0
|
||||
0 6 [0.4, 0.4] 0.000000
|
||||
1 2 [1.1, 1.2] 0.000944
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
@@ -664,7 +648,7 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
lower: Optional[float]
|
||||
lower_bound: Optional[float]
|
||||
The lower bound of the distance range.
|
||||
upper_bound: Optional[float]
|
||||
The upper bound of the distance range.
|
||||
@@ -779,7 +763,7 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
|
||||
|
||||
return result_set
|
||||
|
||||
def where(self, where: str, prefilter: bool = False) -> LanceVectorQueryBuilder:
|
||||
def where(self, where: str, prefilter: bool = True) -> LanceVectorQueryBuilder:
|
||||
"""Set the where clause.
|
||||
|
||||
Parameters
|
||||
@@ -788,7 +772,7 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
|
||||
The where clause which is a valid SQL where clause. See
|
||||
`Lance filter pushdown <https://lancedb.github.io/lance/read_and_write.html#filter-push-down>`_
|
||||
for valid SQL expressions.
|
||||
prefilter: bool, default False
|
||||
prefilter: bool, default True
|
||||
If True, apply the filter before vector search, otherwise the
|
||||
filter is applied on the result of vector search.
|
||||
This feature is **EXPERIMENTAL** and may be removed and modified
|
||||
@@ -1326,7 +1310,7 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
lower: Optional[float]
|
||||
lower_bound: Optional[float]
|
||||
The lower bound of the distance range.
|
||||
upper_bound: Optional[float]
|
||||
The upper bound of the distance range.
|
||||
@@ -1595,7 +1579,9 @@ class AsyncQueryBase(object):
|
||||
"""
|
||||
return (await self.to_arrow()).to_pylist()
|
||||
|
||||
async def to_pandas(self) -> "pd.DataFrame":
|
||||
async def to_pandas(
|
||||
self, flatten: Optional[Union[int, bool]] = None
|
||||
) -> "pd.DataFrame":
|
||||
"""
|
||||
Execute the query and collect the results into a pandas DataFrame.
|
||||
|
||||
@@ -1615,8 +1601,42 @@ class AsyncQueryBase(object):
|
||||
... async for batch in await table.query().to_batches():
|
||||
... batch_df = batch.to_pandas()
|
||||
>>> asyncio.run(doctest_example())
|
||||
|
||||
Parameters
|
||||
----------
|
||||
flatten: Optional[Union[int, bool]]
|
||||
If flatten is True, flatten all nested columns.
|
||||
If flatten is an integer, flatten the nested columns up to the
|
||||
specified depth.
|
||||
If unspecified, do not flatten the nested columns.
|
||||
"""
|
||||
return (await self.to_arrow()).to_pandas()
|
||||
return (flatten_columns(await self.to_arrow(), flatten)).to_pandas()
|
||||
|
||||
async def to_polars(self) -> "pl.DataFrame":
|
||||
"""
|
||||
Execute the query and collect the results into a Polars DataFrame.
|
||||
|
||||
This method will collect all results into memory before returning. If you
|
||||
expect a large number of results, you may want to use
|
||||
[to_batches][lancedb.query.AsyncQueryBase.to_batches] and convert each batch to
|
||||
polars separately.
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
>>> import asyncio
|
||||
>>> import polars as pl
|
||||
>>> from lancedb import connect_async
|
||||
>>> async def doctest_example():
|
||||
... conn = await connect_async("./.lancedb")
|
||||
... table = await conn.create_table("my_table", data=[{"a": 1, "b": 2}])
|
||||
... async for batch in await table.query().to_batches():
|
||||
... batch_df = pl.from_arrow(batch)
|
||||
>>> asyncio.run(doctest_example())
|
||||
"""
|
||||
import polars as pl
|
||||
|
||||
return pl.from_arrow(await self.to_arrow())
|
||||
|
||||
async def explain_plan(self, verbose: Optional[bool] = False):
|
||||
"""Return the execution plan for this query.
|
||||
@@ -1783,10 +1803,22 @@ class AsyncFTSQuery(AsyncQueryBase):
|
||||
def __init__(self, inner: LanceFTSQuery):
|
||||
super().__init__(inner)
|
||||
self._inner = inner
|
||||
self._reranker = None
|
||||
|
||||
def get_query(self):
|
||||
self._inner.get_query()
|
||||
|
||||
def rerank(
|
||||
self,
|
||||
reranker: Reranker = RRFReranker(),
|
||||
) -> AsyncFTSQuery:
|
||||
if reranker and not isinstance(reranker, Reranker):
|
||||
raise ValueError("reranker must be an instance of Reranker class.")
|
||||
|
||||
self._reranker = reranker
|
||||
|
||||
return self
|
||||
|
||||
def nearest_to(
|
||||
self,
|
||||
query_vector: Union[VEC, Tuple, List[VEC]],
|
||||
@@ -1857,6 +1889,12 @@ class AsyncFTSQuery(AsyncQueryBase):
|
||||
self._inner.nearest_to(AsyncQuery._query_vec_to_array(query_vector))
|
||||
)
|
||||
|
||||
async def to_arrow(self) -> pa.Table:
|
||||
results = await super().to_arrow()
|
||||
if self._reranker:
|
||||
results = self._reranker.rerank_fts(results)
|
||||
return results
|
||||
|
||||
|
||||
class AsyncVectorQuery(AsyncQueryBase):
|
||||
def __init__(self, inner: LanceVectorQuery):
|
||||
@@ -1871,6 +1909,7 @@ class AsyncVectorQuery(AsyncQueryBase):
|
||||
"""
|
||||
super().__init__(inner)
|
||||
self._inner = inner
|
||||
self._reranker = None
|
||||
|
||||
def column(self, column: str) -> AsyncVectorQuery:
|
||||
"""
|
||||
@@ -1921,7 +1960,7 @@ class AsyncVectorQuery(AsyncQueryBase):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
lower: Optional[float]
|
||||
lower_bound: Optional[float]
|
||||
The lower bound of the distance range.
|
||||
upper_bound: Optional[float]
|
||||
The upper bound of the distance range.
|
||||
@@ -2016,6 +2055,16 @@ class AsyncVectorQuery(AsyncQueryBase):
|
||||
self._inner.bypass_vector_index()
|
||||
return self
|
||||
|
||||
def rerank(
|
||||
self, reranker: Reranker = RRFReranker(), query_string: Optional[str] = None
|
||||
) -> AsyncHybridQuery:
|
||||
if reranker and not isinstance(reranker, Reranker):
|
||||
raise ValueError("reranker must be an instance of Reranker class.")
|
||||
|
||||
self._reranker = reranker
|
||||
|
||||
return self
|
||||
|
||||
def nearest_to_text(
|
||||
self, query: str, columns: Union[str, List[str]] = []
|
||||
) -> AsyncHybridQuery:
|
||||
@@ -2049,6 +2098,12 @@ class AsyncVectorQuery(AsyncQueryBase):
|
||||
self._inner.nearest_to_text({"query": query, "columns": columns})
|
||||
)
|
||||
|
||||
async def to_arrow(self) -> pa.Table:
|
||||
results = await super().to_arrow()
|
||||
if self._reranker:
|
||||
results = self._reranker.rerank_vector(results)
|
||||
return results
|
||||
|
||||
|
||||
class AsyncHybridQuery(AsyncQueryBase):
|
||||
"""
|
||||
|
||||
@@ -25,7 +25,6 @@ from urllib.parse import urlparse
|
||||
import lance
|
||||
from lancedb.background_loop import LOOP
|
||||
from .dependencies import _check_for_pandas
|
||||
import numpy as np
|
||||
import pyarrow as pa
|
||||
import pyarrow.compute as pc
|
||||
import pyarrow.fs as pa_fs
|
||||
@@ -74,34 +73,17 @@ pl = safe_import_polars()
|
||||
QueryType = Literal["vector", "fts", "hybrid", "auto"]
|
||||
|
||||
|
||||
def _pd_schema_without_embedding_funcs(
|
||||
schema: Optional[pa.Schema], columns: List[str]
|
||||
) -> Optional[pa.Schema]:
|
||||
"""Return a schema without any embedding function columns"""
|
||||
if schema is None:
|
||||
return None
|
||||
embedding_functions = EmbeddingFunctionRegistry.get_instance().parse_functions(
|
||||
schema.metadata
|
||||
)
|
||||
if not embedding_functions:
|
||||
return schema
|
||||
return pa.schema([field for field in schema if field.name in columns])
|
||||
|
||||
|
||||
def _coerce_to_table(data, schema: Optional[pa.Schema] = None) -> pa.Table:
|
||||
def _into_pyarrow_table(data) -> pa.Table:
|
||||
if _check_for_hugging_face(data):
|
||||
# Huggingface datasets
|
||||
from lance.dependencies import datasets
|
||||
|
||||
if isinstance(data, datasets.Dataset):
|
||||
if schema is None:
|
||||
schema = data.features.arrow_schema
|
||||
schema = data.features.arrow_schema
|
||||
return pa.Table.from_batches(data.data.to_batches(), schema=schema)
|
||||
elif isinstance(data, datasets.dataset_dict.DatasetDict):
|
||||
if schema is None:
|
||||
schema = _schema_from_hf(data, schema)
|
||||
schema = _schema_from_hf(data, schema)
|
||||
return pa.Table.from_batches(_to_batches_with_split(data), schema=schema)
|
||||
|
||||
if isinstance(data, LanceModel):
|
||||
raise ValueError("Cannot add a single LanceModel to a table. Use a list.")
|
||||
|
||||
@@ -111,17 +93,15 @@ def _coerce_to_table(data, schema: Optional[pa.Schema] = None) -> pa.Table:
|
||||
if isinstance(data, list):
|
||||
# convert to list of dict if data is a bunch of LanceModels
|
||||
if isinstance(data[0], LanceModel):
|
||||
if schema is None:
|
||||
schema = data[0].__class__.to_arrow_schema()
|
||||
schema = data[0].__class__.to_arrow_schema()
|
||||
data = [model_to_dict(d) for d in data]
|
||||
return pa.Table.from_pylist(data, schema=schema)
|
||||
elif isinstance(data[0], pa.RecordBatch):
|
||||
return pa.Table.from_batches(data, schema=schema)
|
||||
return pa.Table.from_batches(data)
|
||||
else:
|
||||
return pa.Table.from_pylist(data, schema=schema)
|
||||
elif _check_for_pandas(data) and isinstance(data, pd.DataFrame): # type: ignore
|
||||
raw_schema = _pd_schema_without_embedding_funcs(schema, data.columns.to_list())
|
||||
table = pa.Table.from_pandas(data, preserve_index=False, schema=raw_schema)
|
||||
return pa.Table.from_pylist(data)
|
||||
elif _check_for_pandas(data) and isinstance(data, pd.DataFrame):
|
||||
table = pa.Table.from_pandas(data, preserve_index=False)
|
||||
# Do not serialize Pandas metadata
|
||||
meta = table.schema.metadata if table.schema.metadata is not None else {}
|
||||
meta = {k: v for k, v in meta.items() if k != b"pandas"}
|
||||
@@ -143,8 +123,13 @@ def _coerce_to_table(data, schema: Optional[pa.Schema] = None) -> pa.Table:
|
||||
and data.__class__.__name__ == "DataFrame"
|
||||
):
|
||||
return data.to_arrow()
|
||||
elif (
|
||||
type(data).__module__.startswith("polars")
|
||||
and data.__class__.__name__ == "LazyFrame"
|
||||
):
|
||||
return data.collect().to_arrow()
|
||||
elif isinstance(data, Iterable):
|
||||
return _process_iterator(data, schema)
|
||||
return _iterator_to_table(data)
|
||||
else:
|
||||
raise TypeError(
|
||||
f"Unknown data type {type(data)}. "
|
||||
@@ -154,27 +139,172 @@ def _coerce_to_table(data, schema: Optional[pa.Schema] = None) -> pa.Table:
|
||||
)
|
||||
|
||||
|
||||
def _iterator_to_table(data: Iterable) -> pa.Table:
|
||||
batches = []
|
||||
schema = None # Will get schema from first batch
|
||||
for batch in data:
|
||||
batch_table = _into_pyarrow_table(batch)
|
||||
if schema is not None:
|
||||
if batch_table.schema != schema:
|
||||
try:
|
||||
batch_table = batch_table.cast(schema)
|
||||
except pa.lib.ArrowInvalid:
|
||||
raise ValueError(
|
||||
f"Input iterator yielded a batch with schema that "
|
||||
f"does not match the schema of other batches.\n"
|
||||
f"Expected:\n{schema}\nGot:\n{batch_table.schema}"
|
||||
)
|
||||
else:
|
||||
# Use the first schema for the remainder of the batches
|
||||
schema = batch_table.schema
|
||||
batches.append(batch_table)
|
||||
|
||||
if batches:
|
||||
return pa.concat_tables(batches)
|
||||
else:
|
||||
raise ValueError("Input iterable is empty")
|
||||
|
||||
|
||||
def _sanitize_data(
|
||||
data: Any,
|
||||
schema: Optional[pa.Schema] = None,
|
||||
data: "DATA",
|
||||
target_schema: Optional[pa.Schema] = None,
|
||||
metadata: Optional[dict] = None, # embedding metadata
|
||||
on_bad_vectors: str = "error",
|
||||
on_bad_vectors: Literal["error", "drop", "fill", "null"] = "error",
|
||||
fill_value: float = 0.0,
|
||||
) -> Tuple[pa.Table, pa.Schema]:
|
||||
data = _coerce_to_table(data, schema)
|
||||
*,
|
||||
allow_subschema: bool = False,
|
||||
) -> pa.Table:
|
||||
"""
|
||||
Handle input data, applying all standard transformations.
|
||||
|
||||
This includes:
|
||||
|
||||
* Converting the data to a PyArrow Table
|
||||
* Adding vector columns defined in the metadata
|
||||
* Adding embedding metadata into the schema
|
||||
* Casting the table to the target schema
|
||||
* Handling bad vectors
|
||||
|
||||
Parameters
|
||||
----------
|
||||
target_schema : Optional[pa.Schema], default None
|
||||
The schema to cast the table to. This is typically the schema of the table
|
||||
if it already exists. Otherwise it might be a user-requested schema.
|
||||
allow_subschema : bool, default False
|
||||
If True, the input table is allowed to omit columns from the target schema.
|
||||
The target schema will be filtered to only include columns that are present
|
||||
in the input table before casting.
|
||||
metadata : Optional[dict], default None
|
||||
The embedding metadata to add to the schema.
|
||||
on_bad_vectors : Literal["error", "drop", "fill", "null"], default "error"
|
||||
What to do if any of the vectors are not the same size or contains NaNs.
|
||||
fill_value : float, default 0.0
|
||||
The value to use when filling vectors. Only used if on_bad_vectors="fill".
|
||||
All entries in the vector will be set to this value.
|
||||
"""
|
||||
# At this point, the table might not match the schema we are targeting:
|
||||
# 1. There might be embedding columns missing that will be added
|
||||
# in the add_embeddings step.
|
||||
# 2. If `allow_subschemas` is True, there might be columns missing.
|
||||
table = _into_pyarrow_table(data)
|
||||
|
||||
table = _append_vector_columns(table, target_schema, metadata=metadata)
|
||||
|
||||
# This happens before the cast so we can fix vector columns with
|
||||
# incorrect lengths before they are cast to FSL.
|
||||
table = _handle_bad_vectors(
|
||||
table,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
fill_value=fill_value,
|
||||
)
|
||||
|
||||
if target_schema is None:
|
||||
target_schema = _infer_target_schema(table)
|
||||
|
||||
if metadata:
|
||||
data = _append_vector_col(data, metadata, schema)
|
||||
metadata.update(data.schema.metadata or {})
|
||||
data = data.replace_schema_metadata(metadata)
|
||||
new_metadata = target_schema.metadata or {}
|
||||
new_metadata = new_metadata.update(metadata)
|
||||
target_schema = target_schema.with_metadata(new_metadata)
|
||||
|
||||
# TODO improve the logics in _sanitize_schema
|
||||
data = _sanitize_schema(data, schema, on_bad_vectors, fill_value)
|
||||
if schema is None:
|
||||
schema = data.schema
|
||||
_validate_schema(target_schema)
|
||||
|
||||
_validate_schema(schema)
|
||||
return data, schema
|
||||
table = _cast_to_target_schema(table, target_schema, allow_subschema)
|
||||
|
||||
return table
|
||||
|
||||
|
||||
def _cast_to_target_schema(
|
||||
table: pa.Table,
|
||||
target_schema: pa.Schema,
|
||||
allow_subschema: bool = False,
|
||||
) -> pa.Table:
|
||||
# pa.Table.cast expects field order not to be changed.
|
||||
# Lance doesn't care about field order, so we don't need to rearrange fields
|
||||
# to match the target schema. We just need to correctly cast the fields.
|
||||
if table.schema == target_schema:
|
||||
# Fast path when the schemas are already the same
|
||||
return table
|
||||
|
||||
fields = []
|
||||
for field in table.schema:
|
||||
target_field = target_schema.field(field.name)
|
||||
if target_field is None:
|
||||
raise ValueError(f"Field {field.name} not found in target schema")
|
||||
fields.append(target_field)
|
||||
reordered_schema = pa.schema(fields, metadata=target_schema.metadata)
|
||||
if not allow_subschema and len(reordered_schema) != len(target_schema):
|
||||
raise ValueError(
|
||||
"Input table has different number of columns than target schema"
|
||||
)
|
||||
|
||||
if allow_subschema and len(reordered_schema) != len(target_schema):
|
||||
fields = _infer_subschema(
|
||||
list(iter(table.schema)), list(iter(reordered_schema))
|
||||
)
|
||||
subschema = pa.schema(fields, metadata=target_schema.metadata)
|
||||
return table.cast(subschema)
|
||||
else:
|
||||
return table.cast(reordered_schema)
|
||||
|
||||
|
||||
def _infer_subschema(
|
||||
schema: List[pa.Field],
|
||||
reference_fields: List[pa.Field],
|
||||
) -> List[pa.Field]:
|
||||
"""
|
||||
Transform the list of fields so the types match the reference_fields.
|
||||
|
||||
The order of the fields is preserved.
|
||||
|
||||
``schema`` may have fewer fields than `reference_fields`, but it may not have
|
||||
more fields.
|
||||
|
||||
"""
|
||||
fields = []
|
||||
lookup = {f.name: f for f in reference_fields}
|
||||
for field in schema:
|
||||
reference = lookup.get(field.name)
|
||||
if reference is None:
|
||||
raise ValueError("Unexpected field in schema: {}".format(field))
|
||||
|
||||
if pa.types.is_struct(reference.type):
|
||||
new_type = pa.struct(
|
||||
_infer_subschema(
|
||||
field.type.fields,
|
||||
reference.type.fields,
|
||||
)
|
||||
)
|
||||
new_field = pa.field(
|
||||
field.name,
|
||||
new_type,
|
||||
reference.nullable,
|
||||
)
|
||||
else:
|
||||
new_field = reference
|
||||
|
||||
fields.append(new_field)
|
||||
|
||||
return fields
|
||||
|
||||
|
||||
def sanitize_create_table(
|
||||
@@ -193,13 +323,14 @@ def sanitize_create_table(
|
||||
if data is not None:
|
||||
if metadata is None and schema is not None:
|
||||
metadata = schema.metadata
|
||||
data, schema = _sanitize_data(
|
||||
data = _sanitize_data(
|
||||
data,
|
||||
schema,
|
||||
metadata=metadata,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
fill_value=fill_value,
|
||||
)
|
||||
schema = data.schema
|
||||
else:
|
||||
if schema is not None:
|
||||
data = pa.Table.from_pylist([], schema)
|
||||
@@ -211,6 +342,8 @@ def sanitize_create_table(
|
||||
|
||||
if metadata:
|
||||
schema = schema.with_metadata(metadata)
|
||||
# Need to apply metadata to the data as well
|
||||
data = data.replace_schema_metadata(metadata)
|
||||
|
||||
return data, schema
|
||||
|
||||
@@ -246,12 +379,22 @@ def _to_batches_with_split(data):
|
||||
yield b
|
||||
|
||||
|
||||
def _append_vector_col(data: pa.Table, metadata: dict, schema: Optional[pa.Schema]):
|
||||
def _append_vector_columns(
|
||||
data: pa.Table,
|
||||
schema: Optional[pa.Schema] = None,
|
||||
*,
|
||||
metadata: Optional[dict] = None,
|
||||
) -> pa.Table:
|
||||
"""
|
||||
Use the embedding function to automatically embed the source column and add the
|
||||
vector column to the table.
|
||||
Use the embedding function to automatically embed the source columns and add the
|
||||
vector columns to the table.
|
||||
"""
|
||||
if schema is None:
|
||||
metadata = metadata or {}
|
||||
else:
|
||||
metadata = schema.metadata or metadata or {}
|
||||
functions = EmbeddingFunctionRegistry.get_instance().parse_functions(metadata)
|
||||
|
||||
for vector_column, conf in functions.items():
|
||||
func = conf.function
|
||||
no_vector_column = vector_column not in data.column_names
|
||||
@@ -790,9 +933,9 @@ class Table(ABC):
|
||||
--------
|
||||
>>> import lancedb
|
||||
>>> data = [
|
||||
... {"x": 1, "vector": [1, 2]},
|
||||
... {"x": 2, "vector": [3, 4]},
|
||||
... {"x": 3, "vector": [5, 6]}
|
||||
... {"x": 1, "vector": [1.0, 2]},
|
||||
... {"x": 2, "vector": [3.0, 4]},
|
||||
... {"x": 3, "vector": [5.0, 6]}
|
||||
... ]
|
||||
>>> db = lancedb.connect("./.lancedb")
|
||||
>>> table = db.create_table("my_table", data)
|
||||
@@ -854,7 +997,7 @@ class Table(ABC):
|
||||
--------
|
||||
>>> import lancedb
|
||||
>>> import pandas as pd
|
||||
>>> data = pd.DataFrame({"x": [1, 2, 3], "vector": [[1, 2], [3, 4], [5, 6]]})
|
||||
>>> data = pd.DataFrame({"x": [1, 2, 3], "vector": [[1.0, 2], [3, 4], [5, 6]]})
|
||||
>>> db = lancedb.connect("./.lancedb")
|
||||
>>> table = db.create_table("my_table", data)
|
||||
>>> table.to_pandas()
|
||||
@@ -862,7 +1005,7 @@ class Table(ABC):
|
||||
0 1 [1.0, 2.0]
|
||||
1 2 [3.0, 4.0]
|
||||
2 3 [5.0, 6.0]
|
||||
>>> table.update(where="x = 2", values={"vector": [10, 10]})
|
||||
>>> table.update(where="x = 2", values={"vector": [10.0, 10]})
|
||||
>>> table.to_pandas()
|
||||
x vector
|
||||
0 1 [1.0, 2.0]
|
||||
@@ -1880,9 +2023,9 @@ class LanceTable(Table):
|
||||
--------
|
||||
>>> import lancedb
|
||||
>>> data = [
|
||||
... {"x": 1, "vector": [1, 2]},
|
||||
... {"x": 2, "vector": [3, 4]},
|
||||
... {"x": 3, "vector": [5, 6]}
|
||||
... {"x": 1, "vector": [1.0, 2]},
|
||||
... {"x": 2, "vector": [3.0, 4]},
|
||||
... {"x": 3, "vector": [5.0, 6]}
|
||||
... ]
|
||||
>>> db = lancedb.connect("./.lancedb")
|
||||
>>> table = db.create_table("my_table", data)
|
||||
@@ -1971,7 +2114,7 @@ class LanceTable(Table):
|
||||
--------
|
||||
>>> import lancedb
|
||||
>>> import pandas as pd
|
||||
>>> data = pd.DataFrame({"x": [1, 2, 3], "vector": [[1, 2], [3, 4], [5, 6]]})
|
||||
>>> data = pd.DataFrame({"x": [1, 2, 3], "vector": [[1.0, 2], [3, 4], [5, 6]]})
|
||||
>>> db = lancedb.connect("./.lancedb")
|
||||
>>> table = db.create_table("my_table", data)
|
||||
>>> table.to_pandas()
|
||||
@@ -1979,7 +2122,7 @@ class LanceTable(Table):
|
||||
0 1 [1.0, 2.0]
|
||||
1 2 [3.0, 4.0]
|
||||
2 3 [5.0, 6.0]
|
||||
>>> table.update(where="x = 2", values={"vector": [10, 10]})
|
||||
>>> table.update(where="x = 2", values={"vector": [10.0, 10]})
|
||||
>>> table.to_pandas()
|
||||
x vector
|
||||
0 1 [1.0, 2.0]
|
||||
@@ -2165,74 +2308,49 @@ class LanceTable(Table):
|
||||
LOOP.run(self._table.migrate_v2_manifest_paths())
|
||||
|
||||
|
||||
def _sanitize_schema(
|
||||
data: pa.Table,
|
||||
schema: pa.Schema = None,
|
||||
on_bad_vectors: str = "error",
|
||||
def _handle_bad_vectors(
|
||||
table: pa.Table,
|
||||
on_bad_vectors: Literal["error", "drop", "fill", "null"] = "error",
|
||||
fill_value: float = 0.0,
|
||||
) -> pa.Table:
|
||||
"""Ensure that the table has the expected schema.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
data: pa.Table
|
||||
The table to sanitize.
|
||||
schema: pa.Schema; optional
|
||||
The expected schema. If not provided, this just converts the
|
||||
vector column to fixed_size_list(float32) if necessary.
|
||||
on_bad_vectors: str, default "error"
|
||||
What to do if any of the vectors are not the same size or contains NaNs.
|
||||
One of "error", "drop", "fill", "null".
|
||||
fill_value: float, default 0.
|
||||
The value to use when filling vectors. Only used if on_bad_vectors="fill".
|
||||
"""
|
||||
if schema is not None:
|
||||
# cast the columns to the expected types
|
||||
data = data.combine_chunks()
|
||||
for field in schema:
|
||||
# TODO: we're making an assumption that fixed size list of 10 or more
|
||||
# is a vector column. This is definitely a bit hacky.
|
||||
likely_vector_col = (
|
||||
pa.types.is_fixed_size_list(field.type)
|
||||
and pa.types.is_float32(field.type.value_type)
|
||||
and field.type.list_size >= 10
|
||||
for field in table.schema:
|
||||
# They can provide a 'vector' column that isn't yet a FSL
|
||||
named_vector_col = (
|
||||
(
|
||||
pa.types.is_list(field.type)
|
||||
or pa.types.is_large_list(field.type)
|
||||
or pa.types.is_fixed_size_list(field.type)
|
||||
)
|
||||
is_default_vector_col = field.name == VECTOR_COLUMN_NAME
|
||||
if field.name in data.column_names and (
|
||||
likely_vector_col or is_default_vector_col
|
||||
):
|
||||
data = _sanitize_vector_column(
|
||||
data,
|
||||
vector_column_name=field.name,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
fill_value=fill_value,
|
||||
table_schema=schema,
|
||||
)
|
||||
return pa.Table.from_arrays(
|
||||
[data[name] for name in schema.names], schema=schema
|
||||
and pa.types.is_floating(field.type.value_type)
|
||||
and field.name == VECTOR_COLUMN_NAME
|
||||
)
|
||||
# TODO: we're making an assumption that fixed size list of 10 or more
|
||||
# is a vector column. This is definitely a bit hacky.
|
||||
likely_vector_col = (
|
||||
pa.types.is_fixed_size_list(field.type)
|
||||
and pa.types.is_floating(field.type.value_type)
|
||||
and (field.type.list_size >= 10)
|
||||
)
|
||||
|
||||
# just check the vector column
|
||||
if VECTOR_COLUMN_NAME in data.column_names:
|
||||
return _sanitize_vector_column(
|
||||
data,
|
||||
vector_column_name=VECTOR_COLUMN_NAME,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
fill_value=fill_value,
|
||||
)
|
||||
if named_vector_col or likely_vector_col:
|
||||
table = _handle_bad_vector_column(
|
||||
table,
|
||||
vector_column_name=field.name,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
fill_value=fill_value,
|
||||
)
|
||||
|
||||
return data
|
||||
return table
|
||||
|
||||
|
||||
def _sanitize_vector_column(
|
||||
def _handle_bad_vector_column(
|
||||
data: pa.Table,
|
||||
vector_column_name: str,
|
||||
table_schema: Optional[pa.Schema] = None,
|
||||
on_bad_vectors: str = "error",
|
||||
fill_value: float = 0.0,
|
||||
) -> pa.Table:
|
||||
"""
|
||||
Ensure that the vector column exists and has type fixed_size_list(float32)
|
||||
Ensure that the vector column exists and has type fixed_size_list(float)
|
||||
|
||||
Parameters
|
||||
----------
|
||||
@@ -2246,141 +2364,118 @@ def _sanitize_vector_column(
|
||||
fill_value: float, default 0.0
|
||||
The value to use when filling vectors. Only used if on_bad_vectors="fill".
|
||||
"""
|
||||
# ChunkedArray is annoying to work with, so we combine chunks here
|
||||
vec_arr = data[vector_column_name].combine_chunks()
|
||||
if table_schema is not None:
|
||||
field = table_schema.field(vector_column_name)
|
||||
else:
|
||||
field = None
|
||||
typ = data[vector_column_name].type
|
||||
if pa.types.is_list(typ) or pa.types.is_large_list(typ):
|
||||
# if it's a variable size list array,
|
||||
# we make sure the dimensions are all the same
|
||||
has_jagged_ndims = len(vec_arr.values) % len(data) != 0
|
||||
if has_jagged_ndims:
|
||||
data = _sanitize_jagged(
|
||||
data, fill_value, on_bad_vectors, vec_arr, vector_column_name
|
||||
)
|
||||
vec_arr = data[vector_column_name].combine_chunks()
|
||||
vec_arr = ensure_fixed_size_list(vec_arr)
|
||||
data = data.set_column(
|
||||
data.column_names.index(vector_column_name), vector_column_name, vec_arr
|
||||
)
|
||||
elif not pa.types.is_fixed_size_list(vec_arr.type):
|
||||
raise TypeError(f"Unsupported vector column type: {vec_arr.type}")
|
||||
vec_arr = data[vector_column_name]
|
||||
|
||||
if pa.types.is_float16(vec_arr.values.type):
|
||||
# Use numpy to check for NaNs, because as pyarrow does not have `is_nan`
|
||||
# kernel over f16 types yet.
|
||||
values_np = vec_arr.values.to_numpy(zero_copy_only=True)
|
||||
if np.isnan(values_np).any():
|
||||
data = _sanitize_nans(
|
||||
data, fill_value, on_bad_vectors, vec_arr, vector_column_name
|
||||
)
|
||||
else:
|
||||
if (
|
||||
field is not None
|
||||
and not field.nullable
|
||||
and pc.any(pc.is_null(vec_arr.values)).as_py()
|
||||
) or (pc.any(pc.is_nan(vec_arr.values)).as_py()):
|
||||
data = _sanitize_nans(
|
||||
data, fill_value, on_bad_vectors, vec_arr, vector_column_name
|
||||
)
|
||||
return data
|
||||
has_nan = has_nan_values(vec_arr)
|
||||
|
||||
|
||||
def ensure_fixed_size_list(vec_arr) -> pa.FixedSizeListArray:
|
||||
values = vec_arr.values
|
||||
if not (pa.types.is_float16(values.type) or pa.types.is_float32(values.type)):
|
||||
values = values.cast(pa.float32())
|
||||
if pa.types.is_fixed_size_list(vec_arr.type):
|
||||
list_size = vec_arr.type.list_size
|
||||
dim = vec_arr.type.list_size
|
||||
else:
|
||||
list_size = len(values) / len(vec_arr)
|
||||
vec_arr = pa.FixedSizeListArray.from_arrays(values, list_size)
|
||||
return vec_arr
|
||||
dim = _modal_list_size(vec_arr)
|
||||
has_wrong_dim = pc.not_equal(pc.list_value_length(vec_arr), dim)
|
||||
|
||||
has_bad_vectors = pc.any(has_nan).as_py() or pc.any(has_wrong_dim).as_py()
|
||||
|
||||
def _sanitize_jagged(data, fill_value, on_bad_vectors, vec_arr, vector_column_name):
|
||||
"""Sanitize jagged vectors."""
|
||||
if on_bad_vectors == "error":
|
||||
raise ValueError(
|
||||
f"Vector column {vector_column_name} has variable length vectors "
|
||||
"Set on_bad_vectors='drop' to remove them, or "
|
||||
"set on_bad_vectors='fill' and fill_value=<value> to replace them."
|
||||
)
|
||||
|
||||
lst_lengths = pc.list_value_length(vec_arr)
|
||||
ndims = pc.max(lst_lengths).as_py()
|
||||
correct_ndims = pc.equal(lst_lengths, ndims)
|
||||
|
||||
if on_bad_vectors == "fill":
|
||||
if fill_value is None:
|
||||
raise ValueError(
|
||||
"`fill_value` must not be None if `on_bad_vectors` is 'fill'"
|
||||
if has_bad_vectors:
|
||||
is_bad = pc.or_(has_nan, has_wrong_dim)
|
||||
if on_bad_vectors == "error":
|
||||
if pc.any(has_wrong_dim).as_py():
|
||||
raise ValueError(
|
||||
f"Vector column '{vector_column_name}' has variable length "
|
||||
"vectors. Set on_bad_vectors='drop' to remove them, "
|
||||
"set on_bad_vectors='fill' and fill_value=<value> to replace them, "
|
||||
"or set on_bad_vectors='null' to replace them with null."
|
||||
)
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Vector column '{vector_column_name}' has NaNs. "
|
||||
"Set on_bad_vectors='drop' to remove them, "
|
||||
"set on_bad_vectors='fill' and fill_value=<value> to replace them, "
|
||||
"or set on_bad_vectors='null' to replace them with null."
|
||||
)
|
||||
elif on_bad_vectors == "null":
|
||||
vec_arr = pc.if_else(
|
||||
is_bad,
|
||||
pa.scalar(None),
|
||||
vec_arr,
|
||||
)
|
||||
fill_arr = pa.scalar([float(fill_value)] * ndims)
|
||||
vec_arr = pc.if_else(correct_ndims, vec_arr, fill_arr)
|
||||
data = data.set_column(
|
||||
data.column_names.index(vector_column_name), vector_column_name, vec_arr
|
||||
)
|
||||
elif on_bad_vectors == "drop":
|
||||
data = data.filter(correct_ndims)
|
||||
elif on_bad_vectors == "null":
|
||||
data = data.set_column(
|
||||
data.column_names.index(vector_column_name),
|
||||
vector_column_name,
|
||||
pc.if_else(correct_ndims, vec_arr, pa.scalar(None)),
|
||||
)
|
||||
return data
|
||||
|
||||
|
||||
def _sanitize_nans(
|
||||
data,
|
||||
fill_value,
|
||||
on_bad_vectors,
|
||||
vec_arr: pa.FixedSizeListArray,
|
||||
vector_column_name: str,
|
||||
):
|
||||
"""Sanitize NaNs in vectors"""
|
||||
assert pa.types.is_fixed_size_list(vec_arr.type)
|
||||
if on_bad_vectors == "error":
|
||||
raise ValueError(
|
||||
f"Vector column {vector_column_name} has NaNs. "
|
||||
"Set on_bad_vectors='drop' to remove them, or "
|
||||
"set on_bad_vectors='fill' and fill_value=<value> to replace them. "
|
||||
"Or set on_bad_vectors='null' to replace them with null."
|
||||
)
|
||||
elif on_bad_vectors == "fill":
|
||||
if fill_value is None:
|
||||
raise ValueError(
|
||||
"`fill_value` must not be None if `on_bad_vectors` is 'fill'"
|
||||
elif on_bad_vectors == "drop":
|
||||
data = data.filter(pc.invert(is_bad))
|
||||
vec_arr = data[vector_column_name]
|
||||
elif on_bad_vectors == "fill":
|
||||
if fill_value is None:
|
||||
raise ValueError(
|
||||
"`fill_value` must not be None if `on_bad_vectors` is 'fill'"
|
||||
)
|
||||
vec_arr = pc.if_else(
|
||||
is_bad,
|
||||
pa.scalar([fill_value] * dim),
|
||||
vec_arr,
|
||||
)
|
||||
fill_value = float(fill_value)
|
||||
values = pc.if_else(pc.is_nan(vec_arr.values), fill_value, vec_arr.values)
|
||||
ndims = len(vec_arr[0])
|
||||
vec_arr = pa.FixedSizeListArray.from_arrays(values, ndims)
|
||||
data = data.set_column(
|
||||
data.column_names.index(vector_column_name), vector_column_name, vec_arr
|
||||
)
|
||||
elif on_bad_vectors == "drop":
|
||||
# Drop is very slow to be able to filter out NaNs in a fixed size list array
|
||||
np_arr = np.isnan(vec_arr.values.to_numpy(zero_copy_only=False))
|
||||
np_arr = np_arr.reshape(-1, vec_arr.type.list_size)
|
||||
not_nulls = np.any(np_arr, axis=1)
|
||||
data = data.filter(~not_nulls)
|
||||
elif on_bad_vectors == "null":
|
||||
# null = pa.nulls(len(vec_arr)).cast(vec_arr.type)
|
||||
# values = pc.if_else(pc.is_nan(vec_arr.values), fill_value, vec_arr.values)
|
||||
np_arr = np.isnan(vec_arr.values.to_numpy(zero_copy_only=False))
|
||||
np_arr = np_arr.reshape(-1, vec_arr.type.list_size)
|
||||
no_nans = np.any(np_arr, axis=1)
|
||||
data = data.set_column(
|
||||
data.column_names.index(vector_column_name),
|
||||
vector_column_name,
|
||||
pc.if_else(no_nans, vec_arr, pa.scalar(None)),
|
||||
)
|
||||
return data
|
||||
else:
|
||||
raise ValueError(f"Invalid value for on_bad_vectors: {on_bad_vectors}")
|
||||
|
||||
position = data.column_names.index(vector_column_name)
|
||||
return data.set_column(position, vector_column_name, vec_arr)
|
||||
|
||||
|
||||
def has_nan_values(arr: Union[pa.ListArray, pa.ChunkedArray]) -> pa.BooleanArray:
|
||||
if isinstance(arr, pa.ChunkedArray):
|
||||
values = pa.chunked_array([chunk.flatten() for chunk in arr.chunks])
|
||||
else:
|
||||
values = arr.flatten()
|
||||
if pa.types.is_float16(values.type):
|
||||
# is_nan isn't yet implemented for f16, so we cast to f32
|
||||
# https://github.com/apache/arrow/issues/45083
|
||||
values_has_nan = pc.is_nan(values.cast(pa.float32()))
|
||||
else:
|
||||
values_has_nan = pc.is_nan(values)
|
||||
values_indices = pc.list_parent_indices(arr)
|
||||
has_nan_indices = pc.unique(pc.filter(values_indices, values_has_nan))
|
||||
indices = pa.array(range(len(arr)), type=pa.uint32())
|
||||
return pc.is_in(indices, has_nan_indices)
|
||||
|
||||
|
||||
def _infer_target_schema(table: pa.Table) -> pa.Schema:
|
||||
schema = table.schema
|
||||
|
||||
for i, field in enumerate(schema):
|
||||
if (
|
||||
field.name == VECTOR_COLUMN_NAME
|
||||
and (pa.types.is_list(field.type) or pa.types.is_large_list(field.type))
|
||||
and pa.types.is_floating(field.type.value_type)
|
||||
):
|
||||
# Use the most common length of the list as the dimensions
|
||||
dim = _modal_list_size(table.column(i))
|
||||
|
||||
new_field = pa.field(
|
||||
VECTOR_COLUMN_NAME,
|
||||
pa.list_(pa.float32(), dim),
|
||||
nullable=field.nullable,
|
||||
)
|
||||
|
||||
schema = schema.set(i, new_field)
|
||||
elif (
|
||||
field.name == VECTOR_COLUMN_NAME
|
||||
and (pa.types.is_list(field.type) or pa.types.is_large_list(field.type))
|
||||
and pa.types.is_integer(field.type.value_type)
|
||||
):
|
||||
# Use the most common length of the list as the dimensions
|
||||
dim = _modal_list_size(table.column(i))
|
||||
new_field = pa.field(
|
||||
VECTOR_COLUMN_NAME,
|
||||
pa.list_(pa.uint8(), dim),
|
||||
nullable=field.nullable,
|
||||
)
|
||||
|
||||
schema = schema.set(i, new_field)
|
||||
|
||||
return schema
|
||||
|
||||
|
||||
def _modal_list_size(arr: Union[pa.ListArray, pa.ChunkedArray]) -> int:
|
||||
# Use the most common length of the list as the dimensions
|
||||
return pc.mode(pc.list_value_length(arr))[0].as_py()["mode"]
|
||||
|
||||
|
||||
def _validate_schema(schema: pa.Schema):
|
||||
@@ -2410,28 +2505,6 @@ def _validate_metadata(metadata: dict):
|
||||
_validate_metadata(v)
|
||||
|
||||
|
||||
def _process_iterator(data: Iterable, schema: Optional[pa.Schema] = None) -> pa.Table:
|
||||
batches = []
|
||||
for batch in data:
|
||||
batch_table = _coerce_to_table(batch, schema)
|
||||
if schema is not None:
|
||||
if batch_table.schema != schema:
|
||||
try:
|
||||
batch_table = batch_table.cast(schema)
|
||||
except pa.lib.ArrowInvalid: # type: ignore
|
||||
raise ValueError(
|
||||
f"Input iterator yielded a batch with schema that "
|
||||
f"does not match the expected schema.\nExpected:\n{schema}\n"
|
||||
f"Got:\n{batch_table.schema}"
|
||||
)
|
||||
batches.append(batch_table)
|
||||
|
||||
if batches:
|
||||
return pa.concat_tables(batches)
|
||||
else:
|
||||
raise ValueError("Input iterable is empty")
|
||||
|
||||
|
||||
class AsyncTable:
|
||||
"""
|
||||
An AsyncTable is a collection of Records in a LanceDB Database.
|
||||
@@ -2678,16 +2751,17 @@ class AsyncTable:
|
||||
on_bad_vectors = "error"
|
||||
if fill_value is None:
|
||||
fill_value = 0.0
|
||||
table_and_schema: Tuple[pa.Table, pa.Schema] = _sanitize_data(
|
||||
data = _sanitize_data(
|
||||
data,
|
||||
schema,
|
||||
metadata=schema.metadata,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
fill_value=fill_value,
|
||||
allow_subschema=True,
|
||||
)
|
||||
tbl, schema = table_and_schema
|
||||
if isinstance(tbl, pa.Table):
|
||||
data = pa.RecordBatchReader.from_batches(schema, tbl.to_batches())
|
||||
if isinstance(data, pa.Table):
|
||||
data = data.to_reader()
|
||||
|
||||
await self._inner.add(data, mode or "append")
|
||||
|
||||
def merge_insert(self, on: Union[str, Iterable[str]]) -> LanceMergeInsertBuilder:
|
||||
@@ -2822,12 +2896,13 @@ class AsyncTable:
|
||||
on_bad_vectors = "error"
|
||||
if fill_value is None:
|
||||
fill_value = 0.0
|
||||
data, _ = _sanitize_data(
|
||||
data = _sanitize_data(
|
||||
new_data,
|
||||
schema,
|
||||
metadata=schema.metadata,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
fill_value=fill_value,
|
||||
allow_subschema=True,
|
||||
)
|
||||
if isinstance(data, pa.Table):
|
||||
data = pa.RecordBatchReader.from_batches(data.schema, data.to_batches())
|
||||
@@ -2862,9 +2937,9 @@ class AsyncTable:
|
||||
--------
|
||||
>>> import lancedb
|
||||
>>> data = [
|
||||
... {"x": 1, "vector": [1, 2]},
|
||||
... {"x": 2, "vector": [3, 4]},
|
||||
... {"x": 3, "vector": [5, 6]}
|
||||
... {"x": 1, "vector": [1.0, 2]},
|
||||
... {"x": 2, "vector": [3.0, 4]},
|
||||
... {"x": 3, "vector": [5.0, 6]}
|
||||
... ]
|
||||
>>> db = lancedb.connect("./.lancedb")
|
||||
>>> table = db.create_table("my_table", data)
|
||||
|
||||
@@ -174,6 +174,38 @@ def safe_import_polars():
|
||||
return None
|
||||
|
||||
|
||||
def flatten_columns(tbl: pa.Table, flatten: Optional[Union[int, bool]] = None):
|
||||
"""
|
||||
Flatten all struct columns in a table.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
flatten: Optional[Union[int, bool]]
|
||||
If flatten is True, flatten all nested columns.
|
||||
If flatten is an integer, flatten the nested columns up to the
|
||||
specified depth.
|
||||
If unspecified, do not flatten the nested columns.
|
||||
"""
|
||||
if flatten is True:
|
||||
while True:
|
||||
tbl = tbl.flatten()
|
||||
# loop through all columns to check if there is any struct column
|
||||
if any(pa.types.is_struct(col.type) for col in tbl.schema):
|
||||
continue
|
||||
else:
|
||||
break
|
||||
elif isinstance(flatten, int):
|
||||
if flatten <= 0:
|
||||
raise ValueError(
|
||||
"Please specify a positive integer for flatten or the boolean "
|
||||
"value `True`"
|
||||
)
|
||||
while flatten > 0:
|
||||
tbl = tbl.flatten()
|
||||
flatten -= 1
|
||||
return tbl
|
||||
|
||||
|
||||
def inf_vector_column_query(schema: pa.Schema) -> str:
|
||||
"""
|
||||
Get the vector column name
|
||||
@@ -191,9 +223,7 @@ def inf_vector_column_query(schema: pa.Schema) -> str:
|
||||
vector_col_count = 0
|
||||
for field_name in schema.names:
|
||||
field = schema.field(field_name)
|
||||
if pa.types.is_fixed_size_list(field.type) and pa.types.is_floating(
|
||||
field.type.value_type
|
||||
):
|
||||
if pa.types.is_fixed_size_list(field.type):
|
||||
vector_col_count += 1
|
||||
if vector_col_count > 1:
|
||||
raise ValueError(
|
||||
|
||||
@@ -125,7 +125,7 @@ async def test_quickstart_async():
|
||||
|
||||
# --8<-- [start:create_table_async]
|
||||
# Asynchronous client
|
||||
async_tbl = await async_db.create_table("my_table2", data=data)
|
||||
async_tbl = await async_db.create_table("my_table_async", data=data)
|
||||
# --8<-- [end:create_table_async]
|
||||
|
||||
df = pd.DataFrame(
|
||||
@@ -137,17 +137,17 @@ async def test_quickstart_async():
|
||||
|
||||
# --8<-- [start:create_table_async_pandas]
|
||||
# Asynchronous client
|
||||
async_tbl = await async_db.create_table("table_from_df2", df)
|
||||
async_tbl = await async_db.create_table("table_from_df_async", df)
|
||||
# --8<-- [end:create_table_async_pandas]
|
||||
|
||||
schema = pa.schema([pa.field("vector", pa.list_(pa.float32(), list_size=2))])
|
||||
# --8<-- [start:create_empty_table_async]
|
||||
# Asynchronous client
|
||||
async_tbl = await async_db.create_table("empty_table2", schema=schema)
|
||||
async_tbl = await async_db.create_table("empty_table_async", schema=schema)
|
||||
# --8<-- [end:create_empty_table_async]
|
||||
# --8<-- [start:open_table_async]
|
||||
# Asynchronous client
|
||||
async_tbl = await async_db.open_table("my_table2")
|
||||
async_tbl = await async_db.open_table("my_table_async")
|
||||
# --8<-- [end:open_table_async]
|
||||
# --8<-- [start:table_names_async]
|
||||
# Asynchronous client
|
||||
@@ -161,6 +161,22 @@ async def test_quickstart_async():
|
||||
data = [{"vector": [x, x], "item": "filler", "price": x * x} for x in range(1000)]
|
||||
await async_tbl.add(data)
|
||||
# --8<-- [start:vector_search_async]
|
||||
# --8<-- [start:add_columns_async]
|
||||
await async_tbl.add_columns({"double_price": "cast((price * 2) as float)"})
|
||||
# --8<-- [end:add_columns_async]
|
||||
# --8<-- [start:alter_columns_async]
|
||||
await async_tbl.alter_columns(
|
||||
{
|
||||
"path": "double_price",
|
||||
"rename": "dbl_price",
|
||||
"data_type": pa.float64(),
|
||||
"nullable": True,
|
||||
}
|
||||
)
|
||||
# --8<-- [end:alter_columns_async]
|
||||
# --8<-- [start:drop_columns_async]
|
||||
await async_tbl.drop_columns(["dbl_price"])
|
||||
# --8<-- [end:drop_columns_async]
|
||||
# Asynchronous client
|
||||
await async_tbl.vector_search([100, 100]).limit(2).to_pandas()
|
||||
# --8<-- [end:vector_search_async]
|
||||
@@ -174,5 +190,5 @@ async def test_quickstart_async():
|
||||
# --8<-- [end:delete_rows_async]
|
||||
# --8<-- [start:drop_table_async]
|
||||
# Asynchronous client
|
||||
await async_db.drop_table("my_table2")
|
||||
await async_db.drop_table("my_table_async")
|
||||
# --8<-- [end:drop_table_async]
|
||||
|
||||
@@ -21,7 +21,7 @@ def test_binary_vector():
|
||||
]
|
||||
tbl = db.create_table("my_binary_vectors", data=data)
|
||||
query = np.random.randint(0, 256, size=16)
|
||||
tbl.search(query).to_arrow()
|
||||
tbl.search(query).metric("hamming").to_arrow()
|
||||
# --8<-- [end:sync_binary_vector]
|
||||
db.drop_table("my_binary_vectors")
|
||||
|
||||
@@ -39,6 +39,6 @@ async def test_binary_vector_async():
|
||||
]
|
||||
tbl = await db.create_table("my_binary_vectors", data=data)
|
||||
query = np.random.randint(0, 256, size=16)
|
||||
await tbl.query().nearest_to(query).to_arrow()
|
||||
await tbl.query().nearest_to(query).distance_type("hamming").to_arrow()
|
||||
# --8<-- [end:async_binary_vector]
|
||||
await db.drop_table("my_binary_vectors")
|
||||
|
||||
62
python/python/tests/docs/test_distance_range.py
Normal file
62
python/python/tests/docs/test_distance_range.py
Normal file
@@ -0,0 +1,62 @@
|
||||
import shutil
|
||||
import pytest
|
||||
|
||||
# --8<-- [start:imports]
|
||||
import lancedb
|
||||
import numpy as np
|
||||
# --8<-- [end:imports]
|
||||
|
||||
shutil.rmtree("data/distance_range_demo", ignore_errors=True)
|
||||
|
||||
|
||||
def test_binary_vector():
|
||||
# --8<-- [start:sync_distance_range]
|
||||
db = lancedb.connect("data/distance_range_demo")
|
||||
data = [
|
||||
{
|
||||
"id": i,
|
||||
"vector": np.random.random(256),
|
||||
}
|
||||
for i in range(1024)
|
||||
]
|
||||
tbl = db.create_table("my_table", data=data)
|
||||
query = np.random.random(256)
|
||||
|
||||
# Search for the vectors within the range of [0.1, 0.5)
|
||||
tbl.search(query).distance_range(0.1, 0.5).to_arrow()
|
||||
|
||||
# Search for the vectors with the distance less than 0.5
|
||||
tbl.search(query).distance_range(upper_bound=0.5).to_arrow()
|
||||
|
||||
# Search for the vectors with the distance greater or equal to 0.1
|
||||
tbl.search(query).distance_range(lower_bound=0.1).to_arrow()
|
||||
|
||||
# --8<-- [end:sync_distance_range]
|
||||
db.drop_table("my_table")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_binary_vector_async():
|
||||
# --8<-- [start:async_distance_range]
|
||||
db = await lancedb.connect_async("data/distance_range_demo")
|
||||
data = [
|
||||
{
|
||||
"id": i,
|
||||
"vector": np.random.random(256),
|
||||
}
|
||||
for i in range(1024)
|
||||
]
|
||||
tbl = await db.create_table("my_table", data=data)
|
||||
query = np.random.random(256)
|
||||
|
||||
# Search for the vectors within the range of [0.1, 0.5)
|
||||
await tbl.query().nearest_to(query).distance_range(0.1, 0.5).to_arrow()
|
||||
|
||||
# Search for the vectors with the distance less than 0.5
|
||||
await tbl.query().nearest_to(query).distance_range(upper_bound=0.5).to_arrow()
|
||||
|
||||
# Search for the vectors with the distance greater or equal to 0.1
|
||||
await tbl.query().nearest_to(query).distance_range(lower_bound=0.1).to_arrow()
|
||||
|
||||
# --8<-- [end:async_distance_range]
|
||||
await db.drop_table("my_table")
|
||||
169
python/python/tests/docs/test_guide_index.py
Normal file
169
python/python/tests/docs/test_guide_index.py
Normal file
@@ -0,0 +1,169 @@
|
||||
# --8<-- [start:import-lancedb]
|
||||
import lancedb
|
||||
|
||||
# --8<-- [end:import-lancedb]
|
||||
# --8<-- [start:import-lancedb-ivfpq]
|
||||
from lancedb.index import IvfPq
|
||||
|
||||
# --8<-- [end:import-lancedb-ivfpq]
|
||||
# --8<-- [start:import-lancedb-btree-bitmap]
|
||||
from lancedb.index import BTree, Bitmap
|
||||
|
||||
# --8<-- [end:import-lancedb-btree-bitmap]
|
||||
# --8<-- [start:import-numpy]
|
||||
import numpy as np
|
||||
|
||||
# --8<-- [end:import-numpy]
|
||||
import pytest
|
||||
|
||||
|
||||
def test_ann_index():
|
||||
# --8<-- [start:create_ann_index]
|
||||
uri = "data/sample-lancedb"
|
||||
|
||||
# Create 5,000 sample vectors
|
||||
data = [
|
||||
{"vector": row, "item": f"item {i}"}
|
||||
for i, row in enumerate(np.random.random((5_000, 32)).astype("float32"))
|
||||
]
|
||||
|
||||
db = lancedb.connect(uri)
|
||||
# Add the vectors to a table
|
||||
tbl = db.create_table("my_vectors", data=data)
|
||||
# Create and train the index - you need to have enough data in the table
|
||||
# for an effective training step
|
||||
tbl.create_index(num_partitions=2, num_sub_vectors=4)
|
||||
# --8<-- [end:create_ann_index]
|
||||
# --8<-- [start:vector_search]
|
||||
tbl.search(np.random.random((32))).limit(2).nprobes(20).refine_factor(
|
||||
10
|
||||
).to_pandas()
|
||||
# --8<-- [end:vector_search]
|
||||
# --8<-- [start:vector_search_with_filter]
|
||||
tbl.search(np.random.random((32))).where("item != 'item 1141'").to_pandas()
|
||||
# --8<-- [end:vector_search_with_filter]
|
||||
# --8<-- [start:vector_search_with_select]
|
||||
tbl.search(np.random.random((32))).select(["vector"]).to_pandas()
|
||||
# --8<-- [end:vector_search_with_select]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_ann_index_async():
|
||||
# --8<-- [start:create_ann_index_async]
|
||||
uri = "data/sample-lancedb"
|
||||
|
||||
# Create 5,000 sample vectors
|
||||
data = [
|
||||
{"vector": row, "item": f"item {i}"}
|
||||
for i, row in enumerate(np.random.random((5_000, 32)).astype("float32"))
|
||||
]
|
||||
|
||||
async_db = await lancedb.connect_async(uri)
|
||||
# Add the vectors to a table
|
||||
async_tbl = await async_db.create_table("my_vectors_async", data=data)
|
||||
# Create and train the index - you need to have enough data in the table
|
||||
# for an effective training step
|
||||
await async_tbl.create_index(
|
||||
"vector", config=IvfPq(num_partitions=2, num_sub_vectors=4)
|
||||
)
|
||||
# --8<-- [end:create_ann_index_async]
|
||||
# --8<-- [start:vector_search_async]
|
||||
await (
|
||||
async_tbl.query()
|
||||
.nearest_to(np.random.random((32)))
|
||||
.limit(2)
|
||||
.nprobes(20)
|
||||
.refine_factor(10)
|
||||
.to_pandas()
|
||||
)
|
||||
# --8<-- [end:vector_search_async]
|
||||
# --8<-- [start:vector_search_async_with_filter]
|
||||
await (
|
||||
async_tbl.query()
|
||||
.nearest_to(np.random.random((32)))
|
||||
.where("item != 'item 1141'")
|
||||
.to_pandas()
|
||||
)
|
||||
# --8<-- [end:vector_search_async_with_filter]
|
||||
# --8<-- [start:vector_search_async_with_select]
|
||||
await (
|
||||
async_tbl.query()
|
||||
.nearest_to(np.random.random((32)))
|
||||
.select(["vector"])
|
||||
.to_pandas()
|
||||
)
|
||||
# --8<-- [end:vector_search_async_with_select]
|
||||
|
||||
|
||||
def test_scalar_index():
|
||||
# --8<-- [start:basic_scalar_index]
|
||||
uri = "data/sample-lancedb"
|
||||
db = lancedb.connect(uri)
|
||||
books = [
|
||||
{
|
||||
"book_id": 1,
|
||||
"publisher": "plenty of books",
|
||||
"tags": ["fantasy", "adventure"],
|
||||
},
|
||||
{"book_id": 2, "publisher": "book town", "tags": ["non-fiction"]},
|
||||
{"book_id": 3, "publisher": "oreilly", "tags": ["textbook"]},
|
||||
]
|
||||
table = db.create_table("books", books)
|
||||
table.create_scalar_index("book_id") # BTree by default
|
||||
table.create_scalar_index("publisher", index_type="BITMAP")
|
||||
# --8<-- [end:basic_scalar_index]
|
||||
# --8<-- [start:search_with_scalar_index]
|
||||
table = db.open_table("books")
|
||||
table.search().where("book_id = 2").to_pandas()
|
||||
# --8<-- [end:search_with_scalar_index]
|
||||
# --8<-- [start:vector_search_with_scalar_index]
|
||||
data = [
|
||||
{"book_id": 1, "vector": [1.0, 2]},
|
||||
{"book_id": 2, "vector": [3.0, 4]},
|
||||
{"book_id": 3, "vector": [5.0, 6]},
|
||||
]
|
||||
|
||||
table = db.create_table("book_with_embeddings", data)
|
||||
(table.search([1, 2]).where("book_id != 3", prefilter=True).to_pandas())
|
||||
# --8<-- [end:vector_search_with_scalar_index]
|
||||
# --8<-- [start:update_scalar_index]
|
||||
table.add([{"vector": [7, 8], "book_id": 4}])
|
||||
table.optimize()
|
||||
# --8<-- [end:update_scalar_index]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scalar_index_async():
|
||||
# --8<-- [start:basic_scalar_index_async]
|
||||
uri = "data/sample-lancedb"
|
||||
async_db = await lancedb.connect_async(uri)
|
||||
books = [
|
||||
{
|
||||
"book_id": 1,
|
||||
"publisher": "plenty of books",
|
||||
"tags": ["fantasy", "adventure"],
|
||||
},
|
||||
{"book_id": 2, "publisher": "book town", "tags": ["non-fiction"]},
|
||||
{"book_id": 3, "publisher": "oreilly", "tags": ["textbook"]},
|
||||
]
|
||||
async_tbl = await async_db.create_table("books_async", books)
|
||||
await async_tbl.create_index("book_id", config=BTree()) # BTree by default
|
||||
await async_tbl.create_index("publisher", config=Bitmap())
|
||||
# --8<-- [end:basic_scalar_index_async]
|
||||
# --8<-- [start:search_with_scalar_index_async]
|
||||
async_tbl = await async_db.open_table("books_async")
|
||||
await async_tbl.query().where("book_id = 2").to_pandas()
|
||||
# --8<-- [end:search_with_scalar_index_async]
|
||||
# --8<-- [start:vector_search_with_scalar_index_async]
|
||||
data = [
|
||||
{"book_id": 1, "vector": [1.0, 2]},
|
||||
{"book_id": 2, "vector": [3.0, 4]},
|
||||
{"book_id": 3, "vector": [5.0, 6]},
|
||||
]
|
||||
async_tbl = await async_db.create_table("book_with_embeddings_async", data)
|
||||
(await async_tbl.query().where("book_id != 3").nearest_to([1, 2]).to_pandas())
|
||||
# --8<-- [end:vector_search_with_scalar_index_async]
|
||||
# --8<-- [start:update_scalar_index_async]
|
||||
await async_tbl.add([{"vector": [7, 8], "book_id": 4}])
|
||||
await async_tbl.optimize()
|
||||
# --8<-- [end:update_scalar_index_async]
|
||||
576
python/python/tests/docs/test_guide_tables.py
Normal file
576
python/python/tests/docs/test_guide_tables.py
Normal file
@@ -0,0 +1,576 @@
|
||||
# --8<-- [start:import-lancedb]
|
||||
import lancedb
|
||||
|
||||
# --8<-- [end:import-lancedb]
|
||||
# --8<-- [start:import-pandas]
|
||||
import pandas as pd
|
||||
|
||||
# --8<-- [end:import-pandas]
|
||||
# --8<-- [start:import-pyarrow]
|
||||
import pyarrow as pa
|
||||
|
||||
# --8<-- [end:import-pyarrow]
|
||||
# --8<-- [start:import-polars]
|
||||
import polars as pl
|
||||
|
||||
# --8<-- [end:import-polars]
|
||||
# --8<-- [start:import-numpy]
|
||||
import numpy as np
|
||||
|
||||
# --8<-- [end:import-numpy]
|
||||
# --8<-- [start:import-lancedb-pydantic]
|
||||
from lancedb.pydantic import Vector, LanceModel
|
||||
|
||||
# --8<-- [end:import-lancedb-pydantic]
|
||||
# --8<-- [start:import-datetime]
|
||||
from datetime import timedelta
|
||||
|
||||
# --8<-- [end:import-datetime]
|
||||
# --8<-- [start:import-embeddings]
|
||||
from lancedb.embeddings import get_registry
|
||||
|
||||
# --8<-- [end:import-embeddings]
|
||||
# --8<-- [start:import-pydantic-basemodel]
|
||||
from pydantic import BaseModel
|
||||
|
||||
# --8<-- [end:import-pydantic-basemodel]
|
||||
import pytest
|
||||
|
||||
|
||||
# --8<-- [start:class-Content]
|
||||
class Content(LanceModel):
|
||||
movie_id: int
|
||||
vector: Vector(128)
|
||||
genres: str
|
||||
title: str
|
||||
imdb_id: int
|
||||
|
||||
@property
|
||||
def imdb_url(self) -> str:
|
||||
return f"https://www.imdb.com/title/tt{self.imdb_id}"
|
||||
|
||||
|
||||
# --8<-- [end:class-Content]
|
||||
# --8<-- [start:class-Document]
|
||||
class Document(BaseModel):
|
||||
content: str
|
||||
source: str
|
||||
|
||||
|
||||
# --8<-- [end:class-Document]
|
||||
# --8<-- [start:class-NestedSchema]
|
||||
class NestedSchema(LanceModel):
|
||||
id: str
|
||||
vector: Vector(1536)
|
||||
document: Document
|
||||
|
||||
|
||||
# --8<-- [end:class-NestedSchema]
|
||||
# --8<-- [start:class-Item]
|
||||
class Item(LanceModel):
|
||||
vector: Vector(2)
|
||||
item: str
|
||||
price: float
|
||||
|
||||
|
||||
# --8<-- [end:class-Item]
|
||||
|
||||
|
||||
# --8<-- [start:make_batches]
|
||||
def make_batches():
|
||||
for i in range(5):
|
||||
yield pa.RecordBatch.from_arrays(
|
||||
[
|
||||
pa.array(
|
||||
[[3.1, 4.1, 5.1, 6.1], [5.9, 26.5, 4.7, 32.8]],
|
||||
pa.list_(pa.float32(), 4),
|
||||
),
|
||||
pa.array(["foo", "bar"]),
|
||||
pa.array([10.0, 20.0]),
|
||||
],
|
||||
["vector", "item", "price"],
|
||||
)
|
||||
|
||||
|
||||
# --8<-- [end:make_batches]
|
||||
|
||||
|
||||
# --8<-- [start:make_batches_for_add]
|
||||
def make_batches_for_add():
|
||||
for i in range(5):
|
||||
yield [
|
||||
{"vector": [3.1, 4.1], "item": "peach", "price": 6.0},
|
||||
{"vector": [5.9, 26.5], "item": "pear", "price": 5.0},
|
||||
]
|
||||
|
||||
|
||||
# --8<-- [end:make_batches_for_add]
|
||||
|
||||
|
||||
def test_table():
|
||||
# --8<-- [start:connect]
|
||||
uri = "data/sample-lancedb"
|
||||
db = lancedb.connect(uri)
|
||||
# --8<-- [end:connect]
|
||||
# --8<-- [start:create_table]
|
||||
data = [
|
||||
{"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7},
|
||||
{"vector": [0.2, 1.8], "lat": 40.1, "long": -74.1},
|
||||
]
|
||||
db.create_table("test_table", data)
|
||||
db["test_table"].head()
|
||||
# --8<-- [end:create_table]
|
||||
# --8<-- [start:create_table_exist_ok]
|
||||
db.create_table("test_table", data, exist_ok=True)
|
||||
# --8<-- [end:create_table_exist_ok]
|
||||
# --8<-- [start:create_table_overwrite]
|
||||
db.create_table("test_table", data, mode="overwrite")
|
||||
# --8<-- [end:create_table_overwrite]
|
||||
# --8<-- [start:create_table_from_pandas]
|
||||
data = pd.DataFrame(
|
||||
{
|
||||
"vector": [[1.1, 1.2, 1.3, 1.4], [0.2, 1.8, 0.4, 3.6]],
|
||||
"lat": [45.5, 40.1],
|
||||
"long": [-122.7, -74.1],
|
||||
}
|
||||
)
|
||||
db.create_table("my_table_pandas", data)
|
||||
db["my_table_pandas"].head()
|
||||
# --8<-- [end:create_table_from_pandas]
|
||||
# --8<-- [start:create_table_custom_schema]
|
||||
custom_schema = pa.schema(
|
||||
[
|
||||
pa.field("vector", pa.list_(pa.float32(), 4)),
|
||||
pa.field("lat", pa.float32()),
|
||||
pa.field("long", pa.float32()),
|
||||
]
|
||||
)
|
||||
|
||||
tbl = db.create_table("my_table_custom_schema", data, schema=custom_schema)
|
||||
# --8<-- [end:create_table_custom_schema]
|
||||
# --8<-- [start:create_table_from_polars]
|
||||
data = pl.DataFrame(
|
||||
{
|
||||
"vector": [[3.1, 4.1], [5.9, 26.5]],
|
||||
"item": ["foo", "bar"],
|
||||
"price": [10.0, 20.0],
|
||||
}
|
||||
)
|
||||
tbl = db.create_table("my_table_pl", data)
|
||||
# --8<-- [end:create_table_from_polars]
|
||||
# --8<-- [start:create_table_from_arrow_table]
|
||||
dim = 16
|
||||
total = 2
|
||||
schema = pa.schema(
|
||||
[pa.field("vector", pa.list_(pa.float16(), dim)), pa.field("text", pa.string())]
|
||||
)
|
||||
data = pa.Table.from_arrays(
|
||||
[
|
||||
pa.array(
|
||||
[np.random.randn(dim).astype(np.float16) for _ in range(total)],
|
||||
pa.list_(pa.float16(), dim),
|
||||
),
|
||||
pa.array(["foo", "bar"]),
|
||||
],
|
||||
["vector", "text"],
|
||||
)
|
||||
tbl = db.create_table("f16_tbl", data, schema=schema)
|
||||
# --8<-- [end:create_table_from_arrow_table]
|
||||
# --8<-- [start:create_table_from_pydantic]
|
||||
tbl = db.create_table("movielens_small", schema=Content)
|
||||
# --8<-- [end:create_table_from_pydantic]
|
||||
# --8<-- [start:create_table_nested_schema]
|
||||
tbl = db.create_table("nested_table", schema=NestedSchema)
|
||||
# --8<-- [end:create_table_nested_schema]
|
||||
# --8<-- [start:create_table_from_batch]
|
||||
schema = pa.schema(
|
||||
[
|
||||
pa.field("vector", pa.list_(pa.float32(), 4)),
|
||||
pa.field("item", pa.utf8()),
|
||||
pa.field("price", pa.float32()),
|
||||
]
|
||||
)
|
||||
db.create_table("batched_tale", make_batches(), schema=schema)
|
||||
# --8<-- [end:create_table_from_batch]
|
||||
# --8<-- [start:list_tables]
|
||||
print(db.table_names())
|
||||
# --8<-- [end:list_tables]
|
||||
# --8<-- [start:open_table]
|
||||
tbl = db.open_table("test_table")
|
||||
# --8<-- [end:open_table]
|
||||
# --8<-- [start:create_empty_table]
|
||||
schema = pa.schema(
|
||||
[
|
||||
pa.field("vector", pa.list_(pa.float32(), 2)),
|
||||
pa.field("item", pa.string()),
|
||||
pa.field("price", pa.float32()),
|
||||
]
|
||||
)
|
||||
tbl = db.create_table("test_empty_table", schema=schema)
|
||||
# --8<-- [end:create_empty_table]
|
||||
# --8<-- [start:create_empty_table_pydantic]
|
||||
tbl = db.create_table("test_empty_table_new", schema=Item.to_arrow_schema())
|
||||
# --8<-- [end:create_empty_table_pydantic]
|
||||
# --8<-- [start:add_table_from_pandas]
|
||||
df = pd.DataFrame(
|
||||
{
|
||||
"vector": [[1.3, 1.4], [9.5, 56.2]],
|
||||
"item": ["banana", "apple"],
|
||||
"price": [5.0, 7.0],
|
||||
}
|
||||
)
|
||||
|
||||
tbl.add(df)
|
||||
# --8<-- [end:add_table_from_pandas]
|
||||
# --8<-- [start:add_table_from_polars]
|
||||
df = pl.DataFrame(
|
||||
{
|
||||
"vector": [[1.3, 1.4], [9.5, 56.2]],
|
||||
"item": ["banana", "apple"],
|
||||
"price": [5.0, 7.0],
|
||||
}
|
||||
)
|
||||
|
||||
tbl.add(df)
|
||||
# --8<-- [end:add_table_from_polars]
|
||||
# --8<-- [start:add_table_from_batch]
|
||||
tbl.add(make_batches_for_add())
|
||||
# --8<-- [end:add_table_from_batch]
|
||||
# --8<-- [start:add_table_from_pyarrow]
|
||||
pa_table = pa.Table.from_arrays(
|
||||
[
|
||||
pa.array([[9.1, 6.7], [9.9, 31.2]], pa.list_(pa.float32(), 2)),
|
||||
pa.array(["mango", "orange"]),
|
||||
pa.array([7.0, 4.0]),
|
||||
],
|
||||
["vector", "item", "price"],
|
||||
)
|
||||
tbl.add(pa_table)
|
||||
# --8<-- [end:add_table_from_pyarrow]
|
||||
# --8<-- [start:add_table_from_pydantic]
|
||||
pydantic_model_items = [
|
||||
Item(vector=[8.1, 4.7], item="pineapple", price=10.0),
|
||||
Item(vector=[6.9, 9.3], item="avocado", price=9.0),
|
||||
]
|
||||
tbl.add(pydantic_model_items)
|
||||
# --8<-- [end:add_table_from_pydantic]
|
||||
# --8<-- [start:delete_row]
|
||||
tbl.delete('item = "fizz"')
|
||||
# --8<-- [end:delete_row]
|
||||
# --8<-- [start:delete_specific_row]
|
||||
data = [
|
||||
{"x": 1, "vector": [1, 2]},
|
||||
{"x": 2, "vector": [3, 4]},
|
||||
{"x": 3, "vector": [5, 6]},
|
||||
]
|
||||
# Synchronous client
|
||||
tbl = db.create_table("delete_row", data)
|
||||
tbl.to_pandas()
|
||||
# x vector
|
||||
# 0 1 [1.0, 2.0]
|
||||
# 1 2 [3.0, 4.0]
|
||||
# 2 3 [5.0, 6.0]
|
||||
|
||||
tbl.delete("x = 2")
|
||||
tbl.to_pandas()
|
||||
# x vector
|
||||
# 0 1 [1.0, 2.0]
|
||||
# 1 3 [5.0, 6.0]
|
||||
# --8<-- [end:delete_specific_row]
|
||||
# --8<-- [start:delete_list_values]
|
||||
to_remove = [1, 5]
|
||||
to_remove = ", ".join(str(v) for v in to_remove)
|
||||
|
||||
tbl.delete(f"x IN ({to_remove})")
|
||||
tbl.to_pandas()
|
||||
# x vector
|
||||
# 0 3 [5.0, 6.0]
|
||||
# --8<-- [end:delete_list_values]
|
||||
# --8<-- [start:update_table]
|
||||
# Create a table from a pandas DataFrame
|
||||
data = pd.DataFrame({"x": [1, 2, 3], "vector": [[1, 2], [3, 4], [5, 6]]})
|
||||
|
||||
tbl = db.create_table("test_table", data, mode="overwrite")
|
||||
# Update the table where x = 2
|
||||
tbl.update(where="x = 2", values={"vector": [10, 10]})
|
||||
# Get the updated table as a pandas DataFrame
|
||||
df = tbl.to_pandas()
|
||||
print(df)
|
||||
# --8<-- [end:update_table]
|
||||
# --8<-- [start:update_table_sql]
|
||||
# Update the table where x = 2
|
||||
tbl.update(values_sql={"x": "x + 1"})
|
||||
print(tbl.to_pandas())
|
||||
# --8<-- [end:update_table_sql]
|
||||
# --8<-- [start:table_strong_consistency]
|
||||
uri = "data/sample-lancedb"
|
||||
db = lancedb.connect(uri, read_consistency_interval=timedelta(0))
|
||||
tbl = db.open_table("test_table")
|
||||
# --8<-- [end:table_strong_consistency]
|
||||
# --8<-- [start:table_eventual_consistency]
|
||||
uri = "data/sample-lancedb"
|
||||
db = lancedb.connect(uri, read_consistency_interval=timedelta(seconds=5))
|
||||
tbl = db.open_table("test_table")
|
||||
# --8<-- [end:table_eventual_consistency]
|
||||
# --8<-- [start:table_checkout_latest]
|
||||
tbl = db.open_table("test_table")
|
||||
|
||||
# (Other writes happen to my_table from another process)
|
||||
|
||||
# Check for updates
|
||||
tbl.checkout_latest()
|
||||
# --8<-- [end:table_checkout_latest]
|
||||
|
||||
|
||||
@pytest.mark.skip
|
||||
def test_table_with_embedding():
|
||||
db = lancedb.connect("data/sample-lancedb")
|
||||
# --8<-- [start:create_table_with_embedding]
|
||||
embed_fcn = get_registry().get("huggingface").create(name="BAAI/bge-small-en-v1.5")
|
||||
|
||||
class Schema(LanceModel):
|
||||
text: str = embed_fcn.SourceField()
|
||||
vector: Vector(embed_fcn.ndims()) = embed_fcn.VectorField(default=None)
|
||||
|
||||
tbl = db.create_table("my_table_with_embedding", schema=Schema, mode="overwrite")
|
||||
models = [Schema(text="hello"), Schema(text="world")]
|
||||
tbl.add(models)
|
||||
# --8<-- [end:create_table_with_embedding]
|
||||
|
||||
|
||||
@pytest.mark.skip
|
||||
async def test_table_with_embedding_async():
|
||||
async_db = await lancedb.connect_async("data/sample-lancedb")
|
||||
# --8<-- [start:create_table_async_with_embedding]
|
||||
embed_fcn = get_registry().get("huggingface").create(name="BAAI/bge-small-en-v1.5")
|
||||
|
||||
class Schema(LanceModel):
|
||||
text: str = embed_fcn.SourceField()
|
||||
vector: Vector(embed_fcn.ndims()) = embed_fcn.VectorField(default=None)
|
||||
|
||||
async_tbl = await async_db.create_table(
|
||||
"my_table_async_with_embedding", schema=Schema, mode="overwrite"
|
||||
)
|
||||
models = [Schema(text="hello"), Schema(text="world")]
|
||||
await async_tbl.add(models)
|
||||
# --8<-- [end:create_table_async_with_embedding]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_table_async():
|
||||
# --8<-- [start:connect_async]
|
||||
uri = "data/sample-lancedb"
|
||||
async_db = await lancedb.connect_async(uri)
|
||||
# --8<-- [end:connect_async]
|
||||
# --8<-- [start:create_table_async]
|
||||
data = [
|
||||
{"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7},
|
||||
{"vector": [0.2, 1.8], "lat": 40.1, "long": -74.1},
|
||||
]
|
||||
async_tbl = await async_db.create_table("test_table_async", data)
|
||||
await async_tbl.head()
|
||||
# --8<-- [end:create_table_async]
|
||||
# --8<-- [start:create_table_async_exist_ok]
|
||||
await async_db.create_table("test_table_async", data, exist_ok=True)
|
||||
# --8<-- [end:create_table_async_exist_ok]
|
||||
# --8<-- [start:create_table_async_overwrite]
|
||||
await async_db.create_table("test_table_async", data, mode="overwrite")
|
||||
# --8<-- [end:create_table_async_overwrite]
|
||||
# --8<-- [start:create_table_async_from_pandas]
|
||||
data = pd.DataFrame(
|
||||
{
|
||||
"vector": [[1.1, 1.2, 1.3, 1.4], [0.2, 1.8, 0.4, 3.6]],
|
||||
"lat": [45.5, 40.1],
|
||||
"long": [-122.7, -74.1],
|
||||
}
|
||||
)
|
||||
async_tbl = await async_db.create_table("my_table_async_pd", data)
|
||||
await async_tbl.head()
|
||||
# --8<-- [end:create_table_async_from_pandas]
|
||||
# --8<-- [start:create_table_async_custom_schema]
|
||||
custom_schema = pa.schema(
|
||||
[
|
||||
pa.field("vector", pa.list_(pa.float32(), 4)),
|
||||
pa.field("lat", pa.float32()),
|
||||
pa.field("long", pa.float32()),
|
||||
]
|
||||
)
|
||||
async_tbl = await async_db.create_table(
|
||||
"my_table_async_custom_schema", data, schema=custom_schema
|
||||
)
|
||||
# --8<-- [end:create_table_async_custom_schema]
|
||||
# --8<-- [start:create_table_async_from_polars]
|
||||
data = pl.DataFrame(
|
||||
{
|
||||
"vector": [[3.1, 4.1], [5.9, 26.5]],
|
||||
"item": ["foo", "bar"],
|
||||
"price": [10.0, 20.0],
|
||||
}
|
||||
)
|
||||
async_tbl = await async_db.create_table("my_table_async_pl", data)
|
||||
# --8<-- [end:create_table_async_from_polars]
|
||||
# --8<-- [start:create_table_async_from_arrow_table]
|
||||
dim = 16
|
||||
total = 2
|
||||
schema = pa.schema(
|
||||
[pa.field("vector", pa.list_(pa.float16(), dim)), pa.field("text", pa.string())]
|
||||
)
|
||||
data = pa.Table.from_arrays(
|
||||
[
|
||||
pa.array(
|
||||
[np.random.randn(dim).astype(np.float16) for _ in range(total)],
|
||||
pa.list_(pa.float16(), dim),
|
||||
),
|
||||
pa.array(["foo", "bar"]),
|
||||
],
|
||||
["vector", "text"],
|
||||
)
|
||||
async_tbl = await async_db.create_table("f16_tbl_async", data, schema=schema)
|
||||
# --8<-- [end:create_table_async_from_arrow_table]
|
||||
# --8<-- [start:create_table_async_from_pydantic]
|
||||
async_tbl = await async_db.create_table("movielens_small_async", schema=Content)
|
||||
# --8<-- [end:create_table_async_from_pydantic]
|
||||
# --8<-- [start:create_table_async_nested_schema]
|
||||
async_tbl = await async_db.create_table("nested_table_async", schema=NestedSchema)
|
||||
# --8<-- [end:create_table_async_nested_schema]
|
||||
# --8<-- [start:create_table_async_from_batch]
|
||||
schema = pa.schema(
|
||||
[
|
||||
pa.field("vector", pa.list_(pa.float32(), 4)),
|
||||
pa.field("item", pa.utf8()),
|
||||
pa.field("price", pa.float32()),
|
||||
]
|
||||
)
|
||||
await async_db.create_table("batched_table", make_batches(), schema=schema)
|
||||
# --8<-- [end:create_table_async_from_batch]
|
||||
# --8<-- [start:list_tables_async]
|
||||
print(await async_db.table_names())
|
||||
# --8<-- [end:list_tables_async]
|
||||
# --8<-- [start:open_table_async]
|
||||
async_tbl = await async_db.open_table("test_table_async")
|
||||
# --8<-- [end:open_table_async]
|
||||
# --8<-- [start:create_empty_table_async]
|
||||
schema = pa.schema(
|
||||
[
|
||||
pa.field("vector", pa.list_(pa.float32(), 2)),
|
||||
pa.field("item", pa.string()),
|
||||
pa.field("price", pa.float32()),
|
||||
]
|
||||
)
|
||||
async_tbl = await async_db.create_table("test_empty_table_async", schema=schema)
|
||||
# --8<-- [end:create_empty_table_async]
|
||||
# --8<-- [start:create_empty_table_async_pydantic]
|
||||
async_tbl = await async_db.create_table(
|
||||
"test_empty_table_async_new", schema=Item.to_arrow_schema()
|
||||
)
|
||||
# --8<-- [end:create_empty_table_async_pydantic]
|
||||
# --8<-- [start:add_table_async_from_pandas]
|
||||
df = pd.DataFrame(
|
||||
{
|
||||
"vector": [[1.3, 1.4], [9.5, 56.2]],
|
||||
"item": ["banana", "apple"],
|
||||
"price": [5.0, 7.0],
|
||||
}
|
||||
)
|
||||
await async_tbl.add(df)
|
||||
# --8<-- [end:add_table_async_from_pandas]
|
||||
# --8<-- [start:add_table_async_from_polars]
|
||||
df = pl.DataFrame(
|
||||
{
|
||||
"vector": [[1.3, 1.4], [9.5, 56.2]],
|
||||
"item": ["banana", "apple"],
|
||||
"price": [5.0, 7.0],
|
||||
}
|
||||
)
|
||||
await async_tbl.add(df)
|
||||
# --8<-- [end:add_table_async_from_polars]
|
||||
# --8<-- [start:add_table_async_from_batch]
|
||||
await async_tbl.add(make_batches_for_add())
|
||||
# --8<-- [end:add_table_async_from_batch]
|
||||
# --8<-- [start:add_table_async_from_pyarrow]
|
||||
pa_table = pa.Table.from_arrays(
|
||||
[
|
||||
pa.array([[9.1, 6.7], [9.9, 31.2]], pa.list_(pa.float32(), 2)),
|
||||
pa.array(["mango", "orange"]),
|
||||
pa.array([7.0, 4.0]),
|
||||
],
|
||||
["vector", "item", "price"],
|
||||
)
|
||||
await async_tbl.add(pa_table)
|
||||
# --8<-- [end:add_table_async_from_pyarrow]
|
||||
# --8<-- [start:add_table_async_from_pydantic]
|
||||
pydantic_model_items = [
|
||||
Item(vector=[8.1, 4.7], item="pineapple", price=10.0),
|
||||
Item(vector=[6.9, 9.3], item="avocado", price=9.0),
|
||||
]
|
||||
await async_tbl.add(pydantic_model_items)
|
||||
# --8<-- [end:add_table_async_from_pydantic]
|
||||
# --8<-- [start:delete_row_async]
|
||||
await async_tbl.delete('item = "fizz"')
|
||||
# --8<-- [end:delete_row_async]
|
||||
# --8<-- [start:delete_specific_row_async]
|
||||
data = [
|
||||
{"x": 1, "vector": [1, 2]},
|
||||
{"x": 2, "vector": [3, 4]},
|
||||
{"x": 3, "vector": [5, 6]},
|
||||
]
|
||||
async_db = await lancedb.connect_async(uri)
|
||||
async_tbl = await async_db.create_table("delete_row_async", data)
|
||||
await async_tbl.to_pandas()
|
||||
# x vector
|
||||
# 0 1 [1.0, 2.0]
|
||||
# 1 2 [3.0, 4.0]
|
||||
# 2 3 [5.0, 6.0]
|
||||
|
||||
await async_tbl.delete("x = 2")
|
||||
await async_tbl.to_pandas()
|
||||
# x vector
|
||||
# 0 1 [1.0, 2.0]
|
||||
# 1 3 [5.0, 6.0]
|
||||
# --8<-- [end:delete_specific_row_async]
|
||||
# --8<-- [start:delete_list_values_async]
|
||||
to_remove = [1, 5]
|
||||
to_remove = ", ".join(str(v) for v in to_remove)
|
||||
|
||||
await async_tbl.delete(f"x IN ({to_remove})")
|
||||
await async_tbl.to_pandas()
|
||||
# x vector
|
||||
# 0 3 [5.0, 6.0]
|
||||
# --8<-- [end:delete_list_values_async]
|
||||
# --8<-- [start:update_table_async]
|
||||
# Create a table from a pandas DataFrame
|
||||
data = pd.DataFrame({"x": [1, 2, 3], "vector": [[1, 2], [3, 4], [5, 6]]})
|
||||
|
||||
async_tbl = await async_db.create_table("update_table_async", data)
|
||||
# Update the table where x = 2
|
||||
await async_tbl.update({"vector": [10, 10]}, where="x = 2")
|
||||
# Get the updated table as a pandas DataFrame
|
||||
df = await async_tbl.to_pandas()
|
||||
# Print the DataFrame
|
||||
print(df)
|
||||
# --8<-- [end:update_table_async]
|
||||
# --8<-- [start:update_table_sql_async]
|
||||
# Update the table where x = 2
|
||||
await async_tbl.update(updates_sql={"x": "x + 1"})
|
||||
print(await async_tbl.to_pandas())
|
||||
# --8<-- [end:update_table_sql_async]
|
||||
# --8<-- [start:table_async_strong_consistency]
|
||||
uri = "data/sample-lancedb"
|
||||
async_db = await lancedb.connect_async(uri, read_consistency_interval=timedelta(0))
|
||||
async_tbl = await async_db.open_table("test_table_async")
|
||||
# --8<-- [end:table_async_strong_consistency]
|
||||
# --8<-- [start:table_async_ventual_consistency]
|
||||
uri = "data/sample-lancedb"
|
||||
async_db = await lancedb.connect_async(
|
||||
uri, read_consistency_interval=timedelta(seconds=5)
|
||||
)
|
||||
async_tbl = await async_db.open_table("test_table_async")
|
||||
# --8<-- [end:table_async_eventual_consistency]
|
||||
# --8<-- [start:table_async_checkout_latest]
|
||||
async_tbl = await async_db.open_table("test_table_async")
|
||||
|
||||
# (Other writes happen to test_table_async from another process)
|
||||
|
||||
# Check for updates
|
||||
await async_tbl.checkout_latest()
|
||||
# --8<-- [end:table_async_checkout_latest]
|
||||
187
python/python/tests/docs/test_python.py
Normal file
187
python/python/tests/docs/test_python.py
Normal file
@@ -0,0 +1,187 @@
|
||||
# --8<-- [start:import-lancedb]
|
||||
import lancedb
|
||||
|
||||
# --8<-- [end:import-lancedb]
|
||||
# --8<-- [start:import-pandas]
|
||||
import pandas as pd
|
||||
|
||||
# --8<-- [end:import-pandas]
|
||||
# --8<-- [start:import-iterable]
|
||||
from typing import Iterable
|
||||
|
||||
# --8<-- [end:import-iterable]
|
||||
# --8<-- [start:import-pyarrow]
|
||||
import pyarrow as pa
|
||||
|
||||
# --8<-- [end:import-pyarrow]
|
||||
# --8<-- [start:import-polars]
|
||||
import polars as pl
|
||||
|
||||
# --8<-- [end:import-polars]
|
||||
# --8<-- [start:import-lancedb-pydantic]
|
||||
from lancedb.pydantic import Vector, LanceModel
|
||||
|
||||
# --8<-- [end:import-lancedb-pydantic]
|
||||
import pytest
|
||||
|
||||
|
||||
# --8<-- [start:make_batches]
|
||||
def make_batches() -> Iterable[pa.RecordBatch]:
|
||||
for i in range(5):
|
||||
yield pa.RecordBatch.from_arrays(
|
||||
[
|
||||
pa.array([[3.1, 4.1], [5.9, 26.5]]),
|
||||
pa.array(["foo", "bar"]),
|
||||
pa.array([10.0, 20.0]),
|
||||
],
|
||||
["vector", "item", "price"],
|
||||
)
|
||||
|
||||
|
||||
# --8<-- [end:make_batches]
|
||||
|
||||
|
||||
def test_pandas_and_pyarrow():
|
||||
# --8<-- [start:connect_to_lancedb]
|
||||
uri = "data/sample-lancedb"
|
||||
db = lancedb.connect(uri)
|
||||
# --8<-- [end:connect_to_lancedb]
|
||||
# --8<-- [start:create_table_pandas]
|
||||
data = pd.DataFrame(
|
||||
{
|
||||
"vector": [[3.1, 4.1], [5.9, 26.5]],
|
||||
"item": ["foo", "bar"],
|
||||
"price": [10.0, 20.0],
|
||||
}
|
||||
)
|
||||
table = db.create_table("pd_table", data=data)
|
||||
# --8<-- [end:create_table_pandas]
|
||||
# --8<-- [start:create_table_iterable]
|
||||
schema = pa.schema(
|
||||
[
|
||||
pa.field("vector", pa.list_(pa.float32())),
|
||||
pa.field("item", pa.utf8()),
|
||||
pa.field("price", pa.float32()),
|
||||
]
|
||||
)
|
||||
table = db.create_table("iterable_table", data=make_batches(), schema=schema)
|
||||
# --8<-- [end:create_table_iterable]
|
||||
# --8<-- [start:vector_search]
|
||||
# Open the table previously created.
|
||||
table = db.open_table("pd_table")
|
||||
|
||||
query_vector = [100, 100]
|
||||
# Pandas DataFrame
|
||||
df = table.search(query_vector).limit(1).to_pandas()
|
||||
print(df)
|
||||
# --8<-- [end:vector_search]
|
||||
# --8<-- [start:vector_search_with_filter]
|
||||
# Apply the filter via LanceDB
|
||||
results = table.search([100, 100]).where("price < 15").to_pandas()
|
||||
assert len(results) == 1
|
||||
assert results["item"].iloc[0] == "foo"
|
||||
|
||||
# Apply the filter via Pandas
|
||||
df = results = table.search([100, 100]).to_pandas()
|
||||
results = df[df.price < 15]
|
||||
assert len(results) == 1
|
||||
assert results["item"].iloc[0] == "foo"
|
||||
# --8<-- [end:vector_search_with_filter]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_pandas_and_pyarrow_async():
|
||||
# --8<-- [start:connect_to_lancedb_async]
|
||||
uri = "data/sample-lancedb"
|
||||
async_db = await lancedb.connect_async(uri)
|
||||
# --8<-- [end:connect_to_lancedb_async]
|
||||
# --8<-- [start:create_table_pandas_async]
|
||||
data = pd.DataFrame(
|
||||
{
|
||||
"vector": [[3.1, 4.1], [5.9, 26.5]],
|
||||
"item": ["foo", "bar"],
|
||||
"price": [10.0, 20.0],
|
||||
}
|
||||
)
|
||||
await async_db.create_table("pd_table_async", data=data)
|
||||
# --8<-- [end:create_table_pandas_async]
|
||||
# --8<-- [start:create_table_iterable_async]
|
||||
schema = pa.schema(
|
||||
[
|
||||
pa.field("vector", pa.list_(pa.float32())),
|
||||
pa.field("item", pa.utf8()),
|
||||
pa.field("price", pa.float32()),
|
||||
]
|
||||
)
|
||||
await async_db.create_table(
|
||||
"iterable_table_async", data=make_batches(), schema=schema
|
||||
)
|
||||
# --8<-- [end:create_table_iterable_async]
|
||||
# --8<-- [start:vector_search_async]
|
||||
# Open the table previously created.
|
||||
async_tbl = await async_db.open_table("pd_table_async")
|
||||
|
||||
query_vector = [100, 100]
|
||||
# Pandas DataFrame
|
||||
df = await async_tbl.query().nearest_to(query_vector).limit(1).to_pandas()
|
||||
print(df)
|
||||
# --8<-- [end:vector_search_async]
|
||||
# --8<-- [start:vector_search_with_filter_async]
|
||||
# Apply the filter via LanceDB
|
||||
results = (
|
||||
await async_tbl.query().nearest_to([100, 100]).where("price < 15").to_pandas()
|
||||
)
|
||||
assert len(results) == 1
|
||||
assert results["item"].iloc[0] == "foo"
|
||||
|
||||
# Apply the filter via Pandas
|
||||
df = results = await async_tbl.query().nearest_to([100, 100]).to_pandas()
|
||||
results = df[df.price < 15]
|
||||
assert len(results) == 1
|
||||
assert results["item"].iloc[0] == "foo"
|
||||
# --8<-- [end:vector_search_with_filter_async]
|
||||
|
||||
|
||||
# --8<-- [start:class_Item]
|
||||
class Item(LanceModel):
|
||||
vector: Vector(2)
|
||||
item: str
|
||||
price: float
|
||||
|
||||
|
||||
# --8<-- [end:class_Item]
|
||||
|
||||
|
||||
def test_polars():
|
||||
uri = "data/sample-lancedb"
|
||||
db = lancedb.connect(uri)
|
||||
|
||||
# --8<-- [start:create_table_polars]
|
||||
data = pl.DataFrame(
|
||||
{
|
||||
"vector": [[3.1, 4.1], [5.9, 26.5]],
|
||||
"item": ["foo", "bar"],
|
||||
"price": [10.0, 20.0],
|
||||
}
|
||||
)
|
||||
table = db.create_table("pl_table", data=data)
|
||||
# --8<-- [end:create_table_polars]
|
||||
# --8<-- [start:vector_search_polars]
|
||||
query = [3.0, 4.0]
|
||||
result = table.search(query).limit(1).to_polars()
|
||||
print(result)
|
||||
print(type(result))
|
||||
# --8<-- [end:vector_search_polars]
|
||||
# --8<-- [start:create_table_pydantic]
|
||||
table = db.create_table("pydantic_table", schema=Item)
|
||||
df = pl.DataFrame(data)
|
||||
# Add Polars DataFrame to table
|
||||
table.add(df)
|
||||
# --8<-- [end:create_table_pydantic]
|
||||
# --8<-- [start:dump_table_lazyform]
|
||||
ldf = table.to_polars()
|
||||
print(type(ldf))
|
||||
# --8<-- [end:dump_table_lazyform]
|
||||
# --8<-- [start:print_table_lazyform]
|
||||
print(ldf.first().collect())
|
||||
# --8<-- [end:print_table_lazyform]
|
||||
366
python/python/tests/docs/test_search.py
Normal file
366
python/python/tests/docs/test_search.py
Normal file
@@ -0,0 +1,366 @@
|
||||
# --8<-- [start:import-lancedb]
|
||||
import lancedb
|
||||
|
||||
# --8<-- [end:import-lancedb]
|
||||
# --8<-- [start:import-numpy]
|
||||
import numpy as np
|
||||
|
||||
# --8<-- [end:import-numpy]
|
||||
# --8<-- [start:import-datetime]
|
||||
from datetime import datetime
|
||||
|
||||
# --8<-- [end:import-datetime]
|
||||
# --8<-- [start:import-lancedb-pydantic]
|
||||
from lancedb.pydantic import Vector, LanceModel
|
||||
|
||||
# --8<-- [end:import-lancedb-pydantic]
|
||||
# --8<-- [start:import-pydantic-base-model]
|
||||
from pydantic import BaseModel
|
||||
|
||||
# --8<-- [end:import-pydantic-base-model]
|
||||
# --8<-- [start:import-lancedb-fts]
|
||||
from lancedb.index import FTS
|
||||
|
||||
# --8<-- [end:import-lancedb-fts]
|
||||
# --8<-- [start:import-os]
|
||||
import os
|
||||
|
||||
# --8<-- [end:import-os]
|
||||
# --8<-- [start:import-embeddings]
|
||||
from lancedb.embeddings import get_registry
|
||||
|
||||
# --8<-- [end:import-embeddings]
|
||||
import pytest
|
||||
|
||||
|
||||
# --8<-- [start:class-definition]
|
||||
class Metadata(BaseModel):
|
||||
source: str
|
||||
timestamp: datetime
|
||||
|
||||
|
||||
class Document(BaseModel):
|
||||
content: str
|
||||
meta: Metadata
|
||||
|
||||
|
||||
class LanceSchema(LanceModel):
|
||||
id: str
|
||||
vector: Vector(1536)
|
||||
payload: Document
|
||||
|
||||
|
||||
# --8<-- [end:class-definition]
|
||||
|
||||
|
||||
def test_vector_search():
|
||||
# --8<-- [start:exhaustive_search]
|
||||
uri = "data/sample-lancedb"
|
||||
db = lancedb.connect(uri)
|
||||
data = [
|
||||
{"vector": row, "item": f"item {i}"}
|
||||
for i, row in enumerate(np.random.random((10_000, 1536)).astype("float32"))
|
||||
]
|
||||
tbl = db.create_table("vector_search", data=data)
|
||||
tbl.search(np.random.random((1536))).limit(10).to_list()
|
||||
# --8<-- [end:exhaustive_search]
|
||||
# --8<-- [start:exhaustive_search_cosine]
|
||||
tbl.search(np.random.random((1536))).metric("cosine").limit(10).to_list()
|
||||
# --8<-- [end:exhaustive_search_cosine]
|
||||
# --8<-- [start:create_table_with_nested_schema]
|
||||
# Let's add 100 sample rows to our dataset
|
||||
data = [
|
||||
LanceSchema(
|
||||
id=f"id{i}",
|
||||
vector=np.random.randn(1536),
|
||||
payload=Document(
|
||||
content=f"document{i}",
|
||||
meta=Metadata(source=f"source{i % 10}", timestamp=datetime.now()),
|
||||
),
|
||||
)
|
||||
for i in range(100)
|
||||
]
|
||||
|
||||
# Synchronous client
|
||||
tbl = db.create_table("documents", data=data)
|
||||
# --8<-- [end:create_table_with_nested_schema]
|
||||
# --8<-- [start:search_result_as_pyarrow]
|
||||
tbl.search(np.random.randn(1536)).to_arrow()
|
||||
# --8<-- [end:search_result_as_pyarrow]
|
||||
# --8<-- [start:search_result_as_pandas]
|
||||
tbl.search(np.random.randn(1536)).to_pandas()
|
||||
# --8<-- [end:search_result_as_pandas]
|
||||
# --8<-- [start:search_result_as_pandas_flatten_true]
|
||||
tbl.search(np.random.randn(1536)).to_pandas(flatten=True)
|
||||
# --8<-- [end:search_result_as_pandas_flatten_true]
|
||||
# --8<-- [start:search_result_as_pandas_flatten_1]
|
||||
tbl.search(np.random.randn(1536)).to_pandas(flatten=1)
|
||||
# --8<-- [end:search_result_as_pandas_flatten_1]
|
||||
# --8<-- [start:search_result_as_list]
|
||||
tbl.search(np.random.randn(1536)).to_list()
|
||||
# --8<-- [end:search_result_as_list]
|
||||
# --8<-- [start:search_result_as_pydantic]
|
||||
tbl.search(np.random.randn(1536)).to_pydantic(LanceSchema)
|
||||
# --8<-- [end:search_result_as_pydantic]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_vector_search_async():
|
||||
# --8<-- [start:exhaustive_search_async]
|
||||
uri = "data/sample-lancedb"
|
||||
async_db = await lancedb.connect_async(uri)
|
||||
data = [
|
||||
{"vector": row, "item": f"item {i}"}
|
||||
for i, row in enumerate(np.random.random((10_000, 1536)).astype("float32"))
|
||||
]
|
||||
async_tbl = await async_db.create_table("vector_search_async", data=data)
|
||||
(await async_tbl.query().nearest_to(np.random.random((1536))).limit(10).to_list())
|
||||
# --8<-- [end:exhaustive_search_async]
|
||||
# --8<-- [start:exhaustive_search_async_cosine]
|
||||
(
|
||||
await async_tbl.query()
|
||||
.nearest_to(np.random.random((1536)))
|
||||
.distance_type("cosine")
|
||||
.limit(10)
|
||||
.to_list()
|
||||
)
|
||||
# --8<-- [end:exhaustive_search_async_cosine]
|
||||
# --8<-- [start:create_table_async_with_nested_schema]
|
||||
# Let's add 100 sample rows to our dataset
|
||||
data = [
|
||||
LanceSchema(
|
||||
id=f"id{i}",
|
||||
vector=np.random.randn(1536),
|
||||
payload=Document(
|
||||
content=f"document{i}",
|
||||
meta=Metadata(source=f"source{i % 10}", timestamp=datetime.now()),
|
||||
),
|
||||
)
|
||||
for i in range(100)
|
||||
]
|
||||
|
||||
async_tbl = await async_db.create_table("documents_async", data=data)
|
||||
# --8<-- [end:create_table_async_with_nested_schema]
|
||||
# --8<-- [start:search_result_async_as_pyarrow]
|
||||
await async_tbl.query().nearest_to(np.random.randn(1536)).to_arrow()
|
||||
# --8<-- [end:search_result_async_as_pyarrow]
|
||||
# --8<-- [start:search_result_async_as_pandas]
|
||||
await async_tbl.query().nearest_to(np.random.randn(1536)).to_pandas()
|
||||
# --8<-- [end:search_result_async_as_pandas]
|
||||
# --8<-- [start:search_result_async_as_list]
|
||||
await async_tbl.query().nearest_to(np.random.randn(1536)).to_list()
|
||||
# --8<-- [end:search_result_async_as_list]
|
||||
|
||||
|
||||
def test_fts_native():
|
||||
# --8<-- [start:basic_fts]
|
||||
uri = "data/sample-lancedb"
|
||||
db = lancedb.connect(uri)
|
||||
|
||||
table = db.create_table(
|
||||
"my_table_fts",
|
||||
data=[
|
||||
{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"},
|
||||
{"vector": [5.9, 26.5], "text": "There are several kittens playing"},
|
||||
],
|
||||
)
|
||||
|
||||
# passing `use_tantivy=False` to use lance FTS index
|
||||
# `use_tantivy=True` by default
|
||||
table.create_fts_index("text", use_tantivy=False)
|
||||
table.search("puppy").limit(10).select(["text"]).to_list()
|
||||
# [{'text': 'Frodo was a happy puppy', '_score': 0.6931471824645996}]
|
||||
# ...
|
||||
# --8<-- [end:basic_fts]
|
||||
# --8<-- [start:fts_config_stem]
|
||||
table.create_fts_index("text", tokenizer_name="en_stem", replace=True)
|
||||
# --8<-- [end:fts_config_stem]
|
||||
# --8<-- [start:fts_config_folding]
|
||||
table.create_fts_index(
|
||||
"text",
|
||||
use_tantivy=False,
|
||||
language="French",
|
||||
stem=True,
|
||||
ascii_folding=True,
|
||||
replace=True,
|
||||
)
|
||||
# --8<-- [end:fts_config_folding]
|
||||
# --8<-- [start:fts_prefiltering]
|
||||
table.search("puppy").limit(10).where("text='foo'", prefilter=True).to_list()
|
||||
# --8<-- [end:fts_prefiltering]
|
||||
# --8<-- [start:fts_postfiltering]
|
||||
table.search("puppy").limit(10).where("text='foo'", prefilter=False).to_list()
|
||||
# --8<-- [end:fts_postfiltering]
|
||||
# --8<-- [start:fts_with_position]
|
||||
table.create_fts_index("text", use_tantivy=False, with_position=True, replace=True)
|
||||
# --8<-- [end:fts_with_position]
|
||||
# --8<-- [start:fts_incremental_index]
|
||||
table.add([{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"}])
|
||||
table.optimize()
|
||||
# --8<-- [end:fts_incremental_index]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_fts_native_async():
|
||||
# --8<-- [start:basic_fts_async]
|
||||
uri = "data/sample-lancedb"
|
||||
async_db = await lancedb.connect_async(uri)
|
||||
|
||||
async_tbl = await async_db.create_table(
|
||||
"my_table_fts_async",
|
||||
data=[
|
||||
{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"},
|
||||
{"vector": [5.9, 26.5], "text": "There are several kittens playing"},
|
||||
],
|
||||
)
|
||||
|
||||
# async API uses our native FTS algorithm
|
||||
await async_tbl.create_index("text", config=FTS())
|
||||
await (
|
||||
async_tbl.query().nearest_to_text("puppy").select(["text"]).limit(10).to_list()
|
||||
)
|
||||
# [{'text': 'Frodo was a happy puppy', '_score': 0.6931471824645996}]
|
||||
# ...
|
||||
# --8<-- [end:basic_fts_async]
|
||||
# --8<-- [start:fts_config_stem_async]
|
||||
await async_tbl.create_index(
|
||||
"text", config=FTS(language="English", stem=True, remove_stop_words=True)
|
||||
) # --8<-- [end:fts_config_stem_async]
|
||||
# --8<-- [start:fts_config_folding_async]
|
||||
await async_tbl.create_index(
|
||||
"text", config=FTS(language="French", stem=True, ascii_folding=True)
|
||||
)
|
||||
# --8<-- [end:fts_config_folding_async]
|
||||
# --8<-- [start:fts_prefiltering_async]
|
||||
await (
|
||||
async_tbl.query()
|
||||
.nearest_to_text("puppy")
|
||||
.limit(10)
|
||||
.where("text='foo'")
|
||||
.to_list()
|
||||
)
|
||||
# --8<-- [end:fts_prefiltering_async]
|
||||
# --8<-- [start:fts_postfiltering_async]
|
||||
await (
|
||||
async_tbl.query()
|
||||
.nearest_to_text("puppy")
|
||||
.limit(10)
|
||||
.where("text='foo'")
|
||||
.postfilter()
|
||||
.to_list()
|
||||
)
|
||||
# --8<-- [end:fts_postfiltering_async]
|
||||
# --8<-- [start:fts_with_position_async]
|
||||
await async_tbl.create_index("text", config=FTS(with_position=True))
|
||||
# --8<-- [end:fts_with_position_async]
|
||||
# --8<-- [start:fts_incremental_index_async]
|
||||
await async_tbl.add([{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"}])
|
||||
await async_tbl.optimize()
|
||||
# --8<-- [end:fts_incremental_index_async]
|
||||
|
||||
|
||||
@pytest.mark.skip()
|
||||
def test_hybrid_search():
|
||||
# --8<-- [start:import-openai]
|
||||
import openai
|
||||
|
||||
# --8<-- [end:import-openai]
|
||||
# --8<-- [start:openai-embeddings]
|
||||
# Ingest embedding function in LanceDB table
|
||||
# Configuring the environment variable OPENAI_API_KEY
|
||||
if "OPENAI_API_KEY" not in os.environ:
|
||||
# OR set the key here as a variable
|
||||
openai.api_key = "sk-..."
|
||||
embeddings = get_registry().get("openai").create()
|
||||
|
||||
# --8<-- [end:openai-embeddings]
|
||||
# --8<-- [start:class-Documents]
|
||||
class Documents(LanceModel):
|
||||
vector: Vector(embeddings.ndims()) = embeddings.VectorField()
|
||||
text: str = embeddings.SourceField()
|
||||
|
||||
# --8<-- [end:class-Documents]
|
||||
# --8<-- [start:basic_hybrid_search]
|
||||
data = [
|
||||
{"text": "rebel spaceships striking from a hidden base"},
|
||||
{"text": "have won their first victory against the evil Galactic Empire"},
|
||||
{"text": "during the battle rebel spies managed to steal secret plans"},
|
||||
{"text": "to the Empire's ultimate weapon the Death Star"},
|
||||
]
|
||||
uri = "data/sample-lancedb"
|
||||
db = lancedb.connect(uri)
|
||||
table = db.create_table("documents", schema=Documents)
|
||||
# ingest docs with auto-vectorization
|
||||
table.add(data)
|
||||
# Create a fts index before the hybrid search
|
||||
table.create_fts_index("text")
|
||||
# hybrid search with default re-ranker
|
||||
table.search("flower moon", query_type="hybrid").to_pandas()
|
||||
# --8<-- [end:basic_hybrid_search]
|
||||
# --8<-- [start:hybrid_search_pass_vector_text]
|
||||
vector_query = [0.1, 0.2, 0.3, 0.4, 0.5]
|
||||
text_query = "flower moon"
|
||||
(
|
||||
table.search(query_type="hybrid")
|
||||
.vector(vector_query)
|
||||
.text(text_query)
|
||||
.limit(5)
|
||||
.to_pandas()
|
||||
)
|
||||
# --8<-- [end:hybrid_search_pass_vector_text]
|
||||
|
||||
|
||||
@pytest.mark.skip
|
||||
async def test_hybrid_search_async():
|
||||
import openai
|
||||
|
||||
# --8<-- [start:openai-embeddings]
|
||||
# Ingest embedding function in LanceDB table
|
||||
# Configuring the environment variable OPENAI_API_KEY
|
||||
if "OPENAI_API_KEY" not in os.environ:
|
||||
# OR set the key here as a variable
|
||||
openai.api_key = "sk-..."
|
||||
embeddings = get_registry().get("openai").create()
|
||||
|
||||
# --8<-- [end:openai-embeddings]
|
||||
# --8<-- [start:class-Documents]
|
||||
class Documents(LanceModel):
|
||||
vector: Vector(embeddings.ndims()) = embeddings.VectorField()
|
||||
text: str = embeddings.SourceField()
|
||||
|
||||
# --8<-- [end:class-Documents]
|
||||
# --8<-- [start:basic_hybrid_search_async]
|
||||
uri = "data/sample-lancedb"
|
||||
async_db = await lancedb.connect_async(uri)
|
||||
data = [
|
||||
{"text": "rebel spaceships striking from a hidden base"},
|
||||
{"text": "have won their first victory against the evil Galactic Empire"},
|
||||
{"text": "during the battle rebel spies managed to steal secret plans"},
|
||||
{"text": "to the Empire's ultimate weapon the Death Star"},
|
||||
]
|
||||
async_tbl = await async_db.create_table("documents_async", schema=Documents)
|
||||
# ingest docs with auto-vectorization
|
||||
await async_tbl.add(data)
|
||||
# Create a fts index before the hybrid search
|
||||
await async_tbl.create_index("text", config=FTS())
|
||||
text_query = "flower moon"
|
||||
vector_query = embeddings.compute_query_embeddings(text_query)[0]
|
||||
# hybrid search with default re-ranker
|
||||
await (
|
||||
async_tbl.query()
|
||||
.nearest_to(vector_query)
|
||||
.nearest_to_text(text_query)
|
||||
.to_pandas()
|
||||
)
|
||||
# --8<-- [end:basic_hybrid_search_async]
|
||||
# --8<-- [start:hybrid_search_pass_vector_text_async]
|
||||
vector_query = [0.1, 0.2, 0.3, 0.4, 0.5]
|
||||
text_query = "flower moon"
|
||||
await (
|
||||
async_tbl.query()
|
||||
.nearest_to(vector_query)
|
||||
.nearest_to_text(text_query)
|
||||
.limit(5)
|
||||
.to_pandas()
|
||||
)
|
||||
# --8<-- [end:hybrid_search_pass_vector_text_async]
|
||||
@@ -198,7 +198,6 @@ def test_embedding_function_with_pandas(tmp_path):
|
||||
{
|
||||
"text": ["hello world", "goodbye world"],
|
||||
"val": [1, 2],
|
||||
"not-used": ["s1", "s3"],
|
||||
}
|
||||
)
|
||||
db = lancedb.connect(tmp_path)
|
||||
@@ -212,7 +211,6 @@ def test_embedding_function_with_pandas(tmp_path):
|
||||
{
|
||||
"text": ["extra", "more"],
|
||||
"val": [4, 5],
|
||||
"misc-col": ["s1", "s3"],
|
||||
}
|
||||
)
|
||||
tbl.add(df)
|
||||
|
||||
@@ -6,14 +6,18 @@ from datetime import timedelta
|
||||
from pathlib import Path
|
||||
|
||||
import lancedb
|
||||
from lancedb.index import IvfPq
|
||||
from lancedb.index import IvfPq, FTS
|
||||
import numpy as np
|
||||
import pandas.testing as tm
|
||||
import pyarrow as pa
|
||||
import pytest
|
||||
import pytest_asyncio
|
||||
from lancedb.pydantic import LanceModel, Vector
|
||||
from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query
|
||||
from lancedb.query import (
|
||||
AsyncQueryBase,
|
||||
LanceVectorQueryBuilder,
|
||||
Query,
|
||||
)
|
||||
from lancedb.table import AsyncTable, LanceTable
|
||||
|
||||
|
||||
@@ -47,11 +51,23 @@ async def table_async(tmp_path) -> AsyncTable:
|
||||
"id": pa.array([1, 2]),
|
||||
"str_field": pa.array(["a", "b"]),
|
||||
"float_field": pa.array([1.0, 2.0]),
|
||||
"text": pa.array(["a", "dog"]),
|
||||
}
|
||||
)
|
||||
return await conn.create_table("test", data)
|
||||
|
||||
|
||||
@pytest_asyncio.fixture
|
||||
async def table_struct_async(tmp_path) -> AsyncTable:
|
||||
conn = await lancedb.connect_async(
|
||||
tmp_path, read_consistency_interval=timedelta(seconds=0)
|
||||
)
|
||||
struct = pa.array([{"n_legs": 2, "animals": "Parrot"}, {"year": 2022, "n_legs": 4}])
|
||||
month = pa.array([4, 6])
|
||||
table = pa.Table.from_arrays([struct, month], names=["a", "month"])
|
||||
return await conn.create_table("test_struct", table)
|
||||
|
||||
|
||||
def test_cast(table):
|
||||
class TestModel(LanceModel):
|
||||
vector: Vector(2)
|
||||
@@ -212,15 +228,25 @@ def test_query_builder_with_filter(table):
|
||||
def test_query_builder_with_prefilter(table):
|
||||
df = (
|
||||
LanceVectorQueryBuilder(table, [0, 0], "vector")
|
||||
.where("id = 2")
|
||||
.where("id = 2", prefilter=True)
|
||||
.limit(1)
|
||||
.to_pandas()
|
||||
)
|
||||
assert df["id"].values[0] == 2
|
||||
assert all(df["vector"].values[0] == [3, 4])
|
||||
|
||||
df = (
|
||||
LanceVectorQueryBuilder(table, [0, 0], "vector")
|
||||
.where("id = 2", prefilter=False)
|
||||
.limit(1)
|
||||
.to_pandas()
|
||||
)
|
||||
assert len(df) == 0
|
||||
|
||||
# ensure the default prefilter = True
|
||||
df = (
|
||||
LanceVectorQueryBuilder(table, [0, 0], "vector")
|
||||
.where("id = 2", prefilter=True)
|
||||
.where("id = 2")
|
||||
.limit(1)
|
||||
.to_pandas()
|
||||
)
|
||||
@@ -270,6 +296,7 @@ def test_query_builder_with_different_vector_column():
|
||||
Query(
|
||||
vector=query,
|
||||
filter="b < 10",
|
||||
prefilter=True,
|
||||
k=2,
|
||||
metric="cosine",
|
||||
columns=["b"],
|
||||
@@ -303,7 +330,7 @@ async def test_query_async(table_async: AsyncTable):
|
||||
await check_query(
|
||||
table_async.query(),
|
||||
expected_num_rows=2,
|
||||
expected_columns=["vector", "id", "str_field", "float_field"],
|
||||
expected_columns=["vector", "id", "str_field", "float_field", "text"],
|
||||
)
|
||||
await check_query(table_async.query().where("id = 2"), expected_num_rows=1)
|
||||
await check_query(
|
||||
@@ -372,32 +399,60 @@ async def test_query_async(table_async: AsyncTable):
|
||||
expected_columns=["id", "vector", "_rowid"],
|
||||
)
|
||||
|
||||
# FTS with rerank
|
||||
await table_async.create_index("text", config=FTS(with_position=False))
|
||||
await check_query(
|
||||
table_async.query().nearest_to_text("dog").rerank(),
|
||||
expected_num_rows=1,
|
||||
)
|
||||
|
||||
# Vector query with rerank
|
||||
await check_query(table_async.vector_search([1, 2]).rerank(), expected_num_rows=2)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_query_to_arrow_async(table_async: AsyncTable):
|
||||
table = await table_async.to_arrow()
|
||||
assert table.num_rows == 2
|
||||
assert table.num_columns == 4
|
||||
assert table.num_columns == 5
|
||||
|
||||
table = await table_async.query().to_arrow()
|
||||
assert table.num_rows == 2
|
||||
assert table.num_columns == 4
|
||||
assert table.num_columns == 5
|
||||
|
||||
table = await table_async.query().where("id < 0").to_arrow()
|
||||
assert table.num_rows == 0
|
||||
assert table.num_columns == 4
|
||||
assert table.num_columns == 5
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_query_to_pandas_async(table_async: AsyncTable):
|
||||
df = await table_async.to_pandas()
|
||||
assert df.shape == (2, 4)
|
||||
assert df.shape == (2, 5)
|
||||
|
||||
df = await table_async.query().to_pandas()
|
||||
assert df.shape == (2, 4)
|
||||
assert df.shape == (2, 5)
|
||||
|
||||
df = await table_async.query().where("id < 0").to_pandas()
|
||||
assert df.shape == (0, 4)
|
||||
assert df.shape == (0, 5)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_query_to_pandas_flatten_async(table_struct_async: AsyncTable):
|
||||
df = await table_struct_async.query().to_pandas()
|
||||
assert df.shape == (2, 2)
|
||||
|
||||
df = await table_struct_async.query().to_pandas(flatten=True)
|
||||
assert df.shape == (2, 4)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_query_to_polars_async(table_async: AsyncTable):
|
||||
df = await table_async.query().to_polars()
|
||||
assert df.shape == (2, 5)
|
||||
|
||||
df = await table_async.query().where("id < 0").to_polars()
|
||||
assert df.shape == (0, 5)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
|
||||
@@ -242,8 +242,8 @@ def test_add_subschema(mem_db: DBConnection):
|
||||
|
||||
data = {"price": 10.0, "item": "foo"}
|
||||
table.add([data])
|
||||
data = {"price": 2.0, "vector": [3.1, 4.1]}
|
||||
table.add([data])
|
||||
data = pd.DataFrame({"price": [2.0], "vector": [[3.1, 4.1]]})
|
||||
table.add(data)
|
||||
data = {"price": 3.0, "vector": [5.9, 26.5], "item": "bar"}
|
||||
table.add([data])
|
||||
|
||||
@@ -259,7 +259,7 @@ def test_add_subschema(mem_db: DBConnection):
|
||||
|
||||
data = {"item": "foo"}
|
||||
# We can't omit a column if it's not nullable
|
||||
with pytest.raises(RuntimeError, match="Invalid user input"):
|
||||
with pytest.raises(RuntimeError, match="Append with different schema"):
|
||||
table.add([data])
|
||||
|
||||
# We can add it if we make the column nullable
|
||||
@@ -292,6 +292,7 @@ def test_add_nullability(mem_db: DBConnection):
|
||||
]
|
||||
)
|
||||
table = mem_db.create_table("test", schema=schema)
|
||||
assert table.schema.field("vector").nullable is False
|
||||
|
||||
nullable_schema = pa.schema(
|
||||
[
|
||||
@@ -320,7 +321,10 @@ def test_add_nullability(mem_db: DBConnection):
|
||||
schema=nullable_schema,
|
||||
)
|
||||
# We can't add nullable schema if it contains nulls
|
||||
with pytest.raises(Exception, match="Vector column vector has NaNs"):
|
||||
with pytest.raises(
|
||||
Exception,
|
||||
match="Casting field 'vector' with null values to non-nullable",
|
||||
):
|
||||
table.add(data)
|
||||
|
||||
# But we can make it nullable
|
||||
@@ -776,6 +780,38 @@ def test_merge_insert(mem_db: DBConnection):
|
||||
assert table.to_arrow().sort_by("a") == expected
|
||||
|
||||
|
||||
# We vary the data format because there are slight differences in how
|
||||
# subschemas are handled in different formats
|
||||
@pytest.mark.parametrize(
|
||||
"data_format",
|
||||
[
|
||||
lambda table: table,
|
||||
lambda table: table.to_pandas(),
|
||||
lambda table: table.to_pylist(),
|
||||
],
|
||||
ids=["pa.Table", "pd.DataFrame", "rows"],
|
||||
)
|
||||
def test_merge_insert_subschema(mem_db: DBConnection, data_format):
|
||||
initial_data = pa.table(
|
||||
{"id": range(3), "a": [1.0, 2.0, 3.0], "c": ["x", "x", "x"]}
|
||||
)
|
||||
table = mem_db.create_table("my_table", data=initial_data)
|
||||
|
||||
new_data = pa.table({"id": [2, 3], "c": ["y", "y"]})
|
||||
new_data = data_format(new_data)
|
||||
(
|
||||
table.merge_insert(on="id")
|
||||
.when_matched_update_all()
|
||||
.when_not_matched_insert_all()
|
||||
.execute(new_data)
|
||||
)
|
||||
|
||||
expected = pa.table(
|
||||
{"id": [0, 1, 2, 3], "a": [1.0, 2.0, 3.0, None], "c": ["x", "x", "y", "y"]}
|
||||
)
|
||||
assert table.to_arrow().sort_by("id") == expected
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_merge_insert_async(mem_db_async: AsyncConnection):
|
||||
data = pa.table({"a": [1, 2, 3], "b": ["a", "b", "c"]})
|
||||
|
||||
@@ -13,10 +13,27 @@
|
||||
|
||||
import os
|
||||
import pathlib
|
||||
from typing import Optional
|
||||
|
||||
import lance
|
||||
from lancedb.conftest import MockTextEmbeddingFunction
|
||||
from lancedb.embeddings.base import EmbeddingFunctionConfig
|
||||
from lancedb.embeddings.registry import EmbeddingFunctionRegistry
|
||||
from lancedb.table import (
|
||||
_append_vector_columns,
|
||||
_cast_to_target_schema,
|
||||
_handle_bad_vectors,
|
||||
_into_pyarrow_table,
|
||||
_sanitize_data,
|
||||
_infer_target_schema,
|
||||
)
|
||||
import pyarrow as pa
|
||||
import pandas as pd
|
||||
import polars as pl
|
||||
import pytest
|
||||
import lancedb
|
||||
from lancedb.util import get_uri_scheme, join_uri, value_to_sql
|
||||
from utils import exception_output
|
||||
|
||||
|
||||
def test_normalize_uri():
|
||||
@@ -111,3 +128,460 @@ def test_value_to_sql_string(tmp_path):
|
||||
for value in values:
|
||||
table.update(where=f"search = {value_to_sql(value)}", values={"replace": value})
|
||||
assert table.to_pandas().query("search == @value")["replace"].item() == value
|
||||
|
||||
|
||||
def test_append_vector_columns():
|
||||
registry = EmbeddingFunctionRegistry.get_instance()
|
||||
registry.register("test")(MockTextEmbeddingFunction)
|
||||
conf = EmbeddingFunctionConfig(
|
||||
source_column="text",
|
||||
vector_column="vector",
|
||||
function=MockTextEmbeddingFunction(),
|
||||
)
|
||||
metadata = registry.get_table_metadata([conf])
|
||||
|
||||
schema = pa.schema(
|
||||
{
|
||||
"text": pa.string(),
|
||||
"vector": pa.list_(pa.float64(), 10),
|
||||
}
|
||||
)
|
||||
data = pa.table(
|
||||
{
|
||||
"text": ["hello"],
|
||||
"vector": [None], # Replaces null
|
||||
},
|
||||
schema=schema,
|
||||
)
|
||||
output = _append_vector_columns(
|
||||
data,
|
||||
schema, # metadata passed separate from schema
|
||||
metadata=metadata,
|
||||
)
|
||||
assert output.schema == schema
|
||||
assert output["vector"].null_count == 0
|
||||
|
||||
# Adds if missing
|
||||
data = pa.table({"text": ["hello"]})
|
||||
output = _append_vector_columns(
|
||||
data,
|
||||
schema.with_metadata(metadata),
|
||||
)
|
||||
assert output.schema == schema
|
||||
assert output["vector"].null_count == 0
|
||||
|
||||
# doesn't embed if already there
|
||||
data = pa.table(
|
||||
{
|
||||
"text": ["hello"],
|
||||
"vector": [[42.0] * 10],
|
||||
},
|
||||
schema=schema,
|
||||
)
|
||||
output = _append_vector_columns(
|
||||
data,
|
||||
schema.with_metadata(metadata),
|
||||
)
|
||||
assert output == data # No change
|
||||
|
||||
# No provided schema
|
||||
data = pa.table(
|
||||
{
|
||||
"text": ["hello"],
|
||||
}
|
||||
)
|
||||
output = _append_vector_columns(
|
||||
data,
|
||||
metadata=metadata,
|
||||
)
|
||||
expected_schema = pa.schema(
|
||||
{
|
||||
"text": pa.string(),
|
||||
"vector": pa.list_(pa.float32(), 10),
|
||||
}
|
||||
)
|
||||
assert output.schema == expected_schema
|
||||
assert output["vector"].null_count == 0
|
||||
|
||||
|
||||
@pytest.mark.parametrize("on_bad_vectors", ["error", "drop", "fill", "null"])
|
||||
def test_handle_bad_vectors_jagged(on_bad_vectors):
|
||||
vector = pa.array([[1.0, 2.0], [3.0], [4.0, 5.0]])
|
||||
schema = pa.schema({"vector": pa.list_(pa.float64())})
|
||||
data = pa.table({"vector": vector}, schema=schema)
|
||||
|
||||
if on_bad_vectors == "error":
|
||||
with pytest.raises(ValueError) as e:
|
||||
output = _handle_bad_vectors(
|
||||
data,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
)
|
||||
output = exception_output(e)
|
||||
assert output == (
|
||||
"ValueError: Vector column 'vector' has variable length vectors. Set "
|
||||
"on_bad_vectors='drop' to remove them, set on_bad_vectors='fill' "
|
||||
"and fill_value=<value> to replace them, or set on_bad_vectors='null' "
|
||||
"to replace them with null."
|
||||
)
|
||||
return
|
||||
else:
|
||||
output = _handle_bad_vectors(
|
||||
data,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
fill_value=42.0,
|
||||
)
|
||||
|
||||
if on_bad_vectors == "drop":
|
||||
expected = pa.array([[1.0, 2.0], [4.0, 5.0]])
|
||||
elif on_bad_vectors == "fill":
|
||||
expected = pa.array([[1.0, 2.0], [42.0, 42.0], [4.0, 5.0]])
|
||||
elif on_bad_vectors == "null":
|
||||
expected = pa.array([[1.0, 2.0], None, [4.0, 5.0]])
|
||||
|
||||
assert output["vector"].combine_chunks() == expected
|
||||
|
||||
|
||||
@pytest.mark.parametrize("on_bad_vectors", ["error", "drop", "fill", "null"])
|
||||
def test_handle_bad_vectors_nan(on_bad_vectors):
|
||||
vector = pa.array([[1.0, float("nan")], [3.0, 4.0]])
|
||||
data = pa.table({"vector": vector})
|
||||
|
||||
if on_bad_vectors == "error":
|
||||
with pytest.raises(ValueError) as e:
|
||||
output = _handle_bad_vectors(
|
||||
data,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
)
|
||||
output = exception_output(e)
|
||||
assert output == (
|
||||
"ValueError: Vector column 'vector' has NaNs. Set "
|
||||
"on_bad_vectors='drop' to remove them, set on_bad_vectors='fill' "
|
||||
"and fill_value=<value> to replace them, or set on_bad_vectors='null' "
|
||||
"to replace them with null."
|
||||
)
|
||||
return
|
||||
else:
|
||||
output = _handle_bad_vectors(
|
||||
data,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
fill_value=42.0,
|
||||
)
|
||||
|
||||
if on_bad_vectors == "drop":
|
||||
expected = pa.array([[3.0, 4.0]])
|
||||
elif on_bad_vectors == "fill":
|
||||
expected = pa.array([[42.0, 42.0], [3.0, 4.0]])
|
||||
elif on_bad_vectors == "null":
|
||||
expected = pa.array([None, [3.0, 4.0]])
|
||||
|
||||
assert output["vector"].combine_chunks() == expected
|
||||
|
||||
|
||||
def test_handle_bad_vectors_noop():
|
||||
# ChunkedArray should be preserved as-is
|
||||
vector = pa.chunked_array(
|
||||
[[[1.0, 2.0], [3.0, 4.0]]], type=pa.list_(pa.float64(), 2)
|
||||
)
|
||||
data = pa.table({"vector": vector})
|
||||
output = _handle_bad_vectors(data)
|
||||
assert output["vector"] == vector
|
||||
|
||||
|
||||
class TestModel(lancedb.pydantic.LanceModel):
|
||||
a: Optional[int]
|
||||
b: Optional[int]
|
||||
|
||||
|
||||
# TODO: huggingface,
|
||||
@pytest.mark.parametrize(
|
||||
"data",
|
||||
[
|
||||
lambda: [{"a": 1, "b": 2}],
|
||||
lambda: pa.RecordBatch.from_pylist([{"a": 1, "b": 2}]),
|
||||
lambda: pa.table({"a": [1], "b": [2]}),
|
||||
lambda: pa.table({"a": [1], "b": [2]}).to_reader(),
|
||||
lambda: iter(pa.table({"a": [1], "b": [2]}).to_batches()),
|
||||
lambda: (
|
||||
lance.write_dataset(
|
||||
pa.table({"a": [1], "b": [2]}),
|
||||
"memory://test",
|
||||
)
|
||||
),
|
||||
lambda: (
|
||||
lance.write_dataset(
|
||||
pa.table({"a": [1], "b": [2]}),
|
||||
"memory://test",
|
||||
).scanner()
|
||||
),
|
||||
lambda: pd.DataFrame({"a": [1], "b": [2]}),
|
||||
lambda: pl.DataFrame({"a": [1], "b": [2]}),
|
||||
lambda: pl.LazyFrame({"a": [1], "b": [2]}),
|
||||
lambda: [TestModel(a=1, b=2)],
|
||||
],
|
||||
ids=[
|
||||
"rows",
|
||||
"pa.RecordBatch",
|
||||
"pa.Table",
|
||||
"pa.RecordBatchReader",
|
||||
"batch_iter",
|
||||
"lance.LanceDataset",
|
||||
"lance.LanceScanner",
|
||||
"pd.DataFrame",
|
||||
"pl.DataFrame",
|
||||
"pl.LazyFrame",
|
||||
"pydantic",
|
||||
],
|
||||
)
|
||||
def test_into_pyarrow_table(data):
|
||||
expected = pa.table({"a": [1], "b": [2]})
|
||||
output = _into_pyarrow_table(data())
|
||||
assert output == expected
|
||||
|
||||
|
||||
def test_infer_target_schema():
|
||||
example = pa.schema(
|
||||
{
|
||||
"vec1": pa.list_(pa.float64(), 2),
|
||||
"vector": pa.list_(pa.float64()),
|
||||
}
|
||||
)
|
||||
data = pa.table(
|
||||
{
|
||||
"vec1": [[0.0] * 2],
|
||||
"vector": [[0.0] * 2],
|
||||
},
|
||||
schema=example,
|
||||
)
|
||||
expected = pa.schema(
|
||||
{
|
||||
"vec1": pa.list_(pa.float64(), 2),
|
||||
"vector": pa.list_(pa.float32(), 2),
|
||||
}
|
||||
)
|
||||
output = _infer_target_schema(data)
|
||||
assert output == expected
|
||||
|
||||
# Handle large list and use modal size
|
||||
# Most vectors are of length 2, so we should infer that as the target dimension
|
||||
example = pa.schema(
|
||||
{
|
||||
"vector": pa.large_list(pa.float64()),
|
||||
}
|
||||
)
|
||||
data = pa.table(
|
||||
{
|
||||
"vector": [[0.0] * 2, [0.0], [0.0] * 2],
|
||||
},
|
||||
schema=example,
|
||||
)
|
||||
expected = pa.schema(
|
||||
{
|
||||
"vector": pa.list_(pa.float32(), 2),
|
||||
}
|
||||
)
|
||||
output = _infer_target_schema(data)
|
||||
assert output == expected
|
||||
|
||||
# ignore if not list
|
||||
example = pa.schema(
|
||||
{
|
||||
"vector": pa.float64(),
|
||||
}
|
||||
)
|
||||
data = pa.table(
|
||||
{
|
||||
"vector": [0.0],
|
||||
},
|
||||
schema=example,
|
||||
)
|
||||
expected = example
|
||||
output = _infer_target_schema(data)
|
||||
assert output == expected
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"data",
|
||||
[
|
||||
[{"id": 1, "text": "hello"}],
|
||||
pa.RecordBatch.from_pylist([{"id": 1, "text": "hello"}]),
|
||||
pd.DataFrame({"id": [1], "text": ["hello"]}),
|
||||
pl.DataFrame({"id": [1], "text": ["hello"]}),
|
||||
],
|
||||
ids=["rows", "pa.RecordBatch", "pd.DataFrame", "pl.DataFrame"],
|
||||
)
|
||||
@pytest.mark.parametrize(
|
||||
"schema",
|
||||
[
|
||||
None,
|
||||
pa.schema(
|
||||
{
|
||||
"id": pa.int32(),
|
||||
"text": pa.string(),
|
||||
"vector": pa.list_(pa.float32(), 10),
|
||||
}
|
||||
),
|
||||
pa.schema(
|
||||
{
|
||||
"id": pa.int64(),
|
||||
"text": pa.string(),
|
||||
"vector": pa.list_(pa.float32(), 10),
|
||||
"extra": pa.int64(),
|
||||
}
|
||||
),
|
||||
],
|
||||
ids=["infer", "explicit", "subschema"],
|
||||
)
|
||||
@pytest.mark.parametrize("with_embedding", [True, False])
|
||||
def test_sanitize_data(
|
||||
data,
|
||||
schema: Optional[pa.Schema],
|
||||
with_embedding: bool,
|
||||
):
|
||||
if with_embedding:
|
||||
registry = EmbeddingFunctionRegistry.get_instance()
|
||||
registry.register("test")(MockTextEmbeddingFunction)
|
||||
conf = EmbeddingFunctionConfig(
|
||||
source_column="text",
|
||||
vector_column="vector",
|
||||
function=MockTextEmbeddingFunction(),
|
||||
)
|
||||
metadata = registry.get_table_metadata([conf])
|
||||
else:
|
||||
metadata = None
|
||||
|
||||
if schema is not None:
|
||||
to_remove = schema.get_field_index("extra")
|
||||
if to_remove >= 0:
|
||||
expected_schema = schema.remove(to_remove)
|
||||
else:
|
||||
expected_schema = schema
|
||||
else:
|
||||
expected_schema = pa.schema(
|
||||
{
|
||||
"id": pa.int64(),
|
||||
"text": pa.large_utf8()
|
||||
if isinstance(data, pl.DataFrame)
|
||||
else pa.string(),
|
||||
"vector": pa.list_(pa.float32(), 10),
|
||||
}
|
||||
)
|
||||
|
||||
if not with_embedding:
|
||||
to_remove = expected_schema.get_field_index("vector")
|
||||
if to_remove >= 0:
|
||||
expected_schema = expected_schema.remove(to_remove)
|
||||
|
||||
expected = pa.table(
|
||||
{
|
||||
"id": [1],
|
||||
"text": ["hello"],
|
||||
"vector": [[0.0] * 10],
|
||||
},
|
||||
schema=expected_schema,
|
||||
)
|
||||
|
||||
output_data = _sanitize_data(
|
||||
data,
|
||||
target_schema=schema,
|
||||
metadata=metadata,
|
||||
allow_subschema=True,
|
||||
)
|
||||
|
||||
assert output_data == expected
|
||||
|
||||
|
||||
def test_cast_to_target_schema():
|
||||
original_schema = pa.schema(
|
||||
{
|
||||
"id": pa.int32(),
|
||||
"struct": pa.struct(
|
||||
[
|
||||
pa.field("a", pa.int32()),
|
||||
]
|
||||
),
|
||||
"vector": pa.list_(pa.float64()),
|
||||
"vec1": pa.list_(pa.float64(), 2),
|
||||
"vec2": pa.list_(pa.float32(), 2),
|
||||
}
|
||||
)
|
||||
data = pa.table(
|
||||
{
|
||||
"id": [1],
|
||||
"struct": [{"a": 1}],
|
||||
"vector": [[0.0] * 2],
|
||||
"vec1": [[0.0] * 2],
|
||||
"vec2": [[0.0] * 2],
|
||||
},
|
||||
schema=original_schema,
|
||||
)
|
||||
|
||||
target = pa.schema(
|
||||
{
|
||||
"id": pa.int64(),
|
||||
"struct": pa.struct(
|
||||
[
|
||||
pa.field("a", pa.int64()),
|
||||
]
|
||||
),
|
||||
"vector": pa.list_(pa.float32(), 2),
|
||||
"vec1": pa.list_(pa.float32(), 2),
|
||||
"vec2": pa.list_(pa.float32(), 2),
|
||||
}
|
||||
)
|
||||
output = _cast_to_target_schema(data, target)
|
||||
expected = pa.table(
|
||||
{
|
||||
"id": [1],
|
||||
"struct": [{"a": 1}],
|
||||
"vector": [[0.0] * 2],
|
||||
"vec1": [[0.0] * 2],
|
||||
"vec2": [[0.0] * 2],
|
||||
},
|
||||
schema=target,
|
||||
)
|
||||
|
||||
# Data can be a subschema of the target
|
||||
target = pa.schema(
|
||||
{
|
||||
"id": pa.int64(),
|
||||
"struct": pa.struct(
|
||||
[
|
||||
pa.field("a", pa.int64()),
|
||||
# Additional nested field
|
||||
pa.field("b", pa.int64()),
|
||||
]
|
||||
),
|
||||
"vector": pa.list_(pa.float32(), 2),
|
||||
"vec1": pa.list_(pa.float32(), 2),
|
||||
"vec2": pa.list_(pa.float32(), 2),
|
||||
# Additional field
|
||||
"extra": pa.int64(),
|
||||
}
|
||||
)
|
||||
with pytest.raises(Exception):
|
||||
_cast_to_target_schema(data, target)
|
||||
output = _cast_to_target_schema(data, target, allow_subschema=True)
|
||||
expected_schema = pa.schema(
|
||||
{
|
||||
"id": pa.int64(),
|
||||
"struct": pa.struct(
|
||||
[
|
||||
pa.field("a", pa.int64()),
|
||||
]
|
||||
),
|
||||
"vector": pa.list_(pa.float32(), 2),
|
||||
"vec1": pa.list_(pa.float32(), 2),
|
||||
"vec2": pa.list_(pa.float32(), 2),
|
||||
}
|
||||
)
|
||||
expected = pa.table(
|
||||
{
|
||||
"id": [1],
|
||||
"struct": [{"a": 1}],
|
||||
"vector": [[0.0] * 2],
|
||||
"vec1": [[0.0] * 2],
|
||||
"vec2": [[0.0] * 2],
|
||||
},
|
||||
schema=expected_schema,
|
||||
)
|
||||
assert output == expected
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lancedb-node"
|
||||
version = "0.14.1"
|
||||
version = "0.14.2-beta.0"
|
||||
description = "Serverless, low-latency vector database for AI applications"
|
||||
license.workspace = true
|
||||
edition.workspace = true
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lancedb"
|
||||
version = "0.14.1"
|
||||
version = "0.14.2-beta.0"
|
||||
edition.workspace = true
|
||||
description = "LanceDB: A serverless, low-latency vector database for AI applications"
|
||||
license.workspace = true
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
//! LanceDB Table APIs
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -48,6 +49,7 @@ use lance_index::vector::pq::PQBuildParams;
|
||||
use lance_index::vector::sq::builder::SQBuildParams;
|
||||
use lance_index::DatasetIndexExt;
|
||||
use lance_index::IndexType;
|
||||
use lance_table::format::Manifest;
|
||||
use lance_table::io::commit::ManifestNamingScheme;
|
||||
use log::info;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -1697,6 +1699,49 @@ impl NativeTable {
|
||||
dataset.migrate_manifest_paths_v2().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the table manifest
|
||||
pub async fn manifest(&self) -> Result<Manifest> {
|
||||
let dataset = self.dataset.get().await?;
|
||||
Ok(dataset.manifest().clone())
|
||||
}
|
||||
|
||||
/// Update key-value pairs in config.
|
||||
pub async fn update_config(
|
||||
&self,
|
||||
upsert_values: impl IntoIterator<Item = (String, String)>,
|
||||
) -> Result<()> {
|
||||
let mut dataset = self.dataset.get_mut().await?;
|
||||
dataset.update_config(upsert_values).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Delete keys from the config
|
||||
pub async fn delete_config_keys(&self, delete_keys: &[&str]) -> Result<()> {
|
||||
let mut dataset = self.dataset.get_mut().await?;
|
||||
dataset.delete_config_keys(delete_keys).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Update schema metadata
|
||||
pub async fn replace_schema_metadata(
|
||||
&self,
|
||||
upsert_values: impl IntoIterator<Item = (String, String)>,
|
||||
) -> Result<()> {
|
||||
let mut dataset = self.dataset.get_mut().await?;
|
||||
dataset.replace_schema_metadata(upsert_values).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Update field metadata
|
||||
pub async fn replace_field_metadata(
|
||||
&self,
|
||||
new_values: impl IntoIterator<Item = (u32, HashMap<String, String>)>,
|
||||
) -> Result<()> {
|
||||
let mut dataset = self.dataset.get_mut().await?;
|
||||
dataset.replace_field_metadata(new_values).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -3308,4 +3353,197 @@ mod tests {
|
||||
table.checkout(version).await.unwrap();
|
||||
assert!(table.add(some_sample_data()).execute().await.is_err())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_update_dataset_config() {
|
||||
let tmp_dir = tempdir().unwrap();
|
||||
let uri = tmp_dir.path().to_str().unwrap();
|
||||
|
||||
let conn = ConnectBuilder::new(uri)
|
||||
.read_consistency_interval(Duration::from_secs(0))
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let table = conn
|
||||
.create_table("my_table", some_sample_data())
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
let native_tbl = table.as_native().unwrap();
|
||||
|
||||
let manifest = native_tbl.manifest().await.unwrap();
|
||||
assert_eq!(manifest.config.len(), 0);
|
||||
|
||||
native_tbl
|
||||
.update_config(vec![("test_key1".to_string(), "test_val1".to_string())])
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let manifest = native_tbl.manifest().await.unwrap();
|
||||
assert_eq!(manifest.config.len(), 1);
|
||||
assert_eq!(
|
||||
manifest.config.get("test_key1"),
|
||||
Some(&"test_val1".to_string())
|
||||
);
|
||||
|
||||
native_tbl
|
||||
.update_config(vec![("test_key2".to_string(), "test_val2".to_string())])
|
||||
.await
|
||||
.unwrap();
|
||||
let manifest = native_tbl.manifest().await.unwrap();
|
||||
assert_eq!(manifest.config.len(), 2);
|
||||
assert_eq!(
|
||||
manifest.config.get("test_key1"),
|
||||
Some(&"test_val1".to_string())
|
||||
);
|
||||
assert_eq!(
|
||||
manifest.config.get("test_key2"),
|
||||
Some(&"test_val2".to_string())
|
||||
);
|
||||
|
||||
native_tbl
|
||||
.update_config(vec![(
|
||||
"test_key2".to_string(),
|
||||
"test_val2_update".to_string(),
|
||||
)])
|
||||
.await
|
||||
.unwrap();
|
||||
let manifest = native_tbl.manifest().await.unwrap();
|
||||
assert_eq!(manifest.config.len(), 2);
|
||||
assert_eq!(
|
||||
manifest.config.get("test_key1"),
|
||||
Some(&"test_val1".to_string())
|
||||
);
|
||||
assert_eq!(
|
||||
manifest.config.get("test_key2"),
|
||||
Some(&"test_val2_update".to_string())
|
||||
);
|
||||
|
||||
native_tbl.delete_config_keys(&["test_key1"]).await.unwrap();
|
||||
let manifest = native_tbl.manifest().await.unwrap();
|
||||
assert_eq!(manifest.config.len(), 1);
|
||||
assert_eq!(
|
||||
manifest.config.get("test_key2"),
|
||||
Some(&"test_val2_update".to_string())
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_schema_metadata_config() {
|
||||
let tmp_dir = tempdir().unwrap();
|
||||
let uri = tmp_dir.path().to_str().unwrap();
|
||||
|
||||
let conn = ConnectBuilder::new(uri)
|
||||
.read_consistency_interval(Duration::from_secs(0))
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
let table = conn
|
||||
.create_table("my_table", some_sample_data())
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let native_tbl = table.as_native().unwrap();
|
||||
let schema = native_tbl.schema().await.unwrap();
|
||||
let metadata = schema.metadata();
|
||||
assert_eq!(metadata.len(), 0);
|
||||
|
||||
native_tbl
|
||||
.replace_schema_metadata(vec![("test_key1".to_string(), "test_val1".to_string())])
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let schema = native_tbl.schema().await.unwrap();
|
||||
let metadata = schema.metadata();
|
||||
assert_eq!(metadata.len(), 1);
|
||||
assert_eq!(metadata.get("test_key1"), Some(&"test_val1".to_string()));
|
||||
|
||||
native_tbl
|
||||
.replace_schema_metadata(vec![
|
||||
("test_key1".to_string(), "test_val1_update".to_string()),
|
||||
("test_key2".to_string(), "test_val2".to_string()),
|
||||
])
|
||||
.await
|
||||
.unwrap();
|
||||
let schema = native_tbl.schema().await.unwrap();
|
||||
let metadata = schema.metadata();
|
||||
assert_eq!(metadata.len(), 2);
|
||||
assert_eq!(
|
||||
metadata.get("test_key1"),
|
||||
Some(&"test_val1_update".to_string())
|
||||
);
|
||||
assert_eq!(metadata.get("test_key2"), Some(&"test_val2".to_string()));
|
||||
|
||||
native_tbl
|
||||
.replace_schema_metadata(vec![(
|
||||
"test_key2".to_string(),
|
||||
"test_val2_update".to_string(),
|
||||
)])
|
||||
.await
|
||||
.unwrap();
|
||||
let schema = native_tbl.schema().await.unwrap();
|
||||
let metadata = schema.metadata();
|
||||
assert_eq!(
|
||||
metadata.get("test_key2"),
|
||||
Some(&"test_val2_update".to_string())
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
pub async fn test_field_metadata_update() {
|
||||
let tmp_dir = tempdir().unwrap();
|
||||
let uri = tmp_dir.path().to_str().unwrap();
|
||||
|
||||
let conn = ConnectBuilder::new(uri)
|
||||
.read_consistency_interval(Duration::from_secs(0))
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
let table = conn
|
||||
.create_table("my_table", some_sample_data())
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let native_tbl = table.as_native().unwrap();
|
||||
let schema = native_tbl.schema().await.unwrap();
|
||||
|
||||
let (field_idx, field) = schema.column_with_name("i").unwrap();
|
||||
let field_metadata = field.metadata();
|
||||
assert_eq!(field_metadata.len(), 0);
|
||||
|
||||
native_tbl
|
||||
.replace_schema_metadata(vec![(
|
||||
"test_key2".to_string(),
|
||||
"test_val2_update".to_string(),
|
||||
)])
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let schema = native_tbl.schema().await.unwrap();
|
||||
let metadata = schema.metadata();
|
||||
assert_eq!(metadata.len(), 1);
|
||||
assert_eq!(
|
||||
metadata.get("test_key2"),
|
||||
Some(&"test_val2_update".to_string())
|
||||
);
|
||||
|
||||
let mut new_field_metadata = HashMap::<String, String>::new();
|
||||
new_field_metadata.insert("test_field_key1".into(), "test_field_val1".into());
|
||||
native_tbl
|
||||
.replace_field_metadata(vec![(field_idx as u32, new_field_metadata)])
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let schema = native_tbl.schema().await.unwrap();
|
||||
let (_field_idx, field) = schema.column_with_name("i").unwrap();
|
||||
let field_metadata = field.metadata();
|
||||
assert_eq!(field_metadata.len(), 1);
|
||||
assert_eq!(
|
||||
field_metadata.get("test_field_key1"),
|
||||
Some(&"test_field_val1".to_string())
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user