Mirror of https://github.com/lancedb/lancedb.git (synced 2026-01-04 19:02:58 +00:00)

Compare commits: v0.1.2-dev...v0.1.2-dev (16 commits)
Commits:
be910485e7
0028b95fd8
102f1d7404
500aa7b002
8aa0f6b4ba
140aa32e08
a067c3dc85
e762a4db4b
5e0ff01879
84356220dd
6c03662c68
5e098f4fe5
f485378ea4
f923cfe47f
06cb7b6458
bdef634954
.github/workflows/make_release_commit.yml (vendored, new file, 70 lines)
@@ -0,0 +1,70 @@
+name: Create release commit
+
+on:
+  workflow_dispatch:
+    inputs:
+      dry_run:
+        description: 'Just create the local commit/tags but do not push it'
+        required: true
+        default: "false"
+        type: choice
+        options:
+          - "true"
+          - "false"
+      part:
+        description: 'What kind of release is this?'
+        required: true
+        default: 'patch'
+        type: choice
+        options:
+          - patch
+          - minor
+          - major
+
+jobs:
+  bump-version:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out main
+        uses: actions/checkout@v3
+        with:
+          ref: main
+          persist-credentials: false
+          fetch-depth: 0
+          lfs: true
+      - name: Install cargo utils
+        run: cargo install cargo-bump cargo-get
+      - name: Bump vectordb
+        working-directory: rust/vectordb
+        run: |
+          cargo bump ${{ inputs.part }}
+          echo "CRATE_VERSION=$(cargo get version)" >> $GITHUB_ENV
+      - name: Bump rust/ffi/node
+        working-directory: rust/ffi/node
+        run: |
+          cargo bump ${{ inputs.part }}
+          echo "FFI_CRATE_VERSION=$(cargo get version)" >> $GITHUB_ENV
+      - name: Bump node
+        working-directory: node
+        run: |
+          npm version ${{ inputs.part }}
+          echo "NPM_PACKAGE_VERSION=$(cat package.json | jq -r '.version')" >> $GITHUB_ENV
+      - name: Create tag
+        run: |
+          if [ "$CRATE_VERSION" != "$FFI_CRATE_VERSION" ]; then
+            echo "Version mismatch between rust/vectordb and rust/ffi/node"
+            exit 1
+          fi
+          if [ "$CRATE_VERSION" != "$NPM_PACKAGE_VERSION" ]; then
+            echo "Version mismatch between rust/vectordb and node"
+            exit 1
+          fi
+          export TAG="v$CRATE_VERSION"
+          git tag $TAG
+      - name: Push new version and tag
+        if: ${{ inputs.dry_run == 'false' }}
+        uses: ad-m/github-push-action@master
+        with:
+          github_token: ${{ secrets.RELEASE_TOKEN }}
+          branch: main
+          tags: true
.github/workflows/python.yml (vendored, 6 changed lines)
@@ -30,7 +30,7 @@ jobs:
           python-version: 3.${{ matrix.python-minor-version }}
       - name: Install lancedb
         run: |
-          pip install -e .
+          pip install -e ".[fts]"
           pip install pytest
       - name: Run tests
         run: pytest -x -v --durations=30 tests
@@ -49,10 +49,10 @@ jobs:
       - name: Set up Python
        uses: actions/setup-python@v4
        with:
-          python-version: "3.10"
+          python-version: "3.11"
      - name: Install lancedb
        run: |
-          pip install -e .
+          pip install -e ".[fts]"
          pip install pytest
      - name: Run tests
        run: pytest -x -v --durations=30 tests
.github/workflows/release.yml (vendored, 87 changed lines)
@@ -1,6 +1,7 @@
 name: Prepare Release
 
-# Based on https://github.com/dherman/neon-prebuild-example/blob/eaa4d33d682e5eb7abbc3da7aed153a1b1acb1b3/.github/workflows/publish.yml
+# TODO: bump versions in CI
+# NOTE: Python is a separate release for now.
 
 on:
   push:
@@ -38,34 +39,8 @@ jobs:
     - uses: softprops/action-gh-release@v1
       with:
         draft: true
-        files: target/vectordb-*.crate
+        files: target/package/vectordb-*.crate
+        fail_on_unmatched_files: true
-  python:
-    runs-on: ubuntu-latest
-    needs: draft-release
-    defaults:
-      run:
-        shell: bash
-        working-directory: python
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-          lfs: true
-      - name: Set up Python
-        uses: actions/setup-python@v4
-        with:
-          python-version: "3.10"
-      - name: Build wheel
-        run: |
-          pip install wheel
-          python setup.py sdist bdist_wheel
-      - uses: softprops/action-gh-release@v1
-        with:
-          draft: true
-          files: |
-            python/dist/lancedb-*.tar.gz
-            python/dist/lancedb-*.whl
-
   node:
     runs-on: ubuntu-latest
@@ -95,6 +70,7 @@ jobs:
       with:
         draft: true
         files: node/vectordb-*.tgz
+        fail_on_unmatched_files: true
 
   node-macos:
     runs-on: macos-12
@@ -112,49 +88,66 @@ jobs:
       run: |
         cd node
         npm ci
+    - name: Install rustup target
+      if: ${{ matrix.target == 'aarch64-apple-darwin' }}
+      run: rustup target add aarch64-apple-darwin
     - name: Build MacOS native node modules
       run: bash ci/build_macos_artifacts.sh ${{ matrix.target }}
     - uses: softprops/action-gh-release@v1
       with:
         draft: true
         files: node/dist/vectordb-darwin*.tgz
+        fail_on_unmatched_files: true
 
   node-linux:
+    name: node-linux (${{ matrix.arch }}-unknown-linux-${{ matrix.libc }})
     runs-on: ubuntu-latest
     needs: draft-release
     strategy:
       fail-fast: false
       matrix:
-        target:
-          - x86_64-unknown-linux-gnu
-          - aarch64-unknown-linux-gnu
-          - aarch64-unknown-linux-musl
-          - x86_64-unknown-linux-musl
+        libc:
+          - gnu
+          # TODO: re-enable musl once we have refactored to pre-built containers
+          # Right now we have to build node from source which is too expensive.
+          # - musl
+        arch:
+          - x86_64
+          - aarch64
     steps:
       - name: Checkout
         uses: actions/checkout@v2
-      - name: Setup Rust
-        uses: ATiltedTree/setup-rust@v1
-        with:
-          rust-version: stable
-      - name: Install system dependencies
-        run: |
-          sudo apt update
-          sudo apt install -y protobuf-compiler libssl-dev
-          cargo install cross
-      - name: Install npm dependencies
-        run: |
-          cd node
-          npm ci
-      - name: Build Linux native node modules
-        run: bash ci/build_linux_artifacts.sh ${{ matrix.target }}
+      - name: Change owner to root (for npm)
+        # The docker container is run as root, so we need the files to be owned by root
+        # Otherwise npm is a nightmare: https://github.com/npm/cli/issues/3773
+        run: sudo chown -R root:root .
+      - name: Set up QEMU
+        if: ${{ matrix.arch == 'aarch64' }}
+        uses: docker/setup-qemu-action@v2
+        with:
+          platforms: arm64
+      - name: Build Linux GNU native node modules
+        if: ${{ matrix.libc == 'gnu' }}
+        run: |
+          docker run \
+            -v $(pwd):/io -w /io \
+            quay.io/pypa/manylinux2014_${{ matrix.arch }} \
+            bash ci/build_linux_artifacts.sh ${{ matrix.arch }}-unknown-linux-gnu
+      - name: Build musl Linux native node modules
+        if: ${{ matrix.libc == 'musl' }}
+        run: |
+          docker run --platform linux/arm64/v8 \
+            -v $(pwd):/io -w /io \
+            quay.io/pypa/musllinux_1_1_${{ matrix.arch }} \
+            bash ci/build_linux_artifacts.sh ${{ matrix.arch }}-unknown-linux-musl
      - uses: softprops/action-gh-release@v1
        with:
          draft: true
          files: node/dist/vectordb-linux*.tgz
+          fail_on_unmatched_files: true
 
   release:
-    needs: [python, node, node-macos, node-linux, rust]
+    needs: [rust, node, node-macos, node-linux]
     runs-on: ubuntu-latest
     steps:
       - uses: actions/download-artifact@v3
Cargo.lock (generated, 2 additions)
@@ -3359,8 +3359,10 @@ name = "vectordb"
 version = "0.1.2"
 dependencies = [
  "arrow-array",
+ "arrow-data",
  "arrow-schema",
  "lance",
+ "rand",
  "tempfile",
  "tokio",
 ]
Cross.toml (deleted, 14 lines)
@@ -1,14 +0,0 @@
-# These make sure our builds are compatible with old glibc versions.
-[target.x86_64-unknown-linux-gnu]
-pre-build = [
-    "dpkg --add-architecture $CROSS_DEB_ARCH",
-    "apt-get update && apt-get install --assume-yes libssl-dev:$CROSS_DEB_ARCH protobuf-compiler",
-]
-image = "ghcr.io/cross-rs/x86_64-unknown-linux-gnu:main-centos"
-
-[target.aarch64-unknown-linux-gnu]
-pre-build = [
-    "dpkg --add-architecture $CROSS_DEB_ARCH",
-    "apt-get update && apt-get install --assume-yes libssl-dev:$CROSS_DEB_ARCH protobuf-compiler",
-]
-image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:main-centos"
ci/build_linux_artifacts.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
 # Builds the Linux artifacts (node binaries).
 # Usage: ./build_linux_artifacts.sh [target]
 # Targets supported:
@@ -6,31 +7,80 @@
 # - aarch64-unknown-linux-musl
 # - x86_64-unknown-linux-musl
 
-# On MacOS, need to run in a linux container:
-# docker run -v $(pwd):/io -w /io
-
-# Must run rustup toolchain install stable-x86_64-unknown-linux-gnu --force-non-host
+# TODO: refactor this into a Docker container we can pull
 
 set -e
 
-build_node_binaries() {
-  pushd node
-
-  for target in $1
-  do
-    echo "Building node library for $target"
-    # cross doesn't yet pass this down to Docker, so we do it ourselves.
-    export CROSS_CONTAINER_OPTS="--platform linux/amd64"
-    npm run cross-release -- --target $target
-    npm run pack-build -- --target $target
-  done
-
-  popd
-}
-
-if [ -n "$1" ]; then
-  targets=$1
-else
-  # targets="x86_64-unknown-linux-gnu aarch64-unknown-linux-gnu aarch64-unknown-linux-musl x86_64-unknown-linux-musl"
-  targets="aarch64-unknown-linux-gnu"
-fi
-
-build_node_binaries $targets
+setup_dependencies() {
+  echo "Installing system dependencies..."
+  if [[ $1 == *musl ]]; then
+    # musllinux
+    apk add openssl-dev
+  else
+    # manylinux2014
+    yum install -y openssl-devel unzip
+  fi
+
+  if [[ $1 == x86_64* ]]; then
+    ARCH=x86_64
+  else
+    # gnu target
+    ARCH=aarch_64
+  fi
+
+  # Install new enough protobuf (yum-provided is old)
+  PB_REL=https://github.com/protocolbuffers/protobuf/releases
+  PB_VERSION=23.1
+  curl -LO $PB_REL/download/v$PB_VERSION/protoc-$PB_VERSION-linux-$ARCH.zip
+  unzip protoc-$PB_VERSION-linux-$ARCH.zip -d /usr/local
+}
+
+install_node() {
+  echo "Installing node..."
+  curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.34.0/install.sh | bash
+  source "$HOME"/.bashrc
+
+  if [[ $1 == *musl ]]; then
+    # This node version is 15, we need 16 or higher:
+    # apk add nodejs-current npm
+    # So instead we install from source (nvm doesn't provide binaries for musl):
+    nvm install -s --no-progress 17
+  else
+    nvm install --no-progress 17 # latest that supports glibc 2.17
+  fi
+}
+
+install_rust() {
+  echo "Installing rust..."
+  curl https://sh.rustup.rs -sSf | bash -s -- -y
+  export PATH="$PATH:/root/.cargo/bin"
+}
+
+build_node_binary() {
+  echo "Building node library for $1..."
+  pushd node
+
+  npm ci
+
+  if [[ $1 == *musl ]]; then
+    # This is needed for cargo to allow build cdylibs with musl
+    export RUSTFLAGS="-C target-feature=-crt-static"
+  fi
+  # We don't pass in target, since the native target here already matches
+  # and openblas-src doesn't do well with cross-compilation.
+  npm run build-release
+  npm run pack-build
+
+  popd
+}
+
+TARGET=${1:-x86_64-unknown-linux-gnu}
+# Others:
+# aarch64-unknown-linux-gnu
+# x86_64-unknown-linux-musl
+# aarch64-unknown-linux-musl
+
+setup_dependencies $TARGET
+install_node $TARGET
+install_rust
+build_node_binary $TARGET
ci/build_macos_artifacts.sh
@@ -2,6 +2,20 @@
 # Usage: ./build_macos_artifacts.sh [target]
 # Targets supported: x86_64-apple-darwin aarch64-apple-darwin
 
+prebuild_rust() {
+  # Building here for the sake of easier debugging.
+  pushd rust/ffi/node
+
+  for target in $1
+  do
+    echo "Building rust library for $target"
+    export RUST_BACKTRACE=1
+    cargo build --release --target $target
+  done
+
+  popd
+}
+
 build_node_binaries() {
   pushd node
@@ -19,4 +33,6 @@
 else
   targets="x86_64-apple-darwin aarch64-apple-darwin"
 fi
 
+prebuild_rust $targets
 build_node_binaries $targets
ci/release_process.md (new file, 90 lines)
@@ -0,0 +1,90 @@
+# How to release
+
+This is for the Rust crate and Node module. For now, the Python module is
+released separately.
+
+The release is started by bumping the versions and pushing a new tag. To do this
+automatically, use the `make_release_commit` GitHub action.
+
+When the tag is pushed, GitHub actions will start building the libraries and
+will upload them to a draft release.
+
+While those jobs are running, edit the release notes as needed. For example,
+bring relevant new features and bugfixes to the top of the notes and the testing
+and CI changes to the bottom.
+
+Once the jobs have finished, the release will be marked as not draft and the
+artifacts will be released to crates.io, NPM, and PyPI.
+
+## Manual process
+
+You can also build the artifacts locally on a MacOS machine.
+
+### Build the MacOS release libraries
+
+One-time setup:
+
+```shell
+rustup target add x86_64-apple-darwin aarch64-apple-darwin
+```
+
+To build:
+
+```shell
+bash ci/build_macos_artifacts.sh
+```
+
+### Build the Linux release libraries
+
+To build a Linux library, we need to use docker with a different build script:
+
+```shell
+ARCH=aarch64
+docker run \
+  -v $(pwd):/io -w /io \
+  quay.io/pypa/manylinux2014_$ARCH \
+  bash ci/build_linux_artifacts.sh $ARCH-unknown-linux-gnu
+```
+
+You can change `ARCH` to `x86_64`.
+
+Similar script for musl binaries (not yet working):
+
+```shell
+ARCH=aarch64
+docker run \
+  --user $(id -u) \
+  -v $(pwd):/io -w /io \
+  quay.io/pypa/musllinux_1_1_$ARCH \
+  bash ci/build_linux_artifacts.sh $ARCH-unknown-linux-musl
+```
+
+<!--
+For debugging, use these snippets:
+
+```shell
+ARCH=aarch64
+docker run -it \
+  -v $(pwd):/io -w /io \
+  quay.io/pypa/manylinux2014_$ARCH \
+  bash
+```
+
+```shell
+ARCH=aarch64
+docker run -it \
+  -v $(pwd):/io -w /io \
+  quay.io/pypa/musllinux_1_1_$ARCH \
+  bash
+```
+
+Note: musllinux_1_1 is Alpine Linux 3.12
+-->
+
+```
+docker run \
+  -v $(pwd):/io -w /io \
+  quay.io/pypa/musllinux_1_1_aarch64 \
+  bash alpine_repro.sh
+```
ci/ubuntu_build.dockerfile (deleted, 31 lines)
@@ -1,31 +0,0 @@
-# On MacOS, need to run in a linux container:
-# cat ci/ubuntu_build.dockerfile | docker build -t lancedb-node-build -
-# docker run -v /var/run/docker.sock:/var/run/docker.sock -v $(pwd):/io -w /io lancedb-node-build bash ci/build_linux_artifacts.sh
-FROM ubuntu:20.04
-
-ARG DEBIAN_FRONTEND=noninteractive
-ENV TZ=Europe/Moscow
-
-RUN apt update && apt install -y protobuf-compiler libssl-dev build-essential curl \
-    software-properties-common npm docker.io
-
-# Install rust
-RUN curl https://sh.rustup.rs -sSf | bash -s -- -y
-
-ENV PATH="/root/.cargo/bin:${PATH}"
-
-# Install cross
-# https://github.com/cross-rs/cross/issues/1257#issuecomment-1544553706
-RUN cargo install cross --git https://github.com/cross-rs/cross
-
-# Install additional build targets
-RUN rustup target add x86_64-unknown-linux-gnu aarch64-unknown-linux-gnu aarch64-unknown-linux-musl x86_64-unknown-linux-musl
-
-# Install node
-RUN npm install npm@latest -g && \
-    npm install n -g && \
-    n latest
-
-# set CROSS_CONTAINER_IN_CONTAINER to inform `cross` that it is executed from within a container
-ENV CROSS_CONTAINER_IN_CONTAINER=true
-ENV CROSS_CONTAINER_ENGINE_NO_BUILDKIT=1
docs/mkdocs.yml (1 addition)
@@ -19,6 +19,7 @@ nav:
   - Basics: basic.md
   - Embeddings: embedding.md
   - Indexing: ann_indexes.md
+  - Full-text search: fts.md
   - Integrations: integrations.md
   - Python API: python.md
docs/src/fts.md (new file, 50 lines)
@@ -0,0 +1,50 @@
+# [EXPERIMENTAL] Full text search
+
+LanceDB now provides experimental support for full text search.
+This is currently Python only. We plan to push the integration down to Rust in the future
+to make this available for JS as well.
+
+## Installation
+
+To use full text search, you must install the fts optional dependencies:
+
+`pip install lancedb[fts]`
+
+## Quickstart
+
+Assume:
+1. `table` is a LanceDB Table
+2. `text` is the name of the Table column that we want to index
+
+To create the index:
+
+```python
+table.create_fts_index("text")
+```
+
+To search:
+
+```python
+df = table.search("puppy").limit(10).select(["text"]).to_df()
+```
+
+LanceDB automatically looks for an FTS index if the input is str.
+
+## Multiple text columns
+
+If you have multiple columns to index, pass them all as a list to `create_fts_index`:
+
+```python
+table.create_fts_index(["text1", "text2"])
+```
+
+Note that the search API call does not change - you can search over all indexed columns at once.
+
+## Current limitations
+
+1. Currently we do not yet support incremental writes.
+   If you add data after fts index creation, it won't be reflected
+   in search results until you do a full reindex.
+
+2. We currently only support local filesystem paths for the fts index.
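Putting the quickstart together end to end, a minimal sketch might look like the following; the database path, table name, and sample rows are illustrative only, not part of this change:

```python
import lancedb
import numpy as np
import pandas as pd

# Illustrative path and data; assumes `pip install lancedb[fts]` has been run.
db = lancedb.connect("/tmp/lancedb-fts-demo")
table = db.create_table(
    "docs",
    data=pd.DataFrame({
        "vector": [np.random.randn(4) for _ in range(2)],
        "text": ["a puppy runs merrily", "a car drives crazily"],
    }),
)

table.create_fts_index("text")  # full (re)index; incremental updates are not supported yet
df = table.search("puppy").limit(10).select(["text"]).to_df()  # a str query takes the FTS path
```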
docs/src/index.md (1 addition)
@@ -45,5 +45,6 @@ We will be adding completed demo apps built using LanceDB.
 * [`Basic Operations`](basic.md) - basic functionality of LanceDB.
 * [`Embedding Functions`](embedding.md) - functions for working with embeddings.
 * [`Indexing`](ann_indexes.md) - create vector indexes to speed up queries.
+* [`Full text search`](fts.md) - [EXPERIMENTAL] full-text search API
 * [`Ecosystem Integrations`](integrations.md) - integrating LanceDB with python data tooling ecosystem.
 * [`API Reference`](python.md) - detailed documentation for the LanceDB Python SDK.
node/README.md
@@ -9,7 +9,8 @@ npm install vectordb
 ```
 
 This will download the appropriate native library for your platform. We currently
-support x86_64 Linux, Intel MacOS, and ARM (M1/M2) MacOS.
+support x86_64 Linux, aarch64 Linux, Intel MacOS, and ARM (M1/M2) MacOS. We do not
+yet support Windows or musl-based Linux (such as Alpine Linux).
 
 ## Usage
node/package.json
@@ -11,7 +11,8 @@
     "cross-release": "cargo-cp-artifact --artifact cdylib vectordb-node index.node -- cross build --message-format=json --release -p vectordb-node",
     "test": "mocha -recursive dist/test",
     "lint": "eslint src --ext .js,.ts",
-    "pack-build": "neon pack-build"
+    "pack-build": "neon pack-build",
+    "check-npm": "printenv && which node && which npm && npm --version"
   },
   "repository": {
     "type": "git",
node/src/arrow.ts
@@ -18,7 +18,7 @@ import {
   List,
   makeBuilder,
   RecordBatchFileWriter,
-  Table,
+  Table, Utf8,
   type Vector,
   vectorFromArray
 } from 'apache-arrow'
@@ -52,7 +52,12 @@ export function convertToTable (data: Array<Record<string, unknown>>): Table {
     for (const datum of data) {
       values.push(datum[columnsKey])
     }
-    records[columnsKey] = vectorFromArray(values)
+    if (typeof values[0] === 'string') {
+      // `vectorFromArray` converts strings into dictionary vectors, forcing it back to a string column
+      records[columnsKey] = vectorFromArray(values, new Utf8())
+    } else {
+      records[columnsKey] = vectorFromArray(values)
+    }
   }
 }
node/src/index.ts
@@ -21,7 +21,7 @@
 import { fromRecordsToBuffer } from './arrow'
 
 // eslint-disable-next-line @typescript-eslint/no-var-requires
-const { databaseNew, databaseTableNames, databaseOpenTable, tableCreate, tableSearch, tableAdd } = require('../native.js')
+const { databaseNew, databaseTableNames, databaseOpenTable, tableCreate, tableSearch, tableAdd, tableCreateVectorIndex } = require('../native.js')
 
 /**
  * Connect to a LanceDB instance at the given URI
@@ -100,64 +100,154 @@ export class Table {
   }
 
   /**
-   * Insert records into this Table
-   * @param data Records to be inserted into the Table
+   * Insert records into this Table.
    *
-   * @param mode Append / Overwrite existing records. Default: Append
+   * @param data Records to be inserted into the Table
    * @return The number of rows added to the table
    */
   async add (data: Array<Record<string, unknown>>): Promise<number> {
     return tableAdd.call(this._tbl, await fromRecordsToBuffer(data), WriteMode.Append.toString())
   }
 
+  /**
+   * Insert records into this Table, replacing its contents.
+   *
+   * @param data Records to be inserted into the Table
+   * @return The number of rows added to the table
+   */
   async overwrite (data: Array<Record<string, unknown>>): Promise<number> {
     return tableAdd.call(this._tbl, await fromRecordsToBuffer(data), WriteMode.Overwrite.toString())
   }
+
+  async create_index (indexParams: VectorIndexParams): Promise<any> {
+    return tableCreateVectorIndex.call(this._tbl, indexParams)
+  }
 }
 
+interface IvfPQIndexConfig {
+  /**
+   * The column to be indexed
+   */
+  column?: string
+
+  /**
+   * A unique name for the index
+   */
+  index_name?: string
+
+  /**
+   * Metric type, L2 or Cosine
+   */
+  metric_type?: MetricType
+
+  /**
+   * The number of partitions of this index
+   */
+  num_partitions?: number
+
+  /**
+   * The max number of iterations for kmeans training.
+   */
+  max_iters?: number
+
+  /**
+   * Train as optimized product quantization.
+   */
+  use_opq?: boolean
+
+  /**
+   * Number of subvectors to build PQ code
+   */
+  num_sub_vectors?: number
+
+  /**
+   * The number of bits to present one PQ centroid.
+   */
+  num_bits?: number
+
+  /**
+   * Max number of iterations to train OPQ, if `use_opq` is true.
+   */
+  max_opq_iters?: number
+
+  type: 'ivf_pq'
+}
+
+export type VectorIndexParams = IvfPQIndexConfig
+
 /**
  * A builder for nearest neighbor queries for LanceDB.
  */
 export class Query {
   private readonly _tbl: any
-  private readonly _query_vector: number[]
+  private readonly _queryVector: number[]
   private _limit: number
-  private readonly _refine_factor?: number
-  private readonly _nprobes: number
+  private _refineFactor?: number
+  private _nprobes: number
   private readonly _columns?: string[]
   private _filter?: string
-  private readonly _metric = 'L2'
+  private _metricType?: MetricType
 
   constructor (tbl: any, queryVector: number[]) {
     this._tbl = tbl
-    this._query_vector = queryVector
+    this._queryVector = queryVector
     this._limit = 10
     this._nprobes = 20
-    this._refine_factor = undefined
+    this._refineFactor = undefined
     this._columns = undefined
     this._filter = undefined
+    this._metricType = undefined
   }
 
+  /***
+   * Sets the number of results that will be returned
+   * @param value number of results
+   */
   limit (value: number): Query {
     this._limit = value
     return this
   }
 
+  /**
+   * Refine the results by reading extra elements and re-ranking them in memory.
+   * @param value refine factor to use in this query.
+   */
+  refineFactor (value: number): Query {
+    this._refineFactor = value
+    return this
+  }
+
+  /**
+   * The number of probes used. A higher number makes search more accurate but also slower.
+   * @param value The number of probes used.
+   */
+  nprobes (value: number): Query {
+    this._nprobes = value
+    return this
+  }
+
+  /**
+   * A filter statement to be applied to this query.
+   * @param value A filter in the same format used by a sql WHERE clause.
+   */
   filter (value: string): Query {
     this._filter = value
     return this
   }
 
   /**
-   * Execute the query and return the results as an Array of Objects
+   * The MetricType used for this Query.
+   * @param value The metric to use. @see MetricType for the different options
    */
+  metricType (value: MetricType): Query {
+    this._metricType = value
+    return this
+  }
+
+  /**
+   * Execute the query and return the results as an Array of Objects
+   */
   async execute<T = Record<string, unknown>> (): Promise<T[]> {
-    let buffer
-    if (this._filter != null) {
-      buffer = await tableSearch.call(this._tbl, this._query_vector, this._limit, this._filter)
-    } else {
-      buffer = await tableSearch.call(this._tbl, this._query_vector, this._limit)
-    }
+    const buffer = await tableSearch.call(this._tbl, this)
     const data = tableFromIPC(buffer)
     return data.toArray().map((entry: Record<string, unknown>) => {
       const newObject: Record<string, unknown> = {}
@@ -177,3 +267,18 @@ export enum WriteMode {
   Overwrite = 'overwrite',
   Append = 'append'
 }
+
+/**
+ * Distance metrics type.
+ */
+export enum MetricType {
+  /**
+   * Euclidean distance
+   */
+  L2 = 'l2',
+
+  /**
+   * Cosine distance
+   */
+  Cosine = 'cosine'
+}
node/src/test/ (mocha suite)
@@ -17,6 +17,7 @@ import { assert } from 'chai'
 import { track } from 'temp'
 
 import * as lancedb from '../index'
+import { MetricType, Query } from '../index'
 
 describe('LanceDB client', function () {
   describe('when creating a connection to lancedb', function () {
@@ -67,7 +68,7 @@ describe('LanceDB client', function () {
     const uri = await createTestDB()
     const con = await lancedb.connect(uri)
     const table = await con.openTable('vectors')
-    const results = await table.search([0.1, 0.3]).filter('id == 2').execute()
+    const results = await table.search([0.1, 0.1]).filter('id == 2').execute()
     assert.equal(results.length, 1)
     assert.equal(results[0].id, 2)
   })
@@ -96,8 +97,8 @@ describe('LanceDB client', function () {
     const con = await lancedb.connect(dir)
 
     const data = [
-      { id: 1, vector: [0.1, 0.2], price: 10 },
-      { id: 2, vector: [1.1, 1.2], price: 50 }
+      { id: 1, vector: [0.1, 0.2], price: 10, name: 'a' },
+      { id: 2, vector: [1.1, 1.2], price: 50, name: 'b' }
     ]
 
     const table = await con.createTable('vectors', data)
@@ -105,8 +106,8 @@ describe('LanceDB client', function () {
     assert.equal(results.length, 2)
 
     const dataAdd = [
-      { id: 3, vector: [2.1, 2.2], price: 10 },
-      { id: 4, vector: [3.1, 3.2], price: 50 }
+      { id: 3, vector: [2.1, 2.2], price: 10, name: 'c' },
+      { id: 4, vector: [3.1, 3.2], price: 50, name: 'd' }
     ]
     await table.add(dataAdd)
     const resultsAdd = await table.search([0.1, 0.3]).execute()
@@ -130,16 +131,43 @@ describe('LanceDB client', function () {
     assert.equal(resultsAdd.length, 2)
   })
 })
+
+  describe('when creating a vector index', function () {
+    it('overwrite all records in a table', async function () {
+      const uri = await createTestDB(32, 300)
+      const con = await lancedb.connect(uri)
+      const table = await con.openTable('vectors')
+      await table.create_index({ type: 'ivf_pq', column: 'vector', num_partitions: 2, max_iters: 2 })
+    }).timeout(10_000) // Timeout is high partially because GH macos runner is pretty slow
+  })
 })
 
+describe('Query object', function () {
+  it('sets custom parameters', async function () {
+    const query = new Query(undefined, [0.1, 0.3])
+      .limit(1)
+      .metricType(MetricType.Cosine)
+      .refineFactor(100)
+      .nprobes(20) as Record<string, any>
+    assert.equal(query._limit, 1)
+    assert.equal(query._metricType, MetricType.Cosine)
+    assert.equal(query._refineFactor, 100)
+    assert.equal(query._nprobes, 20)
+  })
+})
+
-async function createTestDB (): Promise<string> {
+async function createTestDB (numDimensions: number = 2, numRows: number = 2): Promise<string> {
   const dir = await track().mkdir('lancejs')
   const con = await lancedb.connect(dir)
 
-  const data = [
-    { id: 1, vector: [0.1, 0.2], name: 'foo', price: 10, is_active: true },
-    { id: 2, vector: [1.1, 1.2], name: 'bar', price: 50, is_active: false }
-  ]
+  const data = []
+  for (let i = 0; i < numRows; i++) {
+    const vector = []
+    for (let j = 0; j < numDimensions; j++) {
+      vector.push(i + (j * 0.1))
+    }
+    data.push({ id: i + 1, name: `name_${i}`, price: i + 10, is_active: (i % 2 === 0), vector })
+  }
 
   await con.createTable('vectors', data)
   return dir
python/lancedb/fts.py (new file, 122 lines)
@@ -0,0 +1,122 @@
+# Copyright 2023 LanceDB Developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Full text search index using tantivy-py"""
+import os
+from typing import List, Tuple
+
+import pyarrow as pa
+import tantivy
+
+from .table import LanceTable
+
+
+def create_index(index_path: str, text_fields: List[str]) -> tantivy.Index:
+    """
+    Create a new Index (not populated)
+
+    Parameters
+    ----------
+    index_path : str
+        Path to the index directory
+    text_fields : List[str]
+        List of text fields to index
+
+    Returns
+    -------
+    index : tantivy.Index
+        The index object (not yet populated)
+    """
+    # Declaring our schema.
+    schema_builder = tantivy.SchemaBuilder()
+    # special field that we'll populate with row_id
+    schema_builder.add_integer_field("doc_id", stored=True)
+    # data fields
+    for name in text_fields:
+        schema_builder.add_text_field(name, stored=True)
+    schema = schema_builder.build()
+    os.makedirs(index_path, exist_ok=True)
+    index = tantivy.Index(schema, path=index_path)
+    return index
+
+
+def populate_index(index: tantivy.Index, table: LanceTable, fields: List[str]) -> int:
+    """
+    Populate an index with data from a LanceTable
+
+    Parameters
+    ----------
+    index : tantivy.Index
+        The index object
+    table : LanceTable
+        The table to index
+    fields : List[str]
+        List of fields to index
+    """
+    # first check the fields exist and are string or large string type
+    for name in fields:
+        f = table.schema.field(name)  # raises KeyError if not found
+        if not pa.types.is_string(f.type) and not pa.types.is_large_string(f.type):
+            raise TypeError(f"Field {name} is not a string type")
+
+    # create a tantivy writer
+    writer = index.writer()
+    # write data into index
+    dataset = table.to_lance()
+    row_id = 0
+    for b in dataset.to_batches(columns=fields):
+        for i in range(b.num_rows):
+            doc = tantivy.Document()
+            doc.add_integer("doc_id", row_id)
+            for name in fields:
+                doc.add_text(name, b[name][i].as_py())
+            writer.add_document(doc)
+            row_id += 1
+    # commit changes
+    writer.commit()
+    return row_id
+
+
+def search_index(
+    index: tantivy.Index, query: str, limit: int = 10
+) -> Tuple[Tuple[int], Tuple[float]]:
+    """
+    Search an index for a query
+
+    Parameters
+    ----------
+    index : tantivy.Index
+        The index object
+    query : str
+        The query string
+    limit : int
+        The maximum number of results to return
+
+    Returns
+    -------
+    ids_and_score: list[tuple[int], tuple[float]]
+        A tuple of two tuples, the first containing the document ids
+        and the second containing the scores
+    """
+    searcher = index.searcher()
+    query = index.parse_query(query)
+    # get top results
+    results = searcher.search(query, limit)
+    return tuple(
+        zip(
+            *[
+                (searcher.doc(doc_address)["doc_id"][0], score)
+                for score, doc_address in results.hits
+            ]
+        )
+    )
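For reference, the three module functions above compose as in the test suite (see python/tests/test_fts.py below); here `table` stands for any existing `LanceTable` with a string column named `text`, and the index path is arbitrary:

```python
import lancedb.fts as fts

# Sketch of direct module usage, mirroring the tests in this change.
index = fts.create_index("/tmp/fts-index", ["text"])  # empty tantivy index on local disk
n_docs = fts.populate_index(index, table, ["text"])   # one tantivy doc per table row
index.reload()                                        # pick up the committed segments
row_ids, scores = fts.search_index(index, "puppy", limit=10)
```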
python/lancedb/query.py
@@ -14,6 +14,7 @@ from __future__ import annotations
 
 import numpy as np
 import pandas as pd
+import pyarrow as pa
 
 from .common import VECTOR_COLUMN_NAME
 
@@ -131,7 +132,6 @@ class LanceQueryBuilder:
         vector and the returned vector.
         """
         ds = self._table.to_lance()
-        # TODO indexed search
         tbl = ds.to_table(
             columns=self._columns,
             filter=self._where,
@@ -145,3 +145,26 @@ class LanceQueryBuilder:
             },
         )
         return tbl.to_pandas()
+
+
+class LanceFtsQueryBuilder(LanceQueryBuilder):
+    def to_df(self) -> pd.DataFrame:
+        try:
+            import tantivy
+        except ImportError:
+            raise ImportError(
+                "You need to install the `lancedb[fts]` extra to use this method."
+            )
+
+        from .fts import search_index
+
+        # get the index path
+        index_path = self._table._get_fts_index_path()
+        # open the index
+        index = tantivy.Index.open(index_path)
+        # get the scores and doc ids
+        row_ids, scores = search_index(index, self._query, self._limit)
+        scores = pa.array(scores)
+        output_tbl = self._table.to_lance().take(row_ids, columns=self._columns)
+        output_tbl = output_tbl.append_column("score", scores)
+        return output_tbl.to_pandas()
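The `to_df` logic above amounts to: fetch row ids and scores from tantivy, then `take` those rows from the lance dataset and append the scores as a column. A standalone sketch of the same idea, where `index` and `dataset` are stand-ins for the builder's internals:

```python
import pyarrow as pa
from lancedb.fts import search_index

# `index` is an opened tantivy.Index, `dataset` a lance dataset (stand-ins).
row_ids, scores = search_index(index, "puppy", limit=10)
tbl = dataset.take(list(row_ids))                    # rows in tantivy hit order
tbl = tbl.append_column("score", pa.array(scores))   # attach relevance scores
df = tbl.to_pandas()
```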
python/lancedb/table.py
@@ -14,7 +14,9 @@
 from __future__ import annotations
 
 import os
+import shutil
 from functools import cached_property
+from typing import List, Union
 
 import lance
 import numpy as np
@@ -24,7 +26,8 @@ from lance import LanceDataset
 from lance.vector import vec_to_table
 
 from .common import DATA, VEC, VECTOR_COLUMN_NAME
-from .query import LanceQueryBuilder
+from .query import LanceFtsQueryBuilder, LanceQueryBuilder
+from .util import get_uri_scheme
 
 
 def _sanitize_data(data, schema):
@@ -130,6 +133,27 @@ class LanceTable:
         )
         self._reset_dataset()
 
+    def create_fts_index(self, field_names: Union[str, List[str]]):
+        """Create a full-text search index on the table.
+
+        Warning - this API is highly experimental and is highly likely to change
+        in the future.
+
+        Parameters
+        ----------
+        field_names: str or list of str
+            The name(s) of the field to index.
+        """
+        from .fts import create_index, populate_index
+
+        if isinstance(field_names, str):
+            field_names = [field_names]
+        index = create_index(self._get_fts_index_path(), field_names)
+        populate_index(index, self, field_names)
+
+    def _get_fts_index_path(self):
+        return os.path.join(self._dataset_uri, "_indices", "tantivy")
+
     @cached_property
     def _dataset(self) -> LanceDataset:
         return lance.dataset(self._dataset_uri, version=self._version)
@@ -158,7 +182,7 @@ class LanceTable:
         self._reset_dataset()
         return len(self)
 
-    def search(self, query: VEC) -> LanceQueryBuilder:
+    def search(self, query: Union[VEC, str]) -> LanceQueryBuilder:
         """Create a search query to find the nearest neighbors
         of the given query vector.
 
@@ -174,6 +198,10 @@ class LanceTable:
         and also the "score" column which is the distance between the query
         vector and the returned vector.
         """
+        if isinstance(query, str):
+            # fts
+            return LanceFtsQueryBuilder(self, query)
+
         if isinstance(query, list):
             query = np.array(query)
         if isinstance(query, np.ndarray):
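With this change, the query's type selects the execution path. A quick sketch, assuming `table` already has both a 2-dimensional vector column and an FTS index on `text`:

```python
# str -> LanceFtsQueryBuilder (full text search over the indexed columns)
hits = table.search("puppy").limit(5).to_df()

# list / np.ndarray -> vector search, as before
nearest = table.search([0.1, 0.2]).limit(5).to_df()
```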
python/pyproject.toml
@@ -45,6 +45,10 @@ dev = [
 docs = [
     "mkdocs", "mkdocs-jupyter", "mkdocs-material", "mkdocstrings[python]"
 ]
+fts = [
+    # tantivy 0.19.2
+    "tantivy@git+https://github.com/quickwit-oss/tantivy-py#164adc87e1a033117001cf70e38c82a53014d985"
+]
 
 [build-system]
 requires = [
python/tests/test_fts.py (new file, 84 lines)
@@ -0,0 +1,84 @@
+# Copyright 2023 LanceDB Developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import random
+
+import numpy as np
+import pandas as pd
+import pytest
+import tantivy
+
+import lancedb as ldb
+import lancedb.fts
+
+
+@pytest.fixture
+def table(tmp_path) -> ldb.table.LanceTable:
+    db = ldb.connect(tmp_path)
+    vectors = [np.random.randn(128) for _ in range(100)]
+
+    nouns = ("puppy", "car", "rabbit", "girl", "monkey")
+    verbs = ("runs", "hits", "jumps", "drives", "barfs")
+    adv = ("crazily.", "dutifully.", "foolishly.", "merrily.", "occasionally.")
+    adj = ("adorable", "clueless", "dirty", "odd", "stupid")
+    text = [
+        " ".join(
+            [
+                nouns[random.randrange(0, 5)],
+                verbs[random.randrange(0, 5)],
+                adv[random.randrange(0, 5)],
+                adj[random.randrange(0, 5)],
+            ]
+        )
+        for _ in range(100)
+    ]
+    table = db.create_table(
+        "test", data=pd.DataFrame({"vector": vectors, "text": text, "text2": text})
+    )
+    return table
+
+
+def test_create_index(tmp_path):
+    index = ldb.fts.create_index(str(tmp_path / "index"), ["text"])
+    assert isinstance(index, tantivy.Index)
+    assert os.path.exists(str(tmp_path / "index"))
+
+
+def test_populate_index(tmp_path, table):
+    index = ldb.fts.create_index(str(tmp_path / "index"), ["text"])
+    assert ldb.fts.populate_index(index, table, ["text"]) == len(table)
+
+
+def test_search_index(tmp_path, table):
+    index = ldb.fts.create_index(str(tmp_path / "index"), ["text"])
+    ldb.fts.populate_index(index, table, ["text"])
+    index.reload()
+    results = ldb.fts.search_index(index, query="puppy", limit=10)
+    assert len(results) == 2
+    assert len(results[0]) == 10  # row_ids
+    assert len(results[1]) == 10  # scores
+
+
+def test_create_index_from_table(tmp_path, table):
+    table.create_fts_index("text")
+    df = table.search("puppy").limit(10).select(["text"]).to_df()
+    assert len(df) == 10
+    assert "text" in df.columns
+
+
+def test_create_index_multiple_columns(tmp_path, table):
+    table.create_fts_index(["text", "text2"])
+    df = table.search("puppy").limit(10).to_df()
+    assert len(df) == 10
+    assert "text" in df.columns
+    assert "text2" in df.columns
(deleted file: old node release instructions, 85 lines)
@@ -1,85 +0,0 @@
-How to release the node module
-
-### 1. Bump the versions
-
-<!-- TODO: we also need to bump the optional dependencies for node! -->
-
-```shell
-pushd rust/vectordb
-cargo bump minor
-popd
-
-pushd rust/ffi/node
-cargo bump minor
-popd
-
-pushd python
-cargo bump minor
-popd
-
-pushd node
-npm version minor
-popd
-
-git add -u
-git commit -m "Bump versions"
-git push
-```
-
-### 2. Push a new tag
-
-```shell
-git tag vX.X.X
-git push --tag vX.X.X
-```
-
-When the tag is pushed, GitHub actions will start building the libraries and
-will upload them to a draft release. Wait for those jobs to complete.
-
-### 3. Publish the release
-
-Once the jobs are complete, you can edit the
-
-2. Push a tag, such as vX.X.X. Once the tag is pushed, GitHub actions will start
-   building the native libraries and uploading them to a draft release. Wait for
-   those jobs to complete.
-3. If the libraries are successful, edit the changelog and then publish the
-   release. Once you publish, a new action will start and upload all the
-   release artifacts to npm.
-
-## Manual process
-
-You can build the artifacts locally on a MacOS machine.
-
-### Build the MacOS release libraries
-
-One-time setup:
-
-```shell
-rustup target add x86_64-apple-darwin aarch64-apple-darwin
-```
-
-To build:
-
-```shell
-bash ci/build_macos_artifacts.sh
-```
-
-### Build the Linux release libraries
-
-One-time setup, building the Docker container
-
-```shell
-cat ci/ubuntu_build.dockerfile | docker build -t lancedb-node-build -
-```
-
-To build:
-
-```shell
-docker run \
-  -v /var/run/docker.sock:/var/run/docker.sock \
-  -v $(pwd):/io -w /io \
-  lancedb-node-build \
-  bash ci/build_linux_artifacts.sh
-```
rust/ffi/node/src/index.rs (new file, 15 lines)
@@ -0,0 +1,15 @@
+// Copyright 2023 Lance Developers.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod vector;
128
rust/ffi/node/src/index/vector.rs
Normal file
128
rust/ffi/node/src/index/vector.rs
Normal file
@@ -0,0 +1,128 @@
// Copyright 2023 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::convert::TryFrom;

use lance::index::vector::ivf::IvfBuildParams;
use lance::index::vector::pq::PQBuildParams;
use lance::index::vector::MetricType;
use neon::context::FunctionContext;
use neon::prelude::*;

use vectordb::index::vector::{IvfPQIndexBuilder, VectorIndexBuilder};

use crate::{runtime, JsTable};

pub(crate) fn table_create_vector_index(mut cx: FunctionContext) -> JsResult<JsPromise> {
    let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
    let index_params = cx.argument::<JsObject>(0)?;
    let index_params_builder = get_index_params_builder(&mut cx, index_params).unwrap();

    let rt = runtime(&mut cx)?;
    let channel = cx.channel();

    let (deferred, promise) = cx.promise();
    let table = js_table.table.clone();

    rt.block_on(async move {
        let add_result = table
            .lock()
            .unwrap()
            .create_idx(&index_params_builder)
            .await;

        deferred.settle_with(&channel, move |mut cx| {
            add_result
                .map(|_| cx.undefined())
                .or_else(|err| cx.throw_error(err.to_string()))
        });
    });
    Ok(promise)
}

fn get_index_params_builder(
    cx: &mut FunctionContext,
    obj: Handle<JsObject>,
) -> Result<impl VectorIndexBuilder, String> {
    let idx_type = obj
        .get::<JsString, _, _>(cx, "type")
        .map_err(|t| t.to_string())?
        .value(cx);

    match idx_type.as_str() {
        "ivf_pq" => {
            let mut index_builder: IvfPQIndexBuilder = IvfPQIndexBuilder::new();
            let mut pq_params = PQBuildParams::default();

            obj.get_opt::<JsString, _, _>(cx, "column")
                .map_err(|t| t.to_string())?
                .map(|s| index_builder.column(s.value(cx)));

            obj.get_opt::<JsString, _, _>(cx, "index_name")
                .map_err(|t| t.to_string())?
                .map(|s| index_builder.index_name(s.value(cx)));

            obj.get_opt::<JsString, _, _>(cx, "metric_type")
                .map_err(|t| t.to_string())?
                .map(|s| MetricType::try_from(s.value(cx).as_str()))
                .map(|mt| {
                    let metric_type = mt.unwrap();
                    index_builder.metric_type(metric_type);
                    pq_params.metric_type = metric_type;
                });

            let num_partitions = obj
                .get_opt::<JsNumber, _, _>(cx, "num_partitions")
                .map_err(|t| t.to_string())?
                .map(|s| s.value(cx) as usize);

            let max_iters = obj
                .get_opt::<JsNumber, _, _>(cx, "max_iters")
                .map_err(|t| t.to_string())?
                .map(|s| s.value(cx) as usize);

            num_partitions.map(|np| {
                let max_iters = max_iters.unwrap_or(50);
                let ivf_params = IvfBuildParams {
                    num_partitions: np,
                    max_iters,
                };
                index_builder.ivf_params(ivf_params)
            });

            obj.get_opt::<JsBoolean, _, _>(cx, "use_opq")
                .map_err(|t| t.to_string())?
                .map(|s| pq_params.use_opq = s.value(cx));

            obj.get_opt::<JsNumber, _, _>(cx, "num_sub_vectors")
                .map_err(|t| t.to_string())?
                .map(|s| pq_params.num_sub_vectors = s.value(cx) as usize);

            obj.get_opt::<JsNumber, _, _>(cx, "num_bits")
                .map_err(|t| t.to_string())?
                .map(|s| pq_params.num_bits = s.value(cx) as usize);

            obj.get_opt::<JsNumber, _, _>(cx, "max_iters")
                .map_err(|t| t.to_string())?
                .map(|s| pq_params.max_iters = s.value(cx) as usize);

            obj.get_opt::<JsNumber, _, _>(cx, "max_opq_iters")
                .map_err(|t| t.to_string())?
                .map(|s| pq_params.max_opq_iters = s.value(cx) as usize);

            Ok(index_builder)
        }
        t => Err(format!("{} is not a valid index type", t).to_string()),
    }
}
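For orientation (a sketch, not part of the diff): the options object that `get_index_params_builder` consumes maps directly onto the `IvfPQIndexBuilder` API added in `rust/vectordb` below. A JS call passing `{ type: 'ivf_pq', column: 'vector', num_partitions: 256 }` would configure roughly the equivalent of this hand-written builder, using the same 50-iteration fallback as the code above:

```rust
use lance::index::vector::ivf::IvfBuildParams;
use vectordb::index::vector::IvfPQIndexBuilder;

fn main() {
    // "column" feeds builder.column(); "num_partitions" becomes an
    // IvfBuildParams, with max_iters falling back to 50 as in the FFI code.
    let mut builder = IvfPQIndexBuilder::new();
    builder
        .column("vector".to_string())
        .ivf_params(IvfBuildParams {
            num_partitions: 256,
            max_iters: 50,
        });
}
```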
@@ -13,6 +13,7 @@
// limitations under the License.

use std::collections::HashMap;
+use std::convert::TryFrom;
use std::ops::Deref;
use std::sync::{Arc, Mutex};

@@ -21,6 +22,7 @@ use arrow_ipc::writer::FileWriter;
use futures::{TryFutureExt, TryStreamExt};
use lance::arrow::RecordBatchBuffer;
use lance::dataset::WriteMode;
+use lance::index::vector::MetricType;
use neon::prelude::*;
use neon::types::buffer::TypedArray;
use once_cell::sync::OnceCell;
@@ -34,17 +36,18 @@ use crate::arrow::arrow_buffer_to_record_batch;

mod arrow;
mod convert;
+mod index;

struct JsDatabase {
    database: Arc<Database>,
}

+impl Finalize for JsDatabase {}
+
struct JsTable {
    table: Arc<Mutex<Table>>,
}

-impl Finalize for JsDatabase {}
-
impl Finalize for JsTable {}

fn runtime<'a, C: Context<'a>>(cx: &mut C) -> NeonResult<&'static Runtime> {
@@ -87,7 +90,9 @@ fn database_open_table(mut cx: FunctionContext) -> JsResult<JsPromise> {
        let table_rst = database.open_table(table_name).await;

        deferred.settle_with(&channel, move |mut cx| {
-            let table = Arc::new(Mutex::new(table_rst.or_else(|err| cx.throw_error(err.to_string()))?));
+            let table = Arc::new(Mutex::new(
+                table_rst.or_else(|err| cx.throw_error(err.to_string()))?,
+            ));
            Ok(cx.boxed(JsTable { table }))
        });
    });
@@ -96,15 +101,32 @@ fn database_open_table(mut cx: FunctionContext) -> JsResult<JsPromise> {

fn table_search(mut cx: FunctionContext) -> JsResult<JsPromise> {
    let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
-    let query_vector = cx.argument::<JsArray>(0)?; //. .as_value(&mut cx);
-    let limit = cx.argument::<JsNumber>(1)?.value(&mut cx);
-    let filter = cx.argument_opt(2).map(|f| f.downcast_or_throw::<JsString, _>(&mut cx).unwrap().value(&mut cx));
+    let query_obj = cx.argument::<JsObject>(0)?;
+
+    let limit = query_obj
+        .get::<JsNumber, _, _>(&mut cx, "_limit")?
+        .value(&mut cx);
+    let filter = query_obj
+        .get_opt::<JsString, _, _>(&mut cx, "_filter")?
+        .map(|s| s.value(&mut cx));
+    let refine_factor = query_obj
+        .get_opt::<JsNumber, _, _>(&mut cx, "_refineFactor")?
+        .map(|s| s.value(&mut cx))
+        .map(|i| i as u32);
+    let nprobes = query_obj
+        .get::<JsNumber, _, _>(&mut cx, "_nprobes")?
+        .value(&mut cx) as usize;
+    let metric_type = query_obj
+        .get_opt::<JsString, _, _>(&mut cx, "_metricType")?
+        .map(|s| s.value(&mut cx))
+        .map(|s| MetricType::try_from(s.as_str()).unwrap());

    let rt = runtime(&mut cx)?;
    let channel = cx.channel();

    let (deferred, promise) = cx.promise();
    let table = js_table.table.clone();
+    let query_vector = query_obj.get::<JsArray, _, _>(&mut cx, "_queryVector")?;
    let query = convert::js_array_to_vec(query_vector.deref(), &mut cx);

    rt.spawn(async move {
@@ -113,7 +135,10 @@ fn table_search(mut cx: FunctionContext) -> JsResult<JsPromise> {
            .unwrap()
            .search(Float32Array::from(query))
            .limit(limit as usize)
-            .filter(filter);
+            .refine_factor(refine_factor)
+            .nprobes(nprobes)
+            .filter(filter)
+            .metric_type(metric_type);
        let record_batch_stream = builder.execute();
        let results = record_batch_stream
            .and_then(|stream| stream.try_collect::<Vec<_>>().map_err(Error::from))
@@ -164,7 +189,9 @@ fn table_create(mut cx: FunctionContext) -> JsResult<JsPromise> {
        let table_rst = database.create_table(table_name, batch_reader).await;

        deferred.settle_with(&channel, move |mut cx| {
-            let table = Arc::new(Mutex::new(table_rst.or_else(|err| cx.throw_error(err.to_string()))?));
+            let table = Arc::new(Mutex::new(
+                table_rst.or_else(|err| cx.throw_error(err.to_string()))?,
+            ));
            Ok(cx.boxed(JsTable { table }))
        });
    });
@@ -178,9 +205,7 @@ fn table_add(mut cx: FunctionContext) -> JsResult<JsPromise> {
        ("overwrite", WriteMode::Overwrite),
    ]);

-    let js_table = cx
-        .this()
-        .downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
+    let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
    let buffer = cx.argument::<JsBuffer>(0)?;
    let write_mode = cx.argument::<JsString>(1)?.value(&mut cx);
    let batches = arrow_buffer_to_record_batch(buffer.as_slice(&mut cx));
@@ -204,7 +229,6 @@ fn table_add(mut cx: FunctionContext) -> JsResult<JsPromise> {
    Ok(promise)
}

-
#[neon::main]
fn main(mut cx: ModuleContext) -> NeonResult<()> {
    cx.export_function("databaseNew", database_new)?;
@@ -213,5 +237,9 @@ fn main(mut cx: ModuleContext) -> NeonResult<()> {
    cx.export_function("tableSearch", table_search)?;
    cx.export_function("tableCreate", table_create)?;
    cx.export_function("tableAdd", table_add)?;
+    cx.export_function(
+        "tableCreateVectorIndex",
+        index::vector::table_create_vector_index,
+    )?;
    Ok(())
}
@@ -10,9 +10,11 @@ repository = "https://github.com/lancedb/lancedb"

[dependencies]
arrow-array = "37.0"
+arrow-data = "37.0"
arrow-schema = "37.0"
lance = "0.4.3"
tokio = { version = "1.23", features = ["rt-multi-thread"] }

[dev-dependencies]
tempfile = "3.5.0"
+rand = { version = "0.8.3", features = ["small_rng"] }
rust/vectordb/src/index.rs
Normal file
15
rust/vectordb/src/index.rs
Normal file
@@ -0,0 +1,15 @@
|
// Copyright 2023 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod vector;
163
rust/vectordb/src/index/vector.rs
Normal file
@@ -0,0 +1,163 @@
// Copyright 2023 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use lance::index::vector::ivf::IvfBuildParams;
use lance::index::vector::pq::PQBuildParams;
use lance::index::vector::{MetricType, VectorIndexParams};

pub trait VectorIndexBuilder {
    fn get_column(&self) -> Option<String>;
    fn get_index_name(&self) -> Option<String>;
    fn build(&self) -> VectorIndexParams;
}

pub struct IvfPQIndexBuilder {
    column: Option<String>,
    index_name: Option<String>,
    metric_type: Option<MetricType>,
    ivf_params: Option<IvfBuildParams>,
    pq_params: Option<PQBuildParams>,
}

impl IvfPQIndexBuilder {
    pub fn new() -> IvfPQIndexBuilder {
        IvfPQIndexBuilder {
            column: None,
            index_name: None,
            metric_type: None,
            ivf_params: None,
            pq_params: None,
        }
    }
}

impl IvfPQIndexBuilder {
    pub fn column(&mut self, column: String) -> &mut IvfPQIndexBuilder {
        self.column = Some(column);
        self
    }

    pub fn index_name(&mut self, index_name: String) -> &mut IvfPQIndexBuilder {
        self.index_name = Some(index_name);
        self
    }

    pub fn metric_type(&mut self, metric_type: MetricType) -> &mut IvfPQIndexBuilder {
        self.metric_type = Some(metric_type);
        self
    }

    pub fn ivf_params(&mut self, ivf_params: IvfBuildParams) -> &mut IvfPQIndexBuilder {
        self.ivf_params = Some(ivf_params);
        self
    }

    pub fn pq_params(&mut self, pq_params: PQBuildParams) -> &mut IvfPQIndexBuilder {
        self.pq_params = Some(pq_params);
        self
    }
}

impl VectorIndexBuilder for IvfPQIndexBuilder {
    fn get_column(&self) -> Option<String> {
        self.column.clone()
    }

    fn get_index_name(&self) -> Option<String> {
        self.index_name.clone()
    }

    fn build(&self) -> VectorIndexParams {
        let ivf_params = self.ivf_params.clone().unwrap_or(IvfBuildParams::default());
        let pq_params = self.pq_params.clone().unwrap_or(PQBuildParams::default());

        VectorIndexParams::with_ivf_pq_params(pq_params.metric_type, ivf_params, pq_params)
    }
}

#[cfg(test)]
mod tests {
    use lance::index::vector::ivf::IvfBuildParams;
    use lance::index::vector::pq::PQBuildParams;
    use lance::index::vector::{MetricType, StageParams};

    use crate::index::vector::{IvfPQIndexBuilder, VectorIndexBuilder};

    #[test]
    fn test_builder_no_params() {
        let index_builder = IvfPQIndexBuilder::new();
        assert!(index_builder.get_column().is_none());
        assert!(index_builder.get_index_name().is_none());

        let index_params = index_builder.build();
        assert_eq!(index_params.stages.len(), 2);
        if let StageParams::Ivf(ivf_params) = index_params.stages.get(0).unwrap() {
            let default = IvfBuildParams::default();
            assert_eq!(ivf_params.num_partitions, default.num_partitions);
            assert_eq!(ivf_params.max_iters, default.max_iters);
        } else {
            panic!("Expected first stage to be ivf")
        }

        if let StageParams::PQ(pq_params) = index_params.stages.get(1).unwrap() {
            assert_eq!(pq_params.use_opq, false);
        } else {
            panic!("Expected second stage to be pq")
        }
    }

    #[test]
    fn test_builder_all_params() {
        let mut index_builder = IvfPQIndexBuilder::new();

        index_builder
            .column("c".to_owned())
            .metric_type(MetricType::Cosine)
            .index_name("index".to_owned());

        assert_eq!(index_builder.column.clone().unwrap(), "c");
        assert_eq!(index_builder.metric_type.unwrap(), MetricType::Cosine);
        assert_eq!(index_builder.index_name.clone().unwrap(), "index");

        let ivf_params = IvfBuildParams::new(500);
        let mut pq_params = PQBuildParams::default();
        pq_params.use_opq = true;
        pq_params.max_iters = 1;
        pq_params.num_bits = 8;
        pq_params.num_sub_vectors = 50;
        pq_params.metric_type = MetricType::Cosine;
        pq_params.max_opq_iters = 2;
        index_builder.ivf_params(ivf_params);
        index_builder.pq_params(pq_params);

        let index_params = index_builder.build();
        assert_eq!(index_params.stages.len(), 2);
        if let StageParams::Ivf(ivf_params) = index_params.stages.get(0).unwrap() {
            assert_eq!(ivf_params.num_partitions, 500);
        } else {
            assert!(false, "Expected first stage to be ivf")
        }

        if let StageParams::PQ(pq_params) = index_params.stages.get(1).unwrap() {
            assert_eq!(pq_params.use_opq, true);
            assert_eq!(pq_params.max_iters, 1);
            assert_eq!(pq_params.num_bits, 8);
            assert_eq!(pq_params.num_sub_vectors, 50);
            assert_eq!(pq_params.metric_type, MetricType::Cosine);
            assert_eq!(pq_params.max_opq_iters, 2);
        } else {
            assert!(false, "Expected second stage to be pq")
        }
    }
}
@@ -14,5 +14,6 @@

pub mod database;
pub mod error;
+pub mod index;
pub mod query;
pub mod table;
@@ -29,7 +29,7 @@ pub struct Query {
    pub filter: Option<String>,
    pub nprobes: usize,
    pub refine_factor: Option<u32>,
-    pub metric_type: MetricType,
+    pub metric_type: Option<MetricType>,
    pub use_index: bool,
}

@@ -51,9 +51,9 @@ impl Query {
            limit: 10,
            nprobes: 20,
            refine_factor: None,
-            metric_type: MetricType::L2,
+            metric_type: None,
            use_index: false,
-            filter: None
+            filter: None,
        }
    }

@@ -71,10 +71,10 @@ impl Query {
            self.limit,
        )?;
        scanner.nprobs(self.nprobes);
-        scanner.distance_metric(self.metric_type);
        scanner.use_index(self.use_index);
        self.filter.as_ref().map(|f| scanner.filter(f));
        self.refine_factor.map(|rf| scanner.refine(rf));
+        self.metric_type.map(|mt| scanner.distance_metric(mt));
        Ok(scanner.try_into_stream().await?)
    }

@@ -123,7 +123,7 @@ impl Query {
    /// # Arguments
    ///
    /// * `metric_type` - The distance metric to use. By default [MetricType::L2] is used.
-    pub fn metric_type(mut self, metric_type: MetricType) -> Query {
+    pub fn metric_type(mut self, metric_type: Option<MetricType>) -> Query {
        self.metric_type = metric_type;
        self
    }
@@ -174,14 +174,14 @@ mod tests {
            .limit(100)
            .nprobes(1000)
            .use_index(true)
-            .metric_type(MetricType::Cosine)
+            .metric_type(Some(MetricType::Cosine))
            .refine_factor(Some(999));

        assert_eq!(query.query_vector, new_vector);
        assert_eq!(query.limit, 100);
        assert_eq!(query.nprobes, 1000);
        assert_eq!(query.use_index, true);
-        assert_eq!(query.metric_type, MetricType::Cosine);
+        assert_eq!(query.metric_type, Some(MetricType::Cosine));
        assert_eq!(query.refine_factor, Some(999));
    }

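Since `metric_type` is now an `Option`, a query only overrides the distance metric when one is supplied; `None` keeps the scanner on Lance's default (L2). A small sketch of the resulting call site, assuming a `Query` obtained as in the tests above:

```rust
use lance::index::vector::MetricType;
use vectordb::query::Query;

// Sketch only: construction of `query` is elided. Passing None keeps the
// default L2 metric; Some(..) overrides it for this query alone.
fn prefer_cosine(query: Query) -> Query {
    query.metric_type(Some(MetricType::Cosine))
}
```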
@@ -17,8 +17,10 @@ use std::sync::Arc;

use arrow_array::{Float32Array, RecordBatchReader};
use lance::dataset::{Dataset, WriteMode, WriteParams};
+use lance::index::IndexType;

use crate::error::{Error, Result};
+use crate::index::vector::VectorIndexBuilder;
use crate::query::Query;

pub const VECTOR_COLUMN_NAME: &str = "vector";
@@ -80,7 +82,30 @@ impl Table {

        let dataset =
            Arc::new(Dataset::write(&mut batches, path, Some(WriteParams::default())).await?);
-        Ok(Table { name, path: path.to_string(), dataset })
+        Ok(Table {
+            name,
+            path: path.to_string(),
+            dataset,
+        })
+    }
+
+    pub async fn create_idx(&mut self, index_builder: &impl VectorIndexBuilder) -> Result<()> {
+        use lance::index::DatasetIndexExt;
+
+        let dataset = self
+            .dataset
+            .create_index(
+                &[index_builder
+                    .get_column()
+                    .unwrap_or(VECTOR_COLUMN_NAME.to_string())
+                    .as_str()],
+                IndexType::Vector,
+                index_builder.get_index_name(),
+                &index_builder.build(),
+            )
+            .await?;
+        self.dataset = Arc::new(dataset);
+        Ok(())
    }

    /// Insert records into this Table
@@ -95,12 +120,13 @@ impl Table {
    pub async fn add(
        &mut self,
        mut batches: Box<dyn RecordBatchReader>,
-        write_mode: Option<WriteMode>
+        write_mode: Option<WriteMode>,
    ) -> Result<usize> {
        let mut params = WriteParams::default();
        params.mode = write_mode.unwrap_or(WriteMode::Append);

-        self.dataset = Arc::new(Dataset::write(&mut batches, self.path.as_str(), Some(params)).await?);
+        self.dataset =
+            Arc::new(Dataset::write(&mut batches, self.path.as_str(), Some(params)).await?);
        Ok(batches.count())
    }

@@ -125,13 +151,21 @@ impl Table {

#[cfg(test)]
mod tests {
-    use arrow_array::{Float32Array, Int32Array, RecordBatch, RecordBatchReader};
+    use arrow_array::{
+        Array, FixedSizeListArray, Float32Array, Int32Array, RecordBatch, RecordBatchReader,
+    };
+    use arrow_data::ArrayDataBuilder;
    use arrow_schema::{DataType, Field, Schema};
    use lance::arrow::RecordBatchBuffer;
    use lance::dataset::{Dataset, WriteMode};
+    use lance::index::vector::ivf::IvfBuildParams;
+    use lance::index::vector::pq::PQBuildParams;
+    use rand::Rng;
    use std::sync::Arc;
    use tempfile::tempdir;

+    use crate::error::Result;
+    use crate::index::vector::IvfPQIndexBuilder;
    use crate::table::Table;

    #[tokio::test]
@@ -171,14 +205,17 @@ mod tests {

        let batches: Box<dyn RecordBatchReader> = Box::new(make_test_batches());
        let schema = batches.schema().clone();
-        let mut table = Table::create(Arc::new(path_buf), "test".to_string(), batches).await.unwrap();
+        let mut table = Table::create(Arc::new(path_buf), "test".to_string(), batches)
+            .await
+            .unwrap();
        assert_eq!(table.count_rows().await.unwrap(), 10);

-        let new_batches: Box<dyn RecordBatchReader> = Box::new(RecordBatchBuffer::new(vec![RecordBatch::try_new(
-            schema,
-            vec![Arc::new(Int32Array::from_iter_values(100..110))],
-        )
-        .unwrap()]));
+        let new_batches: Box<dyn RecordBatchReader> =
+            Box::new(RecordBatchBuffer::new(vec![RecordBatch::try_new(
+                schema,
+                vec![Arc::new(Int32Array::from_iter_values(100..110))],
+            )
+            .unwrap()]));

        table.add(new_batches, None).await.unwrap();
        assert_eq!(table.count_rows().await.unwrap(), 20);
@@ -192,15 +229,22 @@ mod tests {

        let batches: Box<dyn RecordBatchReader> = Box::new(make_test_batches());
        let schema = batches.schema().clone();
-        let mut table = Table::create(Arc::new(path_buf), "test".to_string(), batches).await.unwrap();
+        let mut table = Table::create(Arc::new(path_buf), "test".to_string(), batches)
+            .await
+            .unwrap();
        assert_eq!(table.count_rows().await.unwrap(), 10);

-        let new_batches: Box<dyn RecordBatchReader> = Box::new(RecordBatchBuffer::new(vec![RecordBatch::try_new(
-            schema,
-            vec![Arc::new(Int32Array::from_iter_values(100..110))],
-        ).unwrap()]));
+        let new_batches: Box<dyn RecordBatchReader> =
+            Box::new(RecordBatchBuffer::new(vec![RecordBatch::try_new(
+                schema,
+                vec![Arc::new(Int32Array::from_iter_values(100..110))],
+            )
+            .unwrap()]));

-        table.add(new_batches, Some(WriteMode::Overwrite)).await.unwrap();
+        table
+            .add(new_batches, Some(WriteMode::Overwrite))
+            .await
+            .unwrap();
        assert_eq!(table.count_rows().await.unwrap(), 10);
        assert_eq!(table.name, "test");
    }
@@ -236,4 +280,74 @@ mod tests {
        )
        .unwrap()])
    }
+
+    #[tokio::test]
+    async fn test_create_index() {
+        use arrow_array::RecordBatch;
+        use arrow_schema::{DataType, Field, Schema as ArrowSchema};
+        use rand;
+        use std::iter::repeat_with;
+
+        use arrow_array::Float32Array;
+
+        let tmp_dir = tempdir().unwrap();
+        let path_buf = tmp_dir.into_path();
+
+        let dimension = 16;
+        let schema = Arc::new(ArrowSchema::new(vec![Field::new(
+            "embeddings",
+            DataType::FixedSizeList(
+                Arc::new(Field::new("item", DataType::Float32, true)),
+                dimension,
+            ),
+            false,
+        )]));
+
+        let mut rng = rand::thread_rng();
+        let float_arr = Float32Array::from(
+            repeat_with(|| rng.gen::<f32>())
+                .take(512 * dimension as usize)
+                .collect::<Vec<f32>>(),
+        );
+
+        let vectors = Arc::new(create_fixed_size_list(float_arr, dimension).unwrap());
+        let batches = RecordBatchBuffer::new(vec![RecordBatch::try_new(
+            schema.clone(),
+            vec![vectors.clone()],
+        )
+        .unwrap()]);
+
+        let reader: Box<dyn RecordBatchReader + Send> = Box::new(batches);
+        let mut table = Table::create(Arc::new(path_buf), "test".to_string(), reader)
+            .await
+            .unwrap();
+
+        let mut i = IvfPQIndexBuilder::new();
+
+        let index_builder = i
+            .column("embeddings".to_string())
+            .index_name("my_index".to_string())
+            .ivf_params(IvfBuildParams::new(256))
+            .pq_params(PQBuildParams::default());
+
+        table.create_idx(index_builder).await.unwrap();
+
+        assert_eq!(table.dataset.load_indices().await.unwrap().len(), 1);
+        assert_eq!(table.count_rows().await.unwrap(), 512);
+        assert_eq!(table.name, "test");
+    }
+
+    fn create_fixed_size_list<T: Array>(values: T, list_size: i32) -> Result<FixedSizeListArray> {
+        let list_type = DataType::FixedSizeList(
+            Arc::new(Field::new("item", values.data_type().clone(), true)),
+            list_size,
+        );
+        let data = ArrayDataBuilder::new(list_type)
+            .len(values.len() / list_size as usize)
+            .add_child_data(values.into_data())
+            .build()
+            .unwrap();

+        Ok(FixedSizeListArray::from(data))
+    }
}
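Putting the pieces together, the new `Table::create_idx` accepts any `VectorIndexBuilder`. A minimal sketch, assuming `table` is an open `Table` whose default `vector` column holds the embeddings (the index name here is just an illustrative choice):

```rust
use lance::index::vector::ivf::IvfBuildParams;
use vectordb::index::vector::IvfPQIndexBuilder;
use vectordb::table::Table;

// Sketch only: how the table was opened is elided. Column and index name
// are both optional; the column falls back to VECTOR_COLUMN_NAME ("vector").
async fn build_index(table: &mut Table) -> vectordb::error::Result<()> {
    let mut builder = IvfPQIndexBuilder::new();
    builder
        .column("vector".to_string())
        .index_name("vector_idx".to_string())
        .ivf_params(IvfBuildParams::new(256));
    table.create_idx(&builder).await
}
```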