Compare commits

...

31 Commits

Author SHA1 Message Date
Lance Release
cc7e54298b Bump version: 0.1.7 → 0.1.8 2023-06-17 00:33:53 +00:00
Rob Meng
d1e8a97a2a isort entire repo (#200) 2023-06-15 20:12:10 -04:00
Lance Release
01dadb0862 Bump version: 0.1.6 → 0.1.7 2023-06-15 23:30:01 +00:00
gsilvestrin
0724d41c4b feat(node): pull node binaries into separate packages (2) (#197)
* Refactors the Node module to load the shared library from a separate
package. When a user does `npm install vectordb`, the correct optional
dependency is automatically downloaded by npm.
* Add scripts and instructions to build Linux and MacOS node artifacts
locally.
* Add instructions for publishing the npm module and crates.

Co-authored-by: Will Jones <willjones127@gmail.com>
2023-06-15 16:15:42 -07:00
Rob Meng
cbb56e25ab port remote connection client into lancedb (#194)
* to_df() is now async; added `to_df_blocking` for convenience
* add remote lancedb client to public lancedb
* make lancedb connection class understand url scheme
`lancedb+<connection_type>://<host>:<port>`.
2023-06-15 18:57:52 -04:00
gsilvestrin
78de8f5782 feat(node): add Table.countRows() (#185) 2023-06-15 14:35:54 -07:00
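This commit adds an async `countRows()` method to the node `Table` class (its implementation appears in the TypeScript diff near the end of this page). A minimal sketch of calling it; the database path and table name are hypothetical placeholders:

```javascript
const lancedb = require('vectordb')

;(async () => {
  // hypothetical path and table name, for illustration only
  const db = await lancedb.connect('data/sample-lancedb')
  const tbl = await db.openTable('my_table')
  const n = await tbl.countRows() // resolves to the number of rows (#185)
  console.log(`my_table has ${n} rows`)
})()
```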
Lance Release
a6544c2a31 Bump version: 0.1.5 → 0.1.6 2023-06-15 16:16:03 +00:00
Leon Yee
39ed70896a [rust] added rust.yml for /rust directory (#193) 2023-06-14 11:46:08 -07:00
gsilvestrin
ae672df1b7 feat(rust): add action to publish release to crates.io (#192) 2023-06-14 11:01:22 -07:00
gsilvestrin
15c3f42387 feat(node): add action to tag node / rust releases (#186) 2023-06-14 11:01:02 -07:00
gsilvestrin
f65d85efcc feat(node): add where method to query builder (#183)
Closes #181
2023-06-14 10:54:43 -07:00
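Per the TypeScript diff near the end of this page, `where` is added as an alias for the query builder's existing `filter` method. A minimal sketch under that assumption, continuing from a `tbl` opened as in the example above (the vector and predicate are hypothetical):

```javascript
// inside an async function; `where` is equivalent to `filter` (#183)
const results = await tbl
  .search([0.1, 0.3])  // hypothetical query vector
  .where('id = 2')     // SQL-style predicate, same as .filter('id = 2')
  .limit(1)
  .execute()
```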
Utkarsh Gautam
6b5c046c3b [Python] Updated to_df implementation in Contextualizer class (#174)
Changes include:
- Contexts smaller than the window param are now included as well
- Added an optional threshold parameter to to_df in Contextualizer
This should close #165
- If maintainers are satisfied with the implementation, I will add more
examples and test cases and update the documentation as well.

---------

Co-authored-by: Nithin PS <47279496+Nithinps021@users.noreply.github.com>
Co-authored-by: Will Jones <willjones127@gmail.com>
2023-06-14 09:22:32 -07:00
Lei Xu
d00f4e51d0 Fix node ffi build (#191) 2023-06-13 19:31:29 -07:00
Benjamin Manns
fbc44d4243 Fix small typo in ann_indexes.md (#190) 2023-06-13 17:43:18 -07:00
Lei Xu
b53eee42ce Upgrade to lance 0.4.21 (#187) 2023-06-13 15:39:44 -07:00
Utkarsh Gautam
7e0d6088ca [docs] Fixed langchain example broken link in index.md (#184) 2023-06-13 12:40:39 -07:00
Lance Release
5210f40a33 [python] Bump version: 0.1.7 → 0.1.8 2023-06-12 22:06:59 +00:00
gsilvestrin
5ec4a5d730 feat(python): add action to build and publish wheel (#179) 2023-06-12 14:54:54 -07:00
gsilvestrin
e4f64fca7b Bump pylance 0.4.17 -> 0.4.20 (#173) 2023-06-12 14:54:20 -07:00
Lance Release
4744640bd2 [python] Bump version: 0.1.6 → 0.1.7 2023-06-12 21:39:16 +00:00
gsilvestrin
094b5e643c bugfix(python) Make release action has invalid name (#180) 2023-06-12 14:24:15 -07:00
gsilvestrin
a318778d2a feat(python): add action to tag python releases (#172) 2023-06-12 13:59:08 -07:00
Tevin Wang
9b83ce3d2a add black to python CI (#178)
Closes #48
2023-06-12 11:22:34 -07:00
Nithin PS
7bad676f30 [Python] Fix Contextualizer validation of arguments (#168)
Closes #164

---------

Co-authored-by: Will Jones <willjones127@gmail.com>
2023-06-12 09:20:09 -07:00
gsilvestrin
0e981e782b [nodejs] bumping version to 0.1.5 (#171) 2023-06-09 12:33:17 -07:00
Utkarsh Gautam
e18cdfc7cf [docs] Fixed Minor typo in embedding.md (#167)
Added missing tab to python snippet
2023-06-08 22:01:51 -07:00
Will Jones
fed33a51d5 wip: make the python API reference a bit nicer (#162)
Adds:

* Make `mkdocstrings` aware we are using numpy-style docstrings
* Fixes broken link on `index.md` to Python API docs (and added link to
node ones)
* Added examples to various classes.
* Added doctest to verify examples work.
2023-06-08 16:07:06 -07:00
Jai
a56b65db84 rename examples for slugs (#159) 2023-06-07 16:44:54 -07:00
gsilvestrin
f21caebeda Update links in README.md (#161)
Current one 404s
2023-06-07 13:16:00 -07:00
gsilvestrin
12da77a9f7 [doc] removed index creation from quickstart (#160) 2023-06-07 09:29:38 -07:00
gsilvestrin
131b2dc57b [nodejs] Added completed youtube transcript example / docs (#156) 2023-06-06 16:26:21 -07:00
71 changed files with 2112 additions and 262 deletions

12
.bumpversion.cfg Normal file

@@ -0,0 +1,12 @@
[bumpversion]
current_version = 0.1.8
commit = True
message = Bump version: {current_version} → {new_version}
tag = True
tag_name = v{new_version}
[bumpversion:file:node/package.json]
[bumpversion:file:rust/ffi/node/Cargo.toml]
[bumpversion:file:rust/vectordb/Cargo.toml]

29
.github/workflows/cargo-publish.yml vendored Normal file

@@ -0,0 +1,29 @@
name: Cargo Publish
on:
release:
types: [ published ]
env:
# This env var is used by Swatinem/rust-cache@v2 for the cache
# key, so we set it to make sure it is always consistent.
CARGO_TERM_COLOR: always
jobs:
build:
runs-on: ubuntu-22.04
timeout-minutes: 30
# Only runs on tags that matches the make-release action
if: startsWith(github.ref, 'refs/tags/v')
steps:
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
with:
workspaces: rust
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y protobuf-compiler libssl-dev
- name: Publish the package
run: |
cargo publish -p vectordb --all-features --token ${{ secrets.CARGO_REGISTRY_TOKEN }}

View File

@@ -0,0 +1,55 @@
name: Create release commit
on:
workflow_dispatch:
inputs:
dry_run:
description: 'Dry run (create the local commit/tags but do not push it)'
required: true
default: "false"
type: choice
options:
- "true"
- "false"
part:
description: 'What kind of release is this?'
required: true
default: 'patch'
type: choice
options:
- patch
- minor
- major
jobs:
bump-version:
runs-on: ubuntu-latest
steps:
- name: Check out main
uses: actions/checkout@v3
with:
ref: main
persist-credentials: false
fetch-depth: 0
lfs: true
- name: Set git configs for bumpversion
shell: bash
run: |
git config user.name 'Lance Release'
git config user.email 'lance-dev@lancedb.com'
- name: Set up Python 3.10
uses: actions/setup-python@v4
with:
python-version: "3.10"
- name: Bump version, create tag and commit
run: |
pip install bump2version
bumpversion --verbose ${{ inputs.part }}
- name: Push new version and tag
if: ${{ inputs.dry_run }} == "false"
uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
branch: main
tags: true

View File

@@ -67,8 +67,12 @@
       - name: Build
         run: |
           npm ci
-          npm run build
           npm run tsc
+          npm run build
+          npm run pack-build
+          npm install --no-save ./dist/vectordb-*.tgz
+          # Remove index.node to test with dependency installed
+          rm index.node
       - name: Test
         run: npm run test
   macos:
@@ -94,8 +98,12 @@
       - name: Build
         run: |
           npm ci
-          npm run build
           npm run tsc
+          npm run build
+          npm run pack-build
+          npm install --no-save ./dist/vectordb-*.tgz
+          # Remove index.node to test with dependency installed
+          rm index.node
       - name: Test
         run: |
           npm run test

137
.github/workflows/npm-publish.yml vendored Normal file

@@ -0,0 +1,137 @@
name: NPM Publish
on:
release:
types: [ published ]
jobs:
node:
runs-on: ubuntu-latest
# Only runs on tags that matches the make-release action
if: startsWith(github.ref, 'refs/tags/v')
defaults:
run:
shell: bash
working-directory: node
steps:
- name: Checkout
uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: 20
cache: 'npm'
cache-dependency-path: node/package-lock.json
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y protobuf-compiler libssl-dev
- name: Build
run: |
npm ci
npm run tsc
npm pack
- name: Upload Linux Artifacts
uses: actions/upload-artifact@v3
with:
name: node-package
path: |
node/vectordb-*.tgz
node-macos:
runs-on: macos-12
# Only runs on tags that matches the make-release action
if: startsWith(github.ref, 'refs/tags/v')
strategy:
fail-fast: false
matrix:
target: [x86_64-apple-darwin, aarch64-apple-darwin]
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install system dependencies
run: brew install protobuf
- name: Install npm dependencies
run: |
cd node
npm ci
- name: Install rustup target
if: ${{ matrix.target == 'aarch64-apple-darwin' }}
run: rustup target add aarch64-apple-darwin
- name: Build MacOS native node modules
run: bash ci/build_macos_artifacts.sh ${{ matrix.target }}
- name: Upload Darwin Artifacts
uses: actions/upload-artifact@v3
with:
name: darwin-native
path: |
node/dist/vectordb-darwin*.tgz
node-linux:
name: node-linux (${{ matrix.arch}}-unknown-linux-${{ matrix.libc }})
runs-on: ubuntu-latest
# Only runs on tags that matches the make-release action
if: startsWith(github.ref, 'refs/tags/v')
strategy:
fail-fast: false
matrix:
libc:
- gnu
# TODO: re-enable musl once we have refactored to pre-built containers
# Right now we have to build node from source which is too expensive.
# - musl
arch:
- x86_64
# Building on aarch64 is too slow for now
# - aarch64
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Change owner to root (for npm)
# The docker container is run as root, so we need the files to be owned by root
# Otherwise npm is a nightmare: https://github.com/npm/cli/issues/3773
run: sudo chown -R root:root .
- name: Set up QEMU
if: ${{ matrix.arch == 'aarch64' }}
uses: docker/setup-qemu-action@v2
with:
platforms: arm64
- name: Build Linux GNU native node modules
if: ${{ matrix.libc == 'gnu' }}
run: |
docker run \
-v $(pwd):/io -w /io \
quay.io/pypa/manylinux2014_${{ matrix.arch }} \
bash ci/build_linux_artifacts.sh ${{ matrix.arch }}-unknown-linux-gnu
- name: Build musl Linux native node modules
if: ${{ matrix.libc == 'musl' }}
run: |
docker run --platform linux/arm64/v8 \
-v $(pwd):/io -w /io \
quay.io/pypa/musllinux_1_1_${{ matrix.arch }} \
bash ci/build_linux_artifacts.sh ${{ matrix.arch }}-unknown-linux-musl
- name: Upload Linux Artifacts
uses: actions/upload-artifact@v3
with:
name: linux-native
path: |
node/dist/vectordb-linux*.tgz
release:
needs: [node, node-macos, node-linux]
runs-on: ubuntu-latest
# Only runs on tags that matches the make-release action
if: startsWith(github.ref, 'refs/tags/v')
steps:
- uses: actions/download-artifact@v3
- name: Display structure of downloaded files
run: ls -R
- uses: actions/setup-node@v3
with:
node-version: 20
- name: Publish to NPM
env:
NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
run: |
for filename in */*.tgz; do
npm publish $filename
done

31
.github/workflows/pypi-publish.yml vendored Normal file

@@ -0,0 +1,31 @@
name: PyPI Publish
on:
release:
types: [ published ]
jobs:
publish:
runs-on: ubuntu-latest
# Only runs on tags that matches the python-make-release action
if: startsWith(github.ref, 'refs/tags/python-v')
defaults:
run:
shell: bash
working-directory: python
steps:
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
- name: Build distribution
run: |
ls -la
pip install wheel setuptools --upgrade
python setup.py sdist bdist_wheel
- name: Publish
uses: pypa/gh-action-pypi-publish@v1.8.5
with:
password: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
packages-dir: python/dist

View File

@@ -0,0 +1,56 @@
name: Python - Create release commit
on:
workflow_dispatch:
inputs:
dry_run:
description: 'Dry run (create the local commit/tags but do not push it)'
required: true
default: "false"
type: choice
options:
- "true"
- "false"
part:
description: 'What kind of release is this?'
required: true
default: 'patch'
type: choice
options:
- patch
- minor
- major
jobs:
bump-version:
runs-on: ubuntu-latest
steps:
- name: Check out main
uses: actions/checkout@v3
with:
ref: main
persist-credentials: false
fetch-depth: 0
lfs: true
- name: Set git configs for bumpversion
shell: bash
run: |
git config user.name 'Lance Release'
git config user.email 'lance-dev@lancedb.com'
- name: Set up Python 3.10
uses: actions/setup-python@v4
with:
python-version: "3.10"
- name: Bump version, create tag and commit
working-directory: python
run: |
pip install bump2version
bumpversion --verbose ${{ inputs.part }}
- name: Push new version and tag
if: ${{ inputs.dry_run }} == "false"
uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
branch: main
tags: true

View File

@@ -32,9 +32,15 @@
         run: |
           pip install -e .
           pip install tantivy@git+https://github.com/quickwit-oss/tantivy-py#164adc87e1a033117001cf70e38c82a53014d985
-          pip install pytest pytest-mock
+          pip install pytest pytest-mock black isort
+      - name: Black
+        run: black --check --diff --no-color --quiet .
+      - name: isort
+        run: isort --check --diff --quiet .
       - name: Run tests
         run: pytest -x -v --durations=30 tests
+      - name: doctest
+        run: pytest --doctest-modules lancedb
   mac:
     timeout-minutes: 30
     runs-on: "macos-12"

67
.github/workflows/rust.yml vendored Normal file

@@ -0,0 +1,67 @@
name: Rust
on:
push:
branches:
- main
pull_request:
paths:
- rust/**
- .github/workflows/rust.yml
env:
# This env var is used by Swatinem/rust-cache@v2 for the cache
# key, so we set it to make sure it is always consistent.
CARGO_TERM_COLOR: always
# Disable full debug symbol generation to speed up CI build and keep memory down
# "1" means line tables only, which is useful for panic tracebacks.
RUSTFLAGS: "-C debuginfo=1"
RUST_BACKTRACE: "1"
jobs:
linux:
timeout-minutes: 30
runs-on: ubuntu-22.04
defaults:
run:
shell: bash
working-directory: rust
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
lfs: true
- uses: Swatinem/rust-cache@v2
with:
workspaces: rust
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y protobuf-compiler libssl-dev
- name: Build
run: cargo build --all-features
- name: Run tests
run: cargo test --all-features
macos:
runs-on: macos-12
timeout-minutes: 30
defaults:
run:
shell: bash
working-directory: rust
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
lfs: true
- name: CPU features
run: sysctl -a | grep cpu
- uses: Swatinem/rust-cache@v2
with:
workspaces: rust
- name: Install dependencies
run: brew install protobuf
- name: Build
run: cargo build --all-features
- name: Run tests
run: cargo test --all-features

2
.gitignore vendored

@@ -4,6 +4,8 @@
 **/__pycache__
 .DS_Store
+.vscode
+
 rust/target
 rust/Cargo.lock

36
Cargo.lock generated

@@ -190,6 +190,7 @@
  "arrow-data",
  "arrow-schema",
  "flatbuffers",
+ "zstd",
 ]
 
 [[package]]
@@ -654,6 +655,12 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9b1ce199063694f33ffb7dd4e0ee620741495c32833cde5aa08f02a0bf96f0c8"
 
+[[package]]
+name = "bytemuck"
+version = "1.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea"
+
 [[package]]
 name = "byteorder"
 version = "1.4.3"
@@ -1646,9 +1653,9 @@
 [[package]]
 name = "lance"
-version = "0.4.17"
+version = "0.4.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86dda8185bd1ffae7b910c1f68035af23be9b717c52e9cc4de176cd30b47f772"
+checksum = "3d6c2e7bcfc71c7167ec70cd06c6d55c644a148f6580218c5a0b66e13ac5b5cc"
 dependencies = [
  "accelerate-src",
  "arrow",
@@ -1657,7 +1664,9 @@
  "arrow-buffer",
  "arrow-cast",
  "arrow-data",
+ "arrow-ipc",
  "arrow-ord",
+ "arrow-row",
  "arrow-schema",
  "arrow-select",
  "async-recursion",
@@ -1668,6 +1677,7 @@
  "bytes",
  "cblas",
  "chrono",
+ "dashmap",
  "datafusion",
  "futures",
  "lapack",
@@ -1684,6 +1694,7 @@
  "prost-types",
  "rand",
  "reqwest",
+ "roaring",
  "shellexpand",
  "snafu",
  "sqlparser-lance",
@@ -2598,6 +2609,12 @@
  "winreg",
 ]
 
+[[package]]
+name = "retain_mut"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c31b5c4033f8fdde8700e4657be2c497e7288f01515be52168c631e2e4d4086"
+
 [[package]]
 name = "ring"
 version = "0.16.20"
@@ -2613,6 +2630,17 @@
  "winapi",
 ]
 
+[[package]]
+name = "roaring"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ef0fb5e826a8bde011ecae6a8539dd333884335c57ff0f003fbe27c25bbe8f71"
+dependencies = [
+ "bytemuck",
+ "byteorder",
+ "retain_mut",
+]
+
 [[package]]
 name = "rustc_version"
 version = "0.4.0"
@@ -3358,7 +3386,7 @@
 [[package]]
 name = "vectordb"
-version = "0.0.1"
+version = "0.1.6"
 dependencies = [
  "arrow-array",
  "arrow-data",
@@ -3373,7 +3401,7 @@
 [[package]]
 name = "vectordb-node"
-version = "0.1.0"
+version = "0.1.6"
 dependencies = [
  "arrow-array",
  "arrow-ipc",

View File

@@ -75,4 +75,4 @@
 ## Blogs, Tutorials & Videos
 
 * 📈 <a href="https://blog.eto.ai/benchmarking-random-access-in-lance-ed690757a826">2000x better performance with Lance over Parquet</a>
-* 🤖 <a href="https://github.com/lancedb/lancedb/blob/main/notebooks/youtube_transcript_search.ipynb">Build a question and answer bot with LanceDB</a>
+* 🤖 <a href="https://github.com/lancedb/lancedb/blob/main/docs/src/notebooks/youtube_transcript_search.ipynb">Build a question and answer bot with LanceDB</a>

View File

@@ -0,0 +1,91 @@
#!/bin/bash
# Builds the Linux artifacts (node binaries).
# Usage: ./build_linux_artifacts.sh [target]
# Targets supported:
# - x86_64-unknown-linux-gnu:centos
# - aarch64-unknown-linux-gnu:centos
# - aarch64-unknown-linux-musl
# - x86_64-unknown-linux-musl
# TODO: refactor this into a Docker container we can pull
set -e
setup_dependencies() {
echo "Installing system dependencies..."
if [[ $1 == *musl ]]; then
# musllinux
apk add openssl-dev
else
# manylinux2014
yum install -y openssl-devel unzip
fi
if [[ $1 == x86_64* ]]; then
ARCH=x86_64
else
# gnu target
ARCH=aarch_64
fi
# Install new enough protobuf (yum-provided is old)
PB_REL=https://github.com/protocolbuffers/protobuf/releases
PB_VERSION=23.1
curl -LO $PB_REL/download/v$PB_VERSION/protoc-$PB_VERSION-linux-$ARCH.zip
unzip protoc-$PB_VERSION-linux-$ARCH.zip -d /usr/local
}
install_node() {
echo "Installing node..."
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.34.0/install.sh | bash
source "$HOME"/.bashrc
if [[ $1 == *musl ]]; then
# This node version is 15, we need 16 or higher:
# apk add nodejs-current npm
# So instead we install from source (nvm doesn't provide binaries for musl):
nvm install -s --no-progress 17
else
nvm install --no-progress 17 # latest that supports glibc 2.17
fi
}
install_rust() {
echo "Installing rust..."
curl https://sh.rustup.rs -sSf | bash -s -- -y
export PATH="$PATH:/root/.cargo/bin"
}
build_node_binary() {
echo "Building node library for $1..."
pushd node
npm ci
if [[ $1 == *musl ]]; then
# This is needed for cargo to allow build cdylibs with musl
export RUSTFLAGS="-C target-feature=-crt-static"
fi
# Cargo can run out of memory while pulling dependencies, especially when running
# in QEMU. This is a workaround for that.
export CARGO_NET_GIT_FETCH_WITH_CLI=true
# We don't pass in target, since the native target here already matches
# and openblas-src doesn't do well with cross-compilation.
npm run build-release
npm run pack-build
popd
}
TARGET=${1:-x86_64-unknown-linux-gnu}
# Others:
# aarch64-unknown-linux-gnu
# x86_64-unknown-linux-musl
# aarch64-unknown-linux-musl
setup_dependencies $TARGET
install_node $TARGET
install_rust
build_node_binary $TARGET

View File

@@ -0,0 +1,33 @@
# Builds the macOS artifacts (node binaries).
# Usage: ./ci/build_macos_artifacts.sh [target]
# Targets supported: x86_64-apple-darwin aarch64-apple-darwin
prebuild_rust() {
# Building here for the sake of easier debugging.
pushd rust/ffi/node
echo "Building rust library for $1"
export RUST_BACKTRACE=1
cargo build --release --target $1
popd
}
build_node_binaries() {
pushd node
echo "Building node library for $1"
npm run build-release -- --target $1
npm run pack-build -- --target $1
popd
}
if [ -n "$1" ]; then
targets=$1
else
targets="x86_64-apple-darwin aarch64-apple-darwin"
fi
echo "Building artifacts for targets: $targets"
for target in $targets
do
prebuild_rust $target
build_node_binaries $target
done

View File

@@ -14,10 +14,24 @@
 plugins:
   - search
+  - autorefs
   - mkdocstrings:
       handlers:
         python:
           paths: [../python]
+          selection:
+            docstring_style: numpy
+          rendering:
+            heading_level: 4
+            show_source: false
+            show_symbol_type_in_heading: true
+            show_signature_annotations: true
+            show_root_heading: true
+            members_order: source
+          import:
+            # for cross references
+            - https://arrow.apache.org/docs/objects.inv
+            - https://pandas.pydata.org/docs/objects.inv
   - mkdocs-jupyter
 
 markdown_extensions:
@@ -41,9 +55,13 @@
     - Python full-text search: fts.md
     - Python integrations: integrations.md
     - Python examples:
-      - YouTube Transcript Search using OpenAI: notebooks/youtube_transcript_search.ipynb
+      - YouTube Transcript Search: notebooks/youtube_transcript_search.ipynb
       - Documentation QA Bot using LangChain: notebooks/code_qa_bot.ipynb
       - Multimodal search using CLIP: notebooks/multimodal_search.ipynb
+      - Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md
+      - Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md
+    - Javascript examples:
+      - YouTube Transcript Search: examples/youtube_transcript_bot_with_nodejs.md
   - References:
     - Vector Search: search.md
     - Indexing: ann_indexes.md

View File

@@ -67,7 +67,7 @@
   e.g., for 1M vectors divided up into 256 partitions, nprobes should be set to ~20-40.<br/>
   Note: nprobes is only applicable if an ANN index is present. If specified on a table without an ANN index, it is ignored.
 - **refine_factor** (default: None): Refine the results by reading extra elements and re-ranking them in memory.<br/>
-  A higher number makes search more accurate but also slower. If you find the recall is less than idea, try refine_factor=10 to start.<br/>
+  A higher number makes search more accurate but also slower. If you find the recall is less than ideal, try refine_factor=10 to start.<br/>
   e.g., for 1M vectors divided into 256 partitions, if you're looking for top 20, then refine_factor=200 reranks the whole partition.<br/>
   Note: refine_factor is only applicable if an ANN index is present. If specified on a table without an ANN index, it is ignored.

View File

@@ -45,7 +45,7 @@
 assert len(openai.Model.list()["data"]) > 0
 
 def embed_func(c):
     rs = openai.Embedding.create(input=c, engine="text-embedding-ada-002")
-   return [record["embedding"] for record in rs["data"]]
+    return [record["embedding"] for record in rs["data"]]
 ```

View File

@@ -1,27 +1,22 @@
-import sys
-from modal import Secret, Stub, Image, web_endpoint
-import lancedb
-import re
 import pickle
-import requests
+import re
+import sys
 import zipfile
 from pathlib import Path
 
+import requests
+from langchain.chains import RetrievalQA
 from langchain.document_loaders import UnstructuredHTMLLoader
 from langchain.embeddings import OpenAIEmbeddings
+from langchain.llms import OpenAI
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import LanceDB
-from langchain.llms import OpenAI
-from langchain.chains import RetrievalQA
+from modal import Image, Secret, Stub, web_endpoint
+
+import lancedb
 
 lancedb_image = Image.debian_slim().pip_install(
-    "lancedb",
-    "langchain",
-    "openai",
-    "pandas",
-    "tiktoken",
-    "unstructured",
-    "tabulate"
+    "lancedb", "langchain", "openai", "pandas", "tiktoken", "unstructured", "tabulate"
 )
 
 stub = Stub(
@@ -34,21 +29,26 @@
 docs_path = Path("docs.pkl")
 db_path = Path("lancedb")
 
+
 def get_document_title(document):
     m = str(document.metadata["source"])
     title = re.findall("pandas.documentation(.*).html", m)
     if title[0] is not None:
-        return(title[0])
-    return ''
+        return title[0]
+    return ""
+
 
 def download_docs():
-    pandas_docs = requests.get("https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip")
+    pandas_docs = requests.get(
+        "https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip"
+    )
     with open(Path("pandas.documentation.zip"), "wb") as f:
         f.write(pandas_docs.content)
 
     file = zipfile.ZipFile(Path("pandas.documentation.zip"))
     file.extractall(path=Path("pandas_docs"))
 
+
 def store_docs():
     docs = []
@@ -74,6 +74,7 @@
     return docs
 
+
 def qanda_langchain(query):
     download_docs()
     docs = store_docs()
@@ -85,14 +86,25 @@
     documents = text_splitter.split_documents(docs)
     embeddings = OpenAIEmbeddings()
     db = lancedb.connect(db_path)
-    table = db.create_table("pandas_docs", data=[
-        {"vector": embeddings.embed_query("Hello World"), "text": "Hello World", "id": "1"}
-    ], mode="overwrite")
+    table = db.create_table(
+        "pandas_docs",
+        data=[
+            {
+                "vector": embeddings.embed_query("Hello World"),
+                "text": "Hello World",
+                "id": "1",
+            }
+        ],
+        mode="overwrite",
+    )
     docsearch = LanceDB.from_documents(documents, embeddings, connection=table)
-    qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever())
+    qa = RetrievalQA.from_chain_type(
+        llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever()
+    )
     return qa.run(query)
 
+
 @stub.function()
 @web_endpoint(method="GET")
 def web(query: str):
@@ -101,6 +113,7 @@
         "answer": answer,
     }
 
+
 @stub.function()
 def cli(query: str):
     answer = qanda_langchain(query)

View File

@@ -1,99 +0,0 @@
# YouTube transcript QA bot with NodeJS
## use LanceDB's Javascript API and OpenAI to build a QA bot for YouTube transcripts
<img id="splash" width="400" alt="nodejs" src="https://github.com/lancedb/lancedb/assets/917119/3a140e75-bf8e-438a-a1e4-af14a72bcf98">
This Q&A bot will allow you to search through youtube transcripts using natural language! We'll introduce how you can use LanceDB's Javascript API to store and manage your data easily.
For this example we're using a HuggingFace dataset that contains YouTube transcriptions: `jamescalam/youtube-transcriptions`, to make it easier, we've converted it to a LanceDB `db` already, which you can download and put in a working directory:
```wget -c https://eto-public.s3.us-west-2.amazonaws.com/lancedb_demo.tar.gz -O - | tar -xz -C .```
Now, we'll create a simple app that can:
1. Take a text based query and search for contexts in our corpus, using embeddings generated from the OpenAI Embedding API.
2. Create a prompt with the contexts, and call the OpenAI Completion API to answer the text based query.
Dependencies and setup of OpenAI API:
```javascript
const lancedb = require("vectordb");
const { Configuration, OpenAIApi } = require("openai");
const configuration = new Configuration({
apiKey: process.env.OPENAI_API_KEY,
});
const openai = new OpenAIApi(configuration);
```
First, let's set our question and the context amount. The context amount will be used to query similar documents in our corpus.
```javascript
const QUESTION = "who was the 12th person on the moon and when did they land?";
const CONTEXT_AMOUNT = 3;
```
Now, let's generate an embedding from this question:
```javascript
const embeddingResponse = await openai.createEmbedding({
model: "text-embedding-ada-002",
input: QUESTION,
});
const embedding = embeddingResponse.data["data"][0]["embedding"];
```
Once we have the embedding, we can connect to LanceDB (using the database we downloaded earlier), and search through the chatbot table.
We'll extract 3 similar documents found.
```javascript
const db = await lancedb.connect('./lancedb');
const tbl = await db.openTable('chatbot');
const query = tbl.search(embedding);
query.limit = CONTEXT_AMOUNT;
const context = await query.execute();
```
Let's combine the context together so we can pass it into our prompt:
```javascript
for (let i = 1; i < context.length; i++) {
context[0]["text"] += " " + context[i]["text"];
}
```
Lastly, let's construct the prompt. You could play around with this to create more accurate/better prompts to yield results.
```javascript
const prompt = "Answer the question based on the context below.\n\n" +
"Context:\n" +
`${context[0]["text"]}\n` +
`\n\nQuestion: ${QUESTION}\nAnswer:`;
```
We pass the prompt, along with the context, to the completion API.
```javascript
const completion = await openai.createCompletion({
model: "text-davinci-003",
prompt,
temperature: 0,
max_tokens: 400,
top_p: 1,
frequency_penalty: 0,
presence_penalty: 0,
});
```
And that's it!
```javascript
console.log(completion.data.choices[0].text);
```
The response is (which is non deterministic):
```
The 12th person on the moon was Harrison Schmitt and he landed on December 11, 1972.
```

View File

@@ -0,0 +1,139 @@
# YouTube transcript QA bot with NodeJS
## use LanceDB's Javascript API and OpenAI to build a QA bot for YouTube transcripts
<img id="splash" width="400" alt="nodejs" src="https://github.com/lancedb/lancedb/assets/917119/3a140e75-bf8e-438a-a1e4-af14a72bcf98">
This Q&A bot will allow you to search through youtube transcripts using natural language! We'll introduce how to use LanceDB's Javascript API to store and manage your data easily.
```bash
npm install vectordb
```
## Download the data
For this example, we're using a sample of a HuggingFace dataset that contains YouTube transcriptions: `jamescalam/youtube-transcriptions`. Download and extract this file under the `data` folder:
```bash
wget -c https://eto-public.s3.us-west-2.amazonaws.com/datasets/youtube_transcript/youtube-transcriptions_sample.jsonl
```
## Prepare Context
Each item in the dataset contains just a short chunk of text. We'll need to merge a bunch of these chunks together on a rolling basis. For this demo, we'll look back 20 records to create a more complete context for each sentence.
First, we need to read and parse the input file.
```javascript
const lines = (await fs.readFile(INPUT_FILE_NAME, 'utf-8'))
.toString()
.split('\n')
.filter(line => line.length > 0)
.map(line => JSON.parse(line))
const data = contextualize(lines, 20, 'video_id')
```
The `contextualize` function groups the transcripts by `video_id` and then creates the expanded context for each item.
```javascript
function contextualize (rows, contextSize, groupColumn) {
const grouped = []
rows.forEach(row => {
if (!grouped[row[groupColumn]]) {
grouped[row[groupColumn]] = []
}
grouped[row[groupColumn]].push(row)
})
const data = []
Object.keys(grouped).forEach(key => {
for (let i = 0; i < grouped[key].length; i++) {
const start = i - contextSize > 0 ? i - contextSize : 0
grouped[key][i].context = grouped[key].slice(start, i + 1).map(r => r.text).join(' ')
}
data.push(...grouped[key])
})
return data
}
```
## Create the LanceDB Table
To load our data into LanceDB, we need to create embedding (vectors) for each item. For this example, we will use the OpenAI embedding functions, which have a native integration with LanceDB.
```javascript
// You need to provide an OpenAI API key, here we read it from the OPENAI_API_KEY environment variable
const apiKey = process.env.OPENAI_API_KEY
// The embedding function will create embeddings for the 'context' column
const embedFunction = new lancedb.OpenAIEmbeddingFunction('context', apiKey)
// Connects to LanceDB
const db = await lancedb.connect('data/youtube-lancedb')
const tbl = await db.createTable('vectors', data, embedFunction)
```
## Create and answer the prompt
We will accept questions in natural language and use our corpus stored in LanceDB to answer them. First, we need to set up the OpenAI client:
```javascript
const configuration = new Configuration({ apiKey })
const openai = new OpenAIApi(configuration)
```
Then we can prompt the user for a question and use LanceDB to retrieve the three most relevant transcripts for that prompt.
```javascript
const query = await rl.question('Prompt: ')
const results = await tbl
.search(query)
.select(['title', 'text', 'context'])
.limit(3)
.execute()
```
The query and the transcripts' context are appended together in a single prompt:
```javascript
function createPrompt (query, context) {
let prompt =
'Answer the question based on the context below.\n\n' +
'Context:\n'
// need to make sure our prompt is not larger than max size
prompt = prompt + context.map(c => c.context).join('\n\n---\n\n').substring(0, 3750)
prompt = prompt + `\n\nQuestion: ${query}\nAnswer:`
return prompt
}
```
We can now use the OpenAI Completion API to process our custom prompt and give us an answer.
```javascript
const response = await openai.createCompletion({
model: 'text-davinci-003',
prompt: createPrompt(query, results),
max_tokens: 400,
temperature: 0,
top_p: 1,
frequency_penalty: 0,
presence_penalty: 0
})
console.log(response.data.choices[0].text)
```
## Let's put it all together now
Now you can provide queries and have them answered based on your local LanceDB data.
```bash
Prompt: who was the 12th person on the moon and when did they land?
The 12th person on the moon was Harrison Schmitt and he landed on December 11, 1972.
Prompt: Which training method should I use for sentence transformers when I only have pairs of related sentences?
NLI with multiple negative ranking loss.
```
## That's a wrap
In this example, you learned how to use LanceDB to store and query embedding representations of your local data. The complete example code is on [GitHub](https://github.com/lancedb/lancedb/tree/main/node/examples), and you can also download the LanceDB dataset using [this link](https://eto-public.s3.us-west-2.amazonaws.com/datasets/youtube_transcript/youtube-lancedb.zip).

View File

@@ -8,11 +8,13 @@
 * Store, query and filter vectors, metadata and multi-modal data (text, images, videos, point clouds, and more).
+* Support for vector similarity search, full-text search and SQL.
 * Native Python and Javascript/Typescript support.
 * Zero-copy, automatic versioning, manage versions of your data without needing extra infrastructure.
-* Ecosystem integrations with [LangChain 🦜️🔗](https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lanecdb.html), [LlamaIndex 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way.
+* Ecosystem integrations with [LangChain 🦜️🔗](https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lancedb.html), [LlamaIndex 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way.
 
 LanceDB's core is written in Rust 🦀 and is built using <a href="https://github.com/lancedb/lance">Lance</a>, an open-source columnar format designed for performant ML workloads.
@@ -54,6 +56,11 @@
 - [YouTube Transcript Search](notebooks/youtube_transcript_search.ipynb)
 - [Documentation QA Bot using LangChain](notebooks/code_qa_bot.ipynb)
 - [Multimodal search using CLIP](notebooks/multimodal_search.ipynb)
+- [Serverless QA Bot with S3 and Lambda](examples/serverless_lancedb_with_s3_and_lambda.md)
+- [Serverless QA Bot with Modal](examples/serverless_qa_bot_with_modal_and_langchain.md)
+
+## Complete Demos (JavaScript)
+- [YouTube Transcript Search](examples/youtube_transcript_bot_with_nodejs.md)
 
 ## Documentation Quick Links
 * [`Basic Operations`](basic.md) - basic functionality of LanceDB.
@@ -61,4 +68,5 @@
 * [`Indexing`](ann_indexes.md) - create vector indexes to speed up queries.
 * [`Full text search`](fts.md) - [EXPERIMENTAL] full-text search API
 * [`Ecosystem Integrations`](integrations.md) - integrating LanceDB with python data tooling ecosystem.
-* [`API Reference`](python.md) - detailed documentation for the LanceDB Python SDK.
+* [`Python API Reference`](python/python.md) - detailed documentation for the LanceDB Python SDK.
+* [`Node API Reference`](javascript/modules.md) - detailed documentation for the LanceDB Node SDK.

View File

@@ -24,9 +24,6 @@
     "price": [10.0, 20.0]
 })
 table = db.create_table("pd_table", data=data)
-
-# Optionally, create a IVF_PQ index
-table.create_index(num_partitions=256, num_sub_vectors=96)
 ```
 
 You will find detailed instructions of creating dataset and index in [Basic Operations](basic.md) and [Indexing](indexing.md)

View File

@@ -21,12 +21,13 @@
 from multiprocessing import Pool
 
+import lance
+import lancedb
 import pyarrow as pa
 from datasets import load_dataset
 from PIL import Image
 from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast
-import lancedb
 
 MODEL_ID = "openai/clip-vit-base-patch32"
 device = "cuda"

View File

@@ -1,11 +1,12 @@
 {
  "cells": [
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "42bf01fb",
    "metadata": {},
    "source": [
-    "# We're going to build question and answer bot\n",
+    "# Youtube Transcript Search QA Bot\n",
     "\n",
     "This Q&A bot will allow you to search through youtube transcripts using natural language! By going through this notebook, we'll introduce how you can use LanceDB to store and manage your data easily."
    ]
@@ -35,6 +36,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "22e570f4",
    "metadata": {},
@@ -87,6 +89,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "5ac2b6a3",
    "metadata": {},
@@ -181,6 +184,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "3044e0b0",
    "metadata": {},
@@ -209,6 +213,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "db586267",
    "metadata": {},
@@ -229,6 +234,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "2106b5bb",
    "metadata": {},
@@ -338,6 +344,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "53e4bff1",
    "metadata": {},
@@ -371,6 +378,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "8ef34fca",
    "metadata": {},
@@ -459,6 +467,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "23afc2f9",
    "metadata": {},
@@ -541,6 +550,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "28705959",
    "metadata": {},
@@ -571,6 +581,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "559a095b",
    "metadata": {},

View File

@@ -6,9 +6,38 @@
 pip install lancedb
 ```
 
-## ::: lancedb
-## ::: lancedb.db
-## ::: lancedb.table
-## ::: lancedb.query
-## ::: lancedb.embeddings
-## ::: lancedb.context
+## Connection
+
+::: lancedb.connect
+
+::: lancedb.LanceDBConnection
+
+## Table
+
+::: lancedb.table.LanceTable
+
+## Querying
+
+::: lancedb.query.LanceQueryBuilder
+
+::: lancedb.query.LanceFtsQueryBuilder
+
+## Embeddings
+
+::: lancedb.embeddings.with_embeddings
+
+::: lancedb.embeddings.EmbeddingFunction
+
+## Context
+
+::: lancedb.context.contextualize
+
+::: lancedb.context.Contextualizer
+
+## Full text search
+
+::: lancedb.fts.create_index
+
+::: lancedb.fts.populate_index
+
+::: lancedb.fts.search_index

4
node/.npmignore Normal file

@@ -0,0 +1,4 @@
gen_test_data.py
index.node
dist/lancedb*.tgz
vectordb*.tgz

View File

@@ -5,6 +5,12 @@
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.1.5] - 2023-06-00
+
+### Added
+
+- Support for macOS X86
+
 ## [0.1.4] - 2023-06-03
 
 ### Added

View File

@@ -8,6 +8,10 @@
 npm install vectordb
 ```
 
+This will download the appropriate native library for your platform. We currently
+support x86_64 Linux, aarch64 Linux, Intel MacOS, and ARM (M1/M2) MacOS. We do not
+yet support Windows or musl-based Linux (such as Alpine Linux).
+
 ## Usage
 
 ### Basic Example
@@ -24,17 +28,33 @@
 ## Development
 
-The LanceDB javascript is built with npm:
+To build everything fresh:
+
+```bash
+npm install
+npm run tsc
+npm run build
+```
+
+Then you should be able to run the tests with:
+
+```bash
+npm test
+```
+
+### Rebuilding Rust library
+
+```bash
+npm run build
+```
+
+### Rebuilding Typescript
 
 ```bash
 npm run tsc
 ```
 
-Run the tests with
-
-```bash
-npm test
-```
+### Fix lints
 
 To run the linter and have it automatically fix all errors

View File

@@ -0,0 +1,122 @@
// Copyright 2023 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'use strict'
const lancedb = require('vectordb')
const fs = require('fs/promises')
const readline = require('readline/promises')
const { stdin: input, stdout: output } = require('process')
const { Configuration, OpenAIApi } = require('openai')
// Download file from XYZ
const INPUT_FILE_NAME = 'data/youtube-transcriptions_sample.jsonl';
(async () => {
// You need to provide an OpenAI API key, here we read it from the OPENAI_API_KEY environment variable
const apiKey = process.env.OPENAI_API_KEY
// The embedding function will create embeddings for the 'context' column
const embedFunction = new lancedb.OpenAIEmbeddingFunction('context', apiKey)
// Connects to LanceDB
const db = await lancedb.connect('data/youtube-lancedb')
// Open the vectors table or create one if it does not exist
let tbl
if ((await db.tableNames()).includes('vectors')) {
tbl = await db.openTable('vectors', embedFunction)
} else {
tbl = await createEmbeddingsTable(db, embedFunction)
}
// Use OpenAI Completion API to generate and answer based on the context that LanceDB provides
const configuration = new Configuration({ apiKey })
const openai = new OpenAIApi(configuration)
const rl = readline.createInterface({ input, output })
try {
while (true) {
const query = await rl.question('Prompt: ')
const results = await tbl
.search(query)
.select(['title', 'text', 'context'])
.limit(3)
.execute()
// console.table(results)
const response = await openai.createCompletion({
model: 'text-davinci-003',
prompt: createPrompt(query, results),
max_tokens: 400,
temperature: 0,
top_p: 1,
frequency_penalty: 0,
presence_penalty: 0
})
console.log(response.data.choices[0].text)
}
} catch (err) {
console.log('Error: ', err)
} finally {
rl.close()
}
process.exit(1)
})()
async function createEmbeddingsTable (db, embedFunction) {
console.log(`Creating embeddings from ${INPUT_FILE_NAME}`)
// read the input file into a JSON array, skipping empty lines
const lines = (await fs.readFile(INPUT_FILE_NAME, 'utf-8'))
.toString()
.split('\n')
.filter(line => line.length > 0)
.map(line => JSON.parse(line))
const data = contextualize(lines, 20, 'video_id')
return await db.createTable('vectors', data, embedFunction)
}
// Each transcript has a small text column, we include previous transcripts in order to
// have more context information when creating embeddings
function contextualize (rows, contextSize, groupColumn) {
const grouped = []
rows.forEach(row => {
if (!grouped[row[groupColumn]]) {
grouped[row[groupColumn]] = []
}
grouped[row[groupColumn]].push(row)
})
const data = []
Object.keys(grouped).forEach(key => {
for (let i = 0; i < grouped[key].length; i++) {
const start = i - contextSize > 0 ? i - contextSize : 0
grouped[key][i].context = grouped[key].slice(start, i + 1).map(r => r.text).join(' ')
}
data.push(...grouped[key])
})
return data
}
// Creates a prompt by aggregating all relevant contexts
function createPrompt (query, context) {
let prompt =
'Answer the question based on the context below.\n\n' +
'Context:\n'
// need to make sure our prompt is not larger than max size
prompt = prompt + context.map(c => c.context).join('\n\n---\n\n').substring(0, 3750)
prompt = prompt + `\n\nQuestion: ${query}\nAnswer:`
return prompt
}

View File

@@ -0,0 +1,15 @@
{
"name": "vectordb-example-js-openai",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "Lance Devs",
"license": "Apache-2.0",
"dependencies": {
"vectordb": "file:../..",
"openai": "^3.2.1"
}
}

View File

@@ -12,29 +12,26 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+const { currentTarget } = require('@neon-rs/load');
+
 let nativeLib;
 
-function getPlatformLibrary() {
-  if (process.platform === "darwin" && process.arch == "arm64") {
-    return require('./aarch64-apple-darwin.node');
-  } else if (process.platform === "darwin" && process.arch == "x64") {
-    return require('./x86_64-apple-darwin.node');
-  } else if (process.platform === "linux" && process.arch == "x64") {
-    return require('./x86_64-unknown-linux-gnu.node');
-  } else {
-    throw new Error(`vectordb: unsupported platform ${process.platform}_${process.arch}. Please file a bug report at https://github.com/lancedb/lancedb/issues`)
-  }
-}
-
 try {
-  nativeLib = require('./index.node')
+  // Dynamic require for runtime.
+  nativeLib = require(`vectordb-${currentTarget()}`);
 } catch (e) {
-  if (e.code === "MODULE_NOT_FOUND") {
-    nativeLib = getPlatformLibrary();
-  } else {
-    throw new Error('vectordb: failed to load native library. Please file a bug report at https://github.com/lancedb/lancedb/issues');
-  }
+  try {
+    // Might be developing locally, so try that. But don't expose that error
+    // to the user.
+    nativeLib = require("./index.node");
+  } catch {
+    throw new Error(`vectordb: failed to load native library.
+You may need to run \`npm install vectordb-${currentTarget()}\`.
+If that does not work, please file a bug report at https://github.com/lancedb/lancedb/issues
+Source error: ${e}`);
+  }
 }
 
-module.exports = nativeLib
+module.exports = nativeLib;

45
node/package-lock.json generated

@@ -1,18 +1,28 @@
 {
   "name": "vectordb",
-  "version": "0.1.4",
+  "version": "0.1.5",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.1.4",
+      "version": "0.1.5",
+      "cpu": [
+        "x64",
+        "arm64"
+      ],
       "license": "Apache-2.0",
+      "os": [
+        "darwin",
+        "linux"
+      ],
       "dependencies": {
         "@apache-arrow/ts": "^12.0.0",
+        "@neon-rs/load": "^0.0.74",
         "apache-arrow": "^12.0.0"
       },
       "devDependencies": {
+        "@neon-rs/cli": "^0.0.74",
         "@types/chai": "^4.3.4",
         "@types/mocha": "^10.0.1",
         "@types/node": "^18.16.2",
@@ -35,6 +45,12 @@
         "typedoc": "^0.24.7",
         "typedoc-plugin-markdown": "^3.15.3",
         "typescript": "*"
+      },
+      "optionalDependencies": {
+        "vectordb-darwin-arm64": "0.1.2",
+        "vectordb-darwin-x64": "0.1.2",
+        "vectordb-linux-arm64-gnu": "0.1.2",
+        "vectordb-linux-x64-gnu": "0.1.2"
       }
     },
     "node_modules/@apache-arrow/ts": {
@@ -202,6 +218,20 @@
         "@jridgewell/sourcemap-codec": "^1.4.10"
       }
     },
+    "node_modules/@neon-rs/cli": {
+      "version": "0.0.74",
+      "resolved": "https://registry.npmjs.org/@neon-rs/cli/-/cli-0.0.74.tgz",
+      "integrity": "sha512-9lPmNmjej5iKKOTMPryOMubwkgMRyTWRuaq1yokASvI5mPhr2kzPN7UVjdCOjQvpunNPngR9yAHoirpjiWhUHw==",
+      "dev": true,
+      "bin": {
+        "neon": "index.js"
+      }
+    },
+    "node_modules/@neon-rs/load": {
+      "version": "0.0.74",
+      "resolved": "https://registry.npmjs.org/@neon-rs/load/-/load-0.0.74.tgz",
+      "integrity": "sha512-/cPZD907UNz55yrc/ud4wDgQKtU1TvkD9jeqZWG6J4IMmZkp6zgjkQcKA8UvpkZlcpPHvc8J17sGzLFbP/LUYg=="
+    },
     "node_modules/@nodelib/fs.scandir": {
       "version": "2.1.5",
       "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
@@ -4601,6 +4631,17 @@
         "@jridgewell/sourcemap-codec": "^1.4.10"
       }
     },
+    "@neon-rs/cli": {
+      "version": "0.0.74",
+      "resolved": "https://registry.npmjs.org/@neon-rs/cli/-/cli-0.0.74.tgz",
+      "integrity": "sha512-9lPmNmjej5iKKOTMPryOMubwkgMRyTWRuaq1yokASvI5mPhr2kzPN7UVjdCOjQvpunNPngR9yAHoirpjiWhUHw==",
+      "dev": true
+    },
+    "@neon-rs/load": {
+      "version": "0.0.74",
+      "resolved": "https://registry.npmjs.org/@neon-rs/load/-/load-0.0.74.tgz",
+      "integrity": "sha512-/cPZD907UNz55yrc/ud4wDgQKtU1TvkD9jeqZWG6J4IMmZkp6zgjkQcKA8UvpkZlcpPHvc8J17sGzLFbP/LUYg=="
+    },
     "@nodelib/fs.scandir": {
       "version": "2.1.5",
       "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",

View File

@@ -1,16 +1,19 @@
 {
   "name": "vectordb",
-  "version": "0.1.4",
+  "version": "0.1.8",
   "description": " Serverless, low-latency vector database for AI applications",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "scripts": {
     "tsc": "tsc -b",
-    "build": "cargo-cp-artifact --artifact cdylib vectordb-node index.node -- cargo build --message-format=json-render-diagnostics",
+    "build": "cargo-cp-artifact --artifact cdylib vectordb-node index.node -- cargo build --message-format=json",
     "build-release": "npm run build -- --release",
+    "cross-release": "cargo-cp-artifact --artifact cdylib vectordb-node index.node -- cross build --message-format=json --release -p vectordb-node",
     "test": "mocha -recursive dist/test",
     "lint": "eslint src --ext .js,.ts",
-    "clean": "rm -rf node_modules *.node dist/"
+    "clean": "rm -rf node_modules *.node dist/",
+    "pack-build": "neon pack-build",
+    "check-npm": "printenv && which node && which npm && npm --version"
   },
   "repository": {
     "type": "git",
@@ -25,6 +28,7 @@
   "author": "Lance Devs",
   "license": "Apache-2.0",
   "devDependencies": {
+    "@neon-rs/cli": "^0.0.74",
     "@types/chai": "^4.3.4",
     "@types/mocha": "^10.0.1",
     "@types/node": "^18.16.2",
@@ -50,6 +54,29 @@
   },
   "dependencies": {
     "@apache-arrow/ts": "^12.0.0",
+    "@neon-rs/load": "^0.0.74",
     "apache-arrow": "^12.0.0"
+  },
+  "os": [
+    "darwin",
+    "linux"
+  ],
+  "cpu": [
+    "x64",
+    "arm64"
+  ],
+  "neon": {
+    "targets": {
+      "x86_64-apple-darwin": "vectordb-darwin-x64",
+      "aarch64-apple-darwin": "vectordb-darwin-arm64",
+      "x86_64-unknown-linux-gnu": "vectordb-linux-x64-gnu",
+      "aarch64-unknown-linux-gnu": "vectordb-linux-arm64-gnu"
+    }
+  },
+  "optionalDependencies": {
+    "vectordb-darwin-arm64": "0.1.8",
+    "vectordb-darwin-x64": "0.1.8",
+    "vectordb-linux-x64-gnu": "0.1.8",
+    "vectordb-linux-arm64-gnu": "0.1.8"
   }
 }


@@ -22,7 +22,7 @@ import { fromRecordsToBuffer } from './arrow'
 import type { EmbeddingFunction } from './embedding/embedding_function'
 // eslint-disable-next-line @typescript-eslint/no-var-requires
-const { databaseNew, databaseTableNames, databaseOpenTable, tableCreate, tableSearch, tableAdd, tableCreateVectorIndex } = require('../native.js')
+const { databaseNew, databaseTableNames, databaseOpenTable, tableCreate, tableSearch, tableAdd, tableCreateVectorIndex, tableCountRows } = require('../native.js')

 export type { EmbeddingFunction }
 export { OpenAIEmbeddingFunction } from './embedding/openai'
@@ -178,6 +178,13 @@ export class Table<T = number[]> {
   async create_index (indexParams: VectorIndexParams): Promise<any> {
     return await this.createIndex(indexParams)
   }
+
+  /**
+   * Returns the number of rows in this table.
+   */
+  async countRows (): Promise<number> {
+    return tableCountRows.call(this._tbl)
+  }
 }

 interface IvfPQIndexConfig {
@@ -293,6 +300,8 @@ export class Query<T = number[]> {
     return this
   }

+  where = this.filter
+
   /** Return only the specified columns.
    *
    * @param value Only select the specified columns. If not specified, all columns will be returned.


@@ -64,13 +64,20 @@ describe('LanceDB client', function () {
     assert.equal(results[0].id, 1)
   })

-  it('uses a filter', async function () {
+  it('uses a filter / where clause', async function () {
+    // eslint-disable-next-line @typescript-eslint/explicit-function-return-type
+    const assertResults = (results: Array<Record<string, unknown>>) => {
+      assert.equal(results.length, 1)
+      assert.equal(results[0].id, 2)
+    }
+
     const uri = await createTestDB()
     const con = await lancedb.connect(uri)
     const table = await con.openTable('vectors')
-    const results = await table.search([0.1, 0.1]).filter('id == 2').execute()
-    assert.equal(results.length, 1)
-    assert.equal(results[0].id, 2)
+    let results = await table.search([0.1, 0.1]).filter('id == 2').execute()
+    assertResults(results)
+    results = await table.search([0.1, 0.1]).where('id == 2').execute()
+    assertResults(results)
   })

   it('select only a subset of columns', async function () {
@@ -103,9 +110,7 @@ describe('LanceDB client', function () {
     const tableName = `vectors_${Math.floor(Math.random() * 100)}`
     const table = await con.createTable(tableName, data)
     assert.equal(table.name, tableName)
-
-    const results = await table.search([0.1, 0.3]).execute()
-    assert.equal(results.length, 2)
+    assert.equal(await table.countRows(), 2)
   })

   it('appends records to an existing table ', async function () {
@@ -118,16 +123,14 @@ describe('LanceDB client', function () {
     ]
     const table = await con.createTable('vectors', data)
-    const results = await table.search([0.1, 0.3]).execute()
-    assert.equal(results.length, 2)
+    assert.equal(await table.countRows(), 2)

     const dataAdd = [
       { id: 3, vector: [2.1, 2.2], price: 10, name: 'c' },
       { id: 4, vector: [3.1, 3.2], price: 50, name: 'd' }
     ]
     await table.add(dataAdd)
-    const resultsAdd = await table.search([0.1, 0.3]).execute()
-    assert.equal(resultsAdd.length, 4)
+    assert.equal(await table.countRows(), 4)
   })

   it('overwrite all records in a table', async function () {
@@ -135,16 +138,14 @@ describe('LanceDB client', function () {
     const con = await lancedb.connect(uri)
     const table = await con.openTable('vectors')
-    const results = await table.search([0.1, 0.3]).execute()
-    assert.equal(results.length, 2)
+    assert.equal(await table.countRows(), 2)

     const dataOver = [
       { vector: [2.1, 2.2], price: 10, name: 'foo' },
       { vector: [3.1, 3.2], price: 50, name: 'bar' }
     ]
     await table.overwrite(dataOver)
-    const resultsAdd = await table.search([0.1, 0.3]).execute()
-    assert.equal(resultsAdd.length, 2)
+    assert.equal(await table.countRows(), 2)
   })
 })

python/.bumpversion.cfg (new file)

@@ -0,0 +1,8 @@
+[bumpversion]
+current_version = 0.1.8
+commit = True
+message = [python] Bump version: {current_version} → {new_version}
+tag = True
+tag_name = python-v{new_version}
+
+[bumpversion:file:pyproject.toml]


@@ -22,8 +22,21 @@ def connect(uri: URI) -> LanceDBConnection:
     uri: str or Path
         The uri of the database.

+    Examples
+    --------
+    For a local directory, provide a path for the database:
+
+    >>> import lancedb
+    >>> db = lancedb.connect("~/.lancedb")
+
+    For object storage, use a URI prefix:
+
+    >>> db = lancedb.connect("s3://my-bucket/lancedb")
+
     Returns
     -------
-    A connection to a LanceDB database.
+    conn : LanceDBConnection
+        A connection to a LanceDB database.
     """
     return LanceDBConnection(uri)


@@ -0,0 +1,18 @@
+import builtins
+import os
+
+import pytest
+
+# import lancedb so we don't have to in every example
+import lancedb
+
+
+@pytest.fixture(autouse=True)
+def doctest_setup(monkeypatch, tmpdir):
+    # disable color for doctests so we don't have to include
+    # escape codes in docstrings
+    monkeypatch.setitem(os.environ, "NO_COLOR", "1")
+    # Explicitly set the column width
+    monkeypatch.setitem(os.environ, "COLUMNS", "80")
+    # Work in a temporary directory
+    monkeypatch.chdir(tmpdir)

@@ -14,20 +14,109 @@ from __future__ import annotations

 import pandas as pd

+from .exceptions import MissingColumnError, MissingValueError
+

 def contextualize(raw_df: pd.DataFrame) -> Contextualizer:
     """Create a Contextualizer object for the given DataFrame.

-    Used to create context windows.
+    Used to create context windows. Context windows are rolling subsets of text
+    data.
+
+    The input text column should already be separated into rows that will be the
+    unit of the window. So to create a context window over tokens, start with
+    a DataFrame with one token per row. To create a context window over sentences,
+    start with a DataFrame with one sentence per row.
+
+    Examples
+    --------
+    >>> from lancedb.context import contextualize
+    >>> import pandas as pd
+    >>> data = pd.DataFrame({
+    ...    'token': ['The', 'quick', 'brown', 'fox', 'jumped', 'over',
+    ...              'the', 'lazy', 'dog', 'I', 'love', 'sandwiches'],
+    ...    'document_id': [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2]
+    ... })
+
+    ``window`` determines how many rows to include in each window. In our case
+    this is how many tokens, but depending on the input data, it could be
+    sentences, paragraphs, messages, etc.
+
+    >>> contextualize(data).window(3).stride(1).text_col('token').to_df()
+                    token  document_id
+    0     The quick brown            1
+    1     quick brown fox            1
+    2    brown fox jumped            1
+    3     fox jumped over            1
+    4     jumped over the            1
+    5       over the lazy            1
+    6        the lazy dog            1
+    7          lazy dog I            1
+    8          dog I love            1
+    9   I love sandwiches            2
+    10    love sandwiches            2
+    >>> contextualize(data).window(7).stride(1).min_window_size(7).text_col('token').to_df()
+                                      token  document_id
+    0   The quick brown fox jumped over the            1
+    1  quick brown fox jumped over the lazy            1
+    2    brown fox jumped over the lazy dog            1
+    3        fox jumped over the lazy dog I            1
+    4       jumped over the lazy dog I love            1
+    5   over the lazy dog I love sandwiches            1
+
+    ``stride`` determines how many rows to skip between each window start. This can
+    be used to reduce the total number of windows generated.
+
+    >>> contextualize(data).window(4).stride(2).text_col('token').to_df()
+                       token  document_id
+    0    The quick brown fox            1
+    2  brown fox jumped over            1
+    4   jumped over the lazy            1
+    6         the lazy dog I            1
+    8  dog I love sandwiches            1
+    10       love sandwiches            2
+
+    ``groupby`` determines how to group the rows. For example, we would like to have
+    context windows that don't cross document boundaries. In this case, we can
+    pass ``document_id`` as the group by.
+
+    >>> contextualize(data).window(4).stride(2).text_col('token').groupby('document_id').to_df()
+                       token  document_id
+    0    The quick brown fox            1
+    2  brown fox jumped over            1
+    4   jumped over the lazy            1
+    6           the lazy dog            1
+    9      I love sandwiches            2
+
+    ``min_window_size`` determines the minimum size of the context windows that
+    are generated. This can be used to trim the last few context windows which
+    have size less than ``min_window_size``. By default context windows of size 1
+    are skipped.
+
+    >>> contextualize(data).window(6).stride(3).text_col('token').groupby('document_id').to_df()
+                                 token  document_id
+    0  The quick brown fox jumped over            1
+    3     fox jumped over the lazy dog            1
+    6                     the lazy dog            1
+    9                I love sandwiches            2
+    >>> contextualize(data).window(6).stride(3).min_window_size(4).text_col('token').groupby('document_id').to_df()
+                                 token  document_id
+    0  The quick brown fox jumped over            1
+    3     fox jumped over the lazy dog            1
     """
     return Contextualizer(raw_df)


 class Contextualizer:
+    """Create context windows from a DataFrame. See [lancedb.context.contextualize][]."""
+
     def __init__(self, raw_df):
         self._text_col = None
         self._groupby = None
         self._stride = None
         self._window = None
+        self._min_window_size = 2
         self._raw_df = raw_df

     def window(self, window: int) -> Contextualizer:
@@ -75,17 +164,50 @@ class Contextualizer:
         self._text_col = text_col
         return self

+    def min_window_size(self, min_window_size: int) -> Contextualizer:
+        """Set the (optional) minimum window size for the context window.
+
+        Parameters
+        ----------
+        min_window_size: int
+            The min_window_size.
+        """
+        self._min_window_size = min_window_size
+        return self
+
     def to_df(self) -> pd.DataFrame:
         """Create the context windows and return a DataFrame."""
+        if self._text_col not in self._raw_df.columns.tolist():
+            raise MissingColumnError(self._text_col)
+        if self._window is None or self._window < 1:
+            raise MissingValueError(
+                "The value of window is None or less than 1. Specify the "
+                "window size (number of rows to include in each window)"
+            )
+        if self._stride is None or self._stride < 1:
+            raise MissingValueError(
+                "The value of stride is None or less than 1. Specify the "
+                "stride (number of rows to skip between each window)"
+            )
+
         def process_group(grp):
             # For each group, create the text rolling window
+            # with values of size >= min_window_size
             text = grp[self._text_col].values
-            contexts = grp.iloc[: -self._window : self._stride, :].copy()
-            contexts[self._text_col] = [
-                " ".join(text[start_i : start_i + self._window])
-                for start_i in range(0, len(grp) - self._window, self._stride)
+            contexts = grp.iloc[:: self._stride, :].copy()
+            windows = [
+                " ".join(text[start_i : min(start_i + self._window, len(grp))])
+                for start_i in range(0, len(grp), self._stride)
+                if start_i + self._window <= len(grp)
+                or len(grp) - start_i >= self._min_window_size
             ]
+            # if last few rows dropped
+            if len(windows) < len(contexts):
+                contexts = contexts.iloc[: len(windows)]
+            contexts[self._text_col] = windows
             return contexts

         if self._groupby is None:

@@ -13,25 +13,59 @@
 from __future__ import annotations

+import functools
 import os
 from pathlib import Path

-import os
 import pyarrow as pa
 from pyarrow import fs

 from .common import DATA, URI
 from .table import LanceTable
-from .util import get_uri_scheme, get_uri_location
+from .util import get_uri_location, get_uri_scheme


 class LanceDBConnection:
     """
     A connection to a LanceDB database.
+
+    Parameters
+    ----------
+    uri: str or Path
+        The root uri of the database.
+
+    Examples
+    --------
+    >>> import lancedb
+    >>> db = lancedb.connect("./.lancedb")
+    >>> db.create_table("my_table", data=[{"vector": [1.1, 1.2], "b": 2},
+    ...                                   {"vector": [0.5, 1.3], "b": 4}])
+    LanceTable(my_table)
+    >>> db.create_table("another_table", data=[{"vector": [0.4, 0.4], "b": 6}])
+    LanceTable(another_table)
+    >>> db.table_names()
+    ['another_table', 'my_table']
+    >>> len(db)
+    2
+    >>> db["my_table"]
+    LanceTable(my_table)
+    >>> "my_table" in db
+    True
+    >>> db.drop_table("my_table")
+    >>> db.drop_table("another_table")
     """

     def __init__(self, uri: URI):
-        is_local = isinstance(uri, Path) or get_uri_scheme(uri) == "file"
+        if not isinstance(uri, Path):
+            scheme = get_uri_scheme(uri)
+        is_local = isinstance(uri, Path) or scheme == "file"
+        # managed lancedb remote uses a scheme like lancedb+[http|grpc|...]://
+        self._is_managed_remote = not is_local and scheme.startswith("lancedb")
+        if self._is_managed_remote:
+            if len(scheme.split("+")) != 2:
+                raise ValueError(
+                    f"Invalid LanceDB URI: {uri}, expected uri to have scheme like lancedb+<flavor>://..."
+                )
         if is_local:
             if isinstance(uri, str):
                 uri = Path(uri)
@@ -39,30 +73,74 @@ class LanceDBConnection:
             Path(uri).mkdir(parents=True, exist_ok=True)
         self._uri = str(uri)
+        self._entered = False

     @property
     def uri(self) -> str:
         return self._uri

+    @functools.cached_property
+    def is_managed_remote(self) -> bool:
+        return self._is_managed_remote
+
+    @functools.cached_property
+    def remote_flavor(self) -> str:
+        if not self.is_managed_remote:
+            raise ValueError(
+                "Not a managed remote LanceDB, there should be no server flavor"
+            )
+        return get_uri_scheme(self.uri).split("+")[1]
+
+    @functools.cached_property
+    def _client(self) -> "lancedb.remote.LanceDBClient":
+        if not self.is_managed_remote:
+            raise ValueError("Not a managed remote LanceDB, there should be no client")
+        # don't import unless we are really using remote
+        from lancedb.remote.client import RestfulLanceDBClient
+
+        if self.remote_flavor == "http":
+            return RestfulLanceDBClient(self._uri)
+        raise ValueError("Unsupported remote flavor: " + self.remote_flavor)
+
+    async def close(self):
+        if self._entered:
+            raise ValueError("Cannot re-enter the same LanceDBConnection twice")
+        self._entered = True
+        await self._client.close()
+
+    async def __aenter__(self) -> LanceDBConnection:
+        return self
+
+    async def __aexit__(self, exc_type, exc_value, traceback):
+        await self.close()
+
     def table_names(self) -> list[str]:
         """Get the names of all tables in the database.

         Returns
         -------
-        A list of table names.
+        list of str
+            A list of table names.
         """
         try:
             filesystem, path = fs.FileSystem.from_uri(self.uri)
         except pa.ArrowInvalid:
-            raise NotImplementedError(
-                "Unsupported scheme: " + self.uri
-            )
+            raise NotImplementedError("Unsupported scheme: " + self.uri)

         try:
-            paths = filesystem.get_file_info(fs.FileSelector(get_uri_location(self.uri)))
+            paths = filesystem.get_file_info(
+                fs.FileSelector(get_uri_location(self.uri))
+            )
         except FileNotFoundError:
             # It is ok if the file does not exist since it will be created
             paths = []
-        tables = [os.path.splitext(file_info.base_name)[0] for file_info in paths if file_info.extension == 'lance']
+        tables = [
+            os.path.splitext(file_info.base_name)[0]
+            for file_info in paths
+            if file_info.extension == "lance"
+        ]
         return tables

     def __len__(self) -> int:
@@ -103,7 +181,73 @@ class LanceDBConnection:
         Returns
         -------
-        A LanceTable object representing the table.
+        LanceTable
+            A reference to the newly created table.
+
+        Examples
+        --------
+        Can create with list of tuples or dictionaries:
+
+        >>> import lancedb
+        >>> db = lancedb.connect("./.lancedb")
+        >>> data = [{"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7},
+        ...         {"vector": [0.2, 1.8], "lat": 40.1, "long": -74.1}]
+        >>> db.create_table("my_table", data)
+        LanceTable(my_table)
+        >>> db["my_table"].head()
+        pyarrow.Table
+        vector: fixed_size_list<item: float>[2]
+          child 0, item: float
+        lat: double
+        long: double
+        ----
+        vector: [[[1.1,1.2],[0.2,1.8]]]
+        lat: [[45.5,40.1]]
+        long: [[-122.7,-74.1]]
+
+        You can also pass a pandas DataFrame:
+
+        >>> import pandas as pd
+        >>> data = pd.DataFrame({
+        ...    "vector": [[1.1, 1.2], [0.2, 1.8]],
+        ...    "lat": [45.5, 40.1],
+        ...    "long": [-122.7, -74.1]
+        ... })
+        >>> db.create_table("table2", data)
+        LanceTable(table2)
+        >>> db["table2"].head()
+        pyarrow.Table
+        vector: fixed_size_list<item: float>[2]
+          child 0, item: float
+        lat: double
+        long: double
+        ----
+        vector: [[[1.1,1.2],[0.2,1.8]]]
+        lat: [[45.5,40.1]]
+        long: [[-122.7,-74.1]]
+
+        Data is converted to Arrow before being written to disk. For maximum
+        control over how data is saved, either provide the PyArrow schema to
+        convert to or else provide a PyArrow table directly.
+
+        >>> custom_schema = pa.schema([
+        ...    pa.field("vector", pa.list_(pa.float32(), 2)),
+        ...    pa.field("lat", pa.float32()),
+        ...    pa.field("long", pa.float32())
+        ... ])
+        >>> db.create_table("table3", data, schema = custom_schema)
+        LanceTable(table3)
+        >>> db["table3"].head()
+        pyarrow.Table
+        vector: fixed_size_list<item: float>[2]
+          child 0, item: float
+        lat: float
+        long: float
+        ----
+        vector: [[[1.1,1.2],[0.2,1.8]]]
+        lat: [[45.5,40.1]]
+        long: [[-122.7,-74.1]]
         """
         if data is not None:
             tbl = LanceTable.create(self, name, data, schema, mode=mode)

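The db.py changes above make the connection scheme-aware: URIs of the form lancedb+<flavor>://host:port are routed to a managed remote backend, while plain paths and file:// URIs stay local. A minimal sketch of the new behavior (the endpoint below is hypothetical, and nothing is contacted because the REST client is only constructed lazily):

    from lancedb.db import LanceDBConnection

    # hypothetical endpoint; no server needs to be running, since the
    # client is built lazily on first use
    conn = LanceDBConnection("lancedb+http://localhost:10024")
    assert conn.is_managed_remote
    assert conn.remote_flavor == "http"

    # a plain path is still treated as a local database directory
    local = LanceDBConnection("/tmp/lancedb_demo")
    assert not local.is_managed_remote

Only the "http" flavor is wired to a client so far; any other flavor raises a ValueError the first time _client is accessed.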

@@ -29,7 +29,31 @@ def with_embeddings(
     wrap_api: bool = True,
     show_progress: bool = False,
     batch_size: int = 1000,
-):
+) -> pa.Table:
+    """Add a vector column to a table using the given embedding function.
+
+    The new column will be called "vector".
+
+    Parameters
+    ----------
+    func : Callable
+        A function that takes a list of strings and returns a list of vectors.
+    data : pa.Table or pd.DataFrame
+        The data to add an embedding column to.
+    column : str, default "text"
+        The name of the column to use as input to the embedding function.
+    wrap_api : bool, default True
+        Whether to wrap the embedding function in a retry and rate limiter.
+    show_progress : bool, default False
+        Whether to show a progress bar.
+    batch_size : int, default 1000
+        The number of row values to pass to each call of the embedding function.
+
+    Returns
+    -------
+    pa.Table
+        The input table with a new column called "vector" containing the embeddings.
+    """
     func = EmbeddingFunction(func)
     if wrap_api:
         func = func.retry().rate_limit()

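Given the documented contract, a short sketch of how with_embeddings is meant to be called; the toy embed function below is a stand-in for a real embedding model, and wrap_api=False skips the retry and rate-limit wrappers that target hosted APIs:

    import pandas as pd
    from lancedb.embeddings import with_embeddings

    def embed(texts):
        # stand-in embedding function: one 2-d vector per input string
        return [[float(len(t)), 1.0] for t in texts]

    df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
    table = with_embeddings(embed, df, column="text", wrap_api=False)
    assert "vector" in table.column_names  # result is a pyarrow Table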

@@ -0,0 +1,22 @@
+"""Custom exception handling"""
+
+
+class MissingValueError(ValueError):
+    """Exception raised when a required value is missing."""
+
+    pass
+
+
+class MissingColumnError(KeyError):
+    """
+    Exception raised when a column name specified is not in
+    the DataFrame object
+    """
+
+    def __init__(self, column_name):
+        self.column_name = column_name
+
+    def __str__(self):
+        return (
+            f"Error: Column '{self.column_name}' does not exist in the DataFrame object"
+        )

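These two exceptions back the new validation in Contextualizer.to_df; a quick sketch of when each fires:

    import pandas as pd
    from lancedb.context import contextualize
    from lancedb.exceptions import MissingColumnError, MissingValueError

    df = pd.DataFrame({"token": ["a", "b", "c"]})

    try:
        # text_col names a column that is not in the DataFrame
        contextualize(df).window(2).stride(1).text_col("nope").to_df()
    except MissingColumnError as e:
        print(e)  # Error: Column 'nope' does not exist in the DataFrame object

    try:
        # window was never set
        contextualize(df).stride(1).text_col("token").to_df()
    except MissingValueError as e:
        print(e)  # The value of window is None or less than 1 ...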

@@ -68,6 +68,11 @@ def populate_index(index: tantivy.Index, table: LanceTable, fields: List[str]) -
         The table to index
     fields : List[str]
         List of fields to index
+
+    Returns
+    -------
+    int
+        The number of rows indexed
     """
     # first check the fields exist and are string or large string type
     for name in fields:

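For context, a sketch of how populate_index and its new documented return value are used. This assumes the module's create_index(index_path, text_fields) helper, which is not shown in this hunk, and the index path is hypothetical:

    import lancedb
    from lancedb.fts import create_index, populate_index

    db = lancedb.connect("/tmp/lancedb_fts_demo")
    table = db.create_table("docs", [{"vector": [0.1, 0.2], "text": "hello world"}])

    # assumed helper from the same module; builds an empty tantivy index
    index = create_index("/tmp/lancedb_fts_demo/docs.idx", ["text"])
    num_rows = populate_index(index, table, ["text"])
    assert num_rows == 1  # one row was indexed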

@@ -12,6 +12,9 @@
 # limitations under the License.
 from __future__ import annotations

+import asyncio
+from typing import Awaitable, Literal
+
 import numpy as np
 import pandas as pd
 import pyarrow as pa
@@ -22,6 +25,24 @@ from .common import VECTOR_COLUMN_NAME
 class LanceQueryBuilder:
     """
     A builder for nearest neighbor queries for LanceDB.
+
+    Examples
+    --------
+    >>> import lancedb
+    >>> data = [{"vector": [1.1, 1.2], "b": 2},
+    ...         {"vector": [0.5, 1.3], "b": 4},
+    ...         {"vector": [0.4, 0.4], "b": 6},
+    ...         {"vector": [0.4, 0.4], "b": 10}]
+    >>> db = lancedb.connect("./.lancedb")
+    >>> table = db.create_table("my_table", data=data)
+    >>> (table.search([0.4, 0.4])
+    ...        .metric("cosine")
+    ...        .where("b < 10")
+    ...        .select(["b"])
+    ...        .limit(2)
+    ...        .to_df())
+       b      vector  score
+    0  6  [0.4, 0.4]    0.0
     """

     def __init__(self, table: "lancedb.table.LanceTable", query: np.ndarray):
@@ -44,7 +65,8 @@ class LanceQueryBuilder:

         Returns
         -------
-        The LanceQueryBuilder object.
+        LanceQueryBuilder
+            The LanceQueryBuilder object.
         """
         self._limit = limit
         return self
@@ -59,7 +81,8 @@ class LanceQueryBuilder:

         Returns
         -------
-        The LanceQueryBuilder object.
+        LanceQueryBuilder
+            The LanceQueryBuilder object.
         """
         self._columns = columns
         return self
@@ -74,22 +97,24 @@ class LanceQueryBuilder:

         Returns
         -------
-        The LanceQueryBuilder object.
+        LanceQueryBuilder
+            The LanceQueryBuilder object.
         """
         self._where = where
         return self

-    def metric(self, metric: str) -> LanceQueryBuilder:
+    def metric(self, metric: Literal["L2", "cosine"]) -> LanceQueryBuilder:
         """Set the distance metric to use.

         Parameters
         ----------
-        metric: str
-            The distance metric to use. By default "l2" is used.
+        metric: "L2" or "cosine"
+            The distance metric to use. By default "L2" is used.

         Returns
         -------
-        The LanceQueryBuilder object.
+        LanceQueryBuilder
+            The LanceQueryBuilder object.
         """
         self._metric = metric
         return self
@@ -97,6 +122,12 @@ class LanceQueryBuilder:
     def nprobes(self, nprobes: int) -> LanceQueryBuilder:
         """Set the number of probes to use.

+        Higher values will yield better recall (more likely to find vectors if
+        they exist) at the expense of latency.
+
+        See discussion in [Querying an ANN Index][../querying-an-ann-index] for
+        tuning advice.
+
         Parameters
         ----------
         nprobes: int
@@ -104,13 +135,20 @@ class LanceQueryBuilder:

         Returns
         -------
-        The LanceQueryBuilder object.
+        LanceQueryBuilder
+            The LanceQueryBuilder object.
         """
         self._nprobes = nprobes
         return self

     def refine_factor(self, refine_factor: int) -> LanceQueryBuilder:
-        """Set the refine factor to use.
+        """Set the refine factor to use, increasing the number of vectors sampled.
+
+        As an example, a refine factor of 2 will sample 2x as many vectors as
+        requested, re-ranks them, and returns the top half most relevant results.
+
+        See discussion in [Querying an ANN Index][querying-an-ann-index] for
+        tuning advice.

         Parameters
         ----------
@@ -119,7 +157,8 @@ class LanceQueryBuilder:

         Returns
         -------
-        The LanceQueryBuilder object.
+        LanceQueryBuilder
+            The LanceQueryBuilder object.
         """
         self._refine_factor = refine_factor
         return self
@@ -131,8 +170,28 @@ class LanceQueryBuilder:
         and also the "score" column which is the distance between the query
         vector and the returned vector.
         """
+        return self.to_arrow().to_pandas()
+
+    def to_arrow(self) -> pa.Table:
+        """
+        Execute the query and return the results as an Arrow Table.
+
+        In addition to the selected columns, LanceDB also returns a vector
+        and also the "score" column which is the distance between the query
+        vector and the returned vector.
+        """
+        if self._table._conn.is_managed_remote:
+            try:
+                loop = asyncio.get_running_loop()
+            except RuntimeError:
+                loop = asyncio.get_event_loop()
+            result = self._table._conn._client.query(
+                self._table.name, self.to_remote_query()
+            )
+            return loop.run_until_complete(result).to_arrow()
+
         ds = self._table.to_lance()
-        tbl = ds.to_table(
+        return ds.to_table(
             columns=self._columns,
             filter=self._where,
             nearest={
@@ -144,7 +203,20 @@ class LanceQueryBuilder:
                 "refine_factor": self._refine_factor,
             },
         )
-        return tbl.to_pandas()
+
+    def to_remote_query(self) -> "VectorQuery":
+        # don't import unless we are connecting to remote
+        from lancedb.remote.client import VectorQuery
+
+        return VectorQuery(
+            vector=self._query.tolist(),
+            filter=self._where,
+            k=self._limit,
+            _metric=self._metric,
+            columns=self._columns,
+            nprobes=self._nprobes,
+            refine_factor=self._refine_factor,
+        )


 class LanceFtsQueryBuilder(LanceQueryBuilder):

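The expanded nprobes and refine_factor docstrings describe the two recall/latency dials, and the new to_arrow method exposes results without the pandas round trip. A small local sketch (both dials only change behavior once an ANN index exists; on an unindexed table the search falls back to a flat scan, and refine_factor(2) would additionally re-rank twice the requested candidates):

    import lancedb

    db = lancedb.connect("/tmp/lancedb_query_demo")
    table = db.create_table("vectors", [{"vector": [1.0, 0.0], "id": 1},
                                        {"vector": [0.0, 1.0], "id": 2}])

    # more probes: better recall, higher latency
    result = table.search([0.9, 0.1]).nprobes(20).limit(1).to_arrow()
    assert result.num_rows == 1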

@@ -0,0 +1,61 @@
+# Copyright 2023 LanceDB Developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+from typing import List, Optional
+
+import attr
+import pandas as pd
+import pyarrow as pa
+from pydantic import BaseModel
+
+__all__ = ["LanceDBClient", "VectorQuery", "VectorQueryResult"]
+
+
+class VectorQuery(BaseModel):
+    # vector to search for
+    vector: List[float]
+
+    # sql filter to refine the query with
+    filter: Optional[str] = None
+
+    # top k results to return
+    k: int
+
+    # # metrics
+    _metric: str = "L2"
+
+    # which columns to return in the results
+    columns: Optional[List[str]] = None
+
+    # optional query parameters for tuning the results,
+    # e.g. `{"nprobes": "10", "refine_factor": "10"}`
+    nprobes: int = 10
+    refine_factor: Optional[int] = None
+
+
+@attr.define
+class VectorQueryResult:
+    # for now the response is directly serialized into a pandas dataframe
+    tbl: pa.Table
+
+    def to_arrow(self) -> pa.Table:
+        return self.tbl
+
+
+class LanceDBClient(abc.ABC):
+    @abc.abstractmethod
+    def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
+        """Query the LanceDB server for the given table and query."""
+        pass

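VectorQuery doubles as the wire format: the REST client posts query.dict(exclude_none=True), so unset optional fields like filter and refine_factor are simply omitted. Note that pydantic treats the underscore-prefixed _metric as a private attribute, so it does not appear in the serialized payload. A sketch:

    from lancedb.remote import VectorQuery

    q = VectorQuery(vector=[0.1, 0.2], k=10, columns=["id"])
    print(q.dict(exclude_none=True))
    # {'vector': [0.1, 0.2], 'k': 10, 'columns': ['id'], 'nprobes': 10}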

@@ -0,0 +1,79 @@
+# Copyright 2023 LanceDB Developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import urllib.parse
+
+import aiohttp
+import attr
+import pyarrow as pa
+
+from lancedb.remote import VectorQuery, VectorQueryResult
+from lancedb.remote.errors import LanceDBClientError
+
+
+def _check_not_closed(f):
+    @functools.wraps(f)
+    def wrapped(self, *args, **kwargs):
+        if self.closed:
+            raise ValueError("Connection is closed")
+        return f(self, *args, **kwargs)
+
+    return wrapped
+
+
+@attr.define(slots=False)
+class RestfulLanceDBClient:
+    url: str
+
+    closed: bool = attr.field(default=False, init=False)
+
+    @functools.cached_property
+    def session(self) -> aiohttp.ClientSession:
+        parsed = urllib.parse.urlparse(self.url)
+        scheme = parsed.scheme
+        if not scheme.startswith("lancedb"):
+            raise ValueError(
+                f"Invalid scheme: {scheme}, must be like lancedb+<flavor>://"
+            )
+        flavor = scheme.split("+")[1]
+        url = f"{flavor}://{parsed.hostname}:{parsed.port}"
+        return aiohttp.ClientSession(url)
+
+    async def close(self):
+        await self.session.close()
+        self.closed = True
+
+    @_check_not_closed
+    async def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
+        async with self.session.post(
+            f"/table/{table_name}/", json=query.dict(exclude_none=True)
+        ) as resp:
+            resp: aiohttp.ClientResponse = resp
+            if 400 <= resp.status < 500:
+                raise LanceDBClientError(
+                    f"Bad Request: {resp.status}, error: {await resp.text()}"
+                )
+            if 500 <= resp.status < 600:
+                raise LanceDBClientError(
+                    f"Internal Server Error: {resp.status}, error: {await resp.text()}"
+                )
+            if resp.status != 200:
+                raise LanceDBClientError(
+                    f"Unknown Error: {resp.status}, error: {await resp.text()}"
+                )
+
+            resp_body = await resp.read()
+            with pa.ipc.open_file(pa.BufferReader(resp_body)) as reader:
+                tbl = reader.read_all()
+            return VectorQueryResult(tbl)

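The _check_not_closed guard makes a closed client fail fast instead of touching a dead socket. Because the aiohttp session is a lazy cached_property, the following sketch runs without any server (the endpoint is hypothetical):

    import asyncio

    from lancedb.remote import VectorQuery
    from lancedb.remote.client import RestfulLanceDBClient

    async def main():
        client = RestfulLanceDBClient("lancedb+http://localhost:10024")
        await client.close()
        try:
            # raises before any network I/O happens
            await client.query("my_table", VectorQuery(vector=[0.0], k=1))
        except ValueError as e:
            print(e)  # Connection is closed

    asyncio.run(main())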

@@ -0,0 +1,16 @@
+# Copyright 2023 LanceDB Developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class LanceDBClientError(RuntimeError):
+    pass


@@ -14,7 +14,6 @@
 from __future__ import annotations

 import os
-import shutil
 from functools import cached_property
 from typing import List, Union
@@ -27,7 +26,6 @@ from lance.vector import vec_to_table
 from .common import DATA, VEC, VECTOR_COLUMN_NAME
 from .query import LanceFtsQueryBuilder, LanceQueryBuilder
-from .util import get_uri_scheme


 def _sanitize_data(data, schema):
@@ -47,6 +45,40 @@ def _sanitize_data(data, schema):
 class LanceTable:
     """
     A table in a LanceDB database.
+
+    Examples
+    --------
+    Create using [LanceDBConnection.create_table][lancedb.LanceDBConnection.create_table]
+    (more examples in that method's documentation).
+
+    >>> import lancedb
+    >>> db = lancedb.connect("./.lancedb")
+    >>> table = db.create_table("my_table", data=[{"vector": [1.1, 1.2], "b": 2}])
+    >>> table.head()
+    pyarrow.Table
+    vector: fixed_size_list<item: float>[2]
+      child 0, item: float
+    b: int64
+    ----
+    vector: [[[1.1,1.2]]]
+    b: [[2]]
+
+    Can append new data with [LanceTable.add][lancedb.table.LanceTable.add].
+
+    >>> table.add([{"vector": [0.5, 1.3], "b": 4}])
+    2
+
+    Can query the table with [LanceTable.search][lancedb.table.LanceTable.search].
+
+    >>> table.search([0.4, 0.4]).select(["b"]).to_df()
+       b      vector  score
+    0  4  [0.5, 1.3]   0.82
+    1  2  [1.1, 1.2]   1.13
+
+    Search queries are much faster when an index is created. See
+    [LanceTable.create_index][lancedb.table.LanceTable.create_index].
     """

     def __init__(
@@ -64,7 +96,12 @@ class LanceTable:

     @property
     def schema(self) -> pa.Schema:
-        """Return the schema of the table."""
+        """Return the schema of the table.
+
+        Returns
+        -------
+        pa.Schema
+            A PyArrow schema object."""
         return self._dataset.schema

     def list_versions(self):
@@ -72,12 +109,39 @@ class LanceTable:
         return self._dataset.versions()

     @property
-    def version(self):
+    def version(self) -> int:
         """Get the current version of the table"""
         return self._dataset.version

     def checkout(self, version: int):
-        """Checkout a version of the table"""
+        """Checkout a version of the table. This is an in-place operation.
+
+        This allows viewing previous versions of the table.
+
+        Parameters
+        ----------
+        version : int
+            The version to checkout.
+
+        Examples
+        --------
+        >>> import lancedb
+        >>> db = lancedb.connect("./.lancedb")
+        >>> table = db.create_table("my_table", [{"vector": [1.1, 0.9], "type": "vector"}])
+        >>> table.version
+        1
+        >>> table.to_pandas()
+               vector    type
+        0  [1.1, 0.9]  vector
+        >>> table.add([{"vector": [0.5, 0.2], "type": "vector"}])
+        2
+        >>> table.version
+        2
+        >>> table.checkout(1)
+        >>> table.to_pandas()
+               vector    type
+        0  [1.1, 0.9]  vector
+        """
         max_ver = max([v["version"] for v in self._dataset.versions()])
         if version < 1 or version > max_ver:
             raise ValueError(f"Invalid version {version}")
@@ -98,11 +162,20 @@ class LanceTable:
         return self._dataset.head(n)

     def to_pandas(self) -> pd.DataFrame:
-        """Return the table as a pandas DataFrame."""
+        """Return the table as a pandas DataFrame.
+
+        Returns
+        -------
+        pd.DataFrame
+        """
         return self.to_arrow().to_pandas()

     def to_arrow(self) -> pa.Table:
-        """Return the table as a pyarrow Table."""
+        """Return the table as a pyarrow Table.
+
+        Returns
+        -------
+        pa.Table"""
         return self._dataset.to_table()

     @property
@@ -175,7 +248,8 @@ class LanceTable:
         Returns
         -------
-        The number of vectors added to the table.
+        int
+            The number of vectors in the table.
         """
         data = _sanitize_data(data, self.schema)
         lance.write_dataset(data, self._dataset_uri, mode=mode)
@@ -193,10 +267,11 @@ class LanceTable:
         Returns
         -------
-        A LanceQueryBuilder object representing the query.
-        Once executed, the query returns selected columns, the vector,
-        and also the "score" column which is the distance between the query
-        vector and the returned vector.
+        LanceQueryBuilder
+            A query builder object representing the query.
+            Once executed, the query returns selected columns, the vector,
+            and also the "score" column which is the distance between the query
+            vector and the returned vector.
         """
         if isinstance(query, str):
             # fts
@@ -265,4 +340,6 @@ def _sanitize_vector_column(data: pa.Table, vector_column_name: str) -> pa.Table
         values = values.cast(pa.float32())
     list_size = len(values) / len(data)
     vec_arr = pa.FixedSizeListArray.from_arrays(values, list_size)
-    return data.set_column(data.column_names.index(vector_column_name), vector_column_name, vec_arr)
+    return data.set_column(
+        data.column_names.index(vector_column_name), vector_column_name, vec_arr
+    )


@@ -1,7 +1,7 @@
 [project]
 name = "lancedb"
-version = "0.1.6"
-dependencies = ["pylance>=0.4.17", "ratelimiter", "retry", "tqdm"]
+version = "0.1.8"
+dependencies = ["pylance>=0.4.20", "ratelimiter", "retry", "tqdm", "aiohttp", "pydantic", "attr"]
 description = "lancedb"
 authors = [
     { name = "LanceDB Devs", email = "dev@lancedb.com" },
@@ -37,7 +37,7 @@ repository = "https://github.com/lancedb/lancedb"
 [project.optional-dependencies]
 tests = [
-    "pytest", "pytest-mock"
+    "pytest", "pytest-mock", "doctest", "pytest-asyncio"
 ]
 dev = [
     "ruff", "pre-commit", "black"


@@ -0,0 +1,77 @@
+# Copyright 2023 LanceDB Developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pandas as pd
+import pytest
+
+from lancedb.context import contextualize
+
+
+@pytest.fixture
+def raw_df() -> pd.DataFrame:
+    return pd.DataFrame(
+        {
+            "token": [
+                "The",
+                "quick",
+                "brown",
+                "fox",
+                "jumped",
+                "over",
+                "the",
+                "lazy",
+                "dog",
+                "I",
+                "love",
+                "sandwiches",
+            ],
+            "document_id": [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2],
+        }
+    )
+
+
+def test_contextualizer(raw_df: pd.DataFrame):
+    result = (
+        contextualize(raw_df)
+        .window(6)
+        .stride(3)
+        .text_col("token")
+        .groupby("document_id")
+        .to_df()["token"]
+        .to_list()
+    )
+    assert result == [
+        "The quick brown fox jumped over",
+        "fox jumped over the lazy dog",
+        "the lazy dog",
+        "I love sandwiches",
+    ]
+
+
+def test_contextualizer_with_threshold(raw_df: pd.DataFrame):
+    result = (
+        contextualize(raw_df)
+        .window(6)
+        .stride(3)
+        .text_col("token")
+        .groupby("document_id")
+        .min_window_size(4)
+        .to_df()["token"]
+        .to_list()
+    )
+    assert result == [
+        "The quick brown fox jumped over",
+        "fox jumped over the lazy dog",
+    ]


@@ -119,4 +119,4 @@ def test_delete_table(tmp_path):
     assert db.table_names() == []
     db.create_table("test", data=data)
-    assert db.table_names() == ["test"]
\ No newline at end of file
+    assert db.table_names() == ["test"]


@@ -0,0 +1,27 @@
+# Copyright 2023 LanceDB Developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import pytest
+
+from lancedb import LanceDBConnection
+
+
+# TODO: setup integ test mark and script
+@pytest.mark.skip(reason="Need to set up a local server")
+def test_against_local_server():
+    conn = LanceDBConnection("lancedb+http://localhost:10024")
+    table = conn.open_table("sift1m_ivf1024_pq16")
+    df = table.search(np.random.rand(128)).to_df()
+    assert len(df) == 10


@@ -14,6 +14,7 @@ import sys

 import numpy as np
 import pyarrow as pa
+
 from lancedb.embeddings import with_embeddings


@@ -13,13 +13,13 @@
 import os
 import random

-import lancedb.fts
 import numpy as np
 import pandas as pd
 import pytest
 import tantivy

 import lancedb as ldb
+import lancedb.fts


 @pytest.fixture
@pytest.fixture @pytest.fixture


@@ -12,6 +12,7 @@
 # limitations under the License.
 import os
+
 import pytest

 import lancedb
@@ -19,6 +20,7 @@ import lancedb

 # You need to set up AWS credentials and a base path to run this test. Example:
 # AWS_PROFILE=default TEST_S3_BASE_URL=s3://my_bucket/dataset pytest tests/test_io.py
+
 @pytest.mark.skipif(
     (os.environ.get("TEST_S3_BASE_URL") is None),
     reason="please setup s3 base url",


@@ -17,12 +17,15 @@ import pandas as pd
 import pandas.testing as tm
 import pyarrow as pa
 import pytest
+
+from lancedb.db import LanceDBConnection
 from lancedb.query import LanceQueryBuilder


 class MockTable:
     def __init__(self, tmp_path):
         self.uri = tmp_path
+        self._conn = LanceDBConnection("/tmp/lance/")

     def to_lance(self):
         return lance.dataset(self.uri)

@@ -30,23 +33,17 @@ class MockTable:
 @pytest.fixture
 def table(tmp_path) -> MockTable:
-    df = pd.DataFrame(
+    df = pa.table(
         {
-            "vector": [[1, 2], [3, 4]],
-            "id": [1, 2],
-            "str_field": ["a", "b"],
-            "float_field": [1.0, 2.0],
+            "vector": pa.array(
+                [[1, 2], [3, 4]], type=pa.list_(pa.float32(), list_size=2)
+            ),
+            "id": pa.array([1, 2]),
+            "str_field": pa.array(["a", "b"]),
+            "float_field": pa.array([1.0, 2.0]),
         }
     )
-    schema = pa.schema(
-        [
-            pa.field("vector", pa.list_(pa.float32(), list_size=2)),
-            pa.field("id", pa.int32()),
-            pa.field("str_field", pa.string()),
-            pa.field("float_field", pa.float64()),
-        ]
-    )
-    lance.write_dataset(df, tmp_path, schema)
+    lance.write_dataset(df, tmp_path)
     return MockTable(tmp_path)

@@ -65,7 +62,7 @@ def test_query_builder_with_filter(table):
 def test_query_builder_with_metric(table):
     query = [4, 8]
     df_default = LanceQueryBuilder(table, query).to_df()
-    df_l2 = LanceQueryBuilder(table, query).metric("l2").to_df()
+    df_l2 = LanceQueryBuilder(table, query).metric("L2").to_df()
     tm.assert_frame_equal(df_default, df_l2)
     df_cosine = LanceQueryBuilder(table, query).metric("cosine").limit(1).to_df()


@@ -0,0 +1,95 @@
+# Copyright 2023 LanceDB Developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import attr
+import numpy as np
+import pandas as pd
+import pyarrow as pa
+import pytest
+from aiohttp import web
+
+from lancedb.remote.client import RestfulLanceDBClient, VectorQuery
+
+
+@attr.define
+class MockLanceDBServer:
+    runner: web.AppRunner = attr.field(init=False)
+    site: web.TCPSite = attr.field(init=False)
+
+    async def query_handler(self, request: web.Request) -> web.Response:
+        table_name = request.match_info["table_name"]
+        assert table_name == "test_table"
+
+        request_json = await request.json()
+        # TODO: do some matching
+
+        vecs = pd.Series([np.random.rand(128) for x in range(10)], name="vector")
+        ids = pd.Series(range(10), name="id")
+        df = pd.DataFrame([vecs, ids]).T
+        batch = pa.RecordBatch.from_pandas(
+            df,
+            schema=pa.schema(
+                [
+                    pa.field("vector", pa.list_(pa.float32(), 128)),
+                    pa.field("id", pa.int64()),
+                ]
+            ),
+        )
+
+        sink = pa.BufferOutputStream()
+        with pa.ipc.new_file(sink, batch.schema) as writer:
+            writer.write_batch(batch)
+
+        return web.Response(body=sink.getvalue().to_pybytes())
+
+    async def setup(self):
+        app = web.Application()
+        app.add_routes([web.post("/table/{table_name}", self.query_handler)])
+        self.runner = web.AppRunner(app)
+        await self.runner.setup()
+        self.site = web.TCPSite(self.runner, "localhost", 8111)
+
+    async def start(self):
+        await self.site.start()
+
+    async def stop(self):
+        await self.runner.cleanup()
+
+
+@pytest.mark.skip(reason="flaky somehow, fix later")
+@pytest.mark.asyncio
+async def test_e2e_with_mock_server():
+    mock_server = MockLanceDBServer()
+    await mock_server.setup()
+    await mock_server.start()
+
+    try:
+        client = RestfulLanceDBClient("lancedb+http://localhost:8111")
+        df = (
+            await client.query(
+                "test_table",
+                VectorQuery(
+                    vector=np.random.rand(128).tolist(),
+                    k=10,
+                    _metric="L2",
+                    columns=["id", "vector"],
+                ),
+            )
+        ).to_df()
+        assert "vector" in df.columns
+        assert "id" in df.columns
+    finally:
+        # make sure we don't leak resources
+        await mock_server.stop()


@@ -0,0 +1,35 @@
+# Copyright 2023 LanceDB Developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pyarrow as pa
+
+from lancedb.db import LanceDBConnection
+from lancedb.remote.client import VectorQuery, VectorQueryResult
+
+
+class FakeLanceDBClient:
+    async def close(self):
+        pass
+
+    async def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
+        assert table_name == "test"
+        t = pa.schema([]).empty_table()
+        return VectorQueryResult(t)
+
+
+def test_remote_db():
+    conn = LanceDBConnection("lancedb+http://client-will-be-injected")
+    setattr(conn, "_client", FakeLanceDBClient())
+
+    table = conn["test"]
+    table.search([1.0, 2.0]).to_df()


@@ -11,11 +11,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import functools
 from pathlib import Path

 import pandas as pd
 import pyarrow as pa
 import pytest
+
 from lancedb.table import LanceTable
@@ -23,6 +25,10 @@ class MockDB:
     def __init__(self, uri: Path):
         self.uri = uri

+    @functools.cached_property
+    def is_managed_remote(self) -> bool:
+        return False
+

 @pytest.fixture
 def db(tmp_path) -> MockDB:


@@ -1,6 +1,6 @@
 [package]
 name = "vectordb-node"
-version = "0.1.0"
+version = "0.1.8"
 description = "Serverless, low-latency vector database for AI applications"
 license = "Apache-2.0"
 edition = "2018"


@@ -97,6 +97,7 @@ fn get_index_params_builder(
         let ivf_params = IvfBuildParams {
             num_partitions: np,
             max_iters,
+            centroids: None,
         };
         index_builder.ivf_params(ivf_params)
     });


@@ -264,6 +264,25 @@ fn table_add(mut cx: FunctionContext) -> JsResult<JsPromise> {
     Ok(promise)
 }

+fn table_count_rows(mut cx: FunctionContext) -> JsResult<JsPromise> {
+    let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
+    let rt = runtime(&mut cx)?;
+    let channel = cx.channel();
+    let (deferred, promise) = cx.promise();
+    let table = js_table.table.clone();
+
+    rt.block_on(async move {
+        let num_rows_result = table.lock().unwrap().count_rows().await;
+
+        deferred.settle_with(&channel, move |mut cx| {
+            let num_rows = num_rows_result.or_else(|err| cx.throw_error(err.to_string()))?;
+            Ok(cx.number(num_rows as f64))
+        });
+    });
+
+    Ok(promise)
+}
+
 #[neon::main]
 fn main(mut cx: ModuleContext) -> NeonResult<()> {
     cx.export_function("databaseNew", database_new)?;
@@ -272,6 +291,7 @@ fn main(mut cx: ModuleContext) -> NeonResult<()> {
     cx.export_function("tableSearch", table_search)?;
     cx.export_function("tableCreate", table_create)?;
     cx.export_function("tableAdd", table_add)?;
+    cx.export_function("tableCountRows", table_count_rows)?;
     cx.export_function(
         "tableCreateVectorIndex",
         index::vector::table_create_vector_index,


@@ -1,6 +1,6 @@
 [package]
 name = "vectordb"
-version = "0.0.1"
+version = "0.1.8"
 edition = "2021"
 description = "Serverless, low-latency vector database for AI applications"
 license = "Apache-2.0"
@@ -14,7 +14,7 @@ arrow-data = "37.0"
 arrow-schema = "37.0"
 object_store = "0.5.6"
 snafu = "0.7.4"
-lance = "0.4.17"
+lance = "0.4.21"
 tokio = { version = "1.23", features = ["rt-multi-thread"] }

 [dev-dependencies]


@@ -42,7 +42,7 @@ impl Database {
     ///
     /// * A [Database] object.
     pub async fn connect(uri: &str) -> Result<Database> {
-        let object_store = ObjectStore::new(uri).await?;
+        let (object_store, _) = ObjectStore::from_uri(uri).await?;
         if object_store.is_local() {
             Self::try_create_dir(uri).context(CreateDirSnafu { path: uri })?;
         }
@@ -69,7 +69,7 @@ impl Database {
     pub async fn table_names(&self) -> Result<Vec<String>> {
         let f = self
             .object_store
-            .read_dir("/")
+            .read_dir(self.uri.as_str())
             .await?
             .iter()
             .map(|fname| Path::new(fname))


@@ -20,6 +20,8 @@ pub trait VectorIndexBuilder {
     fn get_column(&self) -> Option<String>;
     fn get_index_name(&self) -> Option<String>;
     fn build(&self) -> VectorIndexParams;
+
+    fn get_replace(&self) -> bool;
 }

 pub struct IvfPQIndexBuilder {
@@ -28,6 +30,7 @@ pub struct IvfPQIndexBuilder {
     metric_type: Option<MetricType>,
     ivf_params: Option<IvfBuildParams>,
     pq_params: Option<PQBuildParams>,
+    replace: bool,
 }

 impl IvfPQIndexBuilder {
@@ -38,6 +41,7 @@ impl IvfPQIndexBuilder {
             metric_type: None,
             ivf_params: None,
             pq_params: None,
+            replace: true,
         }
     }
 }
@@ -67,6 +71,11 @@ impl IvfPQIndexBuilder {
         self.pq_params = Some(pq_params);
         self
     }
+
+    pub fn replace(&mut self, replace: bool) -> &mut IvfPQIndexBuilder {
+        self.replace = replace;
+        self
+    }
 }

 impl VectorIndexBuilder for IvfPQIndexBuilder {
@@ -84,6 +93,10 @@ impl VectorIndexBuilder for IvfPQIndexBuilder {
         VectorIndexParams::with_ivf_pq_params(pq_params.metric_type, ivf_params, pq_params)
     }
+
+    fn get_replace(&self) -> bool {
+        self.replace
+    }
 }

 #[cfg(test)]


@@ -177,7 +177,7 @@ mod tests {
     #[tokio::test]
     async fn test_setters_getters() {
         let mut batches: Box<dyn RecordBatchReader> = Box::new(make_test_batches());
-        let ds = Dataset::write(&mut batches, ":memory:", None)
+        let ds = Dataset::write(&mut batches, "memory://foo", None)
             .await
             .unwrap();
@@ -206,7 +206,7 @@ mod tests {
     #[tokio::test]
     async fn test_execute() {
         let mut batches: Box<dyn RecordBatchReader> = Box::new(make_test_batches());
-        let ds = Dataset::write(&mut batches, ":memory:", None)
+        let ds = Dataset::write(&mut batches, "memory://foo", None)
             .await
             .unwrap();


@@ -130,6 +130,7 @@ impl Table {
                 IndexType::Vector,
                 index_builder.get_index_name(),
                 &index_builder.build(),
+                index_builder.get_replace(),
             )
             .await?;
         self.dataset = Arc::new(dataset);
@@ -233,7 +234,7 @@ mod tests {
         let uri = tmp_dir.path().to_str().unwrap();

         let batches: Box<dyn RecordBatchReader> = Box::new(make_test_batches());
-        let schema = batches.schema().clone();
+        let _ = batches.schema().clone();
         Table::create(&uri, "test", batches).await.unwrap();

         let batches: Box<dyn RecordBatchReader> = Box::new(make_test_batches());