mirror of
https://github.com/lancedb/lancedb.git
synced 2025-12-23 21:39:57 +00:00
Compare commits
97 Commits
python-v0.
...
python-v0.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
64eb43229d | ||
|
|
c31c92122f | ||
|
|
205fc530cf | ||
|
|
2bde5401eb | ||
|
|
a405847f9b | ||
|
|
bcc19665ce | ||
|
|
2a6586d6fb | ||
|
|
029b01bbbf | ||
|
|
cd32944e54 | ||
|
|
7eb3b52297 | ||
|
|
8dcd328dce | ||
|
|
1d61717d0e | ||
|
|
4ee7225e91 | ||
|
|
2bc7dca3ca | ||
|
|
b24810a011 | ||
|
|
2b8e872be0 | ||
|
|
03ef1dc081 | ||
|
|
fde636ca2e | ||
|
|
51966a84f5 | ||
|
|
38015ffa7c | ||
|
|
dc72ece847 | ||
|
|
1521435193 | ||
|
|
bfe8fccfab | ||
|
|
6f6eb170a9 | ||
|
|
dd1c16bbaf | ||
|
|
a76186ee83 | ||
|
|
ae85008714 | ||
|
|
a85f039352 | ||
|
|
9c25998110 | ||
|
|
549ca51a8a | ||
|
|
632007d0e2 | ||
|
|
02d85a4ea4 | ||
|
|
a9d0625e2b | ||
|
|
89bcc1b2e7 | ||
|
|
6ad5553eca | ||
|
|
6eb7ccfdee | ||
|
|
758c82858f | ||
|
|
0cbc9cd551 | ||
|
|
7d65dd97cf | ||
|
|
85bb7e54e4 | ||
|
|
21014cab45 | ||
|
|
5857cb4c6e | ||
|
|
09ce6c5bb5 | ||
|
|
0fa50775d6 | ||
|
|
20faa4424b | ||
|
|
b624fc59eb | ||
|
|
d2caa5e202 | ||
|
|
501817cfac | ||
|
|
b3daa25f46 | ||
|
|
6008a8257b | ||
|
|
aaff43d304 | ||
|
|
d4c3a8ca87 | ||
|
|
ff5bbfdd4c | ||
|
|
694ca30c7c | ||
|
|
b2317c904d | ||
|
|
613f3063b9 | ||
|
|
5d2cd7fb2e | ||
|
|
a88e9bb134 | ||
|
|
9c1adff426 | ||
|
|
f9d5fa88a1 | ||
|
|
4db554eea5 | ||
|
|
101066788d | ||
|
|
c4135d9d30 | ||
|
|
ec39d98571 | ||
|
|
0cb37f0e5e | ||
|
|
24e3507ee2 | ||
|
|
2bdf0a02f9 | ||
|
|
32123713fd | ||
|
|
d5a01ffe7b | ||
|
|
e01045692c | ||
|
|
a62f661d90 | ||
|
|
4769d8eb76 | ||
|
|
d07d7a5980 | ||
|
|
8d2ff7b210 | ||
|
|
61c05b51a0 | ||
|
|
7801ab9b8b | ||
|
|
d297da5a7e | ||
|
|
6af69b57ad | ||
|
|
a062a92f6b | ||
|
|
277b753fd8 | ||
|
|
f78b7863f6 | ||
|
|
e7d824af2b | ||
|
|
02f1ec775f | ||
|
|
7b6d3f943b | ||
|
|
676876f4d5 | ||
|
|
fbfe2444a8 | ||
|
|
9555efacf9 | ||
|
|
513926960d | ||
|
|
cc507ca766 | ||
|
|
492d0328fe | ||
|
|
374c1e7aba | ||
|
|
30047a5566 | ||
|
|
85ccf9e22b | ||
|
|
0255221086 | ||
|
|
4ee229490c | ||
|
|
93e24f23af | ||
|
|
8f141e1e33 |
@@ -1,5 +1,5 @@
|
|||||||
[tool.bumpversion]
|
[tool.bumpversion]
|
||||||
current_version = "0.7.1"
|
current_version = "0.10.0-beta.1"
|
||||||
parse = """(?x)
|
parse = """(?x)
|
||||||
(?P<major>0|[1-9]\\d*)\\.
|
(?P<major>0|[1-9]\\d*)\\.
|
||||||
(?P<minor>0|[1-9]\\d*)\\.
|
(?P<minor>0|[1-9]\\d*)\\.
|
||||||
|
|||||||
12
.github/workflows/docs_test.yml
vendored
12
.github/workflows/docs_test.yml
vendored
@@ -30,9 +30,13 @@ jobs:
|
|||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
- name: Print CPU capabilities
|
- name: Print CPU capabilities
|
||||||
run: cat /proc/cpuinfo
|
run: cat /proc/cpuinfo
|
||||||
|
- name: Install protobuf
|
||||||
|
run: |
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y protobuf-compiler
|
||||||
- name: Install dependecies needed for ubuntu
|
- name: Install dependecies needed for ubuntu
|
||||||
run: |
|
run: |
|
||||||
sudo apt install -y protobuf-compiler libssl-dev
|
sudo apt install -y libssl-dev
|
||||||
rustup update && rustup default
|
rustup update && rustup default
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v5
|
||||||
@@ -72,9 +76,13 @@ jobs:
|
|||||||
uses: actions/setup-node@v4
|
uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: 20
|
node-version: 20
|
||||||
|
- name: Install protobuf
|
||||||
|
run: |
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y protobuf-compiler
|
||||||
- name: Install dependecies needed for ubuntu
|
- name: Install dependecies needed for ubuntu
|
||||||
run: |
|
run: |
|
||||||
sudo apt install -y protobuf-compiler libssl-dev
|
sudo apt install -y libssl-dev
|
||||||
rustup update && rustup default
|
rustup update && rustup default
|
||||||
- name: Rust cache
|
- name: Rust cache
|
||||||
uses: swatinem/rust-cache@v2
|
uses: swatinem/rust-cache@v2
|
||||||
|
|||||||
109
.github/workflows/java-publish.yml
vendored
Normal file
109
.github/workflows/java-publish.yml
vendored
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
name: Build and publish Java packages
|
||||||
|
on:
|
||||||
|
release:
|
||||||
|
types: [released]
|
||||||
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- .github/workflows/java-publish.yml
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
macos-arm64:
|
||||||
|
name: Build on MacOS Arm64
|
||||||
|
runs-on: macos-14
|
||||||
|
timeout-minutes: 45
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
working-directory: ./java/core/lancedb-jni
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- uses: Swatinem/rust-cache@v2
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
brew install protobuf
|
||||||
|
- name: Build release
|
||||||
|
run: |
|
||||||
|
cargo build --release
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: liblancedb_jni_darwin_aarch64.zip
|
||||||
|
path: target/release/liblancedb_jni.dylib
|
||||||
|
retention-days: 1
|
||||||
|
if-no-files-found: error
|
||||||
|
linux-arm64:
|
||||||
|
name: Build on Linux Arm64
|
||||||
|
runs-on: warp-ubuntu-2204-arm64-8x
|
||||||
|
timeout-minutes: 45
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
working-directory: ./java/core/lancedb-jni
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- uses: Swatinem/rust-cache@v2
|
||||||
|
- uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
|
with:
|
||||||
|
toolchain: "1.79.0"
|
||||||
|
cache-workspaces: "./java/core/lancedb-jni"
|
||||||
|
# Disable full debug symbol generation to speed up CI build and keep memory down
|
||||||
|
# "1" means line tables only, which is useful for panic tracebacks.
|
||||||
|
rustflags: "-C debuginfo=1"
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
sudo apt -y -qq update
|
||||||
|
sudo apt install -y protobuf-compiler libssl-dev pkg-config
|
||||||
|
- name: Build release
|
||||||
|
run: |
|
||||||
|
cargo build --release
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: liblancedb_jni_linux_aarch64.zip
|
||||||
|
path: target/release/liblancedb_jni.so
|
||||||
|
retention-days: 1
|
||||||
|
if-no-files-found: error
|
||||||
|
linux-x86:
|
||||||
|
runs-on: warp-ubuntu-2204-x64-8x
|
||||||
|
timeout-minutes: 30
|
||||||
|
needs: [macos-arm64, linux-arm64]
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
working-directory: ./java
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- uses: Swatinem/rust-cache@v2
|
||||||
|
- name: Set up Java 8
|
||||||
|
uses: actions/setup-java@v4
|
||||||
|
with:
|
||||||
|
distribution: temurin
|
||||||
|
java-version: 8
|
||||||
|
cache: "maven"
|
||||||
|
server-id: ossrh
|
||||||
|
server-username: SONATYPE_USER
|
||||||
|
server-password: SONATYPE_TOKEN
|
||||||
|
gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }}
|
||||||
|
gpg-passphrase: ${{ secrets.GPG_PASSPHRASE }}
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
sudo apt -y -qq update
|
||||||
|
sudo apt install -y protobuf-compiler libssl-dev pkg-config
|
||||||
|
- name: Download artifact
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
- name: Copy native libs
|
||||||
|
run: |
|
||||||
|
mkdir -p ./core/target/classes/nativelib/darwin-aarch64 ./core/target/classes/nativelib/linux-aarch64
|
||||||
|
cp ../liblancedb_jni_darwin_aarch64.zip/liblancedb_jni.dylib ./core/target/classes/nativelib/darwin-aarch64/liblancedb_jni.dylib
|
||||||
|
cp ../liblancedb_jni_linux_aarch64.zip/liblancedb_jni.so ./core/target/classes/nativelib/linux-aarch64/liblancedb_jni.so
|
||||||
|
- name: Set github
|
||||||
|
run: |
|
||||||
|
git config --global user.email "LanceDB Github Runner"
|
||||||
|
git config --global user.name "dev+gha@lancedb.com"
|
||||||
|
- name: Publish with Java 8
|
||||||
|
run: |
|
||||||
|
echo "use-agent" >> ~/.gnupg/gpg.conf
|
||||||
|
echo "pinentry-mode loopback" >> ~/.gnupg/gpg.conf
|
||||||
|
export GPG_TTY=$(tty)
|
||||||
|
mvn --batch-mode -DskipTests -DpushChanges=false -Dgpg.passphrase=${{ secrets.GPG_PASSPHRASE }} deploy -P deploy-to-ossrh
|
||||||
|
env:
|
||||||
|
SONATYPE_USER: ${{ secrets.SONATYPE_USER }}
|
||||||
|
SONATYPE_TOKEN: ${{ secrets.SONATYPE_TOKEN }}
|
||||||
48
.github/workflows/java.yml
vendored
48
.github/workflows/java.yml
vendored
@@ -3,6 +3,8 @@ on:
|
|||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
|
paths:
|
||||||
|
- java/**
|
||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
- java/**
|
- java/**
|
||||||
@@ -21,9 +23,42 @@ env:
|
|||||||
CARGO_INCREMENTAL: "0"
|
CARGO_INCREMENTAL: "0"
|
||||||
CARGO_BUILD_JOBS: "1"
|
CARGO_BUILD_JOBS: "1"
|
||||||
jobs:
|
jobs:
|
||||||
linux-build:
|
linux-build-java-11:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-22.04
|
||||||
name: ubuntu-22.04 + Java 11 & 17
|
name: ubuntu-22.04 + Java 11
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
working-directory: ./java
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- uses: Swatinem/rust-cache@v2
|
||||||
|
with:
|
||||||
|
workspaces: java/core/lancedb-jni
|
||||||
|
- name: Run cargo fmt
|
||||||
|
run: cargo fmt --check
|
||||||
|
working-directory: ./java/core/lancedb-jni
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y protobuf-compiler libssl-dev
|
||||||
|
- name: Install Java 11
|
||||||
|
uses: actions/setup-java@v4
|
||||||
|
with:
|
||||||
|
distribution: temurin
|
||||||
|
java-version: 11
|
||||||
|
cache: "maven"
|
||||||
|
- name: Java Style Check
|
||||||
|
run: mvn checkstyle:check
|
||||||
|
# Disable because of issues in lancedb rust core code
|
||||||
|
# - name: Rust Clippy
|
||||||
|
# working-directory: java/core/lancedb-jni
|
||||||
|
# run: cargo clippy --all-targets -- -D warnings
|
||||||
|
- name: Running tests with Java 11
|
||||||
|
run: mvn clean test
|
||||||
|
linux-build-java-17:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
name: ubuntu-22.04 + Java 17
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
working-directory: ./java
|
working-directory: ./java
|
||||||
@@ -47,20 +82,12 @@ jobs:
|
|||||||
java-version: 17
|
java-version: 17
|
||||||
cache: "maven"
|
cache: "maven"
|
||||||
- run: echo "JAVA_17=$JAVA_HOME" >> $GITHUB_ENV
|
- run: echo "JAVA_17=$JAVA_HOME" >> $GITHUB_ENV
|
||||||
- name: Install Java 11
|
|
||||||
uses: actions/setup-java@v4
|
|
||||||
with:
|
|
||||||
distribution: temurin
|
|
||||||
java-version: 11
|
|
||||||
cache: "maven"
|
|
||||||
- name: Java Style Check
|
- name: Java Style Check
|
||||||
run: mvn checkstyle:check
|
run: mvn checkstyle:check
|
||||||
# Disable because of issues in lancedb rust core code
|
# Disable because of issues in lancedb rust core code
|
||||||
# - name: Rust Clippy
|
# - name: Rust Clippy
|
||||||
# working-directory: java/core/lancedb-jni
|
# working-directory: java/core/lancedb-jni
|
||||||
# run: cargo clippy --all-targets -- -D warnings
|
# run: cargo clippy --all-targets -- -D warnings
|
||||||
- name: Running tests with Java 11
|
|
||||||
run: mvn clean test
|
|
||||||
- name: Running tests with Java 17
|
- name: Running tests with Java 17
|
||||||
run: |
|
run: |
|
||||||
export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS \
|
export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS \
|
||||||
@@ -83,3 +110,4 @@ jobs:
|
|||||||
-Djdk.reflect.useDirectMethodHandle=false \
|
-Djdk.reflect.useDirectMethodHandle=false \
|
||||||
-Dio.netty.tryReflectionSetAccessible=true"
|
-Dio.netty.tryReflectionSetAccessible=true"
|
||||||
JAVA_HOME=$JAVA_17 mvn clean test
|
JAVA_HOME=$JAVA_17 mvn clean test
|
||||||
|
|
||||||
|
|||||||
32
Cargo.toml
32
Cargo.toml
@@ -20,29 +20,31 @@ keywords = ["lancedb", "lance", "database", "vector", "search"]
|
|||||||
categories = ["database-implementations"]
|
categories = ["database-implementations"]
|
||||||
|
|
||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
lance = { "version" = "=0.14.1", "features" = ["dynamodb"] }
|
lance = { "version" = "=0.17.0", "features" = ["dynamodb"] }
|
||||||
lance-index = { "version" = "=0.14.1" }
|
lance-index = { "version" = "=0.17.0" }
|
||||||
lance-linalg = { "version" = "=0.14.1" }
|
lance-linalg = { "version" = "=0.17.0" }
|
||||||
lance-testing = { "version" = "=0.14.1" }
|
lance-table = { "version" = "=0.17.0" }
|
||||||
lance-datafusion = { "version" = "=0.14.1" }
|
lance-testing = { "version" = "=0.17.0" }
|
||||||
|
lance-datafusion = { "version" = "=0.17.0" }
|
||||||
|
lance-encoding = { "version" = "=0.17.0" }
|
||||||
# Note that this one does not include pyarrow
|
# Note that this one does not include pyarrow
|
||||||
arrow = { version = "51.0", optional = false }
|
arrow = { version = "52.2", optional = false }
|
||||||
arrow-array = "51.0"
|
arrow-array = "52.2"
|
||||||
arrow-data = "51.0"
|
arrow-data = "52.2"
|
||||||
arrow-ipc = "51.0"
|
arrow-ipc = "52.2"
|
||||||
arrow-ord = "51.0"
|
arrow-ord = "52.2"
|
||||||
arrow-schema = "51.0"
|
arrow-schema = "52.2"
|
||||||
arrow-arith = "51.0"
|
arrow-arith = "52.2"
|
||||||
arrow-cast = "51.0"
|
arrow-cast = "52.2"
|
||||||
async-trait = "0"
|
async-trait = "0"
|
||||||
chrono = "0.4.35"
|
chrono = "0.4.35"
|
||||||
datafusion-physical-plan = "37.1"
|
datafusion-physical-plan = "40.0"
|
||||||
half = { "version" = "=2.4.1", default-features = false, features = [
|
half = { "version" = "=2.4.1", default-features = false, features = [
|
||||||
"num-traits",
|
"num-traits",
|
||||||
] }
|
] }
|
||||||
futures = "0"
|
futures = "0"
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
object_store = "0.9.0"
|
object_store = "0.10.2"
|
||||||
pin-project = "1.0.7"
|
pin-project = "1.0.7"
|
||||||
snafu = "0.7.4"
|
snafu = "0.7.4"
|
||||||
url = "2"
|
url = "2"
|
||||||
|
|||||||
28
README.md
28
README.md
@@ -7,8 +7,8 @@
|
|||||||
|
|
||||||
<a href='https://github.com/lancedb/vectordb-recipes/tree/main' target="_blank"><img alt='LanceDB' src='https://img.shields.io/badge/VectorDB_Recipes-100000?style=for-the-badge&logo=LanceDB&logoColor=white&labelColor=645cfb&color=645cfb'/></a>
|
<a href='https://github.com/lancedb/vectordb-recipes/tree/main' target="_blank"><img alt='LanceDB' src='https://img.shields.io/badge/VectorDB_Recipes-100000?style=for-the-badge&logo=LanceDB&logoColor=white&labelColor=645cfb&color=645cfb'/></a>
|
||||||
<a href='https://lancedb.github.io/lancedb/' target="_blank"><img alt='lancdb' src='https://img.shields.io/badge/DOCS-100000?style=for-the-badge&logo=lancdb&logoColor=white&labelColor=645cfb&color=645cfb'/></a>
|
<a href='https://lancedb.github.io/lancedb/' target="_blank"><img alt='lancdb' src='https://img.shields.io/badge/DOCS-100000?style=for-the-badge&logo=lancdb&logoColor=white&labelColor=645cfb&color=645cfb'/></a>
|
||||||
[](https://blog.lancedb.com/)
|
[](https://blog.lancedb.com/)
|
||||||
[](https://discord.gg/zMM32dvNtd)
|
[](https://discord.gg/zMM32dvNtd)
|
||||||
[](https://twitter.com/lancedb)
|
[](https://twitter.com/lancedb)
|
||||||
|
|
||||||
</p>
|
</p>
|
||||||
@@ -44,26 +44,24 @@ LanceDB's core is written in Rust 🦀 and is built using <a href="https://githu
|
|||||||
|
|
||||||
**Javascript**
|
**Javascript**
|
||||||
```shell
|
```shell
|
||||||
npm install vectordb
|
npm install @lancedb/lancedb
|
||||||
```
|
```
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
const lancedb = require('vectordb');
|
import * as lancedb from "@lancedb/lancedb";
|
||||||
const db = await lancedb.connect('data/sample-lancedb');
|
|
||||||
|
|
||||||
const table = await db.createTable({
|
const db = await lancedb.connect("data/sample-lancedb");
|
||||||
name: 'vectors',
|
const table = await db.createTable("vectors", [
|
||||||
data: [
|
{ id: 1, vector: [0.1, 0.2], item: "foo", price: 10 },
|
||||||
{ id: 1, vector: [0.1, 0.2], item: "foo", price: 10 },
|
{ id: 2, vector: [1.1, 1.2], item: "bar", price: 50 },
|
||||||
{ id: 2, vector: [1.1, 1.2], item: "bar", price: 50 }
|
], {mode: 'overwrite'});
|
||||||
]
|
|
||||||
})
|
|
||||||
|
|
||||||
const query = table.search([0.1, 0.3]).limit(2);
|
|
||||||
const results = await query.execute();
|
const query = table.vectorSearch([0.1, 0.3]).limit(2);
|
||||||
|
const results = await query.toArray();
|
||||||
|
|
||||||
// You can also search for rows by specific criteria without involving a vector search.
|
// You can also search for rows by specific criteria without involving a vector search.
|
||||||
const rowsByCriteria = await table.search(undefined).where("price >= 10").execute();
|
const rowsByCriteria = await table.query().where("price >= 10").toArray();
|
||||||
```
|
```
|
||||||
|
|
||||||
**Python**
|
**Python**
|
||||||
|
|||||||
@@ -18,4 +18,4 @@ docker run \
|
|||||||
-v $(pwd):/io -w /io \
|
-v $(pwd):/io -w /io \
|
||||||
--memory-swap=-1 \
|
--memory-swap=-1 \
|
||||||
lancedb-node-manylinux \
|
lancedb-node-manylinux \
|
||||||
bash ci/manylinux_node/build.sh $ARCH
|
bash ci/manylinux_node/build_vectordb.sh $ARCH
|
||||||
|
|||||||
@@ -4,9 +4,9 @@ ARCH=${1:-x86_64}
|
|||||||
|
|
||||||
# We pass down the current user so that when we later mount the local files
|
# We pass down the current user so that when we later mount the local files
|
||||||
# into the container, the files are accessible by the current user.
|
# into the container, the files are accessible by the current user.
|
||||||
pushd ci/manylinux_nodejs
|
pushd ci/manylinux_node
|
||||||
docker build \
|
docker build \
|
||||||
-t lancedb-nodejs-manylinux \
|
-t lancedb-node-manylinux-$ARCH \
|
||||||
--build-arg="ARCH=$ARCH" \
|
--build-arg="ARCH=$ARCH" \
|
||||||
--build-arg="DOCKER_USER=$(id -u)" \
|
--build-arg="DOCKER_USER=$(id -u)" \
|
||||||
--progress=plain \
|
--progress=plain \
|
||||||
@@ -17,5 +17,5 @@ popd
|
|||||||
docker run \
|
docker run \
|
||||||
-v $(pwd):/io -w /io \
|
-v $(pwd):/io -w /io \
|
||||||
--memory-swap=-1 \
|
--memory-swap=-1 \
|
||||||
lancedb-nodejs-manylinux \
|
lancedb-node-manylinux-$ARCH \
|
||||||
bash ci/manylinux_nodejs/build.sh $ARCH
|
bash ci/manylinux_node/build_lancedb.sh $ARCH
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
# range of linux distributions.
|
# range of linux distributions.
|
||||||
ARG ARCH=x86_64
|
ARG ARCH=x86_64
|
||||||
|
|
||||||
FROM quay.io/pypa/manylinux2014_${ARCH}
|
FROM quay.io/pypa/manylinux_2_28_${ARCH}
|
||||||
|
|
||||||
ARG ARCH=x86_64
|
ARG ARCH=x86_64
|
||||||
ARG DOCKER_USER=default_user
|
ARG DOCKER_USER=default_user
|
||||||
|
|||||||
0
ci/manylinux_nodejs/build.sh → ci/manylinux_node/build_lancedb.sh
Executable file → Normal file
0
ci/manylinux_nodejs/build.sh → ci/manylinux_node/build_lancedb.sh
Executable file → Normal file
@@ -6,7 +6,7 @@
|
|||||||
# /usr/bin/ld: failed to set dynamic section sizes: Bad value
|
# /usr/bin/ld: failed to set dynamic section sizes: Bad value
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
git clone -b OpenSSL_1_1_1u \
|
git clone -b OpenSSL_1_1_1v \
|
||||||
--single-branch \
|
--single-branch \
|
||||||
https://github.com/openssl/openssl.git
|
https://github.com/openssl/openssl.git
|
||||||
|
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ install_node() {
|
|||||||
|
|
||||||
source "$HOME"/.bashrc
|
source "$HOME"/.bashrc
|
||||||
|
|
||||||
nvm install --no-progress 16
|
nvm install --no-progress 18
|
||||||
}
|
}
|
||||||
|
|
||||||
install_rust() {
|
install_rust() {
|
||||||
|
|||||||
@@ -1,31 +0,0 @@
|
|||||||
# Many linux dockerfile with Rust, Node, and Lance dependencies installed.
|
|
||||||
# This container allows building the node modules native libraries in an
|
|
||||||
# environment with a very old glibc, so that we are compatible with a wide
|
|
||||||
# range of linux distributions.
|
|
||||||
ARG ARCH=x86_64
|
|
||||||
|
|
||||||
FROM quay.io/pypa/manylinux2014_${ARCH}
|
|
||||||
|
|
||||||
ARG ARCH=x86_64
|
|
||||||
ARG DOCKER_USER=default_user
|
|
||||||
|
|
||||||
# Install static openssl
|
|
||||||
COPY install_openssl.sh install_openssl.sh
|
|
||||||
RUN ./install_openssl.sh ${ARCH} > /dev/null
|
|
||||||
|
|
||||||
# Protobuf is also installed as root.
|
|
||||||
COPY install_protobuf.sh install_protobuf.sh
|
|
||||||
RUN ./install_protobuf.sh ${ARCH}
|
|
||||||
|
|
||||||
ENV DOCKER_USER=${DOCKER_USER}
|
|
||||||
# Create a group and user
|
|
||||||
RUN echo ${ARCH} && adduser --user-group --create-home --uid ${DOCKER_USER} build_user
|
|
||||||
|
|
||||||
# We switch to the user to install Rust and Node, since those like to be
|
|
||||||
# installed at the user level.
|
|
||||||
USER ${DOCKER_USER}
|
|
||||||
|
|
||||||
COPY prepare_manylinux_node.sh prepare_manylinux_node.sh
|
|
||||||
RUN cp /prepare_manylinux_node.sh $HOME/ && \
|
|
||||||
cd $HOME && \
|
|
||||||
./prepare_manylinux_node.sh ${ARCH}
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
# Builds openssl from source so we can statically link to it
|
|
||||||
|
|
||||||
# this is to avoid the error we get with the system installation:
|
|
||||||
# /usr/bin/ld: <library>: version node not found for symbol SSLeay@@OPENSSL_1.0.1
|
|
||||||
# /usr/bin/ld: failed to set dynamic section sizes: Bad value
|
|
||||||
set -e
|
|
||||||
|
|
||||||
git clone -b OpenSSL_1_1_1u \
|
|
||||||
--single-branch \
|
|
||||||
https://github.com/openssl/openssl.git
|
|
||||||
|
|
||||||
pushd openssl
|
|
||||||
|
|
||||||
if [[ $1 == x86_64* ]]; then
|
|
||||||
ARCH=linux-x86_64
|
|
||||||
else
|
|
||||||
# gnu target
|
|
||||||
ARCH=linux-aarch64
|
|
||||||
fi
|
|
||||||
|
|
||||||
./Configure no-shared $ARCH
|
|
||||||
|
|
||||||
make
|
|
||||||
|
|
||||||
make install
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
# Installs protobuf compiler. Should be run as root.
|
|
||||||
set -e
|
|
||||||
|
|
||||||
if [[ $1 == x86_64* ]]; then
|
|
||||||
ARCH=x86_64
|
|
||||||
else
|
|
||||||
# gnu target
|
|
||||||
ARCH=aarch_64
|
|
||||||
fi
|
|
||||||
|
|
||||||
PB_REL=https://github.com/protocolbuffers/protobuf/releases
|
|
||||||
PB_VERSION=23.1
|
|
||||||
curl -LO $PB_REL/download/v$PB_VERSION/protoc-$PB_VERSION-linux-$ARCH.zip
|
|
||||||
unzip protoc-$PB_VERSION-linux-$ARCH.zip -d /usr/local
|
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
install_node() {
|
|
||||||
echo "Installing node..."
|
|
||||||
|
|
||||||
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.34.0/install.sh | bash
|
|
||||||
|
|
||||||
source "$HOME"/.bashrc
|
|
||||||
|
|
||||||
nvm install --no-progress 16
|
|
||||||
}
|
|
||||||
|
|
||||||
install_rust() {
|
|
||||||
echo "Installing rust..."
|
|
||||||
curl https://sh.rustup.rs -sSf | bash -s -- -y
|
|
||||||
export PATH="$PATH:/root/.cargo/bin"
|
|
||||||
}
|
|
||||||
|
|
||||||
install_node
|
|
||||||
install_rust
|
|
||||||
123
docs/mkdocs.yml
123
docs/mkdocs.yml
@@ -26,6 +26,7 @@ theme:
|
|||||||
- content.code.copy
|
- content.code.copy
|
||||||
- content.tabs.link
|
- content.tabs.link
|
||||||
- content.action.edit
|
- content.action.edit
|
||||||
|
- content.tooltips
|
||||||
- toc.follow
|
- toc.follow
|
||||||
- navigation.top
|
- navigation.top
|
||||||
- navigation.tabs
|
- navigation.tabs
|
||||||
@@ -35,6 +36,7 @@ theme:
|
|||||||
- navigation.instant
|
- navigation.instant
|
||||||
icon:
|
icon:
|
||||||
repo: fontawesome/brands/github
|
repo: fontawesome/brands/github
|
||||||
|
annotation: material/arrow-right-circle
|
||||||
custom_dir: overrides
|
custom_dir: overrides
|
||||||
|
|
||||||
plugins:
|
plugins:
|
||||||
@@ -58,7 +60,7 @@ plugins:
|
|||||||
- https://pandas.pydata.org/docs/objects.inv
|
- https://pandas.pydata.org/docs/objects.inv
|
||||||
- mkdocs-jupyter
|
- mkdocs-jupyter
|
||||||
- render_swagger:
|
- render_swagger:
|
||||||
allow_arbitrary_locations : true
|
allow_arbitrary_locations: true
|
||||||
|
|
||||||
markdown_extensions:
|
markdown_extensions:
|
||||||
- admonition
|
- admonition
|
||||||
@@ -76,7 +78,12 @@ markdown_extensions:
|
|||||||
- pymdownx.tabbed:
|
- pymdownx.tabbed:
|
||||||
alternate_style: true
|
alternate_style: true
|
||||||
- md_in_html
|
- md_in_html
|
||||||
|
- abbr
|
||||||
- attr_list
|
- attr_list
|
||||||
|
- pymdownx.snippets
|
||||||
|
- pymdownx.emoji:
|
||||||
|
emoji_index: !!python/name:material.extensions.emoji.twemoji
|
||||||
|
emoji_generator: !!python/name:material.extensions.emoji.to_svg
|
||||||
|
|
||||||
nav:
|
nav:
|
||||||
- Home:
|
- Home:
|
||||||
@@ -84,14 +91,17 @@ nav:
|
|||||||
- 🏃🏼♂️ Quick start: basic.md
|
- 🏃🏼♂️ Quick start: basic.md
|
||||||
- 📚 Concepts:
|
- 📚 Concepts:
|
||||||
- Vector search: concepts/vector_search.md
|
- Vector search: concepts/vector_search.md
|
||||||
- Indexing: concepts/index_ivfpq.md
|
- Indexing:
|
||||||
|
- IVFPQ: concepts/index_ivfpq.md
|
||||||
|
- HNSW: concepts/index_hnsw.md
|
||||||
- Storage: concepts/storage.md
|
- Storage: concepts/storage.md
|
||||||
- Data management: concepts/data_management.md
|
- Data management: concepts/data_management.md
|
||||||
- 🔨 Guides:
|
- 🔨 Guides:
|
||||||
- Working with tables: guides/tables.md
|
- Working with tables: guides/tables.md
|
||||||
- Building an ANN index: ann_indexes.md
|
- Building a vector index: ann_indexes.md
|
||||||
- Vector Search: search.md
|
- Vector Search: search.md
|
||||||
- Full-text search: fts.md
|
- Full-text search: fts.md
|
||||||
|
- Building a scalar index: guides/scalar_index.md
|
||||||
- Hybrid search:
|
- Hybrid search:
|
||||||
- Overview: hybrid_search/hybrid_search.md
|
- Overview: hybrid_search/hybrid_search.md
|
||||||
- Comparing Rerankers: hybrid_search/eval.md
|
- Comparing Rerankers: hybrid_search/eval.md
|
||||||
@@ -100,10 +110,12 @@ nav:
|
|||||||
- Quickstart: reranking/index.md
|
- Quickstart: reranking/index.md
|
||||||
- Cohere Reranker: reranking/cohere.md
|
- Cohere Reranker: reranking/cohere.md
|
||||||
- Linear Combination Reranker: reranking/linear_combination.md
|
- Linear Combination Reranker: reranking/linear_combination.md
|
||||||
|
- Reciprocal Rank Fusion Reranker: reranking/rrf.md
|
||||||
- Cross Encoder Reranker: reranking/cross_encoder.md
|
- Cross Encoder Reranker: reranking/cross_encoder.md
|
||||||
- ColBERT Reranker: reranking/colbert.md
|
- ColBERT Reranker: reranking/colbert.md
|
||||||
- Jina Reranker: reranking/jina.md
|
- Jina Reranker: reranking/jina.md
|
||||||
- OpenAI Reranker: reranking/openai.md
|
- OpenAI Reranker: reranking/openai.md
|
||||||
|
- AnswerDotAi Rerankers: reranking/answerdotai.md
|
||||||
- Building Custom Rerankers: reranking/custom_reranker.md
|
- Building Custom Rerankers: reranking/custom_reranker.md
|
||||||
- Example: notebooks/lancedb_reranking.ipynb
|
- Example: notebooks/lancedb_reranking.ipynb
|
||||||
- Filtering: sql.md
|
- Filtering: sql.md
|
||||||
@@ -117,7 +129,23 @@ nav:
|
|||||||
- 🧬 Managing embeddings:
|
- 🧬 Managing embeddings:
|
||||||
- Overview: embeddings/index.md
|
- Overview: embeddings/index.md
|
||||||
- Embedding functions: embeddings/embedding_functions.md
|
- Embedding functions: embeddings/embedding_functions.md
|
||||||
- Available models: embeddings/default_embedding_functions.md
|
- Available models:
|
||||||
|
- Overview: embeddings/default_embedding_functions.md
|
||||||
|
- Text Embedding Functions:
|
||||||
|
- Sentence Transformers: embeddings/available_embedding_models/text_embedding_functions/sentence_transformers.md
|
||||||
|
- Huggingface Embedding Models: embeddings/available_embedding_models/text_embedding_functions/huggingface_embedding.md
|
||||||
|
- Ollama Embeddings: embeddings/available_embedding_models/text_embedding_functions/ollama_embedding.md
|
||||||
|
- OpenAI Embeddings: embeddings/available_embedding_models/text_embedding_functions/openai_embedding.md
|
||||||
|
- Instructor Embeddings: embeddings/available_embedding_models/text_embedding_functions/instructor_embedding.md
|
||||||
|
- Gemini Embeddings: embeddings/available_embedding_models/text_embedding_functions/gemini_embedding.md
|
||||||
|
- Cohere Embeddings: embeddings/available_embedding_models/text_embedding_functions/cohere_embedding.md
|
||||||
|
- Jina Embeddings: embeddings/available_embedding_models/text_embedding_functions/jina_embedding.md
|
||||||
|
- AWS Bedrock Text Embedding Functions: embeddings/available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md
|
||||||
|
- IBM watsonx.ai Embeddings: embeddings/available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md
|
||||||
|
- Multimodal Embedding Functions:
|
||||||
|
- OpenClip embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/openclip_embedding.md
|
||||||
|
- Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md
|
||||||
|
- Jina Embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md
|
||||||
- User-defined embedding functions: embeddings/custom_embedding_function.md
|
- User-defined embedding functions: embeddings/custom_embedding_function.md
|
||||||
- "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
|
- "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
|
||||||
- "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
|
- "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
|
||||||
@@ -127,25 +155,31 @@ nav:
|
|||||||
- Polars: python/polars_arrow.md
|
- Polars: python/polars_arrow.md
|
||||||
- DuckDB: python/duckdb.md
|
- DuckDB: python/duckdb.md
|
||||||
- LangChain:
|
- LangChain:
|
||||||
- LangChain 🔗: integrations/langchain.md
|
- LangChain 🔗: integrations/langchain.md
|
||||||
- LangChain demo: notebooks/langchain_demo.ipynb
|
- LangChain demo: notebooks/langchain_demo.ipynb
|
||||||
- LangChain JS/TS 🔗: https://js.langchain.com/docs/integrations/vectorstores/lancedb
|
- LangChain JS/TS 🔗: https://js.langchain.com/docs/integrations/vectorstores/lancedb
|
||||||
- LlamaIndex 🦙:
|
- LlamaIndex 🦙:
|
||||||
- LlamaIndex docs: integrations/llamaIndex.md
|
- LlamaIndex docs: integrations/llamaIndex.md
|
||||||
- LlamaIndex demo: notebooks/llamaIndex_demo.ipynb
|
- LlamaIndex demo: notebooks/llamaIndex_demo.ipynb
|
||||||
- Pydantic: python/pydantic.md
|
- Pydantic: python/pydantic.md
|
||||||
- Voxel51: integrations/voxel51.md
|
- Voxel51: integrations/voxel51.md
|
||||||
- PromptTools: integrations/prompttools.md
|
- PromptTools: integrations/prompttools.md
|
||||||
|
- dlt: integrations/dlt.md
|
||||||
- 🎯 Examples:
|
- 🎯 Examples:
|
||||||
- Overview: examples/index.md
|
- Overview: examples/index.md
|
||||||
- 🐍 Python:
|
- 🐍 Python:
|
||||||
- Overview: examples/examples_python.md
|
- Overview: examples/examples_python.md
|
||||||
- YouTube Transcript Search: notebooks/youtube_transcript_search.ipynb
|
- Build From Scratch: examples/python_examples/build_from_scratch.md
|
||||||
- Documentation QA Bot using LangChain: notebooks/code_qa_bot.ipynb
|
- Multimodal: examples/python_examples/multimodal.md
|
||||||
- Multimodal search using CLIP: notebooks/multimodal_search.ipynb
|
- Rag: examples/python_examples/rag.md
|
||||||
- Example - Calculate CLIP Embeddings with Roboflow Inference: examples/image_embeddings_roboflow.md
|
- Vector Search: examples/python_examples/vector_search.md
|
||||||
- Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md
|
- Chatbot: examples/python_examples/chatbot.md
|
||||||
- Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md
|
- Evaluation: examples/python_examples/evaluations.md
|
||||||
|
- AI Agent: examples/python_examples/aiagent.md
|
||||||
|
- Recommender System: examples/python_examples/recommendersystem.md
|
||||||
|
- Miscellaneous:
|
||||||
|
- Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md
|
||||||
|
- Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md
|
||||||
- 👾 JavaScript:
|
- 👾 JavaScript:
|
||||||
- Overview: examples/examples_js.md
|
- Overview: examples/examples_js.md
|
||||||
- Serverless Website Chatbot: examples/serverless_website_chatbot.md
|
- Serverless Website Chatbot: examples/serverless_website_chatbot.md
|
||||||
@@ -153,6 +187,8 @@ nav:
|
|||||||
- TransformersJS Embedding Search: examples/transformerjs_embedding_search_nodejs.md
|
- TransformersJS Embedding Search: examples/transformerjs_embedding_search_nodejs.md
|
||||||
- 🦀 Rust:
|
- 🦀 Rust:
|
||||||
- Overview: examples/examples_rust.md
|
- Overview: examples/examples_rust.md
|
||||||
|
- Studies:
|
||||||
|
- ↗Improve retrievers with hybrid search and reranking: https://blog.lancedb.com/hybrid-search-and-reranking-report/
|
||||||
- 💭 FAQs: faq.md
|
- 💭 FAQs: faq.md
|
||||||
- ⚙️ API reference:
|
- ⚙️ API reference:
|
||||||
- 🐍 Python: python/python.md
|
- 🐍 Python: python/python.md
|
||||||
@@ -169,7 +205,9 @@ nav:
|
|||||||
- Quick start: basic.md
|
- Quick start: basic.md
|
||||||
- Concepts:
|
- Concepts:
|
||||||
- Vector search: concepts/vector_search.md
|
- Vector search: concepts/vector_search.md
|
||||||
- Indexing: concepts/index_ivfpq.md
|
- Indexing:
|
||||||
|
- IVFPQ: concepts/index_ivfpq.md
|
||||||
|
- HNSW: concepts/index_hnsw.md
|
||||||
- Storage: concepts/storage.md
|
- Storage: concepts/storage.md
|
||||||
- Data management: concepts/data_management.md
|
- Data management: concepts/data_management.md
|
||||||
- Guides:
|
- Guides:
|
||||||
@@ -177,6 +215,7 @@ nav:
|
|||||||
- Building an ANN index: ann_indexes.md
|
- Building an ANN index: ann_indexes.md
|
||||||
- Vector Search: search.md
|
- Vector Search: search.md
|
||||||
- Full-text search: fts.md
|
- Full-text search: fts.md
|
||||||
|
- Building a scalar index: guides/scalar_index.md
|
||||||
- Hybrid search:
|
- Hybrid search:
|
||||||
- Overview: hybrid_search/hybrid_search.md
|
- Overview: hybrid_search/hybrid_search.md
|
||||||
- Comparing Rerankers: hybrid_search/eval.md
|
- Comparing Rerankers: hybrid_search/eval.md
|
||||||
@@ -185,10 +224,12 @@ nav:
|
|||||||
- Quickstart: reranking/index.md
|
- Quickstart: reranking/index.md
|
||||||
- Cohere Reranker: reranking/cohere.md
|
- Cohere Reranker: reranking/cohere.md
|
||||||
- Linear Combination Reranker: reranking/linear_combination.md
|
- Linear Combination Reranker: reranking/linear_combination.md
|
||||||
|
- Reciprocal Rank Fusion Reranker: reranking/rrf.md
|
||||||
- Cross Encoder Reranker: reranking/cross_encoder.md
|
- Cross Encoder Reranker: reranking/cross_encoder.md
|
||||||
- ColBERT Reranker: reranking/colbert.md
|
- ColBERT Reranker: reranking/colbert.md
|
||||||
- Jina Reranker: reranking/jina.md
|
- Jina Reranker: reranking/jina.md
|
||||||
- OpenAI Reranker: reranking/openai.md
|
- OpenAI Reranker: reranking/openai.md
|
||||||
|
- AnswerDotAi Rerankers: reranking/answerdotai.md
|
||||||
- Building Custom Rerankers: reranking/custom_reranker.md
|
- Building Custom Rerankers: reranking/custom_reranker.md
|
||||||
- Example: notebooks/lancedb_reranking.ipynb
|
- Example: notebooks/lancedb_reranking.ipynb
|
||||||
- Filtering: sql.md
|
- Filtering: sql.md
|
||||||
@@ -202,7 +243,23 @@ nav:
|
|||||||
- Managing Embeddings:
|
- Managing Embeddings:
|
||||||
- Overview: embeddings/index.md
|
- Overview: embeddings/index.md
|
||||||
- Embedding functions: embeddings/embedding_functions.md
|
- Embedding functions: embeddings/embedding_functions.md
|
||||||
- Available models: embeddings/default_embedding_functions.md
|
- Available models:
|
||||||
|
- Overview: embeddings/default_embedding_functions.md
|
||||||
|
- Text Embedding Functions:
|
||||||
|
- Sentence Transformers: embeddings/available_embedding_models/text_embedding_functions/sentence_transformers.md
|
||||||
|
- Huggingface Embedding Models: embeddings/available_embedding_models/text_embedding_functions/huggingface_embedding.md
|
||||||
|
- Ollama Embeddings: embeddings/available_embedding_models/text_embedding_functions/ollama_embedding.md
|
||||||
|
- OpenAI Embeddings: embeddings/available_embedding_models/text_embedding_functions/openai_embedding.md
|
||||||
|
- Instructor Embeddings: embeddings/available_embedding_models/text_embedding_functions/instructor_embedding.md
|
||||||
|
- Gemini Embeddings: embeddings/available_embedding_models/text_embedding_functions/gemini_embedding.md
|
||||||
|
- Cohere Embeddings: embeddings/available_embedding_models/text_embedding_functions/cohere_embedding.md
|
||||||
|
- Jina Embeddings: embeddings/available_embedding_models/text_embedding_functions/jina_embedding.md
|
||||||
|
- AWS Bedrock Text Embedding Functions: embeddings/available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md
|
||||||
|
- IBM watsonx.ai Embeddings: embeddings/available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md
|
||||||
|
- Multimodal Embedding Functions:
|
||||||
|
- OpenClip embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/openclip_embedding.md
|
||||||
|
- Imagebind embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md
|
||||||
|
- Jina Embeddings: embeddings/available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md
|
||||||
- User-defined embedding functions: embeddings/custom_embedding_function.md
|
- User-defined embedding functions: embeddings/custom_embedding_function.md
|
||||||
- "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
|
- "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
|
||||||
- "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
|
- "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
|
||||||
@@ -217,16 +274,32 @@ nav:
|
|||||||
- Pydantic: python/pydantic.md
|
- Pydantic: python/pydantic.md
|
||||||
- Voxel51: integrations/voxel51.md
|
- Voxel51: integrations/voxel51.md
|
||||||
- PromptTools: integrations/prompttools.md
|
- PromptTools: integrations/prompttools.md
|
||||||
|
- dlt: integrations/dlt.md
|
||||||
- Examples:
|
- Examples:
|
||||||
- examples/index.md
|
- examples/index.md
|
||||||
- YouTube Transcript Search: notebooks/youtube_transcript_search.ipynb
|
- 🐍 Python:
|
||||||
- Documentation QA Bot using LangChain: notebooks/code_qa_bot.ipynb
|
- Overview: examples/examples_python.md
|
||||||
- Multimodal search using CLIP: notebooks/multimodal_search.ipynb
|
- Build From Scratch: examples/python_examples/build_from_scratch.md
|
||||||
- Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md
|
- Multimodal: examples/python_examples/multimodal.md
|
||||||
- Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md
|
- Rag: examples/python_examples/rag.md
|
||||||
- YouTube Transcript Search (JS): examples/youtube_transcript_bot_with_nodejs.md
|
- Vector Search: examples/python_examples/vector_search.md
|
||||||
- Serverless Chatbot from any website: examples/serverless_website_chatbot.md
|
- Chatbot: examples/python_examples/chatbot.md
|
||||||
- TransformersJS Embedding Search: examples/transformerjs_embedding_search_nodejs.md
|
- Evaluation: examples/python_examples/evaluations.md
|
||||||
|
- AI Agent: examples/python_examples/aiagent.md
|
||||||
|
- Recommender System: examples/python_examples/recommendersystem.md
|
||||||
|
- Miscellaneous:
|
||||||
|
- Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md
|
||||||
|
- Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md
|
||||||
|
- 👾 JavaScript:
|
||||||
|
- Overview: examples/examples_js.md
|
||||||
|
- Serverless Website Chatbot: examples/serverless_website_chatbot.md
|
||||||
|
- YouTube Transcript Search: examples/youtube_transcript_bot_with_nodejs.md
|
||||||
|
- TransformersJS Embedding Search: examples/transformerjs_embedding_search_nodejs.md
|
||||||
|
- 🦀 Rust:
|
||||||
|
- Overview: examples/examples_rust.md
|
||||||
|
- Studies:
|
||||||
|
- studies/overview.md
|
||||||
|
- ↗Improve retrievers with hybrid search and reranking: https://blog.lancedb.com/hybrid-search-and-reranking-report/
|
||||||
- API reference:
|
- API reference:
|
||||||
- Overview: api_reference.md
|
- Overview: api_reference.md
|
||||||
- Python: python/python.md
|
- Python: python/python.md
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
mkdocs==1.5.3
|
mkdocs==1.5.3
|
||||||
mkdocs-jupyter==0.24.1
|
mkdocs-jupyter==0.24.1
|
||||||
mkdocs-material==9.5.3
|
mkdocs-material==9.5.3
|
||||||
mkdocstrings[python]==0.20.0
|
mkdocstrings[python]==0.25.2
|
||||||
|
griffe
|
||||||
mkdocs-render-swagger-plugin
|
mkdocs-render-swagger-plugin
|
||||||
pydantic
|
pydantic
|
||||||
|
|||||||
1
docs/src/assets/colab.svg
Normal file
1
docs/src/assets/colab.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="117" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="a"><rect width="117" height="20" rx="3" fill="#fff"/></clipPath><g clip-path="url(#a)"><path fill="#555" d="M0 0h30v20H0z"/><path fill="#007ec6" d="M30 0h87v20H30z"/><path fill="url(#b)" d="M0 0h117v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="110"><svg x="4px" y="0px" width="22px" height="20px" viewBox="-2 0 28 24" style="background-color: #fff;border-radius: 1px;"><path style="fill:#e8710a;" d="M1.977,16.77c-2.667-2.277-2.605-7.079,0-9.357C2.919,8.057,3.522,9.075,4.49,9.691c-1.152,1.6-1.146,3.201-0.004,4.803C3.522,15.111,2.918,16.126,1.977,16.77z"/><path style="fill:#f9ab00;" d="M12.257,17.114c-1.767-1.633-2.485-3.658-2.118-6.02c0.451-2.91,2.139-4.893,4.946-5.678c2.565-0.718,4.964-0.217,6.878,1.819c-0.884,0.743-1.707,1.547-2.434,2.446C18.488,8.827,17.319,8.435,16,8.856c-2.404,0.767-3.046,3.241-1.494,5.644c-0.241,0.275-0.493,0.541-0.721,0.826C13.295,15.939,12.511,16.3,12.257,17.114z"/><path style="fill:#e8710a;" d="M19.529,9.682c0.727-0.899,1.55-1.703,2.434-2.446c2.703,2.783,2.701,7.031-0.005,9.764c-2.648,2.674-6.936,2.725-9.701,0.115c0.254-0.814,1.038-1.175,1.528-1.788c0.228-0.285,0.48-0.552,0.721-0.826c1.053,0.916,2.254,1.268,3.6,0.83C20.502,14.551,21.151,11.927,19.529,9.682z"/><path style="fill:#f9ab00;" d="M4.49,9.691C3.522,9.075,2.919,8.057,1.977,7.413c2.209-2.398,5.721-2.942,8.476-1.355c0.555,0.32,0.719,0.606,0.285,1.128c-0.157,0.188-0.258,0.422-0.391,0.631c-0.299,0.47-0.509,1.067-0.929,1.371C8.933,9.539,8.523,8.847,8.021,8.746C6.673,8.475,5.509,8.787,4.49,9.691z"/><path style="fill:#f9ab00;" 
d="M1.977,16.77c0.941-0.644,1.545-1.659,2.509-2.277c1.373,1.152,2.85,1.433,4.45,0.499c0.332-0.194,0.503-0.088,0.673,0.19c0.386,0.635,0.753,1.285,1.181,1.89c0.34,0.48,0.222,0.715-0.253,1.006C7.84,19.73,4.205,19.188,1.977,16.77z"/></svg><text x="245" y="140" transform="scale(.1)" textLength="30"> </text><text x="725" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="770">Open in Colab</text><text x="725" y="140" transform="scale(.1)" textLength="770">Open in Colab</text></g> </svg>
|
||||||
|
After Width: | Height: | Size: 2.3 KiB |
1
docs/src/assets/ghost.svg
Normal file
1
docs/src/assets/ghost.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="88.25" height="28" role="img" aria-label="GHOST"><title>GHOST</title><g shape-rendering="crispEdges"><rect width="88.25" height="28" fill="#000"/></g><g fill="#fff" text-anchor="middle" font-family="Verdana,Geneva,DejaVu Sans,sans-serif" text-rendering="geometricPrecision" font-size="100"><image x="9" y="7" width="14" height="14" xlink:href="data:image/svg+xml;base64,PHN2ZyBmaWxsPSIjZjdkZjFlIiByb2xlPSJpbWciIHZpZXdCb3g9IjAgMCAyNCAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj48dGl0bGU+R2hvc3Q8L3RpdGxlPjxwYXRoIGQ9Ik0xMiAwQzUuMzczIDAgMCA1LjM3MyAwIDEyczUuMzczIDEyIDEyIDEyIDEyLTUuMzczIDEyLTEyUzE4LjYyNyAwIDEyIDB6bS4yNTYgMi4zMTNjMi40Ny4wMDUgNS4xMTYgMi4wMDggNS44OTggMi45NjJsLjI0NC4zYzEuNjQgMS45OTQgMy41NjkgNC4zNCAzLjU2OSA2Ljk2NiAwIDMuNzE5LTIuOTggNS44MDgtNi4xNTggNy41MDgtMS40MzMuNzY2LTIuOTggMS41MDgtNC43NDggMS41MDgtNC41NDMgMC04LjM2Ni0zLjU2OS04LjM2Ni04LjExMiAwLS43MDYuMTctMS40MjUuMzQyLTIuMTUuMTIyLS41MTUuMjQ0LTEuMDMzLjMwNy0xLjU0OS41NDgtNC41MzkgMi45NjctNi43OTUgOC40MjItNy40MDhhNC4yOSA0LjI5IDAgMDEuNDktLjAyNloiLz48L3N2Zz4="/><text transform="scale(.1)" x="541.25" y="175" textLength="442.5" fill="#fff" font-weight="bold">GHOST</text></g></svg>
|
||||||
|
After Width: | Height: | Size: 1.2 KiB |
1
docs/src/assets/github.svg
Normal file
1
docs/src/assets/github.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="95.5" height="28" role="img" aria-label="GITHUB"><title>GITHUB</title><g shape-rendering="crispEdges"><rect width="95.5" height="28" fill="#121011"/></g><g fill="#fff" text-anchor="middle" font-family="Verdana,Geneva,DejaVu Sans,sans-serif" text-rendering="geometricPrecision" font-size="100"><image x="9" y="7" width="14" height="14" xlink:href="data:image/svg+xml;base64,PHN2ZyBmaWxsPSJ3aGl0ZSIgcm9sZT0iaW1nIiB2aWV3Qm94PSIwIDAgMjQgMjQiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PHRpdGxlPkdpdEh1YjwvdGl0bGU+PHBhdGggZD0iTTEyIC4yOTdjLTYuNjMgMC0xMiA1LjM3My0xMiAxMiAwIDUuMzAzIDMuNDM4IDkuOCA4LjIwNSAxMS4zODUuNi4xMTMuODItLjI1OC44Mi0uNTc3IDAtLjI4NS0uMDEtMS4wNC0uMDE1LTIuMDQtMy4zMzguNzI0LTQuMDQyLTEuNjEtNC4wNDItMS42MUM0LjQyMiAxOC4wNyAzLjYzMyAxNy43IDMuNjMzIDE3LjdjLTEuMDg3LS43NDQuMDg0LS43MjkuMDg0LS43MjkgMS4yMDUuMDg0IDEuODM4IDEuMjM2IDEuODM4IDEuMjM2IDEuMDcgMS44MzUgMi44MDkgMS4zMDUgMy40OTUuOTk4LjEwOC0uNzc2LjQxNy0xLjMwNS43Ni0xLjYwNS0yLjY2NS0uMy01LjQ2Ni0xLjMzMi01LjQ2Ni01LjkzIDAtMS4zMS40NjUtMi4zOCAxLjIzNS0zLjIyLS4xMzUtLjMwMy0uNTQtMS41MjMuMTA1LTMuMTc2IDAgMCAxLjAwNS0uMzIyIDMuMyAxLjIzLjk2LS4yNjcgMS45OC0uMzk5IDMtLjQwNSAxLjAyLjAwNiAyLjA0LjEzOCAzIC40MDUgMi4yOC0xLjU1MiAzLjI4NS0xLjIzIDMuMjg1LTEuMjMuNjQ1IDEuNjUzLjI0IDIuODczLjEyIDMuMTc2Ljc2NS44NCAxLjIzIDEuOTEgMS4yMyAzLjIyIDAgNC42MS0yLjgwNSA1LjYyNS01LjQ3NSA1LjkyLjQyLjM2LjgxIDEuMDk2LjgxIDIuMjIgMCAxLjYwNi0uMDE1IDIuODk2LS4wMTUgMy4yODYgMCAuMzE1LjIxLjY5LjgyNS41N0MyMC41NjUgMjIuMDkyIDI0IDE3LjU5MiAyNCAxMi4yOTdjMC02LjYyNy01LjM3My0xMi0xMi0xMiIvPjwvc3ZnPg=="/><text transform="scale(.1)" x="577.5" y="175" textLength="515" fill="#fff" font-weight="bold">GITHUB</text></g></svg>
|
||||||
|
After Width: | Height: | Size: 1.7 KiB |
22
docs/src/assets/open_hf_space.svg
Normal file
22
docs/src/assets/open_hf_space.svg
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
<svg width="147" height="20" viewBox="0 0 147 20" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||||
|
<rect x="0.5" y="0.5" width="145.482" height="19" rx="9.5" fill="white" stroke="#EFEFEF"/>
|
||||||
|
<path d="M14.1863 10.9251V12.7593H16.0205V10.9251H14.1863Z" fill="#FF3270"/>
|
||||||
|
<path d="M17.8707 10.9251V12.7593H19.7049V10.9251H17.8707Z" fill="#861FFF"/>
|
||||||
|
<path d="M14.1863 7.24078V9.07496H16.0205V7.24078H14.1863Z" fill="#097EFF"/>
|
||||||
|
<path fill-rule="evenodd" clip-rule="evenodd" d="M12.903 6.77179C12.903 6.32194 13.2676 5.95728 13.7175 5.95728C14.1703 5.95728 15.2556 5.95728 16.1094 5.95728C16.7538 5.95728 17.2758 6.47963 17.2758 7.12398V9.6698H19.8217C20.4661 9.6698 20.9884 10.1922 20.9884 10.8365C20.9884 11.6337 20.9884 12.4309 20.9884 13.2282C20.9884 13.678 20.6237 14.0427 20.1738 14.0427H17.3039H16.5874H13.7175C13.2676 14.0427 12.903 13.678 12.903 13.2282V9.71653V9.64174V6.77179ZM14.1863 7.24066V9.07485H16.0205V7.24066H14.1863ZM14.1863 12.7593V10.9251H16.0205V12.7593H14.1863ZM17.8708 12.7593V10.9251H19.705V12.7593H17.8708Z" fill="black"/>
|
||||||
|
<path d="M18.614 8.35468L20.7796 6.18905M20.7796 6.18905V7.66073M20.7796 6.18905L19.2724 6.18905" stroke="black" stroke-width="0.686298" stroke-linecap="round" stroke-linejoin="round"/>
|
||||||
|
<path d="M31.6082 13.9838C30.8546 13.9838 30.1895 13.802 29.6132 13.4385C29.0368 13.066 28.5846 12.5429 28.2565 11.869C27.9373 11.1862 27.7777 10.3749 27.7777 9.43501C27.7777 8.49511 27.9373 7.69265 28.2565 7.02762C28.5846 6.3626 29.0368 5.85275 29.6132 5.49807C30.1895 5.14339 30.8546 4.96605 31.6082 4.96605C32.3708 4.96605 33.0403 5.14339 33.6166 5.49807C34.193 5.85275 34.6408 6.3626 34.96 7.02762C35.2881 7.69265 35.4521 8.49511 35.4521 9.43501C35.4521 10.3749 35.2881 11.1862 34.96 11.869C34.6408 12.5429 34.193 13.066 33.6166 13.4385C33.0403 13.802 32.3708 13.9838 31.6082 13.9838ZM31.6082 12.6404C32.291 12.6404 32.8363 12.3523 33.2442 11.7759C33.6521 11.1907 33.856 10.4104 33.856 9.43501C33.856 8.45964 33.6521 7.69708 33.2442 7.14733C32.8363 6.58871 32.291 6.3094 31.6082 6.3094C30.9255 6.3094 30.3802 6.58871 29.9723 7.14733C29.5644 7.69708 29.3605 8.45964 29.3605 9.43501C29.3605 10.4104 29.5644 11.1907 29.9723 11.7759C30.3802 12.3523 30.9255 12.6404 31.6082 12.6404Z" fill="#2C3236"/>
|
||||||
|
<path d="M37.0592 16.4045V7.29363H38.3227L38.4291 7.98526H38.4823C38.7572 7.75472 39.0631 7.55521 39.4 7.38674C39.7459 7.21826 40.0961 7.13403 40.4508 7.13403C41.2665 7.13403 41.8961 7.43551 42.3395 8.03846C42.7917 8.64142 43.0178 9.44831 43.0178 10.4591C43.0178 11.204 42.8848 11.8424 42.6188 12.3744C42.3528 12.8976 42.0069 13.2966 41.5813 13.5715C41.1646 13.8463 40.7124 13.9838 40.2247 13.9838C39.9409 13.9838 39.6572 13.9217 39.3734 13.7976C39.0897 13.6646 38.8148 13.4872 38.5488 13.2656L38.5887 14.3562V16.4045H37.0592ZM39.9055 12.7202C40.3399 12.7202 40.7035 12.5296 40.9961 12.1483C41.2887 11.767 41.435 11.2084 41.435 10.4724C41.435 9.81629 41.3242 9.30644 41.1025 8.94289C40.8808 8.57935 40.5217 8.39757 40.0252 8.39757C39.5641 8.39757 39.0853 8.64142 38.5887 9.1291V12.1749C38.8281 12.37 39.0587 12.5119 39.2803 12.6005C39.502 12.6803 39.7104 12.7202 39.9055 12.7202Z" fill="#2C3236"/>
|
||||||
|
<path d="M47.3598 13.9838C46.7568 13.9838 46.2115 13.8508 45.7238 13.5848C45.2361 13.3099 44.8504 12.9197 44.5667 12.4143C44.2829 11.9 44.141 11.2838 44.141 10.5656C44.141 9.85619 44.2829 9.24437 44.5667 8.73009C44.8593 8.2158 45.2361 7.82122 45.6972 7.54634C46.1583 7.27147 46.6415 7.13403 47.147 7.13403C47.741 7.13403 48.2376 7.26703 48.6366 7.53304C49.0356 7.79018 49.3371 8.15373 49.541 8.62368C49.745 9.08476 49.847 9.62122 49.847 10.233C49.847 10.5523 49.8248 10.8005 49.7805 10.9779H45.6307C45.7016 11.5542 45.91 12.002 46.2558 12.3212C46.6016 12.6404 47.0361 12.8 47.5593 12.8C47.843 12.8 48.1046 12.7601 48.344 12.6803C48.5923 12.5917 48.8361 12.472 49.0755 12.3212L49.5942 13.2789C49.2839 13.4828 48.9381 13.6513 48.5568 13.7843C48.1755 13.9173 47.7765 13.9838 47.3598 13.9838ZM45.6174 9.94043H48.5169C48.5169 9.43501 48.4061 9.04043 48.1844 8.75669C47.9627 8.46408 47.6302 8.31777 47.1869 8.31777C46.8056 8.31777 46.4642 8.45964 46.1627 8.74339C45.8701 9.01826 45.6883 9.41728 45.6174 9.94043Z" fill="#2C3236"/>
|
||||||
|
<path d="M51.3078 13.8242V7.29363H52.5714L52.6778 8.17147H52.731C53.0236 7.88772 53.3428 7.64388 53.6886 7.43994C54.0344 7.236 54.429 7.13403 54.8724 7.13403C55.5728 7.13403 56.0827 7.36014 56.4019 7.81235C56.7211 8.26457 56.8807 8.90299 56.8807 9.72762V13.8242H55.3512V9.92713C55.3512 9.38624 55.2714 9.00496 55.1118 8.78329C54.9522 8.56161 54.6906 8.45078 54.327 8.45078C54.0433 8.45078 53.7906 8.52171 53.5689 8.66358C53.3561 8.79659 53.1123 8.99609 52.8374 9.2621V13.8242H51.3078Z" fill="#2C3236"/>
|
||||||
|
<path d="M61.4131 13.8242V7.29363H62.9426V13.8242H61.4131ZM62.1845 6.14979C61.9096 6.14979 61.6879 6.06999 61.5195 5.91038C61.351 5.75078 61.2668 5.53797 61.2668 5.27196C61.2668 5.01482 61.351 4.80644 61.5195 4.64684C61.6879 4.48723 61.9096 4.40743 62.1845 4.40743C62.4594 4.40743 62.6811 4.48723 62.8495 4.64684C63.018 4.80644 63.1022 5.01482 63.1022 5.27196C63.1022 5.53797 63.018 5.75078 62.8495 5.91038C62.6811 6.06999 62.4594 6.14979 62.1845 6.14979Z" fill="#2C3236"/>
|
||||||
|
<path d="M64.8941 13.8242V7.29363H66.1576L66.264 8.17147H66.3172C66.6098 7.88772 66.929 7.64388 67.2748 7.43994C67.6207 7.236 68.0152 7.13403 68.4586 7.13403C69.1591 7.13403 69.6689 7.36014 69.9881 7.81235C70.3074 8.26457 70.467 8.90299 70.467 9.72762V13.8242H68.9374V9.92713C68.9374 9.38624 68.8576 9.00496 68.698 8.78329C68.5384 8.56161 68.2768 8.45078 67.9133 8.45078C67.6295 8.45078 67.3768 8.52171 67.1551 8.66358C66.9423 8.79659 66.6985 8.99609 66.4236 9.2621V13.8242H64.8941Z" fill="#2C3236"/>
|
||||||
|
<path d="M75.1323 13.8242V5.12565H76.6752V8.62368H80.1998V5.12565H81.7427V13.8242H80.1998V9.96703H76.6752V13.8242H75.1323Z" fill="#2C3236"/>
|
||||||
|
<path d="M83.9517 13.8242V5.12565H89.2054V6.4291H85.4945V8.88969H88.6601V10.1931H85.4945V13.8242H83.9517Z" fill="#2C3236"/>
|
||||||
|
<path d="M95.9349 13.9838C95.3497 13.9838 94.7822 13.8729 94.2324 13.6513C93.6915 13.4296 93.2127 13.1148 92.796 12.7069L93.7004 11.6562C94.0108 11.9488 94.3654 12.1882 94.7645 12.3744C95.1635 12.5518 95.5625 12.6404 95.9615 12.6404C96.458 12.6404 96.8349 12.5385 97.092 12.3345C97.3492 12.1306 97.4778 11.8601 97.4778 11.5232C97.4778 11.1596 97.3492 10.8981 97.092 10.7385C96.8438 10.5789 96.5245 10.4148 96.1344 10.2463L94.9374 9.72762C94.6536 9.60348 94.3743 9.44388 94.0994 9.2488C93.8334 9.05373 93.6117 8.80546 93.4344 8.50398C93.2659 8.2025 93.1817 7.83895 93.1817 7.41334C93.1817 6.95225 93.3058 6.53994 93.5541 6.17639C93.8113 5.80398 94.1571 5.51137 94.5915 5.29856C95.0349 5.07689 95.5403 4.96605 96.1078 4.96605C96.6132 4.96605 97.1009 5.06802 97.5709 5.27196C98.0408 5.46703 98.4442 5.73304 98.7812 6.06999L97.9965 7.05423C97.7216 6.82368 97.429 6.64191 97.1186 6.5089C96.8172 6.3759 96.4802 6.3094 96.1078 6.3094C95.6999 6.3094 95.3674 6.4025 95.1103 6.58871C94.862 6.76605 94.7379 7.01432 94.7379 7.33353C94.7379 7.55521 94.7999 7.74142 94.9241 7.89215C95.0571 8.03403 95.23 8.15816 95.4428 8.26457C95.6556 8.36211 95.8817 8.45964 96.1211 8.55718L97.3048 9.0493C97.8191 9.27097 98.2403 9.56358 98.5684 9.92713C98.8965 10.2818 99.0605 10.7739 99.0605 11.4035C99.0605 11.8734 98.9364 12.3035 98.6881 12.6936C98.4398 13.0838 98.0807 13.3986 97.6108 13.638C97.1497 13.8685 96.591 13.9838 95.9349 13.9838Z" fill="#2C3236"/>
|
||||||
|
<path d="M100.509 16.4045V7.29363H101.773L101.879 7.98526H101.932C102.207 7.75472 102.513 7.55521 102.85 7.38674C103.196 7.21826 103.546 7.13403 103.901 7.13403C104.717 7.13403 105.346 7.43551 105.79 8.03846C106.242 8.64142 106.468 9.44831 106.468 10.4591C106.468 11.204 106.335 11.8424 106.069 12.3744C105.803 12.8976 105.457 13.2966 105.031 13.5715C104.615 13.8463 104.162 13.9838 103.675 13.9838C103.391 13.9838 103.107 13.9217 102.824 13.7976C102.54 13.6646 102.265 13.4872 101.999 13.2656L102.039 14.3562V16.4045H100.509ZM103.356 12.7202C103.79 12.7202 104.154 12.5296 104.446 12.1483C104.739 11.767 104.885 11.2084 104.885 10.4724C104.885 9.81629 104.774 9.30644 104.553 8.94289C104.331 8.57935 103.972 8.39757 103.475 8.39757C103.014 8.39757 102.535 8.64142 102.039 9.1291V12.1749C102.278 12.37 102.509 12.5119 102.73 12.6005C102.952 12.6803 103.16 12.7202 103.356 12.7202Z" fill="#2C3236"/>
|
||||||
|
<path d="M109.444 13.9838C108.876 13.9838 108.411 13.8064 108.047 13.4518C107.692 13.0971 107.515 12.636 107.515 12.0685C107.515 11.368 107.821 10.8271 108.433 10.4458C109.045 10.0557 110.02 9.78969 111.359 9.64782C111.35 9.30201 111.257 9.00496 111.08 8.75669C110.911 8.49954 110.605 8.37097 110.162 8.37097C109.843 8.37097 109.528 8.43304 109.218 8.55718C108.916 8.68132 108.619 8.83206 108.326 9.0094L107.768 7.98526C108.131 7.75472 108.539 7.55521 108.991 7.38674C109.452 7.21826 109.94 7.13403 110.454 7.13403C111.27 7.13403 111.878 7.37787 112.277 7.86555C112.685 8.34437 112.888 9.04043 112.888 9.95373V13.8242H111.625L111.518 13.1059H111.465C111.173 13.3542 110.858 13.5626 110.521 13.7311C110.193 13.8995 109.834 13.9838 109.444 13.9838ZM109.936 12.7867C110.202 12.7867 110.441 12.7247 110.654 12.6005C110.876 12.4675 111.111 12.2902 111.359 12.0685V10.6055C110.472 10.7207 109.856 10.8936 109.51 11.1242C109.164 11.3458 108.991 11.6207 108.991 11.9488C108.991 12.2414 109.08 12.4542 109.257 12.5872C109.435 12.7202 109.661 12.7867 109.936 12.7867Z" fill="#2C3236"/>
|
||||||
|
<path d="M117.446 13.9838C116.851 13.9838 116.315 13.8508 115.836 13.5848C115.366 13.3099 114.989 12.9197 114.706 12.4143C114.431 11.9 114.293 11.2838 114.293 10.5656C114.293 9.83846 114.444 9.2222 114.746 8.71679C115.047 8.2025 115.446 7.81235 115.943 7.54634C116.448 7.27147 116.989 7.13403 117.565 7.13403C117.982 7.13403 118.346 7.20496 118.656 7.34684C118.966 7.48871 119.241 7.66161 119.48 7.86555L118.736 8.86309C118.567 8.71235 118.394 8.59708 118.217 8.51728C118.04 8.42861 117.849 8.38427 117.645 8.38427C117.122 8.38427 116.692 8.58378 116.355 8.98279C116.027 9.38181 115.863 9.9094 115.863 10.5656C115.863 11.2128 116.022 11.736 116.342 12.135C116.67 12.534 117.091 12.7335 117.605 12.7335C117.862 12.7335 118.102 12.6803 118.323 12.5739C118.554 12.4587 118.762 12.3256 118.948 12.1749L119.574 13.1857C119.272 13.4518 118.935 13.6513 118.563 13.7843C118.19 13.9173 117.818 13.9838 117.446 13.9838Z" fill="#2C3236"/>
|
||||||
|
<path d="M123.331 13.9838C122.728 13.9838 122.183 13.8508 121.695 13.5848C121.207 13.3099 120.822 12.9197 120.538 12.4143C120.254 11.9 120.112 11.2838 120.112 10.5656C120.112 9.85619 120.254 9.24437 120.538 8.73009C120.83 8.2158 121.207 7.82122 121.668 7.54634C122.13 7.27147 122.613 7.13403 123.118 7.13403C123.712 7.13403 124.209 7.26703 124.608 7.53304C125.007 7.79018 125.308 8.15373 125.512 8.62368C125.716 9.08476 125.818 9.62122 125.818 10.233C125.818 10.5523 125.796 10.8005 125.752 10.9779H121.602C121.673 11.5542 121.881 12.002 122.227 12.3212C122.573 12.6404 123.007 12.8 123.53 12.8C123.814 12.8 124.076 12.7601 124.315 12.6803C124.563 12.5917 124.807 12.472 125.047 12.3212L125.565 13.2789C125.255 13.4828 124.909 13.6513 124.528 13.7843C124.147 13.9173 123.748 13.9838 123.331 13.9838ZM121.589 9.94043H124.488C124.488 9.43501 124.377 9.04043 124.156 8.75669C123.934 8.46408 123.601 8.31777 123.158 8.31777C122.777 8.31777 122.435 8.45964 122.134 8.74339C121.841 9.01826 121.66 9.41728 121.589 9.94043Z" fill="#2C3236"/>
|
||||||
|
<path d="M129.101 13.9838C128.658 13.9838 128.215 13.8995 127.771 13.7311C127.328 13.5537 126.947 13.3365 126.627 13.0793L127.346 12.0951C127.638 12.3168 127.931 12.4941 128.223 12.6271C128.516 12.7601 128.826 12.8266 129.154 12.8266C129.509 12.8266 129.771 12.7513 129.939 12.6005C130.108 12.4498 130.192 12.2636 130.192 12.0419C130.192 11.8557 130.121 11.705 129.979 11.5897C129.846 11.4656 129.673 11.3591 129.46 11.2705C129.248 11.1729 129.026 11.0798 128.795 10.9912C128.512 10.8848 128.228 10.7562 127.944 10.6055C127.669 10.4458 127.443 10.2463 127.266 10.0069C127.088 9.75866 127 9.45274 127 9.0892C127 8.51284 127.213 8.04289 127.638 7.67935C128.064 7.3158 128.64 7.13403 129.367 7.13403C129.828 7.13403 130.241 7.21383 130.604 7.37344C130.968 7.53304 131.282 7.71482 131.548 7.91876L130.844 8.84979C130.613 8.68132 130.378 8.54831 130.139 8.45078C129.908 8.34437 129.664 8.29117 129.407 8.29117C129.079 8.29117 128.835 8.36211 128.676 8.50398C128.516 8.63698 128.436 8.80545 128.436 9.0094C128.436 9.26654 128.569 9.46161 128.835 9.59462C129.101 9.72762 129.412 9.85619 129.766 9.98033C130.068 10.0867 130.36 10.2197 130.644 10.3793C130.928 10.5301 131.163 10.7296 131.349 10.9779C131.544 11.2261 131.642 11.5542 131.642 11.9621C131.642 12.5207 131.424 12.9995 130.99 13.3986C130.555 13.7887 129.926 13.9838 129.101 13.9838Z" fill="#2C3236"/>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 12 KiB |
1
docs/src/assets/python.svg
Normal file
1
docs/src/assets/python.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="97.5" height="28" role="img" aria-label="PYTHON"><title>PYTHON</title><g shape-rendering="crispEdges"><rect width="97.5" height="28" fill="#3670a0"/></g><g fill="#fff" text-anchor="middle" font-family="Verdana,Geneva,DejaVu Sans,sans-serif" text-rendering="geometricPrecision" font-size="100"><image x="9" y="7" width="14" height="14" xlink:href="data:image/svg+xml;base64,PHN2ZyBmaWxsPSIjZmZkZDU0IiByb2xlPSJpbWciIHZpZXdCb3g9IjAgMCAyNCAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj48dGl0bGU+UHl0aG9uPC90aXRsZT48cGF0aCBkPSJNMTQuMjUuMThsLjkuMi43My4yNi41OS4zLjQ1LjMyLjM0LjM0LjI1LjM0LjE2LjMzLjEuMy4wNC4yNi4wMi4yLS4wMS4xM1Y4LjVsLS4wNS42My0uMTMuNTUtLjIxLjQ2LS4yNi4zOC0uMy4zMS0uMzMuMjUtLjM1LjE5LS4zNS4xNC0uMzMuMS0uMy4wNy0uMjYuMDQtLjIxLjAySDguNzdsLS42OS4wNS0uNTkuMTQtLjUuMjItLjQxLjI3LS4zMy4zMi0uMjcuMzUtLjIuMzYtLjE1LjM3LS4xLjM1LS4wNy4zMi0uMDQuMjctLjAyLjIxdjMuMDZIMy4xN2wtLjIxLS4wMy0uMjgtLjA3LS4zMi0uMTItLjM1LS4xOC0uMzYtLjI2LS4zNi0uMzYtLjM1LS40Ni0uMzItLjU5LS4yOC0uNzMtLjIxLS44OC0uMTQtMS4wNS0uMDUtMS4yMy4wNi0xLjIyLjE2LTEuMDQuMjQtLjg3LjMyLS43MS4zNi0uNTcuNC0uNDQuNDItLjMzLjQyLS4yNC40LS4xNi4zNi0uMS4zMi0uMDUuMjQtLjAxaC4xNmwuMDYuMDFoOC4xNnYtLjgzSDYuMThsLS4wMS0yLjc1LS4wMi0uMzcuMDUtLjM0LjExLS4zMS4xNy0uMjguMjUtLjI2LjMxLS4yMy4zOC0uMi40NC0uMTguNTEtLjE1LjU4LS4xMi42NC0uMS43MS0uMDYuNzctLjA0Ljg0LS4wMiAxLjI3LjA1em0tNi4zIDEuOThsLS4yMy4zMy0uMDguNDEuMDguNDEuMjMuMzQuMzMuMjIuNDEuMDkuNDEtLjA5LjMzLS4yMi4yMy0uMzQuMDgtLjQxLS4wOC0uNDEtLjIzLS4zMy0uMzMtLjIyLS40MS0uMDktLjQxLjA5em0xMy4wOSAzLjk1bC4yOC4wNi4zMi4xMi4zNS4xOC4zNi4yNy4zNi4zNS4zNS40Ny4zMi41OS4yOC43My4yMS44OC4xNCAxLjA0LjA1IDEuMjMtLjA2IDEuMjMtLjE2IDEuMDQtLjI0Ljg2LS4zMi43MS0uMzYuNTctLjQuNDUtLjQyLjMzLS40Mi4yNC0uNC4xNi0uMzYuMDktLjMyLjA1LS4yNC4wMi0uMTYtLjAxaC04LjIydi44Mmg1Ljg0bC4wMSAyLjc2LjAyLjM2LS4wNS4zNC0uMTEuMzEtLjE3LjI5LS4yNS4yNS0uMzEuMjQtLjM4LjItLjQ0LjE3LS41MS4xNS0uNTguMTMtLjY0LjA5LS43MS4wNy0uNzcuMDQtLjg0LjAxLTEuMjctLjA0LTEuMDctLjE0LS45LS4yLS43My0uMjUtLjU5LS4zLS40NS0uM
zMtLjM0LS4zNC0uMjUtLjM0LS4xNi0uMzMtLjEtLjMtLjA0LS4yNS0uMDItLjIuMDEtLjEzdi01LjM0bC4wNS0uNjQuMTMtLjU0LjIxLS40Ni4yNi0uMzguMy0uMzIuMzMtLjI0LjM1LS4yLjM1LS4xNC4zMy0uMS4zLS4wNi4yNi0uMDQuMjEtLjAyLjEzLS4wMWg1Ljg0bC42OS0uMDUuNTktLjE0LjUtLjIxLjQxLS4yOC4zMy0uMzIuMjctLjM1LjItLjM2LjE1LS4zNi4xLS4zNS4wNy0uMzIuMDQtLjI4LjAyLS4yMVY2LjA3aDIuMDlsLjE0LjAxem0tNi40NyAxNC4yNWwtLjIzLjMzLS4wOC40MS4wOC40MS4yMy4zMy4zMy4yMy40MS4wOC40MS0uMDguMzMtLjIzLjIzLS4zMy4wOC0uNDEtLjA4LS40MS0uMjMtLjMzLS4zMy0uMjMtLjQxLS4wOC0uNDEuMDh6Ii8+PC9zdmc+"/><text transform="scale(.1)" x="587.5" y="175" textLength="535" fill="#fff" font-weight="bold">PYTHON</text></g></svg>
|
||||||
|
After Width: | Height: | Size: 2.6 KiB |
@@ -572,7 +572,7 @@ You can use the embedding API when working with embedding models. It automatical
|
|||||||
--8<-- "rust/lancedb/examples/openai.rs:openai_embeddings"
|
--8<-- "rust/lancedb/examples/openai.rs:openai_embeddings"
|
||||||
```
|
```
|
||||||
|
|
||||||
Learn about using the existing integrations and creating custom embedding functions in the [embedding API guide](./embeddings/).
|
Learn about using the existing integrations and creating custom embedding functions in the [embedding API guide](./embeddings/index.md).
|
||||||
|
|
||||||
|
|
||||||
## What's next
|
## What's next
|
||||||
|
|||||||
92
docs/src/concepts/index_hnsw.md
Normal file
92
docs/src/concepts/index_hnsw.md
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
|
||||||
|
# Understanding HNSW index
|
||||||
|
|
||||||
|
Approximate Nearest Neighbor (ANN) search is a method for finding data points near a given point in a dataset, though not always the exact nearest one. HNSW is one of the most accurate and fastest Approximate Nearest Neighbour search algorithms. It's beneficial in high-dimensional spaces where finding the exact nearest neighbor would be too slow and costly.
|
||||||
|
|
||||||
|
[Jump to usage](#usage)
|
||||||
|
There are three main types of ANN search algorithms:
|
||||||
|
|
||||||
|
* **Tree-based search algorithms**: Use a tree structure to organize and store data points.
|
||||||
|
* **Hash-based search algorithms**: Use a specialized geometric hash table to store and manage data points. These algorithms typically focus on theoretical guarantees, and don't usually perform as well as the other approaches in practice.
|
||||||
|
* **Graph-based search algorithms**: Use a graph structure to store data points, which can be a bit complex.
|
||||||
|
|
||||||
|
HNSW is a graph-based algorithm. All graph-based search algorithms rely on the idea of a k-nearest neighbor (or k-approximate nearest neighbor) graph, which we outline below.
|
||||||
|
HNSW also combines this with the ideas behind a classic 1-dimensional search data structure: the skip list.
|
||||||
|
|
||||||
|
## k-Nearest Neighbor Graphs and k-Approximate Nearest Neighbor Graphs
|
||||||
|
The k-nearest neighbor graph actually predates its use for ANN search. Its construction is quite simple:
|
||||||
|
|
||||||
|
* Each vector in the dataset is given an associated vertex.
|
||||||
|
* Each vertex has outgoing edges to its k nearest neighbors. That is, the k closest other vertices by Euclidean distance between the two corresponding vectors. This can be thought of as a "friend list" for the vertex.
|
||||||
|
* For some applications (including nearest-neighbor search), the incoming edges are also added.
|
||||||
|
|
||||||
|
Eventually, it was realized that the following greedy search method over such a graph typically results in good approximate nearest neighbors:
|
||||||
|
|
||||||
|
* Given a query vector, start at some fixed "entry point" vertex (e.g. the approximate center node).
|
||||||
|
* Look at that vertex's neighbors. If any of them are closer to the query vector than the current vertex, then move to that vertex.
|
||||||
|
* Repeat until a local optimum is found.
|
||||||
|
|
||||||
|
The above algorithm also generalizes to e.g. top 10 approximate nearest neighbors.
|
||||||
|
|
||||||
|
Computing a k-nearest neighbor graph is actually quite slow, taking quadratic time in the dataset size. It was quickly realized that near-identical performance can be achieved using a k-approximate nearest neighbor graph. That is, instead of obtaining the exact k-nearest neighbors for each vertex, an approximate nearest neighbor search data structure is used to build the graph much faster.
|
||||||
|
In fact, another data structure is not needed: this can be done "incrementally".
|
||||||
|
That is, if you start with a k-ANN graph for n-1 vertices, you can extend it to a k-ANN graph for n vertices as well by using the graph to obtain the k-ANN for the new vertex.
|
||||||
|
|
||||||
|
One downside of k-NN and k-ANN graphs alone is that one must typically build them with a large value of k to get decent results, resulting in a large index.
|
||||||
|
|
||||||
|
|
||||||
|
## HNSW: Hierarchical Navigable Small Worlds
|
||||||
|
|
||||||
|
HNSW builds on k-ANN in two main ways:
|
||||||
|
|
||||||
|
* Instead of getting the k-approximate nearest neighbors for a large value of k, it sparsifies the k-ANN graph using a carefully chosen "edge pruning" heuristic, allowing for the number of edges per vertex to be limited to a relatively small constant.
|
||||||
|
* The "entry point" vertex is chosen dynamically using a recursively constructed data structure on a subset of the data, similarly to a skip list.
|
||||||
|
|
||||||
|
This recursive structure can be thought of as separating into layers:
|
||||||
|
|
||||||
|
* At the bottom-most layer, a k-ANN graph on the whole dataset is present.
|
||||||
|
* At the second layer, a k-ANN graph on a fraction of the dataset (e.g. 10%) is present.
|
||||||
|
* At the Lth layer, a k-ANN graph is present. It is over a (constant) fraction (e.g. 10%) of the vectors/vertices present in the L-1th layer.
|
||||||
|
|
||||||
|
Then the greedy search routine operates as follows:
|
||||||
|
|
||||||
|
* At the top layer (using an arbitrary vertex as an entry point), use the greedy local search routine on the k-ANN graph to get an approximate nearest neighbor at that layer.
|
||||||
|
* Using the approximate nearest neighbor found in the previous layer as an entry point, find an approximate nearest neighbor in the next layer with the same method.
|
||||||
|
* Repeat until the bottom-most layer is reached. Then use the entry point to find multiple nearest neighbors (e.g. top 10).
|
||||||
|
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
We can combine the above concepts to understand how to build and query an HNSW index in LanceDB.
|
||||||
|
|
||||||
|
### Construct index
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
import numpy as np
|
||||||
|
uri = "/tmp/lancedb"
|
||||||
|
db = lancedb.connect(uri)
|
||||||
|
|
||||||
|
# Create 10,000 sample vectors
|
||||||
|
data = [
|
||||||
|
{"vector": row, "item": f"item {i}"}
|
||||||
|
for i, row in enumerate(np.random.random((10_000, 1536)).astype('float32'))
|
||||||
|
]
|
||||||
|
|
||||||
|
# Add the vectors to a table
|
||||||
|
tbl = db.create_table("my_vectors", data=data)
|
||||||
|
|
||||||
|
# Create and train the HNSW index for a 1536-dimensional vector
|
||||||
|
# Make sure you have enough data in the table for an effective training step
|
||||||
|
tbl.create_index(index_type="IVF_HNSW_SQ")
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
### Query the index
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Search using a random 1536-dimensional embedding
|
||||||
|
tbl.search(np.random.random((1536))) \
|
||||||
|
.limit(2) \
|
||||||
|
.to_pandas()
|
||||||
|
```
|
||||||
@@ -0,0 +1,67 @@
|
|||||||
|
# Imagebind embeddings
|
||||||
|
We have support for [imagebind](https://github.com/facebookresearch/ImageBind) model embeddings. You can download our version of the packaged model via - `pip install imagebind-packaged==0.1.2`.
|
||||||
|
|
||||||
|
This function is registered as `imagebind` and supports Audio, Video and Text modalities (extending to Thermal, Depth, and IMU data):
|
||||||
|
|
||||||
|
| Parameter | Type | Default Value | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `name` | `str` | `"imagebind_huge"` | Name of the model. |
|
||||||
|
| `device` | `str` | `"cpu"` | The device to run the model on. Can be `"cpu"` or `"gpu"`. |
|
||||||
|
| `normalize` | `bool` | `False` | set to `True` to normalize your inputs before model ingestion. |
|
||||||
|
|
||||||
|
Below is an example demonstrating how the API works:
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.embeddings import get_registry
|
||||||
|
|
||||||
|
db = lancedb.connect("/tmp/db")
|
||||||
|
func = get_registry().get("imagebind").create()
|
||||||
|
|
||||||
|
class ImageBindModel(LanceModel):
|
||||||
|
text: str
|
||||||
|
image_uri: str = func.SourceField()
|
||||||
|
audio_path: str
|
||||||
|
vector: Vector(func.ndims()) = func.VectorField()
|
||||||
|
|
||||||
|
# add locally accessible image paths
|
||||||
|
text_list=["A dog.", "A car", "A bird"]
|
||||||
|
image_paths=[".assets/dog_image.jpg", ".assets/car_image.jpg", ".assets/bird_image.jpg"]
|
||||||
|
audio_paths=[".assets/dog_audio.wav", ".assets/car_audio.wav", ".assets/bird_audio.wav"]
|
||||||
|
|
||||||
|
# Load data
|
||||||
|
inputs = [
|
||||||
|
{"text": a, "audio_path": b, "image_uri": c}
|
||||||
|
for a, b, c in zip(text_list, audio_paths, image_paths)
|
||||||
|
]
|
||||||
|
|
||||||
|
#create table and add data
|
||||||
|
table = db.create_table("img_bind", schema=ImageBindModel)
|
||||||
|
table.add(inputs)
|
||||||
|
```
|
||||||
|
|
||||||
|
Now, we can search using any modality:
|
||||||
|
|
||||||
|
#### image search
|
||||||
|
```python
|
||||||
|
query_image = "./assets/dog_image2.jpg" #download an image and enter that path here
|
||||||
|
actual = table.search(query_image).limit(1).to_pydantic(ImageBindModel)[0]
|
||||||
|
print(actual.text == "dog")
|
||||||
|
```
|
||||||
|
#### audio search
|
||||||
|
|
||||||
|
```python
|
||||||
|
query_audio = "./assets/car_audio2.wav" #download an audio clip and enter path here
|
||||||
|
actual = table.search(query_audio).limit(1).to_pydantic(ImageBindModel)[0]
|
||||||
|
print(actual.text == "car")
|
||||||
|
```
|
||||||
|
#### Text search
|
||||||
|
You can add any input query and fetch the result as follows:
|
||||||
|
```python
|
||||||
|
query = "an animal which flies and tweets"
|
||||||
|
actual = table.search(query).limit(1).to_pydantic(ImageBindModel)[0]
|
||||||
|
print(actual.text == "bird")
|
||||||
|
```
|
||||||
|
|
||||||
|
If you have any questions about the embeddings API, supported models, or see a relevant model missing, please raise an issue [on GitHub](https://github.com/lancedb/lancedb/issues).
|
||||||
@@ -0,0 +1,51 @@
|
|||||||
|
# Jina Embeddings : Multimodal
|
||||||
|
|
||||||
|
Jina embeddings can also be used to embed both text and image data, only some of the models support image data and you can check the list
|
||||||
|
under [https://jina.ai/embeddings/](https://jina.ai/embeddings/)
|
||||||
|
|
||||||
|
Supported parameters (to be passed in `create` method) are:
|
||||||
|
|
||||||
|
| Parameter | Type | Default Value | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `name` | `str` | `"jina-clip-v1"` | The model ID of the jina model to use |
|
||||||
|
|
||||||
|
Usage Example:
|
||||||
|
|
||||||
|
```python
|
||||||
|
import os
|
||||||
|
import requests
|
||||||
|
import lancedb
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.embeddings import get_registry
|
||||||
|
import pandas as pd
|
||||||
|
|
||||||
|
os.environ['JINA_API_KEY'] = 'jina_*'
|
||||||
|
|
||||||
|
db = lancedb.connect("~/.lancedb")
|
||||||
|
func = get_registry().get("jina").create()
|
||||||
|
|
||||||
|
|
||||||
|
class Images(LanceModel):
|
||||||
|
label: str
|
||||||
|
image_uri: str = func.SourceField() # image uri as the source
|
||||||
|
image_bytes: bytes = func.SourceField() # image bytes as the source
|
||||||
|
vector: Vector(func.ndims()) = func.VectorField() # vector column
|
||||||
|
vec_from_bytes: Vector(func.ndims()) = func.VectorField() # Another vector column
|
||||||
|
|
||||||
|
|
||||||
|
table = db.create_table("images", schema=Images)
|
||||||
|
labels = ["cat", "cat", "dog", "dog", "horse", "horse"]
|
||||||
|
uris = [
|
||||||
|
"http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg",
|
||||||
|
"http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg",
|
||||||
|
"http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg",
|
||||||
|
"http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg",
|
||||||
|
"http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg",
|
||||||
|
"http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg",
|
||||||
|
]
|
||||||
|
# get each uri as bytes
|
||||||
|
image_bytes = [requests.get(uri).content for uri in uris]
|
||||||
|
table.add(
|
||||||
|
pd.DataFrame({"label": labels, "image_uri": uris, "image_bytes": image_bytes})
|
||||||
|
)
|
||||||
|
```
|
||||||
@@ -0,0 +1,82 @@
|
|||||||
|
# OpenClip embeddings
|
||||||
|
We support CLIP model embeddings using the open source alternative, [open-clip](https://github.com/mlfoundations/open_clip) which supports various customizations. It is registered as `open-clip` and supports the following customizations:
|
||||||
|
|
||||||
|
| Parameter | Type | Default Value | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `name` | `str` | `"ViT-B-32"` | The name of the model. |
|
||||||
|
| `pretrained` | `str` | `"laion2b_s34b_b79k"` | The name of the pretrained model to load. |
|
||||||
|
| `device` | `str` | `"cpu"` | The device to run the model on. Can be `"cpu"` or `"gpu"`. |
|
||||||
|
| `batch_size` | `int` | `64` | The number of images to process in a batch. |
|
||||||
|
| `normalize` | `bool` | `True` | Whether to normalize the input images before feeding them to the model. |
|
||||||
|
|
||||||
|
This embedding function supports ingesting images as both bytes and urls. You can query them using both text and other images.
|
||||||
|
|
||||||
|
!!! info
|
||||||
|
LanceDB supports ingesting images directly from accessible links.
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.embeddings import get_registry
|
||||||
|
|
||||||
|
db = lancedb.connect("/tmp/db")
|
||||||
|
func = get_registry().get("open-clip").create()
|
||||||
|
|
||||||
|
class Images(LanceModel):
|
||||||
|
label: str
|
||||||
|
image_uri: str = func.SourceField() # image uri as the source
|
||||||
|
image_bytes: bytes = func.SourceField() # image bytes as the source
|
||||||
|
vector: Vector(func.ndims()) = func.VectorField() # vector column
|
||||||
|
vec_from_bytes: Vector(func.ndims()) = func.VectorField() # Another vector column
|
||||||
|
|
||||||
|
table = db.create_table("images", schema=Images)
|
||||||
|
labels = ["cat", "cat", "dog", "dog", "horse", "horse"]
|
||||||
|
uris = [
|
||||||
|
"http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg",
|
||||||
|
"http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg",
|
||||||
|
"http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg",
|
||||||
|
"http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg",
|
||||||
|
"http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg",
|
||||||
|
"http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg",
|
||||||
|
]
|
||||||
|
# get each uri as bytes
|
||||||
|
image_bytes = [requests.get(uri).content for uri in uris]
|
||||||
|
table.add(
|
||||||
|
pd.DataFrame({"label": labels, "image_uri": uris, "image_bytes": image_bytes})
|
||||||
|
)
|
||||||
|
```
|
||||||
|
Now we can search using text from both the default vector column and the custom vector column
|
||||||
|
```python
|
||||||
|
|
||||||
|
# text search
|
||||||
|
actual = table.search("man's best friend").limit(1).to_pydantic(Images)[0]
|
||||||
|
print(actual.label) # prints "dog"
|
||||||
|
|
||||||
|
frombytes = (
|
||||||
|
table.search("man's best friend", vector_column_name="vec_from_bytes")
|
||||||
|
.limit(1)
|
||||||
|
.to_pydantic(Images)[0]
|
||||||
|
)
|
||||||
|
print(frombytes.label)
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
Because we're using a multi-modal embedding function, we can also search using images
|
||||||
|
|
||||||
|
```python
|
||||||
|
# image search
|
||||||
|
query_image_uri = "http://farm1.staticflickr.com/200/467715466_ed4a31801f_z.jpg"
|
||||||
|
image_bytes = requests.get(query_image_uri).content
|
||||||
|
query_image = Image.open(io.BytesIO(image_bytes))
|
||||||
|
actual = table.search(query_image).limit(1).to_pydantic(Images)[0]
|
||||||
|
print(actual.label == "dog")
|
||||||
|
|
||||||
|
# image search using a custom vector column
|
||||||
|
other = (
|
||||||
|
table.search(query_image, vector_column_name="vec_from_bytes")
|
||||||
|
.limit(1)
|
||||||
|
.to_pydantic(Images)[0]
|
||||||
|
)
|
||||||
|
print(other.label)
|
||||||
|
|
||||||
|
```
|
||||||
@@ -0,0 +1,51 @@
|
|||||||
|
# AWS Bedrock Text Embedding Functions
|
||||||
|
|
||||||
|
AWS Bedrock supports multiple base models for generating text embeddings. You need to setup the AWS credentials to use this embedding function.
|
||||||
|
You can do so by using `awscli` and also add your session_token:
|
||||||
|
```shell
|
||||||
|
aws configure
|
||||||
|
aws configure set aws_session_token "<your_session_token>"
|
||||||
|
```
|
||||||
|
To ensure that the credentials are set up correctly, you can run the following command:
|
||||||
|
```shell
|
||||||
|
aws sts get-caller-identity
|
||||||
|
```
|
||||||
|
|
||||||
|
Supported Embedding modelIDs are:
|
||||||
|
* `amazon.titan-embed-text-v1`
|
||||||
|
* `cohere.embed-english-v3`
|
||||||
|
* `cohere.embed-multilingual-v3`
|
||||||
|
|
||||||
|
Supported parameters (to be passed in `create` method) are:
|
||||||
|
|
||||||
|
| Parameter | Type | Default Value | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| **name** | str | "amazon.titan-embed-text-v1" | The model ID of the bedrock model to use. Supported base models for Text Embeddings: amazon.titan-embed-text-v1, cohere.embed-english-v3, cohere.embed-multilingual-v3 |
|
||||||
|
| **region** | str | "us-east-1" | Optional name of the AWS Region in which the service should be called (e.g., "us-east-1"). |
|
||||||
|
| **profile_name** | str | None | Optional name of the AWS profile to use for calling the Bedrock service. If not specified, the default profile will be used. |
|
||||||
|
| **assumed_role** | str | None | Optional ARN of an AWS IAM role to assume for calling the Bedrock service. If not specified, the current active credentials will be used. |
|
||||||
|
| **role_session_name** | str | "lancedb-embeddings" | Optional name of the AWS IAM role session to use for calling the Bedrock service. If not specified, a "lancedb-embeddings" name will be used. |
|
||||||
|
| **runtime** | bool | True | Optional choice of getting different client to perform operations with the Amazon Bedrock service. |
|
||||||
|
| **max_retries** | int | 7 | Optional number of retries to perform when a request fails. |
|
||||||
|
|
||||||
|
Usage Example:
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.embeddings import get_registry
|
||||||
|
import pandas as pd
|
||||||
|
|
||||||
|
model = get_registry().get("bedrock-text").create()
|
||||||
|
|
||||||
|
class TextModel(LanceModel):
|
||||||
|
text: str = model.SourceField()
|
||||||
|
vector: Vector(model.ndims()) = model.VectorField()
|
||||||
|
|
||||||
|
df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
|
||||||
|
db = lancedb.connect("tmp_path")
|
||||||
|
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
|
||||||
|
|
||||||
|
tbl.add(df)
|
||||||
|
rs = tbl.search("hello").limit(1).to_pandas()
|
||||||
|
```
|
||||||
@@ -0,0 +1,63 @@
|
|||||||
|
# Cohere Embeddings
|
||||||
|
|
||||||
|
Using cohere API requires cohere package, which can be installed using `pip install cohere`. Cohere embeddings are used to generate embeddings for text data. The embeddings can be used for various tasks like semantic search, clustering, and classification.
|
||||||
|
You also need to set the `COHERE_API_KEY` environment variable to use the Cohere API.
|
||||||
|
|
||||||
|
Supported models are:
|
||||||
|
|
||||||
|
- embed-english-v3.0
|
||||||
|
- embed-multilingual-v3.0
|
||||||
|
- embed-english-light-v3.0
|
||||||
|
- embed-multilingual-light-v3.0
|
||||||
|
- embed-english-v2.0
|
||||||
|
- embed-english-light-v2.0
|
||||||
|
- embed-multilingual-v2.0
|
||||||
|
|
||||||
|
|
||||||
|
Supported parameters (to be passed in `create` method) are:
|
||||||
|
|
||||||
|
| Parameter | Type | Default Value | Description |
|
||||||
|
|---|---|--------|---------|
|
||||||
|
| `name` | `str` | `"embed-english-v2.0"` | The model ID of the cohere model to use. Supported base models for Text Embeddings: embed-english-v3.0, embed-multilingual-v3.0, embed-english-light-v3.0, embed-multilingual-light-v3.0, embed-english-v2.0, embed-english-light-v2.0, embed-multilingual-v2.0 |
|
||||||
|
| `source_input_type` | `str` | `"search_document"` | The type of input data to be used for the source column. |
|
||||||
|
| `query_input_type` | `str` | `"search_query"` | The type of input data to be used for the query. |
|
||||||
|
|
||||||
|
Cohere supports following input types:
|
||||||
|
|
||||||
|
| Input Type | Description |
|
||||||
|
|-------------------------|---------------------------------------|
|
||||||
|
| "`search_document`" | Used for embeddings stored in a vector|
|
||||||
|
| | database for search use-cases. |
|
||||||
|
| "`search_query`" | Used for embeddings of search queries |
|
||||||
|
| | run against a vector DB |
|
||||||
|
| "`semantic_similarity`" | Specifies the given text will be used |
|
||||||
|
| | for Semantic Textual Similarity (STS) |
|
||||||
|
| "`classification`" | Used for embeddings passed through a |
|
||||||
|
| | text classifier. |
|
||||||
|
| "`clustering`" | Used for the embeddings run through a |
|
||||||
|
| | clustering algorithm |
|
||||||
|
|
||||||
|
Usage Example:
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.embeddings import EmbeddingFunctionRegistry
|
||||||
|
|
||||||
|
cohere = EmbeddingFunctionRegistry.get_instance().get("cohere").create(name="embed-multilingual-v2.0")
|
||||||
|
|
||||||
|
class TextModel(LanceModel):
|
||||||
|
text: str = cohere.SourceField()
|
||||||
|
vector: Vector(cohere.ndims()) = cohere.VectorField()
|
||||||
|
|
||||||
|
data = [ { "text": "hello world" },
|
||||||
|
{ "text": "goodbye world" }]
|
||||||
|
|
||||||
|
db = lancedb.connect("~/.lancedb")
|
||||||
|
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
|
||||||
|
|
||||||
|
tbl.add(data)
|
||||||
|
```
|
||||||
@@ -0,0 +1,35 @@
|
|||||||
|
# Gemini Embeddings
|
||||||
|
With Google's Gemini, you can represent text (words, sentences, and blocks of text) in a vectorized form, making it easier to compare and contrast embeddings. For example, two texts that share a similar subject matter or sentiment should have similar embeddings, which can be identified through mathematical comparison techniques such as cosine similarity. For more on how and why you should use embeddings, refer to the Embeddings guide.
|
||||||
|
The Gemini Embedding Model API supports various task types:
|
||||||
|
|
||||||
|
| Task Type | Description |
|
||||||
|
|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| "`retrieval_query`" | Specifies the given text is a query in a search/retrieval setting. |
|
||||||
|
| "`retrieval_document`" | Specifies the given text is a document in a search/retrieval setting. Using this task type requires a title but is automatically proided by Embeddings API |
|
||||||
|
| "`semantic_similarity`" | Specifies the given text will be used for Semantic Textual Similarity (STS). |
|
||||||
|
| "`classification`" | Specifies that the embeddings will be used for classification. |
|
||||||
|
| "`clusering`" | Specifies that the embeddings will be used for clustering. |
|
||||||
|
|
||||||
|
|
||||||
|
Usage Example:
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
import pandas as pd
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.embeddings import get_registry
|
||||||
|
|
||||||
|
|
||||||
|
model = get_registry().get("gemini-text").create()
|
||||||
|
|
||||||
|
class TextModel(LanceModel):
|
||||||
|
text: str = model.SourceField()
|
||||||
|
vector: Vector(model.ndims()) = model.VectorField()
|
||||||
|
|
||||||
|
df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
|
||||||
|
db = lancedb.connect("~/.lancedb")
|
||||||
|
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
|
||||||
|
|
||||||
|
tbl.add(df)
|
||||||
|
rs = tbl.search("hello").limit(1).to_pandas()
|
||||||
|
```
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
# Huggingface embedding models
|
||||||
|
We offer support for all huggingface models (which can be loaded via [transformers](https://huggingface.co/docs/transformers/en/index) library). The default model is `colbert-ir/colbertv2.0` which also has its own special callout - `registry.get("colbert")`
|
||||||
|
|
||||||
|
Example usage -
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
import pandas as pd
|
||||||
|
|
||||||
|
from lancedb.embeddings import get_registry
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
|
||||||
|
model = get_registry().get("huggingface").create(name='facebook/bart-base')
|
||||||
|
|
||||||
|
class Words(LanceModel):
|
||||||
|
text: str = model.SourceField()
|
||||||
|
vector: Vector(model.ndims()) = model.VectorField()
|
||||||
|
|
||||||
|
df = pd.DataFrame({"text": ["hi hello sayonara", "goodbye world"]})
|
||||||
|
db = lancedb.connect("~/.lancedb")
table = db.create_table("greets", schema=Words)
|
||||||
|
table.add(df)
|
||||||
|
query = "old greeting"
|
||||||
|
actual = table.search(query).limit(1).to_pydantic(Words)[0]
|
||||||
|
print(actual.text)
|
||||||
|
```
|
||||||
@@ -0,0 +1,75 @@
|
|||||||
|
# IBM watsonx.ai Embeddings
|
||||||
|
|
||||||
|
Generate text embeddings using IBM's watsonx.ai platform.
|
||||||
|
|
||||||
|
## Supported Models
|
||||||
|
|
||||||
|
You can find a list of supported models at [IBM watsonx.ai Documentation](https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/fm-models-embed.html?context=wx). The currently supported model names are:
|
||||||
|
|
||||||
|
- `ibm/slate-125m-english-rtrvr`
|
||||||
|
- `ibm/slate-30m-english-rtrvr`
|
||||||
|
- `sentence-transformers/all-minilm-l12-v2`
|
||||||
|
- `intfloat/multilingual-e5-large`
|
||||||
|
|
||||||
|
## Parameters
|
||||||
|
|
||||||
|
The following parameters can be passed to the `create` method:
|
||||||
|
|
||||||
|
| Parameter | Type | Default Value | Description |
|
||||||
|
|------------|----------|----------------------------------|-----------------------------------------------------------|
|
||||||
|
| name | str | "ibm/slate-125m-english-rtrvr" | The model ID of the watsonx.ai model to use |
|
||||||
|
| api_key | str | None | Optional IBM Cloud API key (or set `WATSONX_API_KEY`) |
|
||||||
|
| project_id | str | None | Optional watsonx project ID (or set `WATSONX_PROJECT_ID`) |
|
||||||
|
| url | str | None | Optional custom URL for the watsonx.ai instance |
|
||||||
|
| params | dict | None | Optional additional parameters for the embedding model |
|
||||||
|
|
||||||
|
## Usage Example
|
||||||
|
|
||||||
|
First, the watsonx.ai library is an optional dependency, so it must be installed separately:
|
||||||
|
|
||||||
|
```
|
||||||
|
pip install ibm-watsonx-ai
|
||||||
|
```
|
||||||
|
|
||||||
|
Optionally set environment variables (if not passing credentials to `create` directly):
|
||||||
|
|
||||||
|
```sh
|
||||||
|
export WATSONX_API_KEY="YOUR_WATSONX_API_KEY"
|
||||||
|
export WATSONX_PROJECT_ID="YOUR_WATSONX_PROJECT_ID"
|
||||||
|
```
|
||||||
|
|
||||||
|
```python
|
||||||
|
import os
|
||||||
|
import lancedb
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.embeddings import EmbeddingFunctionRegistry
|
||||||
|
|
||||||
|
watsonx_embed = EmbeddingFunctionRegistry.get_instance().get("watsonx").create(
    name="ibm/slate-125m-english-rtrvr",
    # Uncomment and set these if not using environment variables
    # api_key="your_api_key_here",
    # project_id="your_project_id_here",
    # url="your_watsonx_url_here",
    # params={...},
)
|
||||||
|
|
||||||
|
class TextModel(LanceModel):
|
||||||
|
text: str = watsonx_embed.SourceField()
|
||||||
|
vector: Vector(watsonx_embed.ndims()) = watsonx_embed.VectorField()
|
||||||
|
|
||||||
|
data = [
|
||||||
|
{"text": "hello world"},
|
||||||
|
{"text": "goodbye world"},
|
||||||
|
]
|
||||||
|
|
||||||
|
db = lancedb.connect("~/.lancedb")
|
||||||
|
tbl = db.create_table("watsonx_test", schema=TextModel, mode="overwrite")
|
||||||
|
|
||||||
|
tbl.add(data)
|
||||||
|
|
||||||
|
rs = tbl.search("hello").limit(1).to_pandas()
|
||||||
|
print(rs)
|
||||||
|
```
|
||||||
@@ -0,0 +1,50 @@
|
|||||||
|
# Instructor Embeddings
|
||||||
|
[Instructor](https://instructor-embedding.github.io/) is an instruction-finetuned text embedding model that can generate text embeddings tailored to any task (e.g. classification, retrieval, clustering, text evaluation, etc.) and domains (e.g. science, finance, etc.) by simply providing the task instruction, without any finetuning.
|
||||||
|
|
||||||
|
If you want to calculate customized embeddings for specific sentences, you can follow the unified template to write instructions.
|
||||||
|
|
||||||
|
!!! info
|
||||||
|
Represent the `domain` `text_type` for `task_objective`:
|
||||||
|
|
||||||
|
* `domain` is optional, and it specifies the domain of the text, e.g. science, finance, medicine, etc.
|
||||||
|
* `text_type` is required, and it specifies the encoding unit, e.g. sentence, document, paragraph, etc.
|
||||||
|
* `task_objective` is optional, and it specifies the objective of embedding, e.g. retrieve a document, classify the sentence, etc.
|
||||||
|
|
||||||
|
More information about the model can be found at the [source URL](https://github.com/xlang-ai/instructor-embedding).
|
||||||
|
|
||||||
|
| Argument | Type | Default | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `name` | `str` | "hkunlp/instructor-base" | The name of the model to use |
|
||||||
|
| `batch_size` | `int` | `32` | The batch size to use when generating embeddings |
|
||||||
|
| `device` | `str` | `"cpu"` | The device to use when generating embeddings |
|
||||||
|
| `show_progress_bar` | `bool` | `True` | Whether to show a progress bar when generating embeddings |
|
||||||
|
| `normalize_embeddings` | `bool` | `True` | Whether to normalize the embeddings |
|
||||||
|
| `quantize` | `bool` | `False` | Whether to quantize the model |
|
||||||
|
| `source_instruction` | `str` | `"represent the docuement for retreival"` | The instruction for the source column |
|
||||||
|
| `query_instruction` | `str` | `"represent the document for retreiving the most similar documents"` | The instruction for the query |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.embeddings import get_registry, InstructorEmbeddingFunction
|
||||||
|
|
||||||
|
instructor = get_registry().get("instructor").create(
|
||||||
|
source_instruction="represent the docuement for retreival",
|
||||||
|
query_instruction="represent the document for retreiving the most similar documents"
|
||||||
|
)
|
||||||
|
|
||||||
|
class Schema(LanceModel):
|
||||||
|
vector: Vector(instructor.ndims()) = instructor.VectorField()
|
||||||
|
text: str = instructor.SourceField()
|
||||||
|
|
||||||
|
db = lancedb.connect("~/.lancedb")
|
||||||
|
tbl = db.create_table("test", schema=Schema, mode="overwrite")
|
||||||
|
|
||||||
|
texts = [{"text": "Capitalism has been dominant in the Western world since the end of feudalism, but most feel[who?] that..."},
|
||||||
|
{"text": "The disparate impact theory is especially controversial under the Fair Housing Act because the Act..."},
|
||||||
|
{"text": "Disparate impact in United States labor law refers to practices in employment, housing, and other areas that.."}]
|
||||||
|
|
||||||
|
tbl.add(texts)
|
||||||
|
```
|
||||||
@@ -0,0 +1,39 @@
|
|||||||
|
# Jina Embeddings
|
||||||
|
|
||||||
|
Jina embeddings are used to generate embeddings for text and image data.
|
||||||
|
You also need to set the `JINA_API_KEY` environment variable to use the Jina API.
|
||||||
|
|
||||||
|
You can find a list of supported models under [https://jina.ai/embeddings/](https://jina.ai/embeddings/)
|
||||||
|
|
||||||
|
Supported parameters (to be passed in `create` method) are:
|
||||||
|
|
||||||
|
| Parameter | Type | Default Value | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `name` | `str` | `"jina-clip-v1"` | The model ID of the jina model to use |
|
||||||
|
|
||||||
|
Usage Example:
|
||||||
|
|
||||||
|
```python
|
||||||
|
import os
|
||||||
|
import lancedb
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.embeddings import EmbeddingFunctionRegistry
|
||||||
|
|
||||||
|
os.environ['JINA_API_KEY'] = 'jina_*'
|
||||||
|
|
||||||
|
jina_embed = EmbeddingFunctionRegistry.get_instance().get("jina").create(name="jina-embeddings-v2-base-en")
|
||||||
|
|
||||||
|
|
||||||
|
class TextModel(LanceModel):
|
||||||
|
text: str = jina_embed.SourceField()
|
||||||
|
vector: Vector(jina_embed.ndims()) = jina_embed.VectorField()
|
||||||
|
|
||||||
|
|
||||||
|
data = [{"text": "hello world"},
|
||||||
|
{"text": "goodbye world"}]
|
||||||
|
|
||||||
|
db = lancedb.connect("~/.lancedb-2")
|
||||||
|
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
|
||||||
|
|
||||||
|
tbl.add(data)
|
||||||
|
```
|
||||||
@@ -0,0 +1,37 @@
|
|||||||
|
# Ollama embeddings
|
||||||
|
|
||||||
|
Generate embeddings via the [ollama](https://github.com/ollama/ollama-python) python library. More details:
|
||||||
|
|
||||||
|
- [Ollama docs on embeddings](https://github.com/ollama/ollama/blob/main/docs/api.md#generate-embeddings)
|
||||||
|
- [Ollama blog on embeddings](https://ollama.com/blog/embedding-models)
|
||||||
|
|
||||||
|
| Parameter | Type | Default Value | Description |
|
||||||
|
|------------------------|----------------------------|--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `name` | `str` | `nomic-embed-text` | The name of the model. |
|
||||||
|
| `host` | `str` | `http://localhost:11434` | The Ollama host to connect to. |
|
||||||
|
| `options` | `ollama.Options` or `dict` | `None` | Additional model parameters listed in the documentation for the Modelfile such as `temperature`. |
|
||||||
|
| `keep_alive` | `float` or `str` | `"5m"` | Controls how long the model will stay loaded into memory following the request. |
|
||||||
|
| `ollama_client_kwargs` | `dict` | `{}` | kwargs that can be passed to the `ollama.Client`. |
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.embeddings import get_registry
|
||||||
|
|
||||||
|
db = lancedb.connect("/tmp/db")
|
||||||
|
func = get_registry().get("ollama").create(name="nomic-embed-text")
|
||||||
|
|
||||||
|
class Words(LanceModel):
|
||||||
|
text: str = func.SourceField()
|
||||||
|
vector: Vector(func.ndims()) = func.VectorField()
|
||||||
|
|
||||||
|
table = db.create_table("words", schema=Words, mode="overwrite")
|
||||||
|
table.add([
|
||||||
|
{"text": "hello world"},
|
||||||
|
{"text": "goodbye world"}
|
||||||
|
])
|
||||||
|
|
||||||
|
query = "greetings"
|
||||||
|
actual = table.search(query).limit(1).to_pydantic(Words)[0]
|
||||||
|
print(actual.text)
|
||||||
|
```
|
||||||
@@ -0,0 +1,34 @@
|
|||||||
|
# OpenAI embeddings
|
||||||
|
|
||||||
|
LanceDB registers the OpenAI embeddings function in the registry by default, as `openai`. Below are the parameters that you can customize when creating the instances:
|
||||||
|
|
||||||
|
| Parameter | Type | Default Value | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `name` | `str` | `"text-embedding-ada-002"` | The name of the model. |
|
||||||
|
| `dim` | `int` | Model default | For OpenAI's newer text-embedding-3 model, we can specify a dimensionality that is smaller than the 1536 size. This feature supports it |
|
||||||
|
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.embeddings import get_registry
|
||||||
|
|
||||||
|
db = lancedb.connect("/tmp/db")
|
||||||
|
func = get_registry().get("openai").create(name="text-embedding-ada-002")
|
||||||
|
|
||||||
|
class Words(LanceModel):
|
||||||
|
text: str = func.SourceField()
|
||||||
|
vector: Vector(func.ndims()) = func.VectorField()
|
||||||
|
|
||||||
|
table = db.create_table("words", schema=Words, mode="overwrite")
|
||||||
|
table.add(
|
||||||
|
[
|
||||||
|
{"text": "hello world"},
|
||||||
|
{"text": "goodbye world"}
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
query = "greetings"
|
||||||
|
actual = table.search(query).limit(1).to_pydantic(Words)[0]
|
||||||
|
print(actual.text)
|
||||||
|
```
|
||||||
@@ -0,0 +1,174 @@
|
|||||||
|
# Sentence transformers
|
||||||
|
Allows you to set parameters when registering a `sentence-transformers` object.
|
||||||
|
|
||||||
|
!!! info
|
||||||
|
Sentence transformer embeddings are normalized by default. It is recommended to use normalized embeddings for similarity search.
|
||||||
|
|
||||||
|
| Parameter | Type | Default Value | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `name` | `str` | `all-MiniLM-L6-v2` | The name of the model |
|
||||||
|
| `device` | `str` | `cpu` | The device to run the model on (can be `cpu` or `gpu`) |
|
||||||
|
| `normalize` | `bool` | `True` | Whether to normalize the input text before feeding it to the model |
|
||||||
|
| `trust_remote_code` | `bool` | `False` | Whether to trust and execute remote code from the model's Huggingface repository |
|
||||||
|
|
||||||
|
|
||||||
|
??? "Check out available sentence-transformer models here!"
|
||||||
|
```markdown
|
||||||
|
- sentence-transformers/all-MiniLM-L12-v2
|
||||||
|
- sentence-transformers/paraphrase-mpnet-base-v2
|
||||||
|
- sentence-transformers/gtr-t5-base
|
||||||
|
- sentence-transformers/LaBSE
|
||||||
|
- sentence-transformers/all-MiniLM-L6-v2
|
||||||
|
- sentence-transformers/bert-base-nli-max-tokens
|
||||||
|
- sentence-transformers/bert-base-nli-mean-tokens
|
||||||
|
- sentence-transformers/bert-base-nli-stsb-mean-tokens
|
||||||
|
- sentence-transformers/bert-base-wikipedia-sections-mean-tokens
|
||||||
|
- sentence-transformers/bert-large-nli-cls-token
|
||||||
|
- sentence-transformers/bert-large-nli-max-tokens
|
||||||
|
- sentence-transformers/bert-large-nli-mean-tokens
|
||||||
|
- sentence-transformers/bert-large-nli-stsb-mean-tokens
|
||||||
|
- sentence-transformers/distilbert-base-nli-max-tokens
|
||||||
|
- sentence-transformers/distilbert-base-nli-mean-tokens
|
||||||
|
- sentence-transformers/distilbert-base-nli-stsb-mean-tokens
|
||||||
|
- sentence-transformers/distilroberta-base-msmarco-v1
|
||||||
|
- sentence-transformers/distilroberta-base-msmarco-v2
|
||||||
|
- sentence-transformers/nli-bert-base-cls-pooling
|
||||||
|
- sentence-transformers/nli-bert-base-max-pooling
|
||||||
|
- sentence-transformers/nli-bert-base
|
||||||
|
- sentence-transformers/nli-bert-large-cls-pooling
|
||||||
|
- sentence-transformers/nli-bert-large-max-pooling
|
||||||
|
- sentence-transformers/nli-bert-large
|
||||||
|
- sentence-transformers/nli-distilbert-base-max-pooling
|
||||||
|
- sentence-transformers/nli-distilbert-base
|
||||||
|
- sentence-transformers/nli-roberta-base
|
||||||
|
- sentence-transformers/nli-roberta-large
|
||||||
|
- sentence-transformers/roberta-base-nli-mean-tokens
|
||||||
|
- sentence-transformers/roberta-base-nli-stsb-mean-tokens
|
||||||
|
- sentence-transformers/roberta-large-nli-mean-tokens
|
||||||
|
- sentence-transformers/roberta-large-nli-stsb-mean-tokens
|
||||||
|
- sentence-transformers/stsb-bert-base
|
||||||
|
- sentence-transformers/stsb-bert-large
|
||||||
|
- sentence-transformers/stsb-distilbert-base
|
||||||
|
- sentence-transformers/stsb-roberta-base
|
||||||
|
- sentence-transformers/stsb-roberta-large
|
||||||
|
- sentence-transformers/xlm-r-100langs-bert-base-nli-mean-tokens
|
||||||
|
- sentence-transformers/xlm-r-100langs-bert-base-nli-stsb-mean-tokens
|
||||||
|
- sentence-transformers/xlm-r-base-en-ko-nli-ststb
|
||||||
|
- sentence-transformers/xlm-r-bert-base-nli-mean-tokens
|
||||||
|
- sentence-transformers/xlm-r-bert-base-nli-stsb-mean-tokens
|
||||||
|
- sentence-transformers/xlm-r-large-en-ko-nli-ststb
|
||||||
|
- sentence-transformers/bert-base-nli-cls-token
|
||||||
|
- sentence-transformers/all-distilroberta-v1
|
||||||
|
- sentence-transformers/multi-qa-MiniLM-L6-dot-v1
|
||||||
|
- sentence-transformers/multi-qa-distilbert-cos-v1
|
||||||
|
- sentence-transformers/multi-qa-distilbert-dot-v1
|
||||||
|
- sentence-transformers/multi-qa-mpnet-base-cos-v1
|
||||||
|
- sentence-transformers/multi-qa-mpnet-base-dot-v1
|
||||||
|
- sentence-transformers/nli-distilroberta-base-v2
|
||||||
|
- sentence-transformers/all-MiniLM-L6-v1
|
||||||
|
- sentence-transformers/all-mpnet-base-v1
|
||||||
|
- sentence-transformers/all-mpnet-base-v2
|
||||||
|
- sentence-transformers/all-roberta-large-v1
|
||||||
|
- sentence-transformers/allenai-specter
|
||||||
|
- sentence-transformers/average_word_embeddings_glove.6B.300d
|
||||||
|
- sentence-transformers/average_word_embeddings_glove.840B.300d
|
||||||
|
- sentence-transformers/average_word_embeddings_komninos
|
||||||
|
- sentence-transformers/average_word_embeddings_levy_dependency
|
||||||
|
- sentence-transformers/clip-ViT-B-32-multilingual-v1
|
||||||
|
- sentence-transformers/clip-ViT-B-32
|
||||||
|
- sentence-transformers/distilbert-base-nli-stsb-quora-ranking
|
||||||
|
- sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking
|
||||||
|
- sentence-transformers/distilroberta-base-paraphrase-v1
|
||||||
|
- sentence-transformers/distiluse-base-multilingual-cased-v1
|
||||||
|
- sentence-transformers/distiluse-base-multilingual-cased-v2
|
||||||
|
- sentence-transformers/distiluse-base-multilingual-cased
|
||||||
|
- sentence-transformers/facebook-dpr-ctx_encoder-multiset-base
|
||||||
|
- sentence-transformers/facebook-dpr-ctx_encoder-single-nq-base
|
||||||
|
- sentence-transformers/facebook-dpr-question_encoder-multiset-base
|
||||||
|
- sentence-transformers/facebook-dpr-question_encoder-single-nq-base
|
||||||
|
- sentence-transformers/gtr-t5-large
|
||||||
|
- sentence-transformers/gtr-t5-xl
|
||||||
|
- sentence-transformers/gtr-t5-xxl
|
||||||
|
- sentence-transformers/msmarco-MiniLM-L-12-v3
|
||||||
|
- sentence-transformers/msmarco-MiniLM-L-6-v3
|
||||||
|
- sentence-transformers/msmarco-MiniLM-L12-cos-v5
|
||||||
|
- sentence-transformers/msmarco-MiniLM-L6-cos-v5
|
||||||
|
- sentence-transformers/msmarco-bert-base-dot-v5
|
||||||
|
- sentence-transformers/msmarco-bert-co-condensor
|
||||||
|
- sentence-transformers/msmarco-distilbert-base-dot-prod-v3
|
||||||
|
- sentence-transformers/msmarco-distilbert-base-tas-b
|
||||||
|
- sentence-transformers/msmarco-distilbert-base-v2
|
||||||
|
- sentence-transformers/msmarco-distilbert-base-v3
|
||||||
|
- sentence-transformers/msmarco-distilbert-base-v4
|
||||||
|
- sentence-transformers/msmarco-distilbert-cos-v5
|
||||||
|
- sentence-transformers/msmarco-distilbert-dot-v5
|
||||||
|
- sentence-transformers/msmarco-distilbert-multilingual-en-de-v2-tmp-lng-aligned
|
||||||
|
- sentence-transformers/msmarco-distilbert-multilingual-en-de-v2-tmp-trained-scratch
|
||||||
|
- sentence-transformers/msmarco-distilroberta-base-v2
|
||||||
|
- sentence-transformers/msmarco-roberta-base-ance-firstp
|
||||||
|
- sentence-transformers/msmarco-roberta-base-v2
|
||||||
|
- sentence-transformers/msmarco-roberta-base-v3
|
||||||
|
- sentence-transformers/multi-qa-MiniLM-L6-cos-v1
|
||||||
|
- sentence-transformers/nli-mpnet-base-v2
|
||||||
|
- sentence-transformers/nli-roberta-base-v2
|
||||||
|
- sentence-transformers/nq-distilbert-base-v1
|
||||||
|
- sentence-transformers/paraphrase-MiniLM-L12-v2
|
||||||
|
- sentence-transformers/paraphrase-MiniLM-L3-v2
|
||||||
|
- sentence-transformers/paraphrase-MiniLM-L6-v2
|
||||||
|
- sentence-transformers/paraphrase-TinyBERT-L6-v2
|
||||||
|
- sentence-transformers/paraphrase-albert-base-v2
|
||||||
|
- sentence-transformers/paraphrase-albert-small-v2
|
||||||
|
- sentence-transformers/paraphrase-distilroberta-base-v1
|
||||||
|
- sentence-transformers/paraphrase-distilroberta-base-v2
|
||||||
|
- sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
|
||||||
|
- sentence-transformers/paraphrase-multilingual-mpnet-base-v2
|
||||||
|
- sentence-transformers/paraphrase-xlm-r-multilingual-v1
|
||||||
|
- sentence-transformers/quora-distilbert-base
|
||||||
|
- sentence-transformers/quora-distilbert-multilingual
|
||||||
|
- sentence-transformers/sentence-t5-base
|
||||||
|
- sentence-transformers/sentence-t5-large
|
||||||
|
- sentence-transformers/sentence-t5-xxl
|
||||||
|
- sentence-transformers/sentence-t5-xl
|
||||||
|
- sentence-transformers/stsb-distilroberta-base-v2
|
||||||
|
- sentence-transformers/stsb-mpnet-base-v2
|
||||||
|
- sentence-transformers/stsb-roberta-base-v2
|
||||||
|
- sentence-transformers/stsb-xlm-r-multilingual
|
||||||
|
- sentence-transformers/xlm-r-distilroberta-base-paraphrase-v1
|
||||||
|
- sentence-transformers/clip-ViT-L-14
|
||||||
|
- sentence-transformers/clip-ViT-B-16
|
||||||
|
- sentence-transformers/use-cmlm-multilingual
|
||||||
|
- sentence-transformers/all-MiniLM-L12-v1
|
||||||
|
```
|
||||||
|
|
||||||
|
!!! info
|
||||||
|
You can also load many other model architectures from the library. For example models from sources such as BAAI, nomic, salesforce research, etc.
|
||||||
|
See this HF hub page for all [supported models](https://huggingface.co/models?library=sentence-transformers).
|
||||||
|
|
||||||
|
!!! note "BAAI Embeddings example"
|
||||||
|
Here is an example that uses BAAI embedding model from the HuggingFace Hub [supported models](https://huggingface.co/models?library=sentence-transformers)
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.embeddings import get_registry
|
||||||
|
|
||||||
|
db = lancedb.connect("/tmp/db")
|
||||||
|
model = get_registry().get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
|
||||||
|
|
||||||
|
class Words(LanceModel):
|
||||||
|
text: str = model.SourceField()
|
||||||
|
vector: Vector(model.ndims()) = model.VectorField()
|
||||||
|
|
||||||
|
table = db.create_table("words", schema=Words)
|
||||||
|
table.add(
|
||||||
|
[
|
||||||
|
{"text": "hello world"},
|
||||||
|
{"text": "goodbye world"}
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
query = "greetings"
|
||||||
|
actual = table.search(query).limit(1).to_pydantic(Words)[0]
|
||||||
|
print(actual.text)
|
||||||
|
```
|
||||||
|
Visit sentence-transformers [HuggingFace HUB](https://huggingface.co/sentence-transformers) page for more information on the available models.
|
||||||
|
|
||||||
@@ -15,198 +15,226 @@ There is another optional layer of abstraction available: `TextEmbeddingFunction
|
|||||||
|
|
||||||
Let's implement `SentenceTransformerEmbeddings` class. All you need to do is implement the `generate_embeddings()` and `ndims` function to handle the input types you expect and register the class in the global `EmbeddingFunctionRegistry`
|
Let's implement `SentenceTransformerEmbeddings` class. All you need to do is implement the `generate_embeddings()` and `ndims` function to handle the input types you expect and register the class in the global `EmbeddingFunctionRegistry`
|
||||||
|
|
||||||
```python
|
|
||||||
from lancedb.embeddings import register
|
|
||||||
from lancedb.util import attempt_import_or_raise
|
|
||||||
|
|
||||||
@register("sentence-transformers")
|
=== "Python"
|
||||||
class SentenceTransformerEmbeddings(TextEmbeddingFunction):
|
|
||||||
name: str = "all-MiniLM-L6-v2"
|
|
||||||
# set more default instance vars like device, etc.
|
|
||||||
|
|
||||||
def __init__(self, **kwargs):
|
```python
|
||||||
super().__init__(**kwargs)
|
from lancedb.embeddings import register
|
||||||
self._ndims = None
|
from lancedb.util import attempt_import_or_raise
|
||||||
|
|
||||||
def generate_embeddings(self, texts):
|
|
||||||
return self._embedding_model().encode(list(texts), ...).tolist()
|
|
||||||
|
|
||||||
def ndims(self):
|
@register("sentence-transformers")
|
||||||
if self._ndims is None:
|
class SentenceTransformerEmbeddings(TextEmbeddingFunction):
|
||||||
self._ndims = len(self.generate_embeddings("foo")[0])
|
name: str = "all-MiniLM-L6-v2"
|
||||||
return self._ndims
|
# set more default instance vars like device, etc.
|
||||||
|
|
||||||
@cached(cache={})
|
def __init__(self, **kwargs):
|
||||||
def _embedding_model(self):
|
super().__init__(**kwargs)
|
||||||
return sentence_transformers.SentenceTransformer(name)
|
self._ndims = None
|
||||||
```
|
|
||||||
|
|
||||||
This is a stripped down version of our implementation of `SentenceTransformerEmbeddings` that removes certain optimizations and default settings.
|
def generate_embeddings(self, texts):
|
||||||
|
return self._embedding_model().encode(list(texts), ...).tolist()
|
||||||
|
|
||||||
|
def ndims(self):
|
||||||
|
if self._ndims is None:
|
||||||
|
self._ndims = len(self.generate_embeddings("foo")[0])
|
||||||
|
return self._ndims
|
||||||
|
|
||||||
|
@cached(cache={})
|
||||||
|
def _embedding_model(self):
|
||||||
|
return sentence_transformers.SentenceTransformer(name)
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "TypeScript"
|
||||||
|
|
||||||
|
```ts
|
||||||
|
--8<--- "nodejs/examples/custom_embedding_function.ts:imports"
|
||||||
|
|
||||||
|
--8<--- "nodejs/examples/custom_embedding_function.ts:embedding_impl"
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
This is a stripped down version of our implementation of `SentenceTransformerEmbeddings` that removes certain optimizations and default settings.
|
||||||
|
|
||||||
Now you can use this embedding function to create your table schema and that's it! you can then ingest data and run queries without manually vectorizing the inputs.
|
Now you can use this embedding function to create your table schema and that's it! you can then ingest data and run queries without manually vectorizing the inputs.
|
||||||
|
|
||||||
```python
|
=== "Python"
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
|
||||||
|
|
||||||
registry = EmbeddingFunctionRegistry.get_instance()
|
```python
|
||||||
stransformer = registry.get("sentence-transformers").create()
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
|
||||||
class TextModelSchema(LanceModel):
|
registry = EmbeddingFunctionRegistry.get_instance()
|
||||||
vector: Vector(stransformer.ndims) = stransformer.VectorField()
|
stransformer = registry.get("sentence-transformers").create()
|
||||||
text: str = stransformer.SourceField()
|
|
||||||
|
|
||||||
tbl = db.create_table("table", schema=TextModelSchema)
|
class TextModelSchema(LanceModel):
|
||||||
|
vector: Vector(stransformer.ndims) = stransformer.VectorField()
|
||||||
|
text: str = stransformer.SourceField()
|
||||||
|
|
||||||
tbl.add(pd.DataFrame({"text": ["halo", "world"]}))
|
tbl = db.create_table("table", schema=TextModelSchema)
|
||||||
result = tbl.search("world").limit(5)
|
|
||||||
```
|
|
||||||
|
|
||||||
NOTE:
|
tbl.add(pd.DataFrame({"text": ["halo", "world"]}))
|
||||||
|
result = tbl.search("world").limit(5)
|
||||||
|
```
|
||||||
|
|
||||||
You can always implement the `EmbeddingFunction` interface directly if you want or need to, `TextEmbeddingFunction` just makes it much simpler and faster for you to do so, by setting up the boilerplate for the text-specific use case
|
=== "TypeScript"
|
||||||
|
|
||||||
|
```ts
|
||||||
|
--8<--- "nodejs/examples/custom_embedding_function.ts:call_custom_function"
|
||||||
|
```
|
||||||
|
|
||||||
|
!!! note
|
||||||
|
|
||||||
|
    You can always implement the `EmbeddingFunction` interface directly if you want or need to, `TextEmbeddingFunction` just makes it much simpler and faster for you to do so, by setting up the boilerplate for the text-specific use case
|
||||||
|
|
||||||
## Multi-modal embedding function example
|
## Multi-modal embedding function example
|
||||||
You can also use the `EmbeddingFunction` interface to implement more complex workflows such as multi-modal embedding function support. LanceDB implements the `OpenClipEmbeddings` class that supports multi-modal search. Here's the implementation that you can use as a reference to build your own multi-modal embedding functions.
|
You can also use the `EmbeddingFunction` interface to implement more complex workflows such as multi-modal embedding function support.
|
||||||
|
|
||||||
```python
|
=== "Python"
|
||||||
@register("open-clip")
|
|
||||||
class OpenClipEmbeddings(EmbeddingFunction):
|
|
||||||
name: str = "ViT-B-32"
|
|
||||||
pretrained: str = "laion2b_s34b_b79k"
|
|
||||||
device: str = "cpu"
|
|
||||||
batch_size: int = 64
|
|
||||||
normalize: bool = True
|
|
||||||
_model = PrivateAttr()
|
|
||||||
_preprocess = PrivateAttr()
|
|
||||||
_tokenizer = PrivateAttr()
|
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
LanceDB implements the `OpenClipEmbeddings` class that supports multi-modal search. Here's the implementation that you can use as a reference to build your own multi-modal embedding functions.
|
||||||
super().__init__(*args, **kwargs)
|
|
||||||
open_clip = attempt_import_or_raise("open_clip", "open-clip") # EmbeddingFunction util to import external libs and raise if not found
|
|
||||||
model, _, preprocess = open_clip.create_model_and_transforms(
|
|
||||||
self.name, pretrained=self.pretrained
|
|
||||||
)
|
|
||||||
model.to(self.device)
|
|
||||||
self._model, self._preprocess = model, preprocess
|
|
||||||
self._tokenizer = open_clip.get_tokenizer(self.name)
|
|
||||||
self._ndims = None
|
|
||||||
|
|
||||||
def ndims(self):
|
```python
|
||||||
if self._ndims is None:
|
@register("open-clip")
|
||||||
self._ndims = self.generate_text_embeddings("foo").shape[0]
|
class OpenClipEmbeddings(EmbeddingFunction):
|
||||||
return self._ndims
|
name: str = "ViT-B-32"
|
||||||
|
pretrained: str = "laion2b_s34b_b79k"
|
||||||
|
device: str = "cpu"
|
||||||
|
batch_size: int = 64
|
||||||
|
normalize: bool = True
|
||||||
|
_model = PrivateAttr()
|
||||||
|
_preprocess = PrivateAttr()
|
||||||
|
_tokenizer = PrivateAttr()
|
||||||
|
|
||||||
def compute_query_embeddings(
|
def __init__(self, *args, **kwargs):
|
||||||
self, query: Union[str, "PIL.Image.Image"], *args, **kwargs
|
super().__init__(*args, **kwargs)
|
||||||
) -> List[np.ndarray]:
|
open_clip = attempt_import_or_raise("open_clip", "open-clip") # EmbeddingFunction util to import external libs and raise if not found
|
||||||
"""
|
model, _, preprocess = open_clip.create_model_and_transforms(
|
||||||
Compute the embeddings for a given user query
|
self.name, pretrained=self.pretrained
|
||||||
|
)
|
||||||
|
model.to(self.device)
|
||||||
|
self._model, self._preprocess = model, preprocess
|
||||||
|
self._tokenizer = open_clip.get_tokenizer(self.name)
|
||||||
|
self._ndims = None
|
||||||
|
|
||||||
Parameters
|
def ndims(self):
|
||||||
----------
|
if self._ndims is None:
|
||||||
query : Union[str, PIL.Image.Image]
|
self._ndims = self.generate_text_embeddings("foo").shape[0]
|
||||||
The query to embed. A query can be either text or an image.
|
return self._ndims
|
||||||
"""
|
|
||||||
if isinstance(query, str):
|
def compute_query_embeddings(
|
||||||
return [self.generate_text_embeddings(query)]
|
self, query: Union[str, "PIL.Image.Image"], *args, **kwargs
|
||||||
else:
|
) -> List[np.ndarray]:
|
||||||
|
"""
|
||||||
|
Compute the embeddings for a given user query
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
query : Union[str, PIL.Image.Image]
|
||||||
|
The query to embed. A query can be either text or an image.
|
||||||
|
"""
|
||||||
|
if isinstance(query, str):
|
||||||
|
return [self.generate_text_embeddings(query)]
|
||||||
|
else:
|
||||||
|
PIL = attempt_import_or_raise("PIL", "pillow")
|
||||||
|
if isinstance(query, PIL.Image.Image):
|
||||||
|
return [self.generate_image_embedding(query)]
|
||||||
|
else:
|
||||||
|
raise TypeError("OpenClip supports str or PIL Image as query")
|
||||||
|
|
||||||
|
def generate_text_embeddings(self, text: str) -> np.ndarray:
|
||||||
|
torch = attempt_import_or_raise("torch")
|
||||||
|
text = self.sanitize_input(text)
|
||||||
|
text = self._tokenizer(text)
|
||||||
|
text.to(self.device)
|
||||||
|
with torch.no_grad():
|
||||||
|
text_features = self._model.encode_text(text.to(self.device))
|
||||||
|
if self.normalize:
|
||||||
|
text_features /= text_features.norm(dim=-1, keepdim=True)
|
||||||
|
return text_features.cpu().numpy().squeeze()
|
||||||
|
|
||||||
|
def sanitize_input(self, images: IMAGES) -> Union[List[bytes], np.ndarray]:
|
||||||
|
"""
|
||||||
|
Sanitize the input to the embedding function.
|
||||||
|
"""
|
||||||
|
if isinstance(images, (str, bytes)):
|
||||||
|
images = [images]
|
||||||
|
elif isinstance(images, pa.Array):
|
||||||
|
images = images.to_pylist()
|
||||||
|
elif isinstance(images, pa.ChunkedArray):
|
||||||
|
images = images.combine_chunks().to_pylist()
|
||||||
|
return images
|
||||||
|
|
||||||
|
def compute_source_embeddings(
|
||||||
|
self, images: IMAGES, *args, **kwargs
|
||||||
|
) -> List[np.array]:
|
||||||
|
"""
|
||||||
|
Get the embeddings for the given images
|
||||||
|
"""
|
||||||
|
images = self.sanitize_input(images)
|
||||||
|
embeddings = []
|
||||||
|
for i in range(0, len(images), self.batch_size):
|
||||||
|
j = min(i + self.batch_size, len(images))
|
||||||
|
batch = images[i:j]
|
||||||
|
embeddings.extend(self._parallel_get(batch))
|
||||||
|
return embeddings
|
||||||
|
|
||||||
|
def _parallel_get(self, images: Union[List[str], List[bytes]]) -> List[np.ndarray]:
|
||||||
|
"""
|
||||||
|
Issue concurrent requests to retrieve the image data
|
||||||
|
"""
|
||||||
|
with concurrent.futures.ThreadPoolExecutor() as executor:
|
||||||
|
futures = [
|
||||||
|
executor.submit(self.generate_image_embedding, image)
|
||||||
|
for image in images
|
||||||
|
]
|
||||||
|
return [future.result() for future in futures]
|
||||||
|
|
||||||
|
def generate_image_embedding(
|
||||||
|
self, image: Union[str, bytes, "PIL.Image.Image"]
|
||||||
|
) -> np.ndarray:
|
||||||
|
"""
|
||||||
|
Generate the embedding for a single image
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
image : Union[str, bytes, PIL.Image.Image]
|
||||||
|
The image to embed. If the image is a str, it is treated as a uri.
|
||||||
|
If the image is bytes, it is treated as the raw image bytes.
|
||||||
|
"""
|
||||||
|
torch = attempt_import_or_raise("torch")
|
||||||
|
# TODO handle retry and errors for https
|
||||||
|
image = self._to_pil(image)
|
||||||
|
image = self._preprocess(image).unsqueeze(0)
|
||||||
|
with torch.no_grad():
|
||||||
|
return self._encode_and_normalize_image(image)
|
||||||
|
|
||||||
|
def _to_pil(self, image: Union[str, bytes]):
|
||||||
PIL = attempt_import_or_raise("PIL", "pillow")
|
PIL = attempt_import_or_raise("PIL", "pillow")
|
||||||
if isinstance(query, PIL.Image.Image):
|
if isinstance(image, bytes):
|
||||||
return [self.generate_image_embedding(query)]
|
return PIL.Image.open(io.BytesIO(image))
|
||||||
else:
|
if isinstance(image, PIL.Image.Image):
|
||||||
raise TypeError("OpenClip supports str or PIL Image as query")
|
return image
|
||||||
|
elif isinstance(image, str):
|
||||||
|
parsed = urlparse.urlparse(image)
|
||||||
|
# TODO handle drive letter on windows.
|
||||||
|
if parsed.scheme == "file":
|
||||||
|
return PIL.Image.open(parsed.path)
|
||||||
|
elif parsed.scheme == "":
|
||||||
|
return PIL.Image.open(image if os.name == "nt" else parsed.path)
|
||||||
|
elif parsed.scheme.startswith("http"):
|
||||||
|
return PIL.Image.open(io.BytesIO(url_retrieve(image)))
|
||||||
|
else:
|
||||||
|
raise NotImplementedError("Only local and http(s) urls are supported")
|
||||||
|
|
||||||
def generate_text_embeddings(self, text: str) -> np.ndarray:
|
def _encode_and_normalize_image(self, image_tensor: "torch.Tensor"):
|
||||||
torch = attempt_import_or_raise("torch")
|
"""
|
||||||
text = self.sanitize_input(text)
|
encode a single image tensor and optionally normalize the output
|
||||||
text = self._tokenizer(text)
|
"""
|
||||||
text.to(self.device)
|
image_features = self._model.encode_image(image_tensor)
|
||||||
with torch.no_grad():
|
|
||||||
text_features = self._model.encode_text(text.to(self.device))
|
|
||||||
if self.normalize:
|
if self.normalize:
|
||||||
text_features /= text_features.norm(dim=-1, keepdim=True)
|
image_features /= image_features.norm(dim=-1, keepdim=True)
|
||||||
return text_features.cpu().numpy().squeeze()
|
return image_features.cpu().numpy().squeeze()
|
||||||
|
```
|
||||||
|
|
||||||
def sanitize_input(self, images: IMAGES) -> Union[List[bytes], np.ndarray]:
|
=== "TypeScript"
|
||||||
"""
|
|
||||||
Sanitize the input to the embedding function.
|
|
||||||
"""
|
|
||||||
if isinstance(images, (str, bytes)):
|
|
||||||
images = [images]
|
|
||||||
elif isinstance(images, pa.Array):
|
|
||||||
images = images.to_pylist()
|
|
||||||
elif isinstance(images, pa.ChunkedArray):
|
|
||||||
images = images.combine_chunks().to_pylist()
|
|
||||||
return images
|
|
||||||
|
|
||||||
def compute_source_embeddings(
|
Coming Soon! See this [issue](https://github.com/lancedb/lancedb/issues/1482) to track the status!
|
||||||
self, images: IMAGES, *args, **kwargs
|
|
||||||
) -> List[np.array]:
|
|
||||||
"""
|
|
||||||
Get the embeddings for the given images
|
|
||||||
"""
|
|
||||||
images = self.sanitize_input(images)
|
|
||||||
embeddings = []
|
|
||||||
for i in range(0, len(images), self.batch_size):
|
|
||||||
j = min(i + self.batch_size, len(images))
|
|
||||||
batch = images[i:j]
|
|
||||||
embeddings.extend(self._parallel_get(batch))
|
|
||||||
return embeddings
|
|
||||||
|
|
||||||
def _parallel_get(self, images: Union[List[str], List[bytes]]) -> List[np.ndarray]:
|
|
||||||
"""
|
|
||||||
Issue concurrent requests to retrieve the image data
|
|
||||||
"""
|
|
||||||
with concurrent.futures.ThreadPoolExecutor() as executor:
|
|
||||||
futures = [
|
|
||||||
executor.submit(self.generate_image_embedding, image)
|
|
||||||
for image in images
|
|
||||||
]
|
|
||||||
return [future.result() for future in futures]
|
|
||||||
|
|
||||||
def generate_image_embedding(
|
|
||||||
self, image: Union[str, bytes, "PIL.Image.Image"]
|
|
||||||
) -> np.ndarray:
|
|
||||||
"""
|
|
||||||
Generate the embedding for a single image
|
|
||||||
|
|
||||||
Parameters
|
|
||||||
----------
|
|
||||||
image : Union[str, bytes, PIL.Image.Image]
|
|
||||||
The image to embed. If the image is a str, it is treated as a uri.
|
|
||||||
If the image is bytes, it is treated as the raw image bytes.
|
|
||||||
"""
|
|
||||||
torch = attempt_import_or_raise("torch")
|
|
||||||
# TODO handle retry and errors for https
|
|
||||||
image = self._to_pil(image)
|
|
||||||
image = self._preprocess(image).unsqueeze(0)
|
|
||||||
with torch.no_grad():
|
|
||||||
return self._encode_and_normalize_image(image)
|
|
||||||
|
|
||||||
def _to_pil(self, image: Union[str, bytes]):
|
|
||||||
PIL = attempt_import_or_raise("PIL", "pillow")
|
|
||||||
if isinstance(image, bytes):
|
|
||||||
return PIL.Image.open(io.BytesIO(image))
|
|
||||||
if isinstance(image, PIL.Image.Image):
|
|
||||||
return image
|
|
||||||
elif isinstance(image, str):
|
|
||||||
parsed = urlparse.urlparse(image)
|
|
||||||
# TODO handle drive letter on windows.
|
|
||||||
if parsed.scheme == "file":
|
|
||||||
return PIL.Image.open(parsed.path)
|
|
||||||
elif parsed.scheme == "":
|
|
||||||
return PIL.Image.open(image if os.name == "nt" else parsed.path)
|
|
||||||
elif parsed.scheme.startswith("http"):
|
|
||||||
return PIL.Image.open(io.BytesIO(url_retrieve(image)))
|
|
||||||
else:
|
|
||||||
raise NotImplementedError("Only local and http(s) urls are supported")
|
|
||||||
|
|
||||||
def _encode_and_normalize_image(self, image_tensor: "torch.Tensor"):
|
|
||||||
"""
|
|
||||||
encode a single image tensor and optionally normalize the output
|
|
||||||
"""
|
|
||||||
image_features = self._model.encode_image(image_tensor)
|
|
||||||
if self.normalize:
|
|
||||||
image_features /= image_features.norm(dim=-1, keepdim=True)
|
|
||||||
return image_features.cpu().numpy().squeeze()
|
|
||||||
```
|
|
||||||
|
|||||||
@@ -1,723 +1,84 @@
|
|||||||
There are various embedding functions available out of the box with LanceDB to manage your embeddings implicitly. We're actively working on adding other popular embedding APIs and models.
|
# 📚 Available Embedding Models
|
||||||
|
|
||||||
## Text embedding functions
|
There are various embedding functions available out of the box with LanceDB to manage your embeddings implicitly. We're actively working on adding other popular embedding APIs and models. 🚀
|
||||||
Contains the text embedding functions registered by default.
|
|
||||||
|
|
||||||
* Embedding functions have an inbuilt rate limit handler wrapper for source and query embedding function calls that retry with exponential backoff.
|
Before jumping on the list of available models, let's understand how to get an embedding model initialized and configured to use in our code:
|
||||||
* Each `EmbeddingFunction` implementation automatically takes `max_retries` as an argument which has the default value of 7.
|
|
||||||
|
|
||||||
### Sentence transformers
|
!!! example "Example usage"
|
||||||
Allows you to set parameters when registering a `sentence-transformers` object.
|
|
||||||
|
|
||||||
!!! info
|
|
||||||
Sentence transformer embeddings are normalized by default. It is recommended to use normalized embeddings for similarity search.
|
|
||||||
|
|
||||||
| Parameter | Type | Default Value | Description |
|
|
||||||
|---|---|---|---|
|
|
||||||
| `name` | `str` | `all-MiniLM-L6-v2` | The name of the model |
|
|
||||||
| `device` | `str` | `cpu` | The device to run the model on (can be `cpu` or `gpu`) |
|
|
||||||
| `normalize` | `bool` | `True` | Whether to normalize the input text before feeding it to the model |
|
|
||||||
| `trust_remote_code` | `bool` | `False` | Whether to trust and execute remote code from the model's Huggingface repository |
|
|
||||||
|
|
||||||
|
|
||||||
??? "Check out available sentence-transformer models here!"
|
|
||||||
```markdown
|
|
||||||
- sentence-transformers/all-MiniLM-L12-v2
|
|
||||||
- sentence-transformers/paraphrase-mpnet-base-v2
|
|
||||||
- sentence-transformers/gtr-t5-base
|
|
||||||
- sentence-transformers/LaBSE
|
|
||||||
- sentence-transformers/all-MiniLM-L6-v2
|
|
||||||
- sentence-transformers/bert-base-nli-max-tokens
|
|
||||||
- sentence-transformers/bert-base-nli-mean-tokens
|
|
||||||
- sentence-transformers/bert-base-nli-stsb-mean-tokens
|
|
||||||
- sentence-transformers/bert-base-wikipedia-sections-mean-tokens
|
|
||||||
- sentence-transformers/bert-large-nli-cls-token
|
|
||||||
- sentence-transformers/bert-large-nli-max-tokens
|
|
||||||
- sentence-transformers/bert-large-nli-mean-tokens
|
|
||||||
- sentence-transformers/bert-large-nli-stsb-mean-tokens
|
|
||||||
- sentence-transformers/distilbert-base-nli-max-tokens
|
|
||||||
- sentence-transformers/distilbert-base-nli-mean-tokens
|
|
||||||
- sentence-transformers/distilbert-base-nli-stsb-mean-tokens
|
|
||||||
- sentence-transformers/distilroberta-base-msmarco-v1
|
|
||||||
- sentence-transformers/distilroberta-base-msmarco-v2
|
|
||||||
- sentence-transformers/nli-bert-base-cls-pooling
|
|
||||||
- sentence-transformers/nli-bert-base-max-pooling
|
|
||||||
- sentence-transformers/nli-bert-base
|
|
||||||
- sentence-transformers/nli-bert-large-cls-pooling
|
|
||||||
- sentence-transformers/nli-bert-large-max-pooling
|
|
||||||
- sentence-transformers/nli-bert-large
|
|
||||||
- sentence-transformers/nli-distilbert-base-max-pooling
|
|
||||||
- sentence-transformers/nli-distilbert-base
|
|
||||||
- sentence-transformers/nli-roberta-base
|
|
||||||
- sentence-transformers/nli-roberta-large
|
|
||||||
- sentence-transformers/roberta-base-nli-mean-tokens
|
|
||||||
- sentence-transformers/roberta-base-nli-stsb-mean-tokens
|
|
||||||
- sentence-transformers/roberta-large-nli-mean-tokens
|
|
||||||
- sentence-transformers/roberta-large-nli-stsb-mean-tokens
|
|
||||||
- sentence-transformers/stsb-bert-base
|
|
||||||
- sentence-transformers/stsb-bert-large
|
|
||||||
- sentence-transformers/stsb-distilbert-base
|
|
||||||
- sentence-transformers/stsb-roberta-base
|
|
||||||
- sentence-transformers/stsb-roberta-large
|
|
||||||
- sentence-transformers/xlm-r-100langs-bert-base-nli-mean-tokens
|
|
||||||
- sentence-transformers/xlm-r-100langs-bert-base-nli-stsb-mean-tokens
|
|
||||||
- sentence-transformers/xlm-r-base-en-ko-nli-ststb
|
|
||||||
- sentence-transformers/xlm-r-bert-base-nli-mean-tokens
|
|
||||||
- sentence-transformers/xlm-r-bert-base-nli-stsb-mean-tokens
|
|
||||||
- sentence-transformers/xlm-r-large-en-ko-nli-ststb
|
|
||||||
- sentence-transformers/bert-base-nli-cls-token
|
|
||||||
- sentence-transformers/all-distilroberta-v1
|
|
||||||
- sentence-transformers/multi-qa-MiniLM-L6-dot-v1
|
|
||||||
- sentence-transformers/multi-qa-distilbert-cos-v1
|
|
||||||
- sentence-transformers/multi-qa-distilbert-dot-v1
|
|
||||||
- sentence-transformers/multi-qa-mpnet-base-cos-v1
|
|
||||||
- sentence-transformers/multi-qa-mpnet-base-dot-v1
|
|
||||||
- sentence-transformers/nli-distilroberta-base-v2
|
|
||||||
- sentence-transformers/all-MiniLM-L6-v1
|
|
||||||
- sentence-transformers/all-mpnet-base-v1
|
|
||||||
- sentence-transformers/all-mpnet-base-v2
|
|
||||||
- sentence-transformers/all-roberta-large-v1
|
|
||||||
- sentence-transformers/allenai-specter
|
|
||||||
- sentence-transformers/average_word_embeddings_glove.6B.300d
|
|
||||||
- sentence-transformers/average_word_embeddings_glove.840B.300d
|
|
||||||
- sentence-transformers/average_word_embeddings_komninos
|
|
||||||
- sentence-transformers/average_word_embeddings_levy_dependency
|
|
||||||
- sentence-transformers/clip-ViT-B-32-multilingual-v1
|
|
||||||
- sentence-transformers/clip-ViT-B-32
|
|
||||||
- sentence-transformers/distilbert-base-nli-stsb-quora-ranking
|
|
||||||
- sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking
|
|
||||||
- sentence-transformers/distilroberta-base-paraphrase-v1
|
|
||||||
- sentence-transformers/distiluse-base-multilingual-cased-v1
|
|
||||||
- sentence-transformers/distiluse-base-multilingual-cased-v2
|
|
||||||
- sentence-transformers/distiluse-base-multilingual-cased
|
|
||||||
- sentence-transformers/facebook-dpr-ctx_encoder-multiset-base
|
|
||||||
- sentence-transformers/facebook-dpr-ctx_encoder-single-nq-base
|
|
||||||
- sentence-transformers/facebook-dpr-question_encoder-multiset-base
|
|
||||||
- sentence-transformers/facebook-dpr-question_encoder-single-nq-base
|
|
||||||
- sentence-transformers/gtr-t5-large
|
|
||||||
- sentence-transformers/gtr-t5-xl
|
|
||||||
- sentence-transformers/gtr-t5-xxl
|
|
||||||
- sentence-transformers/msmarco-MiniLM-L-12-v3
|
|
||||||
- sentence-transformers/msmarco-MiniLM-L-6-v3
|
|
||||||
- sentence-transformers/msmarco-MiniLM-L12-cos-v5
|
|
||||||
- sentence-transformers/msmarco-MiniLM-L6-cos-v5
|
|
||||||
- sentence-transformers/msmarco-bert-base-dot-v5
|
|
||||||
- sentence-transformers/msmarco-bert-co-condensor
|
|
||||||
- sentence-transformers/msmarco-distilbert-base-dot-prod-v3
|
|
||||||
- sentence-transformers/msmarco-distilbert-base-tas-b
|
|
||||||
- sentence-transformers/msmarco-distilbert-base-v2
|
|
||||||
- sentence-transformers/msmarco-distilbert-base-v3
|
|
||||||
- sentence-transformers/msmarco-distilbert-base-v4
|
|
||||||
- sentence-transformers/msmarco-distilbert-cos-v5
|
|
||||||
- sentence-transformers/msmarco-distilbert-dot-v5
|
|
||||||
- sentence-transformers/msmarco-distilbert-multilingual-en-de-v2-tmp-lng-aligned
|
|
||||||
- sentence-transformers/msmarco-distilbert-multilingual-en-de-v2-tmp-trained-scratch
|
|
||||||
- sentence-transformers/msmarco-distilroberta-base-v2
|
|
||||||
- sentence-transformers/msmarco-roberta-base-ance-firstp
|
|
||||||
- sentence-transformers/msmarco-roberta-base-v2
|
|
||||||
- sentence-transformers/msmarco-roberta-base-v3
|
|
||||||
- sentence-transformers/multi-qa-MiniLM-L6-cos-v1
|
|
||||||
- sentence-transformers/nli-mpnet-base-v2
|
|
||||||
- sentence-transformers/nli-roberta-base-v2
|
|
||||||
- sentence-transformers/nq-distilbert-base-v1
|
|
||||||
- sentence-transformers/paraphrase-MiniLM-L12-v2
|
|
||||||
- sentence-transformers/paraphrase-MiniLM-L3-v2
|
|
||||||
- sentence-transformers/paraphrase-MiniLM-L6-v2
|
|
||||||
- sentence-transformers/paraphrase-TinyBERT-L6-v2
|
|
||||||
- sentence-transformers/paraphrase-albert-base-v2
|
|
||||||
- sentence-transformers/paraphrase-albert-small-v2
|
|
||||||
- sentence-transformers/paraphrase-distilroberta-base-v1
|
|
||||||
- sentence-transformers/paraphrase-distilroberta-base-v2
|
|
||||||
- sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
|
|
||||||
- sentence-transformers/paraphrase-multilingual-mpnet-base-v2
|
|
||||||
- sentence-transformers/paraphrase-xlm-r-multilingual-v1
|
|
||||||
- sentence-transformers/quora-distilbert-base
|
|
||||||
- sentence-transformers/quora-distilbert-multilingual
|
|
||||||
- sentence-transformers/sentence-t5-base
|
|
||||||
- sentence-transformers/sentence-t5-large
|
|
||||||
- sentence-transformers/sentence-t5-xxl
|
|
||||||
- sentence-transformers/sentence-t5-xl
|
|
||||||
- sentence-transformers/stsb-distilroberta-base-v2
|
|
||||||
- sentence-transformers/stsb-mpnet-base-v2
|
|
||||||
- sentence-transformers/stsb-roberta-base-v2
|
|
||||||
- sentence-transformers/stsb-xlm-r-multilingual
|
|
||||||
- sentence-transformers/xlm-r-distilroberta-base-paraphrase-v1
|
|
||||||
- sentence-transformers/clip-ViT-L-14
|
|
||||||
- sentence-transformers/clip-ViT-B-16
|
|
||||||
- sentence-transformers/use-cmlm-multilingual
|
|
||||||
- sentence-transformers/all-MiniLM-L12-v1
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! info
|
|
||||||
You can also load many other model architectures from the library. For example models from sources such as BAAI, nomic, salesforce research, etc.
|
|
||||||
See this HF hub page for all [supported models](https://huggingface.co/models?library=sentence-transformers).
|
|
||||||
|
|
||||||
!!! note "BAAI Embeddings example"
|
|
||||||
Here is an example that uses BAAI embedding model from the HuggingFace Hub [supported models](https://huggingface.co/models?library=sentence-transformers)
|
|
||||||
```python
|
```python
|
||||||
import lancedb
|
model = get_registry()
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
.get("openai")
|
||||||
from lancedb.embeddings import get_registry
|
.create(name="text-embedding-ada-002")
|
||||||
|
|
||||||
db = lancedb.connect("/tmp/db")
|
|
||||||
model = get_registry().get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
|
|
||||||
|
|
||||||
class Words(LanceModel):
|
|
||||||
text: str = model.SourceField()
|
|
||||||
vector: Vector(model.ndims()) = model.VectorField()
|
|
||||||
|
|
||||||
table = db.create_table("words", schema=Words)
|
|
||||||
table.add(
|
|
||||||
[
|
|
||||||
{"text": "hello world"},
|
|
||||||
{"text": "goodbye world"}
|
|
||||||
]
|
|
||||||
)
|
|
||||||
|
|
||||||
query = "greetings"
|
|
||||||
actual = table.search(query).limit(1).to_pydantic(Words)[0]
|
|
||||||
print(actual.text)
|
|
||||||
```
|
|
||||||
Visit sentence-transformers [HuggingFace HUB](https://huggingface.co/sentence-transformers) page for more information on the available models.
|
|
||||||
|
|
||||||
|
|
||||||
### Huggingface embedding models
|
|
||||||
We offer support for all huggingface models (which can be loaded via [transformers](https://huggingface.co/docs/transformers/en/index) library). The default model is `colbert-ir/colbertv2.0` which also has its own special callout - `registry.get("colbert")`
|
|
||||||
|
|
||||||
Example usage -
|
|
||||||
```python
|
|
||||||
import lancedb
|
|
||||||
import pandas as pd
|
|
||||||
|
|
||||||
from lancedb.embeddings import get_registry
|
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
|
||||||
|
|
||||||
model = get_registry().get("huggingface").create(name='facebook/bart-base')
|
|
||||||
|
|
||||||
class Words(LanceModel):
|
|
||||||
text: str = model.SourceField()
|
|
||||||
vector: Vector(model.ndims()) = model.VectorField()
|
|
||||||
|
|
||||||
df = pd.DataFrame({"text": ["hi hello sayonara", "goodbye world"]})
|
|
||||||
table = db.create_table("greets", schema=Words)
|
|
||||||
table.add(df)
|
|
||||||
query = "old greeting"
|
|
||||||
actual = table.search(query).limit(1).to_pydantic(Words)[0]
|
|
||||||
print(actual.text)
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### Ollama embeddings
|
|
||||||
Generate embeddings via the [ollama](https://github.com/ollama/ollama-python) python library. More details:
|
|
||||||
|
|
||||||
- [Ollama docs on embeddings](https://github.com/ollama/ollama/blob/main/docs/api.md#generate-embeddings)
|
|
||||||
- [Ollama blog on embeddings](https://ollama.com/blog/embedding-models)
|
|
||||||
|
|
||||||
| Parameter | Type | Default Value | Description |
|
|
||||||
|------------------------|----------------------------|--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `name` | `str` | `nomic-embed-text` | The name of the model. |
|
|
||||||
| `host` | `str` | `http://localhost:11434` | The Ollama host to connect to. |
|
|
||||||
| `options` | `ollama.Options` or `dict` | `None` | Additional model parameters listed in the documentation for the Modelfile such as `temperature`. |
|
|
||||||
| `keep_alive` | `float` or `str` | `"5m"` | Controls how long the model will stay loaded into memory following the request. |
|
|
||||||
| `ollama_client_kwargs` | `dict` | `{}` | kwargs that can be passed to the `ollama.Client`. |
|
|
||||||
|
|
||||||
```python
|
|
||||||
import lancedb
|
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
|
||||||
from lancedb.embeddings import get_registry
|
|
||||||
|
|
||||||
db = lancedb.connect("/tmp/db")
|
|
||||||
func = get_registry().get("ollama").create(name="nomic-embed-text")
|
|
||||||
|
|
||||||
class Words(LanceModel):
|
|
||||||
text: str = func.SourceField()
|
|
||||||
vector: Vector(func.ndims()) = func.VectorField()
|
|
||||||
|
|
||||||
table = db.create_table("words", schema=Words, mode="overwrite")
|
|
||||||
table.add([
|
|
||||||
{"text": "hello world"},
|
|
||||||
{"text": "goodbye world"}
|
|
||||||
])
|
|
||||||
|
|
||||||
query = "greetings"
|
|
||||||
actual = table.search(query).limit(1).to_pydantic(Words)[0]
|
|
||||||
print(actual.text)
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### OpenAI embeddings
|
|
||||||
LanceDB registers the OpenAI embeddings function in the registry by default, as `openai`. Below are the parameters that you can customize when creating the instances:
|
|
||||||
|
|
||||||
| Parameter | Type | Default Value | Description |
|
|
||||||
|---|---|---|---|
|
|
||||||
| `name` | `str` | `"text-embedding-ada-002"` | The name of the model. |
|
|
||||||
| `dim` | `int` | Model default | For OpenAI's newer text-embedding-3 models, you can specify an output dimensionality smaller than the default 1536. |
|
|
||||||
|
|
||||||
|
|
||||||
```python
|
|
||||||
import lancedb
|
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
|
||||||
from lancedb.embeddings import get_registry
|
|
||||||
|
|
||||||
db = lancedb.connect("/tmp/db")
|
|
||||||
func = get_registry().get("openai").create(name="text-embedding-ada-002")
|
|
||||||
|
|
||||||
class Words(LanceModel):
|
|
||||||
text: str = func.SourceField()
|
|
||||||
vector: Vector(func.ndims()) = func.VectorField()
|
|
||||||
|
|
||||||
table = db.create_table("words", schema=Words, mode="overwrite")
|
|
||||||
table.add(
|
|
||||||
[
|
|
||||||
{"text": "hello world"},
|
|
||||||
{"text": "goodbye world"}
|
|
||||||
]
|
|
||||||
)
|
|
||||||
|
|
||||||
query = "greetings"
|
|
||||||
actual = table.search(query).limit(1).to_pydantic(Words)[0]
|
|
||||||
print(actual.text)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Instructor Embeddings
|
|
||||||
[Instructor](https://instructor-embedding.github.io/) is an instruction-finetuned text embedding model that can generate text embeddings tailored to any task (e.g. classification, retrieval, clustering, text evaluation, etc.) and domains (e.g. science, finance, etc.) by simply providing the task instruction, without any finetuning.
|
|
||||||
|
|
||||||
If you want to calculate customized embeddings for specific sentences, you can follow the unified template to write instructions.
|
|
||||||
|
|
||||||
!!! info
|
|
||||||
Represent the `domain` `text_type` for `task_objective`:
|
|
||||||
|
|
||||||
* `domain` is optional, and it specifies the domain of the text, e.g. science, finance, medicine, etc.
|
|
||||||
* `text_type` is required, and it specifies the encoding unit, e.g. sentence, document, paragraph, etc.
|
|
||||||
* `task_objective` is optional, and it specifies the objective of embedding, e.g. retrieve a document, classify the sentence, etc.
|
|
||||||
|
|
||||||
More information about the model can be found at the [source URL](https://github.com/xlang-ai/instructor-embedding).
|
|
||||||
|
|
||||||
| Argument | Type | Default | Description |
|
|
||||||
|---|---|---|---|
|
|
||||||
| `name` | `str` | "hkunlp/instructor-base" | The name of the model to use |
|
|
||||||
| `batch_size` | `int` | `32` | The batch size to use when generating embeddings |
|
|
||||||
| `device` | `str` | `"cpu"` | The device to use when generating embeddings |
|
|
||||||
| `show_progress_bar` | `bool` | `True` | Whether to show a progress bar when generating embeddings |
|
|
||||||
| `normalize_embeddings` | `bool` | `True` | Whether to normalize the embeddings |
|
|
||||||
| `quantize` | `bool` | `False` | Whether to quantize the model |
|
|
||||||
| `source_instruction` | `str` | `"represent the docuement for retreival"` | The instruction for the source column |
|
|
||||||
| `query_instruction` | `str` | `"represent the document for retreiving the most similar documents"` | The instruction for the query |
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
```python
|
|
||||||
import lancedb
|
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
|
||||||
from lancedb.embeddings import get_registry, InstuctorEmbeddingFunction
|
|
||||||
|
|
||||||
instructor = get_registry().get("instructor").create(
|
|
||||||
source_instruction="represent the docuement for retreival",
|
|
||||||
query_instruction="represent the document for retreiving the most similar documents"
|
|
||||||
)
|
|
||||||
|
|
||||||
class Schema(LanceModel):
|
|
||||||
vector: Vector(instructor.ndims()) = instructor.VectorField()
|
|
||||||
text: str = instructor.SourceField()
|
|
||||||
|
|
||||||
db = lancedb.connect("~/.lancedb")
|
|
||||||
tbl = db.create_table("test", schema=Schema, mode="overwrite")
|
|
||||||
|
|
||||||
texts = [{"text": "Capitalism has been dominant in the Western world since the end of feudalism, but most feel[who?] that..."},
|
|
||||||
{"text": "The disparate impact theory is especially controversial under the Fair Housing Act because the Act..."},
|
|
||||||
{"text": "Disparate impact in United States labor law refers to practices in employment, housing, and other areas that.."}]
|
|
||||||
|
|
||||||
tbl.add(texts)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Gemini Embeddings
|
|
||||||
With Google's Gemini, you can represent text (words, sentences, and blocks of text) in a vectorized form, making it easier to compare and contrast embeddings. For example, two texts that share a similar subject matter or sentiment should have similar embeddings, which can be identified through mathematical comparison techniques such as cosine similarity. For more on how and why you should use embeddings, refer to the Embeddings guide.
|
|
||||||
The Gemini Embedding Model API supports various task types:
|
|
||||||
|
|
||||||
| Task Type | Description |
|
|
||||||
|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| "`retrieval_query`" | Specifies the given text is a query in a search/retrieval setting. |
|
|
||||||
| "`retrieval_document`" | Specifies the given text is a document in a search/retrieval setting. Using this task type requires a title but is automatically proided by Embeddings API |
|
|
||||||
| "`semantic_similarity`" | Specifies the given text will be used for Semantic Textual Similarity (STS). |
|
|
||||||
| "`classification`" | Specifies that the embeddings will be used for classification. |
|
|
||||||
| "`clusering`" | Specifies that the embeddings will be used for clustering. |
|
|
||||||
|
|
||||||
|
|
||||||
Usage Example:
|
|
||||||
|
|
||||||
```python
|
|
||||||
import lancedb
|
|
||||||
import pandas as pd
|
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
|
||||||
from lancedb.embeddings import get_registry
|
|
||||||
|
|
||||||
|
|
||||||
model = get_registry().get("gemini-text").create()
|
|
||||||
|
|
||||||
class TextModel(LanceModel):
|
|
||||||
text: str = model.SourceField()
|
|
||||||
vector: Vector(model.ndims()) = model.VectorField()
|
|
||||||
|
|
||||||
df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
|
|
||||||
db = lancedb.connect("~/.lancedb")
|
|
||||||
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
|
|
||||||
|
|
||||||
tbl.add(df)
|
|
||||||
rs = tbl.search("hello").limit(1).to_pandas()
|
|
||||||
```
|
|
||||||
|
|
||||||
### Cohere Embeddings
|
|
||||||
Using the Cohere API requires the `cohere` package, which can be installed with `pip install cohere`. Cohere embeddings are used to generate embeddings for text data. The embeddings can be used for various tasks like semantic search, clustering, and classification.
|
|
||||||
You also need to set the `COHERE_API_KEY` environment variable to use the Cohere API.
|
|
||||||
|
|
||||||
Supported models are:
|
|
||||||
```
|
|
||||||
* embed-english-v3.0
|
|
||||||
* embed-multilingual-v3.0
|
|
||||||
* embed-english-light-v3.0
|
|
||||||
* embed-multilingual-light-v3.0
|
|
||||||
* embed-english-v2.0
|
|
||||||
* embed-english-light-v2.0
|
|
||||||
* embed-multilingual-v2.0
|
|
||||||
```
|
|
||||||
|
|
||||||
Supported parameters (to be passed to the `create` method) are:
|
|
||||||
|
|
||||||
| Parameter | Type | Default Value | Description |
|
|
||||||
|---|---|---|---|
|
|
||||||
| `name` | `str` | `"embed-english-v2.0"` | The model ID of the cohere model to use. Supported base models for Text Embeddings: embed-english-v3.0, embed-multilingual-v3.0, embed-english-light-v3.0, embed-multilingual-light-v3.0, embed-english-v2.0, embed-english-light-v2.0, embed-multilingual-v2.0 |
|
|
||||||
| `source_input_type` | `str` | `"search_document"` | The type of input data to be used for the source column. |
|
|
||||||
| `query_input_type` | `str` | `"search_query"` | The type of input data to be used for the query. |
|
|
||||||
|
|
||||||
Cohere supports following input types:
|
|
||||||
| Input Type | Description |
|
|
||||||
|-------------------------|---------------------------------------|
|
|
||||||
| "`search_document`" | Used for embeddings stored in a vector|
|
|
||||||
| | database for search use-cases. |
|
|
||||||
| "`search_query`" | Used for embeddings of search queries |
|
|
||||||
| | run against a vector DB |
|
|
||||||
| "`semantic_similarity`" | Specifies the given text will be used |
|
|
||||||
| | for Semantic Textual Similarity (STS) |
|
|
||||||
| "`classification`" | Used for embeddings passed through a |
|
|
||||||
| | text classifier. |
|
|
||||||
| "`clustering`" | Used for the embeddings run through a |
|
|
||||||
| | clustering algorithm |
|
|
||||||
|
|
||||||
Usage Example:
|
|
||||||
|
|
||||||
```python
|
|
||||||
import lancedb
|
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
|
||||||
from lancedb.embeddings import EmbeddingFunctionRegistry
|
|
||||||
|
|
||||||
cohere = EmbeddingFunctionRegistry
|
|
||||||
.get_instance()
|
|
||||||
.get("cohere")
|
|
||||||
.create(name="embed-multilingual-v2.0")
|
|
||||||
|
|
||||||
class TextModel(LanceModel):
|
|
||||||
text: str = cohere.SourceField()
|
|
||||||
vector: Vector(cohere.ndims()) = cohere.VectorField()
|
|
||||||
|
|
||||||
data = [ { "text": "hello world" },
|
|
||||||
{ "text": "goodbye world" }]
|
|
||||||
|
|
||||||
db = lancedb.connect("~/.lancedb")
|
|
||||||
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
|
|
||||||
|
|
||||||
tbl.add(data)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Jina Embeddings
|
Now let's understand the above syntax:
|
||||||
Jina embeddings are used to generate embeddings for text and image data.
|
|
||||||
You also need to set the `JINA_API_KEY` environment variable to use the Jina API.
|
|
||||||
|
|
||||||
You can find a list of supported models under [https://jina.ai/embeddings/](https://jina.ai/embeddings/)
|
|
||||||
|
|
||||||
Supported parameters (to be passed to the `create` method) are:
|
|
||||||
|
|
||||||
| Parameter | Type | Default Value | Description |
|
|
||||||
|---|---|---|---|
|
|
||||||
| `name` | `str` | `"jina-clip-v1"` | The model ID of the jina model to use |
|
|
||||||
|
|
||||||
Usage Example:
|
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import os
|
model = get_registry().get("model_id").create(...params)
|
||||||
import lancedb
|
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
|
||||||
from lancedb.embeddings import EmbeddingFunctionRegistry
|
|
||||||
|
|
||||||
os.environ['JINA_API_KEY'] = 'jina_*'
|
|
||||||
|
|
||||||
jina_embed = EmbeddingFunctionRegistry.get_instance().get("jina").create(name="jina-embeddings-v2-base-en")
|
|
||||||
|
|
||||||
|
|
||||||
class TextModel(LanceModel):
|
|
||||||
text: str = jina_embed.SourceField()
|
|
||||||
vector: Vector(jina_embed.ndims()) = jina_embed.VectorField()
|
|
||||||
|
|
||||||
|
|
||||||
data = [{"text": "hello world"},
|
|
||||||
{"text": "goodbye world"}]
|
|
||||||
|
|
||||||
db = lancedb.connect("~/.lancedb-2")
|
|
||||||
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
|
|
||||||
|
|
||||||
tbl.add(data)
|
|
||||||
```
|
```
|
||||||
|
**This👆 line effectively creates a configured instance of an `embedding function` with `model` of choice that is ready for use.**
|
||||||
|
|
||||||
### AWS Bedrock Text Embedding Functions
|
- `get_registry()` : This function call returns an instance of a `EmbeddingFunctionRegistry` object. This registry manages the registration and retrieval of embedding functions.
|
||||||
AWS Bedrock supports multiple base models for generating text embeddings. You need to set up the AWS credentials to use this embedding function.
|
|
||||||
You can do so by using `awscli` and also add your session_token:
|
|
||||||
```shell
|
|
||||||
aws configure
|
|
||||||
aws configure set aws_session_token "<your_session_token>"
|
|
||||||
```
|
|
||||||
To ensure that the credentials are set up correctly, you can run the following command:
|
|
||||||
```shell
|
|
||||||
aws sts get-caller-identity
|
|
||||||
```
|
|
||||||
|
|
||||||
Supported embedding model IDs are:
|
- `.get("model_id")` : This method, called on the registry object, retrieves the **embedding model function** associated with the `"model_id"` (1).
|
||||||
* `amazon.titan-embed-text-v1`
|
{ .annotate }
|
||||||
* `cohere.embed-english-v3`
|
|
||||||
* `cohere.embed-multilingual-v3`
|
|
||||||
|
|
||||||
Supported parameters (to be passed in `create` method) are:
|
1. Hover over the names in table below to find out the `model_id` of different embedding functions.
|
||||||
|
|
||||||
| Parameter | Type | Default Value | Description |
|
- `.create(...params)` : This method call is on the object returned by the `get` method. It instantiates an embedding model function using the **specified parameters**.
|
||||||
|---|---|---|---|
|
|
||||||
| **name** | str | "amazon.titan-embed-text-v1" | The model ID of the bedrock model to use. Supported base models for Text Embeddings: amazon.titan-embed-text-v1, cohere.embed-english-v3, cohere.embed-multilingual-v3 |
|
|
||||||
| **region** | str | "us-east-1" | Optional name of the AWS Region in which the service should be called (e.g., "us-east-1"). |
|
|
||||||
| **profile_name** | str | None | Optional name of the AWS profile to use for calling the Bedrock service. If not specified, the default profile will be used. |
|
|
||||||
| **assumed_role** | str | None | Optional ARN of an AWS IAM role to assume for calling the Bedrock service. If not specified, the current active credentials will be used. |
|
|
||||||
| **role_session_name** | str | "lancedb-embeddings" | Optional name of the AWS IAM role session to use for calling the Bedrock service. If not specified, a "lancedb-embeddings" name will be used. |
|
|
||||||
| **runtime** | bool | True | Optional choice of getting different client to perform operations with the Amazon Bedrock service. |
|
|
||||||
| **max_retries** | int | 7 | Optional number of retries to perform when a request fails. |
|
|
||||||
|
|
||||||
Usage Example:
|
??? question "What parameters does the `.create(...params)` method accept?"
|
||||||
|
**Checkout the documentation of specific embedding models (links in the table below👇) to know what parameters it takes**.
|
||||||
|
|
||||||
```python
|
!!! tip "Moving on"
|
||||||
import lancedb
|
Now that we know how to get the **desired embedding model** and use it in our code, let's explore the comprehensive **list** of embedding models **supported by LanceDB**, in the tables below.
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
|
||||||
from lancedb.embeddings import get_registry
|
|
||||||
|
|
||||||
model = get_registry().get("bedrock-text").create()
|
## Text Embedding Functions 📝
|
||||||
|
These functions are registered by default to handle text embeddings.
|
||||||
|
|
||||||
class TextModel(LanceModel):
|
- 🔄 **Embedding functions** have an inbuilt rate limit handler wrapper for source and query embedding function calls that retry with **exponential backoff**.
|
||||||
text: str = model.SourceField()
|
|
||||||
vector: Vector(model.ndims()) = model.VectorField()
|
|
||||||
|
|
||||||
df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
|
- 🌕 Each `EmbeddingFunction` implementation automatically takes `max_retries` as an argument which has the default value of 7.
|
||||||
db = lancedb.connect("tmp_path")
|
|
||||||
tbl = db.create_table("test", schema=TextModel, mode="overwrite")
|
|
||||||
|
|
||||||
tbl.add(df)
|
🌟 **Available Text Embeddings**
|
||||||
rs = tbl.search("hello").limit(1).to_pandas()
|
|
||||||
```
|
|
||||||
|
|
||||||
## Multi-modal embedding functions
|
| **Embedding** :material-information-outline:{ title="Hover over the name to find out the model_id" } | **Description** | **Documentation** |
|
||||||
Multi-modal embedding functions allow you to query your table using both images and text.
|
|-----------|-------------|---------------|
|
||||||
|
| [**Sentence Transformers**](available_embedding_models/text_embedding_functions/sentence_transformers.md "sentence-transformers") | 🧠 **SentenceTransformers** is a Python framework for state-of-the-art sentence, text, and image embeddings. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/sbert_2.png" alt="Sentence Transformers Icon" width="90" height="35">](available_embedding_models/text_embedding_functions/sentence_transformers.md)|
|
||||||
### OpenClip embeddings
|
| [**Huggingface Models**](available_embedding_models/text_embedding_functions/huggingface_embedding.md "huggingface") |🤗 We offer support for all **Huggingface** models. The default model is `colbert-ir/colbertv2.0`. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/hugging_face.png" alt="Huggingface Icon" width="130" height="35">](available_embedding_models/text_embedding_functions/huggingface_embedding.md) |
|
||||||
We support CLIP model embeddings using the open source alternative, [open-clip](https://github.com/mlfoundations/open_clip) which supports various customizations. It is registered as `open-clip` and supports the following customizations:
|
| [**Ollama Embeddings**](available_embedding_models/text_embedding_functions/ollama_embedding.md "ollama") | 🔍 Generate embeddings via the **Ollama** python library. Ollama supports embedding models, making it possible to build RAG apps. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/Ollama.png" alt="Ollama Icon" width="110" height="35">](available_embedding_models/text_embedding_functions/ollama_embedding.md)|
|
||||||
|
| [**OpenAI Embeddings**](available_embedding_models/text_embedding_functions/openai_embedding.md "openai")| 🔑 **OpenAI’s** text embeddings measure the relatedness of text strings. **LanceDB** supports state-of-the-art embeddings from OpenAI. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/openai.png" alt="OpenAI Icon" width="100" height="35">](available_embedding_models/text_embedding_functions/openai_embedding.md)|
|
||||||
| Parameter | Type | Default Value | Description |
|
| [**Instructor Embeddings**](available_embedding_models/text_embedding_functions/instructor_embedding.md "instructor") | 📚 **Instructor**: An instruction-finetuned text embedding model that can generate text embeddings tailored to any task and domains by simply providing the task instruction, without any finetuning. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/instructor_embedding.png" alt="Instructor Embedding Icon" width="140" height="35">](available_embedding_models/text_embedding_functions/instructor_embedding.md) |
|
||||||
|---|---|---|---|
|
| [**Gemini Embeddings**](available_embedding_models/text_embedding_functions/gemini_embedding.md "gemini-text") | 🌌 Google’s Gemini API generates state-of-the-art embeddings for words, phrases, and sentences. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/gemini.png" alt="Gemini Icon" width="95" height="35">](available_embedding_models/text_embedding_functions/gemini_embedding.md) |
|
||||||
| `name` | `str` | `"ViT-B-32"` | The name of the model. |
|
| [**Cohere Embeddings**](available_embedding_models/text_embedding_functions/cohere_embedding.md "cohere") | 💬 This will help you get started with **Cohere** embedding models using LanceDB. Using cohere API requires cohere package. Install it via `pip`. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/cohere.png" alt="Cohere Icon" width="140" height="35">](available_embedding_models/text_embedding_functions/cohere_embedding.md) |
|
||||||
| `pretrained` | `str` | `"laion2b_s34b_b79k"` | The name of the pretrained model to load. |
|
| [**Jina Embeddings**](available_embedding_models/text_embedding_functions/jina_embedding.md "jina") | 🔗 World-class embedding models to improve your search and RAG systems. You will need **jina api key**. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/jina.png" alt="Jina Icon" width="90" height="35">](available_embedding_models/text_embedding_functions/jina_embedding.md) |
|
||||||
| `device` | `str` | `"cpu"` | The device to run the model on. Can be `"cpu"` or `"gpu"`. |
|
| [ **AWS Bedrock Functions**](available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md "bedrock-text") | ☁️ AWS Bedrock supports multiple base models for generating text embeddings. You need to setup the AWS credentials to use this embedding function. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/aws_bedrock.png" alt="AWS Bedrock Icon" width="120" height="35">](available_embedding_models/text_embedding_functions/aws_bedrock_embedding.md) |
|
||||||
| `batch_size` | `int` | `64` | The number of images to process in a batch. |
|
| [**IBM Watsonx.ai**](available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md "watsonx") | 💡 Generate text embeddings using IBM's watsonx.ai platform. **Note**: watsonx.ai library is an optional dependency. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/watsonx.png" alt="Watsonx Icon" width="140" height="35">](available_embedding_models/text_embedding_functions/ibm_watsonx_ai_embedding.md) |
|
||||||
| `normalize` | `bool` | `True` | Whether to normalize the input images before feeding them to the model. |
|
|
||||||
|
|
||||||
This embedding function supports ingesting images as both bytes and urls. You can query them using both text and other images.
|
|
||||||
|
|
||||||
!!! info
|
|
||||||
LanceDB supports ingesting images directly from accessible links.
|
|
||||||
|
|
||||||
```python
|
|
||||||
import lancedb
|
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
|
||||||
from lancedb.embeddings import get_registry
|
|
||||||
|
|
||||||
db = lancedb.connect(tmp_path)
|
|
||||||
func = get_registry().get("open-clip").create()
|
|
||||||
|
|
||||||
class Images(LanceModel):
|
|
||||||
label: str
|
|
||||||
image_uri: str = func.SourceField() # image uri as the source
|
|
||||||
image_bytes: bytes = func.SourceField() # image bytes as the source
|
|
||||||
vector: Vector(func.ndims()) = func.VectorField() # vector column
|
|
||||||
vec_from_bytes: Vector(func.ndims()) = func.VectorField() # Another vector column
|
|
||||||
|
|
||||||
table = db.create_table("images", schema=Images)
|
|
||||||
labels = ["cat", "cat", "dog", "dog", "horse", "horse"]
|
|
||||||
uris = [
|
|
||||||
"http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg",
|
|
||||||
"http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg",
|
|
||||||
"http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg",
|
|
||||||
"http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg",
|
|
||||||
"http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg",
|
|
||||||
"http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg",
|
|
||||||
]
|
|
||||||
# get each uri as bytes
|
|
||||||
image_bytes = [requests.get(uri).content for uri in uris]
|
|
||||||
table.add(
|
|
||||||
pd.DataFrame({"label": labels, "image_uri": uris, "image_bytes": image_bytes})
|
|
||||||
)
|
|
||||||
```
|
|
||||||
Now we can search using text from both the default vector column and the custom vector column
|
|
||||||
```python
|
|
||||||
|
|
||||||
# text search
|
|
||||||
actual = table.search("man's best friend").limit(1).to_pydantic(Images)[0]
|
|
||||||
print(actual.label) # prints "dog"
|
|
||||||
|
|
||||||
frombytes = (
|
|
||||||
table.search("man's best friend", vector_column_name="vec_from_bytes")
|
|
||||||
.limit(1)
|
|
||||||
.to_pydantic(Images)[0]
|
|
||||||
)
|
|
||||||
print(frombytes.label)
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
Because we're using a multi-modal embedding function, we can also search using images
|
|
||||||
|
|
||||||
```python
|
|
||||||
# image search
|
|
||||||
query_image_uri = "http://farm1.staticflickr.com/200/467715466_ed4a31801f_z.jpg"
|
|
||||||
image_bytes = requests.get(query_image_uri).content
|
|
||||||
query_image = Image.open(io.BytesIO(image_bytes))
|
|
||||||
actual = table.search(query_image).limit(1).to_pydantic(Images)[0]
|
|
||||||
print(actual.label == "dog")
|
|
||||||
|
|
||||||
# image search using a custom vector column
|
|
||||||
other = (
|
|
||||||
table.search(query_image, vector_column_name="vec_from_bytes")
|
|
||||||
.limit(1)
|
|
||||||
.to_pydantic(Images)[0]
|
|
||||||
)
|
|
||||||
print(other.label)
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
### Imagebind embeddings
|
|
||||||
We have support for [imagebind](https://github.com/facebookresearch/ImageBind) model embeddings. You can download our version of the packaged model via - `pip install imagebind-packaged==0.1.2`.
|
|
||||||
|
|
||||||
This function is registered as `imagebind` and supports Audio, Video and Text modalities (extending to Thermal, Depth, and IMU data):
|
|
||||||
|
|
||||||
| Parameter | Type | Default Value | Description |
|
|
||||||
|---|---|---|---|
|
|
||||||
| `name` | `str` | `"imagebind_huge"` | Name of the model. |
|
|
||||||
| `device` | `str` | `"cpu"` | The device to run the model on. Can be `"cpu"` or `"gpu"`. |
|
|
||||||
| `normalize` | `bool` | `False` | set to `True` to normalize your inputs before model ingestion. |
|
|
||||||
|
|
||||||
Below is an example demonstrating how the API works:
|
|
||||||
|
|
||||||
```python
|
|
||||||
import lancedb
|
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
|
||||||
from lancedb.embeddings import get_registry
|
|
||||||
|
|
||||||
db = lancedb.connect(tmp_path)
|
|
||||||
func = get_registry().get("imagebind").create()
|
|
||||||
|
|
||||||
class ImageBindModel(LanceModel):
|
|
||||||
text: str
|
|
||||||
image_uri: str = func.SourceField()
|
|
||||||
audio_path: str
|
|
||||||
vector: Vector(func.ndims()) = func.VectorField()
|
|
||||||
|
|
||||||
# add locally accessible image paths
|
|
||||||
text_list=["A dog.", "A car", "A bird"]
|
|
||||||
image_paths=[".assets/dog_image.jpg", ".assets/car_image.jpg", ".assets/bird_image.jpg"]
|
|
||||||
audio_paths=[".assets/dog_audio.wav", ".assets/car_audio.wav", ".assets/bird_audio.wav"]
|
|
||||||
|
|
||||||
# Load data
|
|
||||||
inputs = [
|
|
||||||
{"text": a, "audio_path": b, "image_uri": c}
|
|
||||||
for a, b, c in zip(text_list, audio_paths, image_paths)
|
|
||||||
]
|
|
||||||
|
|
||||||
#create table and add data
|
|
||||||
table = db.create_table("img_bind", schema=ImageBindModel)
|
|
||||||
table.add(inputs)
|
|
||||||
```
|
|
||||||
|
|
||||||
Now, we can search using any modality:
|
|
||||||
|
|
||||||
#### image search
|
|
||||||
```python
|
|
||||||
query_image = "./assets/dog_image2.jpg" #download an image and enter that path here
|
|
||||||
actual = table.search(query_image).limit(1).to_pydantic(ImageBindModel)[0]
|
|
||||||
print(actual.text == "dog")
|
|
||||||
```
|
|
||||||
#### audio search
|
|
||||||
|
|
||||||
```python
|
|
||||||
query_audio = "./assets/car_audio2.wav" #download an audio clip and enter path here
|
|
||||||
actual = table.search(query_audio).limit(1).to_pydantic(ImageBindModel)[0]
|
|
||||||
print(actual.text == "car")
|
|
||||||
```
|
|
||||||
#### Text search
|
|
||||||
You can add any input query and fetch the result as follows:
|
|
||||||
```python
|
|
||||||
query = "an animal which flies and tweets"
|
|
||||||
actual = table.search(query).limit(1).to_pydantic(ImageBindModel)[0]
|
|
||||||
print(actual.text == "bird")
|
|
||||||
```
|
|
||||||
|
|
||||||
If you have any questions about the embeddings API, supported models, or see a relevant model missing, please raise an issue [on GitHub](https://github.com/lancedb/lancedb/issues).
|
|
||||||
|
|
||||||
### Jina Embeddings
|
|
||||||
Jina embeddings can also be used to embed both text and image data, only some of the models support image data and you can check the list
|
|
||||||
under [https://jina.ai/embeddings/](https://jina.ai/embeddings/)
|
|
||||||
|
|
||||||
Supported parameters (to be passed in `create` method) are:
|
|
||||||
|
|
||||||
| Parameter | Type | Default Value | Description |
|
|
||||||
|---|---|---|---|
|
|
||||||
| `name` | `str` | `"jina-clip-v1"` | The model ID of the jina model to use |
|
|
||||||
|
|
||||||
Usage Example:
|
|
||||||
|
|
||||||
```python
|
|
||||||
import os
|
|
||||||
import requests
|
|
||||||
import lancedb
|
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
|
||||||
from lancedb.embeddings import get_registry
|
|
||||||
import pandas as pd
|
|
||||||
|
|
||||||
os.environ['JINA_API_KEY'] = 'jina_*'
|
|
||||||
|
|
||||||
db = lancedb.connect("~/.lancedb")
|
|
||||||
func = get_registry().get("jina").create()
|
|
||||||
|
|
||||||
|
|
||||||
class Images(LanceModel):
|
|
||||||
label: str
|
[st-key]: "sentence-transformers"
|
||||||
image_uri: str = func.SourceField() # image uri as the source
|
[hf-key]: "huggingface"
|
||||||
image_bytes: bytes = func.SourceField() # image bytes as the source
|
[ollama-key]: "ollama"
|
||||||
vector: Vector(func.ndims()) = func.VectorField() # vector column
|
[openai-key]: "openai"
|
||||||
vec_from_bytes: Vector(func.ndims()) = func.VectorField() # Another vector column
|
[instructor-key]: "instructor"
|
||||||
|
[gemini-key]: "gemini-text"
|
||||||
|
[cohere-key]: "cohere"
|
||||||
|
[jina-key]: "jina"
|
||||||
|
[aws-key]: "bedrock-text"
|
||||||
|
[watsonx-key]: "watsonx"
|
||||||
|
|
||||||
|
|
||||||
table = db.create_table("images", schema=Images)
|
## Multi-modal Embedding Functions🖼️
|
||||||
labels = ["cat", "cat", "dog", "dog", "horse", "horse"]
|
|
||||||
uris = [
|
Multi-modal embedding functions allow you to query your table using both images and text. 💬🖼️
|
||||||
"http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg",
|
|
||||||
"http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg",
|
🌐 **Available Multi-modal Embeddings**
|
||||||
"http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg",
|
|
||||||
"http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg",
|
| Embedding :material-information-outline:{ title="Hover over the name to find out the model_id" } | Description | Documentation |
|
||||||
"http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg",
|
|-----------|-------------|---------------|
|
||||||
"http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg",
|
| [**OpenClip Embeddings**](available_embedding_models/multimodal_embedding_functions/openclip_embedding.md "open-clip") | 🎨 We support CLIP model embeddings using the open source alternative, **open-clip** which supports various customizations. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/openclip_github.png" alt="openclip Icon" width="150" height="35">](available_embedding_models/multimodal_embedding_functions/openclip_embedding.md) |
|
||||||
]
|
| [**Imagebind Embeddings**](available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md "imagebind") | 🌌 We have support for **imagebind model embeddings**. You can download our version of the packaged model via - `pip install imagebind-packaged==0.1.2`. | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/imagebind_meta.png" alt="imagebind Icon" width="150" height="35">](available_embedding_models/multimodal_embedding_functions/imagebind_embedding.md)|
|
||||||
# get each uri as bytes
|
| [**Jina Multi-modal Embeddings**](available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md "jina") | 🔗 **Jina embeddings** can also be used to embed both **text** and **image** data, only some of the models support image data and you can check the detailed documentation. 👉 | [<img src="https://raw.githubusercontent.com/lancedb/assets/main/docs/assets/logos/jina.png" alt="jina Icon" width="90" height="35">](available_embedding_models/multimodal_embedding_functions/jina_multimodal_embedding.md) |
|
||||||
image_bytes = [requests.get(uri).content for uri in uris]
|
|
||||||
table.add(
|
!!! note
|
||||||
pd.DataFrame({"label": labels, "image_uri": uris, "image_bytes": image_bytes})
|
If you'd like to request support for additional **embedding functions**, please feel free to open an issue on our LanceDB [GitHub issue page](https://github.com/lancedb/lancedb/issues).
|
||||||
)
|
|
||||||
```
|
|
||||||
@@ -2,8 +2,8 @@ Representing multi-modal data as vector embeddings is becoming a standard practi
|
|||||||
|
|
||||||
For this purpose, LanceDB introduces an **embedding functions API**, that allows you to simply set it up once, during the configuration stage of your project. After this, the table remembers it, effectively making the embedding functions *disappear in the background* so you don't have to worry about manually passing callables, and instead, simply focus on the rest of your data engineering pipeline.
|
For this purpose, LanceDB introduces an **embedding functions API**, that allows you to simply set it up once, during the configuration stage of your project. After this, the table remembers it, effectively making the embedding functions *disappear in the background* so you don't have to worry about manually passing callables, and instead, simply focus on the rest of your data engineering pipeline.
|
||||||
|
|
||||||
!!! Note "LanceDB cloud doesn't support embedding functions yet"
|
!!! Note "Embedding functions on LanceDB cloud"
|
||||||
LanceDB Cloud does not support embedding functions yet. You need to generate embeddings before ingesting into the table or querying.
|
When using embedding functions with LanceDB cloud, the embeddings will be generated on the source device and sent to the cloud. This means that the source device must have the necessary resources to generate the embeddings.
|
||||||
|
|
||||||
!!! warning
|
!!! warning
|
||||||
Using the embedding function registry means that you don't have to explicitly generate the embeddings yourself.
|
Using the embedding function registry means that you don't have to explicitly generate the embeddings yourself.
|
||||||
|
|||||||
@@ -99,28 +99,28 @@ LanceDB registers the Sentence Transformers embeddings function in the registry
|
|||||||
|
|
||||||
Coming Soon!
|
Coming Soon!
|
||||||
|
|
||||||
### Jina Embeddings
|
### Embedding function with LanceDB cloud
|
||||||
|
Embedding functions are now supported on LanceDB cloud. The embeddings will be generated on the source device and sent to the cloud. This means that the source device must have the necessary resources to generate the embeddings. Here's an example using the OpenAI embedding function:
|
||||||
LanceDB registers the JinaAI embeddings function in the registry as `jina`. You can pass any supported model name to the `create`. By default it uses `"jina-clip-v1"`.
|
|
||||||
`jina-clip-v1` can handle both text and images and other models only support `text`.
|
|
||||||
|
|
||||||
You need to pass `JINA_API_KEY` in the environment variable or pass it as `api_key` to `create` method.
|
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import os
|
import os
|
||||||
import lancedb
|
import lancedb
|
||||||
from lancedb.pydantic import LanceModel, Vector
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
from lancedb.embeddings import get_registry
|
from lancedb.embeddings import get_registry
|
||||||
os.environ['JINA_API_KEY'] = "jina_*"
|
os.environ['OPENAI_API_KEY'] = "..."
|
||||||
|
|
||||||
db = lancedb.connect("/tmp/db")
|
db = lancedb.connect(
|
||||||
func = get_registry().get("jina").create(name="jina-clip-v1")
|
uri="db://....",
|
||||||
|
api_key="sk_...",
|
||||||
|
region="us-east-1"
|
||||||
|
)
|
||||||
|
func = get_registry().get("openai").create()
|
||||||
|
|
||||||
class Words(LanceModel):
|
class Words(LanceModel):
|
||||||
text: str = func.SourceField()
|
text: str = func.SourceField()
|
||||||
vector: Vector(func.ndims()) = func.VectorField()
|
vector: Vector(func.ndims()) = func.VectorField()
|
||||||
|
|
||||||
table = db.create_table("words", schema=Words, mode="overwrite")
|
table = db.create_table("words", schema=Words)
|
||||||
table.add(
|
table.add(
|
||||||
[
|
[
|
||||||
{"text": "hello world"},
|
{"text": "hello world"},
|
||||||
|
|||||||
@@ -1,17 +1,22 @@
|
|||||||
# Examples: Python
|
# Overview : Python Examples
|
||||||
|
|
||||||
To help you get started, we provide some examples, projects and applications that use the LanceDB Python API. You can always find the latest examples in our [VectorDB Recipes](https://github.com/lancedb/vectordb-recipes) repository.
|
To help you get started, we provide some examples, projects, and applications that use the LanceDB Python API. These examples are designed to get you right into the code with minimal introduction, enabling you to move from an idea to a proof of concept in minutes.
|
||||||
|
|
||||||
| Example | Interactive Envs | Scripts |
|
You can find the latest examples in our [VectorDB Recipes](https://github.com/lancedb/vectordb-recipes) repository.
|
||||||
|-------- | ---------------- | ------ |
|
|
||||||
| | | |
|
**Introduction**
|
||||||
| [Youtube transcript search bot](https://github.com/lancedb/vectordb-recipes/tree/main/examples/youtube_bot/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/youtube_bot/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>| [](https://github.com/lancedb/vectordb-recipes/tree/main/examples/youtube_bot/main.py)|
|
|
||||||
| [Langchain: Code Docs QA bot](https://github.com/lancedb/vectordb-recipes/tree/main/examples/Code-Documentation-QA-Bot/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Code-Documentation-QA-Bot/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>| [](https://github.com/lancedb/vectordb-recipes/tree/main/examples/Code-Documentation-QA-Bot/main.py) |
|
Explore applied examples available as Colab notebooks or Python scripts to integrate into your applications. You can also check out our blog posts related to the particular example for deeper understanding.
|
||||||
| [AI Agents: Reducing Hallucination](https://github.com/lancedb/vectordb-recipes/tree/main/examples/reducing_hallucinations_ai_agents/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/reducing_hallucinations_ai_agents/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>| [](https://github.com/lancedb/vectordb-recipes/tree/main/examples/reducing_hallucinations_ai_agents/main.py)|
|
|
||||||
| [Multimodal CLIP: DiffusionDB](https://github.com/lancedb/vectordb-recipes/tree/main/examples/multimodal_clip/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/multimodal_clip/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>| [](https://github.com/lancedb/vectordb-recipes/tree/main/examples/multimodal_clip/main.py) |
|
| Explore | Description |
|
||||||
| [Multimodal CLIP: Youtube videos](https://github.com/lancedb/vectordb-recipes/tree/main/examples/multimodal_video_search/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/multimodal_video_search/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>| [](https://github.com/lancedb/vectordb-recipes/tree/main/examples/multimodal_video_search/main.py) |
|
|----------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
| [Movie Recommender](https://github.com/lancedb/vectordb-recipes/tree/main/examples/movie-recommender/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/movie-recommender/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | [](https://github.com/lancedb/vectordb-recipes/tree/main/examples/movie-recommender/main.py) |
|
| [**Build from Scratch with LanceDB** 🛠️🚀](python_examples/build_from_scratch.md) | Start building your **GenAI applications** from the **ground up** using **LanceDB's** efficient vector-based document retrieval capabilities! Get started quickly with a solid foundation. |
|
||||||
| [Audio Search](https://github.com/lancedb/vectordb-recipes/tree/main/examples/audio_search/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/audio_search/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | [](https://github.com/lancedb/vectordb-recipes/tree/main/examples/audio_search/main.py) |
|
| [**Multimodal Search with LanceDB** 🤹♂️🔍](python_examples/multimodal.md) | Combine **text** and **image queries** to find the most relevant results using **LanceDB’s multimodal** capabilities. Leverage the efficient vector-based similarity search. |
|
||||||
| [Multimodal Image + Text Search](https://github.com/lancedb/vectordb-recipes/tree/main/examples/multimodal_search/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/multimodal_search/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | [](https://github.com/lancedb/vectordb-recipes/tree/main/examples/multimodal_search/main.py) |
|
| [**RAG (Retrieval-Augmented Generation) with LanceDB** 🔓🧐](python_examples/rag.md) | Build RAG (Retrieval-Augmented Generation) with **LanceDB** for efficient **vector-based information retrieval** and more accurate responses from AI. |
|
||||||
| [Evaluating Prompts with Prompttools](https://github.com/lancedb/vectordb-recipes/tree/main/examples/prompttools-eval-prompts/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/prompttools-eval-prompts/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | |
|
| [**Vector Search: Efficient Retrieval** 🔓👀](python_examples/vector_search.md) | Use **LanceDB's** vector search capabilities to perform efficient and accurate **similarity searches**, enabling rapid discovery and retrieval of relevant documents in Large datasets. |
|
||||||
|
| [**Chatbot applications with LanceDB** 🤖](python_examples/chatbot.md) | Create **chatbots** that retrieves relevant context for **coherent and context-aware replies**, enhancing user experience through advanced conversational AI. |
|
||||||
|
| [**Evaluation: Assessing Text Performance with Precision** 📊💡](python_examples/evaluations.md) | Develop **evaluation** applications that allows you to input reference and candidate texts to **measure** their performance across various metrics. |
|
||||||
|
| [**AI Agents: Intelligent Collaboration** 🤖](python_examples/aiagent.md) | Enable **AI agents** to communicate and collaborate efficiently through dense vector representations, achieving shared goals seamlessly. |
|
||||||
|
| [**Recommender Systems: Personalized Discovery** 🍿📺](python_examples/recommendersystem.md) | Deliver **personalized experiences** by efficiently storing and querying item embeddings with **LanceDB's** powerful vector database capabilities. |
|
||||||
|
| **Miscellaneous Examples🌟** | Find other **unique examples** and **creative solutions** using **LanceDB**, showcasing the flexibility and broad applicability of the platform. |
|
||||||
|
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ LanceDB provides language APIs, allowing you to embed a database in your languag
|
|||||||
|
|
||||||
## Applications powered by LanceDB
|
## Applications powered by LanceDB
|
||||||
|
|
||||||
| Project Name | Description | Screenshot |
|
| Project Name | Description |
|
||||||
|-----------------------------------------------------|----------------------------------------------------------------------------------------------------------------------|-------------------------------------------|
|
| --- | --- |
|
||||||
| [YOLOExplorer](https://github.com/lancedb/yoloexplorer) | Iterate on your YOLO / CV datasets using SQL, Vector semantic search, and more within seconds |  |
|
| **Ultralytics Explorer 🚀**<br>[](https://docs.ultralytics.com/datasets/explorer/)<br>[](https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/docs/en/datasets/explorer/explorer.ipynb) | - 🔍 **Explore CV Datasets**: Semantic search, SQL queries, vector similarity, natural language.<br>- 🖥️ **GUI & Python API**: Seamless dataset interaction.<br>- ⚡ **Efficient & Scalable**: Leverages LanceDB for large datasets.<br>- 📊 **Detailed Analysis**: Easily analyze data patterns.<br>- 🌐 **Browser GUI Demo**: Create embeddings, search images, run queries. |
|
||||||
| [Website Chatbot (Deployable Vercel Template)](https://github.com/lancedb/lancedb-vercel-chatbot) | Create a chatbot from the sitemap of any website/docs of your choice. Built using vectorDB serverless native javascript package. |  |
|
| **Website Chatbot🤖**<br>[](https://github.com/lancedb/lancedb-vercel-chatbot)<br>[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Flancedb%2Flancedb-vercel-chatbot&env=OPENAI_API_KEY&envDescription=OpenAI%20API%20Key%20for%20chat%20completion.&project-name=lancedb-vercel-chatbot&repository-name=lancedb-vercel-chatbot&demo-title=LanceDB%20Chatbot%20Demo&demo-description=Demo%20website%20chatbot%20with%20LanceDB.&demo-url=https%3A%2F%2Flancedb.vercel.app&demo-image=https%3A%2F%2Fi.imgur.com%2FazVJtvr.png) | - 🌐 **Chatbot from Sitemap/Docs**: Create a chatbot using site or document context.<br>- 🚀 **Embed LanceDB in Next.js**: Lightweight, on-prem storage.<br>- 🧠 **AI-Powered Context Retrieval**: Efficiently access relevant data.<br>- 🔧 **Serverless & Native JS**: Seamless integration with Next.js.<br>- ⚡ **One-Click Deploy on Vercel**: Quick and easy setup. |
|
||||||
|
|||||||
27
docs/src/examples/python_examples/aiagent.md
Normal file
27
docs/src/examples/python_examples/aiagent.md
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
# AI Agents: Intelligent Collaboration🤖
|
||||||
|
|
||||||
|
Think of a platform where AI Agents can seamlessly exchange information, coordinate over tasks, and achieve shared targets with great efficiency💻📈.
|
||||||
|
|
||||||
|
## Vector-Based Coordination: The Technical Advantage
|
||||||
|
Leveraging LanceDB's vector-based capabilities, we can enable **AI agents 🤖** to communicate and collaborate through dense vector representations. AI agents can exchange information, coordinate on a task or work towards a common goal, just by giving queries📝.
|
||||||
|
|
||||||
|
| **AI Agents** | **Description** | **Links** |
|
||||||
|
|:--------------|:----------------|:----------|
|
||||||
|
| **AI Agents: Reducing Hallucinations📊** | 🤖💡 **Reduce AI hallucinations** using Critique-Based Contexting! Learn by simplifying and automating tedious workflows by going through a fitness trainer agent example.💪 | [][hullucination_github] <br>[][hullucination_colab] <br>[][hullucination_python] <br>[][hullucination_ghost] |
|
||||||
|
| **AI Trends Searcher: CrewAI🔍️** | 🔍️ Learn about **CrewAI Agents** ! Utilize the features of CrewAI - Role-based Agents, Task Management, and Inter-agent Delegation ! Make AI agents work together to do tricky stuff 😺| [][trend_github] <br>[][trend_colab] <br>[][trend_ghost] |
|
||||||
|
| **SuperAgent Autogen🤖** | 💻 AI interactions with the Super Agent! Integrating **Autogen**, **LanceDB**, **LangChain**, **LiteLLM**, and **Ollama** to create AI agent that excels in understanding and processing complex queries.🤖 | [][superagent_github] <br>[][superagent_colab] |
|
||||||
|
|
||||||
|
|
||||||
|
[hullucination_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/reducing_hallucinations_ai_agents
|
||||||
|
[hullucination_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/reducing_hallucinations_ai_agents/main.ipynb
|
||||||
|
[hullucination_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/reducing_hallucinations_ai_agents/main.py
|
||||||
|
[hullucination_ghost]: https://blog.lancedb.com/how-to-reduce-hallucinations-from-llm-powered-agents-using-long-term-memory-72f262c3cc1f/
|
||||||
|
|
||||||
|
[trend_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/AI-Trends-with-CrewAI
|
||||||
|
[trend_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/AI-Trends-with-CrewAI/CrewAI_AI_Trends.ipynb
|
||||||
|
[trend_ghost]: https://blog.lancedb.com/track-ai-trends-crewai-agents-rag/
|
||||||
|
|
||||||
|
[superagent_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/SuperAgent_Autogen
|
||||||
|
[superagent_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/SuperAgent_Autogen/main.ipynb
|
||||||
|
|
||||||
|
|
||||||
13
docs/src/examples/python_examples/build_from_scratch.md
Normal file
13
docs/src/examples/python_examples/build_from_scratch.md
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
# **Build from Scratch with LanceDB 🛠️🚀**
|
||||||
|
|
||||||
|
Start building your GenAI applications from the ground up using **LanceDB's** efficient vector-based document retrieval capabilities! 📑
|
||||||
|
|
||||||
|
**Get Started in Minutes ⏱️**
|
||||||
|
|
||||||
|
These examples provide a solid foundation for building your own GenAI applications using LanceDB. Jump from idea to **proof of concept** quickly with applied examples. Get started and see what you can create! 💻
|
||||||
|
|
||||||
|
| **Build From Scratch** | **Description** | **Links** |
|
||||||
|
|:-------------------------------------------|:-------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| **Build RAG from Scratch🚀💻** | 📝 Create a **Retrieval-Augmented Generation** (RAG) model from scratch using LanceDB. | [](https://github.com/lancedb/vectordb-recipes/tree/main/tutorials/RAG-from-Scratch)<br>[]() |
|
||||||
|
| **Local RAG from Scratch with Llama3🔥💡** | 🐫 Build a local RAG model using **Llama3** and **LanceDB** for fast and efficient text generation. | [](https://github.com/lancedb/vectordb-recipes/tree/main/tutorials/Local-RAG-from-Scratch)<br>[](https://github.com/lancedb/vectordb-recipes/blob/main/tutorials/Local-RAG-from-Scratch/rag.py) |
|
||||||
|
| **Multi-Head RAG from Scratch📚💻** | 🤯 Develop a **Multi-Head RAG model** from scratch, enabling generation of text based on multiple documents. | [](https://github.com/lancedb/vectordb-recipes/tree/main/tutorials/Multi-Head-RAG-from-Scratch)<br>[](https://github.com/lancedb/vectordb-recipes/tree/main/tutorials/Multi-Head-RAG-from-Scratch) |
|
||||||
41
docs/src/examples/python_examples/chatbot.md
Normal file
41
docs/src/examples/python_examples/chatbot.md
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
**Chatbot applications with LanceDB 🤖**
|
||||||
|
====================================================================
|
||||||
|
|
||||||
|
Create innovative chatbot applications that utilize LanceDB for efficient vector-based response generation! 🌐✨
|
||||||
|
|
||||||
|
**Introduction 👋✨**
|
||||||
|
|
||||||
|
Users can input their queries, allowing the chatbot to retrieve relevant context seamlessly. 🔍📚 This enables the generation of coherent and context-aware replies that enhance user experience. 🌟🤝 Dive into the world of advanced conversational AI and streamline interactions with powerful data management! 🚀💡
|
||||||
|
|
||||||
|
|
||||||
|
| **Chatbot** | **Description** | **Links** |
|
||||||
|
|:----------------|:-----------------|:-----------|
|
||||||
|
| **Databricks DBRX Website Bot ⚡️** | Engage with the **Hogwarts chatbot**, that uses Open-source RAG with **DBRX**, **LanceDB** and **LLama-index with Hugging Face Embeddings**, to provide interactive and engaging user experiences. ✨ | [][databricks_github] <br>[][databricks_python] |
|
||||||
|
| **CLI SDK Manual Chatbot Locally 💻** | CLI chatbot for SDK/hardware documents using **Local RAG** with **LLama3**, **Ollama**, **LanceDB**, and **Openhermes Embeddings**, built with **Phidata** Assistant and Knowledge Base 🤖 | [][clisdk_github] <br>[][clisdk_python] |
|
||||||
|
| **Youtube Transcript Search QA Bot 📹** | Search through **youtube transcripts** using natural language with a Q&A bot, leveraging **LanceDB** for effortless data storage and management 💬 | [][youtube_github] <br>[][youtube_colab] <br>[][youtube_python] |
|
||||||
|
| **Code Documentation Q&A Bot with LangChain 🤖** | Query your own documentation easily using questions in natural language with a Q&A bot, powered by **LangChain** and **LanceDB**, demonstrated with **Numpy 1.26 docs** 📚 | [][docs_github] <br>[][docs_colab] <br>[][docs_python] |
|
||||||
|
| **Context-aware Chatbot using Llama 2 & LanceDB 🤖** | Build **conversational AI** with a **context-aware chatbot**, powered by **Llama 2**, **LanceDB**, and **LangChain**, that enables intuitive and meaningful conversations with your data 📚💬 | [][aware_github] <br>[][aware_colab] <br>[][aware_ghost] |
|
||||||
|
| **Chat with csv using Hybrid Search 📊** | **Chat** application that interacts with **CSV** and **Excel files** using **LanceDB’s** hybrid search capabilities, performing direct operations on large-scale columnar data efficiently 🚀 | [][csv_github] <br>[][csv_colab] <br>[][csv_ghost] |
|
||||||
|
|
||||||
|
|
||||||
|
[databricks_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/databricks_DBRX_website_bot
|
||||||
|
[databricks_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/databricks_DBRX_website_bot/main.py
|
||||||
|
|
||||||
|
[clisdk_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/CLI-SDK-Manual-Chatbot-Locally
|
||||||
|
[clisdk_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/CLI-SDK-Manual-Chatbot-Locally/assistant.py
|
||||||
|
|
||||||
|
[youtube_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Youtube-Search-QA-Bot
|
||||||
|
[youtube_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Youtube-Search-QA-Bot/main.ipynb
|
||||||
|
[youtube_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Youtube-Search-QA-Bot/main.py
|
||||||
|
|
||||||
|
[docs_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Code-Documentation-QA-Bot
|
||||||
|
[docs_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Code-Documentation-QA-Bot/main.ipynb
|
||||||
|
[docs_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Code-Documentation-QA-Bot/main.py
|
||||||
|
|
||||||
|
[aware_github]: https://github.com/lancedb/vectordb-recipes/blob/main/tutorials/chatbot_using_Llama2_&_lanceDB
|
||||||
|
[aware_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/chatbot_using_Llama2_&_lanceDB/main.ipynb
|
||||||
|
[aware_ghost]: https://blog.lancedb.com/context-aware-chatbot-using-llama-2-lancedb-as-vector-database-4d771d95c755
|
||||||
|
|
||||||
|
[csv_github]: https://github.com/lancedb/vectordb-recipes/blob/main/tutorials/Chat_with_csv_file
|
||||||
|
[csv_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Chat_with_csv_file/main.ipynb
|
||||||
|
[csv_ghost]: https://blog.lancedb.com/p/d8c71df4-e55f-479a-819e-cde13354a6a3/
|
||||||
21
docs/src/examples/python_examples/evaluations.md
Normal file
21
docs/src/examples/python_examples/evaluations.md
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
**Evaluation: Assessing Text Performance with Precision 📊💡**
|
||||||
|
====================================================================
|
||||||
|
|
||||||
|
Evaluation is a comprehensive tool designed to measure the performance of text-based inputs, enabling data-driven optimization and improvement 📈.
|
||||||
|
|
||||||
|
**Text Evaluation 101 📚**
|
||||||
|
|
||||||
|
Using a robust framework for assessing reference and candidate texts across various metrics📊, you can ensure that the text outputs are high-quality and meet specific requirements and standards📝.
|
||||||
|
|
||||||
|
| **Evaluation** | **Description** | **Links** |
|
||||||
|
| -------------- | --------------- | --------- |
|
||||||
|
| **Evaluating Prompts with Prompttools 🤖** | Compare, visualize & evaluate **embedding functions** (incl. OpenAI) across metrics like latency & custom evaluation 📈📊 | [][prompttools_github] <br>[][prompttools_colab] |
|
||||||
|
| **Evaluating RAG with RAGAs and GPT-4o 📊** | Evaluate **RAG pipelines** with cutting-edge metrics and tools, integrate with CI/CD for continuous performance checks, and generate responses with GPT-4o 🤖📈 | [][RAGAs_github] <br>[][RAGAs_colab] |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[prompttools_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/prompttools-eval-prompts
|
||||||
|
[prompttools_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/prompttools-eval-prompts/main.ipynb
|
||||||
|
|
||||||
|
[RAGAs_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Evaluating_RAG_with_RAGAs
|
||||||
|
[RAGAs_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Evaluating_RAG_with_RAGAs/Evaluating_RAG_with_RAGAs.ipynb
|
||||||
28
docs/src/examples/python_examples/multimodal.md
Normal file
28
docs/src/examples/python_examples/multimodal.md
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
# **Multimodal Search with LanceDB 🤹♂️🔍**
|
||||||
|
|
||||||
|
Using LanceDB's multimodal capabilities, combine text and image queries to find the most relevant results in your corpus! 🔓💡
|
||||||
|
|
||||||
|
**Explore the Future of Search 🚀**
|
||||||
|
|
||||||
|
LanceDB supports multimodal search by indexing and querying vector representations of text and image data 🤖. This enables efficient retrieval of relevant documents and images using vector-based similarity search 📊. The platform facilitates cross-modal search, allowing for text-image and image-text retrieval, and supports scalable indexing of high-dimensional vector spaces 💻.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
| **Multimodal** | **Description** | **Links** |
|
||||||
|
|:----------------|:-----------------|:-----------|
|
||||||
|
| **Multimodal CLIP: DiffusionDB 🌐💥** | Multi-Modal Search with **CLIP** and **LanceDB** Using **DiffusionDB** Data for Combined Text and Image Understanding ! 🔓 | [][Clip_diffusionDB_github] <br>[][Clip_diffusionDB_colab] <br>[][Clip_diffusionDB_python] <br>[][Clip_diffusionDB_ghost] |
|
||||||
|
| **Multimodal CLIP: Youtube Videos 📹👀** | Search **Youtube videos** using Multimodal CLIP, finding relevant content with ease and accuracy! 🎯 | [][Clip_youtube_github] <br>[][Clip_youtube_colab] <br> [][Clip_youtube_python] <br>[][Clip_youtube_ghost] |
|
||||||
|
| **Multimodal Image + Text Search 📸🔍** | Find **relevant documents** and **images** with a single query using **LanceDB's** multimodal search capabilities, to seamlessly integrate text and visuals ! 🌉 | [](https://github.com/lancedb/vectordb-recipes/blob/main/examples/multimodal_search) <br>[](https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/multimodal_search/main.ipynb) <br> [](https://github.com/lancedb/vectordb-recipes/blob/main/examples/multimodal_search/main.py)<br> [](https://blog.lancedb.com/multi-modal-ai-made-easy-with-lancedb-clip-5aaf8801c939/) |
|
||||||
|
| **Cambrian-1: Vision-Centric Image Exploration 🔍👀** | Learn how **Cambrian-1** works, using an example of **Vision-Centric** exploration on images found through vector search ! Work on **Flickr-8k** dataset 🔎 | [](https://www.kaggle.com/code/prasantdixit/cambrian-1-vision-centric-exploration-of-images/)<br> [](https://blog.lancedb.com/cambrian-1-vision-centric-exploration/) |
|
||||||
|
|
||||||
|
|
||||||
|
[Clip_diffusionDB_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/multimodal_clip_diffusiondb
|
||||||
|
[Clip_diffusionDB_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/multimodal_clip_diffusiondb/main.ipynb
|
||||||
|
[Clip_diffusionDB_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/multimodal_clip_diffusiondb/main.py
|
||||||
|
[Clip_diffusionDB_ghost]: https://blog.lancedb.com/multi-modal-ai-made-easy-with-lancedb-clip-5aaf8801c939/
|
||||||
|
|
||||||
|
|
||||||
|
[Clip_youtube_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/multimodal_video_search
|
||||||
|
[Clip_youtube_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/multimodal_video_search/main.ipynb
|
||||||
|
[Clip_youtube_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/multimodal_video_search/main.py
|
||||||
|
[Clip_youtube_ghost]: https://blog.lancedb.com/multi-modal-ai-made-easy-with-lancedb-clip-5aaf8801c939/
|
||||||
83
docs/src/examples/python_examples/rag.md
Normal file
83
docs/src/examples/python_examples/rag.md
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
**RAG (Retrieval-Augmented Generation) with LanceDB 🔓🧐**
|
||||||
|
====================================================================
|
||||||
|
|
||||||
|
Build RAG (Retrieval-Augmented Generation) with LanceDB, a powerful solution for efficient vector-based information retrieval 📊.
|
||||||
|
|
||||||
|
**Experience the Future of Search 🔄**
|
||||||
|
|
||||||
|
🤖 RAG enables AI to **retrieve** relevant information from external sources and use it to **generate** more accurate and context-specific responses. 💻 LanceDB provides a robust framework for integrating LLMs with external knowledge sources 📝.
|
||||||
|
|
||||||
|
| **RAG** | **Description** | **Links** |
|
||||||
|
|----------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------|
|
||||||
|
| **RAG with Matryoshka Embeddings and LlamaIndex** 🪆🔗 | Utilize **Matryoshka embeddings** and **LlamaIndex** to improve the efficiency and accuracy of your RAG models. 📈✨ | [][matryoshka_github] <br>[][matryoshka_colab] |
|
||||||
|
| **Improve RAG with Re-ranking** 📈🔄 | Enhance your RAG applications by implementing **re-ranking strategies** for more relevant document retrieval. 📚🔍 | [][rag_reranking_github] <br>[][rag_reranking_colab] <br>[][rag_reranking_ghost] |
|
||||||
|
| **Instruct-Multitask** 🧠🎯 | Integrate the **Instruct Embedding Model** with LanceDB to streamline your embedding API, reducing redundant code and overhead. 🌐📊 | [][instruct_multitask_github] <br>[][instruct_multitask_colab] <br>[][instruct_multitask_python] <br>[][instruct_multitask_ghost] |
|
||||||
|
| **Improve RAG with HyDE** 🌌🔍 | Use **Hypothetical Document Embeddings** for efficient, accurate, and unsupervised dense retrieval. 📄🔍 | [][hyde_github] <br>[][hyde_colab]<br>[][hyde_ghost] |
|
||||||
|
| **Improve RAG with LOTR** 🧙♂️📜 | Enhance RAG with **Lord of the Retriever (LOTR)** to address 'Lost in the Middle' challenges, especially in medical data. 🌟📜 | [][lotr_github] <br>[][lotr_colab] <br>[][lotr_ghost] |
|
||||||
|
| **Advanced RAG: Parent Document Retriever** 📑🔗 | Use **Parent Document & Bigger Chunk Retriever** to maintain context and relevance when generating related content. 🎵📄 | [][parent_doc_retriever_github] <br>[][parent_doc_retriever_colab] <br>[][parent_doc_retriever_ghost] |
|
||||||
|
| **Corrective RAG with Langgraph** 🔧📊 | Enhance RAG reliability with **Corrective RAG (CRAG)** by self-reflecting and fact-checking for accurate and trustworthy results. ✅🔍 |[][corrective_rag_github] <br>[][corrective_rag_colab] <br>[][corrective_rag_ghost] |
|
||||||
|
| **Contextual Compression with RAG** 🗜️🧠 | Apply **contextual compression techniques** to condense large documents while retaining essential information. 📄🗜️ | [][compression_rag_github] <br>[][compression_rag_colab] <br>[][compression_rag_ghost] |
|
||||||
|
| **Improve RAG with FLARE** 🔥| Enable users to ask questions directly to **academic papers**, focusing on **ArXiv papers**, with **F**orward-**L**ooking **A**ctive **RE**trieval augmented generation.🚀🌟 | [][flare_github] <br>[][flare_colab] <br>[][flare_ghost] |
|
||||||
|
| **Query Expansion and Reranker** 🔍🔄 | Enhance RAG with query expansion using Large Language Models and advanced **reranking methods** like **Cross Encoders**, **ColBERT v2**, and **FlashRank** for improved document retrieval precision and recall 🔍📈 | [][query_github] <br>[][query_colab] |
|
||||||
|
| **RAG Fusion** ⚡🌐 | Build RAG Fusion, utilize the **RRF algorithm** to rerank documents based on user queries ! Use **LanceDB** as vector database to store and retrieve documents related to queries via **OPENAI Embeddings**⚡🌐 | [][fusion_github] <br>[][fusion_colab] |
|
||||||
|
| **Agentic RAG** 🤖📚 | Build autonomous information retrieval with **Agentic RAG**, a framework of **intelligent agents** that collaborate to synthesize, summarize, and compare data across sources, that enables proactive and informed decision-making 🤖📚 | [][agentic_github] <br>[][agentic_colab] |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[matryoshka_github]: https://github.com/lancedb/vectordb-recipes/blob/main/tutorials/RAG-with_MatryoshkaEmbed-Llamaindex
|
||||||
|
[matryoshka_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/RAG-with_MatryoshkaEmbed-Llamaindex/RAG_with_MatryoshkaEmbedding_and_Llamaindex.ipynb
|
||||||
|
|
||||||
|
[rag_reranking_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/RAG_Reranking
|
||||||
|
[rag_reranking_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/RAG_Reranking/main.ipynb
|
||||||
|
[rag_reranking_ghost]: https://blog.lancedb.com/simplest-method-to-improve-rag-pipeline-re-ranking-cf6eaec6d544
|
||||||
|
|
||||||
|
|
||||||
|
[instruct_multitask_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/instruct-multitask
|
||||||
|
[instruct_multitask_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/instruct-multitask/main.ipynb
|
||||||
|
[instruct_multitask_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/instruct-multitask/main.py
|
||||||
|
[instruct_multitask_ghost]: https://blog.lancedb.com/multitask-embedding-with-lancedb-be18ec397543
|
||||||
|
|
||||||
|
[hyde_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Advance-RAG-with-HyDE
|
||||||
|
[hyde_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Advance-RAG-with-HyDE/main.ipynb
|
||||||
|
[hyde_ghost]: https://blog.lancedb.com/advanced-rag-precise-zero-shot-dense-retrieval-with-hyde-0946c54dfdcb
|
||||||
|
|
||||||
|
[lotr_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Advance_RAG_LOTR
|
||||||
|
[lotr_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Advance_RAG_LOTR/main.ipynb
|
||||||
|
[lotr_ghost]: https://blog.lancedb.com/better-rag-with-lotr-lord-of-retriever-23c8336b9a35
|
||||||
|
|
||||||
|
[parent_doc_retriever_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/parent_document_retriever
|
||||||
|
[parent_doc_retriever_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/parent_document_retriever/main.ipynb
|
||||||
|
[parent_doc_retriever_ghost]: https://blog.lancedb.com/modified-rag-parent-document-bigger-chunk-retriever-62b3d1e79bc6
|
||||||
|
|
||||||
|
[corrective_rag_github]: https://github.com/lancedb/vectordb-recipes/blob/main/tutorials/Corrective-RAG-with_Langgraph
|
||||||
|
[corrective_rag_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Corrective-RAG-with_Langgraph/CRAG_with_Langgraph.ipynb
|
||||||
|
[corrective_rag_ghost]: https://blog.lancedb.com/implementing-corrective-rag-in-the-easiest-way-2/
|
||||||
|
|
||||||
|
[compression_rag_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Contextual-Compression-with-RAG
|
||||||
|
[compression_rag_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Contextual-Compression-with-RAG/main.ipynb
|
||||||
|
[compression_rag_ghost]: https://blog.lancedb.com/enhance-rag-integrate-contextual-compression-and-filtering-for-precision-a29d4a810301/
|
||||||
|
|
||||||
|
[flare_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/better-rag-FLAIR
|
||||||
|
[flare_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/better-rag-FLAIR/main.ipynb
|
||||||
|
[flare_ghost]: https://blog.lancedb.com/better-rag-with-active-retrieval-augmented-generation-flare-3b66646e2a9f/
|
||||||
|
|
||||||
|
[query_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/QueryExpansion&Reranker
|
||||||
|
[query_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/QueryExpansion&Reranker/main.ipynb
|
||||||
|
|
||||||
|
|
||||||
|
[fusion_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/RAG_Fusion
|
||||||
|
[fusion_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/RAG_Fusion/main.ipynb
|
||||||
|
|
||||||
|
[agentic_github]: https://github.com/lancedb/vectordb-recipes/blob/main/tutorials/Agentic_RAG
|
||||||
|
[agentic_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/Agentic_RAG/main.ipynb
|
||||||
|
|
||||||
|
|
||||||
37
docs/src/examples/python_examples/recommendersystem.md
Normal file
37
docs/src/examples/python_examples/recommendersystem.md
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
**Recommender Systems: Personalized Discovery🍿📺**
|
||||||
|
==============================================================
|
||||||
|
Deliver personalized experiences with Recommender Systems. 🎁
|
||||||
|
|
||||||
|
**Technical Overview📜**
|
||||||
|
|
||||||
|
🔍️ LanceDB's powerful vector database capabilities can efficiently store and query item embeddings. Recommender Systems can utilize it and provide personalized recommendations based on user preferences 🤝 and item features 📊 and therefore enhance the user experience.🗂️
|
||||||
|
|
||||||
|
| **Recommender System** | **Description** | **Links** |
|
||||||
|
| ---------------------- | --------------- | --------- |
|
||||||
|
| **Movie Recommender System🎬** | 🤝 Use **collaborative filtering** to predict user preferences, assuming similar users will like similar movies, and leverage **Singular Value Decomposition** (SVD) from Numpy for precise matrix factorization and accurate recommendations📊 | [][movie_github] <br>[][movie_colab] <br>[][movie_python] |
|
||||||
|
| **🎥 Movie Recommendation with Genres** | 🔍 Creates movie embeddings using **Doc2Vec**, capturing genre and characteristic nuances, and leverages VectorDB for efficient storage and querying, enabling accurate genre classification and personalized movie recommendations through **similarity searches**🎥 | [][genre_github] <br>[][genre_colab] <br>[][genre_ghost] |
|
||||||
|
| **🛍️ Product Recommender using Collaborative Filtering and LanceDB** | 📈 Using **Collaborative Filtering** and **LanceDB** to analyze your past purchases, recommends products based on user's past purchases. Demonstrated with the Instacart dataset in our example🛒 | [][product_github] <br>[][product_colab] <br>[][product_python] |
|
||||||
|
| **🔍 Arxiv Search with OpenCLIP and LanceDB** | 💡 Build a semantic search engine for **Arxiv papers** using **LanceDB**, and benchmarks its performance against traditional keyword-based search on **Nomic's Atlas**, to demonstrate the power of semantic search in finding relevant research papers📚 | [][arxiv_github] <br>[][arxiv_colab] <br>[][arxiv_python] |
|
||||||
|
| **Food Recommendation System🍴** | 🍔 Build a food recommendation system with **LanceDB**, featuring vector-based recommendations, full-text search, hybrid search, and reranking model integration for personalized and accurate food suggestions👌 | [][food_github] <br>[][food_colab] |
|
||||||
|
|
||||||
|
[movie_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/movie-recommender
|
||||||
|
[movie_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/movie-recommender/main.ipynb
|
||||||
|
[movie_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/movie-recommender/main.py
|
||||||
|
|
||||||
|
|
||||||
|
[genre_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/movie-recommendation-with-genres
|
||||||
|
[genre_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/movie-recommendation-with-genres/movie_recommendation_with_doc2vec_and_lancedb.ipynb
|
||||||
|
[genre_ghost]: https://blog.lancedb.com/movie-recommendation-system-using-lancedb-and-doc2vec/
|
||||||
|
|
||||||
|
[product_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/product-recommender
|
||||||
|
[product_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/product-recommender/main.ipynb
|
||||||
|
[product_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/product-recommender/main.py
|
||||||
|
|
||||||
|
|
||||||
|
[arxiv_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/arxiv-recommender
|
||||||
|
[arxiv_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/arxiv-recommender/main.ipynb
|
||||||
|
[arxiv_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/arxiv-recommender/main.py
|
||||||
|
|
||||||
|
|
||||||
|
[food_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Food_recommendation
|
||||||
|
[food_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Food_recommendation/main.ipynb
|
||||||
80
docs/src/examples/python_examples/vector_search.md
Normal file
80
docs/src/examples/python_examples/vector_search.md
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
**Vector Search: Efficient Retrieval 🔓👀**
|
||||||
|
====================================================================
|
||||||
|
|
||||||
|
Vector search with LanceDB is a solution for efficient and accurate similarity searches in large datasets 📊.
|
||||||
|
|
||||||
|
**Vector Search Capabilities in LanceDB🔝**
|
||||||
|
|
||||||
|
LanceDB implements vector search algorithms for efficient document retrieval and analysis 📊. This enables fast and accurate discovery of relevant documents, leveraging dense vector representations 🤖. The platform supports scalable indexing and querying of high-dimensional vector spaces, facilitating precise document matching and retrieval 📈.
|
||||||
|
|
||||||
|
| **Vector Search** | **Description** | **Links** |
|
||||||
|
|:-----------------|:---------------|:---------|
|
||||||
|
| **Inbuilt Hybrid Search 🔄** | Perform hybrid search in **LanceDB** by combining the results of semantic and full-text search via a reranking algorithm of your choice 📊 | [][inbuilt_hybrid_search_github] <br>[][inbuilt_hybrid_search_colab] |
|
||||||
|
| **Hybrid Search with BM25 and LanceDB 💡** | Use **Synergizes BM25's** keyword-focused precision (term frequency, document length normalization, bias-free retrieval) with **LanceDB's** semantic understanding (contextual analysis, query intent alignment) for nuanced search results in complex datasets 📈 | [][BM25_github] <br>[][BM25_colab] <br>[][BM25_ghost] |
|
||||||
|
| **NER-powered Semantic Search 🔎** | Extract and identify essential information from text with Named Entity Recognition **(NER)** methods: Dictionary-Based, Rule-Based, and Deep Learning-Based, to accurately extract and categorize entities, enabling precise semantic search results 🗂️ | [][NER_github] <br>[][NER_colab] <br>[][NER_ghost]|
|
||||||
|
| **Audio Similarity Search using Vector Embeddings 🎵** | Create vector **embeddings of audio files** to find similar audio content, enabling efficient audio similarity search and retrieval in **LanceDB's** vector store 📻 |[][audio_search_github] <br>[][audio_search_colab] <br>[][audio_search_python]|
|
||||||
|
| **LanceDB Embeddings API: Multi-lingual Semantic Search 🌎** | Build a universal semantic search table with **LanceDB's Embeddings API**, supporting multiple languages (e.g., English, French) using **cohere's** multi-lingual model, for accurate cross-lingual search results 📄 | [][mls_github] <br>[][mls_colab] <br>[][mls_python] |
|
||||||
|
| **Facial Recognition: Face Embeddings 🤖** | Detect, crop, and embed faces using Facenet, then store and query face embeddings in **LanceDB** for efficient facial recognition and top-K matching results 👥 | [][fr_github] <br>[][fr_colab] |
|
||||||
|
| **Sentiment Analysis: Hotel Reviews 🏨** | Analyze customer sentiments towards the hotel industry using **BERT models**, storing sentiment labels, scores, and embeddings in **LanceDB**, enabling queries on customer opinions and potential areas for improvement 💬 | [][sentiment_analysis_github] <br>[][sentiment_analysis_colab] <br>[][sentiment_analysis_ghost] |
|
||||||
|
| **Vector Arithmetic with LanceDB ⚖️** | Perform **vector arithmetic** on embeddings, enabling complex relationships and nuances in data to be captured, and simplifying the process of retrieving semantically similar results 📊 | [][arithmetic_github] <br>[][arithmetic_colab] <br>[][arithmetic_ghost] |
|
||||||
|
| **Imagebind Demo 🖼️** | Explore the multi-modal capabilities of **Imagebind** through a Gradio app, use **LanceDB API** for seamless image search and retrieval experiences 📸 | [][imagebind_github] <br> [][imagebind_huggingface] |
|
||||||
|
| **Search Engine using SAM & CLIP 🔍** | Build a search engine within an image using **SAM** and **CLIP** models, enabling object-level search and retrieval, with LanceDB indexing and search capabilities to find the closest match between image embeddings and user queries 📸 | [][swi_github] <br>[][swi_colab] <br>[][swi_ghost] |
|
||||||
|
| **Zero Shot Object Localization and Detection with CLIP 🔎** | Perform object detection on images using **OpenAI's CLIP**, enabling zero-shot localization and detection of objects, with capabilities to split images into patches, parse with CLIP, and plot bounding boxes 📊 | [][zsod_github] <br>[][zsod_colab] |
|
||||||
|
| **Accelerate Vector Search with OpenVINO 🚀** | Boost vector search applications using **OpenVINO**, achieving significant speedups with **CLIP** for text-to-image and image-to-image searching, through PyTorch model optimization, FP16 and INT8 format conversion, and quantization with **OpenVINO NNCF** 📈 | [][openvino_github] <br>[][openvino_colab] <br>[][openvino_ghost] |
|
||||||
|
| **Zero-Shot Image Classification with CLIP and LanceDB 📸** | Achieve zero-shot image classification using **CLIP** and **LanceDB**, enabling models to classify images without prior training on specific use cases, unlocking flexible and adaptable image classification capabilities 🔓 | [][zsic_github] <br>[][zsic_colab] <br>[][zsic_ghost] |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[inbuilt_hybrid_search_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Inbuilt-Hybrid-Search
|
||||||
|
[inbuilt_hybrid_search_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Inbuilt-Hybrid-Search/Inbuilt_Hybrid_Search_with_LanceDB.ipynb
|
||||||
|
|
||||||
|
[BM25_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Hybrid_search_bm25_lancedb
|
||||||
|
[BM25_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Hybrid_search_bm25_lancedb/main.ipynb
|
||||||
|
[BM25_ghost]: https://blog.lancedb.com/hybrid-search-combining-bm25-and-semantic-search-for-better-results-with-lan-1358038fe7e6
|
||||||
|
|
||||||
|
[NER_github]: https://github.com/lancedb/vectordb-recipes/blob/main/tutorials/NER-powered-Semantic-Search
|
||||||
|
[NER_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/tutorials/NER-powered-Semantic-Search/NER_powered_Semantic_Search_with_LanceDB.ipynb
|
||||||
|
[NER_ghost]: https://blog.lancedb.com/ner-powered-semantic-search-using-lancedb-51051dc3e493
|
||||||
|
|
||||||
|
[audio_search_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/audio_search
|
||||||
|
[audio_search_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/audio_search/main.ipynb
|
||||||
|
[audio_search_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/audio_search/main.py
|
||||||
|
|
||||||
|
[mls_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/multi-lingual-wiki-qa
|
||||||
|
[mls_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/multi-lingual-wiki-qa/main.ipynb
|
||||||
|
[mls_python]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/multi-lingual-wiki-qa/main.py
|
||||||
|
|
||||||
|
[fr_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/facial_recognition
|
||||||
|
[fr_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/facial_recognition/main.ipynb
|
||||||
|
|
||||||
|
[sentiment_analysis_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Sentiment-Analysis-Analyse-Hotel-Reviews
|
||||||
|
[sentiment_analysis_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Sentiment-Analysis-Analyse-Hotel-Reviews/Sentiment_Analysis_using_LanceDB.ipynb
|
||||||
|
[sentiment_analysis_ghost]: https://blog.lancedb.com/sentiment-analysis-using-lancedb-2da3cb1e3fa6
|
||||||
|
|
||||||
|
[arithmetic_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Vector-Arithmetic-with-LanceDB
|
||||||
|
[arithmetic_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Vector-Arithmetic-with-LanceDB/main.ipynb
|
||||||
|
[arithmetic_ghost]: https://blog.lancedb.com/vector-arithmetic-with-lancedb-an-intro-to-vector-embeddings/
|
||||||
|
|
||||||
|
[imagebind_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/imagebind_demo
|
||||||
|
[imagebind_huggingface]: https://huggingface.co/spaces/raghavd99/imagebind2
|
||||||
|
|
||||||
|
[swi_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/search-within-images-with-sam-and-clip
|
||||||
|
[swi_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/search-within-images-with-sam-and-clip/main.ipynb
|
||||||
|
[swi_ghost]: https://blog.lancedb.com/search-within-an-image-331b54e4285e
|
||||||
|
|
||||||
|
[zsod_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/zero-shot-object-detection-CLIP
|
||||||
|
[zsod_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/zero-shot-object-detection-CLIP/zero_shot_object_detection_clip.ipynb
|
||||||
|
|
||||||
|
[openvino_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/Accelerate-Vector-Search-Applications-Using-OpenVINO
|
||||||
|
[openvino_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Accelerate-Vector-Search-Applications-Using-OpenVINO/clip_text_image_search.ipynb
|
||||||
|
[openvino_ghost]: https://blog.lancedb.com/accelerate-vector-search-applications-using-openvino-lancedb/
|
||||||
|
|
||||||
|
[zsic_github]: https://github.com/lancedb/vectordb-recipes/blob/main/examples/zero-shot-image-classification
|
||||||
|
[zsic_colab]: https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/zero-shot-image-classification/main.ipynb
|
||||||
|
[zsic_ghost]: https://blog.lancedb.com/zero-shot-image-classification-with-vector-search/
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -25,8 +25,8 @@ s3://eto-public/datasets/sift/vec_data.lance
|
|||||||
Then, we can write a quick Python script to populate our LanceDB Table:
|
Then, we can write a quick Python script to populate our LanceDB Table:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import pylance
|
import lance
|
||||||
sift_dataset = pylance.dataset("/path/to/local/vec_data.lance")
|
sift_dataset = lance.dataset("/path/to/local/vec_data.lance")
|
||||||
df = sift_dataset.to_table().to_pandas()
|
df = sift_dataset.to_table().to_pandas()
|
||||||
|
|
||||||
import lancedb
|
import lancedb
|
||||||
|
|||||||
183
docs/src/fts.md
183
docs/src/fts.md
@@ -1,9 +1,14 @@
|
|||||||
# Full-text search
|
# Full-text search
|
||||||
|
|
||||||
LanceDB provides support for full-text search via [Tantivy](https://github.com/quickwit-oss/tantivy) (currently Python only), allowing you to incorporate keyword-based search (based on BM25) in your retrieval solutions. Our goal is to push the FTS integration down to the Rust level in the future, so that it's available for Rust and JavaScript users as well. Follow along at [this Github issue](https://github.com/lancedb/lance/issues/1195)
|
LanceDB provides support for full-text search natively via Lance (previously via [Tantivy](https://github.com/quickwit-oss/tantivy), Python only), allowing you to incorporate keyword-based search (based on BM25) in your retrieval solutions.
|
||||||
|
|
||||||
|
Currently, the Lance full text search is missing some features that are in the Tantivy full text search. This includes phrase queries, re-ranking, and customizing the tokenizer. Thus, in Python, Tantivy is still the default way to do full text search and many of the instructions below apply just to Tantivy-based indices.
|
||||||
|
|
||||||
|
|
||||||
## Installation
|
## Installation (Only for Tantivy-based FTS)
|
||||||
|
|
||||||
|
!!! note
|
||||||
|
No need to install the tantivy dependency if using native FTS
|
||||||
|
|
||||||
To use full-text search, install the dependency [`tantivy-py`](https://github.com/quickwit-oss/tantivy-py):
|
To use full-text search, install the dependency [`tantivy-py`](https://github.com/quickwit-oss/tantivy-py):
|
||||||
|
|
||||||
@@ -14,42 +19,83 @@ pip install tantivy==0.20.1
|
|||||||
|
|
||||||
## Example
|
## Example
|
||||||
|
|
||||||
Consider that we have a LanceDB table named `my_table`, whose string column `text` we want to index and query via keyword search.
|
Consider that we have a LanceDB table named `my_table`, whose string column `text` we want to index and query via keyword search, the FTS index must be created before you can search via keywords.
|
||||||
|
|
||||||
```python
|
=== "Python"
|
||||||
import lancedb
|
|
||||||
|
|
||||||
uri = "data/sample-lancedb"
|
```python
|
||||||
db = lancedb.connect(uri)
|
import lancedb
|
||||||
|
|
||||||
table = db.create_table(
|
uri = "data/sample-lancedb"
|
||||||
"my_table",
|
db = lancedb.connect(uri)
|
||||||
data=[
|
|
||||||
{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"},
|
|
||||||
{"vector": [5.9, 26.5], "text": "There are several kittens playing"},
|
|
||||||
],
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
## Create FTS index on single column
|
table = db.create_table(
|
||||||
|
"my_table",
|
||||||
|
data=[
|
||||||
|
{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"},
|
||||||
|
{"vector": [5.9, 26.5], "text": "There are several kittens playing"},
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
The FTS index must be created before you can search via keywords.
|
# passing `use_tantivy=False` to use lance FTS index
|
||||||
|
# `use_tantivy=True` by default
|
||||||
|
table.create_fts_index("text")
|
||||||
|
table.search("puppy").limit(10).select(["text"]).to_list()
|
||||||
|
# [{'text': 'Frodo was a happy puppy', '_score': 0.6931471824645996}]
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
|
||||||
```python
|
=== "TypeScript"
|
||||||
table.create_fts_index("text")
|
|
||||||
```
|
|
||||||
|
|
||||||
To search an FTS index via keywords, LanceDB's `table.search` accepts a string as input:
|
```typescript
|
||||||
|
import * as lancedb from "@lancedb/lancedb";
|
||||||
|
const uri = "data/sample-lancedb"
|
||||||
|
const db = await lancedb.connect(uri);
|
||||||
|
|
||||||
```python
|
const data = [
|
||||||
table.search("puppy").limit(10).select(["text"]).to_list()
|
{ vector: [3.1, 4.1], text: "Frodo was a happy puppy" },
|
||||||
```
|
{ vector: [5.9, 26.5], text: "There are several kittens playing" },
|
||||||
|
];
|
||||||
|
const tbl = await db.createTable("my_table", data, { mode: "overwrite" });
|
||||||
|
await tbl.createIndex("text", {
|
||||||
|
config: lancedb.Index.fts(),
|
||||||
|
});
|
||||||
|
|
||||||
This returns the result as a list of dictionaries as follows.
|
await tbl
|
||||||
|
    .search("puppy", "fts")
|
||||||
|
.select(["text"])
|
||||||
|
.limit(10)
|
||||||
|
.toArray();
|
||||||
|
```
|
||||||
|
|
||||||
```python
|
=== "Rust"
|
||||||
[{'text': 'Frodo was a happy puppy', 'score': 0.6931471824645996}]
|
|
||||||
```
|
```rust
|
||||||
|
let uri = "data/sample-lancedb";
|
||||||
|
let db = connect(uri).execute().await?;
|
||||||
|
let initial_data: Box<dyn RecordBatchReader + Send> = create_some_records()?;
|
||||||
|
let tbl = db
|
||||||
|
.create_table("my_table", initial_data)
|
||||||
|
.execute()
|
||||||
|
.await?;
|
||||||
|
tbl
|
||||||
|
.create_index(&["text"], Index::FTS(FtsIndexBuilder::default()))
|
||||||
|
.execute()
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
tbl
|
||||||
|
.query()
|
||||||
|
.full_text_search(FullTextSearchQuery::new("puppy".to_owned()))
|
||||||
|
.select(lancedb::query::Select::Columns(vec!["text".to_owned()]))
|
||||||
|
.limit(10)
|
||||||
|
.execute()
|
||||||
|
.await?;
|
||||||
|
```
|
||||||
|
|
||||||
|
It would search on all indexed columns by default, so it's useful when there are multiple indexed columns.
|
||||||
|
For now, this is supported only by Tantivy-based indices.
|
||||||
|
|
||||||
|
Pass `fts_columns="text"` if you want to specify the columns to search; note that this is not available for Tantivy-based full-text search.
|
||||||
|
|
||||||
!!! note
|
!!! note
|
||||||
LanceDB automatically searches on the existing FTS index if the input to the search is of type `str`. If you provide a vector as input, LanceDB will search the ANN index instead.
|
LanceDB automatically searches on the existing FTS index if the input to the search is of type `str`. If you provide a vector as input, LanceDB will search the ANN index instead.
|
||||||
@@ -57,20 +103,33 @@ This returns the result as a list of dictionaries as follows.
|
|||||||
## Tokenization
|
## Tokenization
|
||||||
By default the text is tokenized by splitting on punctuation and whitespaces and then removing tokens that are longer than 40 chars. For more language specific tokenization then provide the argument tokenizer_name with the 2 letter language code followed by "_stem". So for english it would be "en_stem".
|
By default, the text is tokenized by splitting on punctuation and whitespace, and then removing tokens longer than 40 characters. For language-specific tokenization, provide the `tokenizer_name` argument with the two-letter language code followed by "_stem"; for English this would be "en_stem".
|
||||||
|
|
||||||
```python
|
For now, only the Tantivy-based FTS index supports to specify the tokenizer, so it's only available in Python with `use_tantivy=True`.
|
||||||
table.create_fts_index("text", tokenizer_name="en_stem")
|
|
||||||
```
|
|
||||||
|
|
||||||
The following [languages](https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html) are currently supported.
|
=== "use_tantivy=True"
|
||||||
|
|
||||||
|
```python
|
||||||
|
table.create_fts_index("text", use_tantivy=True, tokenizer_name="en_stem")
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "use_tantivy=False"
|
||||||
|
|
||||||
|
[**Not supported yet**](https://github.com/lancedb/lance/issues/1195)
|
||||||
|
|
||||||
|
    The following [languages](https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html) are currently supported.
|
||||||
|
|
||||||
## Index multiple columns
|
## Index multiple columns
|
||||||
|
|
||||||
If you have multiple string columns to index, there's no need to combine them manually -- simply pass them all as a list to `create_fts_index`:
|
If you have multiple string columns to index, there's no need to combine them manually -- simply pass them all as a list to `create_fts_index`:
|
||||||
|
|
||||||
```python
|
=== "use_tantivy=True"
|
||||||
table.create_fts_index(["text1", "text2"])
|
|
||||||
```
|
```python
|
||||||
|
table.create_fts_index(["text1", "text2"])
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "use_tantivy=False"
|
||||||
|
|
||||||
|
[**Not supported yet**](https://github.com/lancedb/lance/issues/1195)
|
||||||
|
|
||||||
Note that the search API call does not change - you can search over all indexed columns at once.
|
Note that the search API call does not change - you can search over all indexed columns at once.
|
||||||
|
|
||||||
@@ -80,19 +139,48 @@ Currently the LanceDB full text search feature supports *post-filtering*, meanin
|
|||||||
applied on top of the full text search results. This can be invoked via the familiar
|
applied on top of the full text search results. This can be invoked via the familiar
|
||||||
`where` syntax:
|
`where` syntax:
|
||||||
|
|
||||||
```python
|
=== "Python"
|
||||||
table.search("puppy").limit(10).where("meta='foo'").to_list()
|
|
||||||
```
|
```python
|
||||||
|
table.search("puppy").limit(10).where("meta='foo'").to_list()
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "TypeScript"
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
await tbl
|
||||||
|
.search("apple")
|
||||||
|
.select(["id", "doc"])
|
||||||
|
.limit(10)
|
||||||
|
.where("meta='foo'")
|
||||||
|
.toArray();
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Rust"
|
||||||
|
|
||||||
|
```rust
|
||||||
|
table
|
||||||
|
.query()
|
||||||
|
.full_text_search(FullTextSearchQuery::new(words[0].to_owned()))
|
||||||
|
.select(lancedb::query::Select::Columns(vec!["doc".to_owned()]))
|
||||||
|
.limit(10)
|
||||||
|
.only_if("meta='foo'")
|
||||||
|
.execute()
|
||||||
|
.await?;
|
||||||
|
```
|
||||||
|
|
||||||
## Sorting
|
## Sorting
|
||||||
|
|
||||||
|
!!! warning "Warn"
|
||||||
|
Sorting is available for only Tantivy-based FTS
|
||||||
|
|
||||||
You can pre-sort the documents by specifying `ordering_field_names` when
|
You can pre-sort the documents by specifying `ordering_field_names` when
|
||||||
creating the full-text search index. Once pre-sorted, you can then specify
|
creating the full-text search index. Once pre-sorted, you can then specify
|
||||||
`ordering_field_name` while searching to return results sorted by the given
|
`ordering_field_name` while searching to return results sorted by the given
|
||||||
field. For example,
|
field. For example,
|
||||||
|
|
||||||
```
|
```python
|
||||||
table.create_fts_index(["text_field"], ordering_field_names=["sort_by_field"])
|
table.create_fts_index(["text_field"], use_tantivy=True, ordering_field_names=["sort_by_field"])
|
||||||
|
|
||||||
(table.search("terms", ordering_field_name="sort_by_field")
|
(table.search("terms", ordering_field_name="sort_by_field")
|
||||||
.limit(20)
|
.limit(20)
|
||||||
@@ -105,8 +193,8 @@ table.create_fts_index(["text_field"], ordering_field_names=["sort_by_field"])
|
|||||||
error will be raised that looks like `ValueError: The field does not exist: xxx`
|
error will be raised that looks like `ValueError: The field does not exist: xxx`
|
||||||
|
|
||||||
!!! note
|
!!! note
|
||||||
The fields to sort on must be of typed unsigned integer, or else you will see
|
The fields to sort on must be of typed unsigned integer, or else you will see
|
||||||
an error during indexing that looks like
|
an error during indexing that looks like
|
||||||
`TypeError: argument 'value': 'float' object cannot be interpreted as an integer`.
|
`TypeError: argument 'value': 'float' object cannot be interpreted as an integer`.
|
||||||
|
|
||||||
!!! note
|
!!! note
|
||||||
@@ -116,6 +204,9 @@ table.create_fts_index(["text_field"], ordering_field_names=["sort_by_field"])
|
|||||||
|
|
||||||
## Phrase queries vs. terms queries
|
## Phrase queries vs. terms queries
|
||||||
|
|
||||||
|
!!! warning "Warn"
|
||||||
|
Lance-based FTS doesn't support queries combining by boolean operators `OR`, `AND`.
|
||||||
|
|
||||||
For full-text search you can specify either a **phrase** query like `"the old man and the sea"`,
|
For full-text search you can specify either a **phrase** query like `"the old man and the sea"`,
|
||||||
or a **terms** search query like `"(Old AND Man) AND Sea"`. For more details on the terms
|
or a **terms** search query like `"(Old AND Man) AND Sea"`. For more details on the terms
|
||||||
query syntax, see Tantivy's [query parser rules](https://docs.rs/tantivy/latest/tantivy/query/struct.QueryParser.html).
|
query syntax, see Tantivy's [query parser rules](https://docs.rs/tantivy/latest/tantivy/query/struct.QueryParser.html).
|
||||||
@@ -142,7 +233,7 @@ enforce it in one of two ways:
|
|||||||
|
|
||||||
1. Place the double-quoted query inside single quotes. For example, `table.search('"they could have been dogs OR cats"')` is treated as
|
1. Place the double-quoted query inside single quotes. For example, `table.search('"they could have been dogs OR cats"')` is treated as
|
||||||
a phrase query.
|
a phrase query.
|
||||||
2. Explicitly declare the `phrase_query()` method. This is useful when you have a phrase query that
|
1. Explicitly declare the `phrase_query()` method. This is useful when you have a phrase query that
|
||||||
itself contains double quotes. For example, `table.search('the cats OR dogs were not really "pets" at all').phrase_query()`
|
itself contains double quotes. For example, `table.search('the cats OR dogs were not really "pets" at all').phrase_query()`
|
||||||
is treated as a phrase query.
|
is treated as a phrase query.
|
||||||
|
|
||||||
@@ -150,7 +241,7 @@ In general, a query that's declared as a phrase query will be wrapped in double
|
|||||||
double quotes replaced by single quotes.
|
double quotes replaced by single quotes.
|
||||||
|
|
||||||
|
|
||||||
## Configurations
|
## Configurations (Only for Tantivy-based FTS)
|
||||||
|
|
||||||
By default, LanceDB configures a 1GB heap size limit for creating the index. You can
|
By default, LanceDB configures a 1GB heap size limit for creating the index. You can
|
||||||
reduce this if running on a smaller node, or increase this for faster performance while
|
reduce this if running on a smaller node, or increase this for faster performance while
|
||||||
@@ -164,6 +255,8 @@ table.create_fts_index(["text1", "text2"], writer_heap_size=heap, replace=True)
|
|||||||
|
|
||||||
## Current limitations
|
## Current limitations
|
||||||
|
|
||||||
|
For that Tantivy-based FTS:
|
||||||
|
|
||||||
1. Currently we do not yet support incremental writes.
|
1. Currently we do not yet support incremental writes.
|
||||||
If you add data after FTS index creation, it won't be reflected
|
If you add data after FTS index creation, it won't be reflected
|
||||||
in search results until you do a full reindex.
|
in search results until you do a full reindex.
|
||||||
|
|||||||
108
docs/src/guides/scalar_index.md
Normal file
108
docs/src/guides/scalar_index.md
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
# Building Scalar Index
|
||||||
|
|
||||||
|
Similar to many SQL databases, LanceDB supports several types of Scalar indices to accelerate search
|
||||||
|
over scalar columns.
|
||||||
|
|
||||||
|
- `BTREE`: The most common type is BTREE. This index is inspired by the btree data structure
|
||||||
|
although only the first few layers of the btree are cached in memory.
|
||||||
|
It will perform well on columns with a large number of unique values and few rows per value.
|
||||||
|
- `BITMAP`: this index stores a bitmap for each unique value in the column.
|
||||||
|
This index is useful for columns with a finite number of unique values and many rows per value.
|
||||||
|
For example, columns that represent "categories", "labels", or "tags"
|
||||||
|
- `LABEL_LIST`: a special index that is used to index list columns whose values have a finite set of possibilities.
|
||||||
|
For example, a column that contains lists of tags (e.g. `["tag1", "tag2", "tag3"]`) can be indexed with a `LABEL_LIST` index.
|
||||||
|
|
||||||
|
| Data Type | Filter | Index Type |
|
||||||
|
| --------------------------------------------------------------- | ----------------------------------------- | ------------ |
|
||||||
|
| Numeric, String, Temporal | `<`, `=`, `>`, `in`, `between`, `is null` | `BTREE` |
|
||||||
|
| Boolean, numbers or strings with fewer than 1,000 unique values | `<`, `=`, `>`, `in`, `between`, `is null` | `BITMAP` |
|
||||||
|
| List of low cardinality of numbers or strings | `array_has_any`, `array_has_all` | `LABEL_LIST` |
|
||||||
|
|
||||||
|
=== "Python"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
books = [
|
||||||
|
{"book_id": 1, "publisher": "plenty of books", "tags": ["fantasy", "adventure"]},
|
||||||
|
{"book_id": 2, "publisher": "book town", "tags": ["non-fiction"]},
|
||||||
|
{"book_id": 3, "publisher": "oreilly", "tags": ["textbook"]}
|
||||||
|
]
|
||||||
|
|
||||||
|
db = lancedb.connect("./db")
|
||||||
|
table = db.create_table("books", books)
|
||||||
|
table.create_scalar_index("book_id") # BTree by default
|
||||||
|
table.create_scalar_index("publisher", index_type="BITMAP")
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Typescript"
|
||||||
|
|
||||||
|
=== "@lancedb/lancedb"
|
||||||
|
|
||||||
|
```js
|
||||||
|
const db = await lancedb.connect("data");
|
||||||
|
const tbl = await db.openTable("my_vectors");
|
||||||
|
|
||||||
|
    await tbl.createIndex("book_id");
|
||||||
|
    await tbl.createIndex("publisher", { config: lancedb.Index.bitmap() })
|
||||||
|
```
|
||||||
|
|
||||||
|
For example, the following scan will be faster if the column `my_col` has a scalar index:
|
||||||
|
|
||||||
|
=== "Python"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
|
||||||
|
table = db.open_table("books")
|
||||||
|
my_df = table.search().where("book_id = 2").to_pandas()
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Typescript"
|
||||||
|
|
||||||
|
=== "@lancedb/lancedb"
|
||||||
|
|
||||||
|
```js
|
||||||
|
const db = await lancedb.connect("data");
|
||||||
|
const tbl = await db.openTable("books");
|
||||||
|
|
||||||
|
await tbl
|
||||||
|
.query()
|
||||||
|
.where("book_id = 2")
|
||||||
|
.limit(10)
|
||||||
|
.toArray();
|
||||||
|
```
|
||||||
|
|
||||||
|
Scalar indices can also speed up scans containing a vector search or full text search, and a prefilter:
|
||||||
|
|
||||||
|
=== "Python"
|
||||||
|
|
||||||
|
```python
|
||||||
|
import lancedb
|
||||||
|
|
||||||
|
data = [
|
||||||
|
{"book_id": 1, "vector": [1, 2]},
|
||||||
|
{"book_id": 2, "vector": [3, 4]},
|
||||||
|
{"book_id": 3, "vector": [5, 6]}
|
||||||
|
]
|
||||||
|
table = db.create_table("book_with_embeddings", data)
|
||||||
|
|
||||||
|
(
|
||||||
|
table.search([1, 2])
|
||||||
|
.where("book_id != 3", prefilter=True)
|
||||||
|
.to_pandas()
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Typescript"
|
||||||
|
|
||||||
|
=== "@lancedb/lancedb"
|
||||||
|
|
||||||
|
```js
|
||||||
|
const db = await lancedb.connect("data/lance");
|
||||||
|
const tbl = await db.openTable("book_with_embeddings");
|
||||||
|
|
||||||
|
await tbl.search(Array(1536).fill(1.2))
|
||||||
|
.where("book_id != 3") // prefilter is default behavior.
|
||||||
|
.limit(10)
|
||||||
|
.toArray();
|
||||||
|
```
|
||||||
@@ -416,7 +416,6 @@ You can create an empty table for scenarios where you want to add data to the ta
|
|||||||
|
|
||||||
=== "Python"
|
=== "Python"
|
||||||
|
|
||||||
```python
|
|
||||||
|
|
||||||
An empty table can be initialized via a PyArrow schema.
|
An empty table can be initialized via a PyArrow schema.
|
||||||
|
|
||||||
|
|||||||
@@ -43,200 +43,32 @@ table.create_fts_index("text")
|
|||||||
# hybrid search with default re-ranker
|
# hybrid search with default re-ranker
|
||||||
results = table.search("flower moon", query_type="hybrid").to_pandas()
|
results = table.search("flower moon", query_type="hybrid").to_pandas()
|
||||||
```
|
```
|
||||||
|
!!! Note
|
||||||
|
You can also pass the vector and text query manually. This is useful if you're not using the embedding API or if you're using a separate embedder service.
|
||||||
|
### Explicitly passing the vector and text query
|
||||||
|
```python
|
||||||
|
vector_query = [0.1, 0.2, 0.3, 0.4, 0.5]
|
||||||
|
text_query = "flower moon"
|
||||||
|
results = table.search(query_type="hybrid")
|
||||||
|
.vector(vector_query)
|
||||||
|
.text(text_query)
|
||||||
|
.limit(5)
|
||||||
|
.to_pandas()
|
||||||
|
|
||||||
By default, LanceDB uses `LinearCombinationReranker(weight=0.7)` to combine and rerank the results of semantic and full-text search. You can customize the hyperparameters as needed or write your own custom reranker. Here's how you can use any of the available rerankers:
|
```
|
||||||
|
|
||||||
|
By default, LanceDB uses `RRFReranker()`, which uses reciprocal rank fusion score, to combine and rerank the results of semantic and full-text search. You can customize the hyperparameters as needed or write your own custom reranker. Here's how you can use any of the available rerankers:
|
||||||
|
|
||||||
|
|
||||||
### `rerank()` arguments
|
### `rerank()` arguments
|
||||||
* `normalize`: `str`, default `"score"`:
|
* `normalize`: `str`, default `"score"`:
|
||||||
The method to normalize the scores. Can be "rank" or "score". If "rank", the scores are converted to ranks and then normalized. If "score", the scores are normalized directly.
|
The method to normalize the scores. Can be "rank" or "score". If "rank", the scores are converted to ranks and then normalized. If "score", the scores are normalized directly.
|
||||||
* `reranker`: `Reranker`, default `LinearCombinationReranker(weight=0.7)`.
|
* `reranker`: `Reranker`, default `RRF()`.
|
||||||
The reranker to use. If not specified, the default reranker is used.
|
The reranker to use. If not specified, the default reranker is used.
|
||||||
|
|
||||||
|
|
||||||
## Available Rerankers
|
## Available Rerankers
|
||||||
LanceDB provides a number of re-rankers out of the box. You can use any of these re-rankers by passing them to the `rerank()` method. Here's a list of available re-rankers:
|
LanceDB provides a number of re-rankers out of the box. You can use any of these re-rankers by passing them to the `rerank()` method.
|
||||||
|
Go to [Rerankers](../reranking/index.md) to learn more about using the available rerankers and implementing custom rerankers.
|
||||||
### Linear Combination Reranker
|
|
||||||
This is the default re-ranker used by LanceDB. It combines the results of semantic and full-text search using a linear combination of the scores. The weights for the linear combination can be specified. It defaults to 0.7, i.e, 70% weight for semantic search and 30% weight for full-text search.
|
|
||||||
|
|
||||||
|
|
||||||
```python
|
|
||||||
from lancedb.rerankers import LinearCombinationReranker
|
|
||||||
|
|
||||||
reranker = LinearCombinationReranker(weight=0.3) # Use 0.3 as the weight for vector search
|
|
||||||
|
|
||||||
results = table.search("rebel", query_type="hybrid").rerank(reranker=reranker).to_pandas()
|
|
||||||
```
|
|
||||||
|
|
||||||
### Arguments
|
|
||||||
----------------
|
|
||||||
* `weight`: `float`, default `0.7`:
|
|
||||||
The weight to use for the semantic search score. The weight for the full-text search score is `1 - weight`.
|
|
||||||
* `fill`: `float`, default `1.0`:
|
|
||||||
The score to give to results that are only in one of the two result sets. This is treated as a penalty, so a higher value means a lower score.
|
|
||||||
TODO: We should just hardcode this-- its pretty confusing as we invert scores to calculate final score
|
|
||||||
* `return_score` : str, default `"relevance"`
|
|
||||||
options are "relevance" or "all"
|
|
||||||
The type of score to return. If "relevance", will return only the `_relevance_score`. If "all", will return all scores from the vector and FTS search along with the relevance score.
|
|
||||||
|
|
||||||
### Cohere Reranker
|
|
||||||
This re-ranker uses the [Cohere](https://cohere.ai/) API to combine the results of semantic and full-text search. You can use this re-ranker by passing `CohereReranker()` to the `rerank()` method. Note that you'll need to set the `COHERE_API_KEY` environment variable to use this re-ranker.
|
|
||||||
|
|
||||||
```python
|
|
||||||
from lancedb.rerankers import CohereReranker
|
|
||||||
|
|
||||||
reranker = CohereReranker()
|
|
||||||
|
|
||||||
results = table.search("vampire weekend", query_type="hybrid").rerank(reranker=reranker).to_pandas()
|
|
||||||
```
|
|
||||||
|
|
||||||
### Arguments
|
|
||||||
----------------
|
|
||||||
* `model_name` : str, default `"rerank-english-v2.0"`
|
|
||||||
The name of the cross encoder model to use. Available cohere models are:
|
|
||||||
- rerank-english-v2.0
|
|
||||||
- rerank-multilingual-v2.0
|
|
||||||
* `column` : str, default `"text"`
|
|
||||||
The name of the column to use as input to the cross encoder model.
|
|
||||||
* `top_n` : str, default `None`
|
|
||||||
The number of results to return. If None, will return all results.
|
|
||||||
|
|
||||||
!!! Note
|
|
||||||
Only returns `_relevance_score`. Does not support `return_score = "all"`.
|
|
||||||
|
|
||||||
### Cross Encoder Reranker
|
|
||||||
This reranker uses the [Sentence Transformers](https://www.sbert.net/) library to combine the results of semantic and full-text search. You can use it by passing `CrossEncoderReranker()` to the `rerank()` method.
|
|
||||||
|
|
||||||
```python
|
|
||||||
from lancedb.rerankers import CrossEncoderReranker
|
|
||||||
|
|
||||||
reranker = CrossEncoderReranker()
|
|
||||||
|
|
||||||
results = table.search("harmony hall", query_type="hybrid").rerank(reranker=reranker).to_pandas()
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### Arguments
|
|
||||||
----------------
|
|
||||||
* `model` : str, default `"cross-encoder/ms-marco-TinyBERT-L-6"`
|
|
||||||
The name of the cross encoder model to use. Available cross encoder models can be found [here](https://www.sbert.net/docs/pretrained_cross-encoders.html)
|
|
||||||
* `column` : str, default `"text"`
|
|
||||||
The name of the column to use as input to the cross encoder model.
|
|
||||||
* `device` : str, default `None`
|
|
||||||
The device to use for the cross encoder model. If None, will use "cuda" if available, otherwise "cpu".
|
|
||||||
|
|
||||||
!!! Note
|
|
||||||
Only returns `_relevance_score`. Does not support `return_score = "all"`.
|
|
||||||
|
|
||||||
|
|
||||||
### ColBERT Reranker
|
|
||||||
This reranker uses the ColBERT model to combine the results of semantic and full-text search. You can use it by passing `ColbertReranker()` to the `rerank()` method.
|
|
||||||
|
|
||||||
The ColBERT reranker model calculates the relevance of the given docs against the query and doesn't take existing FTS and vector search scores into account, so it currently only supports `return_score="relevance"`. By default, it looks for the `text` column to rerank the results. But you can specify the column name to use as input to the cross encoder model as described below.
|
|
||||||
|
|
||||||
```python
|
|
||||||
from lancedb.rerankers import ColbertReranker
|
|
||||||
|
|
||||||
reranker = ColbertReranker()
|
|
||||||
|
|
||||||
results = table.search("harmony hall", query_type="hybrid").rerank(reranker=reranker).to_pandas()
|
|
||||||
```
|
|
||||||
|
|
||||||
### Arguments
|
|
||||||
----------------
|
|
||||||
* `model_name` : `str`, default `"colbert-ir/colbertv2.0"`
|
|
||||||
The name of the cross encoder model to use.
|
|
||||||
* `column` : `str`, default `"text"`
|
|
||||||
The name of the column to use as input to the cross encoder model.
|
|
||||||
* `return_score` : `str`, default `"relevance"`
|
|
||||||
options are `"relevance"` or `"all"`. Only `"relevance"` is supported for now.
|
|
||||||
|
|
||||||
!!! Note
|
|
||||||
Only returns `_relevance_score`. Does not support `return_score = "all"`.
|
|
||||||
|
|
||||||
### OpenAI Reranker
|
|
||||||
This reranker uses the OpenAI API to combine the results of semantic and full-text search. You can use it by passing `OpenaiReranker()` to the `rerank()` method.
|
|
||||||
|
|
||||||
!!! Note
|
|
||||||
This prompts chat model to rerank results which is not a dedicated reranker model. This should be treated as experimental.
|
|
||||||
|
|
||||||
!!! Tip
|
|
||||||
- You might run out of token limit so set the search `limits` based on your token limit.
|
|
||||||
- It is recommended to use gpt-4-turbo-preview, the default model, older models might lead to undesired behaviour
|
|
||||||
|
|
||||||
```python
|
|
||||||
from lancedb.rerankers import OpenaiReranker
|
|
||||||
|
|
||||||
reranker = OpenaiReranker()
|
|
||||||
|
|
||||||
results = table.search("harmony hall", query_type="hybrid").rerank(reranker=reranker).to_pandas()
|
|
||||||
```
|
|
||||||
|
|
||||||
### Arguments
|
|
||||||
----------------
|
|
||||||
* `model_name` : `str`, default `"gpt-4-turbo-preview"`
|
|
||||||
The name of the cross encoder model to use.
|
|
||||||
* `column` : `str`, default `"text"`
|
|
||||||
The name of the column to use as input to the cross encoder model.
|
|
||||||
* `return_score` : `str`, default `"relevance"`
|
|
||||||
options are "relevance" or "all". Only "relevance" is supported for now.
|
|
||||||
* `api_key` : `str`, default `None`
|
|
||||||
The API key to use. If None, will use the OPENAI_API_KEY environment variable.
|
|
||||||
|
|
||||||
|
|
||||||
## Building Custom Rerankers
|
|
||||||
You can build your own custom reranker by subclassing the `Reranker` class and implementing the `rerank_hybrid()` method. Here's an example of a custom reranker that combines the results of semantic and full-text search using a linear combination of the scores.
|
|
||||||
|
|
||||||
The `Reranker` base interface comes with a `merge_results()` method that can be used to combine the results of semantic and full-text search. This is a vanilla merging algorithm that simply concatenates the results and removes the duplicates without taking the scores into consideration. It only keeps the first copy of the row encountered. This works well in cases that don't require the scores of semantic and full-text search to combine the results. If you want to use the scores or want to support `return_score="all"`, you'll need to implement your own merging algorithm.
|
|
||||||
|
|
||||||
```python
|
|
||||||
|
|
||||||
from lancedb.rerankers import Reranker
|
|
||||||
import pyarrow as pa
|
|
||||||
|
|
||||||
class MyReranker(Reranker):
|
|
||||||
def __init__(self, param1, param2, ..., return_score="relevance"):
|
|
||||||
super().__init__(return_score)
|
|
||||||
self.param1 = param1
|
|
||||||
self.param2 = param2
|
|
||||||
|
|
||||||
def rerank_hybrid(self, query: str, vector_results: pa.Table, fts_results: pa.Table):
|
|
||||||
# Use the built-in merging function
|
|
||||||
combined_result = self.merge_results(vector_results, fts_results)
|
|
||||||
|
|
||||||
# Do something with the combined results
|
|
||||||
# ...
|
|
||||||
|
|
||||||
# Return the combined results
|
|
||||||
return combined_result
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
### Example of a Custom Reranker
|
|
||||||
For the sake of simplicity, let's build a custom reranker that just enhances the Cohere Reranker by accepting a filter query, and accepts other CohereReranker params as kwargs.
|
|
||||||
|
|
||||||
```python
|
|
||||||
|
|
||||||
from typing import List, Union
|
|
||||||
import pandas as pd
|
|
||||||
from lancedb.rerankers import CohereReranker
|
|
||||||
|
|
||||||
class ModifiedCohereReranker(CohereReranker):
|
|
||||||
def __init__(self, filters: Union[str, List[str]], **kwargs):
|
|
||||||
super().__init__(**kwargs)
|
|
||||||
filters = filters if isinstance(filters, list) else [filters]
|
|
||||||
self.filters = filters
|
|
||||||
|
|
||||||
def rerank_hybrid(self, query: str, vector_results: pa.Table, fts_results: pa.Table)-> pa.Table:
|
|
||||||
combined_result = super().rerank_hybrid(query, vector_results, fts_results)
|
|
||||||
df = combined_result.to_pandas()
|
|
||||||
for filter in self.filters:
|
|
||||||
df = df.query("not text.str.contains(@filter)")
|
|
||||||
|
|
||||||
return pa.Table.from_pandas(df)
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! tip
|
|
||||||
The `vector_results` and `fts_results` are pyarrow tables. You can convert them to pandas dataframes using `to_pandas()` method and perform any operations you want. After you are done, you can convert the dataframe back to pyarrow table using `pa.Table.from_pandas()` method and return it.
|
|
||||||
|
|||||||
142
docs/src/integrations/dlt.md
Normal file
142
docs/src/integrations/dlt.md
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
# dlt
|
||||||
|
|
||||||
|
[dlt](https://dlthub.com/docs/intro) is an open-source library that you can add to your Python scripts to load data from various and often messy data sources into well-structured, live datasets. dlt's [integration with LanceDB](https://dlthub.com/docs/dlt-ecosystem/destinations/lancedb) lets you ingest data from any source (databases, APIs, CSVs, dataframes, JSONs, and more) into LanceDB with a few lines of simple python code. The integration enables automatic normalization of nested data, schema inference, incremental loading and embedding the data. dlt also has integrations with several other tools like dbt, airflow, dagster etc. that can be inserted into your LanceDB workflow.
|
||||||
|
|
||||||
|
## How to ingest data into LanceDB
|
||||||
|
|
||||||
|
In this example, we will be fetching movie information from the [Open Movie Database (OMDb) API](https://www.omdbapi.com/) and loading it into a local LanceDB instance. To implement it, you will need an API key for the OMDb API (which can be created freely [here](https://www.omdbapi.com/apikey.aspx)).
|
||||||
|
|
||||||
|
1. **Install `dlt` with LanceDB extras:**
|
||||||
|
```sh
|
||||||
|
pip install dlt[lancedb]
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Inside an empty directory, initialize a `dlt` project with:**
|
||||||
|
```sh
|
||||||
|
dlt init rest_api lancedb
|
||||||
|
```
|
||||||
|
This will add all the files necessary to create a `dlt` pipeline that can ingest data from any REST API (ex: OMDb API) and load into LanceDB.
|
||||||
|
```text
|
||||||
|
├── .dlt
|
||||||
|
│ ├── config.toml
|
||||||
|
│ └── secrets.toml
|
||||||
|
├── rest_api
|
||||||
|
├── rest_api_pipeline.py
|
||||||
|
└── requirements.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
dlt has a list of pre-built [sources](https://dlthub.com/docs/dlt-ecosystem/verified-sources/) like [SQL databases](https://dlthub.com/docs/dlt-ecosystem/verified-sources/sql_database), [REST APIs](https://dlthub.com/docs/dlt-ecosystem/verified-sources/rest_api), [Google Sheets](https://dlthub.com/docs/dlt-ecosystem/verified-sources/google_sheets), [Notion](https://dlthub.com/docs/dlt-ecosystem/verified-sources/notion) etc., that can be used out-of-the-box by running `dlt init <source_name> lancedb`. Since dlt is a python library, it is also very easy to modify these pre-built sources or to write your own custom source from scratch.
|
||||||
|
|
||||||
|
|
||||||
|
3. **Specify necessary credentials and/or embedding model details:**
|
||||||
|
|
||||||
|
In order to fetch data from the OMDb API, you will need to pass a valid API key into your pipeline. Depending on whether you're using LanceDB OSS or LanceDB Cloud, you also may need to provide the necessary credentials to connect to the LanceDB instance. These can be pasted inside `.dlt/secrets.toml`.
|
||||||
|
|
||||||
|
dlt's LanceDB integration also allows you to automatically embed the data during ingestion. Depending on the embedding model chosen, you may need to paste the necessary credentials inside `.dlt/secrets.toml`:
|
||||||
|
```toml
|
||||||
|
[sources.rest_api]
|
||||||
|
api_key = "api_key" # Enter the API key for the OMDb API
|
||||||
|
|
||||||
|
[destination.lancedb]
|
||||||
|
embedding_model_provider = "sentence-transformers"
|
||||||
|
embedding_model = "all-MiniLM-L6-v2"
|
||||||
|
[destination.lancedb.credentials]
|
||||||
|
uri = ".lancedb"
|
||||||
|
api_key = "api_key" # API key to connect to LanceDB Cloud. Leave out if you are using LanceDB OSS.
|
||||||
|
embedding_model_provider_api_key = "embedding_model_provider_api_key" # Not needed for providers that don't need authentication (ollama, sentence-transformers).
|
||||||
|
```
|
||||||
|
See [here](https://dlthub.com/docs/dlt-ecosystem/destinations/lancedb#configure-the-destination) for more information and for a list of available models and model providers.
|
||||||
|
|
||||||
|
|
||||||
|
4. **Write the pipeline code inside `rest_api_pipeline.py`:**
|
||||||
|
|
||||||
|
The following code shows how you can configure dlt's REST API source to connect to the [OMDb API](https://www.omdbapi.com/), fetch all movies with the word "godzilla" in the title, and load it into a LanceDB table. The REST API source allows you to pull data from any API with minimal code; to learn more, read the [dlt docs](https://dlthub.com/docs/dlt-ecosystem/verified-sources/rest_api).
|
||||||
|
|
||||||
|
```python
|
||||||
|
|
||||||
|
# Import necessary modules
|
||||||
|
import dlt
|
||||||
|
from rest_api import rest_api_source
|
||||||
|
|
||||||
|
# Configure the REST API source
|
||||||
|
movies_source = rest_api_source(
|
||||||
|
{
|
||||||
|
"client": {
|
||||||
|
"base_url": "https://www.omdbapi.com/",
|
||||||
|
"auth": { # authentication strategy for the OMDb API
|
||||||
|
"type": "api_key",
|
||||||
|
"name": "apikey",
|
||||||
|
"api_key": dlt.secrets["sources.rest_api.api_token"], # read API credentials directly from secrets.toml
|
||||||
|
"location": "query"
|
||||||
|
},
|
||||||
|
"paginator": { # pagination strategy for the OMDb API
|
||||||
|
"type": "page_number",
|
||||||
|
"base_page": 1,
|
||||||
|
"total_path": "totalResults",
|
||||||
|
"maximum_page": 5
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"resources": [ # list of API endpoints to request
|
||||||
|
{
|
||||||
|
"name": "movie_search",
|
||||||
|
"endpoint": {
|
||||||
|
"path": "/",
|
||||||
|
"params": {
|
||||||
|
"s": "godzilla",
|
||||||
|
"type": "movie"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
# Create a pipeline object
|
||||||
|
pipeline = dlt.pipeline(
|
||||||
|
pipeline_name='movies_pipeline',
|
||||||
|
destination='lancedb', # this tells dlt to load the data into LanceDB
|
||||||
|
dataset_name='movies_data_pipeline',
|
||||||
|
)
|
||||||
|
|
||||||
|
# Run the pipeline
|
||||||
|
load_info = pipeline.run(movies_source)
|
||||||
|
|
||||||
|
# pretty print the information on data that was loaded
|
||||||
|
print(load_info)
|
||||||
|
```
|
||||||
|
|
||||||
|
The script above will ingest the data into LanceDB as it is, i.e. without creating any embeddings. If we want to embed one of the fields (for example, `"Title"` that contains the movie titles), then we will use dlt's `lancedb_adapter` and modify the script as follows:
|
||||||
|
|
||||||
|
- Add the following import statement:
|
||||||
|
```python
|
||||||
|
from dlt.destinations.adapters import lancedb_adapter
|
||||||
|
```
|
||||||
|
- Modify the pipeline run like this:
|
||||||
|
```python
|
||||||
|
load_info = pipeline.run(
|
||||||
|
lancedb_adapter(
|
||||||
|
movies_source,
|
||||||
|
embed="Title",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
```
|
||||||
|
This will use the embedding model specified inside `.dlt/secrets.toml` to embed the field `"Title"`.
|
||||||
|
|
||||||
|
5. **Install necessary dependencies:**
|
||||||
|
```sh
|
||||||
|
pip install -r requirements.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
Note: You may need to install the dependencies for your embedding models separately.
|
||||||
|
```sh
|
||||||
|
pip install sentence-transformers
|
||||||
|
```
|
||||||
|
|
||||||
|
6. **Run the pipeline:**
|
||||||
|
Finally, running the following command will ingest the data into your LanceDB instance.
|
||||||
|
```sh
|
||||||
|
python rest_api_pipeline.py
|
||||||
|
```
|
||||||
|
|
||||||
|
For more information and advanced usage of dlt's LanceDB integration, read [the dlt documentation](https://dlthub.com/docs/dlt-ecosystem/destinations/lancedb).
|
||||||
@@ -1,378 +0,0 @@
|
|||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"id": "13cb272e",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Code documentation Q&A bot example with LangChain\n",
|
|
||||||
"\n",
|
|
||||||
"This Q&A bot will allow you to query your own documentation easily using questions. We'll also demonstrate the use of LangChain and LanceDB using the OpenAI API. \n",
|
|
||||||
"\n",
|
|
||||||
"In this example we'll use Pandas 2.0 documentation, but, this could be replaced for your own docs as well\n",
|
|
||||||
"\n",
|
|
||||||
"<a href=\"https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Code-Documentation-QA-Bot/main.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
|
|
||||||
"\n",
|
|
||||||
"Scripts - [](./examples/Code-Documentation-QA-Bot/main.py) [](./examples/Code-Documentation-QA-Bot/index.js)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 40,
|
|
||||||
"id": "66638d6c",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"!pip install --quiet openai langchain\n",
|
|
||||||
"!pip install --quiet -U lancedb"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"id": "d1cdcac3",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"First, let's get some setup out of the way. As we're using the OpenAI API, ensure that you've set your key (and organization if needed):"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 42,
|
|
||||||
"id": "58ee1868",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from openai import OpenAI\n",
|
|
||||||
"import os\n",
|
|
||||||
"\n",
|
|
||||||
"# Configuring the environment variable OPENAI_API_KEY\n",
|
|
||||||
"if \"OPENAI_API_KEY\" not in os.environ:\n",
|
|
||||||
" os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n",
|
|
||||||
"client = OpenAI()\n",
|
|
||||||
"assert len(client.models.list().data) > 0"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"id": "34f524d3",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Loading in our code documentation, generating embeddings and storing our documents in LanceDB\n",
|
|
||||||
"\n",
|
|
||||||
"We're going to use the power of LangChain to help us create our Q&A bot. It comes with several APIs that can make our development much easier as well as a LanceDB integration for vectorstore."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 43,
|
|
||||||
"id": "b55d22f1",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import lancedb\n",
|
|
||||||
"import re\n",
|
|
||||||
"import pickle\n",
|
|
||||||
"import requests\n",
|
|
||||||
"import zipfile\n",
|
|
||||||
"from pathlib import Path\n",
|
|
||||||
"\n",
|
|
||||||
"from langchain.document_loaders import UnstructuredHTMLLoader\n",
|
|
||||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
|
||||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
|
||||||
"from langchain.vectorstores import LanceDB\n",
|
|
||||||
"from langchain.llms import OpenAI\n",
|
|
||||||
"from langchain.chains import RetrievalQA"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"id": "56cc6d50",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"To make this easier, we've downloaded Pandas documentation and stored the raw HTML files for you to download. We'll download them and then use LangChain's HTML document readers to parse them and store them in LanceDB as a vector store, along with relevant metadata."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"id": "7da77e75",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"pandas_docs = requests.get(\"https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip\")\n",
|
|
||||||
"with open('/tmp/pandas.documentation.zip', 'wb') as f:\n",
|
|
||||||
" f.write(pandas_docs.content)\n",
|
|
||||||
"\n",
|
|
||||||
"file = zipfile.ZipFile(\"/tmp/pandas.documentation.zip\")\n",
|
|
||||||
"file.extractall(path=\"/tmp/pandas_docs\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"id": "ae42496c",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"We'll create a simple helper function that can help to extract metadata, so we can use this downstream when we're wanting to query with filters. In this case, we want to keep the lineage of the uri or path for each document that we process:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 44,
|
|
||||||
"id": "d171d062",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"def get_document_title(document):\n",
|
|
||||||
" m = str(document.metadata[\"source\"])\n",
|
|
||||||
" title = re.findall(\"pandas.documentation(.*).html\", m)\n",
|
|
||||||
" if title[0] is not None:\n",
|
|
||||||
" return(title[0])\n",
|
|
||||||
" return ''"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"id": "130162ad",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Pre-processing and loading the documentation\n",
|
|
||||||
"\n",
|
|
||||||
"Next, let's pre-process and load the documentation. To make sure we don't need to do this repeatedly if we were updating code, we're caching it using pickle so we can retrieve it again (this could take a few minutes to run the first time you do it). We'll also add some more metadata to the docs here such as the title and version of the code:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 45,
|
|
||||||
"id": "33bfe7d8",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"docs_path = Path(\"docs.pkl\")\n",
|
|
||||||
"docs = []\n",
|
|
||||||
"\n",
|
|
||||||
"if not docs_path.exists():\n",
|
|
||||||
" for p in Path(\"/tmp/pandas_docs/pandas.documentation\").rglob(\"*.html\"):\n",
|
|
||||||
" print(p)\n",
|
|
||||||
" if p.is_dir():\n",
|
|
||||||
" continue\n",
|
|
||||||
" loader = UnstructuredHTMLLoader(p)\n",
|
|
||||||
" raw_document = loader.load()\n",
|
|
||||||
" \n",
|
|
||||||
" m = {}\n",
|
|
||||||
" m[\"title\"] = get_document_title(raw_document[0])\n",
|
|
||||||
" m[\"version\"] = \"2.0rc0\"\n",
|
|
||||||
" raw_document[0].metadata = raw_document[0].metadata | m\n",
|
|
||||||
" raw_document[0].metadata[\"source\"] = str(raw_document[0].metadata[\"source\"])\n",
|
|
||||||
" docs = docs + raw_document\n",
|
|
||||||
"\n",
|
|
||||||
" with docs_path.open(\"wb\") as fh:\n",
|
|
||||||
" pickle.dump(docs, fh)\n",
|
|
||||||
"else:\n",
|
|
||||||
" with docs_path.open(\"rb\") as fh:\n",
|
|
||||||
" docs = pickle.load(fh)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"id": "c3852dd3",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Generating embeddings from our docs\n",
|
|
||||||
"\n",
|
|
||||||
"Now that we have our raw documents loaded, we need to pre-process them to generate embeddings:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 47,
|
|
||||||
"id": "82230563",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"text_splitter = RecursiveCharacterTextSplitter(\n",
|
|
||||||
" chunk_size=1000,\n",
|
|
||||||
" chunk_overlap=200,\n",
|
|
||||||
")\n",
|
|
||||||
"documents = text_splitter.split_documents(docs)\n",
|
|
||||||
"embeddings = OpenAIEmbeddings()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"id": "43e68215",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"# Storing and querying with LanceDB\n",
|
|
||||||
"\n",
|
|
||||||
"Let's connect to LanceDB so we can store our documents. We'll create a Table to store them in:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 48,
|
|
||||||
"id": "74780a58",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"db = lancedb.connect('/tmp/lancedb')\n",
|
|
||||||
"table = db.create_table(\"pandas_docs\", data=[\n",
|
|
||||||
" {\"vector\": embeddings.embed_query(\"Hello World\"), \"text\": \"Hello World\", \"id\": \"1\"}\n",
|
|
||||||
"], mode=\"overwrite\")\n",
|
|
||||||
"docsearch = LanceDB.from_documents(documents, embeddings, connection=table)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"id": "3cb1dc5d",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"Now let's create our RetrievalQA chain using the LanceDB vector store:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 49,
|
|
||||||
"id": "6a5891ad",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", retriever=docsearch.as_retriever())"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"id": "28d93b85",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"And that's it! We're all set up. The next step is to run some queries, let's try a few:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 50,
|
|
||||||
"id": "70d88316",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"data": {
|
|
||||||
"text/plain": [
|
|
||||||
"' The major differences in pandas 2.0 include installing optional dependencies with pip extras, the ability to use any numpy numeric dtype in an Index, and enhancements, notable bug fixes, backwards incompatible API changes, deprecations, and performance improvements.'"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"execution_count": 50,
|
|
||||||
"metadata": {},
|
|
||||||
"output_type": "execute_result"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"query = \"What are the major differences in pandas 2.0?\"\n",
|
|
||||||
"qa.run(query)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 51,
|
|
||||||
"id": "85a0397c",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"data": {
|
|
||||||
"text/plain": [
|
|
||||||
"' 2.0.0rc0'"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"execution_count": 51,
|
|
||||||
"metadata": {},
|
|
||||||
"output_type": "execute_result"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"query = \"What's the current version of pandas?\"\n",
|
|
||||||
"qa.run(query)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 52,
|
|
||||||
"id": "923f86c6",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"data": {
|
|
||||||
"text/plain": [
|
|
||||||
"' Optional dependencies can be installed with pip install \"pandas[all]\" or \"pandas[performance]\". This will install all recommended performance dependencies such as numexpr, bottleneck and numba.'"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"execution_count": 52,
|
|
||||||
"metadata": {},
|
|
||||||
"output_type": "execute_result"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"query = \"How do I make use of installing optional dependencies?\"\n",
|
|
||||||
"qa.run(query)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 53,
|
|
||||||
"id": "02082f83",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"data": {
|
|
||||||
"text/plain": [
|
|
||||||
"\" \\n\\nPandas 2.0 includes a number of API breaking changes, such as increased minimum versions for dependencies, the use of os.linesep for DataFrame.to_csv's line_terminator, and reorganization of the library. See the release notes for a full list of changes.\""
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"execution_count": 53,
|
|
||||||
"metadata": {},
|
|
||||||
"output_type": "execute_result"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"query = \"What are the backwards incompatible API changes in Pandas 2.0?\"\n",
|
|
||||||
"qa.run(query)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"id": "75cea547",
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": []
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3 (ipykernel)",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python3"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.10.11"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 5
|
|
||||||
}
|
|
||||||
File diff suppressed because one or more lines are too long
@@ -1,297 +0,0 @@
|
|||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"\n",
|
|
||||||
"\n",
|
|
||||||
" <a href=\"https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/multimodal_clip/main.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>| [](./examples/multimodal_clip/main.py) |"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 2,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"name": "stdout",
|
|
||||||
"output_type": "stream",
|
|
||||||
"text": [
|
|
||||||
"\n",
|
|
||||||
"\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip available: \u001B[0m\u001B[31;49m22.3.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m23.1.2\u001B[0m\n",
|
|
||||||
"\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpip install --upgrade pip\u001B[0m\n",
|
|
||||||
"\n",
|
|
||||||
"\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip available: \u001B[0m\u001B[31;49m22.3.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m23.1.2\u001B[0m\n",
|
|
||||||
"\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpip install --upgrade pip\u001B[0m\n"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"!pip install --quiet -U lancedb\n",
|
|
||||||
"!pip install --quiet gradio transformers torch torchvision"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 1,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import io\n",
|
|
||||||
"\n",
|
|
||||||
"import PIL\n",
|
|
||||||
"import duckdb\n",
|
|
||||||
"import lancedb"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## First run setup: Download data and pre-process"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"### Get dataset\n",
|
|
||||||
"\n",
|
|
||||||
"!wget https://eto-public.s3.us-west-2.amazonaws.com/datasets/diffusiondb_lance.tar.gz\n",
|
|
||||||
"!tar -xvf diffusiondb_lance.tar.gz\n",
|
|
||||||
"!mv diffusiondb_test rawdata.lance\n"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 30,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"data": {
|
|
||||||
"text/plain": [
|
|
||||||
"<lance.dataset.LanceDataset at 0x3045db590>"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"execution_count": 30,
|
|
||||||
"metadata": {},
|
|
||||||
"output_type": "execute_result"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"# remove null prompts\n",
|
|
||||||
"import lance\n",
|
|
||||||
"import pyarrow.compute as pc\n",
|
|
||||||
"\n",
|
|
||||||
"# download s3://eto-public/datasets/diffusiondb/small_10k.lance to this uri\n",
|
|
||||||
"data = lance.dataset(\"~/datasets/rawdata.lance\").to_table()\n",
|
|
||||||
"\n",
|
|
||||||
"# First data processing and full-text-search index\n",
|
|
||||||
"db = lancedb.connect(\"~/datasets/demo\")\n",
|
|
||||||
"tbl = db.create_table(\"diffusiondb\", data.filter(~pc.field(\"prompt\").is_null()))\n",
|
|
||||||
"tbl = tbl.create_fts_index([\"prompt\"])"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Create / Open LanceDB Table"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 2,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"db = lancedb.connect(\"~/datasets/demo\")\n",
|
|
||||||
"tbl = db.open_table(\"diffusiondb\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Create CLIP embedding function for the text"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 3,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n",
|
|
||||||
"\n",
|
|
||||||
"MODEL_ID = \"openai/clip-vit-base-patch32\"\n",
|
|
||||||
"\n",
|
|
||||||
"tokenizer = CLIPTokenizerFast.from_pretrained(MODEL_ID)\n",
|
|
||||||
"model = CLIPModel.from_pretrained(MODEL_ID)\n",
|
|
||||||
"processor = CLIPProcessor.from_pretrained(MODEL_ID)\n",
|
|
||||||
"\n",
|
|
||||||
"def embed_func(query):\n",
|
|
||||||
" inputs = tokenizer([query], padding=True, return_tensors=\"pt\")\n",
|
|
||||||
" text_features = model.get_text_features(**inputs)\n",
|
|
||||||
" return text_features.detach().numpy()[0]"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Search functions for Gradio"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 4,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"def find_image_vectors(query):\n",
|
|
||||||
" emb = embed_func(query)\n",
|
|
||||||
" code = (\n",
|
|
||||||
" \"import lancedb\\n\"\n",
|
|
||||||
" \"db = lancedb.connect('~/datasets/demo')\\n\"\n",
|
|
||||||
" \"tbl = db.open_table('diffusiondb')\\n\\n\"\n",
|
|
||||||
" f\"embedding = embed_func('{query}')\\n\"\n",
|
|
||||||
" \"tbl.search(embedding).limit(9).to_pandas()\"\n",
|
|
||||||
" )\n",
|
|
||||||
" return (_extract(tbl.search(emb).limit(9).to_pandas()), code)\n",
|
|
||||||
"\n",
|
|
||||||
"def find_image_keywords(query):\n",
|
|
||||||
" code = (\n",
|
|
||||||
" \"import lancedb\\n\"\n",
|
|
||||||
" \"db = lancedb.connect('~/datasets/demo')\\n\"\n",
|
|
||||||
" \"tbl = db.open_table('diffusiondb')\\n\\n\"\n",
|
|
||||||
" f\"tbl.search('{query}').limit(9).to_pandas()\"\n",
|
|
||||||
" )\n",
|
|
||||||
" return (_extract(tbl.search(query).limit(9).to_pandas()), code)\n",
|
|
||||||
"\n",
|
|
||||||
"def find_image_sql(query):\n",
|
|
||||||
" code = (\n",
|
|
||||||
" \"import lancedb\\n\"\n",
|
|
||||||
" \"import duckdb\\n\"\n",
|
|
||||||
" \"db = lancedb.connect('~/datasets/demo')\\n\"\n",
|
|
||||||
" \"tbl = db.open_table('diffusiondb')\\n\\n\"\n",
|
|
||||||
" \"diffusiondb = tbl.to_lance()\\n\"\n",
|
|
||||||
" f\"duckdb.sql('{query}').to_df()\"\n",
|
|
||||||
" ) \n",
|
|
||||||
" diffusiondb = tbl.to_lance()\n",
|
|
||||||
" return (_extract(duckdb.sql(query).to_df()), code)\n",
|
|
||||||
"\n",
|
|
||||||
"def _extract(df):\n",
|
|
||||||
" image_col = \"image\"\n",
|
|
||||||
" return [(PIL.Image.open(io.BytesIO(row[image_col])), row[\"prompt\"]) for _, row in df.iterrows()]"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {},
|
|
||||||
"source": [
|
|
||||||
"## Setup Gradio interface"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 28,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"name": "stdout",
|
|
||||||
"output_type": "stream",
|
|
||||||
"text": [
|
|
||||||
"Running on local URL: http://127.0.0.1:7881\n",
|
|
||||||
"\n",
|
|
||||||
"To create a public link, set `share=True` in `launch()`.\n"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"data": {
|
|
||||||
"text/html": [
|
|
||||||
"<div><iframe src=\"http://127.0.0.1:7881/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
|
|
||||||
],
|
|
||||||
"text/plain": [
|
|
||||||
"<IPython.core.display.HTML object>"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"metadata": {},
|
|
||||||
"output_type": "display_data"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"data": {
|
|
||||||
"text/plain": []
|
|
||||||
},
|
|
||||||
"execution_count": 28,
|
|
||||||
"metadata": {},
|
|
||||||
"output_type": "execute_result"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"import gradio as gr\n",
|
|
||||||
"\n",
|
|
||||||
"\n",
|
|
||||||
"with gr.Blocks() as demo:\n",
|
|
||||||
" with gr.Row():\n",
|
|
||||||
" with gr.Tab(\"Embeddings\"):\n",
|
|
||||||
" vector_query = gr.Textbox(value=\"portraits of a person\", show_label=False)\n",
|
|
||||||
" b1 = gr.Button(\"Submit\")\n",
|
|
||||||
" with gr.Tab(\"Keywords\"):\n",
|
|
||||||
" keyword_query = gr.Textbox(value=\"ninja turtle\", show_label=False)\n",
|
|
||||||
" b2 = gr.Button(\"Submit\")\n",
|
|
||||||
" with gr.Tab(\"SQL\"):\n",
|
|
||||||
" sql_query = gr.Textbox(value=\"SELECT * from diffusiondb WHERE image_nsfw >= 2 LIMIT 9\", show_label=False)\n",
|
|
||||||
" b3 = gr.Button(\"Submit\")\n",
|
|
||||||
" with gr.Row():\n",
|
|
||||||
" code = gr.Code(label=\"Code\", language=\"python\")\n",
|
|
||||||
" with gr.Row():\n",
|
|
||||||
" gallery = gr.Gallery(\n",
|
|
||||||
" label=\"Found images\", show_label=False, elem_id=\"gallery\"\n",
|
|
||||||
" ).style(columns=[3], rows=[3], object_fit=\"contain\", height=\"auto\") \n",
|
|
||||||
" \n",
|
|
||||||
" b1.click(find_image_vectors, inputs=vector_query, outputs=[gallery, code])\n",
|
|
||||||
" b2.click(find_image_keywords, inputs=keyword_query, outputs=[gallery, code])\n",
|
|
||||||
" b3.click(find_image_sql, inputs=sql_query, outputs=[gallery, code])\n",
|
|
||||||
" \n",
|
|
||||||
"demo.launch()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": []
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3.11.4 64-bit",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python3"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.11.4"
|
|
||||||
},
|
|
||||||
"vscode": {
|
|
||||||
"interpreter": {
|
|
||||||
"hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 1
|
|
||||||
}
|
|
||||||
File diff suppressed because one or more lines are too long
@@ -113,6 +113,10 @@ lists the indices that LanceDb supports.
|
|||||||
|
|
||||||
::: lancedb.index.BTree
|
::: lancedb.index.BTree
|
||||||
|
|
||||||
|
::: lancedb.index.Bitmap
|
||||||
|
|
||||||
|
::: lancedb.index.LabelList
|
||||||
|
|
||||||
::: lancedb.index.IvfPq
|
::: lancedb.index.IvfPq
|
||||||
|
|
||||||
## Querying (Asynchronous)
|
## Querying (Asynchronous)
|
||||||
|
|||||||
74
docs/src/reranking/answerdotai.md
Normal file
74
docs/src/reranking/answerdotai.md
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
# AnswersDotAI Rerankers
|
||||||
|
|
||||||
|
This integration allows using answersdotai's rerankers to rerank the search results. [Rerankers](https://github.com/AnswerDotAI/rerankers)
|
||||||
|
A lightweight, low-dependency, unified API to use all common reranking and cross-encoder models.
|
||||||
|
|
||||||
|
!!! note
|
||||||
|
Supported Query Types: Hybrid, Vector, FTS
|
||||||
|
|
||||||
|
|
||||||
|
```python
|
||||||
|
import numpy
|
||||||
|
import lancedb
|
||||||
|
from lancedb.embeddings import get_registry
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.rerankers import AnswerdotaiRerankers
|
||||||
|
|
||||||
|
embedder = get_registry().get("sentence-transformers").create()
|
||||||
|
db = lancedb.connect("~/.lancedb")
|
||||||
|
|
||||||
|
class Schema(LanceModel):
|
||||||
|
text: str = embedder.SourceField()
|
||||||
|
vector: Vector(embedder.ndims()) = embedder.VectorField()
|
||||||
|
|
||||||
|
data = [
|
||||||
|
{"text": "hello world"},
|
||||||
|
{"text": "goodbye world"}
|
||||||
|
]
|
||||||
|
tbl = db.create_table("test", schema=Schema, mode="overwrite")
|
||||||
|
tbl.add(data)
|
||||||
|
reranker = AnswerdotaiRerankers()
|
||||||
|
|
||||||
|
# Run vector search with a reranker
|
||||||
|
result = tbl.search("hello").rerank(reranker=reranker).to_list()
|
||||||
|
|
||||||
|
# Run FTS search with a reranker
|
||||||
|
result = tbl.search("hello", query_type="fts").rerank(reranker=reranker).to_list()
|
||||||
|
|
||||||
|
# Run hybrid search with a reranker
|
||||||
|
tbl.create_fts_index("text", replace=True)
|
||||||
|
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
Accepted Arguments
|
||||||
|
----------------
|
||||||
|
| Argument | Type | Default | Description |
|
||||||
|
| --- | --- | --- | --- |
|
||||||
|
| `model_type` | `str` | `"colbert"` | The type of model to use. Supported model types can be found here - https://github.com/AnswerDotAI/rerankers |
|
||||||
|
| `model_name` | `str` | `"answerdotai/answerai-colbert-small-v1"` | The name of the reranker model to use. |
|
||||||
|
| `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
|
||||||
|
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## Supported Scores for each query type
|
||||||
|
You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:
|
||||||
|
|
||||||
|
### Hybrid Search
|
||||||
|
|`return_score`| Status | Description |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||||
|
| `all` | ❌ Not Supported | Returns have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||||
|
|
||||||
|
### Vector Search
|
||||||
|
|`return_score`| Status | Description |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||||
|
| `all` | ✅ Supported | Returns have vector(`_distance`) along with Hybrid Search score(`_relevance_score`) |
|
||||||
|
|
||||||
|
### FTS Search
|
||||||
|
|`return_score`| Status | Description |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| `relevance` | ✅ Supported | Returns only have the `_relevance_score` column |
|
||||||
|
| `all` | ✅ Supported | Returns have FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||||
@@ -45,6 +45,23 @@ tbl.create_fts_index("text")
|
|||||||
result = tbl.query("hello", query_type="hybrid").rerank(reranker).to_list()
|
result = tbl.query("hello", query_type="hybrid").rerank(reranker).to_list()
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Multi-vector reranking
|
||||||
|
Most rerankers support reranking based on multiple vectors. To rerank based on multiple vectors, you can pass a list of vectors to the `rerank` method. Here's an example of how to rerank based on multiple vector columns using the `CrossEncoderReranker`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from lancedb.rerankers import CrossEncoderReranker
|
||||||
|
|
||||||
|
reranker = CrossEncoderReranker()
|
||||||
|
|
||||||
|
query = "hello"
|
||||||
|
|
||||||
|
res1 = table.search(query, vector_column_name="vector").limit(3)
|
||||||
|
res2 = table.search(query, vector_column_name="text_vector").limit(3)
|
||||||
|
res3 = table.search(query, vector_column_name="meta_vector").limit(3)
|
||||||
|
|
||||||
|
reranked = reranker.rerank_multivector([res1, res2, res3], deduplicate=True)
|
||||||
|
```
|
||||||
|
|
||||||
## Available Rerankers
|
## Available Rerankers
|
||||||
LanceDB comes with some built-in rerankers. Here are some of the rerankers that are available in LanceDB:
|
LanceDB comes with some built-in rerankers. Here are some of the rerankers that are available in LanceDB:
|
||||||
|
|
||||||
@@ -54,6 +71,8 @@ LanceDB comes with some built-in rerankers. Here are some of the rerankers that
|
|||||||
- [OpenAI Reranker](./openai.md)
|
- [OpenAI Reranker](./openai.md)
|
||||||
- [Linear Combination Reranker](./linear_combination.md)
|
- [Linear Combination Reranker](./linear_combination.md)
|
||||||
- [Jina Reranker](./jina.md)
|
- [Jina Reranker](./jina.md)
|
||||||
|
- [AnswerDotAI Rerankers](./answerdotai.md)
|
||||||
|
- [Reciprocal Rank Fusion Reranker](./rrf.md)
|
||||||
|
|
||||||
## Creating Custom Rerankers
|
## Creating Custom Rerankers
|
||||||
|
|
||||||
|
|||||||
53
docs/src/reranking/rrf.md
Normal file
53
docs/src/reranking/rrf.md
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
# Reciprocal Rank Fusion Reranker
|
||||||
|
|
||||||
|
Reciprocal Rank Fusion (RRF) is an algorithm that evaluates the search scores by leveraging the positions/rank of the documents. The implementation follows this [paper](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf).
|
||||||
|
|
||||||
|
|
||||||
|
!!! note
|
||||||
|
Supported Query Types: Hybrid
|
||||||
|
|
||||||
|
|
||||||
|
```python
|
||||||
|
import numpy
|
||||||
|
import lancedb
|
||||||
|
from lancedb.embeddings import get_registry
|
||||||
|
from lancedb.pydantic import LanceModel, Vector
|
||||||
|
from lancedb.rerankers import RRFReranker
|
||||||
|
|
||||||
|
embedder = get_registry().get("sentence-transformers").create()
|
||||||
|
db = lancedb.connect("~/.lancedb")
|
||||||
|
|
||||||
|
class Schema(LanceModel):
|
||||||
|
text: str = embedder.SourceField()
|
||||||
|
vector: Vector(embedder.ndims()) = embedder.VectorField()
|
||||||
|
|
||||||
|
data = [
|
||||||
|
{"text": "hello world"},
|
||||||
|
{"text": "goodbye world"}
|
||||||
|
]
|
||||||
|
tbl = db.create_table("test", schema=Schema, mode="overwrite")
|
||||||
|
tbl.add(data)
|
||||||
|
reranker = RRFReranker()
|
||||||
|
|
||||||
|
# Run hybrid search with a reranker
|
||||||
|
tbl.create_fts_index("text", replace=True)
|
||||||
|
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
Accepted Arguments
|
||||||
|
----------------
|
||||||
|
| Argument | Type | Default | Description |
|
||||||
|
| --- | --- | --- | --- |
|
||||||
|
| `K` | `int` | `60` | A constant used in the RRF formula (default is 60). Experiments indicate that k = 60 was near-optimal, but that the choice is not critical |
|
||||||
|
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score`. If "all", will return all scores from the vector and FTS search along with the relevance score. |
|
||||||
|
|
||||||
|
|
||||||
|
## Supported Scores for each query type
|
||||||
|
You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:
|
||||||
|
|
||||||
|
### Hybrid Search
|
||||||
|
|`return_score`| Status | Description |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| `relevance` | ✅ Supported | Returned rows only have the `_relevance_score` column |
|
||||||
|
| `all` | ✅ Supported | Returned rows have vector(`_distance`) and FTS(`score`) along with Hybrid Search score(`_relevance_score`) |
|
||||||
4
docs/src/studies/overview.md
Normal file
4
docs/src/studies/overview.md
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
This is a list of benchmarks and reports we've worked on at LanceDB. Some of these are continuously updated, while others are one-off reports.
|
||||||
|
|
||||||
|
- [Improve retrievers with hybrid search and reranking](https://blog.lancedb.com/hybrid-search-and-reranking-report/)
|
||||||
|
|
||||||
4
docs/test/md_testing.py
Normal file → Executable file
4
docs/test/md_testing.py
Normal file → Executable file
@@ -1,3 +1,5 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
import glob
|
import glob
|
||||||
from typing import Iterator, List
|
from typing import Iterator, List
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
@@ -17,6 +19,8 @@ excluded_globs = [
|
|||||||
"../src/hybrid_search/hybrid_search.md",
|
"../src/hybrid_search/hybrid_search.md",
|
||||||
"../src/reranking/*.md",
|
"../src/reranking/*.md",
|
||||||
"../src/guides/tuning_retrievers/*.md",
|
"../src/guides/tuning_retrievers/*.md",
|
||||||
|
"../src/embeddings/available_embedding_models/text_embedding_functions/*.md",
|
||||||
|
"../src/embeddings/available_embedding_models/multimodal_embedding_functions/*.md"
|
||||||
]
|
]
|
||||||
|
|
||||||
python_prefix = "py"
|
python_prefix = "py"
|
||||||
|
|||||||
@@ -5,4 +5,5 @@ pylance
|
|||||||
duckdb
|
duckdb
|
||||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||||
torch
|
torch
|
||||||
polars
|
polars>=0.19, <=1.3.0
|
||||||
|
|
||||||
|
|||||||
@@ -8,7 +8,7 @@
|
|||||||
<parent>
|
<parent>
|
||||||
<groupId>com.lancedb</groupId>
|
<groupId>com.lancedb</groupId>
|
||||||
<artifactId>lancedb-parent</artifactId>
|
<artifactId>lancedb-parent</artifactId>
|
||||||
<version>0.1-SNAPSHOT</version>
|
<version>0.0.3</version>
|
||||||
<relativePath>../pom.xml</relativePath>
|
<relativePath>../pom.xml</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
@@ -68,7 +68,7 @@
|
|||||||
</goals>
|
</goals>
|
||||||
<configuration>
|
<configuration>
|
||||||
<path>lancedb-jni</path>
|
<path>lancedb-jni</path>
|
||||||
<!--<release>true</release>-->
|
<release>true</release>
|
||||||
<!-- Copy native libraries to target/classes for runtime access -->
|
<!-- Copy native libraries to target/classes for runtime access -->
|
||||||
<copyTo>${project.build.directory}/classes/nativelib</copyTo>
|
<copyTo>${project.build.directory}/classes/nativelib</copyTo>
|
||||||
<copyWithPlatformDir>true</copyWithPlatformDir>
|
<copyWithPlatformDir>true</copyWithPlatformDir>
|
||||||
|
|||||||
142
java/pom.xml
142
java/pom.xml
@@ -6,15 +6,28 @@
|
|||||||
|
|
||||||
<groupId>com.lancedb</groupId>
|
<groupId>com.lancedb</groupId>
|
||||||
<artifactId>lancedb-parent</artifactId>
|
<artifactId>lancedb-parent</artifactId>
|
||||||
<version>0.1-SNAPSHOT</version>
|
<version>0.0.3</version>
|
||||||
<packaging>pom</packaging>
|
<packaging>pom</packaging>
|
||||||
|
|
||||||
<name>Lance Parent</name>
|
<name>LanceDB Parent</name>
|
||||||
|
<description>LanceDB vector database Java API</description>
|
||||||
|
<url>http://lancedb.com/</url>
|
||||||
|
|
||||||
|
<developers>
|
||||||
|
<developer>
|
||||||
|
<name>Lance DB Dev Group</name>
|
||||||
|
<email>dev@lancedb.com</email>
|
||||||
|
</developer>
|
||||||
|
</developers>
|
||||||
|
<licenses>
|
||||||
|
<license>
|
||||||
|
<name>The Apache Software License, Version 2.0</name>
|
||||||
|
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
|
||||||
|
</license>
|
||||||
|
</licenses>
|
||||||
|
|
||||||
<properties>
|
<properties>
|
||||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||||
<maven.compiler.source>11</maven.compiler.source>
|
|
||||||
<maven.compiler.target>11</maven.compiler.target>
|
|
||||||
<arrow.version>15.0.0</arrow.version>
|
<arrow.version>15.0.0</arrow.version>
|
||||||
</properties>
|
</properties>
|
||||||
|
|
||||||
@@ -22,6 +35,12 @@
|
|||||||
<module>core</module>
|
<module>core</module>
|
||||||
</modules>
|
</modules>
|
||||||
|
|
||||||
|
<scm>
|
||||||
|
<connection>scm:git:https://github.com/lancedb/lancedb.git</connection>
|
||||||
|
<developerConnection>scm:git:ssh://git@github.com/lancedb/lancedb.git</developerConnection>
|
||||||
|
<url>https://github.com/lancedb/lancedb</url>
|
||||||
|
</scm>
|
||||||
|
|
||||||
<dependencyManagement>
|
<dependencyManagement>
|
||||||
<dependencies>
|
<dependencies>
|
||||||
<dependency>
|
<dependency>
|
||||||
@@ -62,8 +81,45 @@
|
|||||||
</dependencies>
|
</dependencies>
|
||||||
</dependencyManagement>
|
</dependencyManagement>
|
||||||
|
|
||||||
|
<distributionManagement>
|
||||||
|
<snapshotRepository>
|
||||||
|
<id>ossrh</id>
|
||||||
|
<url>https://s01.oss.sonatype.org/content/repositories/snapshots</url>
|
||||||
|
</snapshotRepository>
|
||||||
|
<repository>
|
||||||
|
<id>ossrh</id>
|
||||||
|
<url>https://s01.oss.sonatype.org/service/local/staging/deploy/maven2/</url>
|
||||||
|
</repository>
|
||||||
|
</distributionManagement>
|
||||||
|
|
||||||
<build>
|
<build>
|
||||||
<plugins>
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.apache.maven.plugins</groupId>
|
||||||
|
<artifactId>maven-source-plugin</artifactId>
|
||||||
|
<version>2.2.1</version>
|
||||||
|
<executions>
|
||||||
|
<execution>
|
||||||
|
<id>attach-sources</id>
|
||||||
|
<goals>
|
||||||
|
<goal>jar-no-fork</goal>
|
||||||
|
</goals>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.apache.maven.plugins</groupId>
|
||||||
|
<artifactId>maven-javadoc-plugin</artifactId>
|
||||||
|
<version>2.9.1</version>
|
||||||
|
<executions>
|
||||||
|
<execution>
|
||||||
|
<id>attach-javadocs</id>
|
||||||
|
<goals>
|
||||||
|
<goal>jar</goal>
|
||||||
|
</goals>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
<plugin>
|
<plugin>
|
||||||
<groupId>org.apache.maven.plugins</groupId>
|
<groupId>org.apache.maven.plugins</groupId>
|
||||||
<artifactId>maven-checkstyle-plugin</artifactId>
|
<artifactId>maven-checkstyle-plugin</artifactId>
|
||||||
@@ -126,4 +182,82 @@
|
|||||||
</plugins>
|
</plugins>
|
||||||
</pluginManagement>
|
</pluginManagement>
|
||||||
</build>
|
</build>
|
||||||
|
|
||||||
|
<profiles>
|
||||||
|
<profile>
|
||||||
|
<id>jdk8</id>
|
||||||
|
<activation>
|
||||||
|
<jdk>[1.8,1.8.999]</jdk>
|
||||||
|
</activation>
|
||||||
|
<properties>
|
||||||
|
<maven.compiler.source>1.8</maven.compiler.source>
|
||||||
|
<maven.compiler.target>1.8</maven.compiler.target>
|
||||||
|
</properties>
|
||||||
|
</profile>
|
||||||
|
<profile>
|
||||||
|
<id>jdk11+</id>
|
||||||
|
<activation>
|
||||||
|
<jdk>[11,)</jdk>
|
||||||
|
</activation>
|
||||||
|
<properties>
|
||||||
|
<maven.compiler.source>11</maven.compiler.source>
|
||||||
|
<maven.compiler.target>11</maven.compiler.target>
|
||||||
|
</properties>
|
||||||
|
<build>
|
||||||
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<artifactId>maven-surefire-plugin</artifactId>
|
||||||
|
<version>3.2.5</version>
|
||||||
|
<configuration>
|
||||||
|
<argLine>--add-opens=java.base/java.nio=ALL-UNNAMED</argLine>
|
||||||
|
<forkNode implementation="org.apache.maven.plugin.surefire.extensions.SurefireForkNodeFactory" />
|
||||||
|
<useSystemClassLoader>false</useSystemClassLoader>
|
||||||
|
</configuration>
|
||||||
|
</plugin>
|
||||||
|
</plugins>
|
||||||
|
</build>
|
||||||
|
</profile>
|
||||||
|
<profile>
|
||||||
|
<id>deploy-to-ossrh</id>
|
||||||
|
<build>
|
||||||
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.sonatype.central</groupId>
|
||||||
|
<artifactId>central-publishing-maven-plugin</artifactId>
|
||||||
|
<version>0.4.0</version>
|
||||||
|
<extensions>true</extensions>
|
||||||
|
<configuration>
|
||||||
|
<publishingServerId>ossrh</publishingServerId>
|
||||||
|
<tokenAuth>true</tokenAuth>
|
||||||
|
</configuration>
|
||||||
|
</plugin>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.sonatype.plugins</groupId>
|
||||||
|
<artifactId>nexus-staging-maven-plugin</artifactId>
|
||||||
|
<version>1.6.13</version>
|
||||||
|
<extensions>true</extensions>
|
||||||
|
<configuration>
|
||||||
|
<serverId>ossrh</serverId>
|
||||||
|
<nexusUrl>https://s01.oss.sonatype.org/</nexusUrl>
|
||||||
|
<autoReleaseAfterClose>true</autoReleaseAfterClose>
|
||||||
|
</configuration>
|
||||||
|
</plugin>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.apache.maven.plugins</groupId>
|
||||||
|
<artifactId>maven-gpg-plugin</artifactId>
|
||||||
|
<version>1.5</version>
|
||||||
|
<executions>
|
||||||
|
<execution>
|
||||||
|
<id>sign-artifacts</id>
|
||||||
|
<phase>verify</phase>
|
||||||
|
<goals>
|
||||||
|
<goal>sign</goal>
|
||||||
|
</goals>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
</plugins>
|
||||||
|
</build>
|
||||||
|
</profile>
|
||||||
|
</profiles>
|
||||||
</project>
|
</project>
|
||||||
|
|||||||
4
node/package-lock.json
generated
4
node/package-lock.json
generated
@@ -1,12 +1,12 @@
|
|||||||
{
|
{
|
||||||
"name": "vectordb",
|
"name": "vectordb",
|
||||||
"version": "0.7.1",
|
"version": "0.10.0-beta.1",
|
||||||
"lockfileVersion": 3,
|
"lockfileVersion": 3,
|
||||||
"requires": true,
|
"requires": true,
|
||||||
"packages": {
|
"packages": {
|
||||||
"": {
|
"": {
|
||||||
"name": "vectordb",
|
"name": "vectordb",
|
||||||
"version": "0.7.1",
|
"version": "0.10.0-beta.1",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64",
|
"x64",
|
||||||
"arm64"
|
"arm64"
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "vectordb",
|
"name": "vectordb",
|
||||||
"version": "0.7.1",
|
"version": "0.10.0-beta.1",
|
||||||
"description": " Serverless, low-latency vector database for AI applications",
|
"description": " Serverless, low-latency vector database for AI applications",
|
||||||
"main": "dist/index.js",
|
"main": "dist/index.js",
|
||||||
"types": "dist/index.d.ts",
|
"types": "dist/index.d.ts",
|
||||||
|
|||||||
@@ -93,6 +93,30 @@ describe("LanceDB client", function () {
|
|||||||
const con = await lancedb.connect(uri);
|
const con = await lancedb.connect(uri);
|
||||||
assert.deepEqual(await con.tableNames(), ["vectors"]);
|
assert.deepEqual(await con.tableNames(), ["vectors"]);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it("read consistency level", async function () {
|
||||||
|
const uri = await createTestDB();
|
||||||
|
const db1 = await lancedb.connect({ uri });
|
||||||
|
const table1 = await db1.openTable("vectors");
|
||||||
|
|
||||||
|
const db2 = await lancedb.connect({
|
||||||
|
uri,
|
||||||
|
readConsistencyInterval: 0
|
||||||
|
})
|
||||||
|
const table2 = await db2.openTable("vectors");
|
||||||
|
|
||||||
|
assert.equal(await table2.countRows(), 2);
|
||||||
|
await table1.add([
|
||||||
|
{
|
||||||
|
id: 3,
|
||||||
|
name: 'name_2',
|
||||||
|
price: 10,
|
||||||
|
is_active: true,
|
||||||
|
vector: [ 0, 0.1 ]
|
||||||
|
},
|
||||||
|
]);
|
||||||
|
assert.equal(await table2.countRows(), 3);
|
||||||
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe("when querying an existing dataset", function () {
|
describe("when querying an existing dataset", function () {
|
||||||
|
|||||||
@@ -13,3 +13,12 @@ __test__
|
|||||||
renovate.json
|
renovate.json
|
||||||
.idea
|
.idea
|
||||||
src
|
src
|
||||||
|
lancedb
|
||||||
|
examples
|
||||||
|
nodejs-artifacts
|
||||||
|
Cargo.toml
|
||||||
|
biome.json
|
||||||
|
build.rs
|
||||||
|
jest.config.js
|
||||||
|
tsconfig.json
|
||||||
|
typedoc.json
|
||||||
|
|||||||
@@ -20,7 +20,6 @@ napi = { version = "2.16.8", default-features = false, features = [
|
|||||||
"async",
|
"async",
|
||||||
] }
|
] }
|
||||||
napi-derive = "2.16.4"
|
napi-derive = "2.16.4"
|
||||||
|
|
||||||
# Prevent dynamic linking of lzma, which comes from datafusion
|
# Prevent dynamic linking of lzma, which comes from datafusion
|
||||||
lzma-sys = { version = "*", features = ["static"] }
|
lzma-sys = { version = "*", features = ["static"] }
|
||||||
|
|
||||||
|
|||||||
@@ -12,9 +12,11 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
|
import { readdirSync } from "fs";
|
||||||
import { Field, Float64, Schema } from "apache-arrow";
|
import { Field, Float64, Schema } from "apache-arrow";
|
||||||
import * as tmp from "tmp";
|
import * as tmp from "tmp";
|
||||||
import { Connection, Table, connect } from "../lancedb";
|
import { Connection, Table, connect } from "../lancedb";
|
||||||
|
import { LocalTable } from "../lancedb/table";
|
||||||
|
|
||||||
describe("when connecting", () => {
|
describe("when connecting", () => {
|
||||||
let tmpDir: tmp.DirResult;
|
let tmpDir: tmp.DirResult;
|
||||||
@@ -134,4 +136,57 @@ describe("given a connection", () => {
|
|||||||
await table.add(data);
|
await table.add(data);
|
||||||
await expect(isV2(table)).resolves.toBe(true);
|
await expect(isV2(table)).resolves.toBe(true);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it("should be able to create tables with V2 manifest paths", async () => {
|
||||||
|
const db = await connect(tmpDir.name);
|
||||||
|
let table = (await db.createEmptyTable(
|
||||||
|
"test_manifest_paths_v2_empty",
|
||||||
|
new Schema([new Field("id", new Float64(), true)]),
|
||||||
|
{
|
||||||
|
enableV2ManifestPaths: true,
|
||||||
|
},
|
||||||
|
)) as LocalTable;
|
||||||
|
expect(await table.usesV2ManifestPaths()).toBe(true);
|
||||||
|
|
||||||
|
let manifestDir =
|
||||||
|
tmpDir.name + "/test_manifest_paths_v2_empty.lance/_versions";
|
||||||
|
readdirSync(manifestDir).forEach((file) => {
|
||||||
|
expect(file).toMatch(/^\d{20}\.manifest$/);
|
||||||
|
});
|
||||||
|
|
||||||
|
table = (await db.createTable("test_manifest_paths_v2", [{ id: 1 }], {
|
||||||
|
enableV2ManifestPaths: true,
|
||||||
|
})) as LocalTable;
|
||||||
|
expect(await table.usesV2ManifestPaths()).toBe(true);
|
||||||
|
manifestDir = tmpDir.name + "/test_manifest_paths_v2.lance/_versions";
|
||||||
|
readdirSync(manifestDir).forEach((file) => {
|
||||||
|
expect(file).toMatch(/^\d{20}\.manifest$/);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("should be able to migrate tables to the V2 manifest paths", async () => {
|
||||||
|
const db = await connect(tmpDir.name);
|
||||||
|
const table = (await db.createEmptyTable(
|
||||||
|
"test_manifest_path_migration",
|
||||||
|
new Schema([new Field("id", new Float64(), true)]),
|
||||||
|
{
|
||||||
|
enableV2ManifestPaths: false,
|
||||||
|
},
|
||||||
|
)) as LocalTable;
|
||||||
|
|
||||||
|
expect(await table.usesV2ManifestPaths()).toBe(false);
|
||||||
|
|
||||||
|
const manifestDir =
|
||||||
|
tmpDir.name + "/test_manifest_path_migration.lance/_versions";
|
||||||
|
readdirSync(manifestDir).forEach((file) => {
|
||||||
|
expect(file).toMatch(/^\d\.manifest$/);
|
||||||
|
});
|
||||||
|
|
||||||
|
await table.migrateManifestPathsV2();
|
||||||
|
expect(await table.usesV2ManifestPaths()).toBe(true);
|
||||||
|
|
||||||
|
readdirSync(manifestDir).forEach((file) => {
|
||||||
|
expect(file).toMatch(/^\d{20}\.manifest$/);
|
||||||
|
});
|
||||||
|
});
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
import * as apiArrow from "apache-arrow";
|
||||||
// Copyright 2024 Lance Developers.
|
// Copyright 2024 Lance Developers.
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
@@ -69,7 +70,7 @@ describe.each([arrow13, arrow14, arrow15, arrow16, arrow17])(
|
|||||||
return 3;
|
return 3;
|
||||||
}
|
}
|
||||||
embeddingDataType() {
|
embeddingDataType() {
|
||||||
return new arrow.Float32();
|
return new arrow.Float32() as apiArrow.Float;
|
||||||
}
|
}
|
||||||
async computeSourceEmbeddings(data: string[]) {
|
async computeSourceEmbeddings(data: string[]) {
|
||||||
return data.map(() => [1, 2, 3]);
|
return data.map(() => [1, 2, 3]);
|
||||||
@@ -82,7 +83,7 @@ describe.each([arrow13, arrow14, arrow15, arrow16, arrow17])(
|
|||||||
|
|
||||||
const schema = LanceSchema({
|
const schema = LanceSchema({
|
||||||
id: new arrow.Int32(),
|
id: new arrow.Int32(),
|
||||||
text: func.sourceField(new arrow.Utf8()),
|
text: func.sourceField(new arrow.Utf8() as apiArrow.DataType),
|
||||||
vector: func.vectorField(),
|
vector: func.vectorField(),
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -119,7 +120,7 @@ describe.each([arrow13, arrow14, arrow15, arrow16, arrow17])(
|
|||||||
return 3;
|
return 3;
|
||||||
}
|
}
|
||||||
embeddingDataType() {
|
embeddingDataType() {
|
||||||
return new arrow.Float32();
|
return new arrow.Float32() as apiArrow.Float;
|
||||||
}
|
}
|
||||||
async computeSourceEmbeddings(data: string[]) {
|
async computeSourceEmbeddings(data: string[]) {
|
||||||
return data.map(() => [1, 2, 3]);
|
return data.map(() => [1, 2, 3]);
|
||||||
@@ -144,7 +145,7 @@ describe.each([arrow13, arrow14, arrow15, arrow16, arrow17])(
|
|||||||
return 3;
|
return 3;
|
||||||
}
|
}
|
||||||
embeddingDataType() {
|
embeddingDataType() {
|
||||||
return new arrow.Float32();
|
return new arrow.Float32() as apiArrow.Float;
|
||||||
}
|
}
|
||||||
async computeSourceEmbeddings(data: string[]) {
|
async computeSourceEmbeddings(data: string[]) {
|
||||||
return data.map(() => [1, 2, 3]);
|
return data.map(() => [1, 2, 3]);
|
||||||
@@ -154,7 +155,7 @@ describe.each([arrow13, arrow14, arrow15, arrow16, arrow17])(
|
|||||||
|
|
||||||
const schema = LanceSchema({
|
const schema = LanceSchema({
|
||||||
id: new arrow.Int32(),
|
id: new arrow.Int32(),
|
||||||
text: func.sourceField(new arrow.Utf8()),
|
text: func.sourceField(new arrow.Utf8() as apiArrow.DataType),
|
||||||
vector: func.vectorField(),
|
vector: func.vectorField(),
|
||||||
});
|
});
|
||||||
const expectedMetadata = new Map<string, string>([
|
const expectedMetadata = new Map<string, string>([
|
||||||
|
|||||||
@@ -31,7 +31,9 @@ import {
|
|||||||
Float64,
|
Float64,
|
||||||
Int32,
|
Int32,
|
||||||
Int64,
|
Int64,
|
||||||
|
List,
|
||||||
Schema,
|
Schema,
|
||||||
|
Utf8,
|
||||||
makeArrowTable,
|
makeArrowTable,
|
||||||
} from "../lancedb/arrow";
|
} from "../lancedb/arrow";
|
||||||
import {
|
import {
|
||||||
@@ -331,6 +333,7 @@ describe("When creating an index", () => {
|
|||||||
const schema = new Schema([
|
const schema = new Schema([
|
||||||
new Field("id", new Int32(), true),
|
new Field("id", new Int32(), true),
|
||||||
new Field("vec", new FixedSizeList(32, new Field("item", new Float32()))),
|
new Field("vec", new FixedSizeList(32, new Field("item", new Float32()))),
|
||||||
|
new Field("tags", new List(new Field("item", new Utf8(), true))),
|
||||||
]);
|
]);
|
||||||
let tbl: Table;
|
let tbl: Table;
|
||||||
let queryVec: number[];
|
let queryVec: number[];
|
||||||
@@ -346,6 +349,7 @@ describe("When creating an index", () => {
|
|||||||
vec: Array(32)
|
vec: Array(32)
|
||||||
.fill(1)
|
.fill(1)
|
||||||
.map(() => Math.random()),
|
.map(() => Math.random()),
|
||||||
|
tags: ["tag1", "tag2", "tag3"],
|
||||||
})),
|
})),
|
||||||
{
|
{
|
||||||
schema,
|
schema,
|
||||||
@@ -392,6 +396,10 @@ describe("When creating an index", () => {
|
|||||||
.toArrow();
|
.toArrow();
|
||||||
expect(rst2.numRows).toBe(2);
|
expect(rst2.numRows).toBe(2);
|
||||||
expect(rst.toString()).toEqual(rst2.toString());
|
expect(rst.toString()).toEqual(rst2.toString());
|
||||||
|
|
||||||
|
// test offset
|
||||||
|
rst = await tbl.query().limit(2).offset(1).nearestTo(queryVec).toArrow();
|
||||||
|
expect(rst.numRows).toBe(1);
|
||||||
});
|
});
|
||||||
|
|
||||||
it("should allow parameters to be specified", async () => {
|
it("should allow parameters to be specified", async () => {
|
||||||
@@ -428,6 +436,42 @@ describe("When creating an index", () => {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
test("create a bitmap index", async () => {
|
||||||
|
await tbl.createIndex("id", {
|
||||||
|
config: Index.bitmap(),
|
||||||
|
});
|
||||||
|
const indexDir = path.join(tmpDir.name, "test.lance", "_indices");
|
||||||
|
expect(fs.readdirSync(indexDir)).toHaveLength(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
test("create a hnswPq index", async () => {
|
||||||
|
await tbl.createIndex("vec", {
|
||||||
|
config: Index.hnswPq({
|
||||||
|
numPartitions: 10,
|
||||||
|
}),
|
||||||
|
});
|
||||||
|
const indexDir = path.join(tmpDir.name, "test.lance", "_indices");
|
||||||
|
expect(fs.readdirSync(indexDir)).toHaveLength(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
test("create a HnswSq index", async () => {
|
||||||
|
await tbl.createIndex("vec", {
|
||||||
|
config: Index.hnswSq({
|
||||||
|
numPartitions: 10,
|
||||||
|
}),
|
||||||
|
});
|
||||||
|
const indexDir = path.join(tmpDir.name, "test.lance", "_indices");
|
||||||
|
expect(fs.readdirSync(indexDir)).toHaveLength(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
test("create a label list index", async () => {
|
||||||
|
await tbl.createIndex("tags", {
|
||||||
|
config: Index.labelList(),
|
||||||
|
});
|
||||||
|
const indexDir = path.join(tmpDir.name, "test.lance", "_indices");
|
||||||
|
expect(fs.readdirSync(indexDir)).toHaveLength(1);
|
||||||
|
});
|
||||||
|
|
||||||
test("should be able to get index stats", async () => {
|
test("should be able to get index stats", async () => {
|
||||||
await tbl.createIndex("id");
|
await tbl.createIndex("id");
|
||||||
|
|
||||||
@@ -706,6 +750,21 @@ describe("when optimizing a dataset", () => {
|
|||||||
expect(stats.prune.bytesRemoved).toBeGreaterThan(0);
|
expect(stats.prune.bytesRemoved).toBeGreaterThan(0);
|
||||||
expect(stats.prune.oldVersionsRemoved).toBe(3);
|
expect(stats.prune.oldVersionsRemoved).toBe(3);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it("delete unverified", async () => {
|
||||||
|
const version = await table.version();
|
||||||
|
const versionFile = `${tmpDir.name}/${table.name}.lance/_versions/${version - 1}.manifest`;
|
||||||
|
fs.rmSync(versionFile);
|
||||||
|
|
||||||
|
let stats = await table.optimize({ deleteUnverified: false });
|
||||||
|
expect(stats.prune.oldVersionsRemoved).toBe(0);
|
||||||
|
|
||||||
|
stats = await table.optimize({
|
||||||
|
cleanupOlderThan: new Date(),
|
||||||
|
deleteUnverified: true,
|
||||||
|
});
|
||||||
|
expect(stats.prune.oldVersionsRemoved).toBeGreaterThan(1);
|
||||||
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe.each([arrow13, arrow14, arrow15, arrow16, arrow17])(
|
describe.each([arrow13, arrow14, arrow15, arrow16, arrow17])(
|
||||||
@@ -785,11 +844,58 @@ describe.each([arrow13, arrow14, arrow15, arrow16, arrow17])(
|
|||||||
];
|
];
|
||||||
const table = await db.createTable("test", data);
|
const table = await db.createTable("test", data);
|
||||||
|
|
||||||
expect(table.search("hello").toArray()).rejects.toThrow(
|
expect(table.search("hello", "vector").toArray()).rejects.toThrow(
|
||||||
"No embedding functions are defined in the table",
|
"No embedding functions are defined in the table",
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
test("full text search if no embedding function provided", async () => {
|
||||||
|
const db = await connect(tmpDir.name);
|
||||||
|
const data = [
|
||||||
|
{ text: "hello world", vector: [0.1, 0.2, 0.3] },
|
||||||
|
{ text: "goodbye world", vector: [0.4, 0.5, 0.6] },
|
||||||
|
];
|
||||||
|
const table = await db.createTable("test", data);
|
||||||
|
await table.createIndex("text", {
|
||||||
|
config: Index.fts(),
|
||||||
|
});
|
||||||
|
|
||||||
|
const results = await table.search("hello").toArray();
|
||||||
|
expect(results[0].text).toBe(data[0].text);
|
||||||
|
});
|
||||||
|
|
||||||
|
test("full text search without positions", async () => {
|
||||||
|
const db = await connect(tmpDir.name);
|
||||||
|
const data = [
|
||||||
|
{ text: "hello world", vector: [0.1, 0.2, 0.3] },
|
||||||
|
{ text: "goodbye world", vector: [0.4, 0.5, 0.6] },
|
||||||
|
];
|
||||||
|
const table = await db.createTable("test", data);
|
||||||
|
await table.createIndex("text", {
|
||||||
|
config: Index.fts({ withPositions: false }),
|
||||||
|
});
|
||||||
|
|
||||||
|
const results = await table.search("hello").toArray();
|
||||||
|
expect(results[0].text).toBe(data[0].text);
|
||||||
|
});
|
||||||
|
|
||||||
|
test("full text search phrase query", async () => {
|
||||||
|
const db = await connect(tmpDir.name);
|
||||||
|
const data = [
|
||||||
|
{ text: "hello world", vector: [0.1, 0.2, 0.3] },
|
||||||
|
{ text: "goodbye world", vector: [0.4, 0.5, 0.6] },
|
||||||
|
];
|
||||||
|
const table = await db.createTable("test", data);
|
||||||
|
await table.createIndex("text", {
|
||||||
|
config: Index.fts(),
|
||||||
|
});
|
||||||
|
|
||||||
|
const results = await table.search("world").toArray();
|
||||||
|
expect(results.length).toBe(2);
|
||||||
|
const phraseResults = await table.search('"hello world"').toArray();
|
||||||
|
expect(phraseResults.length).toBe(1);
|
||||||
|
});
|
||||||
|
|
||||||
test.each([
|
test.each([
|
||||||
[0.4, 0.5, 0.599], // number[]
|
[0.4, 0.5, 0.599], // number[]
|
||||||
Float32Array.of(0.4, 0.5, 0.599), // Float32Array
|
Float32Array.of(0.4, 0.5, 0.599), // Float32Array
|
||||||
|
|||||||
64
nodejs/examples/custom_embedding_function.ts
Normal file
64
nodejs/examples/custom_embedding_function.ts
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
// --8<-- [start:imports]
|
||||||
|
import * as lancedb from "@lancedb/lancedb";
|
||||||
|
import {
|
||||||
|
LanceSchema,
|
||||||
|
TextEmbeddingFunction,
|
||||||
|
getRegistry,
|
||||||
|
register,
|
||||||
|
} from "@lancedb/lancedb/embedding";
|
||||||
|
import { pipeline } from "@xenova/transformers";
|
||||||
|
// --8<-- [end:imports]
|
||||||
|
|
||||||
|
// --8<-- [start:embedding_impl]
|
||||||
|
@register("sentence-transformers")
|
||||||
|
class SentenceTransformersEmbeddings extends TextEmbeddingFunction {
|
||||||
|
name = "Xenova/all-miniLM-L6-v2";
|
||||||
|
#ndims!: number;
|
||||||
|
extractor: any;
|
||||||
|
|
||||||
|
async init() {
|
||||||
|
this.extractor = await pipeline("feature-extraction", this.name);
|
||||||
|
this.#ndims = await this.generateEmbeddings(["hello"]).then(
|
||||||
|
(e) => e[0].length,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
ndims() {
|
||||||
|
return this.#ndims;
|
||||||
|
}
|
||||||
|
|
||||||
|
toJSON() {
|
||||||
|
return {
|
||||||
|
name: this.name,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
async generateEmbeddings(texts: string[]) {
|
||||||
|
const output = await this.extractor(texts, {
|
||||||
|
pooling: "mean",
|
||||||
|
normalize: true,
|
||||||
|
});
|
||||||
|
return output.tolist();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// -8<-- [end:embedding_impl]
|
||||||
|
|
||||||
|
// --8<-- [start:call_custom_function]
|
||||||
|
const registry = getRegistry();
|
||||||
|
|
||||||
|
const sentenceTransformer = await registry
|
||||||
|
.get<SentenceTransformersEmbeddings>("sentence-transformers")!
|
||||||
|
.create();
|
||||||
|
|
||||||
|
const schema = LanceSchema({
|
||||||
|
vector: sentenceTransformer.vectorField(),
|
||||||
|
text: sentenceTransformer.sourceField(),
|
||||||
|
});
|
||||||
|
|
||||||
|
const db = await lancedb.connect("/tmp/db");
|
||||||
|
const table = await db.createEmptyTable("table", schema, { mode: "overwrite" });
|
||||||
|
|
||||||
|
await table.add([{ text: "hello" }, { text: "world" }]);
|
||||||
|
|
||||||
|
const results = await table.search("greeting").limit(1).toArray();
|
||||||
|
console.log(results[0].text);
|
||||||
|
// -8<-- [end:call_custom_function]
|
||||||
52
nodejs/examples/full_text_search.ts
Normal file
52
nodejs/examples/full_text_search.ts
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
// Copyright 2024 Lance Developers.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
import * as lancedb from "@lancedb/lancedb";
|
||||||
|
|
||||||
|
const db = await lancedb.connect("data/sample-lancedb");
|
||||||
|
|
||||||
|
const words = [
|
||||||
|
"apple",
|
||||||
|
"banana",
|
||||||
|
"cherry",
|
||||||
|
"date",
|
||||||
|
"elderberry",
|
||||||
|
"fig",
|
||||||
|
"grape",
|
||||||
|
];
|
||||||
|
|
||||||
|
const data = Array.from({ length: 10_000 }, (_, i) => ({
|
||||||
|
vector: Array(1536).fill(i),
|
||||||
|
id: i,
|
||||||
|
item: `item ${i}`,
|
||||||
|
strId: `${i}`,
|
||||||
|
doc: words[i % words.length],
|
||||||
|
}));
|
||||||
|
|
||||||
|
const tbl = await db.createTable("myVectors", data, { mode: "overwrite" });
|
||||||
|
|
||||||
|
await tbl.createIndex("doc", {
|
||||||
|
config: lancedb.Index.fts(),
|
||||||
|
});
|
||||||
|
|
||||||
|
// --8<-- [start:full_text_search]
|
||||||
|
let result = await tbl
|
||||||
|
.search("apple")
|
||||||
|
.select(["id", "doc"])
|
||||||
|
.limit(10)
|
||||||
|
.toArray();
|
||||||
|
console.log(result);
|
||||||
|
// --8<-- [end:full_text_search]
|
||||||
|
|
||||||
|
console.log("SQL search: done");
|
||||||
793
nodejs/examples/package-lock.json
generated
793
nodejs/examples/package-lock.json
generated
@@ -9,7 +9,12 @@
|
|||||||
"version": "1.0.0",
|
"version": "1.0.0",
|
||||||
"license": "Apache-2.0",
|
"license": "Apache-2.0",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@lancedb/lancedb": "file:../"
|
"@lancedb/lancedb": "file:../",
|
||||||
|
"@xenova/transformers": "^2.17.2",
|
||||||
|
"tsc": "^2.0.4"
|
||||||
|
},
|
||||||
|
"devDependencies": {
|
||||||
|
"typescript": "^5.5.4"
|
||||||
},
|
},
|
||||||
"peerDependencies": {
|
"peerDependencies": {
|
||||||
"typescript": "^5.0.0"
|
"typescript": "^5.0.0"
|
||||||
@@ -17,7 +22,7 @@
|
|||||||
},
|
},
|
||||||
"..": {
|
"..": {
|
||||||
"name": "@lancedb/lancedb",
|
"name": "@lancedb/lancedb",
|
||||||
"version": "0.6.0",
|
"version": "0.8.0",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64",
|
"x64",
|
||||||
"arm64"
|
"arm64"
|
||||||
@@ -29,44 +34,791 @@
|
|||||||
"win32"
|
"win32"
|
||||||
],
|
],
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"apache-arrow": "^15.0.0",
|
|
||||||
"axios": "^1.7.2",
|
"axios": "^1.7.2",
|
||||||
"openai": "^4.29.2",
|
|
||||||
"reflect-metadata": "^0.2.2"
|
"reflect-metadata": "^0.2.2"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
|
"@aws-sdk/client-dynamodb": "^3.33.0",
|
||||||
"@aws-sdk/client-kms": "^3.33.0",
|
"@aws-sdk/client-kms": "^3.33.0",
|
||||||
"@aws-sdk/client-s3": "^3.33.0",
|
"@aws-sdk/client-s3": "^3.33.0",
|
||||||
"@biomejs/biome": "^1.7.3",
|
"@biomejs/biome": "^1.7.3",
|
||||||
"@jest/globals": "^29.7.0",
|
"@jest/globals": "^29.7.0",
|
||||||
"@napi-rs/cli": "^2.18.0",
|
"@napi-rs/cli": "^2.18.3",
|
||||||
"@types/axios": "^0.14.0",
|
"@types/axios": "^0.14.0",
|
||||||
"@types/jest": "^29.1.2",
|
"@types/jest": "^29.1.2",
|
||||||
"@types/tmp": "^0.2.6",
|
"@types/tmp": "^0.2.6",
|
||||||
"apache-arrow-old": "npm:apache-arrow@13.0.0",
|
"apache-arrow-13": "npm:apache-arrow@13.0.0",
|
||||||
|
"apache-arrow-14": "npm:apache-arrow@14.0.0",
|
||||||
|
"apache-arrow-15": "npm:apache-arrow@15.0.0",
|
||||||
|
"apache-arrow-16": "npm:apache-arrow@16.0.0",
|
||||||
|
"apache-arrow-17": "npm:apache-arrow@17.0.0",
|
||||||
"eslint": "^8.57.0",
|
"eslint": "^8.57.0",
|
||||||
"jest": "^29.7.0",
|
"jest": "^29.7.0",
|
||||||
"shx": "^0.3.4",
|
"shx": "^0.3.4",
|
||||||
"tmp": "^0.2.3",
|
"tmp": "^0.2.3",
|
||||||
"ts-jest": "^29.1.2",
|
"ts-jest": "^29.1.2",
|
||||||
"typedoc": "^0.25.7",
|
"typedoc": "^0.26.4",
|
||||||
"typedoc-plugin-markdown": "^3.17.1",
|
"typedoc-plugin-markdown": "^4.2.1",
|
||||||
"typescript": "^5.3.3",
|
"typescript": "^5.5.4",
|
||||||
"typescript-eslint": "^7.1.0"
|
"typescript-eslint": "^7.1.0"
|
||||||
},
|
},
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">= 18"
|
"node": ">= 18"
|
||||||
|
},
|
||||||
|
"optionalDependencies": {
|
||||||
|
"@xenova/transformers": ">=2.17 < 3",
|
||||||
|
"openai": "^4.29.2"
|
||||||
|
},
|
||||||
|
"peerDependencies": {
|
||||||
|
"apache-arrow": ">=13.0.0 <=17.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@huggingface/jinja": {
|
||||||
|
"version": "0.2.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/@huggingface/jinja/-/jinja-0.2.2.tgz",
|
||||||
|
"integrity": "sha512-/KPde26khDUIPkTGU82jdtTW9UAuvUTumCAbFs/7giR0SxsvZC4hru51PBvpijH6BVkHcROcvZM/lpy5h1jRRA==",
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@lancedb/lancedb": {
|
"node_modules/@lancedb/lancedb": {
|
||||||
"resolved": "..",
|
"resolved": "..",
|
||||||
"link": true
|
"link": true
|
||||||
},
|
},
|
||||||
|
"node_modules/@protobufjs/aspromise": {
|
||||||
|
"version": "1.1.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
|
||||||
|
"integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ=="
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/base64": {
|
||||||
|
"version": "1.1.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
|
||||||
|
"integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg=="
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/codegen": {
|
||||||
|
"version": "2.0.4",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
|
||||||
|
"integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg=="
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/eventemitter": {
|
||||||
|
"version": "1.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
|
||||||
|
"integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q=="
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/fetch": {
|
||||||
|
"version": "1.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
|
||||||
|
"integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==",
|
||||||
|
"dependencies": {
|
||||||
|
"@protobufjs/aspromise": "^1.1.1",
|
||||||
|
"@protobufjs/inquire": "^1.1.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/float": {
|
||||||
|
"version": "1.0.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
|
||||||
|
"integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ=="
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/inquire": {
|
||||||
|
"version": "1.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
|
||||||
|
"integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q=="
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/path": {
|
||||||
|
"version": "1.1.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
|
||||||
|
"integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA=="
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/pool": {
|
||||||
|
"version": "1.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
|
||||||
|
"integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw=="
|
||||||
|
},
|
||||||
|
"node_modules/@protobufjs/utf8": {
|
||||||
|
"version": "1.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
|
||||||
|
"integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw=="
|
||||||
|
},
|
||||||
|
"node_modules/@types/long": {
|
||||||
|
"version": "4.0.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.2.tgz",
|
||||||
|
"integrity": "sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA=="
|
||||||
|
},
|
||||||
|
"node_modules/@types/node": {
|
||||||
|
"version": "20.14.11",
|
||||||
|
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.11.tgz",
|
||||||
|
"integrity": "sha512-kprQpL8MMeszbz6ojB5/tU8PLN4kesnN8Gjzw349rDlNgsSzg90lAVj3llK99Dh7JON+t9AuscPPFW6mPbTnSA==",
|
||||||
|
"dependencies": {
|
||||||
|
"undici-types": "~5.26.4"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@xenova/transformers": {
|
||||||
|
"version": "2.17.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/@xenova/transformers/-/transformers-2.17.2.tgz",
|
||||||
|
"integrity": "sha512-lZmHqzrVIkSvZdKZEx7IYY51TK0WDrC8eR0c5IMnBsO8di8are1zzw8BlLhyO2TklZKLN5UffNGs1IJwT6oOqQ==",
|
||||||
|
"dependencies": {
|
||||||
|
"@huggingface/jinja": "^0.2.2",
|
||||||
|
"onnxruntime-web": "1.14.0",
|
||||||
|
"sharp": "^0.32.0"
|
||||||
|
},
|
||||||
|
"optionalDependencies": {
|
||||||
|
"onnxruntime-node": "1.14.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/b4a": {
|
||||||
|
"version": "1.6.6",
|
||||||
|
"resolved": "https://registry.npmjs.org/b4a/-/b4a-1.6.6.tgz",
|
||||||
|
"integrity": "sha512-5Tk1HLk6b6ctmjIkAcU/Ujv/1WqiDl0F0JdRCR80VsOcUlHcu7pWeWRlOqQLHfDEsVx9YH/aif5AG4ehoCtTmg=="
|
||||||
|
},
|
||||||
|
"node_modules/bare-events": {
|
||||||
|
"version": "2.4.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/bare-events/-/bare-events-2.4.2.tgz",
|
||||||
|
"integrity": "sha512-qMKFd2qG/36aA4GwvKq8MxnPgCQAmBWmSyLWsJcbn8v03wvIPQ/hG1Ms8bPzndZxMDoHpxez5VOS+gC9Yi24/Q==",
|
||||||
|
"optional": true
|
||||||
|
},
|
||||||
|
"node_modules/bare-fs": {
|
||||||
|
"version": "2.3.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/bare-fs/-/bare-fs-2.3.1.tgz",
|
||||||
|
"integrity": "sha512-W/Hfxc/6VehXlsgFtbB5B4xFcsCl+pAh30cYhoFyXErf6oGrwjh8SwiPAdHgpmWonKuYpZgGywN0SXt7dgsADA==",
|
||||||
|
"optional": true,
|
||||||
|
"dependencies": {
|
||||||
|
"bare-events": "^2.0.0",
|
||||||
|
"bare-path": "^2.0.0",
|
||||||
|
"bare-stream": "^2.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/bare-os": {
|
||||||
|
"version": "2.4.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/bare-os/-/bare-os-2.4.0.tgz",
|
||||||
|
"integrity": "sha512-v8DTT08AS/G0F9xrhyLtepoo9EJBJ85FRSMbu1pQUlAf6A8T0tEEQGMVObWeqpjhSPXsE0VGlluFBJu2fdoTNg==",
|
||||||
|
"optional": true
|
||||||
|
},
|
||||||
|
"node_modules/bare-path": {
|
||||||
|
"version": "2.1.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/bare-path/-/bare-path-2.1.3.tgz",
|
||||||
|
"integrity": "sha512-lh/eITfU8hrj9Ru5quUp0Io1kJWIk1bTjzo7JH1P5dWmQ2EL4hFUlfI8FonAhSlgIfhn63p84CDY/x+PisgcXA==",
|
||||||
|
"optional": true,
|
||||||
|
"dependencies": {
|
||||||
|
"bare-os": "^2.1.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/bare-stream": {
|
||||||
|
"version": "2.1.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/bare-stream/-/bare-stream-2.1.3.tgz",
|
||||||
|
"integrity": "sha512-tiDAH9H/kP+tvNO5sczyn9ZAA7utrSMobyDchsnyyXBuUe2FSQWbxhtuHB8jwpHYYevVo2UJpcmvvjrbHboUUQ==",
|
||||||
|
"optional": true,
|
||||||
|
"dependencies": {
|
||||||
|
"streamx": "^2.18.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/base64-js": {
|
||||||
|
"version": "1.5.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
|
||||||
|
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
|
||||||
|
"funding": [
|
||||||
|
{
|
||||||
|
"type": "github",
|
||||||
|
"url": "https://github.com/sponsors/feross"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "patreon",
|
||||||
|
"url": "https://www.patreon.com/feross"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "consulting",
|
||||||
|
"url": "https://feross.org/support"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"node_modules/bl": {
|
||||||
|
"version": "4.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz",
|
||||||
|
"integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==",
|
||||||
|
"dependencies": {
|
||||||
|
"buffer": "^5.5.0",
|
||||||
|
"inherits": "^2.0.4",
|
||||||
|
"readable-stream": "^3.4.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/buffer": {
|
||||||
|
"version": "5.7.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz",
|
||||||
|
"integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==",
|
||||||
|
"funding": [
|
||||||
|
{
|
||||||
|
"type": "github",
|
||||||
|
"url": "https://github.com/sponsors/feross"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "patreon",
|
||||||
|
"url": "https://www.patreon.com/feross"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "consulting",
|
||||||
|
"url": "https://feross.org/support"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"dependencies": {
|
||||||
|
"base64-js": "^1.3.1",
|
||||||
|
"ieee754": "^1.1.13"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/chownr": {
|
||||||
|
"version": "1.1.4",
|
||||||
|
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
|
||||||
|
"integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="
|
||||||
|
},
|
||||||
|
"node_modules/color": {
|
||||||
|
"version": "4.2.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz",
|
||||||
|
"integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==",
|
||||||
|
"dependencies": {
|
||||||
|
"color-convert": "^2.0.1",
|
||||||
|
"color-string": "^1.9.0"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=12.5.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/color-convert": {
|
||||||
|
"version": "2.0.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
|
||||||
|
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
|
||||||
|
"dependencies": {
|
||||||
|
"color-name": "~1.1.4"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=7.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/color-name": {
|
||||||
|
"version": "1.1.4",
|
||||||
|
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
|
||||||
|
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
|
||||||
|
},
|
||||||
|
"node_modules/color-string": {
|
||||||
|
"version": "1.9.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz",
|
||||||
|
"integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==",
|
||||||
|
"dependencies": {
|
||||||
|
"color-name": "^1.0.0",
|
||||||
|
"simple-swizzle": "^0.2.2"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/decompress-response": {
|
||||||
|
"version": "6.0.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz",
|
||||||
|
"integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==",
|
||||||
|
"dependencies": {
|
||||||
|
"mimic-response": "^3.1.0"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=10"
|
||||||
|
},
|
||||||
|
"funding": {
|
||||||
|
"url": "https://github.com/sponsors/sindresorhus"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/deep-extend": {
|
||||||
|
"version": "0.6.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
|
||||||
|
"integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
|
||||||
|
"engines": {
|
||||||
|
"node": ">=4.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/detect-libc": {
|
||||||
|
"version": "2.0.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz",
|
||||||
|
"integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==",
|
||||||
|
"engines": {
|
||||||
|
"node": ">=8"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/end-of-stream": {
|
||||||
|
"version": "1.4.4",
|
||||||
|
"resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
|
||||||
|
"integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
|
||||||
|
"dependencies": {
|
||||||
|
"once": "^1.4.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/expand-template": {
|
||||||
|
"version": "2.0.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz",
|
||||||
|
"integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==",
|
||||||
|
"engines": {
|
||||||
|
"node": ">=6"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/fast-fifo": {
|
||||||
|
"version": "1.3.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/fast-fifo/-/fast-fifo-1.3.2.tgz",
|
||||||
|
"integrity": "sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ=="
|
||||||
|
},
|
||||||
|
"node_modules/flatbuffers": {
|
||||||
|
"version": "1.12.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/flatbuffers/-/flatbuffers-1.12.0.tgz",
|
||||||
|
"integrity": "sha512-c7CZADjRcl6j0PlvFy0ZqXQ67qSEZfrVPynmnL+2zPc+NtMvrF8Y0QceMo7QqnSPc7+uWjUIAbvCQ5WIKlMVdQ=="
|
||||||
|
},
|
||||||
|
"node_modules/fs-constants": {
|
||||||
|
"version": "1.0.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz",
|
||||||
|
"integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow=="
|
||||||
|
},
|
||||||
|
"node_modules/github-from-package": {
|
||||||
|
"version": "0.0.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz",
|
||||||
|
"integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw=="
|
||||||
|
},
|
||||||
|
"node_modules/guid-typescript": {
|
||||||
|
"version": "1.0.9",
|
||||||
|
"resolved": "https://registry.npmjs.org/guid-typescript/-/guid-typescript-1.0.9.tgz",
|
||||||
|
"integrity": "sha512-Y8T4vYhEfwJOTbouREvG+3XDsjr8E3kIr7uf+JZ0BYloFsttiHU0WfvANVsR7TxNUJa/WpCnw/Ino/p+DeBhBQ=="
|
||||||
|
},
|
||||||
|
"node_modules/ieee754": {
|
||||||
|
"version": "1.2.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
|
||||||
|
"integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==",
|
||||||
|
"funding": [
|
||||||
|
{
|
||||||
|
"type": "github",
|
||||||
|
"url": "https://github.com/sponsors/feross"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "patreon",
|
||||||
|
"url": "https://www.patreon.com/feross"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "consulting",
|
||||||
|
"url": "https://feross.org/support"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"node_modules/inherits": {
|
||||||
|
"version": "2.0.4",
|
||||||
|
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
|
||||||
|
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
|
||||||
|
},
|
||||||
|
"node_modules/ini": {
|
||||||
|
"version": "1.3.8",
|
||||||
|
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
|
||||||
|
"integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew=="
|
||||||
|
},
|
||||||
|
"node_modules/is-arrayish": {
|
||||||
|
"version": "0.3.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz",
|
||||||
|
"integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ=="
|
||||||
|
},
|
||||||
|
"node_modules/long": {
|
||||||
|
"version": "4.0.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz",
|
||||||
|
"integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA=="
|
||||||
|
},
|
||||||
|
"node_modules/mimic-response": {
|
||||||
|
"version": "3.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz",
|
||||||
|
"integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==",
|
||||||
|
"engines": {
|
||||||
|
"node": ">=10"
|
||||||
|
},
|
||||||
|
"funding": {
|
||||||
|
"url": "https://github.com/sponsors/sindresorhus"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/minimist": {
|
||||||
|
"version": "1.2.8",
|
||||||
|
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
|
||||||
|
"integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==",
|
||||||
|
"funding": {
|
||||||
|
"url": "https://github.com/sponsors/ljharb"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/mkdirp-classic": {
|
||||||
|
"version": "0.5.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz",
|
||||||
|
"integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A=="
|
||||||
|
},
|
||||||
|
"node_modules/napi-build-utils": {
|
||||||
|
"version": "1.0.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-1.0.2.tgz",
|
||||||
|
"integrity": "sha512-ONmRUqK7zj7DWX0D9ADe03wbwOBZxNAfF20PlGfCWQcD3+/MakShIHrMqx9YwPTfxDdF1zLeL+RGZiR9kGMLdg=="
|
||||||
|
},
|
||||||
|
"node_modules/node-abi": {
|
||||||
|
"version": "3.65.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.65.0.tgz",
|
||||||
|
"integrity": "sha512-ThjYBfoDNr08AWx6hGaRbfPwxKV9kVzAzOzlLKbk2CuqXE2xnCh+cbAGnwM3t8Lq4v9rUB7VfondlkBckcJrVA==",
|
||||||
|
"dependencies": {
|
||||||
|
"semver": "^7.3.5"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/node-addon-api": {
|
||||||
|
"version": "6.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-6.1.0.tgz",
|
||||||
|
"integrity": "sha512-+eawOlIgy680F0kBzPUNFhMZGtJ1YmqM6l4+Crf4IkImjYrO/mqPwRMh352g23uIaQKFItcQ64I7KMaJxHgAVA=="
|
||||||
|
},
|
||||||
|
"node_modules/once": {
|
||||||
|
"version": "1.4.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
|
||||||
|
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
|
||||||
|
"dependencies": {
|
||||||
|
"wrappy": "1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/onnx-proto": {
|
||||||
|
"version": "4.0.4",
|
||||||
|
"resolved": "https://registry.npmjs.org/onnx-proto/-/onnx-proto-4.0.4.tgz",
|
||||||
|
"integrity": "sha512-aldMOB3HRoo6q/phyB6QRQxSt895HNNw82BNyZ2CMh4bjeKv7g/c+VpAFtJuEMVfYLMbRx61hbuqnKceLeDcDA==",
|
||||||
|
"dependencies": {
|
||||||
|
"protobufjs": "^6.8.8"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/onnxruntime-common": {
|
||||||
|
"version": "1.14.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.14.0.tgz",
|
||||||
|
"integrity": "sha512-3LJpegM2iMNRX2wUmtYfeX/ytfOzNwAWKSq1HbRrKc9+uqG/FsEA0bbKZl1btQeZaXhC26l44NWpNUeXPII7Ew=="
|
||||||
|
},
|
||||||
|
"node_modules/onnxruntime-node": {
|
||||||
|
"version": "1.14.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/onnxruntime-node/-/onnxruntime-node-1.14.0.tgz",
|
||||||
|
"integrity": "sha512-5ba7TWomIV/9b6NH/1x/8QEeowsb+jBEvFzU6z0T4mNsFwdPqXeFUM7uxC6QeSRkEbWu3qEB0VMjrvzN/0S9+w==",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"win32",
|
||||||
|
"darwin",
|
||||||
|
"linux"
|
||||||
|
],
|
||||||
|
"dependencies": {
|
||||||
|
"onnxruntime-common": "~1.14.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/onnxruntime-web": {
|
||||||
|
"version": "1.14.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/onnxruntime-web/-/onnxruntime-web-1.14.0.tgz",
|
||||||
|
"integrity": "sha512-Kcqf43UMfW8mCydVGcX9OMXI2VN17c0p6XvR7IPSZzBf/6lteBzXHvcEVWDPmCKuGombl997HgLqj91F11DzXw==",
|
||||||
|
"dependencies": {
|
||||||
|
"flatbuffers": "^1.12.0",
|
||||||
|
"guid-typescript": "^1.0.9",
|
||||||
|
"long": "^4.0.0",
|
||||||
|
"onnx-proto": "^4.0.4",
|
||||||
|
"onnxruntime-common": "~1.14.0",
|
||||||
|
"platform": "^1.3.6"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/platform": {
|
||||||
|
"version": "1.3.6",
|
||||||
|
"resolved": "https://registry.npmjs.org/platform/-/platform-1.3.6.tgz",
|
||||||
|
"integrity": "sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg=="
|
||||||
|
},
|
||||||
|
"node_modules/prebuild-install": {
|
||||||
|
"version": "7.1.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.2.tgz",
|
||||||
|
"integrity": "sha512-UnNke3IQb6sgarcZIDU3gbMeTp/9SSU1DAIkil7PrqG1vZlBtY5msYccSKSHDqa3hNg436IXK+SNImReuA1wEQ==",
|
||||||
|
"dependencies": {
|
||||||
|
"detect-libc": "^2.0.0",
|
||||||
|
"expand-template": "^2.0.3",
|
||||||
|
"github-from-package": "0.0.0",
|
||||||
|
"minimist": "^1.2.3",
|
||||||
|
"mkdirp-classic": "^0.5.3",
|
||||||
|
"napi-build-utils": "^1.0.1",
|
||||||
|
"node-abi": "^3.3.0",
|
||||||
|
"pump": "^3.0.0",
|
||||||
|
"rc": "^1.2.7",
|
||||||
|
"simple-get": "^4.0.0",
|
||||||
|
"tar-fs": "^2.0.0",
|
||||||
|
"tunnel-agent": "^0.6.0"
|
||||||
|
},
|
||||||
|
"bin": {
|
||||||
|
"prebuild-install": "bin.js"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/prebuild-install/node_modules/tar-fs": {
|
||||||
|
"version": "2.1.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz",
|
||||||
|
"integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==",
|
||||||
|
"dependencies": {
|
||||||
|
"chownr": "^1.1.1",
|
||||||
|
"mkdirp-classic": "^0.5.2",
|
||||||
|
"pump": "^3.0.0",
|
||||||
|
"tar-stream": "^2.1.4"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/prebuild-install/node_modules/tar-stream": {
|
||||||
|
"version": "2.2.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz",
|
||||||
|
"integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==",
|
||||||
|
"dependencies": {
|
||||||
|
"bl": "^4.0.3",
|
||||||
|
"end-of-stream": "^1.4.1",
|
||||||
|
"fs-constants": "^1.0.0",
|
||||||
|
"inherits": "^2.0.3",
|
||||||
|
"readable-stream": "^3.1.1"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=6"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/protobufjs": {
|
||||||
|
"version": "6.11.4",
|
||||||
|
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.4.tgz",
|
||||||
|
"integrity": "sha512-5kQWPaJHi1WoCpjTGszzQ32PG2F4+wRY6BmAT4Vfw56Q2FZ4YZzK20xUYQH4YkfehY1e6QSICrJquM6xXZNcrw==",
|
||||||
|
"hasInstallScript": true,
|
||||||
|
"dependencies": {
|
||||||
|
"@protobufjs/aspromise": "^1.1.2",
|
||||||
|
"@protobufjs/base64": "^1.1.2",
|
||||||
|
"@protobufjs/codegen": "^2.0.4",
|
||||||
|
"@protobufjs/eventemitter": "^1.1.0",
|
||||||
|
"@protobufjs/fetch": "^1.1.0",
|
||||||
|
"@protobufjs/float": "^1.0.2",
|
||||||
|
"@protobufjs/inquire": "^1.1.0",
|
||||||
|
"@protobufjs/path": "^1.1.2",
|
||||||
|
"@protobufjs/pool": "^1.1.0",
|
||||||
|
"@protobufjs/utf8": "^1.1.0",
|
||||||
|
"@types/long": "^4.0.1",
|
||||||
|
"@types/node": ">=13.7.0",
|
||||||
|
"long": "^4.0.0"
|
||||||
|
},
|
||||||
|
"bin": {
|
||||||
|
"pbjs": "bin/pbjs",
|
||||||
|
"pbts": "bin/pbts"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/pump": {
|
||||||
|
"version": "3.0.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
|
||||||
|
"integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
|
||||||
|
"dependencies": {
|
||||||
|
"end-of-stream": "^1.1.0",
|
||||||
|
"once": "^1.3.1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/queue-tick": {
|
||||||
|
"version": "1.0.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/queue-tick/-/queue-tick-1.0.1.tgz",
|
||||||
|
"integrity": "sha512-kJt5qhMxoszgU/62PLP1CJytzd2NKetjSRnyuj31fDd3Rlcz3fzlFdFLD1SItunPwyqEOkca6GbV612BWfaBag=="
|
||||||
|
},
|
||||||
|
"node_modules/rc": {
|
||||||
|
"version": "1.2.8",
|
||||||
|
"resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
|
||||||
|
"integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
|
||||||
|
"dependencies": {
|
||||||
|
"deep-extend": "^0.6.0",
|
||||||
|
"ini": "~1.3.0",
|
||||||
|
"minimist": "^1.2.0",
|
||||||
|
"strip-json-comments": "~2.0.1"
|
||||||
|
},
|
||||||
|
"bin": {
|
||||||
|
"rc": "cli.js"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/readable-stream": {
|
||||||
|
"version": "3.6.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz",
|
||||||
|
"integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==",
|
||||||
|
"dependencies": {
|
||||||
|
"inherits": "^2.0.3",
|
||||||
|
"string_decoder": "^1.1.1",
|
||||||
|
"util-deprecate": "^1.0.1"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">= 6"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/safe-buffer": {
|
||||||
|
"version": "5.2.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
|
||||||
|
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
|
||||||
|
"funding": [
|
||||||
|
{
|
||||||
|
"type": "github",
|
||||||
|
"url": "https://github.com/sponsors/feross"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "patreon",
|
||||||
|
"url": "https://www.patreon.com/feross"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "consulting",
|
||||||
|
"url": "https://feross.org/support"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"node_modules/semver": {
|
||||||
|
"version": "7.6.3",
|
||||||
|
"resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz",
|
||||||
|
"integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==",
|
||||||
|
"bin": {
|
||||||
|
"semver": "bin/semver.js"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=10"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/sharp": {
|
||||||
|
"version": "0.32.6",
|
||||||
|
"resolved": "https://registry.npmjs.org/sharp/-/sharp-0.32.6.tgz",
|
||||||
|
"integrity": "sha512-KyLTWwgcR9Oe4d9HwCwNM2l7+J0dUQwn/yf7S0EnTtb0eVS4RxO0eUSvxPtzT4F3SY+C4K6fqdv/DO27sJ/v/w==",
|
||||||
|
"hasInstallScript": true,
|
||||||
|
"dependencies": {
|
||||||
|
"color": "^4.2.3",
|
||||||
|
"detect-libc": "^2.0.2",
|
||||||
|
"node-addon-api": "^6.1.0",
|
||||||
|
"prebuild-install": "^7.1.1",
|
||||||
|
"semver": "^7.5.4",
|
||||||
|
"simple-get": "^4.0.1",
|
||||||
|
"tar-fs": "^3.0.4",
|
||||||
|
"tunnel-agent": "^0.6.0"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=14.15.0"
|
||||||
|
},
|
||||||
|
"funding": {
|
||||||
|
"url": "https://opencollective.com/libvips"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/simple-concat": {
|
||||||
|
"version": "1.0.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz",
|
||||||
|
"integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==",
|
||||||
|
"funding": [
|
||||||
|
{
|
||||||
|
"type": "github",
|
||||||
|
"url": "https://github.com/sponsors/feross"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "patreon",
|
||||||
|
"url": "https://www.patreon.com/feross"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "consulting",
|
||||||
|
"url": "https://feross.org/support"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"node_modules/simple-get": {
|
||||||
|
"version": "4.0.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz",
|
||||||
|
"integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==",
|
||||||
|
"funding": [
|
||||||
|
{
|
||||||
|
"type": "github",
|
||||||
|
"url": "https://github.com/sponsors/feross"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "patreon",
|
||||||
|
"url": "https://www.patreon.com/feross"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "consulting",
|
||||||
|
"url": "https://feross.org/support"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"dependencies": {
|
||||||
|
"decompress-response": "^6.0.0",
|
||||||
|
"once": "^1.3.1",
|
||||||
|
"simple-concat": "^1.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/simple-swizzle": {
|
||||||
|
"version": "0.2.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz",
|
||||||
|
"integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==",
|
||||||
|
"dependencies": {
|
||||||
|
"is-arrayish": "^0.3.1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/streamx": {
|
||||||
|
"version": "2.18.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/streamx/-/streamx-2.18.0.tgz",
|
||||||
|
"integrity": "sha512-LLUC1TWdjVdn1weXGcSxyTR3T4+acB6tVGXT95y0nGbca4t4o/ng1wKAGTljm9VicuCVLvRlqFYXYy5GwgM7sQ==",
|
||||||
|
"dependencies": {
|
||||||
|
"fast-fifo": "^1.3.2",
|
||||||
|
"queue-tick": "^1.0.1",
|
||||||
|
"text-decoder": "^1.1.0"
|
||||||
|
},
|
||||||
|
"optionalDependencies": {
|
||||||
|
"bare-events": "^2.2.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/string_decoder": {
|
||||||
|
"version": "1.3.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
|
||||||
|
"integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
|
||||||
|
"dependencies": {
|
||||||
|
"safe-buffer": "~5.2.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/strip-json-comments": {
|
||||||
|
"version": "2.0.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
|
||||||
|
"integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==",
|
||||||
|
"engines": {
|
||||||
|
"node": ">=0.10.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/tar-fs": {
|
||||||
|
"version": "3.0.6",
|
||||||
|
"resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-3.0.6.tgz",
|
||||||
|
"integrity": "sha512-iokBDQQkUyeXhgPYaZxmczGPhnhXZ0CmrqI+MOb/WFGS9DW5wnfrLgtjUJBvz50vQ3qfRwJ62QVoCFu8mPVu5w==",
|
||||||
|
"dependencies": {
|
||||||
|
"pump": "^3.0.0",
|
||||||
|
"tar-stream": "^3.1.5"
|
||||||
|
},
|
||||||
|
"optionalDependencies": {
|
||||||
|
"bare-fs": "^2.1.1",
|
||||||
|
"bare-path": "^2.1.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/tar-stream": {
|
||||||
|
"version": "3.1.7",
|
||||||
|
"resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.7.tgz",
|
||||||
|
"integrity": "sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==",
|
||||||
|
"dependencies": {
|
||||||
|
"b4a": "^1.6.4",
|
||||||
|
"fast-fifo": "^1.2.0",
|
||||||
|
"streamx": "^2.15.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/text-decoder": {
|
||||||
|
"version": "1.1.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/text-decoder/-/text-decoder-1.1.1.tgz",
|
||||||
|
"integrity": "sha512-8zll7REEv4GDD3x4/0pW+ppIxSNs7H1J10IKFZsuOMscumCdM2a+toDGLPA3T+1+fLBql4zbt5z83GEQGGV5VA==",
|
||||||
|
"dependencies": {
|
||||||
|
"b4a": "^1.6.4"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/tsc": {
|
||||||
|
"version": "2.0.4",
|
||||||
|
"resolved": "https://registry.npmjs.org/tsc/-/tsc-2.0.4.tgz",
|
||||||
|
"integrity": "sha512-fzoSieZI5KKJVBYGvwbVZs/J5za84f2lSTLPYf6AGiIf43tZ3GNrI1QzTLcjtyDDP4aLxd46RTZq1nQxe7+k5Q==",
|
||||||
|
"license": "MIT",
|
||||||
|
"bin": {
|
||||||
|
"tsc": "bin/tsc"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/tunnel-agent": {
|
||||||
|
"version": "0.6.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz",
|
||||||
|
"integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==",
|
||||||
|
"dependencies": {
|
||||||
|
"safe-buffer": "^5.0.1"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": "*"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/typescript": {
|
"node_modules/typescript": {
|
||||||
"version": "5.5.2",
|
"version": "5.5.4",
|
||||||
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.2.tgz",
|
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.4.tgz",
|
||||||
"integrity": "sha512-NcRtPEOsPFFWjobJEtfihkLCZCXZt/os3zf8nTxjVH3RvTSxjrCamJpbExGvYOF+tFHc3pA65qpdwPbzjohhew==",
|
"integrity": "sha512-Mtq29sKDAEYP7aljRgtPOpTvOfbwRWlS6dPRzwjdE+C0R4brX/GUyhHSecbHMFLNBLcJIPt9nl9yG5TZ1weH+Q==",
|
||||||
"peer": true,
|
"dev": true,
|
||||||
|
"license": "Apache-2.0",
|
||||||
"bin": {
|
"bin": {
|
||||||
"tsc": "bin/tsc",
|
"tsc": "bin/tsc",
|
||||||
"tsserver": "bin/tsserver"
|
"tsserver": "bin/tsserver"
|
||||||
@@ -74,6 +826,21 @@
|
|||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=14.17"
|
"node": ">=14.17"
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
"node_modules/undici-types": {
|
||||||
|
"version": "5.26.5",
|
||||||
|
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
|
||||||
|
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="
|
||||||
|
},
|
||||||
|
"node_modules/util-deprecate": {
|
||||||
|
"version": "1.0.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
|
||||||
|
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
|
||||||
|
},
|
||||||
|
"node_modules/wrappy": {
|
||||||
|
"version": "1.0.2",
|
||||||
|
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
|
||||||
|
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,9 +10,19 @@
|
|||||||
"author": "Lance Devs",
|
"author": "Lance Devs",
|
||||||
"license": "Apache-2.0",
|
"license": "Apache-2.0",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@lancedb/lancedb": "file:../"
|
"@lancedb/lancedb": "file:../",
|
||||||
|
"@xenova/transformers": "^2.17.2"
|
||||||
},
|
},
|
||||||
"peerDependencies": {
|
"devDependencies": {
|
||||||
"typescript": "^5.0.0"
|
"typescript": "^5.5.4"
|
||||||
|
},
|
||||||
|
"compilerOptions": {
|
||||||
|
"target": "ESNext",
|
||||||
|
"module": "ESNext",
|
||||||
|
"moduleResolution": "Node",
|
||||||
|
"strict": true,
|
||||||
|
"esModuleInterop": true,
|
||||||
|
"skipLibCheck": true,
|
||||||
|
"forceConsistentCasingInFileNames": true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -32,6 +32,7 @@ const _results2 = await tbl
|
|||||||
.distanceType("cosine")
|
.distanceType("cosine")
|
||||||
.limit(10)
|
.limit(10)
|
||||||
.toArray();
|
.toArray();
|
||||||
|
console.log(_results2);
|
||||||
// --8<-- [end:search2]
|
// --8<-- [end:search2]
|
||||||
|
|
||||||
console.log("search: done");
|
console.log("search: done");
|
||||||
|
|||||||
50
nodejs/examples/sentence-transformers.js
Normal file
50
nodejs/examples/sentence-transformers.js
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
import * as lancedb from "@lancedb/lancedb";
|
||||||
|
|
||||||
|
import { LanceSchema, getRegistry } from "@lancedb/lancedb/embedding";
|
||||||
|
import { Utf8 } from "apache-arrow";
|
||||||
|
|
||||||
|
const db = await lancedb.connect("/tmp/db");
|
||||||
|
const func = await getRegistry().get("huggingface").create();
|
||||||
|
|
||||||
|
const facts = [
|
||||||
|
"Albert Einstein was a theoretical physicist.",
|
||||||
|
"The capital of France is Paris.",
|
||||||
|
"The Great Wall of China is one of the Seven Wonders of the World.",
|
||||||
|
"Python is a popular programming language.",
|
||||||
|
"Mount Everest is the highest mountain in the world.",
|
||||||
|
"Leonardo da Vinci painted the Mona Lisa.",
|
||||||
|
"Shakespeare wrote Hamlet.",
|
||||||
|
"The human body has 206 bones.",
|
||||||
|
"The speed of light is approximately 299,792 kilometers per second.",
|
||||||
|
"Water boils at 100 degrees Celsius.",
|
||||||
|
"The Earth orbits the Sun.",
|
||||||
|
"The Pyramids of Giza are located in Egypt.",
|
||||||
|
"Coffee is one of the most popular beverages in the world.",
|
||||||
|
"Tokyo is the capital city of Japan.",
|
||||||
|
"Photosynthesis is the process by which plants make their food.",
|
||||||
|
"The Pacific Ocean is the largest ocean on Earth.",
|
||||||
|
"Mozart was a prolific composer of classical music.",
|
||||||
|
"The Internet is a global network of computers.",
|
||||||
|
"Basketball is a sport played with a ball and a hoop.",
|
||||||
|
"The first computer virus was created in 1983.",
|
||||||
|
"Artificial neural networks are inspired by the human brain.",
|
||||||
|
"Deep learning is a subset of machine learning.",
|
||||||
|
"IBM's Watson won Jeopardy! in 2011.",
|
||||||
|
"The first computer programmer was Ada Lovelace.",
|
||||||
|
"The first chatbot was ELIZA, created in the 1960s.",
|
||||||
|
].map((text) => ({ text }));
|
||||||
|
|
||||||
|
const factsSchema = LanceSchema({
|
||||||
|
text: func.sourceField(new Utf8()),
|
||||||
|
vector: func.vectorField(),
|
||||||
|
});
|
||||||
|
|
||||||
|
const tbl = await db.createTable("facts", facts, {
|
||||||
|
mode: "overwrite",
|
||||||
|
schema: factsSchema,
|
||||||
|
});
|
||||||
|
|
||||||
|
const query = "How many bones are in the human body?";
|
||||||
|
const actual = await tbl.search(query).limit(1).toArray();
|
||||||
|
|
||||||
|
console.log("Answer: ", actual[0]["text"]);
|
||||||
@@ -103,50 +103,11 @@ export type IntoVector =
|
|||||||
| number[]
|
| number[]
|
||||||
| Promise<Float32Array | Float64Array | number[]>;
|
| Promise<Float32Array | Float64Array | number[]>;
|
||||||
|
|
||||||
export type FloatLike =
|
|
||||||
| import("apache-arrow-13").Float
|
|
||||||
| import("apache-arrow-14").Float
|
|
||||||
| import("apache-arrow-15").Float
|
|
||||||
| import("apache-arrow-16").Float
|
|
||||||
| import("apache-arrow-17").Float;
|
|
||||||
export type DataTypeLike =
|
|
||||||
| import("apache-arrow-13").DataType
|
|
||||||
| import("apache-arrow-14").DataType
|
|
||||||
| import("apache-arrow-15").DataType
|
|
||||||
| import("apache-arrow-16").DataType
|
|
||||||
| import("apache-arrow-17").DataType;
|
|
||||||
|
|
||||||
export function isArrowTable(value: object): value is TableLike {
|
export function isArrowTable(value: object): value is TableLike {
|
||||||
if (value instanceof ArrowTable) return true;
|
if (value instanceof ArrowTable) return true;
|
||||||
return "schema" in value && "batches" in value;
|
return "schema" in value && "batches" in value;
|
||||||
}
|
}
|
||||||
|
|
||||||
export function isDataType(value: unknown): value is DataTypeLike {
|
|
||||||
return (
|
|
||||||
value instanceof DataType ||
|
|
||||||
DataType.isNull(value) ||
|
|
||||||
DataType.isInt(value) ||
|
|
||||||
DataType.isFloat(value) ||
|
|
||||||
DataType.isBinary(value) ||
|
|
||||||
DataType.isLargeBinary(value) ||
|
|
||||||
DataType.isUtf8(value) ||
|
|
||||||
DataType.isLargeUtf8(value) ||
|
|
||||||
DataType.isBool(value) ||
|
|
||||||
DataType.isDecimal(value) ||
|
|
||||||
DataType.isDate(value) ||
|
|
||||||
DataType.isTime(value) ||
|
|
||||||
DataType.isTimestamp(value) ||
|
|
||||||
DataType.isInterval(value) ||
|
|
||||||
DataType.isDuration(value) ||
|
|
||||||
DataType.isList(value) ||
|
|
||||||
DataType.isStruct(value) ||
|
|
||||||
DataType.isUnion(value) ||
|
|
||||||
DataType.isFixedSizeBinary(value) ||
|
|
||||||
DataType.isFixedSizeList(value) ||
|
|
||||||
DataType.isMap(value) ||
|
|
||||||
DataType.isDictionary(value)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
export function isNull(value: unknown): value is Null {
|
export function isNull(value: unknown): value is Null {
|
||||||
return value instanceof Null || DataType.isNull(value);
|
return value instanceof Null || DataType.isNull(value);
|
||||||
}
|
}
|
||||||
@@ -578,7 +539,7 @@ async function applyEmbeddingsFromMetadata(
|
|||||||
schema: Schema,
|
schema: Schema,
|
||||||
): Promise<ArrowTable> {
|
): Promise<ArrowTable> {
|
||||||
const registry = getRegistry();
|
const registry = getRegistry();
|
||||||
const functions = registry.parseFunctions(schema.metadata);
|
const functions = await registry.parseFunctions(schema.metadata);
|
||||||
|
|
||||||
const columns = Object.fromEntries(
|
const columns = Object.fromEntries(
|
||||||
table.schema.fields.map((field) => [
|
table.schema.fields.map((field) => [
|
||||||
|
|||||||
@@ -44,10 +44,29 @@ export interface CreateTableOptions {
|
|||||||
* The available options are described at https://lancedb.github.io/lancedb/guides/storage/
|
* The available options are described at https://lancedb.github.io/lancedb/guides/storage/
|
||||||
*/
|
*/
|
||||||
storageOptions?: Record<string, string>;
|
storageOptions?: Record<string, string>;
|
||||||
|
/**
|
||||||
|
* The version of the data storage format to use.
|
||||||
|
*
|
||||||
|
* The default is `legacy`, which is Lance format v1.
|
||||||
|
* `stable` is the new format, which is Lance format v2.
|
||||||
|
*/
|
||||||
|
dataStorageVersion?: string;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Use the new V2 manifest paths. These paths provide more efficient
|
||||||
|
* opening of datasets with many versions on object stores. WARNING:
|
||||||
|
* turning this on will make the dataset unreadable for older versions
|
||||||
|
* of LanceDB (prior to 0.10.0). To migrate an existing dataset, instead
|
||||||
|
* use the {@link LocalTable#migrateManifestPathsV2} method.
|
||||||
|
*/
|
||||||
|
enableV2ManifestPaths?: boolean;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If true then data files will be written with the legacy format
|
* If true then data files will be written with the legacy format
|
||||||
*
|
*
|
||||||
* The default is true while the new format is in beta
|
* The default is true while the new format is in beta
|
||||||
|
*
|
||||||
|
* Deprecated.
|
||||||
*/
|
*/
|
||||||
useLegacyFormat?: boolean;
|
useLegacyFormat?: boolean;
|
||||||
schema?: SchemaLike;
|
schema?: SchemaLike;
|
||||||
@@ -240,18 +259,27 @@ export class LocalConnection extends Connection {
|
|||||||
): Promise<Table> {
|
): Promise<Table> {
|
||||||
if (typeof nameOrOptions !== "string" && "name" in nameOrOptions) {
|
if (typeof nameOrOptions !== "string" && "name" in nameOrOptions) {
|
||||||
const { name, data, ...options } = nameOrOptions;
|
const { name, data, ...options } = nameOrOptions;
|
||||||
|
|
||||||
return this.createTable(name, data, options);
|
return this.createTable(name, data, options);
|
||||||
}
|
}
|
||||||
if (data === undefined) {
|
if (data === undefined) {
|
||||||
throw new Error("data is required");
|
throw new Error("data is required");
|
||||||
}
|
}
|
||||||
const { buf, mode } = await Table.parseTableData(data, options);
|
const { buf, mode } = await Table.parseTableData(data, options);
|
||||||
|
let dataStorageVersion = "legacy";
|
||||||
|
if (options?.dataStorageVersion !== undefined) {
|
||||||
|
dataStorageVersion = options.dataStorageVersion;
|
||||||
|
} else if (options?.useLegacyFormat !== undefined) {
|
||||||
|
dataStorageVersion = options.useLegacyFormat ? "legacy" : "stable";
|
||||||
|
}
|
||||||
|
|
||||||
const innerTable = await this.inner.createTable(
|
const innerTable = await this.inner.createTable(
|
||||||
nameOrOptions,
|
nameOrOptions,
|
||||||
buf,
|
buf,
|
||||||
mode,
|
mode,
|
||||||
cleanseStorageOptions(options?.storageOptions),
|
cleanseStorageOptions(options?.storageOptions),
|
||||||
options?.useLegacyFormat,
|
dataStorageVersion,
|
||||||
|
options?.enableV2ManifestPaths,
|
||||||
);
|
);
|
||||||
|
|
||||||
return new LocalTable(innerTable);
|
return new LocalTable(innerTable);
|
||||||
@@ -275,6 +303,13 @@ export class LocalConnection extends Connection {
|
|||||||
metadata = registry.getTableMetadata([embeddingFunction]);
|
metadata = registry.getTableMetadata([embeddingFunction]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let dataStorageVersion = "legacy";
|
||||||
|
if (options?.dataStorageVersion !== undefined) {
|
||||||
|
dataStorageVersion = options.dataStorageVersion;
|
||||||
|
} else if (options?.useLegacyFormat !== undefined) {
|
||||||
|
dataStorageVersion = options.useLegacyFormat ? "legacy" : "stable";
|
||||||
|
}
|
||||||
|
|
||||||
const table = makeEmptyTable(schema, metadata);
|
const table = makeEmptyTable(schema, metadata);
|
||||||
const buf = await fromTableToBuffer(table);
|
const buf = await fromTableToBuffer(table);
|
||||||
const innerTable = await this.inner.createEmptyTable(
|
const innerTable = await this.inner.createEmptyTable(
|
||||||
@@ -282,7 +317,8 @@ export class LocalConnection extends Connection {
|
|||||||
buf,
|
buf,
|
||||||
mode,
|
mode,
|
||||||
cleanseStorageOptions(options?.storageOptions),
|
cleanseStorageOptions(options?.storageOptions),
|
||||||
options?.useLegacyFormat,
|
dataStorageVersion,
|
||||||
|
options?.enableV2ManifestPaths,
|
||||||
);
|
);
|
||||||
return new LocalTable(innerTable);
|
return new LocalTable(innerTable);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,13 +15,12 @@
|
|||||||
import "reflect-metadata";
|
import "reflect-metadata";
|
||||||
import {
|
import {
|
||||||
DataType,
|
DataType,
|
||||||
DataTypeLike,
|
|
||||||
Field,
|
Field,
|
||||||
FixedSizeList,
|
FixedSizeList,
|
||||||
|
Float,
|
||||||
Float32,
|
Float32,
|
||||||
FloatLike,
|
|
||||||
type IntoVector,
|
type IntoVector,
|
||||||
isDataType,
|
Utf8,
|
||||||
isFixedSizeList,
|
isFixedSizeList,
|
||||||
isFloat,
|
isFloat,
|
||||||
newVectorType,
|
newVectorType,
|
||||||
@@ -41,6 +40,7 @@ export interface EmbeddingFunctionConstructor<
|
|||||||
> {
|
> {
|
||||||
new (modelOptions?: T["TOptions"]): T;
|
new (modelOptions?: T["TOptions"]): T;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* An embedding function that automatically creates vector representation for a given column.
|
* An embedding function that automatically creates vector representation for a given column.
|
||||||
*/
|
*/
|
||||||
@@ -82,6 +82,8 @@ export abstract class EmbeddingFunction<
|
|||||||
*/
|
*/
|
||||||
abstract toJSON(): Partial<M>;
|
abstract toJSON(): Partial<M>;
|
||||||
|
|
||||||
|
async init?(): Promise<void>;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* sourceField is used in combination with `LanceSchema` to provide a declarative data model
|
* sourceField is used in combination with `LanceSchema` to provide a declarative data model
|
||||||
*
|
*
|
||||||
@@ -90,11 +92,12 @@ export abstract class EmbeddingFunction<
|
|||||||
* @see {@link lancedb.LanceSchema}
|
* @see {@link lancedb.LanceSchema}
|
||||||
*/
|
*/
|
||||||
sourceField(
|
sourceField(
|
||||||
optionsOrDatatype: Partial<FieldOptions> | DataTypeLike,
|
optionsOrDatatype: Partial<FieldOptions> | DataType,
|
||||||
): [DataTypeLike, Map<string, EmbeddingFunction>] {
|
): [DataType, Map<string, EmbeddingFunction>] {
|
||||||
let datatype = isDataType(optionsOrDatatype)
|
let datatype =
|
||||||
? optionsOrDatatype
|
"datatype" in optionsOrDatatype
|
||||||
: optionsOrDatatype?.datatype;
|
? optionsOrDatatype.datatype
|
||||||
|
: optionsOrDatatype;
|
||||||
if (!datatype) {
|
if (!datatype) {
|
||||||
throw new Error("Datatype is required");
|
throw new Error("Datatype is required");
|
||||||
}
|
}
|
||||||
@@ -120,15 +123,17 @@ export abstract class EmbeddingFunction<
|
|||||||
let dims: number | undefined = this.ndims();
|
let dims: number | undefined = this.ndims();
|
||||||
|
|
||||||
// `func.vectorField(new Float32())`
|
// `func.vectorField(new Float32())`
|
||||||
if (isDataType(optionsOrDatatype)) {
|
if (optionsOrDatatype === undefined) {
|
||||||
dtype = optionsOrDatatype;
|
dtype = new Float32();
|
||||||
|
} else if (!("datatype" in optionsOrDatatype)) {
|
||||||
|
dtype = sanitizeType(optionsOrDatatype);
|
||||||
} else {
|
} else {
|
||||||
// `func.vectorField({
|
// `func.vectorField({
|
||||||
// datatype: new Float32(),
|
// datatype: new Float32(),
|
||||||
// dims: 10
|
// dims: 10
|
||||||
// })`
|
// })`
|
||||||
dims = dims ?? optionsOrDatatype?.dims;
|
dims = dims ?? optionsOrDatatype?.dims;
|
||||||
dtype = optionsOrDatatype?.datatype;
|
dtype = sanitizeType(optionsOrDatatype?.datatype);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (dtype !== undefined) {
|
if (dtype !== undefined) {
|
||||||
@@ -170,7 +175,7 @@ export abstract class EmbeddingFunction<
|
|||||||
}
|
}
|
||||||
|
|
||||||
/** The datatype of the embeddings */
|
/** The datatype of the embeddings */
|
||||||
abstract embeddingDataType(): FloatLike;
|
abstract embeddingDataType(): Float;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates a vector representation for the given values.
|
* Creates a vector representation for the given values.
|
||||||
@@ -189,6 +194,38 @@ export abstract class EmbeddingFunction<
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* an abstract class for implementing embedding functions that take text as input
|
||||||
|
*/
|
||||||
|
export abstract class TextEmbeddingFunction<
|
||||||
|
M extends FunctionOptions = FunctionOptions,
|
||||||
|
> extends EmbeddingFunction<string, M> {
|
||||||
|
//** Generate the embeddings for the given texts */
|
||||||
|
abstract generateEmbeddings(
|
||||||
|
texts: string[],
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: we don't know what the implementor will do
|
||||||
|
...args: any[]
|
||||||
|
): Promise<number[][] | Float32Array[] | Float64Array[]>;
|
||||||
|
|
||||||
|
async computeQueryEmbeddings(data: string): Promise<Awaited<IntoVector>> {
|
||||||
|
return this.generateEmbeddings([data]).then((data) => data[0]);
|
||||||
|
}
|
||||||
|
|
||||||
|
embeddingDataType(): Float {
|
||||||
|
return new Float32();
|
||||||
|
}
|
||||||
|
|
||||||
|
override sourceField(): [DataType, Map<string, EmbeddingFunction>] {
|
||||||
|
return super.sourceField(new Utf8());
|
||||||
|
}
|
||||||
|
|
||||||
|
computeSourceEmbeddings(
|
||||||
|
data: string[],
|
||||||
|
): Promise<number[][] | Float32Array[] | Float64Array[]> {
|
||||||
|
return this.generateEmbeddings(data);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
export interface FieldOptions<T extends DataType = DataType> {
|
export interface FieldOptions<T extends DataType = DataType> {
|
||||||
datatype: T;
|
datatype: T;
|
||||||
dims?: number;
|
dims?: number;
|
||||||
|
|||||||
@@ -12,16 +12,16 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
import { DataType, Field, Schema } from "../arrow";
|
import { Field, Schema } from "../arrow";
|
||||||
import { isDataType } from "../arrow";
|
|
||||||
import { sanitizeType } from "../sanitize";
|
import { sanitizeType } from "../sanitize";
|
||||||
import { EmbeddingFunction } from "./embedding_function";
|
import { EmbeddingFunction } from "./embedding_function";
|
||||||
import { EmbeddingFunctionConfig, getRegistry } from "./registry";
|
import { EmbeddingFunctionConfig, getRegistry } from "./registry";
|
||||||
|
|
||||||
export { EmbeddingFunction } from "./embedding_function";
|
export { EmbeddingFunction, TextEmbeddingFunction } from "./embedding_function";
|
||||||
|
|
||||||
// We need to explicitly export '*' so that the `register` decorator actually registers the class.
|
// We need to explicitly export '*' so that the `register` decorator actually registers the class.
|
||||||
export * from "./openai";
|
export * from "./openai";
|
||||||
|
export * from "./transformers";
|
||||||
export * from "./registry";
|
export * from "./registry";
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -56,15 +56,15 @@ export function LanceSchema(
|
|||||||
Partial<EmbeddingFunctionConfig>
|
Partial<EmbeddingFunctionConfig>
|
||||||
>();
|
>();
|
||||||
Object.entries(fields).forEach(([key, value]) => {
|
Object.entries(fields).forEach(([key, value]) => {
|
||||||
if (isDataType(value)) {
|
if (Array.isArray(value)) {
|
||||||
arrowFields.push(new Field(key, sanitizeType(value), true));
|
|
||||||
} else {
|
|
||||||
const [dtype, metadata] = value as [
|
const [dtype, metadata] = value as [
|
||||||
object,
|
object,
|
||||||
Map<string, EmbeddingFunction>,
|
Map<string, EmbeddingFunction>,
|
||||||
];
|
];
|
||||||
arrowFields.push(new Field(key, sanitizeType(dtype), true));
|
arrowFields.push(new Field(key, sanitizeType(dtype), true));
|
||||||
parseEmbeddingFunctions(embeddingFunctions, key, metadata);
|
parseEmbeddingFunctions(embeddingFunctions, key, metadata);
|
||||||
|
} else {
|
||||||
|
arrowFields.push(new Field(key, sanitizeType(value), true));
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
const registry = getRegistry();
|
const registry = getRegistry();
|
||||||
|
|||||||
@@ -13,7 +13,7 @@
|
|||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
import type OpenAI from "openai";
|
import type OpenAI from "openai";
|
||||||
import { type EmbeddingCreateParams } from "openai/resources";
|
import type { EmbeddingCreateParams } from "openai/resources/index";
|
||||||
import { Float, Float32 } from "../arrow";
|
import { Float, Float32 } from "../arrow";
|
||||||
import { EmbeddingFunction } from "./embedding_function";
|
import { EmbeddingFunction } from "./embedding_function";
|
||||||
import { register } from "./registry";
|
import { register } from "./registry";
|
||||||
|
|||||||
@@ -18,9 +18,14 @@ import {
|
|||||||
} from "./embedding_function";
|
} from "./embedding_function";
|
||||||
import "reflect-metadata";
|
import "reflect-metadata";
|
||||||
import { OpenAIEmbeddingFunction } from "./openai";
|
import { OpenAIEmbeddingFunction } from "./openai";
|
||||||
|
import { TransformersEmbeddingFunction } from "./transformers";
|
||||||
|
|
||||||
|
type CreateReturnType<T> = T extends { init: () => Promise<void> }
|
||||||
|
? Promise<T>
|
||||||
|
: T;
|
||||||
|
|
||||||
interface EmbeddingFunctionCreate<T extends EmbeddingFunction> {
|
interface EmbeddingFunctionCreate<T extends EmbeddingFunction> {
|
||||||
create(options?: T["TOptions"]): T;
|
create(options?: T["TOptions"]): CreateReturnType<T>;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -32,6 +37,13 @@ interface EmbeddingFunctionCreate<T extends EmbeddingFunction> {
|
|||||||
export class EmbeddingFunctionRegistry {
|
export class EmbeddingFunctionRegistry {
|
||||||
#functions = new Map<string, EmbeddingFunctionConstructor>();
|
#functions = new Map<string, EmbeddingFunctionConstructor>();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the number of registered functions
|
||||||
|
*/
|
||||||
|
length() {
|
||||||
|
return this.#functions.size;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Register an embedding function
|
* Register an embedding function
|
||||||
* @param name The name of the function
|
* @param name The name of the function
|
||||||
@@ -61,38 +73,43 @@ export class EmbeddingFunctionRegistry {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
get(name: "openai"): EmbeddingFunctionCreate<OpenAIEmbeddingFunction>;
|
||||||
|
get(
|
||||||
|
name: "huggingface",
|
||||||
|
): EmbeddingFunctionCreate<TransformersEmbeddingFunction>;
|
||||||
|
get<T extends EmbeddingFunction<unknown>>(
|
||||||
|
name: string,
|
||||||
|
): EmbeddingFunctionCreate<T> | undefined;
|
||||||
/**
|
/**
|
||||||
* Fetch an embedding function by name
|
* Fetch an embedding function by name
|
||||||
* @param name The name of the function
|
* @param name The name of the function
|
||||||
*/
|
*/
|
||||||
get<T extends EmbeddingFunction<unknown>, Name extends string = "">(
|
get(name: string) {
|
||||||
name: Name extends "openai" ? "openai" : string,
|
|
||||||
//This makes it so that you can use string constants as "types", or use an explicitly supplied type
|
|
||||||
// ex:
|
|
||||||
// `registry.get("openai") -> EmbeddingFunctionCreate<OpenAIEmbeddingFunction>`
|
|
||||||
// `registry.get<MyCustomEmbeddingFunction>("my_func") -> EmbeddingFunctionCreate<MyCustomEmbeddingFunction> | undefined`
|
|
||||||
//
|
|
||||||
// the reason this is important is that we always know our built in functions are defined so the user isnt forced to do a non null/undefined
|
|
||||||
// ```ts
|
|
||||||
// const openai: OpenAIEmbeddingFunction = registry.get("openai").create()
|
|
||||||
// ```
|
|
||||||
): Name extends "openai"
|
|
||||||
? EmbeddingFunctionCreate<OpenAIEmbeddingFunction>
|
|
||||||
: EmbeddingFunctionCreate<T> | undefined {
|
|
||||||
type Output = Name extends "openai"
|
|
||||||
? EmbeddingFunctionCreate<OpenAIEmbeddingFunction>
|
|
||||||
: EmbeddingFunctionCreate<T> | undefined;
|
|
||||||
|
|
||||||
const factory = this.#functions.get(name);
|
const factory = this.#functions.get(name);
|
||||||
if (!factory) {
|
if (!factory) {
|
||||||
return undefined as Output;
|
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
|
||||||
|
return undefined as any;
|
||||||
|
}
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
|
||||||
|
let create: any;
|
||||||
|
if (factory.prototype.init) {
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
|
||||||
|
create = async function (options?: any) {
|
||||||
|
const instance = new factory(options);
|
||||||
|
await instance.init!();
|
||||||
|
return instance;
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
|
||||||
|
create = function (options?: any) {
|
||||||
|
const instance = new factory(options);
|
||||||
|
return instance;
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
return {
|
return {
|
||||||
create: function (options?: T["TOptions"]) {
|
create,
|
||||||
return new factory(options);
|
};
|
||||||
},
|
|
||||||
} as Output;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -105,10 +122,10 @@ export class EmbeddingFunctionRegistry {
|
|||||||
/**
|
/**
|
||||||
* @ignore
|
* @ignore
|
||||||
*/
|
*/
|
||||||
parseFunctions(
|
async parseFunctions(
|
||||||
this: EmbeddingFunctionRegistry,
|
this: EmbeddingFunctionRegistry,
|
||||||
metadata: Map<string, string>,
|
metadata: Map<string, string>,
|
||||||
): Map<string, EmbeddingFunctionConfig> {
|
): Promise<Map<string, EmbeddingFunctionConfig>> {
|
||||||
if (!metadata.has("embedding_functions")) {
|
if (!metadata.has("embedding_functions")) {
|
||||||
return new Map();
|
return new Map();
|
||||||
} else {
|
} else {
|
||||||
@@ -118,25 +135,30 @@ export class EmbeddingFunctionRegistry {
|
|||||||
vectorColumn: string;
|
vectorColumn: string;
|
||||||
model: EmbeddingFunction["TOptions"];
|
model: EmbeddingFunction["TOptions"];
|
||||||
};
|
};
|
||||||
|
|
||||||
const functions = <FunctionConfig[]>(
|
const functions = <FunctionConfig[]>(
|
||||||
JSON.parse(metadata.get("embedding_functions")!)
|
JSON.parse(metadata.get("embedding_functions")!)
|
||||||
);
|
);
|
||||||
return new Map(
|
|
||||||
functions.map((f) => {
|
const items: [string, EmbeddingFunctionConfig][] = await Promise.all(
|
||||||
|
functions.map(async (f) => {
|
||||||
const fn = this.get(f.name);
|
const fn = this.get(f.name);
|
||||||
if (!fn) {
|
if (!fn) {
|
||||||
throw new Error(`Function "${f.name}" not found in registry`);
|
throw new Error(`Function "${f.name}" not found in registry`);
|
||||||
}
|
}
|
||||||
|
const func = await this.get(f.name)!.create(f.model);
|
||||||
return [
|
return [
|
||||||
f.name,
|
f.name,
|
||||||
{
|
{
|
||||||
sourceColumn: f.sourceColumn,
|
sourceColumn: f.sourceColumn,
|
||||||
vectorColumn: f.vectorColumn,
|
vectorColumn: f.vectorColumn,
|
||||||
function: this.get(f.name)!.create(f.model),
|
function: func,
|
||||||
},
|
},
|
||||||
];
|
];
|
||||||
}),
|
}),
|
||||||
);
|
);
|
||||||
|
|
||||||
|
return new Map(items);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
|
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
|
||||||
|
|||||||
193
nodejs/lancedb/embedding/transformers.ts
Normal file
193
nodejs/lancedb/embedding/transformers.ts
Normal file
@@ -0,0 +1,193 @@
|
|||||||
|
// Copyright 2023 Lance Developers.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
import { Float, Float32 } from "../arrow";
|
||||||
|
import { EmbeddingFunction } from "./embedding_function";
|
||||||
|
import { register } from "./registry";
|
||||||
|
|
||||||
|
export type XenovaTransformerOptions = {
|
||||||
|
/** The wasm compatible model to use */
|
||||||
|
model: string;
|
||||||
|
/**
|
||||||
|
* The wasm compatible tokenizer to use
|
||||||
|
* If not provided, it will use the default tokenizer for the model
|
||||||
|
*/
|
||||||
|
tokenizer?: string;
|
||||||
|
/**
|
||||||
|
* The number of dimensions of the embeddings
|
||||||
|
*
|
||||||
|
* We will attempt to infer this from the model config if not provided.
|
||||||
|
* Since there isn't a standard way to get this information from the model,
|
||||||
|
* you may need to manually specify this if using a model that doesn't have a 'hidden_size' in the config.
|
||||||
|
* */
|
||||||
|
ndims?: number;
|
||||||
|
/** Options for the tokenizer */
|
||||||
|
tokenizerOptions?: {
|
||||||
|
textPair?: string | string[];
|
||||||
|
padding?: boolean | "max_length";
|
||||||
|
addSpecialTokens?: boolean;
|
||||||
|
truncation?: boolean;
|
||||||
|
maxLength?: number;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
@register("huggingface")
|
||||||
|
export class TransformersEmbeddingFunction extends EmbeddingFunction<
|
||||||
|
string,
|
||||||
|
Partial<XenovaTransformerOptions>
|
||||||
|
> {
|
||||||
|
#model?: import("@xenova/transformers").PreTrainedModel;
|
||||||
|
#tokenizer?: import("@xenova/transformers").PreTrainedTokenizer;
|
||||||
|
#modelName: XenovaTransformerOptions["model"];
|
||||||
|
#initialized = false;
|
||||||
|
#tokenizerOptions: XenovaTransformerOptions["tokenizerOptions"];
|
||||||
|
#ndims?: number;
|
||||||
|
|
||||||
|
constructor(
|
||||||
|
options: Partial<XenovaTransformerOptions> = {
|
||||||
|
model: "Xenova/all-MiniLM-L6-v2",
|
||||||
|
},
|
||||||
|
) {
|
||||||
|
super();
|
||||||
|
|
||||||
|
const modelName = options?.model ?? "Xenova/all-MiniLM-L6-v2";
|
||||||
|
this.#tokenizerOptions = {
|
||||||
|
padding: true,
|
||||||
|
...options.tokenizerOptions,
|
||||||
|
};
|
||||||
|
|
||||||
|
this.#ndims = options.ndims;
|
||||||
|
this.#modelName = modelName;
|
||||||
|
}
|
||||||
|
toJSON() {
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
|
||||||
|
const obj: Record<string, any> = {
|
||||||
|
model: this.#modelName,
|
||||||
|
};
|
||||||
|
if (this.#ndims) {
|
||||||
|
obj["ndims"] = this.#ndims;
|
||||||
|
}
|
||||||
|
if (this.#tokenizerOptions) {
|
||||||
|
obj["tokenizerOptions"] = this.#tokenizerOptions;
|
||||||
|
}
|
||||||
|
if (this.#tokenizer) {
|
||||||
|
obj["tokenizer"] = this.#tokenizer.name;
|
||||||
|
}
|
||||||
|
return obj;
|
||||||
|
}
|
||||||
|
|
||||||
|
async init() {
|
||||||
|
let transformers;
|
||||||
|
try {
|
||||||
|
// SAFETY:
|
||||||
|
// since typescript transpiles `import` to `require`, we need to do this in an unsafe way
|
||||||
|
// We can't use `require` because `@xenova/transformers` is an ESM module
|
||||||
|
// and we can't use `import` directly because typescript will transpile it to `require`.
|
||||||
|
// and we want to remain compatible with both ESM and CJS modules
|
||||||
|
// so we use `eval` to bypass typescript for this specific import.
|
||||||
|
transformers = await eval('import("@xenova/transformers")');
|
||||||
|
} catch (e) {
|
||||||
|
throw new Error(`error loading @xenova/transformers\nReason: ${e}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
this.#model = await transformers.AutoModel.from_pretrained(
|
||||||
|
this.#modelName,
|
||||||
|
);
|
||||||
|
} catch (e) {
|
||||||
|
throw new Error(
|
||||||
|
`error loading model ${this.#modelName}. Make sure you are using a wasm compatible model.\nReason: ${e}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
this.#tokenizer = await transformers.AutoTokenizer.from_pretrained(
|
||||||
|
this.#modelName,
|
||||||
|
);
|
||||||
|
} catch (e) {
|
||||||
|
throw new Error(
|
||||||
|
`error loading tokenizer for ${this.#modelName}. Make sure you are using a wasm compatible model:\nReason: ${e}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
this.#initialized = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
ndims(): number {
|
||||||
|
if (this.#ndims) {
|
||||||
|
return this.#ndims;
|
||||||
|
} else {
|
||||||
|
const config = this.#model!.config;
|
||||||
|
|
||||||
|
const ndims = config["hidden_size"];
|
||||||
|
if (!ndims) {
|
||||||
|
throw new Error(
|
||||||
|
"hidden_size not found in model config, you may need to manually specify the embedding dimensions. ",
|
||||||
|
);
|
||||||
|
}
|
||||||
|
return ndims;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
embeddingDataType(): Float {
|
||||||
|
return new Float32();
|
||||||
|
}
|
||||||
|
|
||||||
|
async computeSourceEmbeddings(data: string[]): Promise<number[][]> {
|
||||||
|
// this should only happen if the user is trying to use the function directly.
|
||||||
|
// Anything going through the registry should already be initialized.
|
||||||
|
if (!this.#initialized) {
|
||||||
|
return Promise.reject(
|
||||||
|
new Error(
|
||||||
|
"something went wrong: embedding function not initialized. Please call init()",
|
||||||
|
),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
const tokenizer = this.#tokenizer!;
|
||||||
|
const model = this.#model!;
|
||||||
|
|
||||||
|
const inputs = await tokenizer(data, this.#tokenizerOptions);
|
||||||
|
let tokens = await model.forward(inputs);
|
||||||
|
tokens = tokens[Object.keys(tokens)[0]];
|
||||||
|
|
||||||
|
const [nItems, nTokens] = tokens.dims;
|
||||||
|
|
||||||
|
tokens = tensorDiv(tokens.sum(1), nTokens);
|
||||||
|
|
||||||
|
// TODO: support other data types
|
||||||
|
const tokenData = tokens.data;
|
||||||
|
const stride = this.ndims();
|
||||||
|
|
||||||
|
const embeddings = [];
|
||||||
|
for (let i = 0; i < nItems; i++) {
|
||||||
|
const start = i * stride;
|
||||||
|
const end = start + stride;
|
||||||
|
const slice = tokenData.slice(start, end);
|
||||||
|
embeddings.push(Array.from(slice) as number[]); // TODO: Avoid copy here
|
||||||
|
}
|
||||||
|
return embeddings;
|
||||||
|
}
|
||||||
|
|
||||||
|
async computeQueryEmbeddings(data: string): Promise<number[]> {
|
||||||
|
return (await this.computeSourceEmbeddings([data]))[0];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const tensorDiv = (
|
||||||
|
src: import("@xenova/transformers").Tensor,
|
||||||
|
divBy: number,
|
||||||
|
) => {
|
||||||
|
for (let i = 0; i < src.data.length; ++i) {
|
||||||
|
src.data[i] /= divBy;
|
||||||
|
}
|
||||||
|
return src;
|
||||||
|
};
|
||||||
@@ -59,7 +59,7 @@ export {
|
|||||||
|
|
||||||
export { Index, IndexOptions, IvfPqOptions } from "./indices";
|
export { Index, IndexOptions, IvfPqOptions } from "./indices";
|
||||||
|
|
||||||
export { Table, AddDataOptions, UpdateOptions } from "./table";
|
export { Table, AddDataOptions, UpdateOptions, OptimizeOptions } from "./table";
|
||||||
|
|
||||||
export * as embedding from "./embedding";
|
export * as embedding from "./embedding";
|
||||||
|
|
||||||
|
|||||||
@@ -113,6 +113,38 @@ export interface IvfPqOptions {
|
|||||||
sampleRate?: number;
|
sampleRate?: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export interface HnswPqOptions {
|
||||||
|
distanceType?: "l2" | "cosine" | "dot";
|
||||||
|
numPartitions?: number;
|
||||||
|
numSubVectors?: number;
|
||||||
|
maxIterations?: number;
|
||||||
|
sampleRate?: number;
|
||||||
|
m?: number;
|
||||||
|
efConstruction?: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface HnswSqOptions {
|
||||||
|
distanceType?: "l2" | "cosine" | "dot";
|
||||||
|
numPartitions?: number;
|
||||||
|
maxIterations?: number;
|
||||||
|
sampleRate?: number;
|
||||||
|
m?: number;
|
||||||
|
efConstruction?: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Options to create a full text search index
|
||||||
|
*/
|
||||||
|
export interface FtsOptions {
|
||||||
|
/**
|
||||||
|
* Whether to build the index with positions.
|
||||||
|
* True by default.
|
||||||
|
* If set to false, the index will not store the positions of the tokens in the text,
|
||||||
|
* which will make the index smaller and faster to build, but will not support phrase queries.
|
||||||
|
*/
|
||||||
|
withPositions?: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
export class Index {
|
export class Index {
|
||||||
private readonly inner: LanceDbIndex;
|
private readonly inner: LanceDbIndex;
|
||||||
private constructor(inner: LanceDbIndex) {
|
private constructor(inner: LanceDbIndex) {
|
||||||
@@ -175,6 +207,82 @@ export class Index {
|
|||||||
static btree() {
|
static btree() {
|
||||||
return new Index(LanceDbIndex.btree());
|
return new Index(LanceDbIndex.btree());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a bitmap index.
|
||||||
|
*
|
||||||
|
* A `Bitmap` index stores a bitmap for each distinct value in the column for every row.
|
||||||
|
*
|
||||||
|
* This index works best for low-cardinality columns, where the number of unique values
|
||||||
|
* is small (i.e., less than a few hundreds).
|
||||||
|
*/
|
||||||
|
static bitmap() {
|
||||||
|
return new Index(LanceDbIndex.bitmap());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a label list index.
|
||||||
|
*
|
||||||
|
* LabelList index is a scalar index that can be used on `List<T>` columns to
|
||||||
|
* support queries with `array_contains_all` and `array_contains_any`
|
||||||
|
* using an underlying bitmap index.
|
||||||
|
*/
|
||||||
|
static labelList() {
|
||||||
|
return new Index(LanceDbIndex.labelList());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a full text search index
|
||||||
|
*
|
||||||
|
* A full text search index is an index on a string column, so that you can conduct full
|
||||||
|
* text searches on the column.
|
||||||
|
*
|
||||||
|
* The results of a full text search are ordered by relevance measured by BM25.
|
||||||
|
*
|
||||||
|
* You can combine filters with full text search.
|
||||||
|
*
|
||||||
|
* For now, the full text search index only supports English, and doesn't support phrase search.
|
||||||
|
*/
|
||||||
|
static fts(options?: Partial<FtsOptions>) {
|
||||||
|
return new Index(LanceDbIndex.fts(options?.withPositions));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* Create a hnswpq index
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
static hnswPq(options?: Partial<HnswPqOptions>) {
|
||||||
|
return new Index(
|
||||||
|
LanceDbIndex.hnswPq(
|
||||||
|
options?.distanceType,
|
||||||
|
options?.numPartitions,
|
||||||
|
options?.numSubVectors,
|
||||||
|
options?.maxIterations,
|
||||||
|
options?.sampleRate,
|
||||||
|
options?.m,
|
||||||
|
options?.efConstruction,
|
||||||
|
),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* Create a hnswsq index
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
static hnswSq(options?: Partial<HnswSqOptions>) {
|
||||||
|
return new Index(
|
||||||
|
LanceDbIndex.hnswSq(
|
||||||
|
options?.distanceType,
|
||||||
|
options?.numPartitions,
|
||||||
|
options?.maxIterations,
|
||||||
|
options?.sampleRate,
|
||||||
|
options?.m,
|
||||||
|
options?.efConstruction,
|
||||||
|
),
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
export interface IndexOptions {
|
export interface IndexOptions {
|
||||||
|
|||||||
@@ -88,6 +88,19 @@ export interface QueryExecutionOptions {
|
|||||||
maxBatchLength?: number;
|
maxBatchLength?: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Options that control the behavior of a full text search
|
||||||
|
*/
|
||||||
|
export interface FullTextSearchOptions {
|
||||||
|
/**
|
||||||
|
* The columns to search
|
||||||
|
*
|
||||||
|
* If not specified, all indexed columns will be searched.
|
||||||
|
* For now, only one column can be searched.
|
||||||
|
*/
|
||||||
|
columns?: string | string[];
|
||||||
|
}
|
||||||
|
|
||||||
/** Common methods supported by all query types */
|
/** Common methods supported by all query types */
|
||||||
export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
|
export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
|
||||||
implements AsyncIterable<RecordBatch>
|
implements AsyncIterable<RecordBatch>
|
||||||
@@ -134,6 +147,25 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
|
|||||||
return this.where(predicate);
|
return this.where(predicate);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fullTextSearch(
|
||||||
|
query: string,
|
||||||
|
options?: Partial<FullTextSearchOptions>,
|
||||||
|
): this {
|
||||||
|
let columns: string[] | null = null;
|
||||||
|
if (options) {
|
||||||
|
if (typeof options.columns === "string") {
|
||||||
|
columns = [options.columns];
|
||||||
|
} else if (Array.isArray(options.columns)) {
|
||||||
|
columns = options.columns;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
this.doCall((inner: NativeQueryType) =>
|
||||||
|
inner.fullTextSearch(query, columns),
|
||||||
|
);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return only the specified columns.
|
* Return only the specified columns.
|
||||||
*
|
*
|
||||||
@@ -202,6 +234,11 @@ export class QueryBase<NativeQueryType extends NativeQuery | NativeVectorQuery>
|
|||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
offset(offset: number): this {
|
||||||
|
this.doCall((inner: NativeQueryType) => inner.offset(offset));
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
protected nativeExecute(
|
protected nativeExecute(
|
||||||
options?: Partial<QueryExecutionOptions>,
|
options?: Partial<QueryExecutionOptions>,
|
||||||
): Promise<NativeBatchIterator> {
|
): Promise<NativeBatchIterator> {
|
||||||
|
|||||||
@@ -27,8 +27,7 @@ export class RestfulLanceDBClient {
|
|||||||
#apiKey: string;
|
#apiKey: string;
|
||||||
#hostOverride?: string;
|
#hostOverride?: string;
|
||||||
#closed: boolean = false;
|
#closed: boolean = false;
|
||||||
#connectionTimeout: number = 12 * 1000; // 12 seconds;
|
#timeout: number = 12 * 1000; // 12 seconds;
|
||||||
#readTimeout: number = 30 * 1000; // 30 seconds;
|
|
||||||
#session?: import("axios").AxiosInstance;
|
#session?: import("axios").AxiosInstance;
|
||||||
|
|
||||||
constructor(
|
constructor(
|
||||||
@@ -36,15 +35,13 @@ export class RestfulLanceDBClient {
|
|||||||
apiKey: string,
|
apiKey: string,
|
||||||
region: string,
|
region: string,
|
||||||
hostOverride?: string,
|
hostOverride?: string,
|
||||||
connectionTimeout?: number,
|
timeout?: number,
|
||||||
readTimeout?: number,
|
|
||||||
) {
|
) {
|
||||||
this.#dbName = dbName;
|
this.#dbName = dbName;
|
||||||
this.#apiKey = apiKey;
|
this.#apiKey = apiKey;
|
||||||
this.#region = region;
|
this.#region = region;
|
||||||
this.#hostOverride = hostOverride ?? this.#hostOverride;
|
this.#hostOverride = hostOverride ?? this.#hostOverride;
|
||||||
this.#connectionTimeout = connectionTimeout ?? this.#connectionTimeout;
|
this.#timeout = timeout ?? this.#timeout;
|
||||||
this.#readTimeout = readTimeout ?? this.#readTimeout;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// todo: cache the session.
|
// todo: cache the session.
|
||||||
@@ -59,7 +56,7 @@ export class RestfulLanceDBClient {
|
|||||||
Authorization: `Bearer ${this.#apiKey}`,
|
Authorization: `Bearer ${this.#apiKey}`,
|
||||||
},
|
},
|
||||||
transformResponse: decodeErrorData,
|
transformResponse: decodeErrorData,
|
||||||
timeout: this.#connectionTimeout,
|
timeout: this.#timeout,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -111,7 +108,7 @@ export class RestfulLanceDBClient {
|
|||||||
params,
|
params,
|
||||||
});
|
});
|
||||||
} catch (e) {
|
} catch (e) {
|
||||||
if (e instanceof AxiosError) {
|
if (e instanceof AxiosError && e.response) {
|
||||||
response = e.response;
|
response = e.response;
|
||||||
} else {
|
} else {
|
||||||
throw e;
|
throw e;
|
||||||
@@ -165,7 +162,7 @@ export class RestfulLanceDBClient {
|
|||||||
params: new Map(Object.entries(additional.params ?? {})),
|
params: new Map(Object.entries(additional.params ?? {})),
|
||||||
});
|
});
|
||||||
} catch (e) {
|
} catch (e) {
|
||||||
if (e instanceof AxiosError) {
|
if (e instanceof AxiosError && e.response) {
|
||||||
response = e.response;
|
response = e.response;
|
||||||
} else {
|
} else {
|
||||||
throw e;
|
throw e;
|
||||||
|
|||||||
@@ -20,8 +20,7 @@ export interface RemoteConnectionOptions {
|
|||||||
apiKey?: string;
|
apiKey?: string;
|
||||||
region?: string;
|
region?: string;
|
||||||
hostOverride?: string;
|
hostOverride?: string;
|
||||||
connectionTimeout?: number;
|
timeout?: number;
|
||||||
readTimeout?: number;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
export class RemoteConnection extends Connection {
|
export class RemoteConnection extends Connection {
|
||||||
@@ -33,13 +32,7 @@ export class RemoteConnection extends Connection {
|
|||||||
|
|
||||||
constructor(
|
constructor(
|
||||||
url: string,
|
url: string,
|
||||||
{
|
{ apiKey, region, hostOverride, timeout }: RemoteConnectionOptions,
|
||||||
apiKey,
|
|
||||||
region,
|
|
||||||
hostOverride,
|
|
||||||
connectionTimeout,
|
|
||||||
readTimeout,
|
|
||||||
}: RemoteConnectionOptions,
|
|
||||||
) {
|
) {
|
||||||
super();
|
super();
|
||||||
apiKey = apiKey ?? process.env.LANCEDB_API_KEY;
|
apiKey = apiKey ?? process.env.LANCEDB_API_KEY;
|
||||||
@@ -68,8 +61,7 @@ export class RemoteConnection extends Connection {
|
|||||||
this.#apiKey,
|
this.#apiKey,
|
||||||
this.#region,
|
this.#region,
|
||||||
hostOverride,
|
hostOverride,
|
||||||
connectionTimeout,
|
timeout,
|
||||||
readTimeout,
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -340,8 +340,14 @@ export function sanitizeType(typeLike: unknown): DataType<any> {
|
|||||||
if (typeof typeLike !== "object" || typeLike === null) {
|
if (typeof typeLike !== "object" || typeLike === null) {
|
||||||
throw Error("Expected a Type but object was null/undefined");
|
throw Error("Expected a Type but object was null/undefined");
|
||||||
}
|
}
|
||||||
if (!("typeId" in typeLike) || !(typeof typeLike.typeId !== "function")) {
|
if (
|
||||||
throw Error("Expected a Type to have a typeId function");
|
!("typeId" in typeLike) ||
|
||||||
|
!(
|
||||||
|
typeof typeLike.typeId !== "function" ||
|
||||||
|
typeof typeLike.typeId !== "number"
|
||||||
|
)
|
||||||
|
) {
|
||||||
|
throw Error("Expected a Type to have a typeId property");
|
||||||
}
|
}
|
||||||
let typeId: Type;
|
let typeId: Type;
|
||||||
if (typeof typeLike.typeId === "function") {
|
if (typeof typeLike.typeId === "function") {
|
||||||
|
|||||||
@@ -84,6 +84,7 @@ export interface OptimizeOptions {
|
|||||||
* tbl.cleanupOlderVersions(new Date());
|
* tbl.cleanupOlderVersions(new Date());
|
||||||
*/
|
*/
|
||||||
cleanupOlderThan: Date;
|
cleanupOlderThan: Date;
|
||||||
|
deleteUnverified: boolean;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -270,19 +271,23 @@ export abstract class Table {
|
|||||||
* @returns {Query} A builder that can be used to parameterize the query
|
* @returns {Query} A builder that can be used to parameterize the query
|
||||||
*/
|
*/
|
||||||
abstract query(): Query;
|
abstract query(): Query;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a search query to find the nearest neighbors
|
* Create a search query to find the nearest neighbors
|
||||||
* of the given query vector
|
* of the given query
|
||||||
* @param {string} query - the query. This will be converted to a vector using the table's provided embedding function
|
* @param {string | IntoVector} query - the query, a vector or string
|
||||||
* @note If no embedding functions are defined in the table, this will error when collecting the results.
|
* @param {string} queryType - the type of the query, "vector", "fts", or "auto"
|
||||||
|
* @param {string | string[]} ftsColumns - the columns to search in for full text search
|
||||||
|
* for now, only one column can be searched at a time.
|
||||||
|
*
|
||||||
|
* when "auto" is used, if the query is a string and an embedding function is defined, it will be treated as a vector query
|
||||||
|
* if the query is a string and no embedding function is defined, it will be treated as a full text search query
|
||||||
*/
|
*/
|
||||||
abstract search(query: string): VectorQuery;
|
abstract search(
|
||||||
/**
|
query: string | IntoVector,
|
||||||
* Create a search query to find the nearest neighbors
|
queryType?: string,
|
||||||
* of the given query vector
|
ftsColumns?: string | string[],
|
||||||
* @param {IntoVector} query - the query vector
|
): VectorQuery | Query;
|
||||||
*/
|
|
||||||
abstract search(query: IntoVector): VectorQuery;
|
|
||||||
/**
|
/**
|
||||||
* Search the table with a given query vector.
|
* Search the table with a given query vector.
|
||||||
*
|
*
|
||||||
@@ -490,7 +495,7 @@ export class LocalTable extends Table {
|
|||||||
const mode = options?.mode ?? "append";
|
const mode = options?.mode ?? "append";
|
||||||
const schema = await this.schema();
|
const schema = await this.schema();
|
||||||
const registry = getRegistry();
|
const registry = getRegistry();
|
||||||
const functions = registry.parseFunctions(schema.metadata);
|
const functions = await registry.parseFunctions(schema.metadata);
|
||||||
|
|
||||||
const buffer = await fromDataToBuffer(
|
const buffer = await fromDataToBuffer(
|
||||||
data,
|
data,
|
||||||
@@ -578,27 +583,50 @@ export class LocalTable extends Table {
|
|||||||
query(): Query {
|
query(): Query {
|
||||||
return new Query(this.inner);
|
return new Query(this.inner);
|
||||||
}
|
}
|
||||||
search(query: string | IntoVector): VectorQuery {
|
|
||||||
if (typeof query !== "string") {
|
|
||||||
return this.vectorSearch(query);
|
|
||||||
} else {
|
|
||||||
const queryPromise = this.getEmbeddingFunctions().then(
|
|
||||||
async (functions) => {
|
|
||||||
// TODO: Support multiple embedding functions
|
|
||||||
const embeddingFunc: EmbeddingFunctionConfig | undefined = functions
|
|
||||||
.values()
|
|
||||||
.next().value;
|
|
||||||
if (!embeddingFunc) {
|
|
||||||
return Promise.reject(
|
|
||||||
new Error("No embedding functions are defined in the table"),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
return await embeddingFunc.function.computeQueryEmbeddings(query);
|
|
||||||
},
|
|
||||||
);
|
|
||||||
|
|
||||||
return this.query().nearestTo(queryPromise);
|
search(
|
||||||
|
query: string | IntoVector,
|
||||||
|
queryType: string = "auto",
|
||||||
|
ftsColumns?: string | string[],
|
||||||
|
): VectorQuery | Query {
|
||||||
|
if (typeof query !== "string") {
|
||||||
|
if (queryType === "fts") {
|
||||||
|
throw new Error("Cannot perform full text search on a vector query");
|
||||||
|
}
|
||||||
|
return this.vectorSearch(query);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If the query is a string, we need to determine if it is a vector query or a full text search query
|
||||||
|
if (queryType === "fts") {
|
||||||
|
return this.query().fullTextSearch(query, {
|
||||||
|
columns: ftsColumns,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// The query type is auto or vector
|
||||||
|
// fall back to full text search if no embedding functions are defined and the query is a string
|
||||||
|
if (queryType === "auto" && getRegistry().length() === 0) {
|
||||||
|
return this.query().fullTextSearch(query, {
|
||||||
|
columns: ftsColumns,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
const queryPromise = this.getEmbeddingFunctions().then(
|
||||||
|
async (functions) => {
|
||||||
|
// TODO: Support multiple embedding functions
|
||||||
|
const embeddingFunc: EmbeddingFunctionConfig | undefined = functions
|
||||||
|
.values()
|
||||||
|
.next().value;
|
||||||
|
if (!embeddingFunc) {
|
||||||
|
return Promise.reject(
|
||||||
|
new Error("No embedding functions are defined in the table"),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
return await embeddingFunc.function.computeQueryEmbeddings(query);
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
return this.query().nearestTo(queryPromise);
|
||||||
}
|
}
|
||||||
|
|
||||||
vectorSearch(vector: IntoVector): VectorQuery {
|
vectorSearch(vector: IntoVector): VectorQuery {
|
||||||
@@ -644,7 +672,10 @@ export class LocalTable extends Table {
|
|||||||
cleanupOlderThanMs =
|
cleanupOlderThanMs =
|
||||||
new Date().getTime() - options.cleanupOlderThan.getTime();
|
new Date().getTime() - options.cleanupOlderThan.getTime();
|
||||||
}
|
}
|
||||||
return await this.inner.optimize(cleanupOlderThanMs);
|
return await this.inner.optimize(
|
||||||
|
cleanupOlderThanMs,
|
||||||
|
options?.deleteUnverified,
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
async listIndices(): Promise<IndexConfig[]> {
|
async listIndices(): Promise<IndexConfig[]> {
|
||||||
@@ -666,4 +697,31 @@ export class LocalTable extends Table {
|
|||||||
on = Array.isArray(on) ? on : [on];
|
on = Array.isArray(on) ? on : [on];
|
||||||
return new MergeInsertBuilder(this.inner.mergeInsert(on));
|
return new MergeInsertBuilder(this.inner.mergeInsert(on));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if the table uses the new manifest path scheme.
|
||||||
|
*
|
||||||
|
* This function will return true if the table uses the V2 manifest
|
||||||
|
* path scheme.
|
||||||
|
*/
|
||||||
|
async usesV2ManifestPaths(): Promise<boolean> {
|
||||||
|
return await this.inner.usesV2ManifestPaths();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Migrate the table to use the new manifest path scheme.
|
||||||
|
*
|
||||||
|
* This function will rename all V1 manifests to V2 manifest paths.
|
||||||
|
* These paths provide more efficient opening of datasets with many versions
|
||||||
|
* on object stores.
|
||||||
|
*
|
||||||
|
* This function is idempotent, and can be run multiple times without
|
||||||
|
* changing the state of the object store.
|
||||||
|
*
|
||||||
|
* However, it should not be run while other concurrent operations are happening.
|
||||||
|
* And it should also run until completion before resuming other operations.
|
||||||
|
*/
|
||||||
|
async migrateManifestPathsV2(): Promise<void> {
|
||||||
|
await this.inner.migrateManifestPathsV2();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user