Mirror of https://github.com/lancedb/lancedb.git (synced 2025-12-23 13:29:57 +00:00)

Compare commits: python-v0.… → v0.4.6

17 Commits

| SHA1 |
|---|
| ba6f949515 |
| 3dd8522bc9 |
| e01ef63488 |
| a6cf24b359 |
| 9a07c9aad8 |
| d405798952 |
| e8a8b92b2a |
| 66362c6506 |
| 5228ca4b6b |
| dcc216a244 |
| a7aa168c7f |
| 7a89b5ec68 |
| ee862abd29 |
| 4e1ed2b139 |
| 008e0b1a93 |
| 82cbcf6d07 |
| 1cd5426aea |
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.4.3
+current_version = 0.4.6
 commit = True
 message = Bump version: {current_version} → {new_version}
 tag = True
114  .github/workflows/nodejs.yml  (vendored, new file)

@@ -0,0 +1,114 @@
name: NodeJS (NAPI)

on:
  push:
    branches:
      - main
  pull_request:
    paths:
      - nodejs/**
      - .github/workflows/nodejs.yml
      - docker-compose.yml

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

env:
  # Disable full debug symbol generation to speed up CI build and keep memory down
  # "1" means line tables only, which is useful for panic tracebacks.
  RUSTFLAGS: "-C debuginfo=1"
  RUST_BACKTRACE: "1"

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-22.04
    defaults:
      run:
        shell: bash
        working-directory: nodejs
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          lfs: true
      - uses: actions/setup-node@v3
        with:
          node-version: 20
          cache: 'npm'
          cache-dependency-path: nodejs/package-lock.json
      - uses: Swatinem/rust-cache@v2
      - name: Install dependencies
        run: |
          sudo apt update
          sudo apt install -y protobuf-compiler libssl-dev
      - name: Lint
        run: |
          cargo fmt --all -- --check
          cargo clippy --all --all-features -- -D warnings
          npm ci
          npm run lint
  linux:
    name: Linux (NodeJS ${{ matrix.node-version }})
    timeout-minutes: 30
    strategy:
      matrix:
        node-version: [ "18", "20" ]
    runs-on: "ubuntu-22.04"
    defaults:
      run:
        shell: bash
        working-directory: nodejs
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          lfs: true
      - uses: actions/setup-node@v3
        with:
          node-version: ${{ matrix.node-version }}
          cache: 'npm'
          cache-dependency-path: node/package-lock.json
      - uses: Swatinem/rust-cache@v2
      - name: Install dependencies
        run: |
          sudo apt update
          sudo apt install -y protobuf-compiler libssl-dev
          npm install -g @napi-rs/cli
      - name: Build
        run: |
          npm ci
          npm run build
      - name: Test
        run: npm run test
  macos:
    timeout-minutes: 30
    runs-on: "macos-13"
    defaults:
      run:
        shell: bash
        working-directory: nodejs
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          lfs: true
      - uses: actions/setup-node@v3
        with:
          node-version: 20
          cache: 'npm'
          cache-dependency-path: node/package-lock.json
      - uses: Swatinem/rust-cache@v2
      - name: Install dependencies
        run: |
          brew install protobuf
          npm install -g @napi-rs/cli
      - name: Build
        run: |
          npm ci
          npm run build
      - name: Test
        run: |
          npm run test
3  .gitignore  (vendored)

@@ -29,8 +29,9 @@ python/dist
 node/dist
 node/examples/**/package-lock.json
 node/examples/**/dist
+dist
 
 ## Rust
 target
 
 Cargo.lock
19  Cargo.toml

@@ -1,14 +1,20 @@
 [workspace]
-members = ["rust/ffi/node", "rust/vectordb"]
+members = ["rust/ffi/node", "rust/vectordb", "nodejs"]
 # Python package needs to be built by maturin.
 exclude = ["python"]
 resolver = "2"
 
 [workspace.package]
 edition = "2021"
 authors = ["Lance Devs <dev@lancedb.com>"]
 license = "Apache-2.0"
 repository = "https://github.com/lancedb/lancedb"
 
 [workspace.dependencies]
-lance = { "version" = "=0.9.7", "features" = ["dynamodb"] }
-lance-index = { "version" = "=0.9.7" }
-lance-linalg = { "version" = "=0.9.7" }
-lance-testing = { "version" = "=0.9.7" }
+lance = { "version" = "=0.9.9", "features" = ["dynamodb"] }
+lance-index = { "version" = "=0.9.9" }
+lance-linalg = { "version" = "=0.9.9" }
+lance-testing = { "version" = "=0.9.9" }
 # Note that this one does not include pyarrow
 arrow = { version = "49.0.0", optional = false }
 arrow-array = "49.0"
@@ -18,11 +24,14 @@ arrow-ord = "49.0"
 arrow-schema = "49.0"
 arrow-arith = "49.0"
 arrow-cast = "49.0"
 async-trait = "0"
 chrono = "0.4.23"
 half = { "version" = "=2.3.1", default-features = false, features = [
   "num-traits",
 ] }
 futures = "0"
 log = "0.4"
 object_store = "0.9.0"
 snafu = "0.7.4"
 url = "2"
 num-traits = "0.2"
docs/mkdocs.yml

@@ -90,6 +90,7 @@ nav:
     - Full-text search: fts.md
     - Filtering: sql.md
     - Versioning & Reproducibility: notebooks/reproducibility.ipynb
+    - Configuring Storage: guides/storage.md
     - 🧬 Managing embeddings:
       - Overview: embeddings/index.md
       - Explicit management: embeddings/embedding_explicit.md
@@ -149,6 +150,7 @@ nav:
     - Full-text search: fts.md
     - Filtering: sql.md
     - Versioning & Reproducibility: notebooks/reproducibility.ipynb
+    - Configuring Storage: guides/storage.md
     - Managing Embeddings:
       - Overview: embeddings/index.md
       - Explicit management: embeddings/embedding_explicit.md
91  docs/src/guides/storage.md  (new file)

@@ -0,0 +1,91 @@
# Configuring cloud storage

<!-- TODO: When we add documentation for how to configure other storage types
we can change the name to a more general "Configuring storage" -->

When using LanceDB OSS, you can choose where to store your data. The tradeoffs between different storage options are discussed in the [storage concepts guide](../concepts/storage.md). This guide shows how to configure LanceDB to use different storage options.

## Object Stores

LanceDB OSS supports object stores such as AWS S3 (and compatible stores), Azure Blob Storage, and Google Cloud Storage. The object store is determined by the URI scheme of the dataset path: `s3://` for AWS S3, `az://` for Azure Blob Storage, and `gs://` for Google Cloud Storage. These URIs are passed to the `connect` function:

=== "Python"

    AWS S3:

    ```python
    import lancedb
    db = lancedb.connect("s3://bucket/path")
    ```

    Google Cloud Storage:

    ```python
    import lancedb
    db = lancedb.connect("gs://bucket/path")
    ```

    Azure Blob Storage:

    ```python
    import lancedb
    db = lancedb.connect("az://bucket/path")
    ```

=== "JavaScript"

    AWS S3:

    ```javascript
    const lancedb = require("lancedb");
    const db = await lancedb.connect("s3://bucket/path");
    ```

    Google Cloud Storage:

    ```javascript
    const lancedb = require("lancedb");
    const db = await lancedb.connect("gs://bucket/path");
    ```

    Azure Blob Storage:

    ```javascript
    const lancedb = require("lancedb");
    const db = await lancedb.connect("az://bucket/path");
    ```

In most cases, when running in the respective cloud with permissions set up correctly, no additional configuration is required. When running outside of the respective cloud, authentication credentials must be provided using environment variables. In general, these are the same environment variables used by the respective cloud SDKs. The sections below describe the environment variables that can be used to configure each object store.

LanceDB OSS uses the [object-store](https://docs.rs/object_store/latest/object_store/) Rust crate for object store access. There are general environment variables that can be used to configure the object store, such as the request timeout and proxy configuration. See the [object_store ClientConfigKey](https://docs.rs/object_store/latest/object_store/enum.ClientConfigKey.html) doc for available configuration options. The environment variables that can be set are the snake-cased versions of these variable names. For example, to set `ProxyUrl` use the environment variable `PROXY_URL`. (Don't let the Rust docs intimidate you! We link to them so you can see an up-to-date list of the available options.)
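For instance, a minimal sketch of overriding one of these client options from Node before connecting (the proxy address is an illustrative assumption):

```typescript
// Sketch: PROXY_URL is the snake-cased form of the object_store
// ClientConfigKey `ProxyUrl`. The proxy address below is an assumption.
process.env.PROXY_URL = "http://localhost:8080";

const lancedb = require("lancedb");
const db = await lancedb.connect("s3://bucket/path");
```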
### AWS S3

To configure credentials for AWS S3, you can use the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN` environment variables.

Alternatively, if you are using AWS SSO, you can use the `AWS_PROFILE` and `AWS_DEFAULT_REGION` environment variables.

You can see a full list of environment variables [here](https://docs.rs/object_store/latest/object_store/aws/struct.AmazonS3Builder.html#method.from_env).

#### S3-compatible stores

LanceDB can also connect to S3-compatible stores, such as MinIO. To do so, you must specify two environment variables: `AWS_ENDPOINT` and `AWS_DEFAULT_REGION`. `AWS_ENDPOINT` should be the URL of the S3-compatible store, and `AWS_DEFAULT_REGION` should be the region to use.

<!-- TODO: we should also document the use of S3 Express once we fully support it -->
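Concretely, a sketch for a local MinIO deployment (the endpoint, region, and bucket names are illustrative assumptions):

```typescript
// Sketch: connect to a MinIO server through the S3-compatible path.
process.env.AWS_ENDPOINT = "http://localhost:9000"; // assumed MinIO address
process.env.AWS_DEFAULT_REGION = "us-east-1";       // assumed region

const lancedb = require("lancedb");
const db = await lancedb.connect("s3://my-bucket/my-db");
```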
### Google Cloud Storage

GCS credentials are configured by setting the `GOOGLE_SERVICE_ACCOUNT` environment variable to the path of a JSON file containing the service account credentials. There are several aliases for this environment variable, documented [here](https://docs.rs/object_store/latest/object_store/gcp/struct.GoogleCloudStorageBuilder.html#method.from_env).

!!! info "HTTP/2 support"

    By default, connections to GCS use HTTP/1 rather than HTTP/2, which significantly improves maximum throughput. If you wish to use HTTP/2 for some reason, you can set the environment variable `HTTP1_ONLY` to `false`.
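A minimal sketch of the same from Node (the key path and bucket name are assumptions):

```typescript
// Sketch: authenticate to GCS with a service-account key file.
process.env.GOOGLE_SERVICE_ACCOUNT = "/path/to/service-account.json"; // assumed path

const lancedb = require("lancedb");
const db = await lancedb.connect("gs://my-bucket/my-db");
```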
### Azure Blob Storage

Azure Blob Storage credentials can be configured by setting the `AZURE_STORAGE_ACCOUNT_NAME` and `AZURE_STORAGE_ACCOUNT_KEY` environment variables. The full list of environment variables that can be set is documented [here](https://docs.rs/object_store/latest/object_store/azure/struct.MicrosoftAzureBuilder.html#method.from_env).

<!-- TODO: demonstrate how to configure networked file systems for optimal performance -->
44  node/package-lock.json  (generated)

@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.4.3",
+  "version": "0.4.5",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.4.3",
+      "version": "0.4.5",
       "cpu": [
         "x64",
         "arm64"
@@ -53,11 +53,11 @@
       "uuid": "^9.0.0"
     },
     "optionalDependencies": {
-      "@lancedb/vectordb-darwin-arm64": "0.4.3",
-      "@lancedb/vectordb-darwin-x64": "0.4.3",
-      "@lancedb/vectordb-linux-arm64-gnu": "0.4.3",
-      "@lancedb/vectordb-linux-x64-gnu": "0.4.3",
-      "@lancedb/vectordb-win32-x64-msvc": "0.4.3"
+      "@lancedb/vectordb-darwin-arm64": "0.4.5",
+      "@lancedb/vectordb-darwin-x64": "0.4.5",
+      "@lancedb/vectordb-linux-arm64-gnu": "0.4.5",
+      "@lancedb/vectordb-linux-x64-gnu": "0.4.5",
+      "@lancedb/vectordb-win32-x64-msvc": "0.4.5"
     }
   },
   "node_modules/@75lb/deep-merge": {
@@ -329,9 +329,9 @@
     }
   },
   "node_modules/@lancedb/vectordb-darwin-arm64": {
-    "version": "0.4.3",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.4.3.tgz",
-    "integrity": "sha512-47CvvSaV1EdUsFEpXUJApTk+hMzAhCxVizipCFUlXCgcmzpCDL86wNgJij/X9a+j6zADhIX//Lsu0qd/an/Bpw==",
+    "version": "0.4.5",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.4.5.tgz",
+    "integrity": "sha512-sR+Q9dRBzMm+NGqM7EiK07c7pQz/V4J//23p05CeO/YATjKYyU3jE/dmVenLjJGW2UUrRYiyUQ9X6Up+OOgdhA==",
     "cpu": [
       "arm64"
     ],
@@ -341,9 +341,9 @@
     ]
   },
   "node_modules/@lancedb/vectordb-darwin-x64": {
-    "version": "0.4.3",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.4.3.tgz",
-    "integrity": "sha512-UlZZv8CmJIuRJNJG+Y1VmFsGyPR8W/72Q5EwgMMsSES6zpMQ9pNdBDWhL3UGX6nMRgnbprkwYiWJ3xHhJvtqtw==",
+    "version": "0.4.5",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.4.5.tgz",
+    "integrity": "sha512-/BIyUeVkLaUlOEQN4HUQ9J9ZdNWkDpZPUUS9kfz5iYIjotgwpSfznF8Q1GY5BVuXa2ke7GC3tnkwwd5ZMOuDsA==",
     "cpu": [
       "x64"
     ],
@@ -353,9 +353,9 @@
     ]
   },
   "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
-    "version": "0.4.3",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.4.3.tgz",
-    "integrity": "sha512-L6NVJr/lKEd8+904FzZNpT8BGQMs2cHNYbGJMIaVvGnMiIJgKAFKtOyGtdDjoe1xRZoEw21yjRGksGbnRO5wHQ==",
+    "version": "0.4.5",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.4.5.tgz",
+    "integrity": "sha512-bq8vX7znIf2Dap41YIbB5uA/YahwaLvFPNH0WmwqeBWxF64/AJ74DsZk51ftwczQMsyLK74M8f1PzniapMAR+Q==",
     "cpu": [
       "arm64"
     ],
@@ -365,9 +365,9 @@
     ]
   },
   "node_modules/@lancedb/vectordb-linux-x64-gnu": {
-    "version": "0.4.3",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.4.3.tgz",
-    "integrity": "sha512-OBx3WF3pK0xNfFJeErmuD9R2QWLa3XdeZspyTsIrQmBDeKj3HKh8y7Scpx4NH5Y09+9JNqRRKRZN7OqWTYhITg==",
+    "version": "0.4.5",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.4.5.tgz",
+    "integrity": "sha512-5qCWFyxihyMDYIGRAdQ7zv3enBEDxPR08dCmXr2Bu9yYI3SUqfuSvFX1NwflVeB+RzRMMbeG4xiaEbo7H7/H3Q==",
     "cpu": [
       "x64"
     ],
@@ -377,9 +377,9 @@
     ]
   },
   "node_modules/@lancedb/vectordb-win32-x64-msvc": {
-    "version": "0.4.3",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.4.3.tgz",
-    "integrity": "sha512-n9IvR81NXZKnSN91mrgeXbEyCiGM+YLJpOgbdHoEtMP04VDnS+iSU4jGOtQBKErvWeCJQaGFQ9qzdcVchpRGyw==",
+    "version": "0.4.5",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.4.5.tgz",
+    "integrity": "sha512-z3dZ6TDzm2EU5gNuejshArs3o84v1rdXnds22TTuc9fVhwg5JG87FyHFZKU1MGuyLuZW22Me0YDuS9VR+eAp0Q==",
     "cpu": [
       "x64"
     ],
node/package.json

@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.4.3",
+  "version": "0.4.6",
   "description": " Serverless, low-latency vector database for AI applications",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "scripts": {
     "tsc": "tsc -b",
-    "build": "cargo-cp-artifact --artifact cdylib vectordb-node index.node -- cargo build --message-format=json && tsc -b",
+    "build": "npm run tsc && cargo-cp-artifact --artifact cdylib vectordb-node index.node -- cargo build --message-format=json",
     "build-release": "npm run build -- --release",
     "test": "npm run tsc && mocha -recursive dist/test",
     "integration-test": "npm run tsc && mocha -recursive dist/integration_test",
@@ -81,10 +81,10 @@
     }
   },
   "optionalDependencies": {
-    "@lancedb/vectordb-darwin-arm64": "0.4.3",
-    "@lancedb/vectordb-darwin-x64": "0.4.3",
-    "@lancedb/vectordb-linux-arm64-gnu": "0.4.3",
-    "@lancedb/vectordb-linux-x64-gnu": "0.4.3",
-    "@lancedb/vectordb-win32-x64-msvc": "0.4.3"
+    "@lancedb/vectordb-darwin-arm64": "0.4.6",
+    "@lancedb/vectordb-darwin-x64": "0.4.6",
+    "@lancedb/vectordb-linux-arm64-gnu": "0.4.6",
+    "@lancedb/vectordb-linux-x64-gnu": "0.4.6",
+    "@lancedb/vectordb-win32-x64-msvc": "0.4.6"
   }
 }
@@ -391,24 +391,6 @@ describe('LanceDB client', function () {
     })
   }).timeout(120000)
 
-  it('fails to create a new table when the vector column is missing', async function () {
-    const dir = await track().mkdir('lancejs')
-    const con = await lancedb.connect(dir)
-
-    const data = [
-      {
-        id: 1,
-        price: 10
-      }
-    ]
-
-    const create = con.createTable('missing_vector', data)
-    await expect(create).to.be.rejectedWith(
-      Error,
-      "column 'vector' is missing"
-    )
-  })
-
   it('use overwrite flag to overwrite existing table', async function () {
     const dir = await track().mkdir('lancejs')
     const con = await lancedb.connect(dir)
22  nodejs/.eslintrc.js  (new file)

@@ -0,0 +1,22 @@
module.exports = {
  env: {
    browser: true,
    es2021: true,
  },
  extends: [
    "eslint:recommended",
    "plugin:@typescript-eslint/recommended-type-checked",
    "plugin:@typescript-eslint/stylistic-type-checked",
  ],
  overrides: [],
  parserOptions: {
    project: "./tsconfig.json",
    ecmaVersion: "latest",
    sourceType: "module",
  },
  rules: {
    "@typescript-eslint/method-signature-style": "off",
    "@typescript-eslint/no-explicit-any": "off",
  },
  ignorePatterns: ["node_modules/", "dist/", "build/", "vectordb/native.*"],
};
15  nodejs/.npmignore  (new file)

@@ -0,0 +1,15 @@
target
Cargo.lock
.cargo
.github
npm
.eslintrc
.prettierignore
rustfmt.toml
yarn.lock
*.node
.yarn
__test__
renovate.json
.idea
src
28  nodejs/Cargo.toml  (new file)

@@ -0,0 +1,28 @@
[package]
name = "vectordb-nodejs"
edition = "2021"
version = "0.0.0"
license.workspace = true
repository.workspace = true

[lib]
crate-type = ["cdylib"]

[dependencies]
arrow-ipc.workspace = true
futures.workspace = true
lance-linalg.workspace = true
lance.workspace = true
vectordb = { path = "../rust/vectordb" }
napi = { version = "2.14", default-features = false, features = [
  "napi7",
  "async"
] }
napi-derive = "2.14"

[build-dependencies]
napi-build = "2.1"

[profile.release]
lto = true
strip = "symbols"
24  nodejs/README.md  (new file)

@@ -0,0 +1,24 @@
# (New) LanceDB NodeJS SDK

This package will replace the current NodeJS SDK when it is ready.

## Development

```sh
npm run build
npm t
```

Generating docs:

```
npm run docs

cd ../docs
# Assume the virtual environment was created:
# python3 -m venv venv
# pip install -r requirements.txt
. ./venv/bin/activate
mkdocs build
```
106  nodejs/__test__/arrow.test.ts  (new file)

@@ -0,0 +1,106 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import { makeArrowTable, toBuffer } from "../vectordb/arrow";
import {
  Field,
  FixedSizeList,
  Float16,
  Float32,
  Int32,
  tableFromIPC,
  Schema,
  Float64,
} from "apache-arrow";

test("customized schema", function () {
  const schema = new Schema([
    new Field("a", new Int32(), true),
    new Field("b", new Float32(), true),
    new Field(
      "c",
      new FixedSizeList(3, new Field("item", new Float16())),
      true
    ),
  ]);
  const table = makeArrowTable(
    [
      { a: 1, b: 2, c: [1, 2, 3] },
      { a: 4, b: 5, c: [4, 5, 6] },
      { a: 7, b: 8, c: [7, 8, 9] },
    ],
    { schema }
  );

  expect(table.schema.toString()).toEqual(schema.toString());

  const buf = toBuffer(table);
  expect(buf.byteLength).toBeGreaterThan(0);

  const actual = tableFromIPC(buf);
  expect(actual.numRows).toBe(3);
  const actualSchema = actual.schema;
  expect(actualSchema.toString()).toStrictEqual(schema.toString());
});

test("default vector column", function () {
  const schema = new Schema([
    new Field("a", new Float64(), true),
    new Field("b", new Float64(), true),
    new Field("vector", new FixedSizeList(3, new Field("item", new Float32()))),
  ]);
  const table = makeArrowTable([
    { a: 1, b: 2, vector: [1, 2, 3] },
    { a: 4, b: 5, vector: [4, 5, 6] },
    { a: 7, b: 8, vector: [7, 8, 9] },
  ]);

  const buf = toBuffer(table);
  expect(buf.byteLength).toBeGreaterThan(0);

  const actual = tableFromIPC(buf);
  expect(actual.numRows).toBe(3);
  const actualSchema = actual.schema;
  expect(actualSchema.toString()).toEqual(schema.toString());
});

test("2 vector columns", function () {
  const schema = new Schema([
    new Field("a", new Float64()),
    new Field("b", new Float64()),
    new Field("vec1", new FixedSizeList(3, new Field("item", new Float16()))),
    new Field("vec2", new FixedSizeList(3, new Field("item", new Float16()))),
  ]);
  const table = makeArrowTable(
    [
      { a: 1, b: 2, vec1: [1, 2, 3], vec2: [2, 4, 6] },
      { a: 4, b: 5, vec1: [4, 5, 6], vec2: [8, 10, 12] },
      { a: 7, b: 8, vec1: [7, 8, 9], vec2: [14, 16, 18] },
    ],
    {
      vectorColumns: {
        vec1: { type: new Float16() },
        vec2: { type: new Float16() },
      },
    }
  );

  const buf = toBuffer(table);
  expect(buf.byteLength).toBeGreaterThan(0);

  const actual = tableFromIPC(buf);
  expect(actual.numRows).toBe(3);
  const actualSchema = actual.schema;
  expect(actualSchema.toString()).toEqual(schema.toString());
});
34  nodejs/__test__/index.test.ts  (new file)

@@ -0,0 +1,34 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import * as os from "os";
import * as path from "path";
import * as fs from "fs";

import { Schema, Field, Float64 } from "apache-arrow";
import { connect } from "../dist/index.js";

test("open database", async () => {
  const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "test-open"));

  const db = await connect(tmpDir);
  let tableNames = await db.tableNames();
  expect(tableNames).toStrictEqual([]);

  const tbl = await db.createTable("test", [{ id: 1 }, { id: 2 }]);
  expect(await db.tableNames()).toStrictEqual(["test"]);

  const schema = tbl.schema;
  expect(schema).toEqual(new Schema([new Field("id", new Float64(), true)]));
});
183  nodejs/__test__/table.test.ts  (new file)

@@ -0,0 +1,183 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import * as os from "os";
import * as path from "path";
import * as fs from "fs";

import { connect } from "../dist";
import { Schema, Field, Float32, Int32, FixedSizeList } from "apache-arrow";
import { makeArrowTable } from "../dist/arrow";

describe("Test creating index", () => {
  let tmpDir: string;
  const schema = new Schema([
    new Field("id", new Int32(), true),
    new Field("vec", new FixedSizeList(32, new Field("item", new Float32()))),
  ]);

  beforeEach(() => {
    tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "index-"));
  });

  test("create vector index with no column", async () => {
    const db = await connect(tmpDir);
    const data = makeArrowTable(
      Array(300)
        .fill(1)
        .map((_, i) => ({
          id: i,
          vec: Array(32)
            .fill(1)
            .map(() => Math.random()),
        })),
      {
        schema,
      }
    );
    const tbl = await db.createTable("test", data);
    await tbl.createIndex().build();

    // check index directory
    const indexDir = path.join(tmpDir, "test.lance", "_indices");
    expect(fs.readdirSync(indexDir)).toHaveLength(1);
    // TODO: check index type.

    // Search without specifying the column
    let query_vector = data.toArray()[5].vec.toJSON();
    let rst = await tbl.query().nearestTo(query_vector).limit(2).toArrow();
    expect(rst.numRows).toBe(2);

    // Search with specifying the column
    let rst2 = await tbl.search(query_vector, "vec").limit(2).toArrow();
    expect(rst2.numRows).toBe(2);
    expect(rst.toString()).toEqual(rst2.toString());
  });

  test("no vector column available", async () => {
    const db = await connect(tmpDir);
    const tbl = await db.createTable(
      "no_vec",
      makeArrowTable([
        { id: 1, val: 2 },
        { id: 2, val: 3 },
      ])
    );
    await expect(tbl.createIndex().build()).rejects.toThrow(
      "No vector column found"
    );

    await tbl.createIndex("val").build();
    const indexDir = path.join(tmpDir, "no_vec.lance", "_indices");
    expect(fs.readdirSync(indexDir)).toHaveLength(1);

    for await (const r of tbl.query().filter("id > 1").select(["id"])) {
      expect(r.numRows).toBe(1);
    }
  });

  test("two columns with different dimensions", async () => {
    const db = await connect(tmpDir);
    const schema = new Schema([
      new Field("id", new Int32(), true),
      new Field("vec", new FixedSizeList(32, new Field("item", new Float32()))),
      new Field(
        "vec2",
        new FixedSizeList(64, new Field("item", new Float32()))
      ),
    ]);
    const tbl = await db.createTable(
      "two_vectors",
      makeArrowTable(
        Array(300)
          .fill(1)
          .map((_, i) => ({
            id: i,
            vec: Array(32)
              .fill(1)
              .map(() => Math.random()),
            vec2: Array(64) // different dimension
              .fill(1)
              .map(() => Math.random()),
          })),
        { schema }
      )
    );

    // Only build an index over "vec"
    await expect(tbl.createIndex().build()).rejects.toThrow(
      /.*More than one vector columns found.*/
    );
    await tbl
      .createIndex("vec")
      .ivf_pq({ num_partitions: 2, num_sub_vectors: 2 })
      .build();

    const rst = await tbl
      .query()
      .nearestTo(
        Array(32)
          .fill(1)
          .map(() => Math.random())
      )
      .limit(2)
      .toArrow();
    expect(rst.numRows).toBe(2);

    // Search with specifying the column
    await expect(
      tbl
        .search(
          Array(64)
            .fill(1)
            .map(() => Math.random()),
          "vec"
        )
        .limit(2)
        .toArrow()
    ).rejects.toThrow(/.*does not match the dimension.*/);

    const query64 = Array(64)
      .fill(1)
      .map(() => Math.random());
    const rst64_1 = await tbl.query().nearestTo(query64).limit(2).toArrow();
    const rst64_2 = await tbl.search(query64, "vec2").limit(2).toArrow();
    expect(rst64_1.toString()).toEqual(rst64_2.toString());
    expect(rst64_1.numRows).toBe(2);
  });

  test("create scalar index", async () => {
    const db = await connect(tmpDir);
    const data = makeArrowTable(
      Array(300)
        .fill(1)
        .map((_, i) => ({
          id: i,
          vec: Array(32)
            .fill(1)
            .map(() => Math.random()),
        })),
      {
        schema,
      }
    );
    const tbl = await db.createTable("test", data);
    await tbl.createIndex("id").build();

    // check index directory
    const indexDir = path.join(tmpDir, "test.lance", "_indices");
    expect(fs.readdirSync(indexDir)).toHaveLength(1);
    // TODO: check index type.
  });
});
5  nodejs/build.rs  (new file)

@@ -0,0 +1,5 @@
extern crate napi_build;

fn main() {
    napi_build::setup();
}
5  nodejs/jest.config.js  (new file)

@@ -0,0 +1,5 @@
/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
  preset: 'ts-jest',
  testEnvironment: 'node',
};
3  nodejs/npm/darwin-arm64/README.md  (new file)

@@ -0,0 +1,3 @@
# `vectordb-darwin-arm64`

This is the **aarch64-apple-darwin** binary for `vectordb`

18  nodejs/npm/darwin-arm64/package.json  (new file)

@@ -0,0 +1,18 @@
{
  "name": "vectordb-darwin-arm64",
  "version": "0.4.3",
  "os": [
    "darwin"
  ],
  "cpu": [
    "arm64"
  ],
  "main": "vectordb.darwin-arm64.node",
  "files": [
    "vectordb.darwin-arm64.node"
  ],
  "license": "MIT",
  "engines": {
    "node": ">= 18"
  }
}

3  nodejs/npm/darwin-x64/README.md  (new file)

@@ -0,0 +1,3 @@
# `vectordb-darwin-x64`

This is the **x86_64-apple-darwin** binary for `vectordb`

18  nodejs/npm/darwin-x64/package.json  (new file)

@@ -0,0 +1,18 @@
{
  "name": "vectordb-darwin-x64",
  "version": "0.4.3",
  "os": [
    "darwin"
  ],
  "cpu": [
    "x64"
  ],
  "main": "vectordb.darwin-x64.node",
  "files": [
    "vectordb.darwin-x64.node"
  ],
  "license": "MIT",
  "engines": {
    "node": ">= 18"
  }
}

3  nodejs/npm/linux-arm64-gnu/README.md  (new file)

@@ -0,0 +1,3 @@
# `vectordb-linux-arm64-gnu`

This is the **aarch64-unknown-linux-gnu** binary for `vectordb`

21  nodejs/npm/linux-arm64-gnu/package.json  (new file)

@@ -0,0 +1,21 @@
{
  "name": "vectordb-linux-arm64-gnu",
  "version": "0.4.3",
  "os": [
    "linux"
  ],
  "cpu": [
    "arm64"
  ],
  "main": "vectordb.linux-arm64-gnu.node",
  "files": [
    "vectordb.linux-arm64-gnu.node"
  ],
  "license": "MIT",
  "engines": {
    "node": ">= 10"
  },
  "libc": [
    "glibc"
  ]
}

3  nodejs/npm/linux-x64-gnu/README.md  (new file)

@@ -0,0 +1,3 @@
# `vectordb-linux-x64-gnu`

This is the **x86_64-unknown-linux-gnu** binary for `vectordb`

21  nodejs/npm/linux-x64-gnu/package.json  (new file)

@@ -0,0 +1,21 @@
{
  "name": "vectordb-linux-x64-gnu",
  "version": "0.4.3",
  "os": [
    "linux"
  ],
  "cpu": [
    "x64"
  ],
  "main": "vectordb.linux-x64-gnu.node",
  "files": [
    "vectordb.linux-x64-gnu.node"
  ],
  "license": "MIT",
  "engines": {
    "node": ">= 10"
  },
  "libc": [
    "glibc"
  ]
}
6300  nodejs/package-lock.json  (generated, new file)

File diff suppressed because it is too large.
67  nodejs/package.json  (new file)

@@ -0,0 +1,67 @@
{
  "name": "vectordb",
  "version": "0.4.3",
  "main": "./dist/index.js",
  "types": "./dist/index.d.ts",
  "napi": {
    "name": "vectordb-nodejs",
    "triples": {
      "defaults": false,
      "additional": [
        "aarch64-apple-darwin",
        "aarch64-unknown-linux-gnu",
        "x86_64-apple-darwin",
        "x86_64-unknown-linux-gnu"
      ]
    }
  },
  "license": "Apache 2.0",
  "devDependencies": {
    "@napi-rs/cli": "^2.18.0",
    "@types/jest": "^29.5.11",
    "@typescript-eslint/eslint-plugin": "^6.19.0",
    "@typescript-eslint/parser": "^6.19.0",
    "eslint": "^8.56.0",
    "jest": "^29.7.0",
    "ts-jest": "^29.1.2",
    "typedoc": "^0.25.7",
    "typedoc-plugin-markdown": "^3.17.1",
    "typescript": "^5.3.3"
  },
  "ava": {
    "timeout": "3m"
  },
  "engines": {
    "node": ">= 18"
  },
  "cpu": [
    "x64",
    "arm64"
  ],
  "os": [
    "darwin",
    "linux",
    "windows"
  ],
  "scripts": {
    "artifacts": "napi artifacts",
    "build:native": "napi build --platform --release --js vectordb/native.js --dts vectordb/native.d.ts dist/",
    "build:debug": "napi build --platform --dts ../vectordb/native.d.ts --js ../vectordb/native.js dist/",
    "build": "npm run build:debug && tsc -b",
    "docs": "typedoc --plugin typedoc-plugin-markdown vectordb/index.ts",
    "lint": "eslint vectordb --ext .js,.ts",
    "prepublishOnly": "napi prepublish -t npm",
    "test": "npm run build && jest",
    "universal": "napi universal",
    "version": "napi version"
  },
  "optionalDependencies": {
    "vectordb-darwin-arm64": "0.4.3",
    "vectordb-darwin-x64": "0.4.3",
    "vectordb-linux-arm64-gnu": "0.4.3",
    "vectordb-linux-x64-gnu": "0.4.3"
  },
  "dependencies": {
    "apache-arrow": "^15.0.0"
  }
}
86  nodejs/src/connection.rs  (new file)

@@ -0,0 +1,86 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use napi::bindgen_prelude::*;
use napi_derive::*;

use crate::table::Table;
use vectordb::connection::{Connection as LanceDBConnection, Database};
use vectordb::ipc::ipc_file_to_batches;

#[napi]
pub struct Connection {
    conn: Arc<dyn LanceDBConnection>,
}

#[napi]
impl Connection {
    /// Create a new Connection instance from the given URI.
    #[napi(factory)]
    pub async fn new(uri: String) -> napi::Result<Self> {
        Ok(Self {
            conn: Arc::new(Database::connect(&uri).await.map_err(|e| {
                napi::Error::from_reason(format!("Failed to connect to database: {}", e))
            })?),
        })
    }

    /// List all tables in the dataset.
    #[napi]
    pub async fn table_names(&self) -> napi::Result<Vec<String>> {
        self.conn
            .table_names()
            .await
            .map_err(|e| napi::Error::from_reason(format!("{}", e)))
    }

    /// Create a table from an Apache Arrow IPC (file) buffer.
    ///
    /// Parameters:
    /// - name: The name of the table.
    /// - buf: The buffer containing the IPC file.
    ///
    #[napi]
    pub async fn create_table(&self, name: String, buf: Buffer) -> napi::Result<Table> {
        let batches = ipc_file_to_batches(buf.to_vec())
            .map_err(|e| napi::Error::from_reason(format!("Failed to read IPC file: {}", e)))?;
        let tbl = self
            .conn
            .create_table(&name, Box::new(batches), None)
            .await
            .map_err(|e| napi::Error::from_reason(format!("{}", e)))?;
        Ok(Table::new(tbl))
    }

    #[napi]
    pub async fn open_table(&self, name: String) -> napi::Result<Table> {
        let tbl = self
            .conn
            .open_table(&name)
            .await
            .map_err(|e| napi::Error::from_reason(format!("{}", e)))?;
        Ok(Table::new(tbl))
    }

    /// Drop the table with the given name, or raise an error if it does not exist.
    #[napi]
    pub async fn drop_table(&self, name: String) -> napi::Result<()> {
        self.conn
            .drop_table(&name)
            .await
            .map_err(|e| napi::Error::from_reason(format!("{}", e)))
    }
}
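From the TypeScript side these bindings surface as an async `connect` plus camelCased methods (napi-rs converts the snake_case names). A minimal sketch, mirroring `nodejs/__test__/index.test.ts` later in this diff; the temp-dir setup is the only assumption:

```typescript
import * as os from "os";
import * as path from "path";
import * as fs from "fs";
import { connect } from "../dist"; // the new SDK entry point, as in the tests

async function main() {
  // A throwaway local database path, as the tests use.
  const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "sketch-"));
  const db = await connect(tmpDir);
  await db.createTable("test", [{ id: 1 }, { id: 2 }]);
  console.log(await db.tableNames()); // ["test"]
}

void main();
```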
100  nodejs/src/index.rs  (new file)

@@ -0,0 +1,100 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use lance_linalg::distance::MetricType as LanceMetricType;
use napi_derive::napi;

#[napi]
pub enum IndexType {
    Scalar,
    IvfPq,
}

#[napi]
pub enum MetricType {
    L2,
    Cosine,
    Dot,
}

impl From<MetricType> for LanceMetricType {
    fn from(metric: MetricType) -> Self {
        match metric {
            MetricType::L2 => Self::L2,
            MetricType::Cosine => Self::Cosine,
            MetricType::Dot => Self::Dot,
        }
    }
}

#[napi]
pub struct IndexBuilder {
    inner: vectordb::index::IndexBuilder,
}

#[napi]
impl IndexBuilder {
    pub fn new(tbl: &dyn vectordb::Table) -> Self {
        let inner = tbl.create_index(&[]);
        Self { inner }
    }

    #[napi]
    pub unsafe fn replace(&mut self, v: bool) {
        self.inner.replace(v);
    }

    #[napi]
    pub unsafe fn column(&mut self, c: String) {
        self.inner.columns(&[c.as_str()]);
    }

    #[napi]
    pub unsafe fn name(&mut self, name: String) {
        self.inner.name(name.as_str());
    }

    #[napi]
    pub unsafe fn ivf_pq(
        &mut self,
        metric_type: Option<MetricType>,
        num_partitions: Option<u32>,
        num_sub_vectors: Option<u32>,
        num_bits: Option<u32>,
        max_iterations: Option<u32>,
        sample_rate: Option<u32>,
    ) {
        self.inner.ivf_pq();
        metric_type.map(|m| self.inner.metric_type(m.into()));
        num_partitions.map(|p| self.inner.num_partitions(p));
        num_sub_vectors.map(|s| self.inner.num_sub_vectors(s));
        num_bits.map(|b| self.inner.num_bits(b));
        max_iterations.map(|i| self.inner.max_iterations(i));
        sample_rate.map(|s| self.inner.sample_rate(s));
    }

    #[napi]
    pub unsafe fn scalar(&mut self) {
        self.inner.scalar();
    }

    #[napi]
    pub async fn build(&self) -> napi::Result<()> {
        self.inner
            .build()
            .await
            .map_err(|e| napi::Error::from_reason(format!("Failed to build index: {}", e)))?;
        Ok(())
    }
}
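On the TypeScript side the builder chains, as exercised by `nodejs/__test__/table.test.ts` in this diff (the parameter values here are illustrative, and `tbl` is an open Table as in the earlier sketch):

```typescript
// Sketch: build an IVF_PQ index over the "vec" column, then a scalar
// index over "id", mirroring the test file's usage.
await tbl
  .createIndex("vec")
  .ivf_pq({ num_partitions: 2, num_sub_vectors: 2 })
  .build();
await tbl.createIndex("id").build();
```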
47  nodejs/src/iterator.rs  (new file)

@@ -0,0 +1,47 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use futures::StreamExt;
use lance::io::RecordBatchStream;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use vectordb::ipc::batches_to_ipc_file;

/** Typescript-style Async Iterator over RecordBatches */
#[napi]
pub struct RecordBatchIterator {
    inner: Box<dyn RecordBatchStream + Unpin>,
}

#[napi]
impl RecordBatchIterator {
    pub(crate) fn new(inner: Box<dyn RecordBatchStream + Unpin>) -> Self {
        Self { inner }
    }

    #[napi]
    pub async unsafe fn next(&mut self) -> napi::Result<Option<Buffer>> {
        if let Some(rst) = self.inner.next().await {
            let batch = rst.map_err(|e| {
                napi::Error::from_reason(format!("Failed to get next batch from stream: {}", e))
            })?;
            batches_to_ipc_file(&[batch])
                .map_err(|e| napi::Error::from_reason(format!("Failed to write IPC file: {}", e)))
                .map(|buf| Some(Buffer::from(buf)))
        } else {
            // We are done with the stream.
            Ok(None)
        }
    }
}
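Consumed from TypeScript, this stream behaves as an async iterable of Arrow batches, as the tests in this diff do:

```typescript
// Sketch: iterate query results batch by batch; each `r` is an Arrow
// RecordBatch decoded from the IPC buffer produced by the Rust side.
for await (const r of tbl.query().filter("id > 1").select(["id"])) {
  console.log(r.numRows);
}
```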
48  nodejs/src/lib.rs  (new file)

@@ -0,0 +1,48 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use connection::Connection;
use napi_derive::*;

mod connection;
mod index;
mod iterator;
mod query;
mod table;

#[napi(object)]
pub struct ConnectionOptions {
    pub uri: String,
    pub api_key: Option<String>,
    pub host_override: Option<String>,
}

/// Write mode for writing a table.
#[napi(string_enum)]
pub enum WriteMode {
    Create,
    Append,
    Overwrite,
}

/// Write options when creating a Table.
#[napi(object)]
pub struct WriteOptions {
    pub mode: Option<WriteMode>,
}

#[napi]
pub async fn connect(options: ConnectionOptions) -> napi::Result<Connection> {
    Connection::new(options.uri.clone()).await
}
81  nodejs/src/query.rs  (new file)

@@ -0,0 +1,81 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use napi::bindgen_prelude::*;
use napi_derive::napi;
use vectordb::query::Query as LanceDBQuery;

use crate::{iterator::RecordBatchIterator, table::Table};

#[napi]
pub struct Query {
    inner: LanceDBQuery,
}

#[napi]
impl Query {
    pub fn new(table: &Table) -> Self {
        Self {
            inner: table.table.query(),
        }
    }

    #[napi]
    pub fn column(&mut self, column: String) {
        self.inner = self.inner.clone().column(&column);
    }

    #[napi]
    pub fn filter(&mut self, filter: String) {
        self.inner = self.inner.clone().filter(filter);
    }

    #[napi]
    pub fn select(&mut self, columns: Vec<String>) {
        self.inner = self.inner.clone().select(&columns);
    }

    #[napi]
    pub fn limit(&mut self, limit: u32) {
        self.inner = self.inner.clone().limit(limit as usize);
    }

    #[napi]
    pub fn prefilter(&mut self, prefilter: bool) {
        self.inner = self.inner.clone().prefilter(prefilter);
    }

    #[napi]
    pub fn nearest_to(&mut self, vector: Float32Array) {
        self.inner = self.inner.clone().nearest_to(&vector);
    }

    #[napi]
    pub fn refine_factor(&mut self, refine_factor: u32) {
        self.inner = self.inner.clone().refine_factor(refine_factor);
    }

    #[napi]
    pub fn nprobes(&mut self, nprobe: u32) {
        self.inner = self.inner.clone().nprobes(nprobe as usize);
    }

    #[napi]
    pub async fn execute_stream(&self) -> napi::Result<RecordBatchIterator> {
        let inner_stream = self.inner.execute_stream().await.map_err(|e| {
            napi::Error::from_reason(format!("Failed to execute query stream: {}", e))
        })?;
        Ok(RecordBatchIterator::new(Box::new(inner_stream)))
    }
}
88  nodejs/src/table.rs  (new file)

@@ -0,0 +1,88 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use arrow_ipc::writer::FileWriter;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use vectordb::{ipc::ipc_file_to_batches, table::TableRef};

use crate::index::IndexBuilder;
use crate::query::Query;

#[napi]
pub struct Table {
    pub(crate) table: TableRef,
}

#[napi]
impl Table {
    pub(crate) fn new(table: TableRef) -> Self {
        Self { table }
    }

    /// Return Schema as empty Arrow IPC file.
    #[napi]
    pub fn schema(&self) -> napi::Result<Buffer> {
        let mut writer = FileWriter::try_new(vec![], &self.table.schema())
            .map_err(|e| napi::Error::from_reason(format!("Failed to create IPC file: {}", e)))?;
        writer
            .finish()
            .map_err(|e| napi::Error::from_reason(format!("Failed to finish IPC file: {}", e)))?;
        Ok(Buffer::from(writer.into_inner().map_err(|e| {
            napi::Error::from_reason(format!("Failed to get IPC file: {}", e))
        })?))
    }

    #[napi]
    pub async fn add(&self, buf: Buffer) -> napi::Result<()> {
        let batches = ipc_file_to_batches(buf.to_vec())
            .map_err(|e| napi::Error::from_reason(format!("Failed to read IPC file: {}", e)))?;
        self.table.add(Box::new(batches), None).await.map_err(|e| {
            napi::Error::from_reason(format!(
                "Failed to add batches to table {}: {}",
                self.table, e
            ))
        })
    }

    #[napi]
    pub async fn count_rows(&self) -> napi::Result<usize> {
        self.table.count_rows().await.map_err(|e| {
            napi::Error::from_reason(format!(
                "Failed to count rows in table {}: {}",
                self.table, e
            ))
        })
    }

    #[napi]
    pub async fn delete(&self, predicate: String) -> napi::Result<()> {
        self.table.delete(&predicate).await.map_err(|e| {
            napi::Error::from_reason(format!(
                "Failed to delete rows in table {}: {}",
                self.table, e
            ))
        })
    }

    #[napi]
    pub fn create_index(&self) -> IndexBuilder {
        IndexBuilder::new(self.table.as_ref())
    }

    #[napi]
    pub fn query(&self) -> Query {
        Query::new(self)
    }
}
31  nodejs/tsconfig.json  (new file)

@@ -0,0 +1,31 @@
{
  "include": [
    "vectordb/*.ts",
    "vectordb/**/*.ts",
    "vectordb/*.js",
  ],
  "compilerOptions": {
    "target": "es2022",
    "module": "commonjs",
    "declaration": true,
    "outDir": "./dist",
    "strict": true,
    "allowJs": true,
    "resolveJsonModule": true,
  },
  "exclude": [
    "./dist/*",
  ],
  "typedocOptions": {
    "entryPoints": [
      "vectordb/index.ts"
    ],
    "out": "../docs/src/javascript/",
    "visibilityFilters": {
      "protected": false,
      "private": false,
      "inherited": true,
      "external": false,
    }
  }
}
183
nodejs/vectordb/arrow.ts
Normal file
183
nodejs/vectordb/arrow.ts
Normal file
@@ -0,0 +1,183 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import {
  Field,
  FixedSizeList,
  Float,
  Float32,
  Schema,
  Table as ArrowTable,
  Table,
  Vector,
  vectorFromArray,
  tableToIPC,
} from "apache-arrow";

/** Data type accepted by NodeJS SDK */
export type Data = Record<string, unknown>[] | ArrowTable;

export class VectorColumnOptions {
  /** Vector column type. */
  type: Float = new Float32();

  constructor(values?: Partial<VectorColumnOptions>) {
    Object.assign(this, values);
  }
}

/** Options to control the makeArrowTable call. */
export class MakeArrowTableOptions {
  /** Provided schema. */
  schema?: Schema;

  /** Vector columns */
  vectorColumns: Record<string, VectorColumnOptions> = {
    vector: new VectorColumnOptions(),
  };

  constructor(values?: Partial<MakeArrowTableOptions>) {
    Object.assign(this, values);
  }
}

/**
 * An enhanced version of the {@link makeTable} function from Apache Arrow
 * that supports nested fields and embeddings columns.
 *
 * Note that it currently does not support nulls.
 *
 * @param data input data
 * @param options options to control the makeArrowTable call.
 *
 * @example
 *
 * ```ts
 * import { makeArrowTable } from "../arrow";
 * import { Field, FixedSizeList, Float16, Float32, Int32, Schema } from "apache-arrow";
 *
 * const schema = new Schema([
 *   new Field("a", new Int32()),
 *   new Field("b", new Float32()),
 *   new Field("c", new FixedSizeList(3, new Field("item", new Float16()))),
 * ]);
 * const table = makeArrowTable([
 *   { a: 1, b: 2, c: [1, 2, 3] },
 *   { a: 4, b: 5, c: [4, 5, 6] },
 *   { a: 7, b: 8, c: [7, 8, 9] },
 * ], { schema });
 * ```
 *
 * If the schema is not provided, it guesses the vector columns. For example,
 * by default it assumes that the column named `vector` is a vector column.
 *
 * ```ts
 * const schema = new Schema([
 *   new Field("a", new Float64()),
 *   new Field("b", new Float64()),
 *   new Field(
 *     "vector",
 *     new FixedSizeList(3, new Field("item", new Float32()))
 *   ),
 * ]);
 * const table = makeArrowTable([
 *   { a: 1, b: 2, vector: [1, 2, 3] },
 *   { a: 4, b: 5, vector: [4, 5, 6] },
 *   { a: 7, b: 8, vector: [7, 8, 9] },
 * ]);
 * assert.deepEqual(table.schema, schema);
 * ```
 *
 * You can also specify the vector column types and names using the options:
 *
 * ```typescript
 * const schema = new Schema([
 *   new Field('a', new Float64()),
 *   new Field('b', new Float64()),
 *   new Field('vec1', new FixedSizeList(3, new Field('item', new Float16()))),
 *   new Field('vec2', new FixedSizeList(3, new Field('item', new Float16())))
 * ]);
 * const table = makeArrowTable([
 *   { a: 1, b: 2, vec1: [1, 2, 3], vec2: [2, 4, 6] },
 *   { a: 4, b: 5, vec1: [4, 5, 6], vec2: [8, 10, 12] },
 *   { a: 7, b: 8, vec1: [7, 8, 9], vec2: [14, 16, 18] }
 * ], {
 *   vectorColumns: {
 *     vec1: { type: new Float16() },
 *     vec2: { type: new Float16() }
 *   }
 * })
 * assert.deepEqual(table.schema, schema)
 * ```
 */
export function makeArrowTable(
  data: Record<string, any>[],
  options?: Partial<MakeArrowTableOptions>
): Table {
  if (data.length === 0) {
    throw new Error("At least one record needs to be provided");
  }
  const opt = new MakeArrowTableOptions(options ?? {});
  const columns: Record<string, Vector> = {};
  // TODO: sample dataset to find missing columns
  const columnNames = Object.keys(data[0]);
  for (const colName of columnNames) {
    // eslint-disable-next-line @typescript-eslint/no-unsafe-return
    const values = data.map((datum) => datum[colName]);
    let vector: Vector;

    if (opt.schema !== undefined) {
      // Explicit schema is provided, highest priority
      vector = vectorFromArray(
        values,
        opt.schema?.fields.filter((f) => f.name === colName)[0]?.type
      );
    } else {
      const vectorColumnOptions = opt.vectorColumns[colName];
      if (vectorColumnOptions !== undefined) {
        const fslType = new FixedSizeList(
          (values[0] as any[]).length,
          new Field("item", vectorColumnOptions.type, false)
        );
        vector = vectorFromArray(values, fslType);
      } else {
        // Normal case
        vector = vectorFromArray(values);
      }
    }
    columns[colName] = vector;
  }

  return new Table(columns);
}

/**
 * Convert an Arrow Table to a Buffer.
 *
 * @param data Arrow Table
 * @param schema Arrow Schema, optional
 * @returns a Node.js `Buffer`
 */
export function toBuffer(data: Data, schema?: Schema): Buffer {
  let tbl: Table;
  if (data instanceof Table) {
    tbl = data;
  } else {
    tbl = makeArrowTable(data, { schema });
  }
  return Buffer.from(tableToIPC(tbl));
}
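
// A minimal round-trip sketch for `makeArrowTable` and `toBuffer` (values are
// illustrative; `tableFromIPC` is the standard apache-arrow IPC reader):
//
//   import { tableFromIPC } from "apache-arrow";
//
//   const buf = toBuffer([
//     { id: 1, vector: [0.1, 0.2, 0.3] },
//     { id: 2, vector: [0.4, 0.5, 0.6] },
//   ]);
//   const table = tableFromIPC(buf); // schema and rows survive the round trip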
70
nodejs/vectordb/connection.ts
Normal file
@@ -0,0 +1,70 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import { toBuffer } from "./arrow";
import { Connection as _NativeConnection } from "./native";
import { Table } from "./table";
import { Table as ArrowTable } from "apache-arrow";

/**
 * A LanceDB Connection that allows you to open tables and create new ones.
 *
 * A connection can be local (against the filesystem) or remote (against a server).
 */
export class Connection {
  readonly inner: _NativeConnection;

  constructor(inner: _NativeConnection) {
    this.inner = inner;
  }

  /** List all the table names in this database. */
  async tableNames(): Promise<string[]> {
    return this.inner.tableNames();
  }

  /**
   * Open a table in the database.
   *
   * @param name The name of the table.
   */
  async openTable(name: string): Promise<Table> {
    const innerTable = await this.inner.openTable(name);
    return new Table(innerTable);
  }

  /**
   * Create a new Table and initialize it with new data.
   *
   * @param {string} name - The name of the table.
   * @param data - Non-empty Array of Records to be inserted into the table
   */
  async createTable(
    name: string,
    data: Record<string, unknown>[] | ArrowTable
  ): Promise<Table> {
    const buf = toBuffer(data);
    const innerTable = await this.inner.createTable(name, buf);
    return new Table(innerTable);
  }

  /**
   * Drop an existing table.
   * @param name The name of the table to drop.
   */
  async dropTable(name: string): Promise<void> {
    return this.inner.dropTable(name);
  }
}
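
// A minimal usage sketch (table name and record shape are illustrative;
// a `Connection` is normally obtained via the top-level `connect()`):
//
//   const tbl = await conn.createTable("my_table", [
//     { id: 1, vector: [0.1, 0.2] },
//   ]);
//   console.log(await conn.tableNames()); // ["my_table"]
//   await conn.dropTable("my_table");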
64
nodejs/vectordb/index.ts
Normal file
@@ -0,0 +1,64 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import { Connection } from "./connection";
import { Connection as NativeConnection, ConnectionOptions } from "./native.js";

export {
  ConnectionOptions,
  WriteOptions,
  Query,
  MetricType,
} from "./native.js";
export { Connection } from "./connection";
export { Table } from "./table";
export { Data } from "./arrow";
export { IvfPQOptions, IndexBuilder } from "./indexer";

/**
 * Connect to a LanceDB instance at the given URI.
 *
 * Accepted formats:
 *
 * - `/path/to/database` - local database
 * - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage
 * - `db://host:port` - remote database (LanceDB cloud)
 *
 * @param uri The uri of the database. If the database uri starts with `db://` then it connects to a remote database.
 *
 * @see {@link ConnectionOptions} for more details on the URI format.
 */
export async function connect(uri: string): Promise<Connection>;
export async function connect(
  opts: Partial<ConnectionOptions>
): Promise<Connection>;
export async function connect(
  args: string | Partial<ConnectionOptions>
): Promise<Connection> {
  let opts: ConnectionOptions;
  if (typeof args === "string") {
    opts = { uri: args };
  } else {
    opts = Object.assign(
      {
        uri: "",
        apiKey: "",
        hostOverride: "",
      },
      args
    );
  }
  const nativeConn = await NativeConnection.new(opts.uri);
  return new Connection(nativeConn);
}
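
// A minimal connect sketch covering both overloads (the local path and the
// API key are illustrative placeholders):
//
//   const localDb = await connect("/tmp/lancedb");
//   const cloudDb = await connect({ uri: "db://my-database", apiKey: "..." });
//   console.log(await localDb.tableNames());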
102
nodejs/vectordb/indexer.ts
Normal file
@@ -0,0 +1,102 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import {
  MetricType,
  IndexBuilder as NativeBuilder,
  Table as NativeTable,
} from "./native";

/** Options to create `IVF_PQ` index */
export interface IvfPQOptions {
  /** Number of IVF partitions. */
  num_partitions?: number;

  /** Number of sub-vectors in PQ coding. */
  num_sub_vectors?: number;

  /** Number of bits used for each PQ code. */
  num_bits?: number;

  /** Metric type to calculate the distance between vectors.
   *
   * Supported metrics: `L2`, `Cosine` and `Dot`.
   */
  metric_type?: MetricType;

  /** Number of iterations to train K-means.
   *
   * Default is 50. More iterations usually yield better results,
   * but take longer to train.
   */
  max_iterations?: number;

  sample_rate?: number;
}

/**
 * Builds an index on a LanceDB {@link Table}.
 *
 * @see {@link Table.createIndex} for detailed usage.
 */
export class IndexBuilder {
  private inner: NativeBuilder;

  constructor(tbl: NativeTable) {
    this.inner = tbl.createIndex();
  }

  /** Instruct the builder to build an `IVF_PQ` index */
  ivf_pq(options?: IvfPQOptions): IndexBuilder {
    this.inner.ivfPq(
      options?.metric_type,
      options?.num_partitions,
      options?.num_sub_vectors,
      options?.num_bits,
      options?.max_iterations,
      options?.sample_rate
    );
    return this;
  }

  /** Instruct the builder to build a Scalar index. */
  scalar(): IndexBuilder {
    this.inner.scalar();
    return this;
  }

  /** Set the column(s) to create the index on. */
  column(col: string): IndexBuilder {
    this.inner.column(col);
    return this;
  }

  /** Set to true to replace an existing index. */
  replace(val: boolean): IndexBuilder {
    this.inner.replace(val);
    return this;
  }

  /** Specify the name of the index. Optional. */
  name(n: string): IndexBuilder {
    this.inner.name(n);
    return this;
  }

  /** Build the index. */
  async build() {
    await this.inner.build();
  }
}
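
// A minimal sketch of building an `IVF_PQ` index (parameter values are
// illustrative; `tbl` is a Table obtained from Connection.openTable):
//
//   await tbl.createIndex("vector")
//     .ivf_pq({ num_partitions: 256, num_sub_vectors: 16, metric_type: MetricType.L2 })
//     .replace(true)
//     .build();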
80
nodejs/vectordb/native.d.ts
vendored
Normal file
@@ -0,0 +1,80 @@
/* tslint:disable */
/* eslint-disable */

/* auto-generated by NAPI-RS */

export const enum IndexType {
  Scalar = 0,
  IvfPq = 1
}
export const enum MetricType {
  L2 = 0,
  Cosine = 1,
  Dot = 2
}
export interface ConnectionOptions {
  uri: string
  apiKey?: string
  hostOverride?: string
}
/** Write mode for writing a table. */
export const enum WriteMode {
  Create = 'Create',
  Append = 'Append',
  Overwrite = 'Overwrite'
}
/** Write options when creating a Table. */
export interface WriteOptions {
  mode?: WriteMode
}
export function connect(options: ConnectionOptions): Promise<Connection>
export class Connection {
  /** Create a new Connection instance from the given URI. */
  static new(uri: string): Promise<Connection>
  /** List all tables in the dataset. */
  tableNames(): Promise<Array<string>>
  /**
   * Create a table from an Apache Arrow IPC (file) buffer.
   *
   * Parameters:
   * - name: The name of the table.
   * - buf: The buffer containing the IPC file.
   */
  createTable(name: string, buf: Buffer): Promise<Table>
  openTable(name: string): Promise<Table>
  /** Drop the table with the given name, or raise an error if it does not exist. */
  dropTable(name: string): Promise<void>
}
export class IndexBuilder {
  replace(v: boolean): void
  column(c: string): void
  name(name: string): void
  ivfPq(metricType?: MetricType | undefined | null, numPartitions?: number | undefined | null, numSubVectors?: number | undefined | null, numBits?: number | undefined | null, maxIterations?: number | undefined | null, sampleRate?: number | undefined | null): void
  scalar(): void
  build(): Promise<void>
}
/** Typescript-style Async Iterator over RecordBatches */
export class RecordBatchIterator {
  next(): Promise<Buffer | null>
}
export class Query {
  column(column: string): void
  filter(filter: string): void
  select(columns: Array<string>): void
  limit(limit: number): void
  prefilter(prefilter: boolean): void
  nearestTo(vector: Float32Array): void
  refineFactor(refineFactor: number): void
  nprobes(nprobe: number): void
  executeStream(): Promise<RecordBatchIterator>
}
export class Table {
  /** Return Schema as empty Arrow IPC file. */
  schema(): Buffer
  add(buf: Buffer): Promise<void>
  countRows(): Promise<bigint>
  delete(predicate: string): Promise<void>
  createIndex(): IndexBuilder
  query(): Query
}
308
nodejs/vectordb/native.js
Normal file
@@ -0,0 +1,308 @@
/* tslint:disable */
/* eslint-disable */
/* prettier-ignore */

/* auto-generated by NAPI-RS */

const { existsSync, readFileSync } = require('fs')
const { join } = require('path')

const { platform, arch } = process

let nativeBinding = null
let localFileExisted = false
let loadError = null

function isMusl() {
  // For Node 10
  if (!process.report || typeof process.report.getReport !== 'function') {
    try {
      const lddPath = require('child_process').execSync('which ldd').toString().trim()
      return readFileSync(lddPath, 'utf8').includes('musl')
    } catch (e) {
      return true
    }
  } else {
    const { glibcVersionRuntime } = process.report.getReport().header
    return !glibcVersionRuntime
  }
}

switch (platform) {
  case 'android':
    switch (arch) {
      case 'arm64':
        localFileExisted = existsSync(join(__dirname, 'vectordb-nodejs.android-arm64.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./vectordb-nodejs.android-arm64.node')
          } else {
            nativeBinding = require('vectordb-android-arm64')
          }
        } catch (e) {
          loadError = e
        }
        break
      case 'arm':
        localFileExisted = existsSync(join(__dirname, 'vectordb-nodejs.android-arm-eabi.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./vectordb-nodejs.android-arm-eabi.node')
          } else {
            nativeBinding = require('vectordb-android-arm-eabi')
          }
        } catch (e) {
          loadError = e
        }
        break
      default:
        throw new Error(`Unsupported architecture on Android ${arch}`)
    }
    break
  case 'win32':
    switch (arch) {
      case 'x64':
        localFileExisted = existsSync(
          join(__dirname, 'vectordb-nodejs.win32-x64-msvc.node')
        )
        try {
          if (localFileExisted) {
            nativeBinding = require('./vectordb-nodejs.win32-x64-msvc.node')
          } else {
            nativeBinding = require('vectordb-win32-x64-msvc')
          }
        } catch (e) {
          loadError = e
        }
        break
      case 'ia32':
        localFileExisted = existsSync(
          join(__dirname, 'vectordb-nodejs.win32-ia32-msvc.node')
        )
        try {
          if (localFileExisted) {
            nativeBinding = require('./vectordb-nodejs.win32-ia32-msvc.node')
          } else {
            nativeBinding = require('vectordb-win32-ia32-msvc')
          }
        } catch (e) {
          loadError = e
        }
        break
      case 'arm64':
        localFileExisted = existsSync(
          join(__dirname, 'vectordb-nodejs.win32-arm64-msvc.node')
        )
        try {
          if (localFileExisted) {
            nativeBinding = require('./vectordb-nodejs.win32-arm64-msvc.node')
          } else {
            nativeBinding = require('vectordb-win32-arm64-msvc')
          }
        } catch (e) {
          loadError = e
        }
        break
      default:
        throw new Error(`Unsupported architecture on Windows: ${arch}`)
    }
    break
  case 'darwin':
    localFileExisted = existsSync(join(__dirname, 'vectordb-nodejs.darwin-universal.node'))
    try {
      if (localFileExisted) {
        nativeBinding = require('./vectordb-nodejs.darwin-universal.node')
      } else {
        nativeBinding = require('vectordb-darwin-universal')
      }
      break
    } catch {}
    switch (arch) {
      case 'x64':
        localFileExisted = existsSync(join(__dirname, 'vectordb-nodejs.darwin-x64.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./vectordb-nodejs.darwin-x64.node')
          } else {
            nativeBinding = require('vectordb-darwin-x64')
          }
        } catch (e) {
          loadError = e
        }
        break
      case 'arm64':
        localFileExisted = existsSync(
          join(__dirname, 'vectordb-nodejs.darwin-arm64.node')
        )
        try {
          if (localFileExisted) {
            nativeBinding = require('./vectordb-nodejs.darwin-arm64.node')
          } else {
            nativeBinding = require('vectordb-darwin-arm64')
          }
        } catch (e) {
          loadError = e
        }
        break
      default:
        throw new Error(`Unsupported architecture on macOS: ${arch}`)
    }
    break
  case 'freebsd':
    if (arch !== 'x64') {
      throw new Error(`Unsupported architecture on FreeBSD: ${arch}`)
    }
    localFileExisted = existsSync(join(__dirname, 'vectordb-nodejs.freebsd-x64.node'))
    try {
      if (localFileExisted) {
        nativeBinding = require('./vectordb-nodejs.freebsd-x64.node')
      } else {
        nativeBinding = require('vectordb-freebsd-x64')
      }
    } catch (e) {
      loadError = e
    }
    break
  case 'linux':
    switch (arch) {
      case 'x64':
        if (isMusl()) {
          localFileExisted = existsSync(
            join(__dirname, 'vectordb-nodejs.linux-x64-musl.node')
          )
          try {
            if (localFileExisted) {
              nativeBinding = require('./vectordb-nodejs.linux-x64-musl.node')
            } else {
              nativeBinding = require('vectordb-linux-x64-musl')
            }
          } catch (e) {
            loadError = e
          }
        } else {
          localFileExisted = existsSync(
            join(__dirname, 'vectordb-nodejs.linux-x64-gnu.node')
          )
          try {
            if (localFileExisted) {
              nativeBinding = require('./vectordb-nodejs.linux-x64-gnu.node')
            } else {
              nativeBinding = require('vectordb-linux-x64-gnu')
            }
          } catch (e) {
            loadError = e
          }
        }
        break
      case 'arm64':
        if (isMusl()) {
          localFileExisted = existsSync(
            join(__dirname, 'vectordb-nodejs.linux-arm64-musl.node')
          )
          try {
            if (localFileExisted) {
              nativeBinding = require('./vectordb-nodejs.linux-arm64-musl.node')
            } else {
              nativeBinding = require('vectordb-linux-arm64-musl')
            }
          } catch (e) {
            loadError = e
          }
        } else {
          localFileExisted = existsSync(
            join(__dirname, 'vectordb-nodejs.linux-arm64-gnu.node')
          )
          try {
            if (localFileExisted) {
              nativeBinding = require('./vectordb-nodejs.linux-arm64-gnu.node')
            } else {
              nativeBinding = require('vectordb-linux-arm64-gnu')
            }
          } catch (e) {
            loadError = e
          }
        }
        break
      case 'arm':
        localFileExisted = existsSync(
          join(__dirname, 'vectordb-nodejs.linux-arm-gnueabihf.node')
        )
        try {
          if (localFileExisted) {
            nativeBinding = require('./vectordb-nodejs.linux-arm-gnueabihf.node')
          } else {
            nativeBinding = require('vectordb-linux-arm-gnueabihf')
          }
        } catch (e) {
          loadError = e
        }
        break
      case 'riscv64':
        if (isMusl()) {
          localFileExisted = existsSync(
            join(__dirname, 'vectordb-nodejs.linux-riscv64-musl.node')
          )
          try {
            if (localFileExisted) {
              nativeBinding = require('./vectordb-nodejs.linux-riscv64-musl.node')
            } else {
              nativeBinding = require('vectordb-linux-riscv64-musl')
            }
          } catch (e) {
            loadError = e
          }
        } else {
          localFileExisted = existsSync(
            join(__dirname, 'vectordb-nodejs.linux-riscv64-gnu.node')
          )
          try {
            if (localFileExisted) {
              nativeBinding = require('./vectordb-nodejs.linux-riscv64-gnu.node')
            } else {
              nativeBinding = require('vectordb-linux-riscv64-gnu')
            }
          } catch (e) {
            loadError = e
          }
        }
        break
      case 's390x':
        localFileExisted = existsSync(
          join(__dirname, 'vectordb-nodejs.linux-s390x-gnu.node')
        )
        try {
          if (localFileExisted) {
            nativeBinding = require('./vectordb-nodejs.linux-s390x-gnu.node')
          } else {
            nativeBinding = require('vectordb-linux-s390x-gnu')
          }
        } catch (e) {
          loadError = e
        }
        break
      default:
        throw new Error(`Unsupported architecture on Linux: ${arch}`)
    }
    break
  default:
    throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`)
}

if (!nativeBinding) {
  if (loadError) {
    throw loadError
  }
  throw new Error(`Failed to load native binding`)
}

const { Connection, IndexType, MetricType, IndexBuilder, RecordBatchIterator, Query, Table, WriteMode, connect } = nativeBinding

module.exports.Connection = Connection
module.exports.IndexType = IndexType
module.exports.MetricType = MetricType
module.exports.IndexBuilder = IndexBuilder
module.exports.RecordBatchIterator = RecordBatchIterator
module.exports.Query = Query
module.exports.Table = Table
module.exports.WriteMode = WriteMode
module.exports.connect = connect
152
nodejs/vectordb/query.ts
Normal file
@@ -0,0 +1,152 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import { RecordBatch, tableFromIPC, Table as ArrowTable } from "apache-arrow";
import {
  RecordBatchIterator as NativeBatchIterator,
  Query as NativeQuery,
  Table as NativeTable,
} from "./native";

class RecordBatchIterator implements AsyncIterator<RecordBatch> {
  private promised_inner?: Promise<NativeBatchIterator>;
  private inner?: NativeBatchIterator;

  constructor(
    inner?: NativeBatchIterator,
    promise?: Promise<NativeBatchIterator>
  ) {
    // TODO: check promise reliably so we don't need to pass two arguments.
    this.inner = inner;
    this.promised_inner = promise;
  }

  async next(): Promise<IteratorResult<RecordBatch<any>, any>> {
    if (this.inner === undefined) {
      this.inner = await this.promised_inner;
    }
    if (this.inner === undefined) {
      throw new Error("Invalid iterator state");
    }
    const n = await this.inner.next();
    if (n == null) {
      return Promise.resolve({ done: true, value: null });
    }
    const tbl = tableFromIPC(n);
    if (tbl.batches.length != 1) {
      throw new Error("Expected only one batch");
    }
    return Promise.resolve({ done: false, value: tbl.batches[0] });
  }
}
/* eslint-enable */

/** Query executor */
export class Query implements AsyncIterable<RecordBatch> {
  private readonly inner: NativeQuery;

  constructor(tbl: NativeTable) {
    this.inner = tbl.query();
  }

  /** Set the column to run the query on. */
  column(column: string): Query {
    this.inner.column(column);
    return this;
  }

  /** Set the filter predicate; only results that satisfy the filter are returned. */
  filter(predicate: string): Query {
    this.inner.filter(predicate);
    return this;
  }

  /**
   * Select the columns to return. If not set, all columns are returned.
   */
  select(columns: string[]): Query {
    this.inner.select(columns);
    return this;
  }

  /**
   * Set the limit of rows to return.
   */
  limit(limit: number): Query {
    this.inner.limit(limit);
    return this;
  }

  prefilter(prefilter: boolean): Query {
    this.inner.prefilter(prefilter);
    return this;
  }

  /**
   * Set the query vector.
   */
  nearestTo(vector: number[]): Query {
    this.inner.nearestTo(Float32Array.from(vector));
    return this;
  }

  /**
   * Set the number of IVF partitions to use for the query.
   */
  nprobes(nprobes: number): Query {
    this.inner.nprobes(nprobes);
    return this;
  }

  /**
   * Set the refine factor for the query.
   */
  refineFactor(refine_factor: number): Query {
    this.inner.refineFactor(refine_factor);
    return this;
  }

  /**
   * Execute the query and return the results as an AsyncIterator.
   */
  async executeStream(): Promise<RecordBatchIterator> {
    const inner = await this.inner.executeStream();
    return new RecordBatchIterator(inner);
  }

  /** Collect the results as an Arrow Table. */
  async toArrow(): Promise<ArrowTable> {
    const batches = [];
    for await (const batch of this) {
      batches.push(batch);
    }
    return new ArrowTable(batches);
  }

  /** Return a JSON array of all results. */
  async toArray(): Promise<any[]> {
    const tbl = await this.toArrow();
    // eslint-disable-next-line @typescript-eslint/no-unsafe-return
    return tbl.toArray();
  }

  [Symbol.asyncIterator](): AsyncIterator<RecordBatch<any>> {
    const promise = this.inner.executeStream();
    return new RecordBatchIterator(undefined, promise);
  }
}
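
// A minimal sketch of running a query (values are illustrative; `tbl` is a
// Table obtained from Connection.openTable):
//
//   const rows = await tbl.query()
//     .nearestTo([0.1, 0.2, 0.3])
//     .nprobes(20)
//     .limit(5)
//     .toArray(); // JSON rows
//
//   // Or stream RecordBatches with the async iterator:
//   for await (const batch of tbl.query().filter("id > 10")) {
//     console.log(batch.numRows);
//   }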
153
nodejs/vectordb/table.ts
Normal file
@@ -0,0 +1,153 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import { Schema, tableFromIPC } from "apache-arrow";
import { Table as _NativeTable } from "./native";
import { toBuffer, Data } from "./arrow";
import { Query } from "./query";
import { IndexBuilder } from "./indexer";

/**
 * A LanceDB Table is a collection of Records.
 *
 * Each Record has one or more vector fields.
 */
export class Table {
  private readonly inner: _NativeTable;

  /** Construct a Table. Internal use only. */
  constructor(inner: _NativeTable) {
    this.inner = inner;
  }

  /** Get the schema of the table. */
  get schema(): Schema {
    const schemaBuf = this.inner.schema();
    const tbl = tableFromIPC(schemaBuf);
    return tbl.schema;
  }

  /**
   * Insert records into this Table.
   *
   * @param {Data} data Records to be inserted into the Table
   */
  async add(data: Data): Promise<void> {
    const buffer = toBuffer(data);
    await this.inner.add(buffer);
  }

  /** Count the total number of rows in the dataset. */
  async countRows(): Promise<bigint> {
    return await this.inner.countRows();
  }

  /** Delete the rows that satisfy the predicate. */
  async delete(predicate: string): Promise<void> {
    await this.inner.delete(predicate);
  }

  /** Create an index over the columns.
   *
   * @param {string} column The column to create the index on. If not specified,
   * it will create an index on the vector field.
   *
   * @example
   *
   * By default, it creates a vector index on the vector column.
   *
   * ```typescript
   * const table = await conn.openTable("my_table");
   * await table.createIndex().build();
   * ```
   *
   * You can specify `IVF_PQ` parameters via the `ivf_pq({})` call.
   * ```typescript
   * const table = await conn.openTable("my_table");
   * await table.createIndex("my_vec_col")
   *   .ivf_pq({ num_partitions: 128, num_sub_vectors: 16 })
   *   .build();
   * ```
   *
   * Or create a Scalar index
   *
   * ```typescript
   * await table.createIndex("my_float_col").scalar().build();
   * ```
   */
  createIndex(column?: string): IndexBuilder {
    let builder = new IndexBuilder(this.inner);
    if (column !== undefined) {
      builder = builder.column(column);
    }
    return builder;
  }

  /**
   * Create a generic {@link Query} Builder.
   *
   * When appropriate, various indices and statistics-based pruning will be used to
   * accelerate the query.
   *
   * @example
   *
   * ### Run a SQL-style query
   * ```typescript
   * for await (const batch of table.query()
   *   .filter("id > 1").select(["id"]).limit(20)) {
   *   console.log(batch);
   * }
   * ```
   *
   * ### Run Top-10 vector similarity search
   * ```typescript
   * for await (const batch of table.query()
   *   .nearestTo([1, 2, 3])
   *   .refineFactor(5).nprobes(10)
   *   .limit(10)) {
   *   console.log(batch);
   * }
   * ```
   *
   * ### Scan the full dataset
   * ```typescript
   * for await (const batch of table.query()) {
   *   console.log(batch);
   * }
   * ```
   *
   * ### Return the full dataset as Arrow Table
   * ```typescript
   * let arrowTbl = await table.query().nearestTo([1.0, 2.0, 0.5, 6.7]).toArrow();
   * ```
   *
   * @returns {@link Query}
   */
  query(): Query {
    return new Query(this.inner);
  }

  /** Search the table with a given query vector.
   *
   * This is a convenience method for preparing an ANN {@link Query}.
   */
  search(vector: number[], column?: string): Query {
    const q = this.query();
    q.nearestTo(vector);
    if (column !== undefined) {
      q.column(column);
    }
    return q;
  }
}
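
// A minimal add / search / delete sketch (record shape and predicate are
// illustrative):
//
//   const tbl = await conn.openTable("my_table");
//   await tbl.add([{ id: 3, vector: [0.7, 0.8] }]);
//   const hits = await tbl.search([0.7, 0.8]).limit(1).toArray();
//   await tbl.delete("id = 3");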
@@ -3,7 +3,7 @@ name = "lancedb"
version = "0.5.1"
dependencies = [
    "deprecation",
    "pylance==0.9.7",
    "pylance==0.9.9",
    "ratelimiter~=1.0",
    "retry>=0.9.2",
    "tqdm>=4.27.0",
@@ -1,6 +1,6 @@
[package]
name = "vectordb-node"
version = "0.4.3"
version = "0.4.6"
description = "Serverless, low-latency vector database for AI applications"
license = "Apache-2.0"
edition = "2018"
@@ -1,4 +1,4 @@
// Copyright 2023 Lance Developers.
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -19,19 +19,8 @@ use arrow_array::RecordBatch;
use arrow_ipc::reader::FileReader;
use arrow_ipc::writer::FileWriter;
use arrow_schema::SchemaRef;
use vectordb::table::VECTOR_COLUMN_NAME;

use crate::error::{MissingColumnSnafu, Result};
use snafu::prelude::*;

fn validate_vector_column(record_batch: &RecordBatch) -> Result<()> {
    record_batch
        .column_by_name(VECTOR_COLUMN_NAME)
        .map(|_| ())
        .context(MissingColumnSnafu {
            name: VECTOR_COLUMN_NAME,
        })
}
use crate::error::Result;

pub(crate) fn arrow_buffer_to_record_batch(slice: &[u8]) -> Result<(Vec<RecordBatch>, SchemaRef)> {
    let mut batches: Vec<RecordBatch> = Vec::new();
@@ -39,7 +28,6 @@ pub(crate) fn arrow_buffer_to_record_batch(slice: &[u8]) -> Result<(Vec<RecordBa
    let schema = file_reader.schema();
    for b in file_reader {
        let record_batch = b?;
        validate_vector_column(&record_batch)?;
        batches.push(record_batch);
    }
    Ok((batches, schema))
@@ -19,6 +19,7 @@ use neon::{
};

use crate::{error::ResultExt, runtime, table::JsTable};
use vectordb::Table;

pub(crate) fn table_create_scalar_index(mut cx: FunctionContext) -> JsResult<JsPromise> {
    let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
@@ -29,10 +30,16 @@ pub(crate) fn table_create_scalar_index(mut cx: FunctionContext) -> JsResult<JsP
    let (deferred, promise) = cx.promise();
    let channel = cx.channel();
    let mut table = js_table.table.clone();
    let table = js_table.table.clone();

    rt.spawn(async move {
        let idx_result = table.create_scalar_index(&column, replace).await;
        let idx_result = table
            .as_native()
            .unwrap()
            .create_index(&[&column])
            .replace(replace)
            .build()
            .await;

        deferred.settle_with(&channel, move |mut cx| {
            idx_result.or_throw(&mut cx)?;
@@ -12,13 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use lance_index::vector::{ivf::IvfBuildParams, pq::PQBuildParams};
use lance_linalg::distance::MetricType;
use neon::context::FunctionContext;
use neon::prelude::*;
use std::convert::TryFrom;

use vectordb::index::vector::{IvfPQIndexBuilder, VectorIndexBuilder};
use vectordb::index::IndexBuilder;

use crate::error::Error::InvalidIndexType;
use crate::error::ResultExt;
@@ -29,17 +27,24 @@ use crate::table::JsTable;
pub(crate) fn table_create_vector_index(mut cx: FunctionContext) -> JsResult<JsPromise> {
    let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
    let index_params = cx.argument::<JsObject>(0)?;
    let index_params_builder = get_index_params_builder(&mut cx, index_params).or_throw(&mut cx)?;

    let rt = runtime(&mut cx)?;

    let (deferred, promise) = cx.promise();
    let channel = cx.channel();
    let mut table = js_table.table.clone();
    let table = js_table.table.clone();

    let column_name = index_params
        .get_opt::<JsString, _, _>(&mut cx, "column")?
        .map(|s| s.value(&mut cx))
        .unwrap_or("vector".to_string()); // Backward compatibility

    let tbl = table.clone();
    let mut index_builder = tbl.create_index(&[&column_name]);
    get_index_params_builder(&mut cx, index_params, &mut index_builder).or_throw(&mut cx)?;

    rt.spawn(async move {
        let idx_result = table.create_index(&index_params_builder).await;

        let idx_result = index_builder.build().await;
        deferred.settle_with(&channel, move |mut cx| {
            idx_result.or_throw(&mut cx)?;
            Ok(cx.boxed(JsTable::from(table)))
@@ -51,66 +56,39 @@ pub(crate) fn table_create_vector_index(mut cx: FunctionContext) -> JsResult<JsP
fn get_index_params_builder(
    cx: &mut FunctionContext,
    obj: Handle<JsObject>,
) -> crate::error::Result<impl VectorIndexBuilder> {
    let idx_type = obj.get::<JsString, _, _>(cx, "type")?.value(cx);

    match idx_type.as_str() {
        "ivf_pq" => {
            let mut index_builder: IvfPQIndexBuilder = IvfPQIndexBuilder::new();
            let mut pq_params = PQBuildParams::default();

            obj.get_opt::<JsString, _, _>(cx, "column")?
                .map(|s| index_builder.column(s.value(cx)));

            obj.get_opt::<JsString, _, _>(cx, "index_name")?
                .map(|s| index_builder.index_name(s.value(cx)));

            if let Some(metric_type) = obj.get_opt::<JsString, _, _>(cx, "metric_type")? {
                let metric_type = MetricType::try_from(metric_type.value(cx).as_str()).unwrap();
                index_builder.metric_type(metric_type);
            }

            let num_partitions = obj.get_opt_usize(cx, "num_partitions")?;
            let max_iters = obj.get_opt_usize(cx, "max_iters")?;

            num_partitions.map(|np| {
                let max_iters = max_iters.unwrap_or(50);
                let ivf_params = IvfBuildParams {
                    num_partitions: np,
                    max_iters,
                    ..Default::default()
                };
                index_builder.ivf_params(ivf_params)
            });

            if let Some(use_opq) = obj.get_opt::<JsBoolean, _, _>(cx, "use_opq")? {
                pq_params.use_opq = use_opq.value(cx);
            }

            if let Some(num_sub_vectors) = obj.get_opt_usize(cx, "num_sub_vectors")? {
                pq_params.num_sub_vectors = num_sub_vectors;
            }

            if let Some(num_bits) = obj.get_opt_usize(cx, "num_bits")? {
                pq_params.num_bits = num_bits;
            }

            if let Some(max_iters) = obj.get_opt_usize(cx, "max_iters")? {
                pq_params.max_iters = max_iters;
            }

            if let Some(max_opq_iters) = obj.get_opt_usize(cx, "max_opq_iters")? {
                pq_params.max_opq_iters = max_opq_iters;
            }

            if let Some(replace) = obj.get_opt::<JsBoolean, _, _>(cx, "replace")? {
                index_builder.replace(replace.value(cx));
            }

            Ok(index_builder)
    builder: &mut IndexBuilder,
) -> crate::error::Result<()> {
    match obj.get::<JsString, _, _>(cx, "type")?.value(cx).as_str() {
        "ivf_pq" => builder.ivf_pq(),
        _ => {
            return Err(InvalidIndexType {
                index_type: "".into(),
            })
        }
        index_type => Err(InvalidIndexType {
            index_type: index_type.into(),
        }),
    };

    obj.get_opt::<JsString, _, _>(cx, "index_name")?
        .map(|s| builder.name(s.value(cx).as_str()));

    if let Some(metric_type) = obj.get_opt::<JsString, _, _>(cx, "metric_type")? {
        let metric_type = MetricType::try_from(metric_type.value(cx).as_str())?;
        builder.metric_type(metric_type);
    }

    if let Some(np) = obj.get_opt_u32(cx, "num_partitions")? {
        builder.num_partitions(np);
    }
    if let Some(ns) = obj.get_opt_u32(cx, "num_sub_vectors")? {
        builder.num_sub_vectors(ns);
    }
    if let Some(max_iters) = obj.get_opt_u32(cx, "max_iters")? {
        builder.max_iterations(max_iters);
    }
    if let Some(num_bits) = obj.get_opt_u32(cx, "num_bits")? {
        builder.num_bits(num_bits);
    }
    if let Some(replace) = obj.get_opt::<JsBoolean, _, _>(cx, "replace")? {
        builder.replace(replace.value(cx));
    }
    Ok(())
}
@@ -15,7 +15,7 @@
use std::sync::Arc;

use async_trait::async_trait;
use lance::io::object_store::ObjectStoreParams;
use lance::io::ObjectStoreParams;
use neon::prelude::*;
use object_store::aws::{AwsCredential, AwsCredentialProvider};
use object_store::CredentialProvider;
@@ -40,17 +40,6 @@ impl JsQuery {
        }
        projection_vec
    });
    let filter = query_obj
        .get_opt::<JsString, _, _>(&mut cx, "_filter")?
        .map(|s| s.value(&mut cx));
    let refine_factor = query_obj
        .get_opt_u32(&mut cx, "_refineFactor")
        .or_throw(&mut cx)?;
    let nprobes = query_obj.get_usize(&mut cx, "_nprobes").or_throw(&mut cx)?;
    let metric_type = query_obj
        .get_opt::<JsString, _, _>(&mut cx, "_metricType")?
        .map(|s| s.value(&mut cx))
        .map(|s| MetricType::try_from(s.as_str()).unwrap());

    let prefilter = query_obj
        .get::<JsBoolean, _, _>(&mut cx, "_prefilter")?
@@ -65,24 +54,41 @@ impl JsQuery {
    let (deferred, promise) = cx.promise();
    let channel = cx.channel();
    let query_vector = query_obj.get_opt::<JsArray, _, _>(&mut cx, "_queryVector")?;
    let table = js_table.table.clone();
    let query = query_vector.map(|q| convert::js_array_to_vec(q.deref(), &mut cx));

    let query_vector = query_obj.get_opt::<JsArray, _, _>(&mut cx, "_queryVector")?;
    let mut builder = table.query();
    if let Some(query) = query_vector.map(|q| convert::js_array_to_vec(q.deref(), &mut cx)) {
        builder = builder.nearest_to(&query);
        if let Some(metric_type) = query_obj
            .get_opt::<JsString, _, _>(&mut cx, "_metricType")?
            .map(|s| s.value(&mut cx))
            .map(|s| MetricType::try_from(s.as_str()).unwrap())
        {
            builder = builder.metric_type(metric_type);
        }

        let nprobes = query_obj.get_usize(&mut cx, "_nprobes").or_throw(&mut cx)?;
        builder = builder.nprobes(nprobes);
    };

    if let Some(filter) = query_obj
        .get_opt::<JsString, _, _>(&mut cx, "_filter")?
        .map(|s| s.value(&mut cx))
    {
        builder = builder.filter(filter);
    }
    if let Some(select) = select {
        builder = builder.select(select.as_slice());
    }
    if let Some(limit) = limit {
        builder = builder.limit(limit as usize);
    };

    builder = builder.prefilter(prefilter);

    rt.spawn(async move {
        let mut builder = table
            .search(query)
            .refine_factor(refine_factor)
            .nprobes(nprobes)
            .filter(filter)
            .metric_type(metric_type)
            .select(select)
            .prefilter(prefilter);
        if let Some(limit) = limit {
            builder = builder.limit(limit as usize);
        };

        let record_batch_stream = builder.execute();
        let record_batch_stream = builder.execute_stream();
        let results = record_batch_stream
            .and_then(|stream| {
                stream
@@ -15,24 +15,25 @@
use arrow_array::{RecordBatch, RecordBatchIterator};
use lance::dataset::optimize::CompactionOptions;
use lance::dataset::{WriteMode, WriteParams};
use lance::io::object_store::ObjectStoreParams;
use lance::io::ObjectStoreParams;
use vectordb::table::OptimizeAction;

use crate::arrow::{arrow_buffer_to_record_batch, record_batch_to_buffer};
use neon::prelude::*;
use neon::types::buffer::TypedArray;
use vectordb::Table;
use vectordb::TableRef;

use crate::error::ResultExt;
use crate::{convert, get_aws_creds, get_aws_region, runtime, JsDatabase};

pub(crate) struct JsTable {
    pub table: Table,
    pub table: TableRef,
}

impl Finalize for JsTable {}

impl From<Table> for JsTable {
    fn from(table: Table) -> Self {
impl From<TableRef> for JsTable {
    fn from(table: TableRef) -> Self {
        JsTable { table }
    }
}
@@ -96,7 +97,7 @@ impl JsTable {
        arrow_buffer_to_record_batch(buffer.as_slice(&cx)).or_throw(&mut cx)?;
    let rt = runtime(&mut cx)?;
    let channel = cx.channel();
    let mut table = js_table.table.clone();
    let table = js_table.table.clone();

    let (deferred, promise) = cx.promise();
    let write_mode = match write_mode.as_str() {
@@ -118,7 +119,7 @@ impl JsTable {
    rt.spawn(async move {
        let batch_reader = RecordBatchIterator::new(batches.into_iter().map(Ok), schema);
        let add_result = table.add(batch_reader, Some(params)).await;
        let add_result = table.add(Box::new(batch_reader), Some(params)).await;

        deferred.settle_with(&channel, move |mut cx| {
            add_result.or_throw(&mut cx)?;
@@ -152,7 +153,7 @@ impl JsTable {
    let (deferred, promise) = cx.promise();
    let predicate = cx.argument::<JsString>(0)?.value(&mut cx);
    let channel = cx.channel();
    let mut table = js_table.table.clone();
    let table = js_table.table.clone();

    rt.spawn(async move {
        let delete_result = table.delete(&predicate).await;
@@ -167,7 +168,7 @@ impl JsTable {
pub(crate) fn js_update(mut cx: FunctionContext) -> JsResult<JsPromise> {
    let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
    let mut table = js_table.table.clone();
    let table = js_table.table.clone();

    let rt = runtime(&mut cx)?;
    let (deferred, promise) = cx.promise();
@@ -218,7 +219,11 @@ impl JsTable {
        let predicate = predicate.as_deref();

        let update_result = table.update(predicate, updates_arg).await;
        let update_result = table
            .as_native()
            .unwrap()
            .update(predicate, updates_arg)
            .await;
        deferred.settle_with(&channel, move |mut cx| {
            update_result.or_throw(&mut cx)?;
            Ok(cx.boxed(JsTable::from(table)))
@@ -241,25 +246,30 @@ impl JsTable {
        .map(|val| val.value(&mut cx) as i64)
        .unwrap_or_else(|| 2 * 7 * 24 * 60); // 2 weeks
    let older_than = chrono::Duration::minutes(older_than);
    let delete_unverified: bool = cx
        .argument_opt(1)
        .and_then(|val| val.downcast::<JsBoolean, _>(&mut cx).ok())
        .map(|val| val.value(&mut cx))
        .unwrap_or_default();
    let delete_unverified: Option<bool> = Some(
        cx.argument_opt(1)
            .and_then(|val| val.downcast::<JsBoolean, _>(&mut cx).ok())
            .map(|val| val.value(&mut cx))
            .unwrap_or_default(),
    );

    rt.spawn(async move {
        let stats = table
            .cleanup_old_versions(older_than, Some(delete_unverified))
            .optimize(OptimizeAction::Prune {
                older_than,
                delete_unverified,
            })
            .await;

        deferred.settle_with(&channel, move |mut cx| {
            let stats = stats.or_throw(&mut cx)?;

            let prune_stats = stats.prune.as_ref().expect("Prune stats missing");
            let output_metrics = JsObject::new(&mut cx);
            let bytes_removed = cx.number(stats.bytes_removed as f64);
            let bytes_removed = cx.number(prune_stats.bytes_removed as f64);
            output_metrics.set(&mut cx, "bytesRemoved", bytes_removed)?;

            let old_versions = cx.number(stats.old_versions as f64);
            let old_versions = cx.number(prune_stats.old_versions as f64);
            output_metrics.set(&mut cx, "oldVersions", old_versions)?;

            let output_table = cx.boxed(JsTable::from(table));
@@ -278,7 +288,7 @@ impl JsTable {
    let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
    let rt = runtime(&mut cx)?;
    let (deferred, promise) = cx.promise();
    let mut table = js_table.table.clone();
    let table = js_table.table.clone();
    let channel = cx.channel();

    let js_options = cx.argument::<JsObject>(0)?;
@@ -310,10 +320,16 @@ impl JsTable {
    }

    rt.spawn(async move {
        let stats = table.compact_files(options, None).await;
        let stats = table
            .optimize(OptimizeAction::Compact {
                options,
                remap_options: None,
            })
            .await;

        deferred.settle_with(&channel, move |mut cx| {
            let stats = stats.or_throw(&mut cx)?;
            let stats = stats.compaction.as_ref().expect("Compact stats missing");

            let output_metrics = JsObject::new(&mut cx);
            let fragments_removed = cx.number(stats.fragments_removed as f64);
@@ -349,7 +365,7 @@ impl JsTable {
    let table = js_table.table.clone();

    rt.spawn(async move {
        let indices = table.load_indices().await;
        let indices = table.as_native().unwrap().load_indices().await;

        deferred.settle_with(&channel, move |mut cx| {
            let indices = indices.or_throw(&mut cx)?;
@@ -389,8 +405,8 @@ impl JsTable {
    rt.spawn(async move {
        let load_stats = futures::try_join!(
            table.count_indexed_rows(&index_uuid),
            table.count_unindexed_rows(&index_uuid)
            table.as_native().unwrap().count_indexed_rows(&index_uuid),
            table.as_native().unwrap().count_unindexed_rows(&index_uuid)
        );

        deferred.settle_with(&channel, move |mut cx| {
@@ -1,6 +1,6 @@
[package]
name = "vectordb"
version = "0.4.3"
version = "0.4.6"
edition = "2021"
description = "LanceDB: A serverless, low-latency vector database for AI applications"
license = "Apache-2.0"
@@ -16,6 +16,7 @@ arrow-data = { workspace = true }
arrow-schema = { workspace = true }
arrow-ord = { workspace = true }
arrow-cast = { workspace = true }
arrow-ipc.workspace = true
chrono = { workspace = true }
object_store = { workspace = true }
snafu = { workspace = true }
@@ -25,11 +26,11 @@ lance-index = { workspace = true }
lance-linalg = { workspace = true }
lance-testing = { workspace = true }
tokio = { version = "1.23", features = ["rt-multi-thread"] }
log = { workspace = true }
log.workspace = true
async-trait = "0"
bytes = "1"
futures = "0"
num-traits = "0"
futures.workspace = true
num-traits.workspace = true
url = { workspace = true }
serde = { version = "^1" }
serde_json = { version = "1" }
@@ -37,4 +38,4 @@ serde_json = { version = "1" }
[dev-dependencies]
tempfile = "3.5.0"
rand = { version = "0.8.3", features = ["small_rng"] }
walkdir = "2"
walkdir = "2"
@@ -1,3 +1,8 @@
# LanceDB Rust

Rust client for LanceDB, a serverless vector database. Read more at: https://lancedb.com/
<a href="https://crates.io/crates/vectordb"></a>
<a href="https://docs.rs/vectordb/latest/vectordb/"></a>

LanceDB Rust SDK, a serverless vector database.

Read more at: https://lancedb.com/
@@ -21,13 +21,13 @@ use std::sync::Arc;
use std::sync::Arc;

use arrow_array::RecordBatchReader;
use lance::dataset::WriteParams;
use lance::io::object_store::{ObjectStore, WrappingObjectStore};
use lance::io::{ObjectStore, WrappingObjectStore};
use object_store::local::LocalFileSystem;
use snafu::prelude::*;

use crate::error::{CreateDirSnafu, Error, InvalidTableNameSnafu, Result};
use crate::io::object_store::MirroringObjectStoreWrapper;
use crate::table::{ReadParams, Table};
use crate::table::{NativeTable, ReadParams, TableRef};

pub const LANCE_FILE_EXTENSION: &str = "lance";
@@ -46,17 +46,20 @@ pub trait Connection: Send + Sync {
    /// * `params` - Optional [`WriteParams`] to create the table.
    ///
    /// # Returns
    /// Created [`Table`], or [`Err(Error::TableAlreadyExists)`] if the table already exists.
    /// Created [`TableRef`], or [`Err(Error::TableAlreadyExists)`] if the table already exists.
    async fn create_table(
        &self,
        name: &str,
        batches: Box<dyn RecordBatchReader + Send>,
        params: Option<WriteParams>,
    ) -> Result<Table>;
    ) -> Result<TableRef>;

    async fn open_table(&self, name: &str) -> Result<Table>;
    async fn open_table(&self, name: &str) -> Result<TableRef> {
        self.open_table_with_params(name, ReadParams::default())
            .await
    }

    async fn open_table_with_params(&self, name: &str, params: ReadParams) -> Result<Table>;
    async fn open_table_with_params(&self, name: &str, params: ReadParams) -> Result<TableRef>;

    /// Drop a table in the database.
    ///
@@ -65,6 +68,87 @@ pub trait Connection: Send + Sync {
    async fn drop_table(&self, name: &str) -> Result<()>;
}

#[derive(Debug)]
pub struct ConnectOptions {
    /// Database URI
    ///
    /// # Accepted URI formats
    ///
    /// - `/path/to/database` - local database on file system.
    /// - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud object store
    /// - `db://dbname` - Lance Cloud
    pub uri: String,

    /// Lance Cloud API key
    pub api_key: Option<String>,
    /// Lance Cloud region
    pub region: Option<String>,
    /// Lance Cloud host override
    pub host_override: Option<String>,

    /// The maximum number of indices to cache in memory. Defaults to 256.
    pub index_cache_size: u32,
}

impl ConnectOptions {
    /// Create a new [`ConnectOptions`] with the given database URI.
    pub fn new(uri: &str) -> Self {
        Self {
            uri: uri.to_string(),
            api_key: None,
            region: None,
            host_override: None,
            index_cache_size: 256,
        }
    }

    pub fn api_key(mut self, api_key: &str) -> Self {
        self.api_key = Some(api_key.to_string());
        self
    }

    pub fn region(mut self, region: &str) -> Self {
        self.region = Some(region.to_string());
        self
    }

    pub fn host_override(mut self, host_override: &str) -> Self {
        self.host_override = Some(host_override.to_string());
        self
    }

    pub fn index_cache_size(mut self, index_cache_size: u32) -> Self {
        self.index_cache_size = index_cache_size;
        self
    }
}

/// Connect to a LanceDB database.
///
/// # Arguments
///
/// - `uri` - URI where the database is located, can be a local file or a supported remote cloud storage
///
/// ## Accepted URI formats
///
/// - `/path/to/database` - local database on file system.
/// - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud object store
/// - `db://dbname` - Lance Cloud
///
pub async fn connect(uri: &str) -> Result<Arc<dyn Connection>> {
    let options = ConnectOptions::new(uri);
    connect_with_options(&options).await
}

/// Connect with [`ConnectOptions`].
///
/// # Arguments
/// - `options` - [`ConnectOptions`] to connect to the database.
pub async fn connect_with_options(options: &ConnectOptions) -> Result<Arc<dyn Connection>> {
    let db = Database::connect(&options.uri).await?;
    Ok(Arc::new(db))
}

pub struct Database {
    object_store: ObjectStore,
    query_string: Option<String>,
@@ -240,30 +324,19 @@ impl Connection for Database {
|
||||
name: &str,
|
||||
batches: Box<dyn RecordBatchReader + Send>,
|
||||
params: Option<WriteParams>,
|
||||
) -> Result<Table> {
|
||||
) -> Result<TableRef> {
|
||||
let table_uri = self.table_uri(name)?;
|
||||
|
||||
Table::create(
|
||||
&table_uri,
|
||||
name,
|
||||
batches,
|
||||
self.store_wrapper.clone(),
|
||||
params,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Open a table in the database.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `name` - The name of the table.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * A [Table] object.
|
||||
async fn open_table(&self, name: &str) -> Result<Table> {
|
||||
self.open_table_with_params(name, ReadParams::default())
|
||||
.await
|
||||
Ok(Arc::new(
|
||||
NativeTable::create(
|
||||
&table_uri,
|
||||
name,
|
||||
batches,
|
||||
self.store_wrapper.clone(),
|
||||
params,
|
||||
)
|
||||
.await?,
|
||||
))
|
||||
}
|
||||
|
||||
/// Open a table in the database.
|
||||
@@ -274,10 +347,13 @@ impl Connection for Database {
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * A [Table] object.
|
||||
async fn open_table_with_params(&self, name: &str, params: ReadParams) -> Result<Table> {
|
||||
/// * A [TableRef] object.
|
||||
async fn open_table_with_params(&self, name: &str, params: ReadParams) -> Result<TableRef> {
|
||||
let table_uri = self.table_uri(name)?;
|
||||
Table::open_with_params(&table_uri, name, self.store_wrapper.clone(), params).await
|
||||
Ok(Arc::new(
|
||||
NativeTable::open_with_params(&table_uri, name, self.store_wrapper.clone(), params)
|
||||
.await?,
|
||||
))
|
||||
}
|
||||
|
||||
async fn drop_table(&self, name: &str) -> Result<()> {
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::PoisonError;
|
||||
|
||||
use arrow_schema::ArrowError;
|
||||
use snafu::Snafu;
|
||||
|
||||
@@ -35,6 +37,8 @@ pub enum Error {
|
||||
Lance { message: String },
|
||||
#[snafu(display("LanceDB Schema Error: {message}"))]
|
||||
Schema { message: String },
|
||||
#[snafu(display("Runtime error: {message}"))]
|
||||
Runtime { message: String },
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -70,3 +74,11 @@ impl From<object_store::path::Error> for Error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<PoisonError<T>> for Error {
|
||||
fn from(e: PoisonError<T>) -> Self {
|
||||
Self::Runtime {
|
||||
message: e.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
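The `PoisonError` conversion above is what allows lock acquisition to use `?` rather than `expect`; a minimal sketch of the pattern it enables (`read_counter` is a hypothetical helper, not part of this crate):

```rust
use std::sync::Mutex;

// Hypothetical helper: a poisoned lock is converted into Error::Runtime
// by the From<PoisonError<T>> impl above, so `?` just works.
fn read_counter(counter: &Mutex<u64>) -> Result<u64> {
    let guard = counter.lock()?;
    Ok(*guard)
}
```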
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Lance Developers.
|
||||
// Copyright 2024 Lance Developers.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -12,4 +12,286 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::{cmp::max, sync::Arc};
|
||||
|
||||
use lance::index::scalar::ScalarIndexParams;
|
||||
use lance_index::{DatasetIndexExt, IndexType};
|
||||
pub use lance_linalg::distance::MetricType;
|
||||
|
||||
pub mod vector;
|
||||
|
||||
use crate::{utils::default_vector_column, Error, Result, Table};
|
||||
|
||||
/// Index Parameters.
|
||||
pub enum IndexParams {
|
||||
Scalar {
|
||||
replace: bool,
|
||||
},
|
||||
IvfPq {
|
||||
replace: bool,
|
||||
metric_type: MetricType,
|
||||
num_partitions: u64,
|
||||
num_sub_vectors: u32,
|
||||
num_bits: u32,
|
||||
sample_rate: u32,
|
||||
max_iterations: u32,
|
||||
},
|
||||
}
|
||||
|
||||
/// Builder for Index Parameters.
|
||||
|
||||
pub struct IndexBuilder {
|
||||
table: Arc<dyn Table>,
|
||||
columns: Vec<String>,
|
||||
// General parameters
|
||||
/// Index name.
|
||||
name: Option<String>,
|
||||
/// Replace the existing index.
|
||||
replace: bool,
|
||||
|
||||
index_type: IndexType,
|
||||
|
||||
// Scalar index parameters
|
||||
// Nothing to set here.
|
||||
|
||||
// IVF_PQ parameters
|
||||
metric_type: MetricType,
|
||||
num_partitions: Option<u32>,
|
||||
// PQ related
|
||||
num_sub_vectors: Option<u32>,
|
||||
num_bits: u32,
|
||||
|
||||
/// The sampling rate used when selecting vectors to train kmeans.
sample_rate: u32,
/// Maximum number of iterations to train kmeans.
max_iterations: u32,
|
||||
}
|
||||
|
||||
impl IndexBuilder {
|
||||
pub(crate) fn new(table: Arc<dyn Table>, columns: &[&str]) -> Self {
|
||||
IndexBuilder {
|
||||
table,
|
||||
columns: columns.iter().map(|c| c.to_string()).collect(),
|
||||
name: None,
|
||||
replace: true,
|
||||
index_type: IndexType::Scalar,
|
||||
metric_type: MetricType::L2,
|
||||
num_partitions: None,
|
||||
num_sub_vectors: None,
|
||||
num_bits: 8,
|
||||
sample_rate: 256,
|
||||
max_iterations: 50,
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a Scalar Index.
|
||||
///
|
||||
/// Accepted parameters:
|
||||
/// - `replace`: Replace the existing index.
|
||||
/// - `name`: Index name. Default: `None`
|
||||
pub fn scalar(&mut self) -> &mut Self {
|
||||
self.index_type = IndexType::Scalar;
|
||||
self
|
||||
}
|
||||
|
||||
/// Build an IVF PQ index.
|
||||
///
|
||||
/// Accepted parameters:
|
||||
/// - `replace`: Replace the existing index.
|
||||
/// - `name`: Index name. Default: `None`
|
||||
/// - `metric_type`: [MetricType] to use to build Vector Index.
|
||||
/// - `num_partitions`: Number of IVF partitions.
|
||||
/// - `num_sub_vectors`: Number of sub-vectors of PQ.
|
||||
/// - `num_bits`: Number of bits used for PQ centroids.
|
||||
/// - `sample_rate`: The sampling rate used when selecting vectors to train kmeans.
/// - `max_iterations`: Maximum number of iterations to train kmeans.
|
||||
pub fn ivf_pq(&mut self) -> &mut Self {
|
||||
self.index_type = IndexType::Vector;
|
||||
self
|
||||
}
|
||||
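A hedged sketch of a typical builder chain combining the two index types' entry points with the setters below (the parameter values are illustrative, not tuned recommendations):

```rust
// Build an IVF_PQ index on the "vector" column, overriding a few defaults.
tbl.create_index(&["vector"])
    .ivf_pq()
    .metric_type(MetricType::Cosine)
    .num_partitions(256)
    .num_sub_vectors(16)
    .build()
    .await?;

// Build a scalar index on "id", keeping all defaults.
tbl.create_index(&["id"]).scalar().build().await?;
```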
|
||||
/// The columns to build index on.
|
||||
pub fn columns(&mut self, cols: &[&str]) -> &mut Self {
|
||||
self.columns = cols.iter().map(|s| s.to_string()).collect();
|
||||
self
|
||||
}
|
||||
|
||||
/// Whether to replace the existing index, default is `true`.
|
||||
pub fn replace(&mut self, v: bool) -> &mut Self {
|
||||
self.replace = v;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the index name.
|
||||
pub fn name(&mut self, name: &str) -> &mut Self {
|
||||
self.name = Some(name.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// [MetricType] to use to build Vector Index.
|
||||
///
|
||||
/// Default value is [MetricType::L2].
|
||||
pub fn metric_type(&mut self, metric_type: MetricType) -> &mut Self {
|
||||
self.metric_type = metric_type;
|
||||
self
|
||||
}
|
||||
|
||||
/// Number of IVF partitions.
|
||||
pub fn num_partitions(&mut self, num_partitions: u32) -> &mut Self {
|
||||
self.num_partitions = Some(num_partitions);
|
||||
self
|
||||
}
|
||||
|
||||
/// Number of sub-vectors of PQ.
|
||||
pub fn num_sub_vectors(&mut self, num_sub_vectors: u32) -> &mut Self {
|
||||
self.num_sub_vectors = Some(num_sub_vectors);
|
||||
self
|
||||
}
|
||||
|
||||
/// Number of bits used for PQ centroids.
|
||||
pub fn num_bits(&mut self, num_bits: u32) -> &mut Self {
|
||||
self.num_bits = num_bits;
|
||||
self
|
||||
}
|
||||
|
||||
/// The sampling rate used when selecting vectors to train kmeans.
|
||||
pub fn sample_rate(&mut self, sample_rate: u32) -> &mut Self {
|
||||
self.sample_rate = sample_rate;
|
||||
self
|
||||
}
|
||||
|
||||
/// Maximum number of iterations to train kmeans.
|
||||
pub fn max_iterations(&mut self, max_iterations: u32) -> &mut Self {
|
||||
self.max_iterations = max_iterations;
|
||||
self
|
||||
}
|
||||
|
||||
/// Build the parameters.
|
||||
pub async fn build(&self) -> Result<()> {
|
||||
let schema = self.table.schema();
|
||||
|
||||
// TODO: simplify this after GH lance#1864.
|
||||
let mut index_type = &self.index_type;
|
||||
let columns = if self.columns.is_empty() {
|
||||
// By default we create a vector index.
|
||||
index_type = &IndexType::Vector;
|
||||
vec![default_vector_column(&schema, None)?]
|
||||
} else {
|
||||
self.columns.clone()
|
||||
};
|
||||
|
||||
if columns.len() != 1 {
|
||||
return Err(Error::Schema {
|
||||
message: "Only one column is supported for index".to_string(),
|
||||
});
|
||||
}
|
||||
let column = &columns[0];
|
||||
|
||||
let field = schema.field_with_name(column)?;
|
||||
|
||||
let params = match index_type {
|
||||
IndexType::Scalar => IndexParams::Scalar {
|
||||
replace: self.replace,
|
||||
},
|
||||
IndexType::Vector => {
|
||||
let num_partitions = if let Some(n) = self.num_partitions {
|
||||
n
|
||||
} else {
|
||||
suggested_num_partitions(self.table.count_rows().await?)
|
||||
};
|
||||
let num_sub_vectors: u32 = if let Some(n) = self.num_sub_vectors {
|
||||
n
|
||||
} else {
|
||||
match field.data_type() {
|
||||
arrow_schema::DataType::FixedSizeList(_, n) => {
|
||||
Ok::<u32, Error>(suggested_num_sub_vectors(*n as u32))
|
||||
}
|
||||
_ => Err(Error::Schema {
|
||||
message: format!(
|
||||
"Column '{}' is not a FixedSizeList",
|
||||
&self.columns[0]
|
||||
),
|
||||
}),
|
||||
}?
|
||||
};
|
||||
IndexParams::IvfPq {
|
||||
replace: self.replace,
|
||||
metric_type: self.metric_type,
|
||||
num_partitions: num_partitions as u64,
|
||||
num_sub_vectors,
|
||||
num_bits: self.num_bits,
|
||||
sample_rate: self.sample_rate,
|
||||
max_iterations: self.max_iterations,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let tbl = self
|
||||
.table
|
||||
.as_native()
|
||||
.expect("Only native table is supported here");
|
||||
let mut dataset = tbl.clone_inner_dataset();
|
||||
match params {
|
||||
IndexParams::Scalar { replace } => {
|
||||
dataset
|
||||
.create_index(
|
||||
&[&column],
|
||||
IndexType::Scalar,
|
||||
None,
|
||||
&ScalarIndexParams::default(),
|
||||
replace,
|
||||
)
|
||||
.await?
|
||||
}
|
||||
IndexParams::IvfPq {
|
||||
replace,
|
||||
metric_type,
|
||||
num_partitions,
|
||||
num_sub_vectors,
|
||||
num_bits,
|
||||
max_iterations,
|
||||
..
|
||||
} => {
|
||||
let lance_idx_params = lance::index::vector::VectorIndexParams::ivf_pq(
|
||||
num_partitions as usize,
|
||||
num_bits as u8,
|
||||
num_sub_vectors as usize,
|
||||
false,
|
||||
metric_type,
|
||||
max_iterations as usize,
|
||||
);
|
||||
dataset
|
||||
.create_index(
|
||||
&[column],
|
||||
IndexType::Vector,
|
||||
None,
|
||||
&lance_idx_params,
|
||||
replace,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
tbl.reset_dataset(dataset);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn suggested_num_partitions(rows: usize) -> u32 {
|
||||
let num_partitions = (rows as f64).sqrt() as u32;
|
||||
max(1, num_partitions)
|
||||
}
|
||||
|
||||
fn suggested_num_sub_vectors(dim: u32) -> u32 {
|
||||
if dim % 16 == 0 {
|
||||
// Should be more aggressive than this default.
|
||||
dim / 16
|
||||
} else if dim % 8 == 0 {
|
||||
dim / 8
|
||||
} else {
|
||||
log::warn!(
|
||||
"The dimension of the vector is not divisible by 8 or 16, \
|
||||
which may cause performance degradation in PQ"
|
||||
);
|
||||
1
|
||||
}
|
||||
}
|
||||
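Worked through on concrete row counts and dimensions, the heuristics above behave like this (a test-style sketch mirroring the logic, with hypothetical input values):

```rust
#[test]
fn heuristic_examples() {
    // sqrt(1_000_000) = 1000 IVF partitions for a million rows.
    assert_eq!(suggested_num_partitions(1_000_000), 1000);
    // 768 is divisible by 16, so 768 / 16 = 48 sub-vectors.
    assert_eq!(suggested_num_sub_vectors(768), 48);
    // 40 is divisible by 8 but not 16, so 40 / 8 = 5.
    assert_eq!(suggested_num_sub_vectors(40), 5);
    // Dimensions divisible by neither fall back to 1 and log a warning.
    assert_eq!(suggested_num_sub_vectors(17), 1);
}
```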
|
||||
@@ -14,104 +14,7 @@
|
||||
|
||||
use serde::Deserialize;
|
||||
|
||||
use lance::format::{Index, Manifest};
|
||||
use lance::index::vector::pq::PQBuildParams;
|
||||
use lance::index::vector::VectorIndexParams;
|
||||
use lance_index::vector::ivf::IvfBuildParams;
|
||||
use lance_linalg::distance::MetricType;
|
||||
|
||||
pub trait VectorIndexBuilder {
|
||||
fn get_column(&self) -> Option<String>;
|
||||
fn get_index_name(&self) -> Option<String>;
|
||||
fn build(&self) -> VectorIndexParams;
|
||||
|
||||
fn get_replace(&self) -> bool;
|
||||
}
|
||||
|
||||
pub struct IvfPQIndexBuilder {
|
||||
column: Option<String>,
|
||||
index_name: Option<String>,
|
||||
metric_type: Option<MetricType>,
|
||||
ivf_params: Option<IvfBuildParams>,
|
||||
pq_params: Option<PQBuildParams>,
|
||||
replace: bool,
|
||||
}
|
||||
|
||||
impl IvfPQIndexBuilder {
|
||||
pub fn new() -> IvfPQIndexBuilder {
|
||||
Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for IvfPQIndexBuilder {
|
||||
fn default() -> Self {
|
||||
IvfPQIndexBuilder {
|
||||
column: None,
|
||||
index_name: None,
|
||||
metric_type: None,
|
||||
ivf_params: None,
|
||||
pq_params: None,
|
||||
replace: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl IvfPQIndexBuilder {
|
||||
pub fn column(&mut self, column: String) -> &mut IvfPQIndexBuilder {
|
||||
self.column = Some(column);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn index_name(&mut self, index_name: String) -> &mut IvfPQIndexBuilder {
|
||||
self.index_name = Some(index_name);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn metric_type(&mut self, metric_type: MetricType) -> &mut IvfPQIndexBuilder {
|
||||
self.metric_type = Some(metric_type);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn ivf_params(&mut self, ivf_params: IvfBuildParams) -> &mut IvfPQIndexBuilder {
|
||||
self.ivf_params = Some(ivf_params);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn pq_params(&mut self, pq_params: PQBuildParams) -> &mut IvfPQIndexBuilder {
|
||||
self.pq_params = Some(pq_params);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn replace(&mut self, replace: bool) -> &mut IvfPQIndexBuilder {
|
||||
self.replace = replace;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl VectorIndexBuilder for IvfPQIndexBuilder {
|
||||
fn get_column(&self) -> Option<String> {
|
||||
self.column.clone()
|
||||
}
|
||||
|
||||
fn get_index_name(&self) -> Option<String> {
|
||||
self.index_name.clone()
|
||||
}
|
||||
|
||||
fn build(&self) -> VectorIndexParams {
|
||||
let ivf_params = self.ivf_params.clone().unwrap_or_default();
|
||||
let pq_params = self.pq_params.clone().unwrap_or_default();
|
||||
|
||||
VectorIndexParams::with_ivf_pq_params(
|
||||
self.metric_type.unwrap_or(MetricType::L2),
|
||||
ivf_params,
|
||||
pq_params,
|
||||
)
|
||||
}
|
||||
|
||||
fn get_replace(&self) -> bool {
|
||||
self.replace
|
||||
}
|
||||
}
|
||||
use lance::table::format::{Index, Manifest};
|
||||
|
||||
pub struct VectorIndex {
|
||||
pub columns: Vec<String>,
|
||||
@@ -139,79 +42,3 @@ pub struct VectorIndexStatistics {
|
||||
pub num_indexed_rows: usize,
|
||||
pub num_unindexed_rows: usize,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
use lance::index::vector::StageParams;
|
||||
use lance_index::vector::ivf::IvfBuildParams;
|
||||
use lance_index::vector::pq::PQBuildParams;
|
||||
|
||||
use crate::index::vector::{IvfPQIndexBuilder, VectorIndexBuilder};
|
||||
|
||||
#[test]
|
||||
fn test_builder_no_params() {
|
||||
let index_builder = IvfPQIndexBuilder::new();
|
||||
assert!(index_builder.get_column().is_none());
|
||||
assert!(index_builder.get_index_name().is_none());
|
||||
|
||||
let index_params = index_builder.build();
|
||||
assert_eq!(index_params.stages.len(), 2);
|
||||
if let StageParams::Ivf(ivf_params) = index_params.stages.get(0).unwrap() {
|
||||
let default = IvfBuildParams::default();
|
||||
assert_eq!(ivf_params.num_partitions, default.num_partitions);
|
||||
assert_eq!(ivf_params.max_iters, default.max_iters);
|
||||
} else {
|
||||
panic!("Expected first stage to be ivf")
|
||||
}
|
||||
|
||||
if let StageParams::PQ(pq_params) = index_params.stages.get(1).unwrap() {
|
||||
assert_eq!(pq_params.use_opq, false);
|
||||
} else {
|
||||
panic!("Expected second stage to be pq")
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_builder_all_params() {
|
||||
let mut index_builder = IvfPQIndexBuilder::new();
|
||||
|
||||
index_builder
|
||||
.column("c".to_owned())
|
||||
.metric_type(MetricType::Cosine)
|
||||
.index_name("index".to_owned());
|
||||
|
||||
assert_eq!(index_builder.column.clone().unwrap(), "c");
|
||||
assert_eq!(index_builder.metric_type.unwrap(), MetricType::Cosine);
|
||||
assert_eq!(index_builder.index_name.clone().unwrap(), "index");
|
||||
|
||||
let ivf_params = IvfBuildParams::new(500);
|
||||
let mut pq_params = PQBuildParams::default();
|
||||
pq_params.use_opq = true;
|
||||
pq_params.max_iters = 1;
|
||||
pq_params.num_bits = 8;
|
||||
pq_params.num_sub_vectors = 50;
|
||||
pq_params.max_opq_iters = 2;
|
||||
index_builder.ivf_params(ivf_params);
|
||||
index_builder.pq_params(pq_params);
|
||||
|
||||
let index_params = index_builder.build();
|
||||
assert_eq!(index_params.stages.len(), 2);
|
||||
if let StageParams::Ivf(ivf_params) = index_params.stages.get(0).unwrap() {
|
||||
assert_eq!(ivf_params.num_partitions, 500);
|
||||
} else {
|
||||
assert!(false, "Expected first stage to be ivf")
|
||||
}
|
||||
|
||||
if let StageParams::PQ(pq_params) = index_params.stages.get(1).unwrap() {
|
||||
assert_eq!(pq_params.use_opq, true);
|
||||
assert_eq!(pq_params.max_iters, 1);
|
||||
assert_eq!(pq_params.num_bits, 8);
|
||||
assert_eq!(pq_params.num_sub_vectors, 50);
|
||||
assert_eq!(pq_params.max_opq_iters, 2);
|
||||
} else {
|
||||
assert!(false, "Expected second stage to be pq")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ use std::{
|
||||
|
||||
use bytes::Bytes;
|
||||
use futures::{stream::BoxStream, FutureExt, StreamExt};
|
||||
use lance::io::object_store::WrappingObjectStore;
|
||||
use lance::io::WrappingObjectStore;
|
||||
use object_store::{
|
||||
path::Path, Error, GetOptions, GetResult, ListResult, MultipartId, ObjectMeta, ObjectStore,
|
||||
PutOptions, PutResult, Result,
|
||||
@@ -335,14 +335,15 @@ impl WrappingObjectStore for MirroringObjectStoreWrapper {
|
||||
#[cfg(all(test, not(windows)))]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::connection::{Connection, Database};
|
||||
use arrow_array::PrimitiveArray;
|
||||
|
||||
use futures::TryStreamExt;
|
||||
use lance::{dataset::WriteParams, io::object_store::ObjectStoreParams};
|
||||
use lance::{dataset::WriteParams, io::ObjectStoreParams};
|
||||
use lance_testing::datagen::{BatchGenerator, IncrementingInt32, RandomVector};
|
||||
use object_store::local::LocalFileSystem;
|
||||
use tempfile;
|
||||
|
||||
use crate::connection::{Connection, Database};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_e2e() {
|
||||
let dir1 = tempfile::tempdir().unwrap().into_path();
|
||||
@@ -374,11 +375,9 @@ mod test {
|
||||
assert_eq!(t.count_rows().await.unwrap(), 100);
|
||||
|
||||
let q = t
|
||||
.search(Some(PrimitiveArray::from_iter_values(vec![
|
||||
0.1, 0.1, 0.1, 0.1,
|
||||
])))
|
||||
.search(&[0.1, 0.1, 0.1, 0.1])
|
||||
.limit(10)
|
||||
.execute()
|
||||
.execute_stream()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
|
||||
95
rust/vectordb/src/ipc.rs
Normal file
@@ -0,0 +1,95 @@
|
||||
// Copyright 2024 Lance Developers.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! IPC support
|
||||
|
||||
use std::io::Cursor;
|
||||
|
||||
use arrow_array::{RecordBatch, RecordBatchReader};
|
||||
use arrow_ipc::{reader::StreamReader, writer::FileWriter};
|
||||
|
||||
use crate::{Error, Result};
|
||||
|
||||
/// Convert an Arrow IPC file to a batch reader
|
||||
pub fn ipc_file_to_batches(buf: Vec<u8>) -> Result<impl RecordBatchReader> {
|
||||
let buf_reader = Cursor::new(buf);
|
||||
let reader = StreamReader::try_new(buf_reader, None)?;
|
||||
Ok(reader)
|
||||
}
|
||||
|
||||
/// Convert record batches to Arrow IPC file
|
||||
pub fn batches_to_ipc_file(batches: &[RecordBatch]) -> Result<Vec<u8>> {
|
||||
if batches.is_empty() {
|
||||
return Err(Error::Store {
|
||||
message: "No batches to write".to_string(),
|
||||
});
|
||||
}
|
||||
let schema = batches[0].schema();
|
||||
let mut writer = FileWriter::try_new(vec![], &schema)?;
|
||||
for batch in batches {
|
||||
writer.write(batch)?;
|
||||
}
|
||||
writer.finish()?;
|
||||
Ok(writer.into_inner()?)
|
||||
}
|
||||
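One asymmetry worth noting in the pair above: `ipc_file_to_batches` parses the Arrow IPC *stream* format (`StreamReader`), while `batches_to_ipc_file` emits the IPC *file* format (`FileWriter`), so the two do not round-trip with each other; the test below accordingly writes with a `StreamWriter`. A hedged sketch of a stream-format writer that would pair with the reader (`batches_to_ipc_stream` is hypothetical, not part of this commit):

```rust
use arrow_ipc::writer::StreamWriter;

// Hypothetical counterpart emitting the IPC *stream* format,
// which is the format ipc_file_to_batches actually parses.
pub fn batches_to_ipc_stream(batches: &[RecordBatch]) -> Result<Vec<u8>> {
    let schema = batches
        .first()
        .ok_or_else(|| Error::Store {
            message: "No batches to write".to_string(),
        })?
        .schema();
    let mut writer = StreamWriter::try_new(vec![], &schema)?;
    for batch in batches {
        writer.write(batch)?;
    }
    writer.finish()?;
    Ok(writer.into_inner()?)
}
```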
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::*;
|
||||
use arrow_array::{Float32Array, Int64Array, RecordBatch};
|
||||
use arrow_ipc::writer::StreamWriter;
|
||||
use arrow_schema::{DataType, Field, Schema};
|
||||
use std::sync::Arc;
|
||||
|
||||
fn create_record_batch() -> Result<RecordBatch> {
|
||||
let schema = Schema::new(vec![
|
||||
Field::new("a", DataType::Int64, false),
|
||||
Field::new("b", DataType::Float32, false),
|
||||
]);
|
||||
|
||||
let a = Int64Array::from(vec![1, 2, 3, 4, 5]);
|
||||
let b = Float32Array::from(vec![1.1, 2.2, 3.3, 4.4, 5.5]);
|
||||
|
||||
let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a), Arc::new(b)])?;
|
||||
|
||||
Ok(batch)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ipc_file_to_batches() -> Result<()> {
|
||||
let batch = create_record_batch()?;
|
||||
|
||||
let mut writer = StreamWriter::try_new(vec![], &batch.schema())?;
|
||||
writer.write(&batch)?;
|
||||
writer.finish()?;
|
||||
|
||||
let buf = writer.into_inner().unwrap();
|
||||
let mut reader = ipc_file_to_batches(buf).unwrap();
|
||||
let read_batch = reader.next().unwrap()?;
|
||||
|
||||
assert_eq!(batch.num_columns(), read_batch.num_columns());
|
||||
assert_eq!(batch.num_rows(), read_batch.num_rows());
|
||||
|
||||
for i in 0..batch.num_columns() {
|
||||
let batch_column = batch.column(i);
|
||||
let read_batch_column = read_batch.column(i);
|
||||
|
||||
assert_eq!(batch_column.data_type(), read_batch_column.data_type());
|
||||
assert_eq!(batch_column.len(), read_batch_column.len());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -41,21 +41,40 @@
|
||||
//!
|
||||
//! ### Quick Start
|
||||
//!
|
||||
//! <div class="warning">Rust API is not stable yet.</div>
|
||||
//! <div class="warning">Rust API is not stable yet, please expect breaking changes.</div>
|
||||
//!
|
||||
//! #### Connect to a database.
|
||||
//!
|
||||
//! ```rust
|
||||
//! use vectordb::{connection::{Database, Connection}, Table, WriteMode};
|
||||
//! use arrow_schema::{Field, Schema};
|
||||
//! use vectordb::connect;
|
||||
//! # use arrow_schema::{Field, Schema};
|
||||
//! # tokio::runtime::Runtime::new().unwrap().block_on(async {
|
||||
//! let db = Database::connect("data/sample-lancedb").await.unwrap();
|
||||
//! let db = connect("data/sample-lancedb").await.unwrap();
|
||||
//! # });
|
||||
//! ```
|
||||
//!
|
||||
//! LanceDB accepts the following forms of database path:
|
||||
//!
|
||||
//! - `/path/to/database` - local database on file system.
|
||||
//! - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud object store
|
||||
//! - `db://dbname` - Lance Cloud
|
||||
//!
|
||||
//! You can also use [`ConnectOptions`] to configure the connection to the database.
|
||||
//!
|
||||
//! ```rust
|
||||
//! use vectordb::{connect_with_options, ConnectOptions};
|
||||
//! # tokio::runtime::Runtime::new().unwrap().block_on(async {
|
||||
//! let options = ConnectOptions::new("data/sample-lancedb")
|
||||
//! .index_cache_size(1024);
|
||||
//! let db = connect_with_options(&options).await.unwrap();
|
||||
//! # });
|
||||
//! ```
|
||||
//!
|
||||
//! LanceDB uses [arrow-rs](https://github.com/apache/arrow-rs) to define schemas, data types and arrays.
|
||||
//! It treats [`FixedSizeList<Float16/Float32>`](https://docs.rs/arrow/latest/arrow/array/struct.FixedSizeListArray.html)
|
||||
//! columns as vectors.
|
||||
//! columns as vector columns.
|
||||
//!
|
||||
//! For more details, please refer to [LanceDB documentation](https://lancedb.github.io/lancedb/).
|
||||
//!
|
||||
//! #### Create a table
|
||||
//!
|
||||
@@ -67,10 +86,11 @@
|
||||
//! use arrow_array::{RecordBatch, RecordBatchIterator};
|
||||
//! # use arrow_array::{FixedSizeListArray, Float32Array, Int32Array, types::Float32Type};
|
||||
//! # use vectordb::connection::{Database, Connection};
|
||||
//! # use vectordb::connect;
|
||||
//!
|
||||
//! # tokio::runtime::Runtime::new().unwrap().block_on(async {
|
||||
//! # let tmpdir = tempfile::tempdir().unwrap();
|
||||
//! # let db = Database::connect(tmpdir.path().to_str().unwrap()).await.unwrap();
|
||||
//! # let db = connect(tmpdir.path().to_str().unwrap()).await.unwrap();
|
||||
//! let schema = Arc::new(Schema::new(vec![
|
||||
//! Field::new("id", DataType::Int32, false),
|
||||
//! Field::new("vector", DataType::FixedSizeList(
|
||||
@@ -90,6 +110,27 @@
|
||||
//! # });
|
||||
//! ```
|
||||
//!
|
||||
//! #### Create vector index (IVF_PQ)
|
||||
//!
|
||||
//! ```no_run
|
||||
//! # use std::sync::Arc;
|
||||
//! # use vectordb::connect;
|
||||
//! # use arrow_array::{FixedSizeListArray, types::Float32Type, RecordBatch,
|
||||
//! # RecordBatchIterator, Int32Array};
|
||||
//! # use arrow_schema::{Schema, Field, DataType};
|
||||
//! # tokio::runtime::Runtime::new().unwrap().block_on(async {
|
||||
//! # let tmpdir = tempfile::tempdir().unwrap();
|
||||
//! # let db = connect(tmpdir.path().to_str().unwrap()).await.unwrap();
|
||||
//! # let tbl = db.open_table("idx_test").await.unwrap();
|
||||
//! tbl.create_index(&["vector"])
|
||||
//! .ivf_pq()
|
||||
//! .num_partitions(256)
|
||||
//! .build()
|
||||
//! .await
|
||||
//! .unwrap();
|
||||
//! # });
|
||||
//! ```
|
||||
//!
|
||||
//! #### Open table and run search
|
||||
//!
|
||||
//! ```rust
|
||||
@@ -119,8 +160,8 @@
|
||||
//! # db.create_table("my_table", Box::new(batches), None).await.unwrap();
|
||||
//! let table = db.open_table("my_table").await.unwrap();
|
||||
//! let results = table
|
||||
//! .search(Some(vec![1.0; 128]))
|
||||
//! .execute()
|
||||
//! .search(&[1.0; 128])
|
||||
//! .execute_stream()
|
||||
//! .await
|
||||
//! .unwrap()
|
||||
//! .try_collect::<Vec<_>>()
|
||||
@@ -136,11 +177,15 @@ pub mod data;
|
||||
pub mod error;
|
||||
pub mod index;
|
||||
pub mod io;
|
||||
pub mod ipc;
|
||||
pub mod query;
|
||||
pub mod table;
|
||||
pub mod utils;
|
||||
|
||||
pub use connection::Connection;
|
||||
pub use table::Table;
|
||||
pub use connection::{Connection, Database};
|
||||
pub use error::{Error, Result};
|
||||
pub use table::{Table, TableRef};
|
||||
|
||||
/// Connect to a database
|
||||
pub use connection::{connect, connect_with_options, ConnectOptions};
|
||||
pub use lance::dataset::WriteMode;
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Lance Developers.
|
||||
// Copyright 2024 Lance Developers.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -15,25 +15,43 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow_array::Float32Array;
|
||||
use arrow_schema::Schema;
|
||||
use lance::dataset::scanner::{DatasetRecordBatchStream, Scanner};
|
||||
use lance::dataset::Dataset;
|
||||
use lance_linalg::distance::MetricType;
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::utils::default_vector_column;
|
||||
use crate::Error;
|
||||
|
||||
const DEFAULT_TOP_K: usize = 10;
|
||||
|
||||
/// A builder for nearest neighbor queries for LanceDB.
|
||||
#[derive(Clone)]
|
||||
pub struct Query {
|
||||
pub dataset: Arc<Dataset>,
|
||||
pub query_vector: Option<Float32Array>,
|
||||
pub column: String,
|
||||
pub limit: Option<usize>,
|
||||
pub filter: Option<String>,
|
||||
pub select: Option<Vec<String>>,
|
||||
pub nprobes: usize,
|
||||
pub refine_factor: Option<u32>,
|
||||
pub metric_type: Option<MetricType>,
|
||||
pub use_index: bool,
|
||||
pub prefilter: bool,
|
||||
dataset: Arc<Dataset>,
|
||||
|
||||
// The column to run the query on. If not specified, we will attempt to guess
|
||||
// the column based on the dataset's schema.
|
||||
column: Option<String>,
|
||||
|
||||
// IVF PQ - ANN search.
|
||||
query_vector: Option<Float32Array>,
|
||||
nprobes: usize,
|
||||
refine_factor: Option<u32>,
|
||||
metric_type: Option<MetricType>,
|
||||
|
||||
/// Limit the number of rows to return.
|
||||
limit: Option<usize>,
|
||||
/// Apply filter to the returned rows.
|
||||
filter: Option<String>,
|
||||
/// Select column projection.
|
||||
select: Option<Vec<String>>,
|
||||
|
||||
/// Default is true. Set to false to enforce a brute force search.
|
||||
use_index: bool,
|
||||
/// Apply filter before ANN search.
|
||||
prefilter: bool,
|
||||
}
|
||||
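The reshaped struct above is driven entirely through the chainable setters that follow; a short sketch of how they compose for an ANN query (values are illustrative):

```rust
// Nearest 20 rows to the query vector, with the SQL filter applied
// before the ANN search and only two columns materialized.
let stream = tbl
    .query()
    .nearest_to(&[0.1, 0.2, 0.3, 0.4])
    .limit(20)
    .nprobes(50)
    .prefilter(true)
    .filter("id % 2 == 0")
    .select(&["id", "vector"])
    .execute_stream()
    .await?;
```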
|
||||
impl Query {
|
||||
@@ -41,17 +59,13 @@ impl Query {
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `dataset` - The table / dataset the query will be run against.
|
||||
/// * `vector` The vector used for this query.
|
||||
/// * `dataset` - Lance dataset.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * A [Query] object.
|
||||
pub(crate) fn new(dataset: Arc<Dataset>, vector: Option<Float32Array>) -> Self {
|
||||
pub(crate) fn new(dataset: Arc<Dataset>) -> Self {
|
||||
Query {
|
||||
dataset,
|
||||
query_vector: vector,
|
||||
column: crate::table::VECTOR_COLUMN_NAME.to_string(),
|
||||
query_vector: None,
|
||||
column: None,
|
||||
limit: None,
|
||||
nprobes: 20,
|
||||
refine_factor: None,
|
||||
@@ -63,17 +77,37 @@ impl Query {
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute the queries and return its results.
|
||||
/// Convert the query plan to a [`DatasetRecordBatchStream`]
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * A [DatasetRecordBatchStream] with the query's results.
|
||||
pub async fn execute(&self) -> Result<DatasetRecordBatchStream> {
|
||||
pub async fn execute_stream(&self) -> Result<DatasetRecordBatchStream> {
|
||||
let mut scanner: Scanner = self.dataset.scan();
|
||||
|
||||
if let Some(query) = self.query_vector.as_ref() {
|
||||
// If there is a vector query, default to limit=10 if unspecified
|
||||
scanner.nearest(&self.column, query, self.limit.unwrap_or(10))?;
|
||||
let column = if let Some(col) = self.column.as_ref() {
|
||||
col.clone()
|
||||
} else {
|
||||
// Infer a vector column with the same dimension as the query vector.
|
||||
let arrow_schema = Schema::from(self.dataset.schema());
|
||||
default_vector_column(&arrow_schema, Some(query.len() as i32))?
|
||||
};
|
||||
let field = self.dataset.schema().field(&column).ok_or(Error::Store {
|
||||
message: format!("Column {} not found in dataset schema", column),
|
||||
})?;
|
||||
if !matches!(field.data_type(), arrow_schema::DataType::FixedSizeList(f, dim) if f.data_type().is_floating() && dim == query.len() as i32)
|
||||
{
|
||||
return Err(Error::Store {
|
||||
message: format!(
|
||||
"Vector column '{}' does not match the dimension of the query vector: dim={}",
|
||||
column,
|
||||
query.len(),
|
||||
),
|
||||
});
|
||||
}
|
||||
scanner.nearest(&column, query, self.limit.unwrap_or(DEFAULT_TOP_K))?;
|
||||
} else {
|
||||
// If there is no vector query, it's ok to not have a limit
|
||||
scanner.limit(self.limit.map(|limit| limit as i64), None)?;
|
||||
@@ -94,8 +128,8 @@ impl Query {
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `column` - The column name
|
||||
pub fn column(mut self, column: &str) -> Query {
|
||||
self.column = column.into();
|
||||
pub fn column(mut self, column: &str) -> Self {
|
||||
self.column = Some(column.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
@@ -104,18 +138,18 @@ impl Query {
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `limit` - The maximum number of results to return.
|
||||
pub fn limit(mut self, limit: usize) -> Query {
|
||||
pub fn limit(mut self, limit: usize) -> Self {
|
||||
self.limit = Some(limit);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the vector used for this query.
|
||||
/// Find the nearest vectors to the given query vector.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `vector` - The vector that will be used for search.
|
||||
pub fn query_vector(mut self, query_vector: Float32Array) -> Query {
|
||||
self.query_vector = Some(query_vector);
|
||||
pub fn nearest_to(mut self, vector: &[f32]) -> Self {
|
||||
self.query_vector = Some(Float32Array::from(vector.to_vec()));
|
||||
self
|
||||
}
|
||||
|
||||
@@ -124,7 +158,7 @@ impl Query {
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `nprobes` - The number of probes to use.
|
||||
pub fn nprobes(mut self, nprobes: usize) -> Query {
|
||||
pub fn nprobes(mut self, nprobes: usize) -> Self {
|
||||
self.nprobes = nprobes;
|
||||
self
|
||||
}
|
||||
@@ -134,8 +168,8 @@ impl Query {
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `refine_factor` - The refine factor to use.
|
||||
pub fn refine_factor(mut self, refine_factor: Option<u32>) -> Query {
|
||||
self.refine_factor = refine_factor;
|
||||
pub fn refine_factor(mut self, refine_factor: u32) -> Self {
|
||||
self.refine_factor = Some(refine_factor);
|
||||
self
|
||||
}
|
||||
|
||||
@@ -144,8 +178,8 @@ impl Query {
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `metric_type` - The distance metric to use. By default [MetricType::L2] is used.
|
||||
pub fn metric_type(mut self, metric_type: Option<MetricType>) -> Query {
|
||||
self.metric_type = metric_type;
|
||||
pub fn metric_type(mut self, metric_type: MetricType) -> Self {
|
||||
self.metric_type = Some(metric_type);
|
||||
self
|
||||
}
|
||||
|
||||
@@ -154,7 +188,7 @@ impl Query {
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `use_index` - Sets whether to use an ANN index if available
|
||||
pub fn use_index(mut self, use_index: bool) -> Query {
|
||||
pub fn use_index(mut self, use_index: bool) -> Self {
|
||||
self.use_index = use_index;
|
||||
self
|
||||
}
|
||||
@@ -163,21 +197,21 @@ impl Query {
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `filter` - value A filter in the same format used by a sql WHERE clause.
|
||||
pub fn filter(mut self, filter: Option<String>) -> Query {
|
||||
self.filter = filter;
|
||||
/// * `filter` - SQL filter
|
||||
pub fn filter(mut self, filter: impl AsRef<str>) -> Self {
|
||||
self.filter = Some(filter.as_ref().to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Return only the specified columns.
|
||||
///
|
||||
/// Only select the specified columns. If not specified, all columns will be returned.
|
||||
pub fn select(mut self, columns: Option<Vec<String>>) -> Query {
|
||||
self.select = columns;
|
||||
pub fn select(mut self, columns: &[impl AsRef<str>]) -> Self {
|
||||
self.select = Some(columns.iter().map(|c| c.as_ref().to_string()).collect());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn prefilter(mut self, prefilter: bool) -> Query {
|
||||
pub fn prefilter(mut self, prefilter: bool) -> Self {
|
||||
self.prefilter = prefilter;
|
||||
self
|
||||
}
|
||||
@@ -196,8 +230,10 @@ mod tests {
|
||||
use futures::StreamExt;
|
||||
use lance::dataset::Dataset;
|
||||
use lance_testing::datagen::{BatchGenerator, IncrementingInt32, RandomVector};
|
||||
use tempfile::tempdir;
|
||||
|
||||
use crate::query::Query;
|
||||
use crate::table::{NativeTable, Table};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_setters_getters() {
|
||||
@@ -205,18 +241,18 @@ mod tests {
|
||||
let ds = Dataset::write(batches, "memory://foo", None).await.unwrap();
|
||||
|
||||
let vector = Some(Float32Array::from_iter_values([0.1, 0.2]));
|
||||
let query = Query::new(Arc::new(ds), vector.clone());
|
||||
let query = Query::new(Arc::new(ds)).nearest_to(&[0.1, 0.2]);
|
||||
assert_eq!(query.query_vector, vector);
|
||||
|
||||
let new_vector = Float32Array::from_iter_values([9.8, 8.7]);
|
||||
|
||||
let query = query
|
||||
.query_vector(new_vector.clone())
|
||||
.nearest_to(&[9.8, 8.7])
|
||||
.limit(100)
|
||||
.nprobes(1000)
|
||||
.use_index(true)
|
||||
.metric_type(Some(MetricType::Cosine))
|
||||
.refine_factor(Some(999));
|
||||
.metric_type(MetricType::Cosine)
|
||||
.refine_factor(999);
|
||||
|
||||
assert_eq!(query.query_vector.unwrap(), new_vector);
|
||||
assert_eq!(query.limit.unwrap(), 100);
|
||||
@@ -231,14 +267,8 @@ mod tests {
|
||||
let batches = make_non_empty_batches();
|
||||
let ds = Arc::new(Dataset::write(batches, "memory://foo", None).await.unwrap());
|
||||
|
||||
let vector = Some(Float32Array::from_iter_values([0.1; 4]));
|
||||
|
||||
let query = Query::new(ds.clone(), vector.clone());
|
||||
let result = query
|
||||
.limit(10)
|
||||
.filter(Some("id % 2 == 0".to_string()))
|
||||
.execute()
|
||||
.await;
|
||||
let query = Query::new(ds.clone()).nearest_to(&[0.1; 4]);
|
||||
let result = query.limit(10).filter("id % 2 == 0").execute_stream().await;
|
||||
let mut stream = result.expect("should have result");
|
||||
// should only have one batch
|
||||
while let Some(batch) = stream.next().await {
|
||||
@@ -246,12 +276,12 @@ mod tests {
|
||||
assert!(batch.expect("should be Ok").num_rows() < 10);
|
||||
}
|
||||
|
||||
let query = Query::new(ds, vector.clone());
|
||||
let query = Query::new(ds).nearest_to(&[0.1; 4]);
|
||||
let result = query
|
||||
.limit(10)
|
||||
.filter(Some("id % 2 == 0".to_string()))
|
||||
.filter(String::from("id % 2 == 0")) // Work with String too
|
||||
.prefilter(true)
|
||||
.execute()
|
||||
.execute_stream()
|
||||
.await;
|
||||
let mut stream = result.expect("should have result");
|
||||
// should only have one batch
|
||||
@@ -267,11 +297,8 @@ mod tests {
|
||||
let batches = make_non_empty_batches();
|
||||
let ds = Arc::new(Dataset::write(batches, "memory://foo", None).await.unwrap());
|
||||
|
||||
let query = Query::new(ds.clone(), None);
|
||||
let result = query
|
||||
.filter(Some("id % 2 == 0".to_string()))
|
||||
.execute()
|
||||
.await;
|
||||
let query = Query::new(ds.clone());
|
||||
let result = query.filter("id % 2 == 0").execute_stream().await;
|
||||
let mut stream = result.expect("should have result");
|
||||
// should only have one batch
|
||||
while let Some(batch) = stream.next().await {
|
||||
@@ -309,4 +336,21 @@ mod tests {
|
||||
schema,
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_search() {
|
||||
let tmp_dir = tempdir().unwrap();
|
||||
let dataset_path = tmp_dir.path().join("test.lance");
|
||||
let uri = dataset_path.to_str().unwrap();
|
||||
|
||||
let batches = make_test_batches();
|
||||
Dataset::write(batches, dataset_path.to_str().unwrap(), None)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let table = NativeTable::open(uri).await.unwrap();
|
||||
|
||||
let query = table.search(&[0.1, 0.2]);
|
||||
assert_eq!(&[0.1, 0.2], query.query_vector.unwrap().values());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright 2023 LanceDB Developers.
|
||||
// Copyright 2024 LanceDB Developers.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -12,72 +12,273 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! LanceDB Table APIs
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use arrow_array::RecordBatchReader;
|
||||
use arrow_schema::{Schema, SchemaRef};
|
||||
use chrono::Duration;
|
||||
use lance::dataset::builder::DatasetBuilder;
|
||||
use lance::index::scalar::ScalarIndexParams;
|
||||
use lance_index::optimize::OptimizeOptions;
|
||||
use lance_index::IndexType;
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow_array::{Float32Array, RecordBatchReader};
|
||||
use arrow_schema::SchemaRef;
|
||||
use lance::dataset::cleanup::RemovalStats;
|
||||
use lance::dataset::optimize::{
|
||||
compact_files, CompactionMetrics, CompactionOptions, IndexRemapperOptions,
|
||||
};
|
||||
pub use lance::dataset::ReadParams;
|
||||
use lance::dataset::{Dataset, UpdateBuilder, WriteParams};
|
||||
use lance::io::object_store::WrappingObjectStore;
|
||||
use lance_index::DatasetIndexExt;
|
||||
use std::path::Path;
|
||||
use lance::io::WrappingObjectStore;
|
||||
use lance_index::{optimize::OptimizeOptions, DatasetIndexExt};
|
||||
use log::info;
|
||||
|
||||
use crate::error::{Error, Result};
|
||||
use crate::index::vector::{VectorIndex, VectorIndexBuilder, VectorIndexStatistics};
|
||||
use crate::index::vector::{VectorIndex, VectorIndexStatistics};
|
||||
use crate::index::IndexBuilder;
|
||||
use crate::query::Query;
|
||||
use crate::utils::{PatchReadParam, PatchWriteParam};
|
||||
use crate::WriteMode;
|
||||
|
||||
pub use lance::dataset::ReadParams;
|
||||
/// Optimize the dataset.
|
||||
///
|
||||
/// Similar to `VACUUM` in PostgreSQL, it offers different options to
|
||||
/// optimize different parts of the table on disk.
|
||||
///
|
||||
/// By default, it optimizes everything, as [`OptimizeAction::All`].
|
||||
pub enum OptimizeAction {
|
||||
/// Run every optimization, with default options.
|
||||
All,
|
||||
/// Compact files in the dataset
|
||||
Compact {
|
||||
options: CompactionOptions,
|
||||
remap_options: Option<Arc<dyn IndexRemapperOptions>>,
|
||||
},
|
||||
/// Prune old versions of the dataset.
|
||||
Prune {
|
||||
/// The duration of time to keep versions of the dataset.
|
||||
older_than: Duration,
|
||||
/// Because they may be part of an in-progress transaction, files newer than 7 days old are not deleted by default.
|
||||
/// If you are sure that there are no in-progress transactions, then you can set this to `true` to delete all files older than `older_than`.
|
||||
delete_unverified: Option<bool>,
|
||||
},
|
||||
/// Optimize index.
|
||||
Index(OptimizeOptions),
|
||||
}
|
||||
|
||||
pub const VECTOR_COLUMN_NAME: &str = "vector";
|
||||
impl Default for OptimizeAction {
|
||||
fn default() -> Self {
|
||||
Self::All
|
||||
}
|
||||
}
|
||||
|
||||
/// Statistics about the optimization.
|
||||
pub struct OptimizeStats {
|
||||
/// Stats of the file compaction.
|
||||
pub compaction: Option<CompactionMetrics>,
|
||||
|
||||
/// Stats of the version pruning
|
||||
pub prune: Option<RemovalStats>,
|
||||
}
|
||||
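A usage sketch tying these types to the `Table::optimize` entry point declared below (the field names on [`RemovalStats`] follow lance's cleanup stats and are assumed here):

```rust
use chrono::Duration;

// Prune versions older than two weeks, keeping the conservative
// 7-day "unverified" guard in place.
let stats = tbl
    .optimize(OptimizeAction::Prune {
        older_than: Duration::days(14),
        delete_unverified: None,
    })
    .await?;
if let Some(prune) = stats.prune {
    println!("removed {} old versions", prune.old_versions);
}
```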
|
||||
/// A Table is a collection of strongly typed Rows.
|
||||
///
|
||||
/// The type of each row is defined in an Apache Arrow [Schema].
|
||||
#[async_trait::async_trait]
|
||||
pub trait Table: std::fmt::Display + Send + Sync {
|
||||
fn as_any(&self) -> &dyn std::any::Any;
|
||||
|
||||
/// Cast as [`NativeTable`], or return None it if is not a [`NativeTable`].
|
||||
fn as_native(&self) -> Option<&NativeTable>;
|
||||
|
||||
/// Get the name of the table.
|
||||
fn name(&self) -> &str;
|
||||
|
||||
/// Get the arrow [Schema] of the table.
|
||||
fn schema(&self) -> SchemaRef;
|
||||
|
||||
/// Count the number of rows in this dataset.
|
||||
async fn count_rows(&self) -> Result<usize>;
|
||||
|
||||
/// Insert new records into this Table
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `batches` RecordBatch to be saved in the Table
|
||||
/// * `params` Append / Overwrite existing records. Default: Append
|
||||
async fn add(
|
||||
&self,
|
||||
batches: Box<dyn RecordBatchReader + Send>,
|
||||
params: Option<WriteParams>,
|
||||
) -> Result<()>;
|
||||
|
||||
/// Delete the rows from table that match the predicate.
|
||||
///
|
||||
/// # Arguments
|
||||
/// - `predicate` - The SQL predicate string to filter the rows to be deleted.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```no_run
|
||||
/// # use std::sync::Arc;
|
||||
/// # use vectordb::connection::{Database, Connection};
|
||||
/// # use arrow_array::{FixedSizeListArray, types::Float32Type, RecordBatch,
|
||||
/// # RecordBatchIterator, Int32Array};
|
||||
/// # use arrow_schema::{Schema, Field, DataType};
|
||||
/// # tokio::runtime::Runtime::new().unwrap().block_on(async {
|
||||
/// let tmpdir = tempfile::tempdir().unwrap();
|
||||
/// let db = Database::connect(tmpdir.path().to_str().unwrap()).await.unwrap();
|
||||
/// # let schema = Arc::new(Schema::new(vec![
|
||||
/// # Field::new("id", DataType::Int32, false),
|
||||
/// # Field::new("vector", DataType::FixedSizeList(
|
||||
/// # Arc::new(Field::new("item", DataType::Float32, true)), 128), true),
|
||||
/// # ]));
|
||||
/// let batches = RecordBatchIterator::new(vec![
|
||||
/// RecordBatch::try_new(schema.clone(),
|
||||
/// vec![
|
||||
/// Arc::new(Int32Array::from_iter_values(0..10)),
|
||||
/// Arc::new(FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
|
||||
/// (0..10).map(|_| Some(vec![Some(1.0); 128])), 128)),
|
||||
/// ]).unwrap()
|
||||
/// ].into_iter().map(Ok),
|
||||
/// schema.clone());
|
||||
/// let tbl = db.create_table("delete_test", Box::new(batches), None).await.unwrap();
|
||||
/// tbl.delete("id > 5").await.unwrap();
|
||||
/// # });
|
||||
/// ```
|
||||
async fn delete(&self, predicate: &str) -> Result<()>;
|
||||
|
||||
/// Create an index on the column name.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// # use std::sync::Arc;
|
||||
/// # use vectordb::connection::{Database, Connection};
|
||||
/// # use arrow_array::{FixedSizeListArray, types::Float32Type, RecordBatch,
|
||||
/// # RecordBatchIterator, Int32Array};
|
||||
/// # use arrow_schema::{Schema, Field, DataType};
|
||||
/// # tokio::runtime::Runtime::new().unwrap().block_on(async {
|
||||
/// let tmpdir = tempfile::tempdir().unwrap();
|
||||
/// let db = Database::connect(tmpdir.path().to_str().unwrap()).await.unwrap();
|
||||
/// # let tbl = db.open_table("idx_test").await.unwrap();
|
||||
/// tbl.create_index(&["vector"])
|
||||
/// .ivf_pq()
|
||||
/// .num_partitions(256)
|
||||
/// .build()
|
||||
/// .await
|
||||
/// .unwrap();
|
||||
/// # });
|
||||
/// ```
|
||||
fn create_index(&self, column: &[&str]) -> IndexBuilder;
|
||||
|
||||
/// Search the table with a given query vector.
|
||||
///
|
||||
/// This is a convenience method for preparing an ANN query.
|
||||
fn search(&self, query: &[f32]) -> Query {
|
||||
self.query().nearest_to(query)
|
||||
}
|
||||
|
||||
/// Create a generic [`Query`] Builder.
|
||||
///
|
||||
/// When appropriate, various indices and statistics based pruning will be used to
|
||||
/// accelerate the query.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ## Run a vector search (ANN) query.
|
||||
///
|
||||
/// ```no_run
|
||||
/// # use arrow_array::RecordBatch;
|
||||
/// # use futures::TryStreamExt;
|
||||
/// # tokio::runtime::Runtime::new().unwrap().block_on(async {
|
||||
/// # let tbl = vectordb::table::NativeTable::open("/tmp/tbl").await.unwrap();
|
||||
/// let stream = tbl.query().nearest_to(&[1.0, 2.0, 3.0])
|
||||
/// .refine_factor(5)
|
||||
/// .nprobes(10)
|
||||
/// .execute_stream()
|
||||
/// .await
|
||||
/// .unwrap();
|
||||
/// let batches: Vec<RecordBatch> = stream.try_collect().await.unwrap();
|
||||
/// # });
|
||||
/// ```
|
||||
///
|
||||
/// ## Run a SQL-style filter
|
||||
/// ```no_run
|
||||
/// # use arrow_array::RecordBatch;
|
||||
/// # use futures::TryStreamExt;
|
||||
/// # tokio::runtime::Runtime::new().unwrap().block_on(async {
|
||||
/// # let tbl = vectordb::table::NativeTable::open("/tmp/tbl").await.unwrap();
|
||||
/// let stream = tbl
|
||||
/// .query()
|
||||
/// .filter("id > 5")
|
||||
/// .limit(1000)
|
||||
/// .execute_stream()
|
||||
/// .await
|
||||
/// .unwrap();
|
||||
/// let batches: Vec<RecordBatch> = stream.try_collect().await.unwrap();
|
||||
/// # });
|
||||
/// ```
|
||||
///
|
||||
/// ## Run a full scan query.
|
||||
/// ```no_run
|
||||
/// # use arrow_array::RecordBatch;
|
||||
/// # use futures::TryStreamExt;
|
||||
/// # tokio::runtime::Runtime::new().unwrap().block_on(async {
|
||||
/// # let tbl = vectordb::table::NativeTable::open("/tmp/tbl").await.unwrap();
|
||||
/// let stream = tbl
|
||||
/// .query()
|
||||
/// .execute_stream()
|
||||
/// .await
|
||||
/// .unwrap();
|
||||
/// let batches: Vec<RecordBatch> = stream.try_collect().await.unwrap();
|
||||
/// # });
|
||||
/// ```
|
||||
fn query(&self) -> Query;
|
||||
|
||||
/// Optimize the on-disk data and indices for better performance.
|
||||
///
|
||||
/// <section class="warning">Experimental API</section>
|
||||
///
|
||||
/// Modeled after ``VACUUM`` in PostgreSQL.
|
||||
/// Not all implementations support explicit optimization.
|
||||
async fn optimize(&self, action: OptimizeAction) -> Result<OptimizeStats>;
|
||||
}
|
||||
|
||||
/// A shared reference to a [Table].
|
||||
pub type TableRef = Arc<dyn Table>;
|
||||
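Because `TableRef` erases the concrete type, callers needing native-only operations recover it through `as_native()`; a minimal sketch:

```rust
fn version_of(table: &TableRef) -> Option<u64> {
    // Only NativeTable exposes version(); non-native tables yield None.
    table.as_native().map(|native| native.version())
}
```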
|
||||
/// A table in a LanceDB database.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Table {
|
||||
pub struct NativeTable {
|
||||
name: String,
|
||||
uri: String,
|
||||
dataset: Arc<Dataset>,
|
||||
dataset: Arc<Mutex<Dataset>>,
|
||||
|
||||
// the object store wrapper to use on write path
|
||||
store_wrapper: Option<Arc<dyn WrappingObjectStore>>,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Table {
|
||||
impl std::fmt::Display for NativeTable {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "Table({})", self.name)
|
||||
}
|
||||
}
|
||||
|
||||
impl Table {
|
||||
impl NativeTable {
|
||||
/// Opens an existing Table
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `uri` - The uri to a [Table]
|
||||
/// * `uri` - The uri to a [NativeTable]
|
||||
/// * `name` - The table name
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * A [Table] object.
|
||||
/// * A [NativeTable] object.
|
||||
pub async fn open(uri: &str) -> Result<Self> {
|
||||
let name = Self::get_table_name(uri)?;
|
||||
Self::open_with_params(uri, &name, None, ReadParams::default()).await
|
||||
}
|
||||
|
||||
/// Open a Table with a given name.
|
||||
pub async fn open_with_name(uri: &str, name: &str) -> Result<Self> {
|
||||
Self::open_with_params(uri, name, None, ReadParams::default()).await
|
||||
}
|
||||
|
||||
/// Opens an existing Table
|
||||
///
|
||||
/// # Arguments
|
||||
@@ -88,7 +289,7 @@ impl Table {
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * A [Table] object.
|
||||
/// * A [NativeTable] object.
|
||||
pub async fn open_with_params(
|
||||
uri: &str,
|
||||
name: &str,
|
||||
@@ -113,25 +314,26 @@ impl Table {
|
||||
message: e.to_string(),
|
||||
},
|
||||
})?;
|
||||
Ok(Table {
|
||||
Ok(NativeTable {
|
||||
name: name.to_string(),
|
||||
uri: uri.to_string(),
|
||||
dataset: Arc::new(dataset),
|
||||
dataset: Arc::new(Mutex::new(dataset)),
|
||||
store_wrapper: write_store_wrapper,
|
||||
})
|
||||
}
|
||||
|
||||
/// Checkout a specific version of this [`Table`]
|
||||
/// Make a new clone of the internal lance dataset.
|
||||
pub(crate) fn clone_inner_dataset(&self) -> Dataset {
|
||||
self.dataset.lock().expect("Lock poison").clone()
|
||||
}
|
||||
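This helper is the core of the new `Mutex<Dataset>` design: mutating methods clone the dataset out of the lock, mutate the clone without holding the lock, then publish it back through `reset_dataset` (referenced later in this hunk but not shown). A sketch of the pattern, assuming `reset_dataset` simply replaces the guarded value:

```rust
// Hedged sketch of the clone-mutate-swap pattern used throughout this file.
async fn delete_rows(table: &NativeTable, predicate: &str) -> Result<()> {
    let mut dataset = table.clone_inner_dataset(); // clone out of the lock
    dataset.delete(predicate).await?;              // mutate lock-free
    table.reset_dataset(dataset);                  // swap the new version in
    Ok(())
}
```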
|
||||
/// Checkout a specific version of this [NativeTable]
|
||||
///
|
||||
pub async fn checkout(uri: &str, version: u64) -> Result<Self> {
|
||||
let name = Self::get_table_name(uri)?;
|
||||
Self::checkout_with_params(uri, &name, version, None, ReadParams::default()).await
|
||||
}
|
||||
|
||||
pub async fn checkout_with_name(uri: &str, name: &str, version: u64) -> Result<Self> {
|
||||
Self::checkout_with_params(uri, name, version, None, ReadParams::default()).await
|
||||
}
|
||||
|
||||
pub async fn checkout_with_params(
|
||||
uri: &str,
|
||||
name: &str,
|
||||
@@ -154,26 +356,27 @@ impl Table {
|
||||
message: e.to_string(),
|
||||
},
|
||||
})?;
|
||||
Ok(Table {
|
||||
Ok(NativeTable {
|
||||
name: name.to_string(),
|
||||
uri: uri.to_string(),
|
||||
dataset: Arc::new(dataset),
|
||||
dataset: Arc::new(Mutex::new(dataset)),
|
||||
store_wrapper: write_store_wrapper,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn checkout_latest(&self) -> Result<Self> {
|
||||
let latest_version_id = self.dataset.latest_version_id().await?;
|
||||
let dataset = if latest_version_id == self.dataset.version().version {
|
||||
self.dataset.clone()
|
||||
let dataset = self.clone_inner_dataset();
|
||||
let latest_version_id = dataset.latest_version_id().await?;
|
||||
let dataset = if latest_version_id == dataset.version().version {
|
||||
dataset
|
||||
} else {
|
||||
Arc::new(self.dataset.checkout_version(latest_version_id).await?)
|
||||
dataset.checkout_version(latest_version_id).await?
|
||||
};
|
||||
|
||||
Ok(Table {
|
||||
Ok(Self {
|
||||
name: self.name.clone(),
|
||||
uri: self.uri.clone(),
|
||||
dataset,
|
||||
dataset: Arc::new(Mutex::new(dataset)),
|
||||
store_wrapper: self.store_wrapper.clone(),
|
||||
})
|
||||
}
|
||||
@@ -203,8 +406,8 @@ impl Table {
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * A [Table] object.
|
||||
pub async fn create(
|
||||
/// * A [TableImpl] object.
|
||||
pub(crate) async fn create(
|
||||
uri: &str,
|
||||
name: &str,
|
||||
batches: impl RecordBatchReader + Send + 'static,
|
||||
@@ -227,109 +430,36 @@ impl Table {
|
||||
message: e.to_string(),
|
||||
},
|
||||
})?;
|
||||
Ok(Table {
|
||||
Ok(NativeTable {
|
||||
name: name.to_string(),
|
||||
uri: uri.to_string(),
|
||||
dataset: Arc::new(dataset),
|
||||
dataset: Arc::new(Mutex::new(dataset)),
|
||||
store_wrapper: write_store_wrapper,
|
||||
})
|
||||
}
|
||||
|
||||
/// Schema of this Table.
|
||||
pub fn schema(&self) -> SchemaRef {
|
||||
Arc::new(self.dataset.schema().into())
|
||||
}
|
||||
|
||||
/// Version of this Table
|
||||
pub fn version(&self) -> u64 {
|
||||
self.dataset.version().version
|
||||
self.dataset.lock().expect("lock poison").version().version
|
||||
}
|
||||
|
||||
/// Create index on the table.
|
||||
pub async fn create_index(&mut self, index_builder: &impl VectorIndexBuilder) -> Result<()> {
|
||||
let mut dataset = self.dataset.as_ref().clone();
|
||||
dataset
|
||||
.create_index(
|
||||
&[index_builder
|
||||
.get_column()
|
||||
.unwrap_or(VECTOR_COLUMN_NAME.to_string())
|
||||
.as_str()],
|
||||
IndexType::Vector,
|
||||
index_builder.get_index_name(),
|
||||
&index_builder.build(),
|
||||
index_builder.get_replace(),
|
||||
)
|
||||
.await?;
|
||||
self.dataset = Arc::new(dataset);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create a scalar index on the table
|
||||
pub async fn create_scalar_index(&mut self, column: &str, replace: bool) -> Result<()> {
|
||||
let mut dataset = self.dataset.as_ref().clone();
|
||||
let params = ScalarIndexParams::default();
|
||||
dataset
|
||||
.create_index(&[column], IndexType::Scalar, None, ¶ms, replace)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn optimize_indices(&mut self, options: &OptimizeOptions) -> Result<()> {
|
||||
let mut dataset = self.dataset.as_ref().clone();
|
||||
async fn optimize_indices(&self, options: &OptimizeOptions) -> Result<()> {
|
||||
info!("LanceDB: optimizing indices: {:?}", options);
|
||||
let mut dataset = self.clone_inner_dataset();
|
||||
dataset.optimize_indices(options).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Insert records into this Table
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `batches` RecordBatch to be saved in the Table
|
||||
/// * `write_mode` Append / Overwrite existing records. Default: Append
|
||||
/// # Returns
|
||||
///
|
||||
/// * The number of rows added
|
||||
pub async fn add(
|
||||
&mut self,
|
||||
batches: impl RecordBatchReader + Send + 'static,
|
||||
params: Option<WriteParams>,
|
||||
) -> Result<()> {
|
||||
let params = Some(params.unwrap_or(WriteParams {
|
||||
mode: WriteMode::Append,
|
||||
..WriteParams::default()
|
||||
}));
|
||||
|
||||
// patch the params if we have a write store wrapper
|
||||
let params = match self.store_wrapper.clone() {
|
||||
Some(wrapper) => params.patch_with_store_wrapper(wrapper)?,
|
||||
None => params,
|
||||
};
|
||||
|
||||
self.dataset = Arc::new(Dataset::write(batches, &self.uri, params).await?);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Creates a new Query object that can be executed.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `query_vector` The vector used for this query.
|
||||
///
|
||||
/// # Returns
|
||||
/// * A [Query] object.
|
||||
pub fn search<T: Into<Float32Array>>(&self, query_vector: Option<T>) -> Query {
|
||||
Query::new(self.dataset.clone(), query_vector.map(|q| q.into()))
|
||||
pub fn query(&self) -> Query {
|
||||
Query::new(self.clone_inner_dataset().into())
|
||||
}
|
||||
|
||||
pub fn filter(&self, expr: String) -> Query {
|
||||
Query::new(self.dataset.clone(), None).filter(Some(expr))
|
||||
Query::new(self.clone_inner_dataset().into()).filter(expr)
|
||||
}
|

    /// Returns the number of rows in this Table
    pub async fn count_rows(&self) -> Result<usize> {
        Ok(self.dataset.count_rows().await?)
    }

    /// Merge new data into this table.
    pub async fn merge(
@@ -338,26 +468,14 @@ impl Table {
        left_on: &str,
        right_on: &str,
    ) -> Result<()> {
        let mut dataset = self.dataset.as_ref().clone();
        let mut dataset = self.clone_inner_dataset();
        dataset.merge(batches, left_on, right_on).await?;
        self.dataset = Arc::new(dataset);
        self.dataset = Arc::new(Mutex::new(dataset));
        Ok(())
    }

    /// Delete rows from the table
    pub async fn delete(&mut self, predicate: &str) -> Result<()> {
        let mut dataset = self.dataset.as_ref().clone();
        dataset.delete(predicate).await?;
        self.dataset = Arc::new(dataset);
        Ok(())
    }

    pub async fn update(
        &mut self,
        predicate: Option<&str>,
        updates: Vec<(&str, &str)>,
    ) -> Result<()> {
        let mut builder = UpdateBuilder::new(self.dataset.clone());
    pub async fn update(&self, predicate: Option<&str>, updates: Vec<(&str, &str)>) -> Result<()> {
        let mut builder = UpdateBuilder::new(self.clone_inner_dataset().into());
        if let Some(predicate) = predicate {
            builder = builder.update_where(predicate)?;
        }
@@ -367,9 +485,8 @@ impl Table {
        }

        let operation = builder.build()?;
        let new_ds = operation.execute().await?;
        self.dataset = new_ds;

        let ds = operation.execute().await?;
        self.reset_dataset(ds.as_ref().clone());
        Ok(())
    }
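The call shape matches the tests later in this diff:

// Set `name` to the SQL literal 'foo' on every row where the predicate holds;
// with None as the predicate, the update applies to all rows.
table.update(Some("id > 5"), vec![("name", "'foo'")]).await?;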

@@ -384,13 +501,13 @@ impl Table {
    ///
    /// This calls into [lance::dataset::Dataset::cleanup_old_versions] and
    /// returns the result.
    pub async fn cleanup_old_versions(
    async fn cleanup_old_versions(
        &self,
        older_than: Duration,
        delete_unverified: Option<bool>,
    ) -> Result<RemovalStats> {
        Ok(self
            .dataset
        let dataset = self.clone_inner_dataset();
        Ok(dataset
            .cleanup_old_versions(older_than, delete_unverified)
            .await?)
    }
@@ -401,27 +518,29 @@ impl Table {
    /// for faster reads.
    ///
    /// This calls into [lance::dataset::optimize::compact_files].
    pub async fn compact_files(
        &mut self,
    async fn compact_files(
        &self,
        options: CompactionOptions,
        remap_options: Option<Arc<dyn IndexRemapperOptions>>,
    ) -> Result<CompactionMetrics> {
        let mut dataset = self.dataset.as_ref().clone();
        let mut dataset = self.clone_inner_dataset();
        let metrics = compact_files(&mut dataset, options, remap_options).await?;
        self.dataset = Arc::new(dataset);
        self.reset_dataset(dataset);
        Ok(metrics)
    }

    pub fn count_fragments(&self) -> usize {
        self.dataset.count_fragments()
        self.dataset.lock().expect("lock poison").count_fragments()
    }

    pub async fn count_deleted_rows(&self) -> Result<usize> {
        Ok(self.dataset.count_deleted_rows().await?)
        let dataset = self.clone_inner_dataset();
        Ok(dataset.count_deleted_rows().await?)
    }

    pub async fn num_small_files(&self, max_rows_per_group: usize) -> usize {
        self.dataset.num_small_files(max_rows_per_group).await
        let dataset = self.clone_inner_dataset();
        dataset.num_small_files(max_rows_per_group).await
    }

    pub async fn count_indexed_rows(&self, index_uuid: &str) -> Result<Option<usize>> {
@@ -439,8 +558,8 @@ impl Table {
    }

    pub async fn load_indices(&self) -> Result<Vec<VectorIndex>> {
        let (indices, mf) =
            futures::try_join!(self.dataset.load_indices(), self.dataset.latest_manifest())?;
        let dataset = self.clone_inner_dataset();
        let (indices, mf) = futures::try_join!(dataset.load_indices(), dataset.latest_manifest())?;
        Ok(indices
            .iter()
            .map(|i| VectorIndex::new_from_format(&mf, i))
@@ -456,10 +575,8 @@ impl Table {
        if index.is_none() {
            return Ok(None);
        }
        let index_stats = self
            .dataset
            .index_statistics(&index.unwrap().index_name)
            .await?;
        let dataset = self.clone_inner_dataset();
        let index_stats = dataset.index_statistics(&index.unwrap().index_name).await?;
        let index_stats: VectorIndexStatistics =
            serde_json::from_str(&index_stats).map_err(|e| Error::Lance {
                message: format!(
@@ -470,6 +587,117 @@ impl Table {

        Ok(Some(index_stats))
    }

    pub(crate) fn reset_dataset(&self, dataset: Dataset) {
        *self.dataset.lock().expect("lock poison") = dataset;
    }
}

#[async_trait::async_trait]
impl Table for NativeTable {
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    fn as_native(&self) -> Option<&NativeTable> {
        Some(self)
    }

    fn name(&self) -> &str {
        self.name.as_str()
    }

    fn schema(&self) -> SchemaRef {
        let lance_schema = { self.dataset.lock().expect("lock poison").schema().clone() };
        Arc::new(Schema::from(&lance_schema))
    }

    async fn count_rows(&self) -> Result<usize> {
        let dataset = { self.dataset.lock().expect("lock poison").clone() };
        Ok(dataset.count_rows().await?)
    }
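Note the extra braces in `schema` and `count_rows`: the clone happens inside its own block so the `MutexGuard` is dropped before any `.await`, which keeps the returned future `Send`. A minimal sketch of the idiom, assuming a `dataset: Arc<Mutex<Dataset>>` is in scope:

// The guard must not live across an await point: holding it there
// would make the future !Send and risk blocking other handles.
let snapshot = { dataset.lock().expect("lock poison").clone() };
let n = snapshot.count_rows().await?;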

    async fn add(
        &self,
        batches: Box<dyn RecordBatchReader + Send>,
        params: Option<WriteParams>,
    ) -> Result<()> {
        let params = Some(params.unwrap_or(WriteParams {
            mode: WriteMode::Append,
            ..WriteParams::default()
        }));

        // patch the params if we have a write store wrapper
        let params = match self.store_wrapper.clone() {
            Some(wrapper) => params.patch_with_store_wrapper(wrapper)?,
            None => params,
        };

        self.reset_dataset(Dataset::write(batches, &self.uri, params).await?);
        Ok(())
    }

    fn create_index(&self, columns: &[&str]) -> IndexBuilder {
        IndexBuilder::new(Arc::new(self.clone()), columns)
    }
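`create_index` now hands back a fluent `IndexBuilder` rather than consuming a `VectorIndexBuilder`; the rewritten test near the end of this diff drives it like so:

// Build an IVF-PQ vector index over `embeddings` with 256 IVF partitions.
table
    .create_index(&["embeddings"])
    .ivf_pq()
    .name("my_index")
    .num_partitions(256)
    .build()
    .await?;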

    fn query(&self) -> Query {
        Query::new(Arc::new(self.dataset.lock().expect("lock poison").clone()))
    }

    /// Delete rows from the table
    async fn delete(&self, predicate: &str) -> Result<()> {
        let mut dataset = self.clone_inner_dataset();
        dataset.delete(predicate).await?;
        self.reset_dataset(dataset);
        Ok(())
    }

    async fn optimize(&self, action: OptimizeAction) -> Result<OptimizeStats> {
        let mut stats = OptimizeStats {
            compaction: None,
            prune: None,
        };
        match action {
            OptimizeAction::All => {
                stats.compaction = self
                    .optimize(OptimizeAction::Compact {
                        options: CompactionOptions::default(),
                        remap_options: None,
                    })
                    .await?
                    .compaction;
                stats.prune = self
                    .optimize(OptimizeAction::Prune {
                        older_than: Duration::days(7),
                        delete_unverified: None,
                    })
                    .await?
                    .prune;
                self.optimize(OptimizeAction::Index(OptimizeOptions::default()))
                    .await?;
            }
            OptimizeAction::Compact {
                options,
                remap_options,
            } => {
                stats.compaction = Some(self.compact_files(options, remap_options).await?);
            }
            OptimizeAction::Prune {
                older_than,
                delete_unverified,
            } => {
                stats.prune = Some(
                    self.cleanup_old_versions(older_than, delete_unverified)
                        .await?,
                );
            }
            OptimizeAction::Index(options) => {
                self.optimize_indices(&options).await?;
            }
        }
        Ok(stats)
    }
}
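`optimize` is the single dispatch point for table maintenance; a caller wanting the full pipeline would, under these definitions, run:

// Compact small data files, prune versions older than the 7-day default,
// then fold newly written rows into the existing indices.
let stats = table.optimize(OptimizeAction::All).await?;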

#[cfg(test)]
@@ -487,14 +715,11 @@ mod tests {
    use arrow_schema::{DataType, Field, Schema, TimeUnit};
    use futures::TryStreamExt;
    use lance::dataset::{Dataset, WriteMode};
    use lance::index::vector::pq::PQBuildParams;
    use lance::io::object_store::{ObjectStoreParams, WrappingObjectStore};
    use lance_index::vector::ivf::IvfBuildParams;
    use lance::io::{ObjectStoreParams, WrappingObjectStore};
    use rand::Rng;
    use tempfile::tempdir;

    use super::*;
    use crate::index::vector::IvfPQIndexBuilder;

    #[tokio::test]
    async fn test_open() {
@@ -506,7 +731,9 @@ mod tests {
            .await
            .unwrap();

        let table = Table::open(dataset_path.to_str().unwrap()).await.unwrap();
        let table = NativeTable::open(dataset_path.to_str().unwrap())
            .await
            .unwrap();

        assert_eq!(table.name, "test")
    }
@@ -515,7 +742,7 @@ mod tests {
    async fn test_open_not_found() {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();
        let table = Table::open(uri).await;
        let table = NativeTable::open(uri).await;
        assert!(matches!(table.unwrap_err(), Error::TableNotFound { .. }));
    }
@@ -535,12 +762,12 @@ mod tests {

        let batches = make_test_batches();
        let _ = batches.schema().clone();
        Table::create(&uri, "test", batches, None, None)
        NativeTable::create(&uri, "test", batches, None, None)
            .await
            .unwrap();

        let batches = make_test_batches();
        let result = Table::create(&uri, "test", batches, None, None).await;
        let result = NativeTable::create(&uri, "test", batches, None, None).await;
        assert!(matches!(
            result.unwrap_err(),
            Error::TableAlreadyExists { .. }
@@ -554,7 +781,7 @@ mod tests {

        let batches = make_test_batches();
        let schema = batches.schema().clone();
        let mut table = Table::create(&uri, "test", batches, None, None)
        let table = NativeTable::create(&uri, "test", batches, None, None)
            .await
            .unwrap();
        assert_eq!(table.count_rows().await.unwrap(), 10);
@@ -570,7 +797,7 @@ mod tests {
            schema.clone(),
        );

        table.add(new_batches, None).await.unwrap();
        table.add(Box::new(new_batches), None).await.unwrap();
        assert_eq!(table.count_rows().await.unwrap(), 20);
        assert_eq!(table.name, "test");
    }
@@ -582,7 +809,7 @@ mod tests {

        let batches = make_test_batches();
        let schema = batches.schema().clone();
        let mut table = Table::create(uri, "test", batches, None, None)
        let table = NativeTable::create(uri, "test", batches, None, None)
            .await
            .unwrap();
        assert_eq!(table.count_rows().await.unwrap(), 10);
@@ -603,7 +830,7 @@ mod tests {
            ..Default::default()
        };

        table.add(new_batches, Some(param)).await.unwrap();
        table.add(Box::new(new_batches), Some(param)).await.unwrap();
        assert_eq!(table.count_rows().await.unwrap(), 10);
        assert_eq!(table.name, "test");
    }
@@ -636,7 +863,7 @@ mod tests {
        );

        Dataset::write(record_batch_iter, uri, None).await.unwrap();
        let mut table = Table::open(uri).await.unwrap();
        let table = NativeTable::open(uri).await.unwrap();

        table
            .update(Some("id > 5"), vec![("name", "'foo'")])
@@ -768,7 +995,7 @@ mod tests {
        );

        Dataset::write(record_batch_iter, uri, None).await.unwrap();
        let mut table = Table::open(uri).await.unwrap();
        let table = NativeTable::open(uri).await.unwrap();

        // check it can do update for each type
        let updates: Vec<(&str, &str)> = vec![
@@ -874,24 +1101,6 @@ mod tests {
        }
    }

    #[tokio::test]
    async fn test_search() {
        let tmp_dir = tempdir().unwrap();
        let dataset_path = tmp_dir.path().join("test.lance");
        let uri = dataset_path.to_str().unwrap();

        let batches = make_test_batches();
        Dataset::write(batches, dataset_path.to_str().unwrap(), None)
            .await
            .unwrap();

        let table = Table::open(uri).await.unwrap();

        let vector = Float32Array::from_iter_values([0.1, 0.2]);
        let query = table.search(Some(vector.clone()));
        assert_eq!(vector, query.query_vector.unwrap());
    }

    #[derive(Default, Debug)]
    struct NoOpCacheWrapper {
        called: AtomicBool,
@@ -933,7 +1142,7 @@ mod tests {
            ..Default::default()
        };
        assert!(!wrapper.called());
        let _ = Table::open_with_params(uri, "test", None, param)
        let _ = NativeTable::open_with_params(uri, "test", None, param)
            .await
            .unwrap();
        assert!(wrapper.called());
@@ -987,23 +1196,23 @@ mod tests {
            schema,
        );

        let mut table = Table::create(uri, "test", batches, None, None)
        let table = NativeTable::create(uri, "test", batches, None, None)
            .await
            .unwrap();
        let mut i = IvfPQIndexBuilder::new();

        assert_eq!(table.count_indexed_rows("my_index").await.unwrap(), None);
        assert_eq!(table.count_unindexed_rows("my_index").await.unwrap(), None);

        let index_builder = i
            .column("embeddings".to_string())
            .index_name("my_index".to_string())
            .ivf_params(IvfBuildParams::new(256))
            .pq_params(PQBuildParams::default());
        table
            .create_index(&["embeddings"])
            .ivf_pq()
            .name("my_index")
            .num_partitions(256)
            .build()
            .await
            .unwrap();

        table.create_index(index_builder).await.unwrap();

        assert_eq!(table.dataset.load_indices().await.unwrap().len(), 1);
        assert_eq!(table.load_indices().await.unwrap().len(), 1);
        assert_eq!(table.count_rows().await.unwrap(), 512);
        assert_eq!(table.name, "test");

@@ -1,9 +1,9 @@
use std::sync::Arc;

use lance::{
    dataset::{ReadParams, WriteParams},
    io::object_store::{ObjectStoreParams, WrappingObjectStore},
};
use arrow_schema::Schema;

use lance::dataset::{ReadParams, WriteParams};
use lance::io::{ObjectStoreParams, WrappingObjectStore};

use crate::error::{Error, Result};

@@ -65,3 +65,86 @@ impl PatchReadParam for ReadParams {
        Ok(self)
    }
}

/// Find one default column to create index.
pub(crate) fn default_vector_column(schema: &Schema, dim: Option<i32>) -> Result<String> {
    // Try to find one fixed size list array column.
    let candidates = schema
        .fields()
        .iter()
        .filter_map(|field| match field.data_type() {
            arrow_schema::DataType::FixedSizeList(f, d)
                if f.data_type().is_floating()
                    && dim.map(|expect| *d == expect).unwrap_or(true) =>
            {
                Some(field.name())
            }
            _ => None,
        })
        .collect::<Vec<_>>();
    if candidates.is_empty() {
        Err(Error::Store {
            message: "No vector column found to create index".to_string(),
        })
    } else if candidates.len() != 1 {
        Err(Error::Store {
            message: format!(
                "More than one vector columns found, \
                please specify which column to create index: {:?}",
                candidates
            ),
        })
    } else {
        Ok(candidates[0].to_string())
    }
}
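A quick sketch of the helper's contract, reusing the `multi_vec_col` schema defined in the tests below (a 10-dimensional `vec` and a 50-dimensional `vec2`):

// Two float FixedSizeList columns make an unqualified lookup ambiguous,
// but passing the expected dimension pins the candidate down.
assert!(default_vector_column(&multi_vec_col, None).is_err());
assert_eq!(default_vector_column(&multi_vec_col, Some(50)).unwrap(), "vec2");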

#[cfg(test)]
mod tests {
    use super::*;

    use arrow_schema::{DataType, Field};

    #[test]
    fn test_guess_default_column() {
        let schema_no_vector = Schema::new(vec![
            Field::new("id", DataType::Int16, true),
            Field::new("tag", DataType::Utf8, false),
        ]);
        assert!(default_vector_column(&schema_no_vector, None)
            .unwrap_err()
            .to_string()
            .contains("No vector column"));

        let schema_with_vec_col = Schema::new(vec![
            Field::new("id", DataType::Int16, true),
            Field::new(
                "vec",
                DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float64, false)), 10),
                false,
            ),
        ]);
        assert_eq!(
            default_vector_column(&schema_with_vec_col, None).unwrap(),
            "vec"
        );

        let multi_vec_col = Schema::new(vec![
            Field::new("id", DataType::Int16, true),
            Field::new(
                "vec",
                DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float64, false)), 10),
                false,
            ),
            Field::new(
                "vec2",
                DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float64, false)), 50),
                false,
            ),
        ]);
        assert!(default_vector_column(&multi_vec_col, None)
            .unwrap_err()
            .to_string()
            .contains("More than one"));
    }
}