Mirror of https://github.com/lancedb/lancedb.git (synced 2025-12-23 13:29:57 +00:00)
Compare commits
49 Commits
python-v0. ... python-v0.
| SHA1 |
|---|
| 81f2cdf736 |
| d404a3590c |
| e688484bd3 |
| 3bcd61c8de |
| c76ec48603 |
| d974413745 |
| ec4f2fbd30 |
| 6375ea419a |
| 6689192cee |
| dbec598610 |
| 8f6e7ce4f3 |
| b482f41bf4 |
| 4dc7497547 |
| d744972f2f |
| 9bc320874a |
| 510d449167 |
| 356e89a800 |
| ae1cf4441d |
| 1ae08fe31d |
| a517629c65 |
| 553dae1607 |
| 9c7e00eec3 |
| a7d66032aa |
| 7fb8a732a5 |
| f393ac3b0d |
| ca83354780 |
| 272cbcad7a |
| 722fe1836c |
| d1983602c2 |
| 9148cd6d47 |
| 47dbb988bf |
| 6821536d44 |
| d6f0663671 |
| ea33b68c6c |
| 1453bf4e7a |
| abaf315baf |
| 14b9277ac1 |
| d621826b79 |
| 08c0803ae1 |
| 62632cb90b |
| 14566df213 |
| acfdf1b9cb |
| f95402af7c |
| d14c9b6d9e |
| c1af53b787 |
| 2a02d1394b |
| 085066d2a8 |
| adf1a38f4d |
| 294c33a42e |
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.4.11
+current_version = 0.4.13
 commit = True
 message = Bump version: {current_version} → {new_version}
 tag = True
6 .github/workflows/docs.yml vendored
@@ -24,10 +24,14 @@ jobs:
     environment:
       name: github-pages
       url: ${{ steps.deployment.outputs.page_url }}
-    runs-on: ubuntu-22.04
+    runs-on: buildjet-8vcpu-ubuntu-2204
     steps:
       - name: Checkout
         uses: actions/checkout@v4
+      - name: Install dependecies needed for ubuntu
+        run: |
+          sudo apt install -y protobuf-compiler libssl-dev
+          rustup update && rustup default
       - name: Set up Python
         uses: actions/setup-python@v5
         with:
13 .github/workflows/docs_test.yml vendored
@@ -24,16 +24,22 @@ env:
 jobs:
   test-python:
     name: Test doc python code
-    runs-on: "ubuntu-latest"
+    runs-on: "buildjet-8vcpu-ubuntu-2204"
     steps:
       - name: Checkout
         uses: actions/checkout@v4
+      - name: Install dependecies needed for ubuntu
+        run: |
+          sudo apt install -y protobuf-compiler libssl-dev
+          rustup update && rustup default
       - name: Set up Python
         uses: actions/setup-python@v5
         with:
           python-version: 3.11
           cache: "pip"
           cache-dependency-path: "docs/test/requirements.txt"
+      - name: Rust cache
+        uses: swatinem/rust-cache@v2
       - name: Build Python
         working-directory: docs/test
         run:
@@ -48,8 +54,8 @@ jobs:
           for d in *; do cd "$d"; echo "$d".py; python "$d".py; cd ..; done
   test-node:
     name: Test doc nodejs code
-    runs-on: "ubuntu-latest"
-    timeout-minutes: 45
+    runs-on: "buildjet-8vcpu-ubuntu-2204"
+    timeout-minutes: 60
     strategy:
       fail-fast: false
     steps:
@@ -65,6 +71,7 @@ jobs:
       - name: Install dependecies needed for ubuntu
         run: |
           sudo apt install -y protobuf-compiler libssl-dev
+          rustup update && rustup default
       - name: Rust cache
         uses: swatinem/rust-cache@v2
       - name: Install node dependencies
21 .github/workflows/node.yml vendored
@@ -24,27 +24,6 @@ env:
   RUST_BACKTRACE: "1"

 jobs:
-  lint:
-    name: Lint
-    runs-on: ubuntu-22.04
-    defaults:
-      run:
-        shell: bash
-        working-directory: node
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          lfs: true
-      - uses: actions/setup-node@v3
-        with:
-          node-version: 20
-          cache: 'npm'
-          cache-dependency-path: node/package-lock.json
-      - name: Lint
-        run: |
-          npm ci
-          npm run lint
   linux:
     name: Linux (Node ${{ matrix.node-version }})
     timeout-minutes: 30
2 .github/workflows/nodejs.yml vendored
@@ -49,6 +49,7 @@ jobs:
           cargo clippy --all --all-features -- -D warnings
           npm ci
           npm run lint
+          npm run chkformat
   linux:
     name: Linux (NodeJS ${{ matrix.node-version }})
     timeout-minutes: 30
@@ -111,4 +112,3 @@ jobs:
       - name: Test
         run: |
           npm run test
-
6 .github/workflows/python.yml vendored
@@ -33,7 +33,7 @@ jobs:
           python-version: "3.11"
       - name: Install ruff
         run: |
-          pip install ruff
+          pip install ruff==0.2.2
       - name: Format check
         run: ruff format --check .
       - name: Lint
@@ -66,7 +66,7 @@ jobs:
       - name: Install
         run: |
           pip install -e .[tests,dev,embeddings]
-          pip install tantivy@git+https://github.com/quickwit-oss/tantivy-py#164adc87e1a033117001cf70e38c82a53014d985
+          pip install tantivy
           pip install mlx
       - name: Doctest
         run: pytest --doctest-modules python/lancedb
@@ -188,6 +188,6 @@ jobs:
         run: |
           pip install "pydantic<2"
           pip install -e .[tests]
-          pip install tantivy@git+https://github.com/quickwit-oss/tantivy-py#164adc87e1a033117001cf70e38c82a53014d985
+          pip install tantivy
       - name: Run tests
         run: pytest -m "not slow" -x -v --durations=30 python/tests
2 .github/workflows/run_tests/action.yml vendored
@@ -11,7 +11,7 @@ runs:
     - name: Install lancedb
       shell: bash
       run: |
-        pip3 install $(ls target/wheels/lancedb-*.whl)[tests,dev,embeddings]
+        pip3 install $(ls target/wheels/lancedb-*.whl)[tests,dev]
     - name: pytest
       shell: bash
       run: pytest -m "not slow" -x -v --durations=30 python/python/tests
1 .github/workflows/rust.yml vendored
@@ -119,3 +119,4 @@ jobs:
           $env:VCPKG_ROOT = $env:VCPKG_INSTALLATION_ROOT
           cargo build
           cargo test
+
2 .gitignore vendored
@@ -39,4 +39,6 @@ dist
 ## Rust
 target

+**/sccache.log
+
 Cargo.lock
@@ -5,17 +5,14 @@ repos:
       - id: check-yaml
       - id: end-of-file-fixer
       - id: trailing-whitespace
-  - repo: https://github.com/psf/black
-    rev: 22.12.0
-    hooks:
-      - id: black
   - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
-    rev: v0.0.277
+    rev: v0.2.2
     hooks:
       - id: ruff
-  - repo: https://github.com/pycqa/isort
-    rev: 5.12.0
+  - repo: https://github.com/pre-commit/mirrors-prettier
+    rev: v3.1.0
     hooks:
-      - id: isort
-        name: isort (python)
+      - id: prettier
+        files: "nodejs/.*"
+        exclude: nodejs/lancedb/native.d.ts|nodejs/dist/.*
11 Cargo.toml
@@ -14,10 +14,10 @@ keywords = ["lancedb", "lance", "database", "vector", "search"]
 categories = ["database-implementations"]

 [workspace.dependencies]
-lance = { "version" = "=0.10.1", "features" = ["dynamodb"] }
-lance-index = { "version" = "=0.10.1" }
-lance-linalg = { "version" = "=0.10.1" }
-lance-testing = { "version" = "=0.10.1" }
+lance = { "version" = "=0.10.4", "features" = ["dynamodb"] }
+lance-index = { "version" = "=0.10.4" }
+lance-linalg = { "version" = "=0.10.4" }
+lance-testing = { "version" = "=0.10.4" }
 # Note that this one does not include pyarrow
 arrow = { version = "50.0", optional = false }
 arrow-array = "50.0"
@@ -28,13 +28,14 @@ arrow-schema = "50.0"
 arrow-arith = "50.0"
 arrow-cast = "50.0"
 async-trait = "0"
-chrono = "0.4.23"
+chrono = "0.4.35"
 half = { "version" = "=2.3.1", default-features = false, features = [
     "num-traits",
 ] }
 futures = "0"
 log = "0.4"
 object_store = "0.9.0"
 pin-project = "1.0.7"
 snafu = "0.7.4"
 url = "2"
 num-traits = "0.2"
@@ -7,20 +7,11 @@ for brute-force scanning of the entire vector space.
 A vector index is faster but less accurate than exhaustive search (kNN or flat search).
 LanceDB provides many parameters to fine-tune the index's size, the speed of queries, and the accuracy of results.

-Currently, LanceDB does _not_ automatically create the ANN index.
-LanceDB has optimized code for kNN as well. For many use-cases, datasets under 100K vectors won't require index creation at all.
-If you can live with <100ms latency, skipping index creation is a simpler workflow while guaranteeing 100% recall.
+## Disk-based Index

-In the future we will look to automatically create and configure the ANN index as data comes in.
-
-## Types of Index
-
-Lance can support multiple index types, the most widely used one is `IVF_PQ`.
-
-- `IVF_PQ`: use **Inverted File Index (IVF)** to first divide the dataset into `N` partitions,
-  and then use **Product Quantization** to compress vectors in each partition.
-- `DiskANN` (**Experimental**): organize the vector as a on-disk graph, where the vertices approximately
-  represent the nearest neighbors of each vector.
+Lance provides an `IVF_PQ` disk-based index. It uses **Inverted File Index (IVF)** to first divide
+the dataset into `N` partitions, and then applies **Product Quantization** to compress vectors in each partition.
+See the [indexing](concepts/index_ivfpq.md) concepts guide for more information on how this works.

 ## Creating an IVF_PQ Index
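As a companion to the `IVF_PQ` description in this hunk, a minimal Python sketch of building such an index; the path, table name, and data below are illustrative, not taken from the diff:

```python
import lancedb
import numpy as np

db = lancedb.connect("/tmp/lancedb-demo")  # illustrative path

# Illustrative data: 10K random 256-dim vectors
data = [{"id": i, "vector": np.random.rand(256).tolist()} for i in range(10_000)]
tbl = db.create_table("vectors", data=data)

# IVF_PQ index: divide the data into 256 partitions, then compress each
# vector into 16 PQ sub-vectors (256 dims / 16 = 16 dims per sub-vector)
tbl.create_index(num_partitions=256, num_sub_vectors=16)
```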
@@ -88,7 +79,7 @@ You can specify the GPU device to train IVF partitions via
     )
     ```

-=== "Macos"
+=== "MacOS"

     <!-- skip-test -->
     ```python
@@ -100,7 +91,7 @@ You can specify the GPU device to train IVF partitions via
     )
     ```

-Trouble shootings:
+Troubleshooting:

 If you see `AssertionError: Torch not compiled with CUDA enabled`, you need to [install
 PyTorch with CUDA support](https://pytorch.org/get-started/locally/).
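The GPU training this section documents maps onto the `accelerator` argument in Python; a sketch continuing the earlier one (device strings follow PyTorch conventions, and CUDA-enabled PyTorch must be installed):

```python
# Train IVF partitions on the GPU; use accelerator="mps" on MacOS
tbl.create_index(
    num_partitions=256,
    num_sub_vectors=16,
    accelerator="cuda",
)
```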
@@ -187,13 +178,21 @@ You can select the columns returned by the query using a select clause.

 ## FAQ

+### Why do I need to manually create an index?
+
+Currently, LanceDB does _not_ automatically create the ANN index.
+LanceDB is well-optimized for kNN (exhaustive search) via a disk-based index. For many use-cases,
+datasets of the order of ~100K vectors don't require index creation. If you can live with up to
+100ms latency, skipping index creation is a simpler workflow while guaranteeing 100% recall.
+
 ### When is it necessary to create an ANN vector index?

-`LanceDB` has manually-tuned SIMD code for computing vector distances.
-In our benchmarks, computing 100K pairs of 1K dimension vectors takes **less than 20ms**.
-For small datasets (< 100K rows) or applications that can accept 100ms latency, vector indices are usually not necessary.
+`LanceDB` comes out-of-the-box with highly optimized SIMD code for computing vector similarity.
+In our benchmarks, computing distances for 100K pairs of 1K dimension vectors takes **less than 20ms**.
+We observe that for small datasets (~100K rows) or for applications that can accept 100ms latency,
+vector indices are usually not necessary.

-For large-scale or higher dimension vectors, it is beneficial to create vector index.
+For large-scale or higher dimension vectors, it can beneficial to create vector index for performance.

 ### How big is my index, and how many memory will it take?
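To make the FAQ's index-free workflow concrete, a flat (exhaustive) kNN query, continuing the sketch above; results are exact, i.e. 100% recall:

```python
query = np.random.rand(256).tolist()  # illustrative query embedding
exact_hits = tbl.search(query).limit(10).to_pandas()
```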
@@ -46,7 +46,7 @@

 !!! info "Please also make sure you're using the same version of Arrow as in the [vectordb crate](https://github.com/lancedb/lancedb/blob/main/Cargo.toml)"

-## How to connect to a database
+## Connect to a database

 === "Python"
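The Python tab of this section is essentially a one-liner; a minimal sketch with an illustrative path:

```python
import lancedb

db = lancedb.connect("./data/sample-lancedb")
# The guide mentions db.uri(); in Python the uri is exposed as a
# property (an assumption here, not stated by this diff)
print(db.uri)
```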
@@ -69,17 +69,22 @@
     ```rust
     #[tokio::main]
     async fn main() -> Result<()> {
-    --8<-- "rust/vectordb/examples/simple.rs:connect"
+    --8<-- "rust/lancedb/examples/simple.rs:connect"
     }
     ```

-!!! info "See [examples/simple.rs](https://github.com/lancedb/lancedb/tree/main/rust/vectordb/examples/simple.rs) for a full working example."
+!!! info "See [examples/simple.rs](https://github.com/lancedb/lancedb/tree/main/rust/lancedb/examples/simple.rs) for a full working example."

 LanceDB will create the directory if it doesn't exist (including parent directories).

 If you need a reminder of the uri, you can call `db.uri()`.

-## How to create a table
+## Create a table

+### Directly insert data to a new table
+
+If you have data to insert into the table at creation time, you can simultaneously create a
+table and insert the data to it.
+
 === "Python"
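A sketch of the create-with-data flow this hunk describes, on the Python side (the schema is inferred from the rows; data is illustrative):

```python
data = [
    {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
    {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
]
tbl = db.create_table("my_table", data=data)
```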
@@ -118,17 +123,18 @@ If you need a reminder of the uri, you can call `db.uri()`.
     use arrow_schema::{DataType, Schema, Field};
     use arrow_array::{RecordBatch, RecordBatchIterator};

-    --8<-- "rust/vectordb/examples/simple.rs:create_table"
+    --8<-- "rust/lancedb/examples/simple.rs:create_table"
     ```

 If the table already exists, LanceDB will raise an error by default.

-!!! info "Under the hood, LanceDB is converting the input data into an Apache Arrow table and persisting it to disk in [Lance format](https://www.github.com/lancedb/lance)."
+!!! info "Under the hood, LanceDB converts the input data into an Apache Arrow table and persists it to disk using the [Lance format](https://www.github.com/lancedb/lance)."

-### Creating an empty table
+### Create an empty table

 Sometimes you may not have the data to insert into the table at creation time.
-In this case, you can create an empty table and specify the schema.
+In this case, you can create an empty table and specify the schema, so that you can add
+data to the table at a later time (such that it conforms to the schema).

 === "Python"
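And the empty-table variant, a sketch using an explicit PyArrow schema (field names illustrative):

```python
import pyarrow as pa

schema = pa.schema([
    pa.field("vector", pa.list_(pa.float32(), list_size=2)),
    pa.field("item", pa.utf8()),
])
empty_tbl = db.create_table("empty_table", schema=schema)
```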
@@ -147,12 +153,12 @@ In this case, you can create an empty table and specify the schema.
 === "Rust"

     ```rust
-    --8<-- "rust/vectordb/examples/simple.rs:create_empty_table"
+    --8<-- "rust/lancedb/examples/simple.rs:create_empty_table"
     ```

-## How to open an existing table
+## Open an existing table

-Once created, you can open a table using the following code:
+Once created, you can open a table as follows:

 === "Python"
@@ -169,7 +175,7 @@ Once created, you can open a table using the following code:
 === "Rust"

     ```rust
-    --8<-- "rust/vectordb/examples/simple.rs:open_with_existing_file"
+    --8<-- "rust/lancedb/examples/simple.rs:open_with_existing_file"
     ```

 If you forget the name of your table, you can always get a listing of all table names:
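In Python, opening and listing tables looks like this (a sketch continuing the examples above):

```python
tbl = db.open_table("my_table")
print(db.table_names())  # e.g. ['empty_table', 'my_table']
```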
@@ -189,12 +195,12 @@ If you forget the name of your table, you can always get a listing of all table
 === "Rust"

     ```rust
-    --8<-- "rust/vectordb/examples/simple.rs:list_names"
+    --8<-- "rust/lancedb/examples/simple.rs:list_names"
     ```

-## How to add data to a table
+## Add data to a table

-After a table has been created, you can always add more data to it using
+After a table has been created, you can always add more data to it as follows:

 === "Python"
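A sketch of appending rows in Python; new rows must conform to the table's schema:

```python
tbl.add([{"vector": [1.3, 1.4], "item": "fizz", "price": 100.0}])
print(tbl.count_rows())
```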
@@ -219,12 +225,12 @@ After a table has been created, you can always add more data to it
 === "Rust"

     ```rust
-    --8<-- "rust/vectordb/examples/simple.rs:add"
+    --8<-- "rust/lancedb/examples/simple.rs:add"
     ```

-## How to search for (approximate) nearest neighbors
+## Search for nearest neighbors

-Once you've embedded the query, you can find its nearest neighbors using the following code:
+Once you've embedded the query, you can find its nearest neighbors as follows:

 === "Python"
@@ -245,11 +251,12 @@ Once you've embedded the query, you can find its nearest neighbors using the fol
     ```rust
     use futures::TryStreamExt;

-    --8<-- "rust/vectordb/examples/simple.rs:search"
+    --8<-- "rust/lancedb/examples/simple.rs:search"
     ```

 By default, LanceDB runs a brute-force scan over dataset to find the K nearest neighbours (KNN).
 For tables with more than 50K vectors, creating an ANN index is recommended to speed up search performance.
+LanceDB allows you to create an ANN index on a table as follows:

 === "Python"
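A Python sketch of a vector query with a few of the builder options (distance metric and column selection; values illustrative):

```python
hits = (
    tbl.search([100.0, 102.0])
    .metric("cosine")           # default distance is "l2"
    .select(["item", "price"])  # return only these columns
    .limit(3)
    .to_pandas()
)
```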
@@ -266,12 +273,17 @@ For tables with more than 50K vectors, creating an ANN index is recommended to s
 === "Rust"

     ```rust
-    --8<-- "rust/vectordb/examples/simple.rs:create_index"
+    --8<-- "rust/lancedb/examples/simple.rs:create_index"
     ```

-Check [Approximate Nearest Neighbor (ANN) Indexes](/ann_indices.md) section for more details.
+!!! note "Why do I need to create an index manually?"
+    LanceDB does not automatically create the ANN index, for two reasons. The first is that it's optimized
+    for really fast retrievals via a disk-based index, and the second is that data and query workloads can
+    be very diverse, so there's no one-size-fits-all index configuration. LanceDB provides many parameters
+    to fine-tune index size, query latency and accuracy. See the section on
+    [ANN indexes](ann_indexes.md) for more details.

-## How to delete rows from a table
+## Delete rows from a table

 Use the `delete()` method on tables to delete rows from a table. To choose
 which rows to delete, provide a filter that matches on the metadata columns.
@@ -292,7 +304,7 @@ This can delete any number of rows that match the filter.
 === "Rust"

     ```rust
-    --8<-- "rust/vectordb/examples/simple.rs:delete"
+    --8<-- "rust/lancedb/examples/simple.rs:delete"
     ```

 The deletion predicate is a SQL expression that supports the same expressions
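A sketch of the Python side of `delete()`; the predicate is a SQL expression over metadata columns:

```python
tbl.delete('item = "fizz"')
```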
@@ -307,7 +319,7 @@ To see what expressions are supported, see the [SQL filters](sql.md) section.

 Read more: [vectordb.Table.delete](javascript/interfaces/Table.md#delete)

-## How to remove a table
+## Drop a table

 Use the `drop_table()` method on the database to remove a table.
@@ -333,7 +345,7 @@ Use the `drop_table()` method on the database to remove a table.
 === "Rust"

     ```rust
-    --8<-- "rust/vectordb/examples/simple.rs:drop_table"
+    --8<-- "rust/lancedb/examples/simple.rs:drop_table"
     ```

 !!! note "Bundling `vectordb` apps with Webpack"
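And dropping a table from Python (a sketch):

```python
db.drop_table("my_table")
```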
@@ -31,7 +31,7 @@ As an example, consider starting with 128-dimensional vector consisting of 32-bi

 While PQ helps with reducing the size of the index, IVF primarily addresses search performance. The primary purpose of an inverted file index is to facilitate rapid and effective nearest neighbor search by narrowing down the search space.

-In IVF, the PQ vector space is divided into *Voronoi cells*, which are essentially partitions that consist of all the points in the space that are within a threshold distance of the given region's seed point. These seed points are used to create an inverted index that correlates each centroid with a list of vectors in the space, allowing a search to be restricted to just a subset of vectors in the index.
+In IVF, the PQ vector space is divided into *Voronoi cells*, which are essentially partitions that consist of all the points in the space that are within a threshold distance of the given region's seed point. These seed points are initialized by running K-means over the stored vectors. The centroids of K-means turn into the seed points which then each define a region. These regions are then are used to create an inverted index that correlates each centroid with a list of vectors in the space, allowing a search to be restricted to just a subset of vectors in the index.

 
@@ -81,24 +81,4 @@ The above query will perform a search on the table `tbl` using the given query v
 * `to_pandas()`: Convert the results to a pandas DataFrame

 And there you have it! You now understand what an IVF-PQ index is, and how to create and query it in LanceDB.

-## FAQ
-
-### When is it necessary to create a vector index?
-
-LanceDB has manually-tuned SIMD code for computing vector distances. In our benchmarks, computing 100K pairs of 1K dimension vectors takes **<20ms**. For small datasets (<100K rows) or applications that can accept up to 100ms latency, vector indices are usually not necessary.
-
-For large-scale or higher dimension vectors, it is beneficial to create vector index.
-
-### How big is my index, and how much memory will it take?
-
-In LanceDB, all vector indices are disk-based, meaning that when responding to a vector query, only the relevant pages from the index file are loaded from disk and cached in memory. Additionally, each sub-vector is usually encoded into 1 byte PQ code.
-
-For example, with 1024-dimension vectors, if we choose `num_sub_vectors = 64`, each sub-vector has `1024 / 64 = 16` float32 numbers. Product quantization can lead to approximately `16 * sizeof(float32) / 1 = 64` times of space reduction.
-
-### How to choose `num_partitions` and `num_sub_vectors` for IVF_PQ index?
-
-`num_partitions` is used to decide how many partitions the first level IVF index uses. Higher number of partitions could lead to more efficient I/O during queries and better accuracy, but it takes much more time to train. On SIFT-1M dataset, our benchmark shows that keeping each partition 1K-4K rows lead to a good latency/recall.
-
-`num_sub_vectors` specifies how many PQ short codes to generate on each vector. Because PQ is a lossy compression of the original vector, a higher `num_sub_vectors` usually results in less space distortion, and thus yields better accuracy. However, a higher `num_sub_vectors` also causes heavier I/O and more PQ computation, and thus, higher latency. `dimension / num_sub_vectors` should be a multiple of 8 for optimum SIMD efficiency.
+To see how to create an IVF-PQ index in LanceDB, take a look at the [ANN indexes](../ann_indexes.md) section.
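The tuning guidance in the removed FAQ maps directly onto `create_index` arguments; a sketch assuming an illustrative table `tbl` of ~1M rows of 1024-dim vectors:

```python
# ~1M rows at 1K-4K rows per partition suggests a few hundred partitions;
# 1024 dims / 64 sub-vectors = 16 dims per code, a multiple of 8 for SIMD.
tbl.create_index(num_partitions=512, num_sub_vectors=64)
```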
@@ -47,6 +47,7 @@ LanceDB registers the OpenAI embeddings function in the registry by default, as
 | Parameter | Type | Default Value | Description |
 |---|---|---|---|
 | `name` | `str` | `"text-embedding-ada-002"` | The name of the model. |
+| `dim` | `int` | Model default | For OpenAI's newer text-embedding-3 model, we can specify a dimensionality that is smaller than the 1536 size. This feature supports it |

 ```python
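A sketch of the newly documented `dim` parameter in use; the model name is from OpenAI's text-embedding-3 family, and an `OPENAI_API_KEY` is assumed to be set:

```python
from lancedb.embeddings import EmbeddingFunctionRegistry

registry = EmbeddingFunctionRegistry.get_instance()
# dim only applies to the text-embedding-3 models (smaller than the 1536 default)
func = registry.get("openai").create(name="text-embedding-3-small", dim=256)
```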
@@ -175,7 +176,8 @@ Supported Embedding modelIDs are:
 * `cohere.embed-english-v3`
 * `cohere.embed-multilingual-v3`

-Supported paramters (to be passed in `create` method) are:
+Supported parameters (to be passed in `create` method) are:

 | Parameter | Type | Default Value | Description |
 |---|---|---|---|
 | **name** | str | "amazon.titan-embed-text-v1" | The model ID of the bedrock model to use. Supported base models for Text Embeddings: amazon.titan-embed-text-v1, cohere.embed-english-v3, cohere.embed-multilingual-v3 |
@@ -222,7 +224,6 @@ This embedding function supports ingesting images as both bytes and urls. You ca
 !!! info
     LanceDB supports ingesting images directly from accessible links.

-
 ```python

 db = lancedb.connect(tmp_path)
|
||||
|
||||
```
|
||||
|
||||
### Imagebind embeddings
|
||||
We have support for [imagebind](https://github.com/facebookresearch/ImageBind) model embeddings. You can download our version of the packaged model via - `pip install imagebind-packaged==0.1.2`.
|
||||
|
||||
This function is registered as `imagebind` and supports Audio, Video and Text modalities(extending to Thermal,Depth,IMU data):
|
||||
|
||||
| Parameter | Type | Default Value | Description |
|
||||
|---|---|---|---|
|
||||
| `name` | `str` | `"imagebind_huge"` | Name of the model. |
|
||||
| `device` | `str` | `"cpu"` | The device to run the model on. Can be `"cpu"` or `"gpu"`. |
|
||||
| `normalize` | `bool` | `False` | set to `True` to normalize your inputs before model ingestion. |
|
||||
|
||||
Below is an example demonstrating how the API works:
|
||||
|
||||
```python
|
||||
db = lancedb.connect(tmp_path)
|
||||
registry = EmbeddingFunctionRegistry.get_instance()
|
||||
func = registry.get("imagebind").create()
|
||||
|
||||
class ImageBindModel(LanceModel):
|
||||
text: str
|
||||
image_uri: str = func.SourceField()
|
||||
audio_path: str
|
||||
vector: Vector(func.ndims()) = func.VectorField()
|
||||
|
||||
# add locally accessible image paths
|
||||
text_list=["A dog.", "A car", "A bird"]
|
||||
image_paths=[".assets/dog_image.jpg", ".assets/car_image.jpg", ".assets/bird_image.jpg"]
|
||||
audio_paths=[".assets/dog_audio.wav", ".assets/car_audio.wav", ".assets/bird_audio.wav"]
|
||||
|
||||
# Load data
|
||||
inputs = [
|
||||
{"text": a, "audio_path": b, "image_uri": c}
|
||||
for a, b, c in zip(text_list, audio_paths, image_paths)
|
||||
]
|
||||
|
||||
#create table and add data
|
||||
table = db.create_table("img_bind", schema=ImageBindModel)
|
||||
table.add(inputs)
|
||||
```
|
||||
|
||||
Now, we can search using any modality:
|
||||
|
||||
#### image search
|
||||
```python
|
||||
query_image = "./assets/dog_image2.jpg" #download an image and enter that path here
|
||||
actual = table.search(query_image).limit(1).to_pydantic(ImageBindModel)[0]
|
||||
print(actual.text == "dog")
|
||||
```
|
||||
#### audio search
|
||||
|
||||
```python
|
||||
query_audio = "./assets/car_audio2.wav" #download an audio clip and enter path here
|
||||
actual = table.search(query_audio).limit(1).to_pydantic(ImageBindModel)[0]
|
||||
print(actual.text == "car")
|
||||
```
|
||||
#### Text search
|
||||
You can add any input query and fetch the result as follows:
|
||||
```python
|
||||
query = "an animal which flies and tweets"
|
||||
actual = table.search(query).limit(1).to_pydantic(ImageBindModel)[0]
|
||||
print(actual.text == "bird")
|
||||
```
|
||||
|
||||
If you have any questions about the embeddings API, supported models, or see a relevant model missing, please raise an issue [on GitHub](https://github.com/lancedb/lancedb/issues).
|
||||
|
||||
@@ -43,7 +43,7 @@ pip install lancedb
 We also need to install a specific commit of `tantivy`, a dependency of the LanceDB full text search engine we will use later in this guide:

 ```
-pip install tantivy@git+https://github.com/quickwit-oss/tantivy-py#164adc87e1a033117001cf70e38c82a53014d985
+pip install tantivy
 ```

 Create a new Python file and add the following code:
@@ -40,7 +40,7 @@ LanceDB and its underlying data format, Lance, are built to scale to really larg

 No. LanceDB is blazing fast (due to its disk-based index) for even brute force kNN search, within reason. In our benchmarks, computing 100K pairs of 1000-dimension vectors takes less than 20ms. For small datasets of ~100K records or applications that can accept ~100ms latency, an ANN index is usually not necessary.

-For large-scale (>1M) or higher dimension vectors, it is beneficial to create an ANN index.
+For large-scale (>1M) or higher dimension vectors, it is beneficial to create an ANN index. See the [ANN indexes](ann_indexes.md) section for more details.

 ### Does LanceDB support full-text search?
@@ -75,21 +75,40 @@ applied on top of the full text search results. This can be invoked via the fami
 table.search("puppy").limit(10).where("meta='foo'").to_list()
 ```

-## Syntax
+## Phrase queries vs. terms queries

-For full-text search you can perform either a phrase query like "the old man and the sea",
-or a structured search query like "(Old AND Man) AND Sea".
-Double quotes are used to disambiguate.
+For full-text search you can specify either a **phrase** query like `"the old man and the sea"`,
+or a **terms** search query like `"(Old AND Man) AND Sea"`. For more details on the terms
+query syntax, see Tantivy's [query parser rules](https://docs.rs/tantivy/latest/tantivy/query/struct.QueryParser.html).

-For example:
+!!! tip "Note"
+    The query parser will raise an exception on queries that are ambiguous. For example, in the query `they could have been dogs OR cats`, `OR` is capitalized so it's considered a keyword query operator. But it's ambiguous how the left part should be treated. So if you submit this search query as is, you'll get `Syntax Error: they could have been dogs OR cats`.

-If you intended "they could have been dogs OR cats" as a phrase query, this actually
-raises a syntax error since `OR` is a recognized operator. If you make `or` lower case,
-this avoids the syntax error. However, it is cumbersome to have to remember what will
-conflict with the query syntax. Instead, if you search using
-`table.search('"they could have been dogs OR cats"')`, then the syntax checker avoids
-checking inside the quotes.
+```py
+# This raises a syntax error
+table.search("they could have been dogs OR cats")
+```
+
+On the other hand, lowercasing `OR` to `or` will work, because there are no capitalized logical operators and
+the query is treated as a phrase query.
+
+```py
+# This works!
+table.search("they could have been dogs or cats")
+```
+
+It can be cumbersome to have to remember what will cause a syntax error depending on the type of
+query you want to perform. To make this simpler, when you want to perform a phrase query, you can
+enforce it in one of two ways:
+
+1. Place the double-quoted query inside single quotes. For example, `table.search('"they could have been dogs OR cats"')` is treated as
+   a phrase query.
+2. Explicitly declare the `phrase_query()` method. This is useful when you have a phrase query that
+   itself contains double quotes. For example, `table.search('the cats OR dogs were not really "pets" at all').phrase_query()`
+   is treated as a phrase query.
+
+In general, a query that's declared as a phrase query will be wrapped in double quotes during parsing, with nested
+double quotes replaced by single quotes.

 ## Configurations
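The two phrase-query forms described above, as a combined Python sketch (assumes `table` already has an FTS index):

```python
# 1) Single quotes around a double-quoted phrase
table.search('"they could have been dogs OR cats"').limit(5).to_list()

# 2) Explicit phrase_query(), for phrases that themselves contain double quotes
table.search('the cats OR dogs were not really "pets" at all').phrase_query().limit(5).to_list()
```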
@@ -13,7 +13,7 @@ Get started using these examples and quick links.
 | Integrations | |
 |---|---:|
 | <h3> LlamaIndex </h3>LlamaIndex is a simple, flexible data framework for connecting custom data sources to large language models. Llama index integrates with LanceDB as the serverless VectorDB. <h3>[Lean More](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html) </h3> |<img src="../assets/llama-index.jpg" alt="image" width="150" height="auto">|
-| <h3>Langchain</h3>Langchain allows building applications with LLMs through composability <h3>[Lean More](https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lancedb.html) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
+| <h3>Langchain</h3>Langchain allows building applications with LLMs through composability <h3>[Lean More](https://python.langchain.com/docs/integrations/vectorstores/lancedb) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
 | <h3>Langchain TS</h3> Javascript bindings for Langchain. It integrates with LanceDB's serverless vectordb allowing you to build powerful AI applications through composibility using only serverless functions. <h3>[Learn More]( https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/lancedb) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
 | <h3>Voxel51</h3> It is an open source toolkit that enables you to build better computer vision workflows by improving the quality of your datasets and delivering insights about your models.<h3>[Learn More](./voxel51.md) | <img src="../assets/voxel.gif" alt="image" width="150" height="auto">|
 | <h3>PromptTools</h3> Offers a set of free, open-source tools for testing and experimenting with models, prompts, and configurations. The core idea is to enable developers to evaluate prompts using familiar interfaces like code and notebooks. You can use it to experiment with different configurations of LanceDB, and test how LanceDB integrates with the LLM of your choice.<h3>[Learn More](./prompttools.md) | <img src="../assets/prompttools.jpeg" alt="image" width="150" height="auto">|
569 docs/src/notebooks/multi_modal_video_RAG.ipynb Normal file
File diff suppressed because one or more lines are too long
@@ -24,6 +24,12 @@ pip install lancedb

 ::: lancedb.query.LanceQueryBuilder

+::: lancedb.query.LanceVectorQueryBuilder
+
+::: lancedb.query.LanceFtsQueryBuilder
+
+::: lancedb.query.LanceHybridQueryBuilder
+
 ## Embeddings

 ::: lancedb.embeddings.registry.EmbeddingFunctionRegistry
@@ -62,10 +68,22 @@ pip install lancedb

 ## Integrations

-### Pydantic
+## Pydantic

 ::: lancedb.pydantic.pydantic_to_schema

 ::: lancedb.pydantic.vector

 ::: lancedb.pydantic.LanceModel

+## Reranking
+
+::: lancedb.rerankers.linear_combination.LinearCombinationReranker
+
+::: lancedb.rerankers.cohere.CohereReranker
+
+::: lancedb.rerankers.colbert.ColbertReranker
+
+::: lancedb.rerankers.cross_encoder.CrossEncoderReranker
+
+::: lancedb.rerankers.openai.OpenaiReranker
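A hedged Python sketch of how the newly listed rerankers are meant to slot into a query; the hybrid `query_type` and `rerank()` call follow the pattern in LanceDB's reranking docs, but treat the exact signatures as assumptions:

```python
from lancedb.rerankers import LinearCombinationReranker

reranker = LinearCombinationReranker(weight=0.7)  # blend of vector and FTS scores
hits = (
    tbl.search("puppy", query_type="hybrid")  # assumes embeddings + FTS index configured
    .rerank(reranker=reranker)
    .limit(10)
    .to_pandas()
)
```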
@@ -13,5 +13,10 @@ module.exports = {
   },
   rules: {
     "@typescript-eslint/method-signature-style": "off",
+    "@typescript-eslint/quotes": "off",
+    "@typescript-eslint/semi": "off",
+    "@typescript-eslint/explicit-function-return-type": "off",
+    "@typescript-eslint/space-before-function-paren": "off",
+    "@typescript-eslint/indent": "off",
   }
 }
117 node/package-lock.json generated
@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.4.11",
+  "version": "0.4.13",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.4.11",
+      "version": "0.4.13",
       "cpu": [
         "x64",
         "arm64"
@@ -18,9 +18,7 @@
         "win32"
       ],
       "dependencies": {
-        "@apache-arrow/ts": "^14.0.2",
         "@neon-rs/load": "^0.0.74",
-        "apache-arrow": "^14.0.2",
         "axios": "^1.4.0"
       },
       "devDependencies": {
@@ -33,6 +31,7 @@
         "@types/temp": "^0.9.1",
         "@types/uuid": "^9.0.3",
         "@typescript-eslint/eslint-plugin": "^5.59.1",
+        "apache-arrow-old": "npm:apache-arrow@13.0.0",
         "cargo-cp-artifact": "^0.1",
         "chai": "^4.3.7",
         "chai-as-promised": "^7.1.1",
@@ -53,11 +52,15 @@
         "uuid": "^9.0.0"
       },
       "optionalDependencies": {
-        "@lancedb/vectordb-darwin-arm64": "0.4.11",
-        "@lancedb/vectordb-darwin-x64": "0.4.11",
-        "@lancedb/vectordb-linux-arm64-gnu": "0.4.11",
-        "@lancedb/vectordb-linux-x64-gnu": "0.4.11",
-        "@lancedb/vectordb-win32-x64-msvc": "0.4.11"
+        "@lancedb/vectordb-darwin-arm64": "0.4.13",
+        "@lancedb/vectordb-darwin-x64": "0.4.13",
+        "@lancedb/vectordb-linux-arm64-gnu": "0.4.13",
+        "@lancedb/vectordb-linux-x64-gnu": "0.4.13",
+        "@lancedb/vectordb-win32-x64-msvc": "0.4.13"
       },
+      "peerDependencies": {
+        "@apache-arrow/ts": "^14.0.2",
+        "apache-arrow": "^14.0.2"
+      }
     },
     "node_modules/@75lb/deep-merge": {
@@ -93,6 +96,7 @@
       "version": "14.0.2",
       "resolved": "https://registry.npmjs.org/@apache-arrow/ts/-/ts-14.0.2.tgz",
       "integrity": "sha512-CtwAvLkK0CZv7xsYeCo91ml6PvlfzAmAJZkRYuz2GNBwfYufj5SVi0iuSMwIMkcU/szVwvLdzORSLa5PlF/2ug==",
+      "peer": true,
       "dependencies": {
         "@types/command-line-args": "5.2.0",
         "@types/command-line-usage": "5.0.2",
@@ -109,7 +113,8 @@
     "node_modules/@apache-arrow/ts/node_modules/@types/node": {
       "version": "20.3.0",
       "resolved": "https://registry.npmjs.org/@types/node/-/node-20.3.0.tgz",
-      "integrity": "sha512-cumHmIAf6On83X7yP+LrsEyUOf/YlociZelmpRYaGFydoaPdxdt80MAbu6vWerQT2COCp2nPvHdsbD7tHn/YlQ=="
+      "integrity": "sha512-cumHmIAf6On83X7yP+LrsEyUOf/YlociZelmpRYaGFydoaPdxdt80MAbu6vWerQT2COCp2nPvHdsbD7tHn/YlQ==",
+      "peer": true
     },
     "node_modules/@cargo-messages/android-arm-eabi": {
       "version": "0.0.160",
@@ -328,66 +333,6 @@
         "@jridgewell/sourcemap-codec": "^1.4.10"
       }
     },
-    "node_modules/@lancedb/vectordb-darwin-arm64": {
-      "version": "0.4.11",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.4.11.tgz",
-      "integrity": "sha512-JDOKmFnuJPFkA7ZmrzBJolROwSjWr7yMvAbi40uLBc25YbbVezodd30u2EFtIwWwtk1GqNYRZ49FZOElKYeC/Q==",
-      "cpu": [
-        "arm64"
-      ],
-      "optional": true,
-      "os": [
-        "darwin"
-      ]
-    },
-    "node_modules/@lancedb/vectordb-darwin-x64": {
-      "version": "0.4.11",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.4.11.tgz",
-      "integrity": "sha512-iy6r+8tp2v1EFgJV52jusXtxgO6NY6SkpOdX41xPqN2mQWMkfUAR9Xtks1mgknjPOIKH4MRc8ZS0jcW/UWmilQ==",
-      "cpu": [
-        "x64"
-      ],
-      "optional": true,
-      "os": [
-        "darwin"
-      ]
-    },
-    "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
-      "version": "0.4.11",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.4.11.tgz",
-      "integrity": "sha512-5K6IVcTMuH0SZBjlqB5Gg39WC889FpTwIWKufxzQMMXrzxo5J3lKUHVoR28RRlNhDF2d9kZXBEyCpIfDFsV9iQ==",
-      "cpu": [
-        "arm64"
-      ],
-      "optional": true,
-      "os": [
-        "linux"
-      ]
-    },
-    "node_modules/@lancedb/vectordb-linux-x64-gnu": {
-      "version": "0.4.11",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.4.11.tgz",
-      "integrity": "sha512-hF9ZChsdqKqqnivOzd9mE7lC3PmhZadXtwThi2RrsPiOLoEaGDfmr6Ni3amVQnB3bR8YEJtTxdQxe0NC4uW/8g==",
-      "cpu": [
-        "x64"
-      ],
-      "optional": true,
-      "os": [
-        "linux"
-      ]
-    },
-    "node_modules/@lancedb/vectordb-win32-x64-msvc": {
-      "version": "0.4.11",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.4.11.tgz",
-      "integrity": "sha512-0+9ut1ccKoqIyGxsVixwx3771Z+DXpl5WfSmOeA8kf3v3jlOg2H+0YUahiXLDid2ju+yeLPrAUYm7A1gKHVhew==",
-      "cpu": [
-        "x64"
-      ],
-      "optional": true,
-      "os": [
-        "win32"
-      ]
-    },
     "node_modules/@neon-rs/cli": {
       "version": "0.0.160",
       "resolved": "https://registry.npmjs.org/@neon-rs/cli/-/cli-0.0.160.tgz",
@@ -948,6 +893,7 @@
       "version": "14.0.2",
       "resolved": "https://registry.npmjs.org/apache-arrow/-/apache-arrow-14.0.2.tgz",
       "integrity": "sha512-EBO2xJN36/XoY81nhLcwCJgFwkboDZeyNQ+OPsG7bCoQjc2BT0aTyH/MR6SrL+LirSNz+cYqjGRlupMMlP1aEg==",
+      "peer": true,
       "dependencies": {
         "@types/command-line-args": "5.2.0",
         "@types/command-line-usage": "5.0.2",
@@ -964,10 +910,39 @@
         "arrow2csv": "bin/arrow2csv.js"
       }
     },
+    "node_modules/apache-arrow-old": {
+      "name": "apache-arrow",
+      "version": "13.0.0",
+      "resolved": "https://registry.npmjs.org/apache-arrow/-/apache-arrow-13.0.0.tgz",
+      "integrity": "sha512-3gvCX0GDawWz6KFNC28p65U+zGh/LZ6ZNKWNu74N6CQlKzxeoWHpi4CgEQsgRSEMuyrIIXi1Ea2syja7dwcHvw==",
+      "dev": true,
+      "dependencies": {
+        "@types/command-line-args": "5.2.0",
+        "@types/command-line-usage": "5.0.2",
+        "@types/node": "20.3.0",
+        "@types/pad-left": "2.1.1",
+        "command-line-args": "5.2.1",
+        "command-line-usage": "7.0.1",
+        "flatbuffers": "23.5.26",
+        "json-bignum": "^0.0.3",
+        "pad-left": "^2.1.0",
+        "tslib": "^2.5.3"
+      },
+      "bin": {
+        "arrow2csv": "bin/arrow2csv.js"
+      }
+    },
+    "node_modules/apache-arrow-old/node_modules/@types/node": {
+      "version": "20.3.0",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-20.3.0.tgz",
+      "integrity": "sha512-cumHmIAf6On83X7yP+LrsEyUOf/YlociZelmpRYaGFydoaPdxdt80MAbu6vWerQT2COCp2nPvHdsbD7tHn/YlQ==",
+      "dev": true
+    },
     "node_modules/apache-arrow/node_modules/@types/node": {
       "version": "20.3.0",
       "resolved": "https://registry.npmjs.org/@types/node/-/node-20.3.0.tgz",
-      "integrity": "sha512-cumHmIAf6On83X7yP+LrsEyUOf/YlociZelmpRYaGFydoaPdxdt80MAbu6vWerQT2COCp2nPvHdsbD7tHn/YlQ=="
+      "integrity": "sha512-cumHmIAf6On83X7yP+LrsEyUOf/YlociZelmpRYaGFydoaPdxdt80MAbu6vWerQT2COCp2nPvHdsbD7tHn/YlQ==",
+      "peer": true
     },
     "node_modules/arg": {
       "version": "4.1.3",
@@ -1,6 +1,6 @@
 {
   "name": "vectordb",
-  "version": "0.4.11",
+  "version": "0.4.13",
   "description": " Serverless, low-latency vector database for AI applications",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -41,6 +41,7 @@
     "@types/temp": "^0.9.1",
     "@types/uuid": "^9.0.3",
     "@typescript-eslint/eslint-plugin": "^5.59.1",
+    "apache-arrow-old": "npm:apache-arrow@13.0.0",
    "cargo-cp-artifact": "^0.1",
    "chai": "^4.3.7",
    "chai-as-promised": "^7.1.1",
@@ -87,10 +88,10 @@
     }
   },
   "optionalDependencies": {
-    "@lancedb/vectordb-darwin-arm64": "0.4.11",
-    "@lancedb/vectordb-darwin-x64": "0.4.11",
-    "@lancedb/vectordb-linux-arm64-gnu": "0.4.11",
-    "@lancedb/vectordb-linux-x64-gnu": "0.4.11",
-    "@lancedb/vectordb-win32-x64-msvc": "0.4.11"
+    "@lancedb/vectordb-darwin-arm64": "0.4.13",
+    "@lancedb/vectordb-darwin-x64": "0.4.13",
+    "@lancedb/vectordb-linux-arm64-gnu": "0.4.13",
+    "@lancedb/vectordb-linux-x64-gnu": "0.4.13",
+    "@lancedb/vectordb-win32-x64-msvc": "0.4.13"
   }
 }
@@ -20,19 +20,20 @@ import {
   type Vector,
   FixedSizeList,
   vectorFromArray,
-  type Schema,
+  Schema,
   Table as ArrowTable,
   RecordBatchStreamWriter,
   List,
   RecordBatch,
   makeData,
   Struct,
-  type Float,
+  Float,
   DataType,
   Binary,
   Float32
 } from 'apache-arrow'
 import { type EmbeddingFunction } from './index'
+import { sanitizeSchema } from './sanitize'

 /*
  * Options to control how a column should be converted to a vector array
@@ -201,10 +202,13 @@ export function makeArrowTable (
   }

   const opt = new MakeArrowTableOptions(options !== undefined ? options : {})
+  if (opt.schema !== undefined && opt.schema !== null) {
+    opt.schema = sanitizeSchema(opt.schema)
+  }
   const columns: Record<string, Vector> = {}
   // TODO: sample dataset to find missing columns
   // Prefer the field ordering of the schema, if present
-  const columnNames = ((options?.schema) != null) ? (options?.schema?.names as string[]) : Object.keys(data[0])
+  const columnNames = ((opt.schema) != null) ? (opt.schema.names as string[]) : Object.keys(data[0])
   for (const colName of columnNames) {
     if (data.length !== 0 && !Object.prototype.hasOwnProperty.call(data[0], colName)) {
       // The field is present in the schema, but not in the data, skip it
@@ -329,6 +333,9 @@ async function applyEmbeddings<T> (table: ArrowTable, embeddings?: EmbeddingFunc
   if (embeddings == null) {
     return table
   }
+  if (schema !== undefined && schema !== null) {
+    schema = sanitizeSchema(schema)
+  }

   // Convert from ArrowTable to Record<String, Vector>
   const colEntries = [...Array(table.numCols).keys()].map((_, idx) => {
@@ -439,6 +446,9 @@ export async function fromRecordsToBuffer<T> (
   embeddings?: EmbeddingFunction<T>,
   schema?: Schema
 ): Promise<Buffer> {
+  if (schema !== undefined && schema !== null) {
+    schema = sanitizeSchema(schema)
+  }
   const table = await convertToTable(data, embeddings, { schema })
   const writer = RecordBatchFileWriter.writeAll(table)
   return Buffer.from(await writer.toUint8Array())
@@ -456,6 +466,9 @@ export async function fromRecordsToStreamBuffer<T> (
   embeddings?: EmbeddingFunction<T>,
   schema?: Schema
 ): Promise<Buffer> {
+  if (schema !== null && schema !== undefined) {
+    schema = sanitizeSchema(schema)
+  }
   const table = await convertToTable(data, embeddings, { schema })
   const writer = RecordBatchStreamWriter.writeAll(table)
   return Buffer.from(await writer.toUint8Array())
@@ -474,6 +487,9 @@ export async function fromTableToBuffer<T> (
   embeddings?: EmbeddingFunction<T>,
   schema?: Schema
 ): Promise<Buffer> {
+  if (schema !== null && schema !== undefined) {
+    schema = sanitizeSchema(schema)
+  }
   const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema)
   const writer = RecordBatchFileWriter.writeAll(tableWithEmbeddings)
   return Buffer.from(await writer.toUint8Array())
@@ -492,6 +508,9 @@ export async function fromTableToStreamBuffer<T> (
   embeddings?: EmbeddingFunction<T>,
   schema?: Schema
 ): Promise<Buffer> {
+  if (schema !== null && schema !== undefined) {
+    schema = sanitizeSchema(schema)
+  }
   const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema)
   const writer = RecordBatchStreamWriter.writeAll(tableWithEmbeddings)
   return Buffer.from(await writer.toUint8Array())
@@ -528,5 +547,5 @@ function alignTable (table: ArrowTable, schema: Schema): ArrowTable {

 // Creates an empty Arrow Table
 export function createEmptyTable (schema: Schema): ArrowTable {
-  return new ArrowTable(schema)
+  return new ArrowTable(sanitizeSchema(schema))
 }
@@ -176,6 +176,10 @@ export async function connect (
     opts = { uri: arg }
   } else {
     // opts = { uri: arg.uri, awsCredentials = arg.awsCredentials }
+    const keys = Object.keys(arg)
+    if (keys.length === 1 && keys[0] === 'uri' && typeof arg.uri === 'string') {
+      opts = { uri: arg.uri }
+    } else {
       opts = Object.assign(
         {
           uri: '',
@@ -187,6 +191,7 @@ export async function connect (
         arg
       )
+    }
   }

   if (opts.uri.startsWith('db://')) {
     // Remote connection
@@ -341,6 +346,7 @@ export interface Table<T = number[]> {
    *
    * @param column The column to index
    * @param replace If false, fail if an index already exists on the column
+   *                it is always set to true for remote connections
    *
    * Scalar indices, like vector indices, can be used to speed up scans. A scalar
    * index can speed up scans that contain filter expressions on the indexed column.
@@ -384,7 +390,7 @@ export interface Table<T = number[]> {
    * await table.createScalarIndex('my_col')
    * ```
    */
-  createScalarIndex: (column: string, replace: boolean) => Promise<void>
+  createScalarIndex: (column: string, replace?: boolean) => Promise<void>

  /**
   * Returns the number of rows in this table.
@@ -914,7 +920,10 @@ export class LocalTable<T = number[]> implements Table<T> {
     })
   }

-  async createScalarIndex (column: string, replace: boolean): Promise<void> {
+  async createScalarIndex (column: string, replace?: boolean): Promise<void> {
+    if (replace === undefined) {
+      replace = true
+    }
     return tableCreateScalarIndex.call(this._tbl, column, replace)
   }
@@ -397,7 +397,7 @@ export class RemoteTable<T = number[]> implements Table<T> {
     }

     const column = indexParams.column ?? 'vector'
-    const indexType = 'vector' // only vector index is supported for remote connections
+    const indexType = 'vector'
     const metricType = indexParams.metric_type ?? 'L2'
     const indexCacheSize = indexParams.index_cache_size ?? null
@@ -420,8 +420,25 @@ export class RemoteTable<T = number[]> implements Table<T> {
     }
   }

-  async createScalarIndex (column: string, replace: boolean): Promise<void> {
-    throw new Error('Not implemented')
+  async createScalarIndex (column: string): Promise<void> {
+    const indexType = 'scalar'
+
+    const data = {
+      column,
+      index_type: indexType,
+      replace: true
+    }
+    const res = await this._client.post(
+      `/v1/table/${this._name}/create_scalar_index/`,
+      data
+    )
+    if (res.status !== 200) {
+      throw new Error(
+        `Server Error, status: ${res.status}, ` +
+        // eslint-disable-next-line @typescript-eslint/restrict-template-expressions
+        `message: ${res.statusText}: ${res.data}`
+      )
+    }
   }

   async countRows (): Promise<number> {
501 node/src/sanitize.ts Normal file
@@ -0,0 +1,501 @@
|
||||
// Copyright 2023 LanceDB Developers.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The utilities in this file help sanitize data from the user's arrow
|
||||
// library into the types expected by vectordb's arrow library. Node
|
||||
// generally allows for mulitple versions of the same library (and sometimes
|
||||
// even multiple copies of the same version) to be installed at the same
|
||||
// time. However, arrow-js uses instanceof which expected that the input
|
||||
// comes from the exact same library instance. This is not always the case
|
||||
// and so we must sanitize the input to ensure that it is compatible.
|
||||
|
||||
import {
|
||||
Field,
|
||||
Utf8,
|
||||
FixedSizeBinary,
|
||||
FixedSizeList,
|
||||
Schema,
|
||||
List,
|
||||
Struct,
|
||||
Float,
|
||||
Bool,
|
||||
Date_,
|
||||
Decimal,
|
||||
DataType,
|
||||
Dictionary,
|
||||
Binary,
|
||||
Float32,
|
||||
Interval,
|
||||
Map_,
|
||||
Duration,
|
||||
Union,
|
||||
Time,
|
||||
Timestamp,
|
||||
Type,
|
||||
Null,
|
||||
Int,
|
||||
type Precision,
|
||||
type DateUnit,
|
||||
Int8,
|
||||
Int16,
|
||||
Int32,
|
||||
Int64,
|
||||
Uint8,
|
||||
Uint16,
|
||||
Uint32,
|
||||
Uint64,
|
||||
Float16,
|
||||
Float64,
|
||||
DateDay,
|
||||
DateMillisecond,
|
||||
DenseUnion,
|
||||
SparseUnion,
|
||||
TimeNanosecond,
|
||||
TimeMicrosecond,
|
||||
TimeMillisecond,
|
||||
TimeSecond,
|
||||
TimestampNanosecond,
|
||||
TimestampMicrosecond,
|
||||
TimestampMillisecond,
|
||||
TimestampSecond,
|
||||
IntervalDayTime,
|
||||
IntervalYearMonth,
|
||||
DurationNanosecond,
|
||||
DurationMicrosecond,
|
||||
DurationMillisecond,
|
||||
DurationSecond,
|
||||
} from "apache-arrow";
|
||||
import type { IntBitWidth, TimeBitWidth } from "apache-arrow/type";
|
||||
|
||||
function sanitizeMetadata(
|
||||
metadataLike?: unknown
|
||||
): Map<string, string> | undefined {
|
||||
if (metadataLike === undefined || metadataLike === null) {
|
||||
return undefined;
|
||||
}
|
||||
if (!(metadataLike instanceof Map)) {
|
||||
throw Error("Expected metadata, if present, to be a Map<string, string>");
|
||||
}
|
||||
for (const item of metadataLike) {
|
||||
if (!(typeof item[0] === "string" || !(typeof item[1] === "string"))) {
|
||||
      throw Error(
        "Expected metadata, if present, to be a Map<string, string> but it had non-string keys or values"
      );
    }
  }
  return metadataLike as Map<string, string>;
}

function sanitizeInt(typeLike: object) {
  if (
    !("bitWidth" in typeLike) ||
    typeof typeLike.bitWidth !== "number" ||
    !("isSigned" in typeLike) ||
    typeof typeLike.isSigned !== "boolean"
  ) {
    throw Error(
      "Expected an Int Type to have a `bitWidth` and `isSigned` property"
    );
  }
  return new Int(typeLike.isSigned, typeLike.bitWidth as IntBitWidth);
}

function sanitizeFloat(typeLike: object) {
  if (!("precision" in typeLike) || typeof typeLike.precision !== "number") {
    throw Error("Expected a Float Type to have a `precision` property");
  }
  return new Float(typeLike.precision as Precision);
}

function sanitizeDecimal(typeLike: object) {
  if (
    !("scale" in typeLike) ||
    typeof typeLike.scale !== "number" ||
    !("precision" in typeLike) ||
    typeof typeLike.precision !== "number" ||
    !("bitWidth" in typeLike) ||
    typeof typeLike.bitWidth !== "number"
  ) {
    throw Error(
      "Expected a Decimal Type to have `scale`, `precision`, and `bitWidth` properties"
    );
  }
  return new Decimal(typeLike.scale, typeLike.precision, typeLike.bitWidth);
}

function sanitizeDate(typeLike: object) {
  if (!("unit" in typeLike) || typeof typeLike.unit !== "number") {
    throw Error("Expected a Date type to have a `unit` property");
  }
  return new Date_(typeLike.unit as DateUnit);
}

function sanitizeTime(typeLike: object) {
  if (
    !("unit" in typeLike) ||
    typeof typeLike.unit !== "number" ||
    !("bitWidth" in typeLike) ||
    typeof typeLike.bitWidth !== "number"
  ) {
    throw Error(
      "Expected a Time type to have `unit` and `bitWidth` properties"
    );
  }
  return new Time(typeLike.unit, typeLike.bitWidth as TimeBitWidth);
}

function sanitizeTimestamp(typeLike: object) {
  if (!("unit" in typeLike) || typeof typeLike.unit !== "number") {
    throw Error("Expected a Timestamp type to have a `unit` property");
  }
  let timezone = null;
  if ("timezone" in typeLike && typeof typeLike.timezone === "string") {
    timezone = typeLike.timezone;
  }
  return new Timestamp(typeLike.unit, timezone);
}

function sanitizeTypedTimestamp(
  typeLike: object,
  Datatype:
    | typeof TimestampNanosecond
    | typeof TimestampMicrosecond
    | typeof TimestampMillisecond
    | typeof TimestampSecond
) {
  let timezone = null;
  if ("timezone" in typeLike && typeof typeLike.timezone === "string") {
    timezone = typeLike.timezone;
  }
  return new Datatype(timezone);
}

function sanitizeInterval(typeLike: object) {
  if (!("unit" in typeLike) || typeof typeLike.unit !== "number") {
    throw Error("Expected an Interval type to have a `unit` property");
  }
  return new Interval(typeLike.unit);
}

function sanitizeList(typeLike: object) {
  if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
    throw Error(
      "Expected a List type to have an array-like `children` property"
    );
  }
  if (typeLike.children.length !== 1) {
    throw Error("Expected a List type to have exactly one child");
  }
  return new List(sanitizeField(typeLike.children[0]));
}

function sanitizeStruct(typeLike: object) {
  if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
    throw Error(
      "Expected a Struct type to have an array-like `children` property"
    );
  }
  return new Struct(typeLike.children.map((child) => sanitizeField(child)));
}

function sanitizeUnion(typeLike: object) {
  if (
    !("typeIds" in typeLike) ||
    !("mode" in typeLike) ||
    typeof typeLike.mode !== "number"
  ) {
    throw Error(
      "Expected a Union type to have `typeIds` and `mode` properties"
    );
  }
  if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
    throw Error(
      "Expected a Union type to have an array-like `children` property"
    );
  }

  return new Union(
    typeLike.mode,
    typeLike.typeIds as any,
    typeLike.children.map((child) => sanitizeField(child))
  );
}

function sanitizeTypedUnion(
  typeLike: object,
  UnionType: typeof DenseUnion | typeof SparseUnion
) {
  if (!("typeIds" in typeLike)) {
    throw Error(
      "Expected a DenseUnion/SparseUnion type to have a `typeIds` property"
    );
  }
  if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
    throw Error(
      "Expected a DenseUnion/SparseUnion type to have an array-like `children` property"
    );
  }

  return new UnionType(
    typeLike.typeIds as any,
    typeLike.children.map((child) => sanitizeField(child))
  );
}

function sanitizeFixedSizeBinary(typeLike: object) {
  if (!("byteWidth" in typeLike) || typeof typeLike.byteWidth !== "number") {
    throw Error(
      "Expected a FixedSizeBinary type to have a `byteWidth` property"
    );
  }
  return new FixedSizeBinary(typeLike.byteWidth);
}

function sanitizeFixedSizeList(typeLike: object) {
  if (!("listSize" in typeLike) || typeof typeLike.listSize !== "number") {
    throw Error("Expected a FixedSizeList type to have a `listSize` property");
  }
  if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
    throw Error(
      "Expected a FixedSizeList type to have an array-like `children` property"
    );
  }
  if (typeLike.children.length !== 1) {
    throw Error("Expected a FixedSizeList type to have exactly one child");
  }
  return new FixedSizeList(
    typeLike.listSize,
    sanitizeField(typeLike.children[0])
  );
}

function sanitizeMap(typeLike: object) {
  if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
    throw Error(
      "Expected a Map type to have an array-like `children` property"
    );
  }
  if (!("keysSorted" in typeLike) || typeof typeLike.keysSorted !== "boolean") {
    throw Error("Expected a Map type to have a `keysSorted` property");
  }
  return new Map_(
    typeLike.children.map((field) => sanitizeField(field)) as any,
    typeLike.keysSorted
  );
}

function sanitizeDuration(typeLike: object) {
  if (!("unit" in typeLike) || typeof typeLike.unit !== "number") {
    throw Error("Expected a Duration type to have a `unit` property");
  }
  return new Duration(typeLike.unit);
}

function sanitizeDictionary(typeLike: object) {
  if (!("id" in typeLike) || typeof typeLike.id !== "number") {
    throw Error("Expected a Dictionary type to have an `id` property");
  }
  if (!("indices" in typeLike) || typeof typeLike.indices !== "object") {
    throw Error("Expected a Dictionary type to have an `indices` property");
  }
if (!("dictionary" in typeLike) || typeof typeLike.dictionary !== "object") {
|
||||
throw Error("Expected a Dictionary type to have an `dictionary` property");
|
||||
  }
  if (!("isOrdered" in typeLike) || typeof typeLike.isOrdered !== "boolean") {
    throw Error("Expected a Dictionary type to have an `isOrdered` property");
  }
  return new Dictionary(
    sanitizeType(typeLike.dictionary),
    sanitizeType(typeLike.indices) as any,
    typeLike.id,
    typeLike.isOrdered
  );
}

function sanitizeType(typeLike: unknown): DataType<any> {
  if (typeof typeLike !== "object" || typeLike === null) {
    throw Error("Expected a Type but object was null/undefined");
  }
if (!("typeId" in typeLike) || !(typeof typeLike.typeId !== "function")) {
|
||||
throw Error("Expected a Type to have a typeId function");
|
||||
}
|
||||
  let typeId: Type;
  if (typeof typeLike.typeId === "function") {
    typeId = (typeLike.typeId as () => unknown)() as Type;
  } else if (typeof typeLike.typeId === "number") {
    typeId = typeLike.typeId as Type;
  } else {
    throw Error("Type's typeId property was not a function or number");
  }

  switch (typeId) {
    case Type.NONE:
      throw Error("Received a Type with a typeId of NONE");
    case Type.Null:
      return new Null();
    case Type.Int:
      return sanitizeInt(typeLike);
    case Type.Float:
      return sanitizeFloat(typeLike);
    case Type.Binary:
      return new Binary();
    case Type.Utf8:
      return new Utf8();
    case Type.Bool:
      return new Bool();
    case Type.Decimal:
      return sanitizeDecimal(typeLike);
    case Type.Date:
      return sanitizeDate(typeLike);
    case Type.Time:
      return sanitizeTime(typeLike);
    case Type.Timestamp:
      return sanitizeTimestamp(typeLike);
    case Type.Interval:
      return sanitizeInterval(typeLike);
    case Type.List:
      return sanitizeList(typeLike);
    case Type.Struct:
      return sanitizeStruct(typeLike);
    case Type.Union:
      return sanitizeUnion(typeLike);
    case Type.FixedSizeBinary:
      return sanitizeFixedSizeBinary(typeLike);
    case Type.FixedSizeList:
      return sanitizeFixedSizeList(typeLike);
    case Type.Map:
      return sanitizeMap(typeLike);
    case Type.Duration:
      return sanitizeDuration(typeLike);
    case Type.Dictionary:
      return sanitizeDictionary(typeLike);
    case Type.Int8:
      return new Int8();
    case Type.Int16:
      return new Int16();
    case Type.Int32:
      return new Int32();
    case Type.Int64:
      return new Int64();
    case Type.Uint8:
      return new Uint8();
    case Type.Uint16:
      return new Uint16();
    case Type.Uint32:
      return new Uint32();
    case Type.Uint64:
      return new Uint64();
    case Type.Float16:
      return new Float16();
    case Type.Float32:
      return new Float32();
    case Type.Float64:
      return new Float64();
    case Type.DateMillisecond:
      return new DateMillisecond();
    case Type.DateDay:
      return new DateDay();
    case Type.TimeNanosecond:
      return new TimeNanosecond();
    case Type.TimeMicrosecond:
      return new TimeMicrosecond();
    case Type.TimeMillisecond:
      return new TimeMillisecond();
    case Type.TimeSecond:
      return new TimeSecond();
    case Type.TimestampNanosecond:
      return sanitizeTypedTimestamp(typeLike, TimestampNanosecond);
    case Type.TimestampMicrosecond:
      return sanitizeTypedTimestamp(typeLike, TimestampMicrosecond);
    case Type.TimestampMillisecond:
      return sanitizeTypedTimestamp(typeLike, TimestampMillisecond);
    case Type.TimestampSecond:
      return sanitizeTypedTimestamp(typeLike, TimestampSecond);
    case Type.DenseUnion:
      return sanitizeTypedUnion(typeLike, DenseUnion);
    case Type.SparseUnion:
      return sanitizeTypedUnion(typeLike, SparseUnion);
    case Type.IntervalDayTime:
      return new IntervalDayTime();
    case Type.IntervalYearMonth:
      return new IntervalYearMonth();
    case Type.DurationNanosecond:
      return new DurationNanosecond();
    case Type.DurationMicrosecond:
      return new DurationMicrosecond();
    case Type.DurationMillisecond:
      return new DurationMillisecond();
    case Type.DurationSecond:
      return new DurationSecond();
  }
}

function sanitizeField(fieldLike: unknown): Field {
  if (fieldLike instanceof Field) {
    return fieldLike;
  }
  if (typeof fieldLike !== "object" || fieldLike === null) {
    throw Error("Expected a Field but object was null/undefined");
  }
  if (
    !("type" in fieldLike) ||
    !("name" in fieldLike) ||
    !("nullable" in fieldLike)
  ) {
    throw Error(
      "The field passed in is missing a `type`/`name`/`nullable` property"
    );
  }
  const type = sanitizeType(fieldLike.type);
  const name = fieldLike.name;
  if (!(typeof name === "string")) {
    throw Error("The field passed in had a non-string `name` property");
  }
  const nullable = fieldLike.nullable;
  if (!(typeof nullable === "boolean")) {
    throw Error("The field passed in had a non-boolean `nullable` property");
  }
  let metadata;
  if ("metadata" in fieldLike) {
    metadata = sanitizeMetadata(fieldLike.metadata);
  }
  return new Field(name, type, nullable, metadata);
}

export function sanitizeSchema(schemaLike: unknown): Schema {
  if (schemaLike instanceof Schema) {
    return schemaLike;
  }
  if (typeof schemaLike !== "object" || schemaLike === null) {
    throw Error("Expected a Schema but object was null/undefined");
  }
  if (!("fields" in schemaLike)) {
    throw Error(
      "The schema passed in does not appear to be a schema (no 'fields' property)"
    );
  }
  let metadata;
  if ("metadata" in schemaLike) {
    metadata = sanitizeMetadata(schemaLike.metadata);
  }
  if (!Array.isArray(schemaLike.fields)) {
    throw Error(
      "The schema passed in had a 'fields' property but it was not an array"
    );
  }
  const sanitizedFields = schemaLike.fields.map((field) =>
    sanitizeField(field)
  );
  return new Schema(sanitizedFields, metadata);
}
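The exported entry point of this new module is `sanitizeSchema`, which rebuilds a schema-shaped value using this package's own Arrow classes so that `instanceof` checks and IPC serialization stay consistent. A minimal usage sketch (the plain-object schema below is a hypothetical input, and the import path assumes where the module lands in this package):

```ts
import { Int32, Utf8 } from "apache-arrow";
import { sanitizeSchema } from "./sanitize"; // assumed path within this package

// A schema-like value, e.g. deserialized JSON or a Schema built by a
// different copy/version of apache-arrow.
const foreign = {
  fields: [
    { name: "id", type: new Int32(), nullable: true },
    { name: "text", type: new Utf8(), nullable: false },
  ],
};

// Every field and nested type is re-created with the local Arrow classes.
const schema = sanitizeSchema(foreign);
console.log(schema.fields.map((f) => `${f.name}: ${f.type}`));
```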
@@ -34,8 +34,20 @@ import {
  List,
  DataType,
  Dictionary,
  Int64
  Int64,
  MetadataVersion
} from 'apache-arrow'
import {
  Dictionary as OldDictionary,
  Field as OldField,
  FixedSizeList as OldFixedSizeList,
  Float32 as OldFloat32,
  Int32 as OldInt32,
  Struct as OldStruct,
  Schema as OldSchema,
  TimestampNanosecond as OldTimestampNanosecond,
  Utf8 as OldUtf8
} from 'apache-arrow-old'
import { type EmbeddingFunction } from '../embedding/embedding_function'

chaiUse(chaiAsPromised)
@@ -318,3 +330,31 @@ describe('makeEmptyTable', function () {
    await checkTableCreation(async (_, __, schema) => makeEmptyTable(schema))
  })
})

describe('when using two versions of arrow', function () {
  it('can still import data', async function() {
    const schema = new OldSchema([
      new OldField('id', new OldInt32()),
      new OldField('vector', new OldFixedSizeList(1024, new OldField("item", new OldFloat32(), true))),
      new OldField('struct', new OldStruct([
        new OldField('nested', new OldDictionary(new OldUtf8(), new OldInt32(), 1, true)),
        new OldField('ts_with_tz', new OldTimestampNanosecond("some_tz")),
        new OldField('ts_no_tz', new OldTimestampNanosecond(null))
      ]))
    ]) as any
    // We use arrow version 13 to emulate a "foreign arrow" and this version doesn't have metadataVersion
    // In theory, this wouldn't matter. We don't rely on that property. However, it causes deepEqual to
    // fail so we patch it back in
    schema.metadataVersion = MetadataVersion.V5
    const table = makeArrowTable(
      [],
      { schema }
    )

    const buf = await fromTableToBuffer(table)
    assert.isAbove(buf.byteLength, 0)
    const actual = tableFromIPC(buf)
    const actualSchema = actual.schema
    assert.deepEqual(actualSchema, schema)
  })
})
@@ -128,6 +128,11 @@ describe('LanceDB client', function () {
      assertResults(results)
      results = await table.where('id % 2 = 0').execute()
      assertResults(results)

      // Should reject a bad filter
      await expect(table.filter('id % 2 = 0 AND').execute()).to.be.rejectedWith(
        /.*sql parser error: Expected an expression:, found: EOF.*/
      )
    })

    it('uses a filter / where clause', async function () {
@@ -283,7 +288,8 @@ describe('LanceDB client', function () {

    it('create a table from an Arrow Table', async function () {
      const dir = await track().mkdir('lancejs')
      const con = await lancedb.connect(dir)
      // Also test the connect function with an object
      const con = await lancedb.connect({ uri: dir })

      const i32s = new Int32Array(new Array<number>(10))
      const i32 = makeVector(i32s)
@@ -745,11 +751,11 @@ describe('LanceDB client', function () {
        num_sub_vectors: 2
      })
      await expect(createIndex).to.be.rejectedWith(
        /VectorIndex requires the column data type to be fixed size list of float32s/
        "index cannot be created on the column `name` which has data type Utf8"
      )
    })

    it('it should fail when the column is not a vector', async function () {
    it('it should fail when num_partitions is invalid', async function () {
      const uri = await createTestDB(32, 300)
      const con = await lancedb.connect(uri)
      const table = await con.openTable('vectors')

nodejs/.eslintignore (new file, 3 lines)
@@ -0,0 +1,3 @@
**/dist/**/*
**/native.js
**/native.d.ts
@@ -1,22 +0,0 @@
module.exports = {
  env: {
    browser: true,
    es2021: true,
  },
  extends: [
    "eslint:recommended",
    "plugin:@typescript-eslint/recommended-type-checked",
    "plugin:@typescript-eslint/stylistic-type-checked",
  ],
  overrides: [],
  parserOptions: {
    project: "./tsconfig.json",
    ecmaVersion: "latest",
    sourceType: "module",
  },
  rules: {
    "@typescript-eslint/method-signature-style": "off",
    "@typescript-eslint/no-explicit-any": "off",
  },
  ignorePatterns: ["node_modules/", "dist/", "build/", "lancedb/native.*"],
};
nodejs/.prettierignore (new symbolic link, 1 line)
@@ -0,0 +1 @@
.eslintignore
@@ -14,12 +14,10 @@ crate-type = ["cdylib"]
[dependencies]
arrow-ipc.workspace = true
futures.workspace = true
lance-linalg.workspace = true
lance.workspace = true
lancedb = { path = "../rust/lancedb" }
napi = { version = "2.15", default-features = false, features = [
  "napi7",
  "async"
  "async",
] }
napi-derive = "2"

@@ -2,7 +2,6 @@
|
||||
|
||||
It will replace the NodeJS SDK when it is ready.
|
||||
|
||||
|
||||
## Development
|
||||
|
||||
```sh
|
||||
@@ -10,9 +9,35 @@ npm run build
|
||||
npm t
|
||||
```
|
||||
|
||||
Generating docs
|
||||
### Running lint / format
|
||||
|
||||
LanceDb uses eslint for linting. VSCode does not need any plugins to use eslint. However, it
|
||||
may need some additional configuration. Make sure that eslint.experimental.useFlatConfig is
|
||||
set to true. Also, if your vscode root folder is the repo root then you will need to set
|
||||
the eslint.workingDirectories to ["nodejs"]. To manually lint your code you can run:
|
||||
|
||||
```sh
|
||||
npm run lint
|
||||
```
|
||||
|
||||
LanceDb uses prettier for formatting. If you are using VSCode you will need to install the
|
||||
"Prettier - Code formatter" extension. You should then configure it to be the default formatter
|
||||
for typescript and you should enable format on save. To manually check your code's format you
|
||||
can run:
|
||||
|
||||
```sh
|
||||
npm run chkformat
|
||||
```
|
||||
|
||||
If you need to manually format your code you can run:
|
||||
|
||||
```sh
|
||||
npx prettier --write .
|
||||
```
|
||||
|
||||
### Generating docs
|
||||
|
||||
```sh
|
||||
npm run docs
|
||||
|
||||
cd ../docs
|
||||
|
||||
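Taken together, the editor settings the README describes amount to a small workspace config. A sketch (the `.vscode/settings.json` location and the `esbenp.prettier-vscode` extension id are the usual VSCode conventions, not something this diff pins down):

```jsonc
// .vscode/settings.json — hypothetical example
{
  // nodejs/eslint.config.js uses the flat config format
  "eslint.experimental.useFlatConfig": true,
  // needed when the VSCode root folder is the repo root rather than nodejs/
  "eslint.workingDirectories": ["nodejs"],
  // prettier as default formatter with format-on-save, per the README
  "[typescript]": {
    "editor.defaultFormatter": "esbenp.prettier-vscode",
    "editor.formatOnSave": true
  }
}
```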
@@ -12,9 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

import { makeArrowTable, toBuffer } from "../lancedb/arrow";
import {
  Int64,
  convertToTable,
  fromTableToBuffer,
  makeArrowTable,
  makeEmptyTable,
} from "../dist/arrow";
import {
  Field,
  FixedSizeList,
  Float16,
@@ -23,43 +27,137 @@ import {
  tableFromIPC,
  Schema,
  Float64,
  type Table,
  Binary,
  Bool,
  Utf8,
  Struct,
  List,
  DataType,
  Dictionary,
  Int64,
  Float,
  Precision,
  MetadataVersion,
} from "apache-arrow";
import {
  Dictionary as OldDictionary,
  Field as OldField,
  FixedSizeList as OldFixedSizeList,
  Float32 as OldFloat32,
  Int32 as OldInt32,
  Struct as OldStruct,
  Schema as OldSchema,
  TimestampNanosecond as OldTimestampNanosecond,
  Utf8 as OldUtf8,
} from "apache-arrow-old";
import { type EmbeddingFunction } from "../dist/embedding/embedding_function";

test("customized schema", function () {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
function sampleRecords(): Array<Record<string, any>> {
|
||||
return [
|
||||
{
|
||||
binary: Buffer.alloc(5),
|
||||
boolean: false,
|
||||
number: 7,
|
||||
string: "hello",
|
||||
struct: { x: 0, y: 0 },
|
||||
list: ["anime", "action", "comedy"],
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
// Helper method to verify various ways to create a table
|
||||
async function checkTableCreation(
|
||||
tableCreationMethod: (
|
||||
records: Record<string, unknown>[],
|
||||
recordsReversed: Record<string, unknown>[],
|
||||
schema: Schema,
|
||||
) => Promise<Table>,
|
||||
infersTypes: boolean,
|
||||
): Promise<void> {
|
||||
const records = sampleRecords();
|
||||
const recordsReversed = [
|
||||
{
|
||||
list: ["anime", "action", "comedy"],
|
||||
struct: { x: 0, y: 0 },
|
||||
string: "hello",
|
||||
number: 7,
|
||||
boolean: false,
|
||||
binary: Buffer.alloc(5),
|
||||
},
|
||||
];
|
||||
const schema = new Schema([
|
||||
new Field("a", new Int32(), true),
|
||||
new Field("b", new Float32(), true),
|
||||
new Field("binary", new Binary(), false),
|
||||
new Field("boolean", new Bool(), false),
|
||||
new Field("number", new Float64(), false),
|
||||
new Field("string", new Utf8(), false),
|
||||
new Field(
|
||||
"c",
|
||||
new FixedSizeList(3, new Field("item", new Float16())),
|
||||
true
|
||||
"struct",
|
||||
new Struct([
|
||||
new Field("x", new Float64(), false),
|
||||
new Field("y", new Float64(), false),
|
||||
]),
|
||||
),
|
||||
new Field("list", new List(new Field("item", new Utf8(), false)), false),
|
||||
]);
|
||||
|
||||
const table = await tableCreationMethod(records, recordsReversed, schema);
|
||||
schema.fields.forEach((field, idx) => {
|
||||
const actualField = table.schema.fields[idx];
|
||||
// Type inference always assumes nullable=true
|
||||
if (infersTypes) {
|
||||
expect(actualField.nullable).toBe(true);
|
||||
} else {
|
||||
expect(actualField.nullable).toBe(false);
|
||||
}
|
||||
expect(table.getChild(field.name)?.type.toString()).toEqual(
|
||||
field.type.toString(),
|
||||
);
|
||||
expect(table.getChildAt(idx)?.type.toString()).toEqual(
|
||||
field.type.toString(),
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
describe("The function makeArrowTable", function () {
|
||||
it("will use data types from a provided schema instead of inference", async function () {
|
||||
const schema = new Schema([
|
||||
new Field("a", new Int32()),
|
||||
new Field("b", new Float32()),
|
||||
new Field("c", new FixedSizeList(3, new Field("item", new Float16()))),
|
||||
new Field("d", new Int64()),
|
||||
]);
|
||||
const table = makeArrowTable(
|
||||
[
|
||||
{ a: 1, b: 2, c: [1, 2, 3] },
|
||||
{ a: 4, b: 5, c: [4, 5, 6] },
|
||||
{ a: 7, b: 8, c: [7, 8, 9] },
|
||||
{ a: 1, b: 2, c: [1, 2, 3], d: 9 },
|
||||
{ a: 4, b: 5, c: [4, 5, 6], d: 10 },
|
||||
{ a: 7, b: 8, c: [7, 8, 9], d: null },
|
||||
],
|
||||
{ schema }
|
||||
{ schema },
|
||||
);
|
||||
|
||||
expect(table.schema.toString()).toEqual(schema.toString());
|
||||
|
||||
const buf = toBuffer(table);
|
||||
const buf = await fromTableToBuffer(table);
|
||||
expect(buf.byteLength).toBeGreaterThan(0);
|
||||
|
||||
const actual = tableFromIPC(buf);
|
||||
expect(actual.numRows).toBe(3);
|
||||
const actualSchema = actual.schema;
|
||||
expect(actualSchema.toString()).toStrictEqual(schema.toString());
|
||||
});
|
||||
expect(actualSchema).toEqual(schema);
|
||||
});
|
||||
|
||||
test("default vector column", function () {
|
||||
it("will assume the column `vector` is FixedSizeList<Float32> by default", async function () {
|
||||
const schema = new Schema([
|
||||
new Field("a", new Float64(), true),
|
||||
new Field("b", new Float64(), true),
|
||||
new Field("vector", new FixedSizeList(3, new Field("item", new Float32()))),
|
||||
new Field("a", new Float(Precision.DOUBLE), true),
|
||||
new Field("b", new Float(Precision.DOUBLE), true),
|
||||
new Field(
|
||||
"vector",
|
||||
new FixedSizeList(
|
||||
3,
|
||||
new Field("item", new Float(Precision.SINGLE), true),
|
||||
),
|
||||
true,
|
||||
),
|
||||
]);
|
||||
const table = makeArrowTable([
|
||||
{ a: 1, b: 2, vector: [1, 2, 3] },
|
||||
@@ -67,21 +165,29 @@ test("default vector column", function () {
|
||||
{ a: 7, b: 8, vector: [7, 8, 9] },
|
||||
]);
|
||||
|
||||
const buf = toBuffer(table);
|
||||
const buf = await fromTableToBuffer(table);
|
||||
expect(buf.byteLength).toBeGreaterThan(0);
|
||||
|
||||
const actual = tableFromIPC(buf);
|
||||
expect(actual.numRows).toBe(3);
|
||||
const actualSchema = actual.schema;
|
||||
expect(actualSchema.toString()).toEqual(actualSchema.toString());
|
||||
});
|
||||
expect(actualSchema).toEqual(schema);
|
||||
});
|
||||
|
||||
test("2 vector columns", function () {
|
||||
it("can support multiple vector columns", async function () {
|
||||
const schema = new Schema([
|
||||
new Field("a", new Float64()),
|
||||
new Field("b", new Float64()),
|
||||
new Field("vec1", new FixedSizeList(3, new Field("item", new Float16()))),
|
||||
new Field("vec2", new FixedSizeList(3, new Field("item", new Float16()))),
|
||||
new Field("a", new Float(Precision.DOUBLE), true),
|
||||
new Field("b", new Float(Precision.DOUBLE), true),
|
||||
new Field(
|
||||
"vec1",
|
||||
new FixedSizeList(3, new Field("item", new Float16(), true)),
|
||||
true,
|
||||
),
|
||||
new Field(
|
||||
"vec2",
|
||||
new FixedSizeList(3, new Field("item", new Float16(), true)),
|
||||
true,
|
||||
),
|
||||
]);
|
||||
const table = makeArrowTable(
|
||||
[
|
||||
@@ -94,27 +200,271 @@ test("2 vector columns", function () {
|
||||
vec1: { type: new Float16() },
|
||||
vec2: { type: new Float16() },
|
||||
},
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
const buf = toBuffer(table);
|
||||
const buf = await fromTableToBuffer(table);
|
||||
expect(buf.byteLength).toBeGreaterThan(0);
|
||||
|
||||
const actual = tableFromIPC(buf);
|
||||
expect(actual.numRows).toBe(3);
|
||||
const actualSchema = actual.schema;
|
||||
expect(actualSchema.toString()).toEqual(schema.toString());
|
||||
expect(actualSchema).toEqual(schema);
|
||||
});
|
||||
|
||||
it("will allow different vector column types", async function () {
|
||||
const table = makeArrowTable([{ fp16: [1], fp32: [1], fp64: [1] }], {
|
||||
vectorColumns: {
|
||||
fp16: { type: new Float16() },
|
||||
fp32: { type: new Float32() },
|
||||
fp64: { type: new Float64() },
|
||||
},
|
||||
});
|
||||
|
||||
expect(table.getChild("fp16")?.type.children[0].type.toString()).toEqual(
|
||||
new Float16().toString(),
|
||||
);
|
||||
expect(table.getChild("fp32")?.type.children[0].type.toString()).toEqual(
|
||||
new Float32().toString(),
|
||||
);
|
||||
expect(table.getChild("fp64")?.type.children[0].type.toString()).toEqual(
|
||||
new Float64().toString(),
|
||||
);
|
||||
});
|
||||
|
||||
it("will use dictionary encoded strings if asked", async function () {
|
||||
const table = makeArrowTable([{ str: "hello" }]);
|
||||
expect(DataType.isUtf8(table.getChild("str")?.type)).toBe(true);
|
||||
|
||||
const tableWithDict = makeArrowTable([{ str: "hello" }], {
|
||||
dictionaryEncodeStrings: true,
|
||||
});
|
||||
expect(DataType.isDictionary(tableWithDict.getChild("str")?.type)).toBe(
|
||||
true,
|
||||
);
|
||||
|
||||
const schema = new Schema([
|
||||
new Field("str", new Dictionary(new Utf8(), new Int32())),
|
||||
]);
|
||||
|
||||
const tableWithDict2 = makeArrowTable([{ str: "hello" }], { schema });
|
||||
expect(DataType.isDictionary(tableWithDict2.getChild("str")?.type)).toBe(
|
||||
true,
|
||||
);
|
||||
});
|
||||
|
||||
it("will infer data types correctly", async function () {
|
||||
await checkTableCreation(async (records) => makeArrowTable(records), true);
|
||||
});
|
||||
|
||||
it("will allow a schema to be provided", async function () {
|
||||
await checkTableCreation(
|
||||
async (records, _, schema) => makeArrowTable(records, { schema }),
|
||||
false,
|
||||
);
|
||||
});
|
||||
|
||||
it("will use the field order of any provided schema", async function () {
|
||||
await checkTableCreation(
|
||||
async (_, recordsReversed, schema) =>
|
||||
makeArrowTable(recordsReversed, { schema }),
|
||||
false,
|
||||
);
|
||||
});
|
||||
|
||||
it("will make an empty table", async function () {
|
||||
await checkTableCreation(
|
||||
async (_, __, schema) => makeArrowTable([], { schema }),
|
||||
false,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
test("handles int64", function() {
|
||||
// https://github.com/lancedb/lancedb/issues/960
|
||||
const schema = new Schema([
|
||||
new Field("x", new Int64(), true)
|
||||
class DummyEmbedding implements EmbeddingFunction<string> {
|
||||
public readonly sourceColumn = "string";
|
||||
public readonly embeddingDimension = 2;
|
||||
public readonly embeddingDataType = new Float16();
|
||||
|
||||
async embed(data: string[]): Promise<number[][]> {
|
||||
return data.map(() => [0.0, 0.0]);
|
||||
}
|
||||
}
|
||||
|
||||
class DummyEmbeddingWithNoDimension implements EmbeddingFunction<string> {
|
||||
public readonly sourceColumn = "string";
|
||||
|
||||
async embed(data: string[]): Promise<number[][]> {
|
||||
return data.map(() => [0.0, 0.0]);
|
||||
}
|
||||
}
|
||||
|
||||
describe("convertToTable", function () {
|
||||
it("will infer data types correctly", async function () {
|
||||
await checkTableCreation(
|
||||
async (records) => await convertToTable(records),
|
||||
true,
|
||||
);
|
||||
});
|
||||
|
||||
it("will allow a schema to be provided", async function () {
|
||||
await checkTableCreation(
|
||||
async (records, _, schema) =>
|
||||
await convertToTable(records, undefined, { schema }),
|
||||
false,
|
||||
);
|
||||
});
|
||||
|
||||
it("will use the field order of any provided schema", async function () {
|
||||
await checkTableCreation(
|
||||
async (_, recordsReversed, schema) =>
|
||||
await convertToTable(recordsReversed, undefined, { schema }),
|
||||
false,
|
||||
);
|
||||
});
|
||||
|
||||
it("will make an empty table", async function () {
|
||||
await checkTableCreation(
|
||||
async (_, __, schema) => await convertToTable([], undefined, { schema }),
|
||||
false,
|
||||
);
|
||||
});
|
||||
|
||||
it("will apply embeddings", async function () {
|
||||
const records = sampleRecords();
|
||||
const table = await convertToTable(records, new DummyEmbedding());
|
||||
expect(DataType.isFixedSizeList(table.getChild("vector")?.type)).toBe(true);
|
||||
expect(table.getChild("vector")?.type.children[0].type.toString()).toEqual(
|
||||
new Float16().toString(),
|
||||
);
|
||||
});
|
||||
|
||||
it("will fail if missing the embedding source column", async function () {
|
||||
await expect(
|
||||
convertToTable([{ id: 1 }], new DummyEmbedding()),
|
||||
).rejects.toThrow("'string' was not present");
|
||||
});
|
||||
|
||||
it("use embeddingDimension if embedding missing from table", async function () {
|
||||
const schema = new Schema([new Field("string", new Utf8(), false)]);
|
||||
// Simulate getting an empty Arrow table (minus embedding) from some other source
|
||||
// In other words, we aren't starting with records
|
||||
const table = makeEmptyTable(schema);
|
||||
|
||||
// If the embedding specifies the dimension we are fine
|
||||
await fromTableToBuffer(table, new DummyEmbedding());
|
||||
|
||||
// We can also supply a schema and should be ok
|
||||
const schemaWithEmbedding = new Schema([
|
||||
new Field("string", new Utf8(), false),
|
||||
new Field(
|
||||
"vector",
|
||||
new FixedSizeList(2, new Field("item", new Float16(), false)),
|
||||
false,
|
||||
),
|
||||
]);
|
||||
const table = makeArrowTable([
|
||||
{ x: 1 },
|
||||
{ x: 2 },
|
||||
{ x: 3 }
|
||||
], { schema });
|
||||
expect(table.schema).toEqual(schema);
|
||||
})
|
||||
await fromTableToBuffer(
|
||||
table,
|
||||
new DummyEmbeddingWithNoDimension(),
|
||||
schemaWithEmbedding,
|
||||
);
|
||||
|
||||
// Otherwise we will get an error
|
||||
await expect(
|
||||
fromTableToBuffer(table, new DummyEmbeddingWithNoDimension()),
|
||||
).rejects.toThrow("does not specify `embeddingDimension`");
|
||||
});
|
||||
|
||||
it("will apply embeddings to an empty table", async function () {
|
||||
const schema = new Schema([
|
||||
new Field("string", new Utf8(), false),
|
||||
new Field(
|
||||
"vector",
|
||||
new FixedSizeList(2, new Field("item", new Float16(), false)),
|
||||
false,
|
||||
),
|
||||
]);
|
||||
const table = await convertToTable([], new DummyEmbedding(), { schema });
|
||||
expect(DataType.isFixedSizeList(table.getChild("vector")?.type)).toBe(true);
|
||||
expect(table.getChild("vector")?.type.children[0].type.toString()).toEqual(
|
||||
new Float16().toString(),
|
||||
);
|
||||
});
|
||||
|
||||
it("will complain if embeddings present but schema missing embedding column", async function () {
|
||||
const schema = new Schema([new Field("string", new Utf8(), false)]);
|
||||
await expect(
|
||||
convertToTable([], new DummyEmbedding(), { schema }),
|
||||
).rejects.toThrow("column vector was missing");
|
||||
});
|
||||
|
||||
it("will provide a nice error if run twice", async function () {
|
||||
const records = sampleRecords();
|
||||
const table = await convertToTable(records, new DummyEmbedding());
|
||||
// fromTableToBuffer will try and apply the embeddings again
|
||||
await expect(
|
||||
fromTableToBuffer(table, new DummyEmbedding()),
|
||||
).rejects.toThrow("already existed");
|
||||
});
|
||||
});
|
||||
|
||||
describe("makeEmptyTable", function () {
|
||||
it("will make an empty table", async function () {
|
||||
await checkTableCreation(
|
||||
async (_, __, schema) => makeEmptyTable(schema),
|
||||
false,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("when using two versions of arrow", function () {
|
||||
it("can still import data", async function () {
|
||||
const schema = new OldSchema([
|
||||
new OldField("id", new OldInt32()),
|
||||
new OldField(
|
||||
"vector",
|
||||
new OldFixedSizeList(
|
||||
1024,
|
||||
new OldField("item", new OldFloat32(), true),
|
||||
),
|
||||
),
|
||||
new OldField(
|
||||
"struct",
|
||||
new OldStruct([
|
||||
new OldField(
|
||||
"nested",
|
||||
new OldDictionary(new OldUtf8(), new OldInt32(), 1, true),
|
||||
),
|
||||
new OldField("ts_with_tz", new OldTimestampNanosecond("some_tz")),
|
||||
new OldField("ts_no_tz", new OldTimestampNanosecond(null)),
|
||||
]),
|
||||
),
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
]) as any;
|
||||
schema.metadataVersion = MetadataVersion.V5;
|
||||
const table = makeArrowTable([], { schema });
|
||||
|
||||
const buf = await fromTableToBuffer(table);
|
||||
expect(buf.byteLength).toBeGreaterThan(0);
|
||||
const actual = tableFromIPC(buf);
|
||||
const actualSchema = actual.schema;
|
||||
expect(actualSchema.fields.length).toBe(3);
|
||||
|
||||
// Deep equality gets hung up on some very minor unimportant differences
|
||||
// between arrow version 13 and 15 which isn't really what we're testing for
|
||||
// and so we do our own comparison that just checks name/type/nullability
|
||||
function compareFields(lhs: Field, rhs: Field) {
|
||||
expect(lhs.name).toEqual(rhs.name);
|
||||
expect(lhs.nullable).toEqual(rhs.nullable);
|
||||
expect(lhs.typeId).toEqual(rhs.typeId);
|
||||
if ("children" in lhs.type && lhs.type.children !== null) {
|
||||
const lhsChildren = lhs.type.children as Field[];
|
||||
lhsChildren.forEach((child: Field, idx) => {
|
||||
compareFields(child, rhs.type.children[idx]);
|
||||
});
|
||||
}
|
||||
}
|
||||
    actualSchema.fields.forEach((field, idx) => {
      compareFields(field, schema.fields[idx]);
    });
  });
});

nodejs/__test__/connection.test.ts (new file, 88 lines)
@@ -0,0 +1,88 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import * as tmp from "tmp";

import { Connection, connect } from "../dist/index.js";

describe("when connecting", () => {
  let tmpDir: tmp.DirResult;
  beforeEach(() => (tmpDir = tmp.dirSync({ unsafeCleanup: true })));
  afterEach(() => tmpDir.removeCallback());

  it("should connect", async () => {
    const db = await connect(tmpDir.name);
    expect(db.display()).toBe(
      `NativeDatabase(uri=${tmpDir.name}, read_consistency_interval=None)`,
    );
  });

  it("should allow read consistency interval to be specified", async () => {
    const db = await connect(tmpDir.name, { readConsistencyInterval: 5 });
    expect(db.display()).toBe(
      `NativeDatabase(uri=${tmpDir.name}, read_consistency_interval=5s)`,
    );
  });
});

describe("given a connection", () => {
  let tmpDir: tmp.DirResult;
  let db: Connection;
  beforeEach(async () => {
    tmpDir = tmp.dirSync({ unsafeCleanup: true });
    db = await connect(tmpDir.name);
  });
  afterEach(() => tmpDir.removeCallback());

  it("should raise an error if opening a non-existent table", async () => {
    await expect(db.openTable("non-existent")).rejects.toThrow("was not found");
  });

  it("should raise an error if any operation is tried after it is closed", async () => {
    expect(db.isOpen()).toBe(true);
    await db.close();
    expect(db.isOpen()).toBe(false);
    await expect(db.tableNames()).rejects.toThrow("Connection is closed");
  });

  it("should fail if creating table twice, unless overwrite is true", async () => {
    let tbl = await db.createTable("test", [{ id: 1 }, { id: 2 }]);
    await expect(tbl.countRows()).resolves.toBe(2);
    await expect(
      db.createTable("test", [{ id: 1 }, { id: 2 }]),
    ).rejects.toThrow();
    tbl = await db.createTable("test", [{ id: 3 }], { mode: "overwrite" });
    await expect(tbl.countRows()).resolves.toBe(1);
  });

  it("should respect limit and page token when listing tables", async () => {
    const db = await connect(tmpDir.name);

    await db.createTable("b", [{ id: 1 }]);
    await db.createTable("a", [{ id: 1 }]);
    await db.createTable("c", [{ id: 1 }]);

    let tables = await db.tableNames();
    expect(tables).toEqual(["a", "b", "c"]);

    tables = await db.tableNames({ limit: 1 });
    expect(tables).toEqual(["a"]);

    tables = await db.tableNames({ limit: 1, startAfter: "a" });
    expect(tables).toEqual(["b"]);

    tables = await db.tableNames({ startAfter: "a" });
    expect(tables).toEqual(["b", "c"]);
  });
});
@@ -1,34 +0,0 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import * as os from "os";
import * as path from "path";
import * as fs from "fs";

import { Schema, Field, Float64 } from "apache-arrow";
import { connect } from "../dist/index.js";

test("open database", async () => {
  const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "test-open"));

  const db = await connect(tmpDir);
  let tableNames = await db.tableNames();
  expect(tableNames).toStrictEqual([]);

  const tbl = await db.createTable("test", [{ id: 1 }, { id: 2 }]);
  expect(await db.tableNames()).toStrictEqual(["test"]);

  const schema = await tbl.schema();
  expect(schema).toEqual(new Schema([new Field("id", new Float64(), true)]));
});
@@ -12,27 +12,91 @@
// See the License for the specific language governing permissions and
// limitations under the License.

import * as os from "os";
import * as path from "path";
import * as fs from "fs";
import * as path from "path";
import * as tmp from "tmp";

import { connect } from "../dist";
import { Schema, Field, Float32, Int32, FixedSizeList, Int64, Float64 } from "apache-arrow";
import { Table, connect } from "../dist";
import {
  Schema,
  Field,
  Float32,
  Int32,
  FixedSizeList,
  Int64,
  Float64,
} from "apache-arrow";
import { makeArrowTable } from "../dist/arrow";
import { Index } from "../dist/indices";

describe("Test creating index", () => {
  let tmpDir: string;
describe("Given a table", () => {
  let tmpDir: tmp.DirResult;
  let table: Table;
  const schema = new Schema([new Field("id", new Float64(), true)]);
  beforeEach(async () => {
    tmpDir = tmp.dirSync({ unsafeCleanup: true });
    const conn = await connect(tmpDir.name);
    table = await conn.createEmptyTable("some_table", schema);
  });
  afterEach(() => tmpDir.removeCallback());

  it("be displayable", async () => {
    expect(table.display()).toMatch(
      /NativeTable\(some_table, uri=.*, read_consistency_interval=None\)/,
    );
    table.close();
    expect(table.display()).toBe("ClosedTable(some_table)");
  });

  it("should let me add data", async () => {
    await table.add([{ id: 1 }, { id: 2 }]);
    await table.add([{ id: 1 }]);
    await expect(table.countRows()).resolves.toBe(3);
  });

  it("should overwrite data if asked", async () => {
    await table.add([{ id: 1 }, { id: 2 }]);
    await table.add([{ id: 1 }], { mode: "overwrite" });
    await expect(table.countRows()).resolves.toBe(1);
  });

  it("should let me close the table", async () => {
    expect(table.isOpen()).toBe(true);
    table.close();
    expect(table.isOpen()).toBe(false);
    expect(table.countRows()).rejects.toThrow("Table some_table is closed");
  });

  it("should let me update values", async () => {
    await table.add([{ id: 1 }]);
    expect(await table.countRows("id == 1")).toBe(1);
    expect(await table.countRows("id == 7")).toBe(0);
    await table.update({ id: "7" });
    expect(await table.countRows("id == 1")).toBe(0);
    expect(await table.countRows("id == 7")).toBe(1);
    await table.add([{ id: 2 }]);
    // Test Map as input
    await table.update(new Map(Object.entries({ id: "10" })), {
      where: "id % 2 == 0",
    });
    expect(await table.countRows("id == 2")).toBe(0);
    expect(await table.countRows("id == 7")).toBe(1);
    expect(await table.countRows("id == 10")).toBe(1);
  });
});

describe("When creating an index", () => {
  let tmpDir: tmp.DirResult;
  const schema = new Schema([
    new Field("id", new Int32(), true),
    new Field("vec", new FixedSizeList(32, new Field("item", new Float32()))),
  ]);
  let tbl: Table;
  let queryVec: number[];

  beforeEach(() => {
    tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "index-"));
  });

  test("create vector index with no column", async () => {
    const db = await connect(tmpDir);
  beforeEach(async () => {
    tmpDir = tmp.dirSync({ unsafeCleanup: true });
    const db = await connect(tmpDir.name);
    const data = makeArrowTable(
      Array(300)
        .fill(1)
@@ -44,57 +108,76 @@ describe("Test creating index", () => {
        })),
      {
        schema,
      }
      },
    );
    const tbl = await db.createTable("test", data);
    await tbl.createIndex().build();
    queryVec = data.toArray()[5].vec.toJSON();
    tbl = await db.createTable("test", data);
  });
  afterEach(() => tmpDir.removeCallback());

  it("should create a vector index on vector columns", async () => {
    await tbl.createIndex("vec");

    // check index directory
    const indexDir = path.join(tmpDir, "test.lance", "_indices");
    const indexDir = path.join(tmpDir.name, "test.lance", "_indices");
    expect(fs.readdirSync(indexDir)).toHaveLength(1);
    // TODO: check index type.
    const indices = await tbl.listIndices();
    expect(indices.length).toBe(1);
    expect(indices[0]).toEqual({
      indexType: "IvfPq",
      columns: ["vec"],
    });

    // Search without specifying the column
    let query_vector = data.toArray()[5].vec.toJSON();
    let rst = await tbl.query().nearestTo(query_vector).limit(2).toArrow();
    const rst = await tbl.query().nearestTo(queryVec).limit(2).toArrow();
    expect(rst.numRows).toBe(2);

    // Search with specifying the column
    let rst2 = await tbl.search(query_vector, "vec").limit(2).toArrow();
    const rst2 = await tbl.search(queryVec, "vec").limit(2).toArrow();
    expect(rst2.numRows).toBe(2);
    expect(rst.toString()).toEqual(rst2.toString());
  });

  test("no vector column available", async () => {
    const db = await connect(tmpDir);
    const tbl = await db.createTable(
      "no_vec",
      makeArrowTable([
        { id: 1, val: 2 },
        { id: 2, val: 3 },
      ])
    );
    await expect(tbl.createIndex().build()).rejects.toThrow(
      "No vector column found"
    );
  it("should allow parameters to be specified", async () => {
    await tbl.createIndex("vec", {
      config: Index.ivfPq({
        numPartitions: 10,
      }),
    });

    await tbl.createIndex("val").build();
    const indexDir = path.join(tmpDir, "no_vec.lance", "_indices");
    // TODO: Verify parameters when we can load index config as part of list indices
  });

  it("should allow me to replace (or not) an existing index", async () => {
    await tbl.createIndex("id");
    // Default is replace=true
    await tbl.createIndex("id");
    await expect(tbl.createIndex("id", { replace: false })).rejects.toThrow(
      "already exists",
    );
    await tbl.createIndex("id", { replace: true });
  });

  test("should create a scalar index on scalar columns", async () => {
    await tbl.createIndex("id");
    const indexDir = path.join(tmpDir.name, "test.lance", "_indices");
    expect(fs.readdirSync(indexDir)).toHaveLength(1);

    for await (const r of tbl.query().filter("id > 1").select(["id"])) {
      expect(r.numRows).toBe(1);
      expect(r.numRows).toBe(298);
    }
  });

  // TODO: Move this test to the query API test (making sure we can reject queries
  // when the dimension is incorrect)
  test("two columns with different dimensions", async () => {
    const db = await connect(tmpDir);
    const db = await connect(tmpDir.name);
    const schema = new Schema([
      new Field("id", new Int32(), true),
      new Field("vec", new FixedSizeList(32, new Field("item", new Float32()))),
      new Field(
        "vec2",
        new FixedSizeList(64, new Field("item", new Float32()))
        new FixedSizeList(64, new Field("item", new Float32())),
      ),
    ]);
    const tbl = await db.createTable(
@@ -111,25 +194,21 @@ describe("Test creating index", () => {
          .fill(1)
          .map(() => Math.random()),
      })),
      { schema }
    )
      { schema },
    ),
    );

    // Only build index over v1
    await expect(tbl.createIndex().build()).rejects.toThrow(
      /.*More than one vector columns found.*/
    );
    tbl
      .createIndex("vec")
      .ivf_pq({ num_partitions: 2, num_sub_vectors: 2 })
      .build();
    await tbl.createIndex("vec", {
      config: Index.ivfPq({ numPartitions: 2, numSubVectors: 2 }),
    });

    const rst = await tbl
      .query()
      .nearestTo(
        Array(32)
          .fill(1)
          .map(() => Math.random())
          .map(() => Math.random()),
      )
      .limit(2)
      .toArrow();
@@ -142,141 +221,181 @@ describe("Test creating index", () => {
          Array(64)
            .fill(1)
            .map(() => Math.random()),
          "vec"
          "vec",
        )
        .limit(2)
        .toArrow()
        .toArrow(),
    ).rejects.toThrow(/.*does not match the dimension.*/);

    const query64 = Array(64)
      .fill(1)
      .map(() => Math.random());
    const rst64_1 = await tbl.query().nearestTo(query64).limit(2).toArrow();
    const rst64_2 = await tbl.search(query64, "vec2").limit(2).toArrow();
    expect(rst64_1.toString()).toEqual(rst64_2.toString());
    expect(rst64_1.numRows).toBe(2);
  });

  test("create scalar index", async () => {
    const db = await connect(tmpDir);
    const data = makeArrowTable(
      Array(300)
        .fill(1)
        .map((_, i) => ({
          id: i,
          vec: Array(32)
            .fill(1)
            .map(() => Math.random()),
        })),
      {
        schema,
      }
    );
    const tbl = await db.createTable("test", data);
    await tbl.createIndex("id").build();

    // check index directory
    const indexDir = path.join(tmpDir, "test.lance", "_indices");
    expect(fs.readdirSync(indexDir)).toHaveLength(1);
    // TODO: check index type.
    const rst64Query = await tbl.query().nearestTo(query64).limit(2).toArrow();
    const rst64Search = await tbl.search(query64, "vec2").limit(2).toArrow();
    expect(rst64Query.toString()).toEqual(rst64Search.toString());
    expect(rst64Query.numRows).toBe(2);
  });
});

describe("Read consistency interval", () => {
  let tmpDir: string;
  let tmpDir: tmp.DirResult;
  beforeEach(() => {
    tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "read-consistency-"));
    tmpDir = tmp.dirSync({ unsafeCleanup: true });
  });
  afterEach(() => tmpDir.removeCallback());

  // const intervals = [undefined, 0, 0.1];
  const intervals = [0];
  test.each(intervals)("read consistency interval %p", async (interval) => {
    const db = await connect({ uri: tmpDir });
    const db = await connect(tmpDir.name);
    const table = await db.createTable("my_table", [{ id: 1 }]);

    const db2 = await connect({ uri: tmpDir, readConsistencyInterval: interval });
    const db2 = await connect(tmpDir.name, {
      readConsistencyInterval: interval,
    });
    const table2 = await db2.openTable("my_table");
    expect(await table2.countRows()).toEqual(await table.countRows());

    await table.add([{ id: 2 }]);

    if (interval === undefined) {
      expect(await table2.countRows()).toEqual(1n);
      expect(await table2.countRows()).toEqual(1);
      // TODO: once we implement time travel we can uncomment this part of the test.
      // await table2.checkout_latest();
      // expect(await table2.countRows()).toEqual(2);
    } else if (interval === 0) {
      expect(await table2.countRows()).toEqual(2n);
      expect(await table2.countRows()).toEqual(2);
    } else {
      // interval == 0.1
      expect(await table2.countRows()).toEqual(1n);
      await new Promise(r => setTimeout(r, 100));
      expect(await table2.countRows()).toEqual(2n);
      expect(await table2.countRows()).toEqual(1);
      await new Promise((r) => setTimeout(r, 100));
      expect(await table2.countRows()).toEqual(2);
    }
  });
});
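The three branches in that test encode the intended semantics of `readConsistencyInterval`; a sketch of how a caller might choose between them (the import path mirrors the tests, the published package name may differ, and the comments summarize the test's branches rather than document new behavior):

```ts
import { connect } from "../dist"; // same import style as the tests above

async function demo(uri: string) {
  // Manual consistency (default): a second connection keeps reading its
  // cached table version until it explicitly checks out a newer one.
  const manual = await connect(uri);

  // Strong consistency: check for a newer table version before every read.
  const strong = await connect(uri, { readConsistencyInterval: 0 });

  // Eventual consistency: re-check at most every 0.1 s, amortizing the
  // version lookup across reads.
  const eventual = await connect(uri, { readConsistencyInterval: 0.1 });
  return { manual, strong, eventual };
}
```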
|
||||
|
||||
describe('schema evolution', function () {
|
||||
let tmpDir: string;
|
||||
describe("schema evolution", function () {
|
||||
let tmpDir: tmp.DirResult;
|
||||
beforeEach(() => {
|
||||
tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "schema-evolution-"));
|
||||
tmpDir = tmp.dirSync({ unsafeCleanup: true });
|
||||
});
|
||||
afterEach(() => {
|
||||
tmpDir.removeCallback();
|
||||
});
|
||||
|
||||
// Create a new sample table
|
||||
it('can add a new column to the schema', async function () {
|
||||
const con = await connect(tmpDir)
|
||||
const table = await con.createTable('vectors', [
|
||||
{ id: 1n, vector: [0.1, 0.2] }
|
||||
])
|
||||
it("can add a new column to the schema", async function () {
|
||||
const con = await connect(tmpDir.name);
|
||||
const table = await con.createTable("vectors", [
|
||||
{ id: 1n, vector: [0.1, 0.2] },
|
||||
]);
|
||||
|
||||
await table.addColumns([{ name: 'price', valueSql: 'cast(10.0 as float)' }])
|
||||
await table.addColumns([
|
||||
{ name: "price", valueSql: "cast(10.0 as float)" },
|
||||
]);
|
||||
|
||||
const expectedSchema = new Schema([
|
||||
new Field('id', new Int64(), true),
|
||||
new Field('vector', new FixedSizeList(2, new Field('item', new Float32(), true)), true),
|
||||
new Field('price', new Float32(), false)
|
||||
])
|
||||
expect(await table.schema()).toEqual(expectedSchema)
|
||||
new Field("id", new Int64(), true),
|
||||
new Field(
|
||||
"vector",
|
||||
new FixedSizeList(2, new Field("item", new Float32(), true)),
|
||||
true,
|
||||
),
|
||||
new Field("price", new Float32(), false),
|
||||
]);
|
||||
expect(await table.schema()).toEqual(expectedSchema);
|
||||
});
|
||||
|
||||
it('can alter the columns in the schema', async function () {
|
||||
const con = await connect(tmpDir)
|
||||
it("can alter the columns in the schema", async function () {
|
||||
const con = await connect(tmpDir.name);
|
||||
const schema = new Schema([
|
||||
new Field('id', new Int64(), true),
|
||||
new Field('vector', new FixedSizeList(2, new Field('item', new Float32(), true)), true),
|
||||
new Field('price', new Float64(), false)
|
||||
])
|
||||
const table = await con.createTable('vectors', [
|
||||
{ id: 1n, vector: [0.1, 0.2] }
|
||||
])
|
||||
new Field("id", new Int64(), true),
|
||||
new Field(
|
||||
"vector",
|
||||
new FixedSizeList(2, new Field("item", new Float32(), true)),
|
||||
true,
|
||||
),
|
||||
new Field("price", new Float64(), false),
|
||||
]);
|
||||
const table = await con.createTable("vectors", [
|
||||
{ id: 1n, vector: [0.1, 0.2] },
|
||||
]);
|
||||
// Can create a non-nullable column only through addColumns at the moment.
|
||||
await table.addColumns([{ name: 'price', valueSql: 'cast(10.0 as double)' }])
|
||||
expect(await table.schema()).toEqual(schema)
|
||||
await table.addColumns([
|
||||
{ name: "price", valueSql: "cast(10.0 as double)" },
|
||||
]);
|
||||
expect(await table.schema()).toEqual(schema);
|
||||
|
||||
await table.alterColumns([
|
||||
{ path: 'id', rename: 'new_id' },
|
||||
{ path: 'price', nullable: true }
|
||||
])
|
||||
{ path: "id", rename: "new_id" },
|
||||
{ path: "price", nullable: true },
|
||||
]);
|
||||
|
||||
const expectedSchema = new Schema([
|
||||
new Field('new_id', new Int64(), true),
|
||||
new Field('vector', new FixedSizeList(2, new Field('item', new Float32(), true)), true),
|
||||
new Field('price', new Float64(), true)
|
||||
])
|
||||
expect(await table.schema()).toEqual(expectedSchema)
|
||||
new Field("new_id", new Int64(), true),
|
||||
new Field(
|
||||
"vector",
|
||||
new FixedSizeList(2, new Field("item", new Float32(), true)),
|
||||
true,
|
||||
),
|
||||
new Field("price", new Float64(), true),
|
||||
]);
|
||||
expect(await table.schema()).toEqual(expectedSchema);
|
||||
});
|
||||
|
||||
it('can drop a column from the schema', async function () {
|
||||
const con = await connect(tmpDir)
|
||||
const table = await con.createTable('vectors', [
|
||||
{ id: 1n, vector: [0.1, 0.2] }
|
||||
])
|
||||
await table.dropColumns(['vector'])
|
||||
it("can drop a column from the schema", async function () {
|
||||
const con = await connect(tmpDir.name);
|
||||
const table = await con.createTable("vectors", [
|
||||
{ id: 1n, vector: [0.1, 0.2] },
|
||||
]);
|
||||
await table.dropColumns(["vector"]);
|
||||
|
||||
const expectedSchema = new Schema([
|
||||
new Field('id', new Int64(), true)
|
||||
])
|
||||
expect(await table.schema()).toEqual(expectedSchema)
|
||||
const expectedSchema = new Schema([new Field("id", new Int64(), true)]);
|
||||
expect(await table.schema()).toEqual(expectedSchema);
|
||||
});
|
||||
});

describe("when dealing with versioning", () => {
  let tmpDir: tmp.DirResult;
  beforeEach(() => {
    tmpDir = tmp.dirSync({ unsafeCleanup: true });
  });
  afterEach(() => {
    tmpDir.removeCallback();
  });

  it("can travel in time", async () => {
    // Setup
    const con = await connect(tmpDir.name);
    const table = await con.createTable("vectors", [
      { id: 1n, vector: [0.1, 0.2] },
    ]);
    const version = await table.version();
    await table.add([{ id: 2n, vector: [0.1, 0.2] }]);
    expect(await table.countRows()).toBe(2);
    // Make sure we can rewind
    await table.checkout(version);
    expect(await table.countRows()).toBe(1);
    // Can't add data in time travel mode
    await expect(table.add([{ id: 3n, vector: [0.1, 0.2] }])).rejects.toThrow(
      "table cannot be modified when a specific version is checked out",
    );
    // Can go back to normal mode
    await table.checkoutLatest();
    expect(await table.countRows()).toBe(2);
    // Should be able to add data again
    await table.add([{ id: 2n, vector: [0.1, 0.2] }]);
    expect(await table.countRows()).toBe(3);
    // Now checkout and restore
    await table.checkout(version);
    await table.restore();
    expect(await table.countRows()).toBe(1);
    // Should be able to add data
    await table.add([{ id: 2n, vector: [0.1, 0.2] }]);
    expect(await table.countRows()).toBe(2);
    // Can't use restore if not checked out
    await expect(table.restore()).rejects.toThrow(
      "checkout before running restore",
    );
  });
});

nodejs/__test__/tsconfig.json (new file, 10 lines)
@@ -0,0 +1,10 @@
{
  "extends": "../tsconfig.json",
  "compilerOptions": {
    "outDir": "./dist/spec",
    "module": "commonjs",
    "target": "es2022",
    "types": ["jest", "node"]
  },
  "include": ["**/*"]
}

nodejs/eslint.config.js (new file, 17 lines)
@@ -0,0 +1,17 @@
/* eslint-disable @typescript-eslint/naming-convention */
// @ts-check

const eslint = require("@eslint/js");
const tseslint = require("typescript-eslint");
const eslintConfigPrettier = require("eslint-config-prettier");

module.exports = tseslint.config(
  eslint.configs.recommended,
  eslintConfigPrettier,
  ...tseslint.configs.recommended,
  {
    rules: {
      "@typescript-eslint/naming-convention": "error",
    },
  },
);

@@ -1,7 +1,7 @@
/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
  preset: "ts-jest",
  testEnvironment: "node",
  moduleDirectories: ["node_modules", "./dist"],
  moduleFileExtensions: ["js", "ts"],
};

@@ -1,4 +1,4 @@
// Copyright 2023 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -13,23 +13,34 @@
// limitations under the License.

import {
  Int64,
  Field,
  makeBuilder,
  RecordBatchFileWriter,
  Utf8,
  FixedSizeList,
  Schema,
  Table as ArrowTable,
  Vector,
  vectorFromArray,
  RecordBatchStreamWriter,
  List,
  RecordBatch,
  makeData,
  Struct,
  Float,
  DataType,
  Binary,
  Float32,
} from "apache-arrow";
import { type EmbeddingFunction } from "./embedding/embedding_function";
import { sanitizeSchema } from "./sanitize";

/** Data type accepted by NodeJS SDK */
export type Data = Record<string, unknown>[] | ArrowTable;

/*
 * Options to control how a column should be converted to a vector array
 */
export class VectorColumnOptions {
  /** Vector column type. */
  type: Float = new Float32();

@@ -41,14 +52,50 @@ export class VectorColumnOptions {

/** Options to control the makeArrowTable call. */
export class MakeArrowTableOptions {
  /*
   * Schema of the data.
   *
   * If this is not provided then the data type will be inferred from the
   * JS type. Integer numbers will become int64, floating point numbers
   * will become float64 and arrays will become variable sized lists with
   * the data type inferred from the first element in the array.
   *
   * The schema must be specified if there are no records (e.g. to make
   * an empty table)
   */
  schema?: Schema;

  /*
   * Mapping from vector column name to expected type
   *
   * Lance expects vector columns to be fixed size list arrays (i.e. tensors).
   * However, `makeArrowTable` will not infer this by default (it creates
   * variable size list arrays). This field can be used to indicate that a column
   * should be treated as a vector column and converted to a fixed size list.
   *
   * The keys should be the names of the vector columns. The value specifies the
   * expected data type of the vector columns.
   *
   * If `schema` is provided then this field is ignored.
   *
   * By default, the column named "vector" will be assumed to be a float32
   * vector column.
   */
  vectorColumns: Record<string, VectorColumnOptions> = {
    vector: new VectorColumnOptions(),
  };

  /**
   * If true then string columns will be encoded with dictionary encoding
   *
   * Set this to true if your string columns tend to repeat the same values
   * often. For more precise control use the `schema` property to specify the
   * data type for individual columns.
   *
   * If `schema` is provided then this property is ignored.
   */
  dictionaryEncodeStrings: boolean = false;

  constructor(values?: Partial<MakeArrowTableOptions>) {
    Object.assign(this, values);
  }

@@ -58,8 +105,30 @@ export class MakeArrowTableOptions {
 * An enhanced version of the {@link makeTable} function from Apache Arrow
 * that supports nested fields and embeddings columns.
 *
 * This function converts an array of Record<String, any> (row-major JS objects)
 * to an Arrow Table (a columnar structure)
 *
 * Note that it currently does not support nulls.
 *
 * If a schema is provided then it will be used to determine the resulting array
 * types. Fields will also be reordered to fit the order defined by the schema.
 *
 * If a schema is not provided then the types will be inferred and the field order
 * will be controlled by the order of properties in the first record. If a type
 * is inferred it will always be nullable.
 *
 * If the input is empty then a schema must be provided to create an empty table.
 *
 * When a schema is not specified then data types will be inferred. The inference
 * rules are as follows:
 *
 * - boolean => Bool
 * - number => Float64
 * - String => Utf8
 * - Buffer => Binary
 * - Record<String, any> => Struct
 * - Array<any> => List
 *
 * @param data input data
 * @param options options to control the makeArrowTable call.
 *
@@ -82,8 +151,10 @@ export class MakeArrowTableOptions {
 * ], { schema });
 * ```
 *
 * By default it assumes that the column named `vector` is a vector column
 * and it will be converted into a fixed size list array of type float32.
 * The `vectorColumns` option can be used to support other vector column
 * names and data types.
 *
 * ```ts
 *
@@ -127,62 +198,437 @@ export class MakeArrowTableOptions {
 * ```
 */

export function makeArrowTable(
  data: Array<Record<string, unknown>>,
  options?: Partial<MakeArrowTableOptions>,
): ArrowTable {
  if (
    data.length === 0 &&
    (options?.schema === undefined || options?.schema === null)
  ) {
    throw new Error("At least one record or a schema needs to be provided");
  }

  const opt = new MakeArrowTableOptions(options ?? {});
  if (opt.schema !== undefined && opt.schema !== null) {
    opt.schema = sanitizeSchema(opt.schema);
  }
  const columns: Record<string, Vector> = {};
  // Prefer the field ordering of the schema, if present
  const columnNames =
    opt.schema != null ? (opt.schema.names as string[]) : Object.keys(data[0]);
  for (const colName of columnNames) {
    if (
      data.length !== 0 &&
      !Object.prototype.hasOwnProperty.call(data[0], colName)
    ) {
      // The field is present in the schema, but not in the data, skip it
      continue;
    }
    // Extract a single column from the records (transpose from row-major to col-major)
    let values = data.map((datum) => datum[colName]);

    // By default (type === undefined) arrow will infer the type from the JS type
    let type;
    if (opt.schema !== undefined) {
      // If there is a schema provided, then use that for the type instead
      type = opt.schema?.fields.filter((f) => f.name === colName)[0]?.type;
      if (DataType.isInt(type) && type.bitWidth === 64) {
        // wrap in BigInt to avoid bug: https://github.com/apache/arrow/issues/40051
        values = values.map((v) => {
          if (v === null) {
            return v;
          }
          if (typeof v === "bigint") {
            return v;
          }
          if (typeof v === "number") {
            return BigInt(v);
          }
          throw new Error(
            `Expected BigInt or number for column ${colName}, got ${typeof v}`,
          );
        });
      }
    } else {
      // Otherwise, check to see if this column is one of the vector columns
      // defined by opt.vectorColumns and, if so, use the fixed size list type
      const vectorColumnOptions = opt.vectorColumns[colName];
      if (vectorColumnOptions !== undefined) {
        const firstNonNullValue = values.find((v) => v !== null);
        if (Array.isArray(firstNonNullValue)) {
          type = newVectorType(
            firstNonNullValue.length,
            vectorColumnOptions.type,
          );
        } else {
          throw new Error(
            `Column ${colName} is expected to be a vector column but first non-null value is not an array. Could not determine size of vector column`,
          );
        }
      }
    }

    try {
      // Convert an Array of JS values to an arrow vector
      columns[colName] = makeVector(values, type, opt.dictionaryEncodeStrings);
    } catch (error: unknown) {
      // eslint-disable-next-line @typescript-eslint/restrict-template-expressions
      throw Error(`Could not convert column "${colName}" to Arrow: ${error}`);
    }
  }

  if (opt.schema != null) {
    // `new ArrowTable(columns)` infers a schema which may sometimes have
    // incorrect nullability (it assumes nullable=true always)
    //
    // `new ArrowTable(schema, columns)` will also fail because it will create a
    // batch with an inferred schema and then complain that the batch schema
    // does not match the provided schema.
    //
    // To work around this we first create a table with the wrong schema and
    // then patch the schema of the batches so we can use
    // `new ArrowTable(schema, batches)` which does not do any schema inference
    const firstTable = new ArrowTable(columns);
    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
    const batchesFixed = firstTable.batches.map(
      (batch) => new RecordBatch(opt.schema!, batch.data),
    );
    return new ArrowTable(opt.schema, batchesFixed);
  } else {
    return new ArrowTable(columns);
  }
}
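
For reference, a minimal usage sketch of the reworked `makeArrowTable` (field names and values here are illustrative, not part of the diff):

```ts
import { Field, FixedSizeList, Float32, Int64, Schema } from "apache-arrow";
import { makeArrowTable } from "./arrow";

// With an explicit schema, field types and ordering come from the schema.
const schema = new Schema([
  new Field("id", new Int64(), true),
  new Field(
    "vector",
    new FixedSizeList(2, new Field("item", new Float32(), true)),
    true,
  ),
]);
const table = makeArrowTable(
  [
    { id: 1n, vector: [0.1, 0.2] },
    { id: 2n, vector: [0.3, 0.4] },
  ],
  { schema },
);

// Without a schema, the column named "vector" is still converted into a
// fixed size list of float32 via the default `vectorColumns` option;
// plain numbers are inferred as float64.
const inferred = makeArrowTable([{ id: 1, vector: [0.1, 0.2] }]);
```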

/**
 * Create an empty Arrow table with the provided schema
 */
export function makeEmptyTable(schema: Schema): ArrowTable {
  return makeArrowTable([], { schema });
}

// Helper function to convert Array<Array<any>> to a variable sized list array
// @ts-expect-error (Vector<unknown> is not assignable to Vector<any>)
function makeListVector(lists: unknown[][]): Vector<unknown> {
  if (lists.length === 0 || lists[0].length === 0) {
    throw Error("Cannot infer list vector from empty array or empty list");
  }
  const sampleList = lists[0];
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  let inferredType: any;
  try {
    const sampleVector = makeVector(sampleList);
    inferredType = sampleVector.type;
  } catch (error: unknown) {
    // eslint-disable-next-line @typescript-eslint/restrict-template-expressions
    throw Error(`Cannot infer list vector. Cannot infer inner type: ${error}`);
  }

  const listBuilder = makeBuilder({
    type: new List(new Field("item", inferredType, true)),
  });
  for (const list of lists) {
    listBuilder.append(list);
  }
  return listBuilder.finish().toVector();
}

// Helper function to convert an Array of JS values to an Arrow Vector
function makeVector(
  values: unknown[],
  type?: DataType,
  stringAsDictionary?: boolean,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
): Vector<any> {
  if (type !== undefined) {
    // No need for inference, let Arrow create it
    return vectorFromArray(values, type);
  }
  if (values.length === 0) {
    throw Error(
      "makeVector requires at least one value or the type must be specified",
    );
  }
  const sampleValue = values.find((val) => val !== null && val !== undefined);
  if (sampleValue === undefined) {
    throw Error(
      "makeVector cannot infer the type if all values are null or undefined",
    );
  }
  if (Array.isArray(sampleValue)) {
    // Default Arrow inference doesn't handle list types
    return makeListVector(values as unknown[][]);
  } else if (Buffer.isBuffer(sampleValue)) {
    // Default Arrow inference doesn't handle Buffer
    return vectorFromArray(values, new Binary());
  } else if (
    !(stringAsDictionary ?? false) &&
    (typeof sampleValue === "string" || sampleValue instanceof String)
  ) {
    // If the type is string then don't use Arrow's default inference unless dictionaries are requested
    // because it will always use dictionary encoding for strings
    return vectorFromArray(values, new Utf8());
  } else {
    // Convert a JS array of values to an arrow vector
    return vectorFromArray(values);
  }
}
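
To illustrate the inference rules the helper encodes, here is a small sketch built directly on apache-arrow (the values are hypothetical; `makeVector` itself is module-private):

```ts
import { Binary, Utf8, vectorFromArray } from "apache-arrow";

// Strings: an explicit Utf8 type sidesteps Arrow's default dictionary encoding.
const names = vectorFromArray(["a", "b", "a"], new Utf8());

// Buffers: Arrow cannot infer Binary on its own, so it is passed explicitly.
const blobs = vectorFromArray([Buffer.from("0102", "hex")], new Binary());

// Plain numbers fall through to Arrow's default inference (float64).
const scores = vectorFromArray([1.5, 2.5]);
```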

async function applyEmbeddings<T>(
  table: ArrowTable,
  embeddings?: EmbeddingFunction<T>,
  schema?: Schema,
): Promise<ArrowTable> {
  if (embeddings == null) {
    return table;
  }

  if (schema !== undefined && schema !== null) {
    schema = sanitizeSchema(schema);
  }

  // Convert from ArrowTable to Record<String, Vector>
  const colEntries = [...Array(table.numCols).keys()].map((_, idx) => {
    const name = table.schema.fields[idx].name;
    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
    const vec = table.getChildAt(idx)!;
    return [name, vec];
  });
  const newColumns = Object.fromEntries(colEntries);

  const sourceColumn = newColumns[embeddings.sourceColumn];
  const destColumn = embeddings.destColumn ?? "vector";
  const innerDestType = embeddings.embeddingDataType ?? new Float32();
  if (sourceColumn === undefined) {
    throw new Error(
      `Cannot apply embedding function because the source column '${embeddings.sourceColumn}' was not present in the data`,
    );
  }

  if (table.numRows === 0) {
    if (Object.prototype.hasOwnProperty.call(newColumns, destColumn)) {
      // We have an empty table and it already has the embedding column so no work needs to be done
      // Note: we don't return an error like we do below because this is a common occurrence. For example,
      // if we call convertToTable with 0 records and a schema that includes the embedding
      return table;
    }
    if (embeddings.embeddingDimension !== undefined) {
      const destType = newVectorType(
        embeddings.embeddingDimension,
        innerDestType,
      );
      newColumns[destColumn] = makeVector([], destType);
    } else if (schema != null) {
      const destField = schema.fields.find((f) => f.name === destColumn);
      if (destField != null) {
        newColumns[destColumn] = makeVector([], destField.type);
      } else {
        throw new Error(
          `Attempt to apply embeddings to an empty table failed because schema was missing embedding column '${destColumn}'`,
        );
      }
    } else {
      throw new Error(
        "Attempt to apply embeddings to an empty table when the embeddings function does not specify `embeddingDimension`",
      );
    }
  } else {
    if (Object.prototype.hasOwnProperty.call(newColumns, destColumn)) {
      throw new Error(
        `Attempt to apply embeddings to table failed because column ${destColumn} already existed`,
      );
    }
    if (table.batches.length > 1) {
      throw new Error(
        "Internal error: `makeArrowTable` unexpectedly created a table with more than one batch",
      );
    }
    const values = sourceColumn.toArray();
    const vectors = await embeddings.embed(values as T[]);
    if (vectors.length !== values.length) {
      throw new Error(
        "Embedding function did not return an embedding for each input element",
      );
    }
    const destType = newVectorType(vectors[0].length, innerDestType);
    newColumns[destColumn] = makeVector(vectors, destType);
  }

  const newTable = new ArrowTable(newColumns);
  if (schema != null) {
    if (schema.fields.find((f) => f.name === destColumn) === undefined) {
      throw new Error(
        `When using embedding functions and specifying a schema the schema should include the embedding column but the column ${destColumn} was missing`,
      );
    }
    return alignTable(newTable, schema);
  }
  return newTable;
}

/*
 * Convert an Array of records into an Arrow Table, optionally applying an
 * embeddings function to it.
 *
 * This function calls `makeArrowTable` first to create the Arrow Table.
 * Any provided `makeTableOptions` (e.g. a schema) will be passed on to
 * that call.
 *
 * The embedding function will be passed a column of values (based on the
 * `sourceColumn` of the embedding function) and expects to receive back
 * number[][] which will be converted into a fixed size list column. By
 * default this will be a fixed size list of Float32 but that can be
 * customized by the `embeddingDataType` property of the embedding function.
 *
 * If a schema is provided in `makeTableOptions` then it should include the
 * embedding columns. If no schema is provided then embedding columns will
 * be placed at the end of the table, after all of the input columns.
 */
export async function convertToTable<T>(
  data: Array<Record<string, unknown>>,
  embeddings?: EmbeddingFunction<T>,
  makeTableOptions?: Partial<MakeArrowTableOptions>,
): Promise<ArrowTable> {
  const table = makeArrowTable(data, makeTableOptions);
  return await applyEmbeddings(table, embeddings, makeTableOptions?.schema);
}
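
A hedged sketch of `convertToTable` with a toy embedding function (the length-based `embed` is a stand-in for a real model):

```ts
import { convertToTable } from "./arrow";
import { type EmbeddingFunction } from "./embedding/embedding_function";

// A fake 2-dimensional embedding; a real implementation would call a model.
const fakeEmbed: EmbeddingFunction<string> = {
  sourceColumn: "text",
  embeddingDimension: 2,
  embed: async (texts) => texts.map((t) => [t.length, t.length / 2]),
};

const table = await convertToTable(
  [{ text: "hello" }, { text: "world" }],
  fakeEmbed,
);
// `table` now has a "vector" column: FixedSizeList<Float32> of length 2,
// appended after the input columns since no schema was provided.
```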

// Creates the Arrow Type for a Vector column with dimension `dim`
function newVectorType<T extends Float>(
  dim: number,
  innerType: T,
): FixedSizeList<T> {
  // In Lance we always default to have the elements nullable, so we need to set it to true
  // otherwise we often get schema mismatches because the stored data always has schema with nullable elements
  const children = new Field<T>("item", innerType, true);
  return new FixedSizeList(dim, children);
}

/**
 * Serialize an Array of records into a buffer using the Arrow IPC File serialization
 *
 * This function will call `convertToTable` and pass on `embeddings` and `schema`
 *
 * `schema` is required if data is empty
 */
export async function fromRecordsToBuffer<T>(
  data: Array<Record<string, unknown>>,
  embeddings?: EmbeddingFunction<T>,
  schema?: Schema,
): Promise<Buffer> {
  if (schema !== undefined && schema !== null) {
    schema = sanitizeSchema(schema);
  }
  const table = await convertToTable(data, embeddings, { schema });
  const writer = RecordBatchFileWriter.writeAll(table);
  return Buffer.from(await writer.toUint8Array());
}

/**
 * Serialize an Array of records into a buffer using the Arrow IPC Stream serialization
 *
 * This function will call `convertToTable` and pass on `embeddings` and `schema`
 *
 * `schema` is required if data is empty
 */
export async function fromRecordsToStreamBuffer<T>(
  data: Array<Record<string, unknown>>,
  embeddings?: EmbeddingFunction<T>,
  schema?: Schema,
): Promise<Buffer> {
  if (schema !== undefined && schema !== null) {
    schema = sanitizeSchema(schema);
  }
  const table = await convertToTable(data, embeddings, { schema });
  const writer = RecordBatchStreamWriter.writeAll(table);
  return Buffer.from(await writer.toUint8Array());
}

/**
 * Serialize an Arrow Table into a buffer using the Arrow IPC File serialization
 *
 * This function will apply `embeddings` to the table in a manner similar to
 * `convertToTable`.
 *
 * `schema` is required if the table is empty
 */
export async function fromTableToBuffer<T>(
  table: ArrowTable,
  embeddings?: EmbeddingFunction<T>,
  schema?: Schema,
): Promise<Buffer> {
  if (schema !== undefined && schema !== null) {
    schema = sanitizeSchema(schema);
  }
  const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema);
  const writer = RecordBatchFileWriter.writeAll(tableWithEmbeddings);
  return Buffer.from(await writer.toUint8Array());
}

export async function fromDataToBuffer<T>(
  data: Data,
  embeddings?: EmbeddingFunction<T>,
  schema?: Schema,
): Promise<Buffer> {
  if (schema !== undefined && schema !== null) {
    schema = sanitizeSchema(schema);
  }
  if (data instanceof ArrowTable) {
    return fromTableToBuffer(data, embeddings, schema);
  } else {
    const table = await convertToTable(data);
    return fromTableToBuffer(table, embeddings, schema);
  }
}

/**
 * Serialize an Arrow Table into a buffer using the Arrow IPC Stream serialization
 *
 * This function will apply `embeddings` to the table in a manner similar to
 * `convertToTable`.
 *
 * `schema` is required if the table is empty
 */
export async function fromTableToStreamBuffer<T>(
  table: ArrowTable,
  embeddings?: EmbeddingFunction<T>,
  schema?: Schema,
): Promise<Buffer> {
  const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema);
  const writer = RecordBatchStreamWriter.writeAll(tableWithEmbeddings);
  return Buffer.from(await writer.toUint8Array());
}

function alignBatch(batch: RecordBatch, schema: Schema): RecordBatch {
  const alignedChildren = [];
  for (const field of schema.fields) {
    const indexInBatch = batch.schema.fields?.findIndex(
      (f) => f.name === field.name,
    );
    if (indexInBatch < 0) {
      throw new Error(
        `The column ${field.name} was not found in the Arrow Table`,
      );
    }
    alignedChildren.push(batch.data.children[indexInBatch]);
  }
  const newData = makeData({
    type: new Struct(schema.fields),
    length: batch.numRows,
    nullCount: batch.nullCount,
    children: alignedChildren,
  });
  return new RecordBatch(schema, newData);
}

function alignTable(table: ArrowTable, schema: Schema): ArrowTable {
  const alignedBatches = table.batches.map((batch) =>
    alignBatch(batch, schema),
  );
  return new ArrowTable(schema, alignedBatches);
}

// Creates an empty Arrow Table
export function createEmptyTable(schema: Schema): ArrowTable {
  return new ArrowTable(sanitizeSchema(schema));
}

@@ -12,26 +12,95 @@
// See the License for the specific language governing permissions and
// limitations under the License.

import { fromTableToBuffer, makeArrowTable, makeEmptyTable } from "./arrow";
import { Connection as LanceDbConnection } from "./native";
import { Table } from "./table";
import { Table as ArrowTable, Schema } from "apache-arrow";

export interface CreateTableOptions {
  /**
   * The mode to use when creating the table.
   *
   * If this is set to "create" and the table already exists then either
   * an error will be thrown or, if existOk is true, then nothing will
   * happen. Any provided data will be ignored.
   *
   * If this is set to "overwrite" then any existing table will be replaced.
   */
  mode: "create" | "overwrite";
  /**
   * If this is true and the table already exists and the mode is "create"
   * then no error will be raised.
   */
  existOk: boolean;
}

export interface TableNamesOptions {
  /**
   * If present, only return names that come lexicographically after the
   * supplied value.
   *
   * This can be combined with limit to implement pagination by setting this to
   * the last table name from the previous page.
   */
  startAfter?: string;
  /** An optional limit to the number of results to return. */
  limit?: number;
}

/**
 * A LanceDB Connection that allows you to open tables and create new ones.
 *
 * A Connection can be local against the filesystem or remote against a server.
 *
 * A Connection is intended to be a long lived object and may hold open
 * resources such as HTTP connection pools. This is generally fine and
 * a single connection should be shared if it is going to be used many
 * times. However, if you are finished with a connection, you may call
 * close to eagerly free these resources. Any call to a Connection
 * method after it has been closed will result in an error.
 *
 * Closing a connection is optional. Connections will automatically
 * be closed when they are garbage collected.
 *
 * Any created tables are independent and will continue to work even if
 * the underlying connection has been closed.
 */
export class Connection {
  readonly inner: LanceDbConnection;

  constructor(inner: LanceDbConnection) {
    this.inner = inner;
  }

  /** Return true if the connection has not been closed */
  isOpen(): boolean {
    return this.inner.isOpen();
  }

  /** Close the connection, releasing any underlying resources.
   *
   * It is safe to call this method multiple times.
   *
   * Any attempt to use the connection after it is closed will result in an error.
   */
  close(): void {
    this.inner.close();
  }

  /** Return a brief description of the connection */
  display(): string {
    return this.inner.display();
  }

  /** List all the table names in this database.
   *
   * Tables will be returned in lexicographical order.
   *
   * @param options Optional parameters to control the listing.
   */
  async tableNames(options?: Partial<TableNamesOptions>): Promise<string[]> {
    return this.inner.tableNames(options?.startAfter, options?.limit);
  }

  /**
@@ -53,10 +122,48 @@ export class Connection {
   */
  async createTable(
    name: string,
    data: Record<string, unknown>[] | ArrowTable,
    options?: Partial<CreateTableOptions>,
  ): Promise<Table> {
    let mode: string = options?.mode ?? "create";
    const existOk = options?.existOk ?? false;

    if (mode === "create" && existOk) {
      mode = "exist_ok";
    }

    let table: ArrowTable;
    if (data instanceof ArrowTable) {
      table = data;
    } else {
      table = makeArrowTable(data);
    }
    const buf = await fromTableToBuffer(table);
    const innerTable = await this.inner.createTable(name, buf, mode);
    return new Table(innerTable);
  }

  /**
   * Creates a new empty Table
   *
   * @param {string} name - The name of the table.
   * @param schema - The schema of the table
   */
  async createEmptyTable(
    name: string,
    schema: Schema,
    options?: Partial<CreateTableOptions>,
  ): Promise<Table> {
    let mode: string = options?.mode ?? "create";
    const existOk = options?.existOk ?? false;

    if (mode === "create" && existOk) {
      mode = "exist_ok";
    }

    const table = makeEmptyTable(schema);
    const buf = await fromTableToBuffer(table);
    const innerTable = await this.inner.createEmptyTable(name, buf, mode);
    return new Table(innerTable);
  }
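
A short usage sketch of the reworked Connection API (the database path, table names, and data are illustrative; the module specifier depends on how the package is consumed):

```ts
import { connect } from "./index";
import { Field, Int64, Schema } from "apache-arrow";

const db = await connect("/tmp/lancedb");

// "create" mode with existOk: true maps to "exist_ok" internally,
// so re-running this line is a no-op rather than an error.
const table = await db.createTable(
  "my_table",
  [{ id: 1n, vector: [0.1, 0.2] }],
  { mode: "create", existOk: true },
);

// An empty table needs an explicit schema since there is no data to infer from.
const empty = await db.createEmptyTable(
  "empty_table",
  new Schema([new Field("id", new Int64(), true)]),
);

// Paginated listing via startAfter/limit.
console.log(await db.tableNames({ limit: 10 }));
```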

nodejs/lancedb/embedding/embedding_function.ts (new file, 77 lines)
@@ -0,0 +1,77 @@
// Copyright 2023 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import { type Float } from "apache-arrow";

/**
 * An embedding function that automatically creates vector representation for a given column.
 */
export interface EmbeddingFunction<T> {
  /**
   * The name of the column that will be used as input for the Embedding Function.
   */
  sourceColumn: string;

  /**
   * The data type of the embedding
   *
   * The embedding function should return `number`. This will be converted into
   * an Arrow float array. By default this will be Float32 but this property can
   * be used to control the conversion.
   */
  embeddingDataType?: Float;

  /**
   * The dimension of the embedding
   *
   * This is optional, normally this can be determined by looking at the results of
   * `embed`. If this is not specified, and there is an attempt to apply the embedding
   * to an empty table, then that process will fail.
   */
  embeddingDimension?: number;

  /**
   * The name of the column that will contain the embedding
   *
   * By default this is "vector"
   */
  destColumn?: string;

  /**
   * Should the source column be excluded from the resulting table
   *
   * By default the source column is included. Set this to true and
   * only the embedding will be stored.
   */
  excludeSource?: boolean;

  /**
   * Creates a vector representation for the given values.
   */
  embed: (data: T[]) => Promise<number[][]>;
}

export function isEmbeddingFunction<T>(
  value: unknown,
): value is EmbeddingFunction<T> {
  if (typeof value !== "object" || value === null) {
    return false;
  }
  if (!("sourceColumn" in value) || !("embed" in value)) {
    return false;
  }
  return (
    typeof value.sourceColumn === "string" && typeof value.embed === "function"
  );
}
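
A minimal sketch of implementing the interface (the character-based embedding is a toy stand-in for a real model):

```ts
import { Float32 } from "apache-arrow";
import {
  type EmbeddingFunction,
  isEmbeddingFunction,
} from "./embedding_function";

const toyEmbedding: EmbeddingFunction<string> = {
  sourceColumn: "text",
  destColumn: "vector",
  embeddingDataType: new Float32(),
  embeddingDimension: 3,
  // A real implementation would call an embedding model here.
  embed: async (texts) =>
    texts.map((t) => [t.length, t.length > 0 ? t.charCodeAt(0) : 0, 1.0]),
};

console.log(isEmbeddingFunction(toyEmbedding)); // true
```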

nodejs/lancedb/embedding/openai.ts (new file, 62 lines)
@@ -0,0 +1,62 @@
// Copyright 2023 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import { type EmbeddingFunction } from "./embedding_function";
import type OpenAI from "openai";

export class OpenAIEmbeddingFunction implements EmbeddingFunction<string> {
  private readonly _openai: OpenAI;
  private readonly _modelName: string;

  constructor(
    sourceColumn: string,
    openAIKey: string,
    modelName: string = "text-embedding-ada-002",
  ) {
    /**
     * @type {import("openai").default}
     */
    // eslint-disable-next-line @typescript-eslint/naming-convention
    let Openai;
    try {
      // eslint-disable-next-line @typescript-eslint/no-var-requires
      Openai = require("openai");
    } catch {
      throw new Error("please install openai@^4.24.1 using npm install openai");
    }

    this.sourceColumn = sourceColumn;
    const configuration = {
      apiKey: openAIKey,
    };

    this._openai = new Openai(configuration);
    this._modelName = modelName;
  }

  async embed(data: string[]): Promise<number[][]> {
    const response = await this._openai.embeddings.create({
      model: this._modelName,
      input: data,
    });

    const embeddings: number[][] = [];
    for (let i = 0; i < response.data.length; i++) {
      embeddings.push(response.data[i].embedding);
    }
    return embeddings;
  }

  sourceColumn: string;
}
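
Hypothetical usage of the class above (the key and source column are placeholders):

```ts
import { OpenAIEmbeddingFunction } from "./embedding/openai";

// Construction throws if the optional `openai` package is not installed.
const embedFn = new OpenAIEmbeddingFunction(
  "text",
  process.env.OPENAI_API_KEY ?? "",
);

const vectors = await embedFn.embed(["hello world"]);
// text-embedding-ada-002 produces 1536-dimensional embeddings.
console.log(vectors[0].length);
```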

@@ -13,18 +13,14 @@
// limitations under the License.

import { Connection } from "./connection";
import {
  Connection as LanceDbConnection,
  ConnectionOptions,
} from "./native.js";

export { ConnectionOptions, WriteOptions, Query } from "./native.js";
export { Connection, CreateTableOptions } from "./connection";
export { Table, AddDataOptions } from "./table";
export { Data } from "./arrow";

/**
 * Connect to a LanceDB instance at the given URI.
@@ -39,26 +35,11 @@
 *
 * @see {@link ConnectionOptions} for more details on the URI format.
 */
export async function connect(
  uri: string,
  opts?: Partial<ConnectionOptions>,
): Promise<Connection> {
  opts = opts ?? {};
  const nativeConn = await LanceDbConnection.new(uri, opts);
  return new Connection(nativeConn);
}
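
The new signature takes the URI positionally, with options as a separate argument; a sketch (URIs are illustrative):

```ts
import { connect } from "./index";

// Local directory database
const db = await connect("/tmp/my-lancedb");

// Passing ConnectionOptions (e.g. an API key for a remote deployment)
const remote = await connect("db://my-database", { apiKey: "..." });
```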

@@ -1,102 +0,0 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import {
  MetricType,
  IndexBuilder as NativeBuilder,
  Table as NativeTable,
} from "./native";

/** Options to create `IVF_PQ` index */
export interface IvfPQOptions {
  /** Number of IVF partitions. */
  num_partitions?: number;

  /** Number of sub-vectors in PQ coding. */
  num_sub_vectors?: number;

  /** Number of bits used for each PQ code. */
  num_bits?: number;

  /** Metric type to calculate the distance between vectors.
   *
   * Supported metrics: `L2`, `Cosine` and `Dot`.
   */
  metric_type?: MetricType;

  /** Number of iterations to train K-means.
   *
   * Default is 50. More iterations usually yield better results,
   * but take longer to train.
   */
  max_iterations?: number;

  sample_rate?: number;
}

/**
 * Building an index on LanceDB {@link Table}
 *
 * @see {@link Table.createIndex} for detailed usage.
 */
export class IndexBuilder {
  private inner: NativeBuilder;

  constructor(tbl: NativeTable) {
    this.inner = tbl.createIndex();
  }

  /** Instruct the builder to build an `IVF_PQ` index */
  ivf_pq(options?: IvfPQOptions): IndexBuilder {
    this.inner.ivfPq(
      options?.metric_type,
      options?.num_partitions,
      options?.num_sub_vectors,
      options?.num_bits,
      options?.max_iterations,
      options?.sample_rate
    );
    return this;
  }

  /** Instruct the builder to build a Scalar index. */
  scalar(): IndexBuilder {
    this.scalar();
    return this;
  }

  /** Set the column(s) to create index on top of. */
  column(col: string): IndexBuilder {
    this.inner.column(col);
    return this;
  }

  /** Set to true to replace existing index. */
  replace(val: boolean): IndexBuilder {
    this.inner.replace(val);
    return this;
  }

  /** Specify the name of the index. Optional */
  name(n: string): IndexBuilder {
    this.inner.name(n);
    return this;
  }

  /** Building the index. */
  async build() {
    await this.inner.build();
  }
}

nodejs/lancedb/indices.ts (new file, 195 lines)
@@ -0,0 +1,195 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import { Index as LanceDbIndex } from "./native";

/**
 * Options to create an `IVF_PQ` index
 */
export interface IvfPqOptions {
  /** The number of IVF partitions to create.
   *
   * This value should generally scale with the number of rows in the dataset.
   * By default the number of partitions is the square root of the number of
   * rows.
   *
   * If this value is too large then the first part of the search (picking the
   * right partition) will be slow. If this value is too small then the second
   * part of the search (searching within a partition) will be slow.
   */
  numPartitions?: number;

  /** Number of sub-vectors of PQ.
   *
   * This value controls how much the vector is compressed during the quantization step.
   * The more sub vectors there are the less the vector is compressed. The default is
   * the dimension of the vector divided by 16. If the dimension is not evenly divisible
   * by 16 we use the dimension divided by 8.
   *
   * The above two cases are highly preferred. Having 8 or 16 values per subvector allows
   * us to use efficient SIMD instructions.
   *
   * If the dimension is not divisible by 8 then we use 1 subvector. This is not ideal and
   * will likely result in poor performance.
   */
  numSubVectors?: number;

  /** [DistanceType] to use to build the index.
   *
   * Default value is [DistanceType::L2].
   *
   * This is used when training the index to calculate the IVF partitions
   * (vectors are grouped in partitions with similar vectors according to this
   * distance type) and to calculate a subvector's code during quantization.
   *
   * The distance type used to train an index MUST match the distance type used
   * to search the index. Failure to do so will yield inaccurate results.
   *
   * The following distance types are available:
   *
   * "l2" - Euclidean distance. This is a very common distance metric that
   * accounts for both magnitude and direction when determining the distance
   * between vectors. L2 distance has a range of [0, ∞).
   *
   * "cosine" - Cosine distance. Cosine distance is a distance metric
   * calculated from the cosine similarity between two vectors. Cosine
   * similarity is a measure of similarity between two non-zero vectors of an
   * inner product space. It is defined to equal the cosine of the angle
   * between them. Unlike L2, the cosine distance is not affected by the
   * magnitude of the vectors. Cosine distance has a range of [0, 2].
   *
   * Note: the cosine distance is undefined when one (or both) of the vectors
   * are all zeros (there is no direction). These vectors are invalid and may
   * never be returned from a vector search.
   *
   * "dot" - Dot product. Dot distance is the dot product of two vectors. Dot
   * distance has a range of (-∞, ∞). If the vectors are normalized (i.e. their
   * L2 norm is 1), then dot distance is equivalent to the cosine distance.
   */
  distanceType?: "l2" | "cosine" | "dot";

  /** Max iteration to train IVF kmeans.
   *
   * When training an IVF PQ index we use kmeans to calculate the partitions. This parameter
   * controls how many iterations of kmeans to run.
   *
   * Increasing this might improve the quality of the index but in most cases these extra
   * iterations have diminishing returns.
   *
   * The default value is 50.
   */
  maxIterations?: number;

  /** The number of vectors, per partition, to sample when training IVF kmeans.
   *
   * When an IVF PQ index is trained, we need to calculate partitions. These are groups
   * of vectors that are similar to each other. To do this we use an algorithm called kmeans.
   *
   * Running kmeans on a large dataset can be slow. To speed this up we run kmeans on a
   * random sample of the data. This parameter controls the size of the sample. The total
   * number of vectors used to train the index is `sample_rate * num_partitions`.
   *
   * Increasing this value might improve the quality of the index but in most cases the
   * default should be sufficient.
   *
   * The default value is 256.
   */
  sampleRate?: number;
}

export class Index {
  private readonly inner: LanceDbIndex;
  private constructor(inner: LanceDbIndex) {
    this.inner = inner;
  }

  /**
   * Create an IvfPq index
   *
   * This index stores a compressed (quantized) copy of every vector. These vectors
   * are grouped into partitions of similar vectors. Each partition keeps track of
   * a centroid which is the average value of all vectors in the group.
   *
   * During a query the centroids are compared with the query vector to find the closest
   * partitions. The compressed vectors in these partitions are then searched to find
   * the closest vectors.
   *
   * The compression scheme is called product quantization. Each vector is divided into
   * subvectors and then each subvector is quantized into a small number of bits. The
   * parameters `num_bits` and `num_subvectors` control this process, providing a tradeoff
   * between index size (and thus search speed) and index accuracy.
   *
   * The partitioning process is called IVF and the `num_partitions` parameter controls how
   * many groups to create.
   *
   * Note that training an IVF PQ index on a large dataset is a slow operation and
   * currently is also a memory intensive operation.
   */
  static ivfPq(options?: Partial<IvfPqOptions>) {
    return new Index(
      LanceDbIndex.ivfPq(
        options?.distanceType,
        options?.numPartitions,
        options?.numSubVectors,
        options?.maxIterations,
        options?.sampleRate,
      ),
    );
  }

  /** Create a btree index
   *
   * A btree index is an index on scalar columns. The index stores a copy of the column
   * in sorted order. A header entry is created for each block of rows (currently the
   * block size is fixed at 4096). These header entries are stored in a separate
   * cacheable structure (a btree). To search for data the header is used to determine
   * which blocks need to be read from disk.
   *
   * For example, a btree index in a table with 1Bi rows requires sizeof(Scalar) * 256Ki
   * bytes of memory and will generally need to read sizeof(Scalar) * 4096 bytes to find
   * the correct row ids.
   *
   * This index is good for scalar columns with mostly distinct values and does best when
   * the query is highly selective.
   *
   * The btree index does not currently have any parameters though parameters such as the
   * block size may be added in the future.
   */
  static btree() {
    return new Index(LanceDbIndex.btree());
  }
}

export interface IndexOptions {
  /** Advanced index configuration
   *
   * This option allows you to specify a specific index to create and also
   * allows you to pass in configuration for training the index.
   *
   * See the static methods on Index for details on the various index types.
   *
   * If this is not supplied then column data type(s) and column statistics
   * will be used to determine the most useful kind of index to create.
   */
  config?: Index;
  /** Whether to replace the existing index
   *
   * If this is false, and another index already exists on the same columns
   * and the same name, then an error will be returned. This is true even if
   * that index is out of date.
   *
   * The default is true
   */
  replace?: boolean;
}
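
A hedged sketch of how these builders plug into `Table.createIndex` via the `IndexOptions` shape above (the table and parameter values are illustrative; the wrapper's exact signature lives in table.ts, outside this excerpt):

```ts
import { Index } from "./indices";

// Vector index: 256 IVF partitions, 16 PQ sub-vectors, cosine distance.
await table.createIndex("vector", {
  config: Index.ivfPq({
    numPartitions: 256,
    numSubVectors: 16,
    distanceType: "cosine",
  }),
});

// Scalar btree index for highly selective filters on a mostly-distinct column.
await table.createIndex("id", { config: Index.btree() });
```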

nodejs/lancedb/native.d.ts (vendored)
@@ -3,14 +3,17 @@

/* auto-generated by NAPI-RS */

/** A description of an index currently configured on a column */
export interface IndexConfig {
  /** The type of the index */
  indexType: string
  /**
   * The columns in the index
   *
   * Currently this is always an array of size 1. In the future there may
   * be more columns to represent composite indices.
   */
  columns: Array<string>
}
/**
 * A definition of a column alteration. The alteration changes the column at
@@ -45,7 +48,6 @@ export interface AddColumnsSql {
  valueSql: string
}
export interface ConnectionOptions {
  apiKey?: string
  hostOverride?: string
  /**
@@ -71,12 +73,15 @@ export const enum WriteMode {
export interface WriteOptions {
  mode?: WriteMode
}
export function connect(uri: string, options: ConnectionOptions): Promise<Connection>
export class Connection {
  /** Create a new Connection instance from the given URI. */
  static new(uri: string, options: ConnectionOptions): Promise<Connection>
  display(): string
  isOpen(): boolean
  close(): void
  /** List all tables in the dataset. */
  tableNames(startAfter?: string | undefined | null, limit?: number | undefined | null): Promise<Array<string>>
  /**
   * Create table from a Apache Arrow IPC (file) buffer.
   *
@@ -85,18 +90,15 @@ export class Connection {
   * - buf: The buffer containing the IPC file.
   *
   */
  createTable(name: string, buf: Buffer, mode: string): Promise<Table>
  createEmptyTable(name: string, schemaBuf: Buffer, mode: string): Promise<Table>
  openTable(name: string): Promise<Table>
  /** Drop table with the name. Or raise an error if the table does not exist. */
  dropTable(name: string): Promise<void>
}
export class Index {
  static ivfPq(distanceType?: string | undefined | null, numPartitions?: number | undefined | null, numSubVectors?: number | undefined | null, maxIterations?: number | undefined | null, sampleRate?: number | undefined | null): Index
  static btree(): Index
}
/** Typescript-style Async Iterator over RecordBatches */
export class RecordBatchIterator {
@@ -114,14 +116,23 @@ export class Query {
  executeStream(): Promise<RecordBatchIterator>
}
export class Table {
  display(): string
  isOpen(): boolean
  close(): void
  /** Return Schema as empty Arrow IPC file. */
  schema(): Promise<Buffer>
  add(buf: Buffer, mode: string): Promise<void>
  countRows(filter?: string | undefined | null): Promise<number>
  delete(predicate: string): Promise<void>
  createIndex(index: Index | undefined | null, column: string, replace?: boolean | undefined | null): Promise<void>
  update(onlyIf: string | undefined | null, columns: Array<[string, string]>): Promise<void>
  query(): Query
  addColumns(transforms: Array<AddColumnsSql>): Promise<void>
  alterColumns(alterations: Array<ColumnAlteration>): Promise<void>
  dropColumns(columns: Array<string>): Promise<void>
  version(): Promise<number>
  checkout(version: number): Promise<void>
  checkoutLatest(): Promise<void>
  restore(): Promise<void>
  listIndices(): Promise<Array<IndexConfig>>
}

@@ -295,12 +295,10 @@ if (!nativeBinding) {
  throw new Error(`Failed to load native binding`)
}

const { Connection, Index, RecordBatchIterator, Query, Table, WriteMode, connect } = nativeBinding

module.exports.Connection = Connection
module.exports.Index = Index
module.exports.RecordBatchIterator = RecordBatchIterator
module.exports.Query = Query
module.exports.Table = Table
|
||||
|
||||
@@ -20,21 +20,22 @@ import {
} from "./native";

class RecordBatchIterator implements AsyncIterator<RecordBatch> {
private promised_inner?: Promise<NativeBatchIterator>;
private promisedInner?: Promise<NativeBatchIterator>;
private inner?: NativeBatchIterator;

constructor(
inner?: NativeBatchIterator,
promise?: Promise<NativeBatchIterator>
promise?: Promise<NativeBatchIterator>,
) {
// TODO: check promise reliably so we don't need to pass two arguments.
this.inner = inner;
this.promised_inner = promise;
this.promisedInner = promise;
}

async next(): Promise<IteratorResult<RecordBatch<any>, any>> {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
async next(): Promise<IteratorResult<RecordBatch<any>>> {
if (this.inner === undefined) {
this.inner = await this.promised_inner;
this.inner = await this.promisedInner;
}
if (this.inner === undefined) {
throw new Error("Invalid iterator state");
@@ -114,8 +115,8 @@ export class Query implements AsyncIterable<RecordBatch> {
/**
* Set the refine factor for the query.
*/
refineFactor(refine_factor: number): Query {
this.inner.refineFactor(refine_factor);
refineFactor(refineFactor: number): Query {
this.inner.refineFactor(refineFactor);
return this;
}

@@ -139,12 +140,13 @@ export class Query implements AsyncIterable<RecordBatch> {
/** Returns a JSON Array of All results.
*
*/
async toArray(): Promise<any[]> {
async toArray(): Promise<unknown[]> {
const tbl = await this.toArrow();
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
return tbl.toArray();
}

// eslint-disable-next-line @typescript-eslint/no-explicit-any
[Symbol.asyncIterator](): AsyncIterator<RecordBatch<any>> {
const promise = this.inner.executeStream();
return new RecordBatchIterator(undefined, promise);
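Since `Query` implements `AsyncIterable<RecordBatch>`, results can be streamed with `for await` instead of buffered through `toArray()`. A hedged sketch (the table name is illustrative, and it assumes the `Table` wrapper exposes the `query()` method shown in the declarations earlier):

```typescript
const table = await conn.openTable("my_table"); // illustrative name
let total = 0;
for await (const batch of table.query()) {
  total += batch.numRows; // each item is an Arrow RecordBatch
}
console.log(`scanned ${total} rows`);
```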
509
nodejs/lancedb/sanitize.ts
Normal file
@@ -0,0 +1,509 @@
// Copyright 2023 LanceDB Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// The utilities in this file help sanitize data from the user's arrow
// library into the types expected by vectordb's arrow library. Node
// generally allows for multiple versions of the same library (and sometimes
// even multiple copies of the same version) to be installed at the same
// time. However, arrow-js uses instanceof which expects that the input
// comes from the exact same library instance. This is not always the case
// and so we must sanitize the input to ensure that it is compatible.

import {
Field,
Utf8,
FixedSizeBinary,
FixedSizeList,
Schema,
List,
Struct,
Float,
Bool,
Date_,
Decimal,
DataType,
Dictionary,
Binary,
Float32,
Interval,
Map_,
Duration,
Union,
Time,
Timestamp,
Type,
Null,
Int,
type Precision,
type DateUnit,
Int8,
Int16,
Int32,
Int64,
Uint8,
Uint16,
Uint32,
Uint64,
Float16,
Float64,
DateDay,
DateMillisecond,
DenseUnion,
SparseUnion,
TimeNanosecond,
TimeMicrosecond,
TimeMillisecond,
TimeSecond,
TimestampNanosecond,
TimestampMicrosecond,
TimestampMillisecond,
TimestampSecond,
IntervalDayTime,
IntervalYearMonth,
DurationNanosecond,
DurationMicrosecond,
DurationMillisecond,
DurationSecond,
} from "apache-arrow";
import type { IntBitWidth, TKeys, TimeBitWidth } from "apache-arrow/type";

function sanitizeMetadata(
metadataLike?: unknown,
): Map<string, string> | undefined {
if (metadataLike === undefined || metadataLike === null) {
return undefined;
}
if (!(metadataLike instanceof Map)) {
throw Error("Expected metadata, if present, to be a Map<string, string>");
}
for (const item of metadataLike) {
if (typeof item[0] !== "string" || typeof item[1] !== "string") {
throw Error(
"Expected metadata, if present, to be a Map<string, string> but it had non-string keys or values",
);
}
}
return metadataLike as Map<string, string>;
}

function sanitizeInt(typeLike: object) {
if (
!("bitWidth" in typeLike) ||
typeof typeLike.bitWidth !== "number" ||
!("isSigned" in typeLike) ||
typeof typeLike.isSigned !== "boolean"
) {
throw Error(
"Expected an Int Type to have a `bitWidth` and `isSigned` property",
);
}
return new Int(typeLike.isSigned, typeLike.bitWidth as IntBitWidth);
}

function sanitizeFloat(typeLike: object) {
if (!("precision" in typeLike) || typeof typeLike.precision !== "number") {
throw Error("Expected a Float Type to have a `precision` property");
}
return new Float(typeLike.precision as Precision);
}

function sanitizeDecimal(typeLike: object) {
if (
!("scale" in typeLike) ||
typeof typeLike.scale !== "number" ||
!("precision" in typeLike) ||
typeof typeLike.precision !== "number" ||
!("bitWidth" in typeLike) ||
typeof typeLike.bitWidth !== "number"
) {
throw Error(
"Expected a Decimal Type to have `scale`, `precision`, and `bitWidth` properties",
);
}
return new Decimal(typeLike.scale, typeLike.precision, typeLike.bitWidth);
}

function sanitizeDate(typeLike: object) {
if (!("unit" in typeLike) || typeof typeLike.unit !== "number") {
throw Error("Expected a Date type to have a `unit` property");
}
return new Date_(typeLike.unit as DateUnit);
}

function sanitizeTime(typeLike: object) {
if (
!("unit" in typeLike) ||
typeof typeLike.unit !== "number" ||
!("bitWidth" in typeLike) ||
typeof typeLike.bitWidth !== "number"
) {
throw Error(
"Expected a Time type to have `unit` and `bitWidth` properties",
);
}
return new Time(typeLike.unit, typeLike.bitWidth as TimeBitWidth);
}

function sanitizeTimestamp(typeLike: object) {
if (!("unit" in typeLike) || typeof typeLike.unit !== "number") {
throw Error("Expected a Timestamp type to have a `unit` property");
}
let timezone = null;
if ("timezone" in typeLike && typeof typeLike.timezone === "string") {
timezone = typeLike.timezone;
}
return new Timestamp(typeLike.unit, timezone);
}

function sanitizeTypedTimestamp(
typeLike: object,
// eslint-disable-next-line @typescript-eslint/naming-convention
Datatype:
| typeof TimestampNanosecond
| typeof TimestampMicrosecond
| typeof TimestampMillisecond
| typeof TimestampSecond,
) {
let timezone = null;
if ("timezone" in typeLike && typeof typeLike.timezone === "string") {
timezone = typeLike.timezone;
}
return new Datatype(timezone);
}

function sanitizeInterval(typeLike: object) {
if (!("unit" in typeLike) || typeof typeLike.unit !== "number") {
throw Error("Expected an Interval type to have a `unit` property");
}
return new Interval(typeLike.unit);
}

function sanitizeList(typeLike: object) {
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a List type to have an array-like `children` property",
);
}
if (typeLike.children.length !== 1) {
throw Error("Expected a List type to have exactly one child");
}
return new List(sanitizeField(typeLike.children[0]));
}

function sanitizeStruct(typeLike: object) {
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a Struct type to have an array-like `children` property",
);
}
return new Struct(typeLike.children.map((child) => sanitizeField(child)));
}

function sanitizeUnion(typeLike: object) {
if (
!("typeIds" in typeLike) ||
!("mode" in typeLike) ||
typeof typeLike.mode !== "number"
) {
throw Error(
"Expected a Union type to have `typeIds` and `mode` properties",
);
}
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a Union type to have an array-like `children` property",
);
}

return new Union(
typeLike.mode,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
typeLike.typeIds as any,
typeLike.children.map((child) => sanitizeField(child)),
);
}

function sanitizeTypedUnion(
typeLike: object,
// eslint-disable-next-line @typescript-eslint/naming-convention
UnionType: typeof DenseUnion | typeof SparseUnion,
) {
if (!("typeIds" in typeLike)) {
throw Error(
"Expected a DenseUnion/SparseUnion type to have a `typeIds` property",
);
}
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a DenseUnion/SparseUnion type to have an array-like `children` property",
);
}

return new UnionType(
typeLike.typeIds as Int32Array | number[],
typeLike.children.map((child) => sanitizeField(child)),
);
}

function sanitizeFixedSizeBinary(typeLike: object) {
if (!("byteWidth" in typeLike) || typeof typeLike.byteWidth !== "number") {
throw Error(
"Expected a FixedSizeBinary type to have a `byteWidth` property",
);
}
return new FixedSizeBinary(typeLike.byteWidth);
}

function sanitizeFixedSizeList(typeLike: object) {
if (!("listSize" in typeLike) || typeof typeLike.listSize !== "number") {
throw Error("Expected a FixedSizeList type to have a `listSize` property");
}
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a FixedSizeList type to have an array-like `children` property",
);
}
if (typeLike.children.length !== 1) {
throw Error("Expected a FixedSizeList type to have exactly one child");
}
return new FixedSizeList(
typeLike.listSize,
sanitizeField(typeLike.children[0]),
);
}

function sanitizeMap(typeLike: object) {
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a Map type to have an array-like `children` property",
);
}
if (!("keysSorted" in typeLike) || typeof typeLike.keysSorted !== "boolean") {
throw Error("Expected a Map type to have a `keysSorted` property");
}

return new Map_(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
typeLike.children.map((field) => sanitizeField(field)) as any,
typeLike.keysSorted,
);
}

function sanitizeDuration(typeLike: object) {
if (!("unit" in typeLike) || typeof typeLike.unit !== "number") {
throw Error("Expected a Duration type to have a `unit` property");
}
return new Duration(typeLike.unit);
}

function sanitizeDictionary(typeLike: object) {
if (!("id" in typeLike) || typeof typeLike.id !== "number") {
throw Error("Expected a Dictionary type to have an `id` property");
}
if (!("indices" in typeLike) || typeof typeLike.indices !== "object") {
throw Error("Expected a Dictionary type to have an `indices` property");
}
if (!("dictionary" in typeLike) || typeof typeLike.dictionary !== "object") {
throw Error("Expected a Dictionary type to have a `dictionary` property");
}
if (!("isOrdered" in typeLike) || typeof typeLike.isOrdered !== "boolean") {
throw Error("Expected a Dictionary type to have an `isOrdered` property");
}
return new Dictionary(
sanitizeType(typeLike.dictionary),
sanitizeType(typeLike.indices) as TKeys,
typeLike.id,
typeLike.isOrdered,
);
}

// eslint-disable-next-line @typescript-eslint/no-explicit-any
function sanitizeType(typeLike: unknown): DataType<any> {
if (typeof typeLike !== "object" || typeLike === null) {
throw Error("Expected a Type but object was null/undefined");
}
if (!("typeId" in typeLike)) {
throw Error("Expected a Type to have a `typeId` property");
}
let typeId: Type;
if (typeof typeLike.typeId === "function") {
typeId = (typeLike.typeId as () => unknown)() as Type;
} else if (typeof typeLike.typeId === "number") {
typeId = typeLike.typeId as Type;
} else {
throw Error("Type's typeId property was not a function or number");
}

switch (typeId) {
case Type.NONE:
throw Error("Received a Type with a typeId of NONE");
case Type.Null:
return new Null();
case Type.Int:
return sanitizeInt(typeLike);
case Type.Float:
return sanitizeFloat(typeLike);
case Type.Binary:
return new Binary();
case Type.Utf8:
return new Utf8();
case Type.Bool:
return new Bool();
case Type.Decimal:
return sanitizeDecimal(typeLike);
case Type.Date:
return sanitizeDate(typeLike);
case Type.Time:
return sanitizeTime(typeLike);
case Type.Timestamp:
return sanitizeTimestamp(typeLike);
case Type.Interval:
return sanitizeInterval(typeLike);
case Type.List:
return sanitizeList(typeLike);
case Type.Struct:
return sanitizeStruct(typeLike);
case Type.Union:
return sanitizeUnion(typeLike);
case Type.FixedSizeBinary:
return sanitizeFixedSizeBinary(typeLike);
case Type.FixedSizeList:
return sanitizeFixedSizeList(typeLike);
case Type.Map:
return sanitizeMap(typeLike);
case Type.Duration:
return sanitizeDuration(typeLike);
case Type.Dictionary:
return sanitizeDictionary(typeLike);
case Type.Int8:
return new Int8();
case Type.Int16:
return new Int16();
case Type.Int32:
return new Int32();
case Type.Int64:
return new Int64();
case Type.Uint8:
return new Uint8();
case Type.Uint16:
return new Uint16();
case Type.Uint32:
return new Uint32();
case Type.Uint64:
return new Uint64();
case Type.Float16:
return new Float16();
case Type.Float32:
return new Float32();
case Type.Float64:
return new Float64();
case Type.DateMillisecond:
return new DateMillisecond();
case Type.DateDay:
return new DateDay();
case Type.TimeNanosecond:
return new TimeNanosecond();
case Type.TimeMicrosecond:
return new TimeMicrosecond();
case Type.TimeMillisecond:
return new TimeMillisecond();
case Type.TimeSecond:
return new TimeSecond();
case Type.TimestampNanosecond:
return sanitizeTypedTimestamp(typeLike, TimestampNanosecond);
case Type.TimestampMicrosecond:
return sanitizeTypedTimestamp(typeLike, TimestampMicrosecond);
case Type.TimestampMillisecond:
return sanitizeTypedTimestamp(typeLike, TimestampMillisecond);
case Type.TimestampSecond:
return sanitizeTypedTimestamp(typeLike, TimestampSecond);
case Type.DenseUnion:
return sanitizeTypedUnion(typeLike, DenseUnion);
case Type.SparseUnion:
return sanitizeTypedUnion(typeLike, SparseUnion);
case Type.IntervalDayTime:
return new IntervalDayTime();
case Type.IntervalYearMonth:
return new IntervalYearMonth();
case Type.DurationNanosecond:
return new DurationNanosecond();
case Type.DurationMicrosecond:
return new DurationMicrosecond();
case Type.DurationMillisecond:
return new DurationMillisecond();
case Type.DurationSecond:
return new DurationSecond();
default:
throw new Error("Unrecognized type id in schema: " + typeId);
}
}

function sanitizeField(fieldLike: unknown): Field {
if (fieldLike instanceof Field) {
return fieldLike;
}
if (typeof fieldLike !== "object" || fieldLike === null) {
throw Error("Expected a Field but object was null/undefined");
}
if (
!("type" in fieldLike) ||
!("name" in fieldLike) ||
!("nullable" in fieldLike)
) {
throw Error(
"The field passed in is missing a `type`/`name`/`nullable` property",
);
}
const type = sanitizeType(fieldLike.type);
const name = fieldLike.name;
if (!(typeof name === "string")) {
throw Error("The field passed in had a non-string `name` property");
}
const nullable = fieldLike.nullable;
if (!(typeof nullable === "boolean")) {
throw Error("The field passed in had a non-boolean `nullable` property");
}
let metadata;
if ("metadata" in fieldLike) {
metadata = sanitizeMetadata(fieldLike.metadata);
}
return new Field(name, type, nullable, metadata);
}

export function sanitizeSchema(schemaLike: unknown): Schema {
if (schemaLike instanceof Schema) {
return schemaLike;
}
if (typeof schemaLike !== "object" || schemaLike === null) {
throw Error("Expected a Schema but object was null/undefined");
}
if (!("fields" in schemaLike)) {
throw Error(
"The schema passed in does not appear to be a schema (no 'fields' property)",
);
}
let metadata;
if ("metadata" in schemaLike) {
metadata = sanitizeMetadata(schemaLike.metadata);
}
if (!Array.isArray(schemaLike.fields)) {
throw Error(
"The schema passed in had a 'fields' property but it was not an array",
);
}
const sanitizedFields = schemaLike.fields.map((field) =>
sanitizeField(field),
);
return new Schema(sanitizedFields, metadata);
}
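The exported `sanitizeSchema` is the entry point callers use to normalize a schema that may have been built against a different copy of `apache-arrow`, so that downstream `instanceof` checks succeed. A hedged usage sketch (the schema itself is illustrative):

```typescript
import { Schema, Field, Float32, FixedSizeList } from "apache-arrow";
import { sanitizeSchema } from "./sanitize";

// A schema possibly constructed by a different installed copy of apache-arrow.
const userSchema = new Schema([
  new Field(
    "vector",
    new FixedSizeList(4, new Field("item", new Float32(), true)),
    false,
  ),
]);
// Returns an equivalent Schema rebuilt from this library's own classes.
const safe = sanitizeSchema(userSchema);
```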
@@ -13,15 +13,53 @@
// limitations under the License.

import { Schema, tableFromIPC } from "apache-arrow";
import { AddColumnsSql, ColumnAlteration, Table as _NativeTable } from "./native";
import { toBuffer, Data } from "./arrow";
import {
AddColumnsSql,
ColumnAlteration,
IndexConfig,
Table as _NativeTable,
} from "./native";
import { Query } from "./query";
import { IndexBuilder } from "./indexer";
import { IndexOptions } from "./indices";
import { Data, fromDataToBuffer } from "./arrow";

export { IndexConfig } from "./native";
/**
* Options for adding data to a table.
*/
export interface AddDataOptions {
/** If "append" (the default) then the new data will be added to the table
*
* If "overwrite" then the new data will replace the existing data in the table.
*/
mode: "append" | "overwrite";
}

export interface UpdateOptions {
/**
* A filter that limits the scope of the update.
*
* This should be an SQL filter expression.
*
* Only rows that satisfy the expression will be updated.
*
* For example, this could be 'my_col == 0' to replace all instances
* of 0 in a column with some other default value.
*/
where: string;
}

/**
* A LanceDB Table is the collection of Records.
* A Table is a collection of Records in a LanceDB Database.
*
* Each Record has one or more vector fields.
* A Table object is expected to be long lived and reused for multiple operations.
* Table objects will cache a certain amount of index data in memory. This cache
* will be freed when the Table is garbage collected. To eagerly free the cache you
* can call the `close` method. Once the Table is closed, it cannot be used for any
* further operations.
*
* Closing a table is optional. If not closed, it will be closed when it is garbage
* collected.
*/
export class Table {
private readonly inner: _NativeTable;
@@ -31,6 +69,26 @@ export class Table {
this.inner = inner;
}

/** Return true if the table has not been closed */
isOpen(): boolean {
return this.inner.isOpen();
}

/** Close the table, releasing any underlying resources.
*
* It is safe to call this method multiple times.
*
* Any attempt to use the table after it is closed will result in an error.
*/
close(): void {
this.inner.close();
}
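Because cached index data lives for the lifetime of the `Table`, a long-running process can release it deterministically, as the comment above describes. A small sketch (table name illustrative):

```typescript
const table = await conn.openTable("my_table"); // illustrative name
try {
  // ... run searches and updates against the table ...
} finally {
  table.close(); // eagerly frees the cached index data
}
console.log(table.isOpen()); // false; any further call would throw
```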
/** Return a brief description of the table */
display(): string {
return this.inner.display();
}

/** Get the schema of the table. */
async schema(): Promise<Schema> {
const schemaBuf = await this.inner.schema();
@@ -44,13 +102,54 @@ export class Table {
* @param {Data} data Records to be inserted into the Table
* @return The number of rows added to the table
*/
async add(data: Data): Promise<void> {
const buffer = toBuffer(data);
await this.inner.add(buffer);
async add(data: Data, options?: Partial<AddDataOptions>): Promise<void> {
const mode = options?.mode ?? "append";

const buffer = await fromDataToBuffer(data);
await this.inner.add(buffer, mode);
}
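With the optional `mode`, replacing a table's contents no longer requires dropping and recreating it. A hedged sketch (row values illustrative, assuming `Data` accepts an array of records):

```typescript
// Append rows (the default mode).
await table.add([{ id: 1, vector: [0.1, 0.2, 0.3, 0.4] }]);
// Replace all existing rows with the new data.
await table.add([{ id: 2, vector: [0.5, 0.6, 0.7, 0.8] }], { mode: "overwrite" });
```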
/**
* Update existing records in the Table
*
* An update operation can be used to adjust existing values. Use the
* `updates` argument to specify which columns to update. The new value
* can be a literal value (e.g. replacing nulls with some default value)
* or an expression applied to the old value (e.g. incrementing a value)
*
* An optional condition can be specified (e.g. "only update if the old
* value is 0")
*
* Note: if your condition is something like "some_id_column == 7" and
* you are updating many rows (with different ids) then you will get
* better performance with a single [`merge_insert`] call instead of
* repeatedly calling this method.
*
* @param updates the columns to update
*
* Keys in the map should specify the name of the column to update.
* Values in the map provide the new value of the column. These can
* be SQL literal strings (e.g. "7" or "'foo'") or they can be expressions
* based on the row being updated (e.g. "my_col + 1")
*
* @param options additional options to control the update behavior
*/
async update(
updates: Map<string, string> | Record<string, string>,
options?: Partial<UpdateOptions>,
) {
const onlyIf = options?.where;
let columns: [string, string][];
if (updates instanceof Map) {
columns = Array.from(updates.entries());
} else {
columns = Object.entries(updates);
}
await this.inner.update(onlyIf, columns);
}
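Both the map and plain-object forms feed the same `[column, expression]` pairs to the native layer. A short sketch (column names and filters illustrative):

```typescript
// Replace a literal value on matching rows only.
await table.update({ price: "0.0" }, { where: "price IS NULL" });
// An SQL expression based on the old value: increment a counter everywhere.
await table.update(new Map([["views", "views + 1"]]));
```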
/** Count the total number of rows in the dataset. */
async countRows(filter?: string): Promise<bigint> {
async countRows(filter?: string): Promise<number> {
return await this.inner.countRows(filter);
}

@@ -59,24 +158,28 @@ export class Table {
await this.inner.delete(predicate);
}

/** Create an index over the columns.
/** Create an index to speed up queries.
*
* @param {string} column The column to create the index on. If not specified,
* it will create an index on vector field.
* Indices can be created on vector columns or scalar columns.
* Indices on vector columns will speed up vector searches.
* Indices on scalar columns will speed up filtering (in both
* vector and non-vector searches)
*
* @example
*
* By default, it creates a vector index on one vector column.
* If the column has a vector (fixed size list) data type then
* an IvfPq vector index will be created.
*
* ```typescript
* const table = await conn.openTable("my_table");
* await table.createIndex().build();
* await table.createIndex(["vector"]);
* ```
*
* You can specify `IVF_PQ` parameters via `ivf_pq({})` call.
* For advanced control over vector index creation you can specify
* the index type and options.
* ```typescript
* const table = await conn.openTable("my_table");
* await table.createIndex("my_vec_col")
* await table.createIndex(["vector"], I)
* .ivf_pq({ num_partitions: 128, num_sub_vectors: 16 })
* .build();
* ```
@@ -87,12 +190,11 @@ export class Table {
* await table.createIndex("my_float_col").build();
* ```
*/
createIndex(column?: string): IndexBuilder {
let builder = new IndexBuilder(this.inner);
if (column !== undefined) {
builder = builder.column(column);
}
return builder;
async createIndex(column: string, options?: Partial<IndexOptions>) {
// Bit of a hack to get around the fact that TS has no package-scope.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const nativeIndex = (options?.config as any)?.inner;
await this.inner.createIndex(nativeIndex, column, options?.replace);
}
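Under the new signature, the index configuration rides in `options.config` and is unwrapped to the native `Index` via its `inner` field. A hedged sketch (it assumes an `Index` wrapper with an options-object `ivfPq` factory exists in `./indices`; the parameter values are illustrative):

```typescript
import { Index } from "./indices"; // assumed wrapper location

// Let LanceDB pick a sensible index type for the column.
await table.createIndex("vector");
// Or configure an IVF_PQ vector index explicitly.
await table.createIndex("vector", {
  config: Index.ivfPq({ numPartitions: 128, numSubVectors: 16 }),
  replace: true,
});
```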
/**
@@ -188,4 +290,65 @@ export class Table {
async dropColumns(columnNames: string[]): Promise<void> {
await this.inner.dropColumns(columnNames);
}

/** Retrieve the version of the table
*
* LanceDb supports versioning. Every operation that modifies the table increases
* the version. As long as a version hasn't been deleted you can `[Self::checkout]` that
* version to view the data at that point. In addition, you can `[Self::restore]` the
* version to replace the current table with a previous version.
*/
async version(): Promise<number> {
return await this.inner.version();
}

/** Checks out a specific version of the Table
*
* Any read operation on the table will now access the data at the checked out version.
* As a consequence, calling this method will disable any read consistency interval
* that was previously set.
*
* This is a read-only operation that turns the table into a sort of "view"
* or "detached head". Other table instances will not be affected. To make the change
* permanent you can use the `[Self::restore]` method.
*
* Any operation that modifies the table will fail while the table is in a checked
* out state.
*
* To return the table to a normal state use `[Self::checkout_latest]`
*/
async checkout(version: number): Promise<void> {
await this.inner.checkout(version);
}

/** Ensures the table is pointing at the latest version
*
* This can be used to manually update a table when the read_consistency_interval is None
* It can also be used to undo a `[Self::checkout]` operation
*/
async checkoutLatest(): Promise<void> {
await this.inner.checkoutLatest();
}

/** Restore the table to the currently checked out version
*
* This operation will fail if checkout has not been called previously
*
* This operation will overwrite the latest version of the table with a
* previous version. Any changes made since the checked out version will
* no longer be visible.
*
* Once the operation concludes the table will no longer be in a checked
* out state and the read_consistency_interval, if any, will apply.
*/
async restore(): Promise<void> {
await this.inner.restore();
}

/**
* List all indices that have been created with Self::create_index
*/
async listIndices(): Promise<IndexConfig[]> {
return await this.inner.listIndices();
}
}
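Together, `version`, `checkout`, and `restore` give a git-like time travel workflow over table history. A hedged sketch of that workflow (row values illustrative):

```typescript
const v = await table.version(); // remember the current version
await table.add([{ id: 99, vector: [0, 0, 0, 0] }]); // make some change
await table.checkout(v); // detached, read-only view of version v
await table.restore(); // overwrite latest with v and leave the checked-out state
```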
859
nodejs/package-lock.json
generated
File diff suppressed because it is too large
@@ -19,14 +19,20 @@
"devDependencies": {
"@napi-rs/cli": "^2.18.0",
"@types/jest": "^29.1.2",
"@types/tmp": "^0.2.6",
"@typescript-eslint/eslint-plugin": "^6.19.0",
"@typescript-eslint/parser": "^6.19.0",
"eslint": "^8.56.0",
"apache-arrow-old": "npm:apache-arrow@13.0.0",
"eslint": "^8.57.0",
"eslint-config-prettier": "^9.1.0",
"jest": "^29.7.0",
"prettier": "^3.1.0",
"tmp": "^0.2.3",
"ts-jest": "^29.1.2",
"typedoc": "^0.25.7",
"typedoc-plugin-markdown": "^3.17.1",
"typescript": "^5.3.3"
"typescript": "^5.3.3",
"typescript-eslint": "^7.1.0"
},
"ava": {
"timeout": "3m"
@@ -48,11 +54,11 @@
"build:native": "napi build --platform --release --js lancedb/native.js --dts lancedb/native.d.ts dist/",
"build:debug": "napi build --platform --dts ../lancedb/native.d.ts --js ../lancedb/native.js dist/",
"build": "npm run build:debug && tsc -b",
"chkformat": "prettier . --check",
"docs": "typedoc --plugin typedoc-plugin-markdown lancedb/index.ts",
"lint": "eslint lancedb --ext .js,.ts",
"lint": "eslint lancedb && eslint __test__",
"prepublishOnly": "napi prepublish -t npm",
"//": "maxWorkers=1 is workaround for bigint issue in jest: https://github.com/jestjs/jest/issues/11617#issuecomment-1068732414",
"test": "npm run build && jest --maxWorkers=1",
"test": "npm run build && jest --verbose",
"universal": "napi universal",
"version": "napi version"
},
@@ -60,7 +66,8 @@
"lancedb-darwin-arm64": "0.4.3",
"lancedb-darwin-x64": "0.4.3",
"lancedb-linux-arm64-gnu": "0.4.3",
"lancedb-linux-x64-gnu": "0.4.3"
"lancedb-linux-x64-gnu": "0.4.3",
"openai": "^4.28.4"
},
"peerDependencies": {
"apache-arrow": "^15.0.0"
@@ -17,20 +17,43 @@ use napi_derive::*;

use crate::table::Table;
use crate::ConnectionOptions;
use lancedb::connection::{ConnectBuilder, Connection as LanceDBConnection};
use lancedb::ipc::ipc_file_to_batches;
use lancedb::connection::{ConnectBuilder, Connection as LanceDBConnection, CreateTableMode};
use lancedb::ipc::{ipc_file_to_batches, ipc_file_to_schema};

#[napi]
pub struct Connection {
conn: LanceDBConnection,
inner: Option<LanceDBConnection>,
}

impl Connection {
pub(crate) fn inner_new(inner: LanceDBConnection) -> Self {
Self { inner: Some(inner) }
}

fn get_inner(&self) -> napi::Result<&LanceDBConnection> {
self.inner
.as_ref()
.ok_or_else(|| napi::Error::from_reason("Connection is closed"))
}
}

impl Connection {
fn parse_create_mode_str(mode: &str) -> napi::Result<CreateTableMode> {
match mode {
"create" => Ok(CreateTableMode::Create),
"overwrite" => Ok(CreateTableMode::Overwrite),
"exist_ok" => Ok(CreateTableMode::exist_ok(|builder| builder)),
_ => Err(napi::Error::from_reason(format!("Invalid mode {}", mode))),
}
}
}

#[napi]
impl Connection {
/// Create a new Connection instance from the given URI.
#[napi(factory)]
pub async fn new(options: ConnectionOptions) -> napi::Result<Self> {
let mut builder = ConnectBuilder::new(&options.uri);
pub async fn new(uri: String, options: ConnectionOptions) -> napi::Result<Self> {
let mut builder = ConnectBuilder::new(&uri);
if let Some(api_key) = options.api_key {
builder = builder.api_key(&api_key);
}
@@ -41,19 +64,44 @@ impl Connection {
builder =
builder.read_consistency_interval(std::time::Duration::from_secs_f64(interval));
}
Ok(Self {
conn: builder
Ok(Self::inner_new(
builder
.execute()
.await
.map_err(|e| napi::Error::from_reason(format!("{}", e)))?,
})
))
}

#[napi]
pub fn display(&self) -> napi::Result<String> {
Ok(self.get_inner()?.to_string())
}

#[napi]
pub fn is_open(&self) -> bool {
self.inner.is_some()
}

#[napi]
pub fn close(&mut self) {
self.inner.take();
}

/// List all tables in the dataset.
#[napi]
pub async fn table_names(&self) -> napi::Result<Vec<String>> {
self.conn
.table_names()
pub async fn table_names(
&self,
start_after: Option<String>,
limit: Option<u32>,
) -> napi::Result<Vec<String>> {
let mut op = self.get_inner()?.table_names();
if let Some(start_after) = start_after {
op = op.start_after(start_after);
}
if let Some(limit) = limit {
op = op.limit(limit);
}
op.execute()
.await
.map_err(|e| napi::Error::from_reason(format!("{}", e)))
}
@@ -65,12 +113,40 @@ impl Connection {
/// - buf: The buffer containing the IPC file.
///
#[napi]
pub async fn create_table(&self, name: String, buf: Buffer) -> napi::Result<Table> {
pub async fn create_table(
&self,
name: String,
buf: Buffer,
mode: String,
) -> napi::Result<Table> {
let batches = ipc_file_to_batches(buf.to_vec())
.map_err(|e| napi::Error::from_reason(format!("Failed to read IPC file: {}", e)))?;
let mode = Self::parse_create_mode_str(&mode)?;
let tbl = self
.conn
.get_inner()?
.create_table(&name, Box::new(batches))
.mode(mode)
.execute()
.await
.map_err(|e| napi::Error::from_reason(format!("{}", e)))?;
Ok(Table::new(tbl))
}

#[napi]
pub async fn create_empty_table(
&self,
name: String,
schema_buf: Buffer,
mode: String,
) -> napi::Result<Table> {
let schema = ipc_file_to_schema(schema_buf.to_vec()).map_err(|e| {
napi::Error::from_reason(format!("Failed to marshal schema from JS to Rust: {}", e))
})?;
let mode = Self::parse_create_mode_str(&mode)?;
let tbl = self
.get_inner()?
.create_empty_table(&name, schema)
.mode(mode)
.execute()
.await
.map_err(|e| napi::Error::from_reason(format!("{}", e)))?;
@@ -80,7 +156,7 @@ impl Connection {
#[napi]
pub async fn open_table(&self, name: String) -> napi::Result<Table> {
let tbl = self
.conn
.get_inner()?
.open_table(&name)
.execute()
.await
@@ -91,7 +167,7 @@ impl Connection {
/// Drop table with the name. Or raise an error if the table does not exist.
#[napi]
pub async fn drop_table(&self, name: String) -> napi::Result<()> {
self.conn
self.get_inner()?
.drop_table(&name)
.await
.map_err(|e| napi::Error::from_reason(format!("{}", e)))
12
nodejs/src/error.rs
Normal file
@@ -0,0 +1,12 @@
pub type Result<T> = napi::Result<T>;

pub trait NapiErrorExt<T> {
/// Convert to a napi error using from_reason(err.to_string())
fn default_error(self) -> Result<T>;
}

impl<T> NapiErrorExt<T> for std::result::Result<T, lancedb::Error> {
fn default_error(self) -> Result<T> {
self.map_err(|err| napi::Error::from_reason(err.to_string()))
}
}
@@ -12,89 +12,75 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use lance_linalg::distance::MetricType as LanceMetricType;
use std::sync::Mutex;

use lancedb::index::scalar::BTreeIndexBuilder;
use lancedb::index::vector::IvfPqIndexBuilder;
use lancedb::index::Index as LanceDbIndex;
use lancedb::DistanceType;
use napi_derive::napi;

#[napi]
pub enum IndexType {
Scalar,
IvfPq,
pub struct Index {
inner: Mutex<Option<LanceDbIndex>>,
}

#[napi]
pub enum MetricType {
L2,
Cosine,
Dot,
}

impl From<MetricType> for LanceMetricType {
fn from(metric: MetricType) -> Self {
match metric {
MetricType::L2 => Self::L2,
MetricType::Cosine => Self::Cosine,
MetricType::Dot => Self::Dot,
}
impl Index {
pub fn consume(&self) -> napi::Result<LanceDbIndex> {
self.inner
.lock()
.unwrap()
.take()
.ok_or(napi::Error::from_reason(
"attempt to use an index more than once",
))
}
}

#[napi]
pub struct IndexBuilder {
inner: lancedb::index::IndexBuilder,
}

#[napi]
impl IndexBuilder {
pub fn new(tbl: &dyn lancedb::Table) -> Self {
let inner = tbl.create_index(&[]);
Self { inner }
}

#[napi]
pub unsafe fn replace(&mut self, v: bool) {
self.inner.replace(v);
}

#[napi]
pub unsafe fn column(&mut self, c: String) {
self.inner.columns(&[c.as_str()]);
}

#[napi]
pub unsafe fn name(&mut self, name: String) {
self.inner.name(name.as_str());
}

#[napi]
pub unsafe fn ivf_pq(
&mut self,
metric_type: Option<MetricType>,
impl Index {
#[napi(factory)]
pub fn ivf_pq(
distance_type: Option<String>,
num_partitions: Option<u32>,
num_sub_vectors: Option<u32>,
num_bits: Option<u32>,
max_iterations: Option<u32>,
sample_rate: Option<u32>,
) {
self.inner.ivf_pq();
metric_type.map(|m| self.inner.metric_type(m.into()));
num_partitions.map(|p| self.inner.num_partitions(p));
num_sub_vectors.map(|s| self.inner.num_sub_vectors(s));
num_bits.map(|b| self.inner.num_bits(b));
max_iterations.map(|i| self.inner.max_iterations(i));
sample_rate.map(|s| self.inner.sample_rate(s));
) -> napi::Result<Self> {
let mut ivf_pq_builder = IvfPqIndexBuilder::default();
if let Some(distance_type) = distance_type {
let distance_type = match distance_type.as_str() {
"l2" => Ok(DistanceType::L2),
"cosine" => Ok(DistanceType::Cosine),
"dot" => Ok(DistanceType::Dot),
_ => Err(napi::Error::from_reason(format!(
"Invalid distance type '{}'. Must be one of l2, cosine, or dot",
distance_type
))),
}?;
ivf_pq_builder = ivf_pq_builder.distance_type(distance_type);
}
if let Some(num_partitions) = num_partitions {
ivf_pq_builder = ivf_pq_builder.num_partitions(num_partitions);
}
if let Some(num_sub_vectors) = num_sub_vectors {
ivf_pq_builder = ivf_pq_builder.num_sub_vectors(num_sub_vectors);
}
if let Some(max_iterations) = max_iterations {
ivf_pq_builder = ivf_pq_builder.max_iterations(max_iterations);
}
if let Some(sample_rate) = sample_rate {
ivf_pq_builder = ivf_pq_builder.sample_rate(sample_rate);
}
Ok(Self {
inner: Mutex::new(Some(LanceDbIndex::IvfPq(ivf_pq_builder))),
})
}

#[napi]
pub unsafe fn scalar(&mut self) {
self.inner.scalar();
#[napi(factory)]
pub fn btree() -> Self {
Self {
inner: Mutex::new(Some(LanceDbIndex::BTree(BTreeIndexBuilder::default()))),
}

#[napi]
pub async fn build(&self) -> napi::Result<()> {
self.inner
.build()
.await
.map_err(|e| napi::Error::from_reason(format!("Failed to build index: {}", e)))?;
Ok(())
}
}
@@ -13,7 +13,7 @@
// limitations under the License.

use futures::StreamExt;
use lance::io::RecordBatchStream;
use lancedb::arrow::SendableRecordBatchStream;
use lancedb::ipc::batches_to_ipc_file;
use napi::bindgen_prelude::*;
use napi_derive::napi;
@@ -21,12 +21,12 @@ use napi_derive::napi;
/** Typescript-style Async Iterator over RecordBatches */
#[napi]
pub struct RecordBatchIterator {
inner: Box<dyn RecordBatchStream + Unpin>,
inner: SendableRecordBatchStream,
}

#[napi]
impl RecordBatchIterator {
pub(crate) fn new(inner: Box<dyn RecordBatchStream + Unpin>) -> Self {
pub(crate) fn new(inner: SendableRecordBatchStream) -> Self {
Self { inner }
}
@@ -16,6 +16,7 @@ use connection::Connection;
use napi_derive::*;

mod connection;
mod error;
mod index;
mod iterator;
mod query;
@@ -24,7 +25,6 @@ mod table;
#[napi(object)]
#[derive(Debug)]
pub struct ConnectionOptions {
pub uri: String,
pub api_key: Option<String>,
pub host_override: Option<String>,
/// (For LanceDB OSS only): The interval, in seconds, at which to check for
@@ -54,6 +54,6 @@ pub struct WriteOptions {
}

#[napi]
pub async fn connect(options: ConnectionOptions) -> napi::Result<Connection> {
Connection::new(options).await
pub async fn connect(uri: String, options: ConnectionOptions) -> napi::Result<Connection> {
Connection::new(uri, options).await
}
@@ -16,7 +16,7 @@ use lancedb::query::Query as LanceDBQuery;
use napi::bindgen_prelude::*;
use napi_derive::napi;

use crate::{iterator::RecordBatchIterator, table::Table};
use crate::iterator::RecordBatchIterator;

#[napi]
pub struct Query {
@@ -25,10 +25,8 @@ pub struct Query {

#[napi]
impl Query {
pub fn new(table: &Table) -> Self {
Self {
inner: table.table.query(),
}
pub fn new(query: LanceDBQuery) -> Self {
Self { inner: query }
}

#[napi]
@@ -76,6 +74,6 @@ impl Query {
let inner_stream = self.inner.execute_stream().await.map_err(|e| {
napi::Error::from_reason(format!("Failed to execute query stream: {}", e))
})?;
Ok(RecordBatchIterator::new(Box::new(inner_stream)))
Ok(RecordBatchIterator::new(inner_stream))
}
}
@@ -13,33 +13,66 @@
// limitations under the License.

use arrow_ipc::writer::FileWriter;
use lance::dataset::ColumnAlteration as LanceColumnAlteration;
use lancedb::{
ipc::ipc_file_to_batches,
table::{AddDataOptions, TableRef},
use lancedb::ipc::ipc_file_to_batches;
use lancedb::table::{
AddDataMode, ColumnAlteration as LanceColumnAlteration, NewColumnTransform,
Table as LanceDbTable,
};
use napi::bindgen_prelude::*;
use napi_derive::napi;

use crate::index::IndexBuilder;
use crate::error::NapiErrorExt;
use crate::index::Index;
use crate::query::Query;

#[napi]
pub struct Table {
pub(crate) table: TableRef,
// We keep a duplicate of the table name so we can use it for error
// messages even if the table has been closed
name: String,
pub(crate) inner: Option<LanceDbTable>,
}

impl Table {
fn inner_ref(&self) -> napi::Result<&LanceDbTable> {
self.inner
.as_ref()
.ok_or_else(|| napi::Error::from_reason(format!("Table {} is closed", self.name)))
}
}

#[napi]
impl Table {
pub(crate) fn new(table: TableRef) -> Self {
Self { table }
pub(crate) fn new(table: LanceDbTable) -> Self {
Self {
name: table.name().to_string(),
inner: Some(table),
}
}

#[napi]
pub fn display(&self) -> String {
match &self.inner {
None => format!("ClosedTable({})", self.name),
Some(inner) => inner.to_string(),
}
}

#[napi]
pub fn is_open(&self) -> bool {
self.inner.is_some()
}

#[napi]
pub fn close(&mut self) {
self.inner.take();
}

/// Return Schema as empty Arrow IPC file.
#[napi]
pub async fn schema(&self) -> napi::Result<Buffer> {
let schema =
self.table.schema().await.map_err(|e| {
self.inner_ref()?.schema().await.map_err(|e| {
napi::Error::from_reason(format!("Failed to create IPC file: {}", e))
})?;
let mut writer = FileWriter::try_new(vec![], &schema)
@@ -53,48 +86,89 @@ impl Table {
}

#[napi]
pub async fn add(&self, buf: Buffer) -> napi::Result<()> {
pub async fn add(&self, buf: Buffer, mode: String) -> napi::Result<()> {
let batches = ipc_file_to_batches(buf.to_vec())
.map_err(|e| napi::Error::from_reason(format!("Failed to read IPC file: {}", e)))?;
self.table
.add(Box::new(batches), AddDataOptions::default())
.await
.map_err(|e| {
let mut op = self.inner_ref()?.add(Box::new(batches));

op = if mode == "append" {
op.mode(AddDataMode::Append)
} else if mode == "overwrite" {
op.mode(AddDataMode::Overwrite)
} else {
return Err(napi::Error::from_reason(format!("Invalid mode: {}", mode)));
};

op.execute().await.map_err(|e| {
napi::Error::from_reason(format!(
"Failed to add batches to table {}: {}",
self.table, e
self.name, e
))
})
}

#[napi]
pub async fn count_rows(&self, filter: Option<String>) -> napi::Result<usize> {
self.table.count_rows(filter).await.map_err(|e| {
pub async fn count_rows(&self, filter: Option<String>) -> napi::Result<i64> {
self.inner_ref()?
.count_rows(filter)
.await
.map(|val| val as i64)
.map_err(|e| {
napi::Error::from_reason(format!(
"Failed to count rows in table {}: {}",
self.table, e
self.name, e
))
})
}

#[napi]
pub async fn delete(&self, predicate: String) -> napi::Result<()> {
self.table.delete(&predicate).await.map_err(|e| {
self.inner_ref()?.delete(&predicate).await.map_err(|e| {
napi::Error::from_reason(format!(
"Failed to delete rows in table {}: predicate={}",
self.table, e
self.name, e
))
})
}

#[napi]
pub fn create_index(&self) -> IndexBuilder {
IndexBuilder::new(self.table.as_ref())
pub async fn create_index(
&self,
index: Option<&Index>,
column: String,
replace: Option<bool>,
) -> napi::Result<()> {
let lancedb_index = if let Some(index) = index {
index.consume()?
} else {
lancedb::index::Index::Auto
};
let mut builder = self.inner_ref()?.create_index(&[column], lancedb_index);
if let Some(replace) = replace {
builder = builder.replace(replace);
}
builder.execute().await.default_error()
}

#[napi]
pub fn query(&self) -> Query {
Query::new(self)
pub async fn update(
&self,
only_if: Option<String>,
columns: Vec<(String, String)>,
) -> napi::Result<()> {
let mut op = self.inner_ref()?.update();
if let Some(only_if) = only_if {
op = op.only_if(only_if);
}
for (column_name, value) in columns {
op = op.column(column_name, value);
}
op.execute().await.default_error()
}

#[napi]
pub fn query(&self) -> napi::Result<Query> {
Ok(Query::new(self.inner_ref()?.query()))
}

#[napi]
@@ -103,14 +177,14 @@ impl Table {
.into_iter()
.map(|sql| (sql.name, sql.value_sql))
.collect::<Vec<_>>();
let transforms = lance::dataset::NewColumnTransform::SqlExpressions(transforms);
self.table
let transforms = NewColumnTransform::SqlExpressions(transforms);
self.inner_ref()?
.add_columns(transforms, None)
.await
.map_err(|err| {
napi::Error::from_reason(format!(
"Failed to add columns to table {}: {}",
self.table, err
self.name, err
))
})?;
Ok(())
@@ -130,13 +204,13 @@ impl Table {
.map(LanceColumnAlteration::from)
.collect::<Vec<_>>();

self.table
self.inner_ref()?
.alter_columns(&alterations)
.await
.map_err(|err| {
napi::Error::from_reason(format!(
"Failed to alter columns in table {}: {}",
self.table, err
self.name, err
))
})?;
Ok(())
@@ -145,14 +219,78 @@ impl Table {
#[napi]
pub async fn drop_columns(&self, columns: Vec<String>) -> napi::Result<()> {
let col_refs = columns.iter().map(String::as_str).collect::<Vec<_>>();
self.table.drop_columns(&col_refs).await.map_err(|err| {
self.inner_ref()?
.drop_columns(&col_refs)
.await
.map_err(|err| {
napi::Error::from_reason(format!(
"Failed to drop columns from table {}: {}",
self.table, err
self.name, err
))
})?;
Ok(())
}

#[napi]
pub async fn version(&self) -> napi::Result<i64> {
self.inner_ref()?
.version()
.await
.map(|val| val as i64)
.default_error()
}

#[napi]
pub async fn checkout(&self, version: i64) -> napi::Result<()> {
self.inner_ref()?
.checkout(version as u64)
.await
.default_error()
}

#[napi]
pub async fn checkout_latest(&self) -> napi::Result<()> {
self.inner_ref()?.checkout_latest().await.default_error()
}

#[napi]
pub async fn restore(&self) -> napi::Result<()> {
self.inner_ref()?.restore().await.default_error()
}

#[napi]
pub async fn list_indices(&self) -> napi::Result<Vec<IndexConfig>> {
Ok(self
.inner_ref()?
.list_indices()
.await
.default_error()?
.into_iter()
.map(IndexConfig::from)
.collect::<Vec<_>>())
}
}

#[napi(object)]
/// A description of an index currently configured on a column
pub struct IndexConfig {
/// The type of the index
pub index_type: String,
/// The columns in the index
///
/// Currently this is always an array of size 1. In the future there may
/// be more columns to represent composite indices.
pub columns: Vec<String>,
}

impl From<lancedb::index::IndexConfig> for IndexConfig {
fn from(value: lancedb::index::IndexConfig) -> Self {
let index_type = format!("{:?}", value.index_type);
Self {
index_type,
columns: value.columns,
}
}
}

/// A definition of a column alteration. The alteration changes the column at
@@ -1,9 +1,5 @@
{
"include": [
"lancedb/*.ts",
"lancedb/**/*.ts",
"lancedb/*.js",
],
"include": ["lancedb/*.ts", "lancedb/**/*.ts", "lancedb/*.js"],
"compilerOptions": {
"target": "es2022",
"module": "commonjs",
@@ -11,21 +7,17 @@
"outDir": "./dist",
"strict": true,
"allowJs": true,
"resolveJsonModule": true,
"resolveJsonModule": true
},
"exclude": [
"./dist/*",
],
"exclude": ["./dist/*"],
"typedocOptions": {
"entryPoints": [
"lancedb/index.ts"
],
"entryPoints": ["lancedb/index.ts"],
"out": "../docs/src/javascript/",
"visibilityFilters": {
"protected": false,
"private": false,
"inherited": true,
"external": false,
"external": false
}
}
}
@@ -1,5 +1,5 @@
[bumpversion]
current_version = 0.6.0
current_version = 0.6.4
commit = True
message = [python] Bump version: {current_version} → {new_version}
tag = True
38
python/ASYNC_MIGRATION.md
Normal file
@@ -0,0 +1,38 @@
# Migration from Sync to Async API

A new asynchronous API has been added to LanceDB. This API is built
on top of the Rust lancedb crate (instead of being built on top of
pylance). This will help keep the various language bindings in sync.
There are some slight changes between the synchronous and the asynchronous
APIs. This document will help you migrate. These changes relate mostly
to the Connection and Table classes.

## Almost all functions are async

The most important change is that almost all functions are now async.
This means the functions now return `asyncio` coroutines. You will
need to use `await` to call these functions.

## Connection

* The connection now has a `close` method. You can call this when
  you are done with the connection to eagerly free resources. Currently
  this is limited to freeing/closing the HTTP connection for remote
  connections. In the future we may add caching or other resources to
  native connections, so this is probably a good practice even if you
  aren't using remote connections.

  In addition, the connection can be used as a context manager, which may
  be a more convenient way to ensure the connection is closed.

  It is not mandatory to call the `close` method. If you don't call it,
  the connection will be closed when the object is garbage collected.

## Table

* The table now has a `close` method, similar to the connection. This
  can be used to eagerly free the cache used by a Table object. Similar
  to the connection, it can be used as a context manager, and it is not
  mandatory to call the `close` method.
* Previously `Table.schema` was a property. Now it is an async method.
* The method `Table.__len__` was removed and `len(table)` will no longer
  work. Use `Table.count_rows` instead.
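To make the migration steps above concrete, here is a minimal sketch. It assumes a local database at `./.lancedb` with an existing table named `my_table`; both names are illustrative.

```python
# Sketch: the async equivalents of common sync calls from the migration doc.
import asyncio

import lancedb


async def main():
    db = await lancedb.connect_async("./.lancedb")
    table = await db.open_table("my_table")
    schema = await table.schema()  # sync API: table.schema was a property
    n_rows = await table.count_rows()  # sync API: len(table)
    print(schema, n_rows)
    table.close()  # optional: eagerly free the table's cache
    db.close()  # optional: eagerly free connection resources


asyncio.run(main())
```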
@@ -7,13 +7,14 @@ license.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true

rust-version = "1.75.0"

[lib]
name = "_lancedb"
crate-type = ["cdylib"]

[dependencies]
arrow = { version = "50.0.0", features = ["pyarrow"] }
lancedb = { path = "../rust/lancedb" }
env_logger = "0.10"
pyo3 = { version = "0.20", features = ["extension-module", "abi3-py38"] }
@@ -23,4 +24,7 @@ pyo3-asyncio = { version = "0.20", features = ["attributes", "tokio-runtime"] }
lzma-sys = { version = "*", features = ["static"] }

[build-dependencies]
pyo3-build-config = { version = "0.20.3", features = ["extension-module", "abi3-py38"] }
pyo3-build-config = { version = "0.20.3", features = [
    "extension-module",
    "abi3-py38",
] }

@@ -1,9 +1,9 @@
[project]
name = "lancedb"
version = "0.6.0"
version = "0.6.4"
dependencies = [
    "deprecation",
    "pylance==0.10.1",
    "pylance==0.10.4",
    "ratelimiter~=1.0",
    "retry>=0.9.2",
    "tqdm>=4.27.0",
@@ -81,6 +81,7 @@ embeddings = [
    "awscli>=1.29.57",
    "botocore>=1.31.57",
]
azure = ["adlfs>=2024.2.0"]

[tool.maturin]
python-source = "python"

@@ -21,10 +21,11 @@ __version__ = importlib.metadata.version("lancedb")

from ._lancedb import connect as lancedb_connect
from .common import URI, sanitize_uri
from .db import AsyncConnection, AsyncLanceDBConnection, DBConnection, LanceDBConnection
from .db import AsyncConnection, DBConnection, LanceDBConnection
from .remote.db import RemoteDBConnection
from .schema import vector  # noqa: F401
from .utils import sentry_log  # noqa: F401
from .schema import vector
from .table import AsyncTable
from .utils import sentry_log


def connect(
@@ -35,6 +36,7 @@ def connect(
    host_override: Optional[str] = None,
    read_consistency_interval: Optional[timedelta] = None,
    request_thread_pool: Optional[Union[int, ThreadPoolExecutor]] = None,
    **kwargs,
) -> DBConnection:
    """Connect to a LanceDB database.

@@ -99,7 +101,12 @@ def connect(
        if isinstance(request_thread_pool, int):
            request_thread_pool = ThreadPoolExecutor(request_thread_pool)
        return RemoteDBConnection(
            uri, api_key, region, host_override, request_thread_pool=request_thread_pool
            uri,
            api_key,
            region,
            host_override,
            request_thread_pool=request_thread_pool,
            **kwargs,
        )
    return LanceDBConnection(uri, read_consistency_interval=read_consistency_interval)

@@ -168,8 +175,33 @@ async def connect_async(
    conn : DBConnection
        A connection to a LanceDB database.
    """
    return AsyncLanceDBConnection(
    if read_consistency_interval is not None:
        read_consistency_interval_secs = read_consistency_interval.total_seconds()
    else:
        read_consistency_interval_secs = None

    return AsyncConnection(
        await lancedb_connect(
            sanitize_uri(uri), api_key, region, host_override, read_consistency_interval
            sanitize_uri(uri),
            api_key,
            region,
            host_override,
            read_consistency_interval_secs,
        )
    )


__all__ = [
    "connect",
    "connect_async",
    "AsyncConnection",
    "AsyncTable",
    "URI",
    "sanitize_uri",
    "sentry_log",
    "vector",
    "DBConnection",
    "LanceDBConnection",
    "RemoteDBConnection",
    "__version__",
]
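The hunk above converts `read_consistency_interval` from a `timedelta` into seconds before handing it to the Rust layer. A minimal sketch of the call site (the interval value is illustrative):

```python
# Sketch: opening an async connection with a read consistency interval.
# The timedelta is converted to total seconds internally, per the diff above.
import asyncio
from datetime import timedelta

import lancedb


async def main():
    db = await lancedb.connect_async(
        "./.lancedb",
        read_consistency_interval=timedelta(seconds=5),  # check for new versions every 5s
    )
    print(await db.table_names())


asyncio.run(main())
```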
@@ -1,7 +1,49 @@
from typing import Optional
from typing import Dict, List, Optional

import pyarrow as pa

class Index:
    @staticmethod
    def ivf_pq(
        distance_type: Optional[str],
        num_partitions: Optional[int],
        num_sub_vectors: Optional[int],
        max_iterations: Optional[int],
        sample_rate: Optional[int],
    ) -> Index: ...
    @staticmethod
    def btree() -> Index: ...

class Connection(object):
    async def table_names(self) -> list[str]: ...
    async def table_names(
        self, start_after: Optional[str], limit: Optional[int]
    ) -> list[str]: ...
    async def create_table(
        self, name: str, mode: str, data: pa.RecordBatchReader
    ) -> Table: ...
    async def create_empty_table(
        self, name: str, mode: str, schema: pa.Schema
    ) -> Table: ...

class Table:
    def name(self) -> str: ...
    def __repr__(self) -> str: ...
    async def schema(self) -> pa.Schema: ...
    async def add(self, data: pa.RecordBatchReader, mode: str) -> None: ...
    async def update(self, updates: Dict[str, str], where: Optional[str]) -> None: ...
    async def count_rows(self, filter: Optional[str]) -> int: ...
    async def create_index(
        self, column: str, config: Optional[Index], replace: Optional[bool]
    ): ...
    async def version(self) -> int: ...
    async def checkout(self, version): ...
    async def checkout_latest(self): ...
    async def restore(self): ...
    async def list_indices(self) -> List[IndexConfig]: ...

class IndexConfig:
    index_type: str
    columns: List[str]

async def connect(
    uri: str,
@@ -11,7 +11,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Iterable, List, Union
from typing import Iterable, List, Optional, Union

import numpy as np
import pyarrow as pa
@@ -38,3 +38,99 @@ class Credential(str):

def sanitize_uri(uri: URI) -> str:
    return str(uri)


def _casting_recordbatch_iter(
    input_iter: Iterable[pa.RecordBatch], schema: pa.Schema
) -> Iterable[pa.RecordBatch]:
    """
    Wrapper around an iterator of record batches. If the batches don't match the
    schema, try to cast them to the schema. If that fails, raise an error.

    This is helpful for users who might have written the iterator with default
    data types in PyArrow, but specified more specific types in the schema. For
    example, PyArrow defaults to float64 for floating point types, but Lance
    uses float32 for vectors.
    """
    for batch in input_iter:
        if not isinstance(batch, pa.RecordBatch):
            raise TypeError(f"Expected RecordBatch, got {type(batch)}")
        if batch.schema != schema:
            try:
                # RecordBatch doesn't have a cast method, but table does.
                batch = pa.Table.from_batches([batch]).cast(schema).to_batches()[0]
            except pa.lib.ArrowInvalid:
                raise ValueError(
                    f"Input RecordBatch iterator yielded a batch with schema that "
                    f"does not match the expected schema.\nExpected:\n{schema}\n"
                    f"Got:\n{batch.schema}"
                )
        yield batch


def data_to_reader(
    data: DATA, schema: Optional[pa.Schema] = None
) -> pa.RecordBatchReader:
    """Convert various types of input into a RecordBatchReader"""
    if pd is not None and isinstance(data, pd.DataFrame):
        return pa.Table.from_pandas(data, schema=schema).to_reader()
    elif isinstance(data, pa.Table):
        return data.to_reader()
    elif isinstance(data, pa.RecordBatch):
        return pa.Table.from_batches([data]).to_reader()
    # elif isinstance(data, LanceDataset):
    #     return data_obj.scanner().to_reader()
    elif isinstance(data, pa.dataset.Dataset):
        return pa.dataset.Scanner.from_dataset(data).to_reader()
    elif isinstance(data, pa.dataset.Scanner):
        return data.to_reader()
    elif isinstance(data, pa.RecordBatchReader):
        return data
    elif (
        type(data).__module__.startswith("polars")
        and data.__class__.__name__ == "DataFrame"
    ):
        return data.to_arrow().to_reader()
    # for other iterables, assume they are of type Iterable[RecordBatch]
    elif isinstance(data, Iterable):
        if schema is not None:
            data = _casting_recordbatch_iter(data, schema)
            return pa.RecordBatchReader.from_batches(schema, data)
        else:
            raise ValueError(
                "Must provide schema to write dataset from RecordBatch iterable"
            )
    else:
        raise TypeError(
            f"Unknown data type {type(data)}. "
            "Please check "
            "https://lancedb.github.io/lance/read_and_write.html "
            "to see supported types."
        )


def validate_schema(schema: pa.Schema):
    """
    Make sure the metadata is valid utf8
    """
    if schema.metadata is not None:
        _validate_metadata(schema.metadata)


def _validate_metadata(metadata: dict):
    """
    Make sure the metadata values are valid utf8 (can be nested)

    Raises ValueError if not valid utf8
    """
    for k, v in metadata.items():
        if isinstance(v, bytes):
            try:
                v.decode("utf8")
            except UnicodeDecodeError:
                raise ValueError(
                    f"Metadata key {k} is not valid utf8. "
                    "Consider base64 encode for generic binary metadata."
                )
        elif isinstance(v, dict):
            _validate_metadata(v)
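`data_to_reader` normalizes many input shapes into a single `pa.RecordBatchReader`, and `_casting_recordbatch_iter` papers over the common float64-vs-float32 mismatch for generator input. A small usage sketch (the data is illustrative):

```python
# Sketch: feeding a generator of RecordBatches through a schema that narrows
# float64 (the PyArrow default) down to float32 vectors.
import pyarrow as pa

from lancedb.common import data_to_reader


def batches():
    # PyArrow infers list<float64> here; the reader below casts it to the schema.
    yield pa.RecordBatch.from_pylist([{"vector": [1.0, 2.0]}])


schema = pa.schema([pa.field("vector", pa.list_(pa.float32()))])
reader = data_to_reader(batches(), schema=schema)
for batch in reader:
    assert batch.schema == schema  # each batch has been cast on the fly
```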
@@ -13,16 +13,24 @@

from __future__ import annotations

import asyncio
import inspect
import os
from abc import abstractmethod
from pathlib import Path
from typing import TYPE_CHECKING, Iterable, List, Optional, Union
from typing import TYPE_CHECKING, Iterable, List, Literal, Optional, Union

import pyarrow as pa
from overrides import EnforceOverrides, override
from pyarrow import fs

from .table import LanceTable, Table
from lancedb.common import data_to_reader, validate_schema
from lancedb.embeddings.registry import EmbeddingFunctionRegistry
from lancedb.utils.events import register_event

from ._lancedb import connect as lancedb_connect
from .pydantic import LanceModel
from .table import AsyncTable, LanceTable, Table, _sanitize_data
from .util import fs_from_uri, get_uri_location, get_uri_scheme, join_uri

if TYPE_CHECKING:
@@ -31,7 +39,6 @@ if TYPE_CHECKING:
    from ._lancedb import Connection as LanceDbConnection
    from .common import DATA, URI
    from .embeddings import EmbeddingFunctionConfig
    from .pydantic import LanceModel


class DBConnection(EnforceOverrides):
@@ -312,6 +319,10 @@ class LanceDBConnection(DBConnection):
    def uri(self) -> str:
        return self._uri

    async def _async_get_table_names(self, start_after: Optional[str], limit: int):
        conn = AsyncConnection(await lancedb_connect(self.uri))
        return await conn.table_names(start_after=start_after, limit=limit)

    @override
    def table_names(
        self, page_token: Optional[str] = None, limit: int = 10
@@ -323,6 +334,10 @@ class LanceDBConnection(DBConnection):
        Iterator of str.
            A list of table names.
        """
        try:
            asyncio.get_running_loop()
            # User application is async. Soon we will just tell them to use the
            # async version. Until then, fall back to the old sync implementation.
            try:
                filesystem = fs_from_uri(self.uri)[0]
            except pa.ArrowInvalid:
@@ -341,6 +356,10 @@ class LanceDBConnection(DBConnection):
            ]
            tables.sort()
            return tables
        except RuntimeError:
            # User application is sync. It is safe to use the async implementation
            # under the hood.
            return asyncio.run(self._async_get_table_names(page_token, limit))

    def __len__(self) -> int:
        return len(self.table_names())
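The hunk above dispatches between sync and async code paths by probing for a running event loop: `asyncio.get_running_loop()` raises `RuntimeError` when called outside a coroutine, so the `except` branch is the pure-sync caller, where `asyncio.run` is safe. A standalone sketch of the same pattern:

```python
# Sketch of the dispatch trick used above: probe for a running loop.
import asyncio


async def _impl() -> str:
    return "result"


def get_result() -> str:
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No running loop: plain sync caller, safe to spin one up.
        return asyncio.run(_impl())
    # A loop is already running; asyncio.run() would fail here, so a sync
    # fallback (or telling the caller to await directly) is needed instead.
    raise RuntimeError("already in an event loop; await _impl() instead")


print(get_result())  # prints "result"
```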
@@ -422,43 +441,95 @@ class LanceDBConnection(DBConnection):
        filesystem.delete_dir(path)


class AsyncConnection(EnforceOverrides):
    """An active LanceDB connection interface."""
class AsyncConnection(object):
    """An active LanceDB connection

    To obtain a connection you can use the [connect_async] function.

    This could be a native connection (using lance) or a remote connection (e.g. for
    connecting to LanceDB Cloud).

    Local connections do not currently hold any open resources, but they may do so in
    the future (for example, for a shared cache or connections to catalog services).
    Remote connections represent an open connection to the remote server. The [close]
    method can be used to release any underlying resources eagerly.

    Connections can be shared on multiple threads and are expected to be long lived.
    Connections can also be used as a context manager; however, in many cases a single
    connection can be used for the lifetime of the application and so this is often
    not needed. Closing a connection is optional. If it is not closed then it will
    be automatically closed when the connection object is deleted.

    Examples
    --------

    >>> import asyncio
    >>> import lancedb
    >>> async def my_connect():
    ...     with await lancedb.connect_async("/tmp/my_dataset") as conn:
    ...         # do something with the connection
    ...         pass
    ...     # conn is closed here
    """

    def __init__(self, connection: LanceDbConnection):
        self._inner = connection

    def __repr__(self):
        return self._inner.__repr__()

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def is_open(self):
        """Return True if the connection is open."""
        return self._inner.is_open()

    def close(self):
        """Close the connection, releasing any underlying resources.

        It is safe to call this method multiple times.

        Any attempt to use the connection after it is closed will result in an error."""
        self._inner.close()

    @abstractmethod
    async def table_names(
        self, *, page_token: Optional[str] = None, limit: int = 10
        self, *, start_after: Optional[str] = None, limit: Optional[int] = None
    ) -> Iterable[str]:
        """List all tables in this database, in sorted order

        Parameters
        ----------
        page_token: str, optional
            The token to use for pagination. If not present, start from the beginning.
            Typically, this token is the last table name from the previous page.
            Only supported by LanceDB Cloud.
        start_after: str, optional
            If present, only return names that come lexicographically after the supplied
            value.

            This can be combined with limit to implement pagination by setting this to
            the last table name from the previous page.
        limit: int, default 10
            The size of the page to return.
            Only supported by LanceDB Cloud.
            The number of results to return.

        Returns
        -------
        Iterable of str
        """
        pass
        return await self._inner.table_names(start_after=start_after, limit=limit)

    @abstractmethod
    async def create_table(
        self,
        name: str,
        data: Optional[DATA] = None,
        schema: Optional[Union[pa.Schema, LanceModel]] = None,
        mode: str = "create",
        exist_ok: bool = False,
        on_bad_vectors: str = "error",
        fill_value: float = 0.0,
        mode: Optional[Literal["create", "overwrite"]] = None,
        exist_ok: Optional[bool] = None,
        on_bad_vectors: Optional[str] = None,
        fill_value: Optional[float] = None,
        embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
    ) -> Table:
    ) -> AsyncTable:
        """Create a [Table][lancedb.table.Table] in the database.

        Parameters
@@ -480,7 +551,7 @@ class AsyncConnection(EnforceOverrides):
            - pyarrow.Schema

            - [LanceModel][lancedb.pydantic.LanceModel]
        mode: str; default "create"
        mode: Literal["create", "overwrite"]; default "create"
            The mode to use when creating the table.
            Can be either "create" or "overwrite".
            By default, if the table already exists, an exception is raised.
@@ -596,7 +667,74 @@ class AsyncConnection(EnforceOverrides):
        LanceTable(connection=..., name="table4")

        """
        raise NotImplementedError
        if inspect.isclass(schema) and issubclass(schema, LanceModel):
            # convert LanceModel to pyarrow schema
            # note that it's possible this contains
            # embedding function metadata already
            schema = schema.to_arrow_schema()

        metadata = None
        if embedding_functions is not None:
            # If we passed in embedding functions explicitly
            # then we'll override any schema metadata that
            # may have been implicitly specified by the LanceModel schema
            registry = EmbeddingFunctionRegistry.get_instance()
            metadata = registry.get_table_metadata(embedding_functions)

        # Defining defaults here and not in function prototype. In the future
        # these defaults will move into rust so better to keep them as None.
        if on_bad_vectors is None:
            on_bad_vectors = "error"

        if fill_value is None:
            fill_value = 0.0

        if data is not None:
            data = _sanitize_data(
                data,
                schema,
                metadata=metadata,
                on_bad_vectors=on_bad_vectors,
                fill_value=fill_value,
            )

        if schema is None:
            if data is None:
                raise ValueError("Either data or schema must be provided")
            elif hasattr(data, "schema"):
                schema = data.schema
            elif isinstance(data, Iterable):
                if metadata:
                    raise TypeError(
                        (
                            "Persistent embedding functions not yet "
                            "supported for generator data input"
                        )
                    )

        if metadata:
            schema = schema.with_metadata(metadata)
        validate_schema(schema)

        if exist_ok is None:
            exist_ok = False
        if mode is None:
            mode = "create"
        if mode == "create" and exist_ok:
            mode = "exist_ok"

        if data is None:
            new_table = await self._inner.create_empty_table(name, mode, schema)
        else:
            data = data_to_reader(data, schema)
            new_table = await self._inner.create_table(
                name,
                mode,
                data,
            )

        register_event("create_table")
        return AsyncTable(new_table)

    async def open_table(self, name: str) -> Table:
        """Open a Lance Table in the database.
@@ -610,7 +748,9 @@ class AsyncConnection(EnforceOverrides):
        -------
        A LanceTable object representing the table.
        """
        raise NotImplementedError
        table = await self._inner.open_table(name)
        register_event("open_table")
        return AsyncTable(table)

    async def drop_table(self, name: str):
        """Drop a table from the database.
@@ -628,46 +768,3 @@ class AsyncConnection(EnforceOverrides):
        This is the same thing as dropping all the tables
        """
        raise NotImplementedError


class AsyncLanceDBConnection(AsyncConnection):
    def __init__(self, connection: LanceDbConnection):
        self._inner = connection

    async def __repr__(self) -> str:
        pass

    @override
    async def table_names(
        self,
        *,
        page_token=None,
        limit=None,
    ) -> Iterable[str]:
        return await self._inner.table_names()

    @override
    async def create_table(
        self,
        name: str,
        data: Optional[DATA] = None,
        schema: Optional[Union[pa.Schema, LanceModel]] = None,
        mode: str = "create",
        exist_ok: bool = False,
        on_bad_vectors: str = "error",
        fill_value: float = 0.0,
        embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
    ) -> LanceTable:
        raise NotImplementedError

    @override
    async def open_table(self, name: str) -> LanceTable:
        raise NotImplementedError

    @override
    async def drop_table(self, name: str, ignore_missing: bool = False):
        raise NotImplementedError

    @override
    async def drop_database(self):
        raise NotImplementedError
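Taken together, the new `AsyncConnection` methods above replace the `AsyncLanceDBConnection` stubs that this changeset deletes. A short usage sketch (the table name and data are illustrative):

```python
# Sketch: creating and reopening a table through the async connection.
import asyncio

import lancedb


async def main():
    db = await lancedb.connect_async("./.lancedb")
    table = await db.create_table(
        "vectors", data=[{"vector": [0.1, 0.2]}], mode="overwrite"
    )
    same_table = await db.open_table("vectors")
    print(await same_table.count_rows())
    db.close()


asyncio.run(main())
```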
@@ -31,7 +31,7 @@ class ImageBindEmbeddings(EmbeddingFunction):
    six different modalities: images, text, audio, depth, thermal, and IMU data

    To download the package, run:
    `pip install imagebind@git+https://github.com/raghavdixit99/ImageBind`
    `pip install imagebind-packaged==0.1.2`
    """

    name: str = "imagebind_huge"

@@ -103,9 +103,9 @@ class InstructorEmbeddingFunction(TextEmbeddingFunction):
    # convert_to_numpy: bool = True  # Hardcoding this as numpy can be ingested directly

    source_instruction: str = "represent the document for retrieval"
    query_instruction: (
        str
    ) = "represent the document for retrieving the most similar documents"
    query_instruction: str = (
        "represent the document for retrieving the most similar documents"
    )

    @weak_lru(maxsize=1)
    def ndims(self):

@@ -10,16 +10,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import cached_property
from typing import List, Optional, Union

import numpy as np
from typing import TYPE_CHECKING, List, Optional, Union

from ..util import attempt_import_or_raise
from .base import TextEmbeddingFunction
from .registry import register
from .utils import api_key_not_found_help

if TYPE_CHECKING:
    import numpy as np


@register("openai")
@@ -28,14 +27,46 @@ class OpenAIEmbeddings(TextEmbeddingFunction):
    An embedding function that uses the OpenAI API

    https://platform.openai.com/docs/guides/embeddings

    This can also be used for open source models that
    are compatible with the OpenAI API.

    Notes
    -----
    If you're running an Ollama server locally,
    you can just override the `base_url` parameter
    and provide the Ollama embedding model you want
    to use (https://ollama.com/library):

    ```python
    from lancedb.embeddings import get_registry
    openai = get_registry().get("openai")
    embedding_function = openai.create(
        name="<ollama-embedding-model-name>",
        base_url="http://localhost:11434",
    )
    ```

    """

    name: str = "text-embedding-ada-002"
    dim: Optional[int] = None
    base_url: Optional[str] = None
    default_headers: Optional[dict] = None
    organization: Optional[str] = None
    api_key: Optional[str] = None

    def ndims(self):
        return self._ndims

    @staticmethod
    def model_names():
        return [
            "text-embedding-ada-002",
            "text-embedding-3-large",
            "text-embedding-3-small",
        ]

    @cached_property
    def _ndims(self):
        if self.name == "text-embedding-ada-002":
@@ -48,8 +79,8 @@ class OpenAIEmbeddings(TextEmbeddingFunction):
            raise ValueError(f"Unknown model name {self.name}")

    def generate_embeddings(
        self, texts: Union[List[str], np.ndarray]
    ) -> List[np.array]:
        self, texts: Union[List[str], "np.ndarray"]
    ) -> List["np.array"]:
        """
        Get the embeddings for the given texts

@@ -62,15 +93,25 @@ class OpenAIEmbeddings(TextEmbeddingFunction):
        if self.name == "text-embedding-ada-002":
            rs = self._openai_client.embeddings.create(input=texts, model=self.name)
        else:
            rs = self._openai_client.embeddings.create(
                input=texts, model=self.name, dimensions=self.ndims()
            )
        kwargs = {
            "input": texts,
            "model": self.name,
        }
        if self.dim:
            kwargs["dimensions"] = self.dim
        rs = self._openai_client.embeddings.create(**kwargs)
        return [v.embedding for v in rs.data]

    @cached_property
    def _openai_client(self):
        openai = attempt_import_or_raise("openai")

        if not os.environ.get("OPENAI_API_KEY"):
            api_key_not_found_help("openai")
        return openai.OpenAI()
        kwargs = {}
        if self.base_url:
            kwargs["base_url"] = self.base_url
        if self.default_headers:
            kwargs["default_headers"] = self.default_headers
        if self.organization:
            kwargs["organization"] = self.organization
        if self.api_key:
            kwargs["api_key"] = self.api_key
        return openai.OpenAI(**kwargs)
@@ -12,6 +12,7 @@
# limitations under the License.

"""Full text search index using tantivy-py"""

import os
from typing import List, Tuple

@@ -21,7 +22,7 @@ try:
    import tantivy
except ImportError:
    raise ImportError(
        "Please install tantivy-py `pip install tantivy@git+https://github.com/quickwit-oss/tantivy-py#164adc87e1a033117001cf70e38c82a53014d985` to use the full text search feature."  # noqa: E501
        "Please install tantivy-py `pip install tantivy` to use the full text search feature."  # noqa: E501
    )

from .table import LanceTable

163 python/python/lancedb/index.py Normal file
@@ -0,0 +1,163 @@
from typing import Optional

from ._lancedb import (
    Index as LanceDbIndex,
)
from ._lancedb import (
    IndexConfig,
)


class BTree(object):
    """Describes a btree index configuration

    A btree index is an index on scalar columns. The index stores a copy of the
    column in sorted order. A header entry is created for each block of rows
    (currently the block size is fixed at 4096). These header entries are stored
    in a separate cacheable structure (a btree). To search for data the header is
    used to determine which blocks need to be read from disk.

    For example, a btree index in a table with 1Bi rows requires
    sizeof(Scalar) * 256Ki bytes of memory and will generally need to read
    sizeof(Scalar) * 4096 bytes to find the correct row ids.

    This index is good for scalar columns with mostly distinct values and does best
    when the query is highly selective.

    The btree index does not currently have any parameters though parameters such as
    the block size may be added in the future.
    """

    def __init__(self):
        self._inner = LanceDbIndex.btree()


class IvfPq(object):
    """Describes an IVF PQ Index

    This index stores a compressed (quantized) copy of every vector. These vectors
    are grouped into partitions of similar vectors. Each partition keeps track of
    a centroid which is the average value of all vectors in the group.

    During a query the centroids are compared with the query vector to find the
    closest partitions. The compressed vectors in these partitions are then
    searched to find the closest vectors.

    The compression scheme is called product quantization. Each vector is divided
    into subvectors and then each subvector is quantized into a small number of
    bits. The parameters `num_bits` and `num_sub_vectors` control this process,
    providing a tradeoff between index size (and thus search speed) and index
    accuracy.

    The partitioning process is called IVF and the `num_partitions` parameter
    controls how many groups to create.

    Note that training an IVF PQ index on a large dataset is a slow operation and
    currently is also a memory intensive operation.
    """

    def __init__(
        self,
        *,
        distance_type: Optional[str] = None,
        num_partitions: Optional[int] = None,
        num_sub_vectors: Optional[int] = None,
        max_iterations: Optional[int] = None,
        sample_rate: Optional[int] = None,
    ):
        """
        Create an IVF PQ index config

        Parameters
        ----------
        distance_type: str, default "L2"
            The distance metric used to train the index

            This is used when training the index to calculate the IVF partitions
            (vectors are grouped in partitions with similar vectors according to this
            distance type) and to calculate a subvector's code during quantization.

            The distance type used to train an index MUST match the distance type used
            to search the index. Failure to do so will yield inaccurate results.

            The following distance types are available:

            "l2" - Euclidean distance. This is a very common distance metric that
            accounts for both magnitude and direction when determining the distance
            between vectors. L2 distance has a range of [0, ∞).

            "cosine" - Cosine distance. Cosine distance is a distance metric
            calculated from the cosine similarity between two vectors. Cosine
            similarity is a measure of similarity between two non-zero vectors of an
            inner product space. It is defined to equal the cosine of the angle
            between them. Unlike L2, the cosine distance is not affected by the
            magnitude of the vectors. Cosine distance has a range of [0, 2].

            Note: the cosine distance is undefined when one (or both) of the vectors
            are all zeros (there is no direction). These vectors are invalid and may
            never be returned from a vector search.

            "dot" - Dot product. Dot distance is the dot product of two vectors. Dot
            distance has a range of (-∞, ∞). If the vectors are normalized (i.e. their
            L2 norm is 1), then dot distance is equivalent to the cosine distance.
        num_partitions: int, default sqrt(num_rows)
            The number of IVF partitions to create.

            This value should generally scale with the number of rows in the dataset.
            By default the number of partitions is the square root of the number of
            rows.

            If this value is too large then the first part of the search (picking the
            right partition) will be slow. If this value is too small then the second
            part of the search (searching within a partition) will be slow.
        num_sub_vectors: int, default is vector dimension / 16
            Number of sub-vectors of PQ.

            This value controls how much the vector is compressed during the
            quantization step. The more sub-vectors there are the less the vector is
            compressed. The default is the dimension of the vector divided by 16. If
            the dimension is not evenly divisible by 16 we use the dimension divided
            by 8.

            The above two cases are highly preferred. Having 8 or 16 values per
            subvector allows us to use efficient SIMD instructions.

            If the dimension is not divisible by 8 then we use 1 subvector. This is
            not ideal and will likely result in poor performance.
        max_iterations: int, default 50
            Max iterations to train kmeans.

            When training an IVF PQ index we use kmeans to calculate the partitions.
            This parameter controls how many iterations of kmeans to run.

            Increasing this might improve the quality of the index but in most cases
            these extra iterations have diminishing returns.

            The default value is 50.
        sample_rate: int, default 256
            The rate used to calculate the number of training vectors for kmeans.

            When an IVF PQ index is trained, we need to calculate partitions. These
            are groups of vectors that are similar to each other. To do this we use an
            algorithm called kmeans.

            Running kmeans on a large dataset can be slow. To speed this up we run
            kmeans on a random sample of the data. This parameter controls the size of
            the sample. The total number of vectors used to train the index is
            `sample_rate * num_partitions`.

            Increasing this value might improve the quality of the index but in most
            cases the default should be sufficient.

            The default value is 256.
        """
        self._inner = LanceDbIndex.ivf_pq(
            distance_type=distance_type,
            num_partitions=num_partitions,
            num_sub_vectors=num_sub_vectors,
            max_iterations=max_iterations,
            sample_rate=sample_rate,
        )


__all__ = ["BTree", "IvfPq", "IndexConfig"]
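These config classes plug into `AsyncTable.create_index`, shown later in this changeset. A minimal sketch (column names and parameter values are illustrative, not recommendations):

```python
# Sketch: building a vector index and a scalar index on an async table.
# `table` is assumed to be an AsyncTable obtained via connect_async().
from lancedb.index import BTree, IvfPq


async def build_indices(table):
    # Vector index: partition count and distance type are illustrative.
    await table.create_index(
        "vector",
        config=IvfPq(distance_type="cosine", num_partitions=256, num_sub_vectors=16),
    )
    # Scalar btree index to speed up filters on a mostly-distinct column.
    await table.create_index("id", config=BTree())
```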
@@ -106,8 +106,8 @@ class Query(pydantic.BaseModel):


class LanceQueryBuilder(ABC):
    """Build LanceDB query based on specific query type:
    vector or full text search.
    """An abstract query builder. Subclasses are defined for vector search,
    full text search, hybrid, and plain SQL filtering.
    """

    @classmethod
@@ -118,6 +118,22 @@ class LanceQueryBuilder(ABC):
        query_type: str,
        vector_column_name: str,
    ) -> LanceQueryBuilder:
        """
        Create a query builder based on the given query and query type.

        Parameters
        ----------
        table: Table
            The table to query.
        query: Optional[Union[np.ndarray, str, "PIL.Image.Image", Tuple]]
            The query to use. If None, an empty query builder is returned
            which performs simple SQL filtering.
        query_type: str
            The type of query to perform. One of "vector", "fts", "hybrid", or "auto".
            If "auto", the query type is inferred based on the query.
        vector_column_name: str
            The name of the vector column to use for vector search.
        """
        if query is None:
            return LanceEmptyQueryBuilder(table)

@@ -336,10 +352,8 @@ class LanceQueryBuilder(ABC):
        LanceQueryBuilder
            The LanceQueryBuilder object.
        """
        if isinstance(columns, list):
        if isinstance(columns, list) or isinstance(columns, dict):
            self._columns = columns
        elif isinstance(columns, dict):
            self._columns = list(columns.items())
        else:
            raise ValueError("columns must be a list or a dictionary")
        return self
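After this change, `select` keeps a dict argument as-is rather than flattening it to a list of pairs; the dict form maps output column names to SQL expressions. A usage sketch (`table` and all column names are illustrative):

```python
# Sketch: projecting existing columns with a list, or deriving new ones
# with SQL expressions via a dict of {output_name: expression}.
rows = (
    table.search([0.1, 0.2])
    .select({"caption": "caption", "width_km": "original_width / 1000.0"})
    .limit(5)
    .to_pandas()
)
```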
@@ -561,7 +575,7 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):
            import tantivy
        except ImportError:
            raise ImportError(
                "Please install tantivy-py `pip install tantivy@git+https://github.com/quickwit-oss/tantivy-py#164adc87e1a033117001cf70e38c82a53014d985` to use the full text search feature."  # noqa: E501
                "Please install tantivy-py `pip install tantivy` to use the full text search feature."  # noqa: E501
            )

        from .fts import search_index
@@ -589,19 +603,26 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):
        scores = pa.array(scores)
        output_tbl = self._table.to_lance().take(row_ids, columns=self._columns)
        output_tbl = output_tbl.append_column("score", scores)
        # this needs to match vector search results which are uint64
        row_ids = pa.array(row_ids, type=pa.uint64())

        if self._where is not None:
            tmp_name = "__lancedb__duckdb__indexer__"
            output_tbl = output_tbl.append_column(
                tmp_name, pa.array(range(len(output_tbl)))
            )
            try:
                # TODO would be great to have Substrait generate pyarrow compute
                # expressions or conversely have pyarrow support SQL expressions
                # using Substrait
                import duckdb

                output_tbl = (
                    duckdb.sql("SELECT * FROM output_tbl")
                    .filter(self._where)
                    .to_arrow_table()
                )
                indexer = duckdb.sql(
                    f"SELECT {tmp_name} FROM output_tbl WHERE {self._where}"
                ).to_arrow_table()[tmp_name]
                output_tbl = output_tbl.take(indexer).drop([tmp_name])
                row_ids = row_ids.take(indexer)

            except ImportError:
                import tempfile

@@ -611,10 +632,11 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):
                with tempfile.TemporaryDirectory() as tmp:
                    ds = lance.write_dataset(output_tbl, tmp)
                    output_tbl = ds.to_table(filter=self._where)
                    indexer = output_tbl[tmp_name]
                    row_ids = row_ids.take(indexer)
                    output_tbl = output_tbl.drop([tmp_name])

        if self._with_row_id:
            # Need to set this to uint explicitly as vector results are in uint64
            row_ids = pa.array(row_ids, type=pa.uint64())
            output_tbl = output_tbl.append_column("_rowid", row_ids)
        return output_tbl
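The hunk above keeps `row_ids` aligned with the filtered table by appending a temporary row-number column, asking DuckDB which row numbers survive the filter, and using them as a take-indexer on both the table and the ids. A standalone sketch of the same pattern (table contents are illustrative):

```python
# Sketch: filter an Arrow table with DuckDB while keeping a parallel array
# (here `row_ids`) aligned with the surviving rows.
import duckdb
import pyarrow as pa

output_tbl = pa.table({"score": [0.9, 0.1, 0.7], "width": [100, 2000, 3000]})
row_ids = pa.array([7, 8, 9], type=pa.uint64())

tmp = "__indexer__"
output_tbl = output_tbl.append_column(tmp, pa.array(range(len(output_tbl))))
indexer = duckdb.sql(
    f"SELECT {tmp} FROM output_tbl WHERE width > 1000"
).to_arrow_table()[tmp]
output_tbl = output_tbl.take(indexer).drop([tmp])
row_ids = row_ids.take(indexer)
print(output_tbl.to_pydict(), row_ids)  # rows 2 and 3 survive; ids [8, 9]
```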
@@ -630,6 +652,16 @@ class LanceEmptyQueryBuilder(LanceQueryBuilder):


class LanceHybridQueryBuilder(LanceQueryBuilder):
    """
    A query builder that performs hybrid vector and full text search.
    Results are combined and reranked based on the specified reranker.
    By default, the results are reranked using the LinearCombinationReranker.

    To make the vector and fts results comparable, the scores are normalized.
    Instead of normalizing scores, the `normalize` parameter can be set to "rank"
    in the `rerank` method to convert the scores to ranks and then normalize them.
    """

    def __init__(self, table: "Table", query: str, vector_column: str):
        super().__init__(table)
        self._validate_fts_index()

@@ -58,6 +58,9 @@ class RestfulLanceDBClient:

    closed: bool = attrs.field(default=False, init=False)

    connection_timeout: float = attrs.field(default=120.0, kw_only=True)
    read_timeout: float = attrs.field(default=300.0, kw_only=True)

    @functools.cached_property
    def session(self) -> requests.Session:
        sess = requests.Session()
@@ -117,7 +120,7 @@ class RestfulLanceDBClient:
            urljoin(self.url, uri),
            params=params,
            headers=self.headers,
            timeout=(120.0, 300.0),
            timeout=(self.connection_timeout, self.read_timeout),
        ) as resp:
            self._check_status(resp)
            return resp.json()
@@ -159,7 +162,7 @@ class RestfulLanceDBClient:
            urljoin(self.url, uri),
            headers=headers,
            params=params,
            timeout=(120.0, 300.0),
            timeout=(self.connection_timeout, self.read_timeout),
            **req_kwargs,
        ) as resp:
            self._check_status(resp)

@@ -41,6 +41,8 @@ class RemoteDBConnection(DBConnection):
        region: str,
        host_override: Optional[str] = None,
        request_thread_pool: Optional[ThreadPoolExecutor] = None,
        connection_timeout: float = 120.0,
        read_timeout: float = 300.0,
    ):
        """Connect to a remote LanceDB database."""
        parsed = urlparse(db_url)
@@ -49,7 +51,12 @@ class RemoteDBConnection(DBConnection):
        self.db_name = parsed.netloc
        self.api_key = api_key
        self._client = RestfulLanceDBClient(
            self.db_name, region, api_key, host_override
            self.db_name,
            region,
            api_key,
            host_override,
            connection_timeout=connection_timeout,
            read_timeout=read_timeout,
        )
        self._request_thread_pool = request_thread_pool

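Because `connect` now forwards `**kwargs` to `RemoteDBConnection` (see the `__init__.py` hunk earlier), the new timeouts can plausibly be set at connect time. A hedged sketch; the URI, key, and values are placeholders, and the kwargs pass-through is an assumption based on the two hunks above:

```python
# Sketch: tightening HTTP timeouts for a remote (LanceDB Cloud) database.
# Both kwargs default to 120s / 300s per the diff above.
import lancedb

db = lancedb.connect(
    "db://my-database",  # hypothetical remote database URI
    api_key="sk-...",  # placeholder
    region="us-east-1",
    connection_timeout=30.0,  # seconds to establish the HTTP connection
    read_timeout=60.0,  # seconds to wait for a response
)
```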
@@ -66,11 +66,41 @@ class RemoteTable(Table):
        """to_pandas() is not yet supported on LanceDB cloud."""
        return NotImplementedError("to_pandas() is not yet supported on LanceDB cloud.")

    def create_scalar_index(self, *args, **kwargs):
        """Creates a scalar index"""
        return NotImplementedError(
            "create_scalar_index() is not yet supported on LanceDB cloud."
    def list_indices(self):
        """List all the indices on the table"""
        resp = self._conn._client.post(f"/v1/table/{self._name}/index/list/")
        return resp

    def index_stats(self, index_uuid: str):
        """Get the stats for a single index on the table"""
        resp = self._conn._client.post(
            f"/v1/table/{self._name}/index/{index_uuid}/stats/"
        )
        return resp

    def create_scalar_index(
        self,
        column: str,
    ):
        """Creates a scalar index
        Parameters
        ----------
        column : str
            The column to be indexed. Must be a boolean, integer, float,
            or string column.
        """
        index_type = "scalar"

        data = {
            "column": column,
            "index_type": index_type,
            "replace": True,
        }
        resp = self._conn._client.post(
            f"/v1/table/{self._name}/create_scalar_index/", data=data
        )

        return resp

    def create_index(
        self,
@@ -277,6 +307,7 @@ class RemoteTable(Table):
                f = Future()
                f.set_result(self._conn._client.query(name, q))
                return f

        else:

            def submit(name, q):

|
||||
# limitations under the License.
|
||||
|
||||
"""Schema related utilities."""
|
||||
|
||||
import pyarrow as pa
|
||||
|
||||
|
||||
|
||||
@@ -19,7 +19,17 @@ from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import timedelta
from functools import cached_property
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union
from typing import (
    TYPE_CHECKING,
    Any,
    Dict,
    Iterable,
    List,
    Literal,
    Optional,
    Tuple,
    Union,
)

import lance
import numpy as np
@@ -48,7 +58,9 @@ if TYPE_CHECKING:
    import PIL
    from lance.dataset import CleanupStats, ReaderLike

    from ._lancedb import Table as LanceDBTable
    from .db import LanceDBConnection
    from .index import BTree, IndexConfig, IvfPq


pd = safe_import_pandas()
@@ -106,7 +118,8 @@ def _append_vector_col(data: pa.Table, metadata: dict, schema: Optional[pa.Schem
    functions = EmbeddingFunctionRegistry.get_instance().parse_functions(metadata)
    for vector_column, conf in functions.items():
        func = conf.function
        if vector_column not in data.column_names:
        no_vector_column = vector_column not in data.column_names
        if no_vector_column or pc.all(pc.is_null(data[vector_column])).as_py():
            col_data = func.compute_source_embeddings_with_retry(
                data[conf.source_column]
            )
@@ -114,9 +127,16 @@ def _append_vector_col(data: pa.Table, metadata: dict, schema: Optional[pa.Schem
                dtype = schema.field(vector_column).type
            else:
                dtype = pa.list_(pa.float32(), len(col_data[0]))
            if no_vector_column:
                data = data.append_column(
                    pa.field(vector_column, type=dtype), pa.array(col_data, type=dtype)
                )
            else:
                data = data.set_column(
                    data.column_names.index(vector_column),
                    pa.field(vector_column, type=dtype),
                    pa.array(col_data, type=dtype),
                )
    return data

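The `_append_vector_col` change above now also recomputes embeddings when the vector column exists but is entirely null, overwriting it in place with `set_column` instead of appending a duplicate. A small pyarrow sketch of that branch (data and column names are illustrative):

```python
# Sketch: overwrite an existing all-null column in place with set_column.
import pyarrow as pa

dtype = pa.list_(pa.float32(), 2)
data = pa.table(
    {"text": ["a", "b"], "vector": pa.array([None, None], type=dtype)}
)
col_data = [[0.1, 0.2], [0.3, 0.4]]  # stand-in for computed embeddings
data = data.set_column(
    data.column_names.index("vector"),
    pa.field("vector", type=dtype),
    pa.array(col_data, type=dtype),
)
```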
@@ -1780,3 +1800,634 @@ def _sanitize_nans(data, fill_value, on_bad_vectors, vec_arr, vector_column_name
    is_full = np.any(~is_value_nan.reshape(-1, vec_arr.type.list_size), axis=1)
    data = data.filter(is_full)
    return data


class AsyncTable:
    """
    An AsyncTable is a collection of Records in a LanceDB Database.

    An AsyncTable can be obtained from the
    [AsyncConnection.create_table][lancedb.AsyncConnection.create_table] and
    [AsyncConnection.open_table][lancedb.AsyncConnection.open_table] methods.

    An AsyncTable object is expected to be long lived and reused for multiple
    operations. AsyncTable objects will cache a certain amount of index data in memory.
    This cache will be freed when the Table is garbage collected. To eagerly free the
    cache you can call the [close][AsyncTable.close] method. Once the AsyncTable is
    closed, it cannot be used for any further operations.

    An AsyncTable can also be used as a context manager, and will automatically close
    when the context is exited. Closing a table is optional. If you do not close the
    table, it will be closed when the AsyncTable object is garbage collected.

    Examples
    --------

    Create using [DBConnection.create_table][lancedb.DBConnection.create_table]
    (more examples in that method's documentation).

    >>> import lancedb
    >>> db = lancedb.connect("./.lancedb")
    >>> table = db.create_table("my_table", data=[{"vector": [1.1, 1.2], "b": 2}])
    >>> table.head()
    pyarrow.Table
    vector: fixed_size_list<item: float>[2]
      child 0, item: float
    b: int64
    ----
    vector: [[[1.1,1.2]]]
    b: [[2]]

    Can append new data with [Table.add()][lancedb.table.Table.add].

    >>> table.add([{"vector": [0.5, 1.3], "b": 4}])

    Can query the table with [Table.search][lancedb.table.Table.search].

    >>> table.search([0.4, 0.4]).select(["b", "vector"]).to_pandas()
       b      vector  _distance
    0  4  [0.5, 1.3]       0.82
    1  2  [1.1, 1.2]       1.13

    Search queries are much faster when an index is created. See
    [Table.create_index][lancedb.table.Table.create_index].
    """

    def __init__(self, table: LanceDBTable):
        """Create a new Table object.

        You should not create Table objects directly.

        Use [AsyncConnection.create_table][lancedb.AsyncConnection.create_table] and
        [AsyncConnection.open_table][lancedb.AsyncConnection.open_table] to obtain
        Table objects."""
        self._inner = table

    def __repr__(self):
        return self._inner.__repr__()

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def is_open(self) -> bool:
        """Return True if the table is open."""
        return self._inner.is_open()

    def close(self):
        """Close the table and free any resources associated with it.

        It is safe to call this method multiple times.

        Any attempt to use the table after it has been closed will raise an error."""
        return self._inner.close()

    @property
    def name(self) -> str:
        """The name of the table."""
        return self._inner.name()

    async def schema(self) -> pa.Schema:
        """The [Arrow Schema](https://arrow.apache.org/docs/python/api/datatypes.html#)
        of this Table

        """
        return await self._inner.schema()

    async def count_rows(self, filter: Optional[str] = None) -> int:
        """
        Count the number of rows in the table.

        Parameters
        ----------
        filter: str, optional
            A SQL where clause to filter the rows to count.
        """
        return await self._inner.count_rows(filter)

    async def to_pandas(self) -> "pd.DataFrame":
        """Return the table as a pandas DataFrame.

        Returns
        -------
        pd.DataFrame
        """
        return (await self.to_arrow()).to_pandas()

    async def to_arrow(self) -> pa.Table:
        """Return the table as a pyarrow Table.

        Returns
        -------
        pa.Table
        """
        raise NotImplementedError

    async def create_index(
        self,
        column: str,
        *,
        replace: Optional[bool] = None,
        config: Optional[Union[IvfPq, BTree]] = None,
    ):
        """Create an index to speed up queries

        Indices can be created on vector columns or scalar columns.
        Indices on vector columns will speed up vector searches.
        Indices on scalar columns will speed up filtering (in both
        vector and non-vector searches)

        Parameters
        ----------
        config: IvfPq or BTree, optional
            The index configuration to create.

            LanceDB supports multiple types of indices. See the config classes in
            lancedb.index for more details.
        column: str, default None
            The column to index.

            When building a scalar index this must be set.

            When building a vector index, this is optional. The default will look
            for any columns of type fixed-size-list with floating point values. If
            there is only one column of this type then it will be used. Otherwise
            an error will be returned.
        replace: bool, default True
            Whether to replace the existing index

            If this is false, and another index already exists on the same columns
            and the same name, then an error will be returned. This is true even if
            that index is out of date.

            The default is True
        """
        index = None
        if config is not None:
            index = config._inner
        await self._inner.create_index(column, index=index, replace=replace)

    async def add(
        self,
        data: DATA,
        *,
        mode: Optional[Literal["append", "overwrite"]] = "append",
        on_bad_vectors: Optional[str] = None,
        fill_value: Optional[float] = None,
    ):
        """Add more data to the [Table](Table).

        Parameters
        ----------
        data: DATA
            The data to insert into the table. Acceptable types are:

            - dict or list-of-dict

            - pandas.DataFrame

            - pyarrow.Table or pyarrow.RecordBatch
        mode: str
            The mode to use when writing the data. Valid values are
            "append" and "overwrite".
        on_bad_vectors: str, default "error"
            What to do if any of the vectors are not the same size or contain NaNs.
            One of "error", "drop", "fill".
        fill_value: float, default 0.
            The value to use when filling vectors. Only used if on_bad_vectors="fill".

        """
        schema = await self.schema()
        if on_bad_vectors is None:
            on_bad_vectors = "error"
        if fill_value is None:
            fill_value = 0.0
        data = _sanitize_data(
            data,
            schema,
            metadata=schema.metadata,
            on_bad_vectors=on_bad_vectors,
            fill_value=fill_value,
        )
        if isinstance(data, pa.Table):
            data = pa.RecordBatchReader.from_batches(data.schema, data.to_batches())
        await self._inner.add(data, mode)
        register_event("add")

    def merge_insert(self, on: Union[str, Iterable[str]]) -> LanceMergeInsertBuilder:
        """
        Returns a [`LanceMergeInsertBuilder`][lancedb.merge.LanceMergeInsertBuilder]
        that can be used to create a "merge insert" operation

        This operation can add rows, update rows, and remove rows all in a single
        transaction. It is a very generic tool that can be used to create
        behaviors like "insert if not exists", "update or insert (i.e. upsert)",
        or even replace a portion of existing data with new data (e.g. replace
        all data where month="january").

        The merge insert operation works by combining new data from a
        **source table** with existing data in a **target table** by using a
        join. There are three categories of records.

        "Matched" records are records that exist in both the source table and
        the target table. "Not matched" records exist only in the source table
        (e.g. these are new data). "Not matched by source" records exist only
        in the target table (this is old data).

        The builder returned by this method can be used to customize what
        should happen for each category of data.

        Please note that the data may appear to be reordered as part of this
        operation. This is because updated rows will be deleted from the
        dataset and then reinserted at the end with the new values.

        Parameters
        ----------

        on: Union[str, Iterable[str]]
            A column (or columns) to join on. This is how records from the
            source table and target table are matched. Typically this is some
            kind of key or id column.

        Examples
        --------
        >>> import lancedb
        >>> data = pa.table({"a": [2, 1, 3], "b": ["a", "b", "c"]})
        >>> db = lancedb.connect("./.lancedb")
        >>> table = db.create_table("my_table", data)
        >>> new_data = pa.table({"a": [2, 3, 4], "b": ["x", "y", "z"]})
        >>> # Perform an "upsert" operation
        >>> table.merge_insert("a")     \\
        ...      .when_matched_update_all()     \\
        ...      .when_not_matched_insert_all() \\
        ...      .execute(new_data)
        >>> # The order of new rows is non-deterministic since we use
        >>> # a hash-join as part of this operation and so we sort here
        >>> table.to_arrow().sort_by("a").to_pandas()
           a  b
        0  1  b
        1  2  x
        2  3  y
        3  4  z
        """
        on = [on] if isinstance(on, str) else list(on)

        return LanceMergeInsertBuilder(self, on)

    async def search(
        self,
        query: Optional[Union[VEC, str, "PIL.Image.Image", Tuple]] = None,
        vector_column_name: Optional[str] = None,
        query_type: str = "auto",
    ) -> LanceQueryBuilder:
        """Create a search query to find the nearest neighbors
        of the given query vector. We currently support [vector search][search]
        and [full-text search][experimental-full-text-search].

        All query options are defined in [Query][lancedb.query.Query].

        Examples
        --------
        >>> import lancedb
        >>> db = lancedb.connect("./.lancedb")
        >>> data = [
        ...    {"original_width": 100, "caption": "bar", "vector": [0.1, 2.3, 4.5]},
        ...    {"original_width": 2000, "caption": "foo",  "vector": [0.5, 3.4, 1.3]},
        ...    {"original_width": 3000, "caption": "test", "vector": [0.3, 6.2, 2.6]}
        ... ]
        >>> table = db.create_table("my_table", data)
        >>> query = [0.4, 1.4, 2.4]
        >>> (table.search(query)
        ...     .where("original_width > 1000", prefilter=True)
        ...     .select(["caption", "original_width", "vector"])
        ...     .limit(2)
        ...     .to_pandas())
          caption  original_width           vector  _distance
        0     foo            2000  [0.5, 3.4, 1.3]   5.220000
        1    test            3000  [0.3, 6.2, 2.6]  23.089996

        Parameters
        ----------
        query: list/np.ndarray/str/PIL.Image.Image, default None
            The targeted vector to search for.

            - *default None*.
              Acceptable types are: list, np.ndarray, PIL.Image.Image

            - If None, then the select/where/limit clauses are applied to
              filter the table.
        vector_column_name: str, optional
            The name of the vector column to search.

            The vector column needs to be a pyarrow fixed size list type.

            - If not specified, then the vector column is inferred from
              the table schema.

            - If the table has multiple vector columns, then
              *vector_column_name* needs to be specified. Otherwise, an error
              is raised.
        query_type: str
            *default "auto"*.
            Acceptable types are: "vector", "fts", "hybrid", or "auto"

            - If "auto", then the query type is inferred from the query:

              - If `query` is a list/np.ndarray, then the query type is
                "vector";

              - If `query` is a PIL.Image.Image, then a vector search is
                performed, or an error is raised if no corresponding embedding
                function is found.

            - If `query` is a string, then the query type is "vector" if the
              table has embedding functions; otherwise the query type is
              "fts".

        Returns
        -------
        LanceQueryBuilder
            A query builder object representing the query.
            Once executed, the query returns

            - selected columns

            - the vector

            - and also the "_distance" column, which is the distance between
              the query vector and the returned vector.
        """
        raise NotImplementedError
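A minimal usage sketch of the query-type inference described above, assuming the docstring's example table; the FTS call is commented out because it would additionally require a full-text index:

        # Hedged sketch: exercising query_type against the docstring's table.
        results = table.search([0.4, 1.4, 2.4], query_type="vector").limit(2).to_pandas()
        auto = table.search([0.4, 1.4, 2.4]).limit(2).to_pandas()  # list => inferred "vector"
        # A string query would be inferred as "fts", assuming an FTS index exists:
        # table.create_fts_index("caption"); table.search("foo").limit(2).to_list()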
    async def _execute_query(self, query: Query) -> pa.Table:
        pass

    async def _do_merge(
        self,
        merge: LanceMergeInsertBuilder,
        new_data: DATA,
        on_bad_vectors: str,
        fill_value: float,
    ):
        pass

    async def delete(self, where: str):
        """Delete rows from the table.

        This can be used to delete a single row, many rows, all rows, or
        sometimes no rows (if your predicate matches nothing).

        Parameters
        ----------
        where: str
            The SQL where clause to use when deleting rows.

            - For example, 'x = 2' or 'x IN (1, 2, 3)'.

            The filter must not be empty, or it will error.

        Examples
        --------
        >>> import lancedb
        >>> data = [
        ...    {"x": 1, "vector": [1, 2]},
        ...    {"x": 2, "vector": [3, 4]},
        ...    {"x": 3, "vector": [5, 6]}
        ... ]
        >>> db = lancedb.connect("./.lancedb")
        >>> table = db.create_table("my_table", data)
        >>> table.to_pandas()
           x      vector
        0  1  [1.0, 2.0]
        1  2  [3.0, 4.0]
        2  3  [5.0, 6.0]
        >>> table.delete("x = 2")
        >>> table.to_pandas()
           x      vector
        0  1  [1.0, 2.0]
        1  3  [5.0, 6.0]

        If you have a list of values to delete, you can combine them into a
        stringified list and use the `IN` operator:

        >>> to_remove = [1, 5]
        >>> to_remove = ", ".join([str(v) for v in to_remove])
        >>> to_remove
        '1, 5'
        >>> table.delete(f"x IN ({to_remove})")
        >>> table.to_pandas()
           x      vector
        0  3  [5.0, 6.0]
        """
        raise NotImplementedError

    async def update(
        self,
        updates: Optional[Dict[str, Any]] = None,
        *,
        where: Optional[str] = None,
        updates_sql: Optional[Dict[str, str]] = None,
    ):
        """
        This can be used to update zero to all rows in the table.

        If a filter is provided with `where` then only rows matching the
        filter will be updated. Otherwise all rows will be updated.

        Parameters
        ----------
        updates: dict, optional
            The updates to apply. The keys should be the name of the column to
            update. The values should be the new values to assign. This is
            required unless updates_sql is supplied.
        where: str, optional
            An SQL filter that controls which rows are updated. For example,
            'x = 2' or 'x IN (1, 2, 3)'. Only rows that satisfy this filter
            will be updated.
        updates_sql: dict, optional
            The updates to apply, expressed as SQL expression strings. The keys
            should be column names. The values should be SQL expressions. These
            can be SQL literals (e.g. "7" or "'foo'") or they can be
            expressions based on the previous value of the row (e.g. "x + 1"
            to increment the x column by 1).

        Examples
        --------
        >>> import asyncio
        >>> import lancedb
        >>> import pandas as pd
        >>> async def demo_update():
        ...     data = pd.DataFrame({"x": [1, 2], "vector": [[1, 2], [3, 4]]})
        ...     db = await lancedb.connect_async("./.lancedb")
        ...     table = await db.create_table("my_table", data)
        ...     # x is [1, 2], vector is [[1, 2], [3, 4]]
        ...     await table.update({"vector": [10, 10]}, where="x = 2")
        ...     # x is [1, 2], vector is [[1, 2], [10, 10]]
        ...     await table.update(updates_sql={"x": "x + 1"})
        ...     # x is [2, 3], vector is [[1, 2], [10, 10]]
        >>> asyncio.run(demo_update())
        """
        if updates is not None and updates_sql is not None:
            raise ValueError("Only one of updates or updates_sql can be provided")
        if updates is None and updates_sql is None:
            raise ValueError("Either updates or updates_sql must be provided")

        if updates is not None:
            updates_sql = {k: value_to_sql(v) for k, v in updates.items()}

        return await self._inner.update(updates_sql, where)

    async def cleanup_old_versions(
        self,
        older_than: Optional[timedelta] = None,
        *,
        delete_unverified: bool = False,
    ) -> CleanupStats:
        """
        Clean up old versions of the table, freeing disk space.

        Note: This function is not available in LanceDb Cloud (since LanceDb
        Cloud manages cleanup for you automatically)

        Parameters
        ----------
        older_than: timedelta, default None
            The minimum age of the version to delete. If None, then this
            defaults to two weeks.
        delete_unverified: bool, default False
            Because they may be part of an in-progress transaction, files newer
            than 7 days old are not deleted by default. If you are sure that
            there are no in-progress transactions, then you can set this to
            True to delete all files older than `older_than`.

        Returns
        -------
        CleanupStats
            The stats of the cleanup operation, including how many bytes were
            freed.
        """
        raise NotImplementedError

    async def compact_files(self, *args, **kwargs):
        """
        Run the compaction process on the table.

        Note: This function is not available in LanceDb Cloud (since LanceDb
        Cloud manages compaction for you automatically)

        This can be run after making several small appends to optimize the
        table for faster reads.

        Arguments are passed on to
        :meth:`lance.dataset.DatasetOptimizer.compact_files`.
        For most cases, the default should be fine.
        """
        raise NotImplementedError
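A hedged maintenance sketch for local/OSS tables (not LanceDb Cloud), using a synchronous table opened via lancedb.connect(); the threshold and the stats field name are assumptions, not taken from this diff:

        from datetime import timedelta

        stats = table.cleanup_old_versions(older_than=timedelta(days=30))
        print(stats.bytes_removed)  # assumed CleanupStats field name
        table.compact_files()       # defaults are usually fine, per the docstring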
    async def add_columns(self, transforms: Dict[str, str]):
        """
        Add new columns with defined values.

        This is not yet available in LanceDB Cloud.

        Parameters
        ----------
        transforms: Dict[str, str]
            A map of column name to a SQL expression to use to calculate the
            value of the new column. These expressions will be evaluated for
            each row in the table, and can reference existing columns.
        """
        raise NotImplementedError

    async def alter_columns(self, alterations: Iterable[Dict[str, Any]]):
        """
        Alter column names and nullability.

        This is not yet available in LanceDB Cloud.

        Parameters
        ----------
        alterations : Iterable[Dict[str, Any]]
            A sequence of dictionaries, each with the following keys:

            - "path": str
              The column path to alter. For a top-level column, this is the
              name. For a nested column, this is the dot-separated path, e.g.
              "a.b.c".
            - "name": str, optional
              The new name of the column. If not specified, the column name is
              not changed.
            - "nullable": bool, optional
              Whether the column should be nullable. If not specified, the
              column nullability is not changed. Only non-nullable columns can
              be changed to nullable. Currently, you cannot change a nullable
              column to non-nullable.
        """
        raise NotImplementedError

    async def drop_columns(self, columns: Iterable[str]):
        """
        Drop columns from the table.

        This is not yet available in LanceDB Cloud.

        Parameters
        ----------
        columns : Iterable[str]
            The names of the columns to drop.
        """
        raise NotImplementedError
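A hedged sketch of the schema-evolution methods documented above, assuming an implementation where they are available (they raise NotImplementedError on this base class); the column names and expressions are illustrative:

        await table.add_columns({"double_id": "id * 2"})  # SQL expression per row
        await table.alter_columns([{"path": "double_id", "name": "id_x2"}])
        await table.drop_columns(["id_x2"])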
    async def version(self) -> int:
        """
        Retrieve the version of the table.

        LanceDb supports versioning. Every operation that modifies the table
        increases the version. As long as a version hasn't been deleted you
        can `checkout` that version to view the data at that point. In
        addition, you can `restore` the version to replace the current table
        with a previous version.
        """
        return await self._inner.version()

    async def checkout(self, version):
        """
        Checks out a specific version of the Table.

        Any read operation on the table will now access the data at the
        checked out version. As a consequence, calling this method will
        disable any read consistency interval that was previously set.

        This is a read-only operation that turns the table into a sort of
        "view" or "detached head". Other table instances will not be affected.
        To make the change permanent you can use the `restore` method.

        Any operation that modifies the table will fail while the table is in
        a checked out state.

        To return the table to a normal state use `checkout_latest`.
        """
        await self._inner.checkout(version)

    async def checkout_latest(self):
        """
        Ensures the table is pointing at the latest version.

        This can be used to manually update a table when the
        read_consistency_interval is None.
        It can also be used to undo a `checkout` operation.
        """
        await self._inner.checkout_latest()

    async def restore(self):
        """
        Restore the table to the currently checked out version.

        This operation will fail if checkout has not been called previously.

        This operation will overwrite the latest version of the table with a
        previous version. Any changes made since the checked out version will
        no longer be visible.

        Once the operation concludes the table will no longer be in a checked
        out state and the read_consistency_interval, if any, will apply.
        """
        await self._inner.restore()

    async def list_indices(self) -> List[IndexConfig]:
        """
        List all indices that have been created with `create_index`.
        """
        return await self._inner.list_indices()
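A hedged time-travel sketch tying these four methods together (a condensed version of the test_time_travel test further down); assumes an AsyncTable `table` that already has some data:

        v = await table.version()
        await table.add([{"id": 1}])
        await table.checkout(v)          # read-only "detached head" at version v
        await table.checkout_latest()    # back to normal reads
        await table.checkout(v)
        await table.restore()            # make version v the new latest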
@@ -26,6 +26,18 @@ import pyarrow as pa
import pyarrow.fs as pa_fs


def safe_import_adlfs():
    try:
        import adlfs

        return adlfs
    except ImportError:
        return None


adlfs = safe_import_adlfs()


def get_uri_scheme(uri: str) -> str:
    """
    Get the scheme of a URI. If the URI does not have a scheme, assume it is a file URI.
@@ -92,6 +104,17 @@ def fs_from_uri(uri: str) -> Tuple[pa_fs.FileSystem, str]:
        path = get_uri_location(uri)
        return fs, path

    elif get_uri_scheme(uri) == "az" and adlfs is not None:
        az_blob_fs = adlfs.AzureBlobFileSystem(
            account_name=os.environ.get("AZURE_STORAGE_ACCOUNT_NAME"),
            account_key=os.environ.get("AZURE_STORAGE_ACCOUNT_KEY"),
        )

        fs = pa_fs.PyFileSystem(pa_fs.FSSpecHandler(az_blob_fs))

        path = get_uri_location(uri)
        return fs, path

    return pa_fs.FileSystem.from_uri(uri)
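A hedged sketch of how the new Azure branch might be exercised; the account values and container path are placeholders, and adlfs must be installed for the branch above to be taken:

import os
import lancedb

os.environ["AZURE_STORAGE_ACCOUNT_NAME"] = "<account>"  # placeholder
os.environ["AZURE_STORAGE_ACCOUNT_KEY"] = "<key>"       # placeholder
db = lancedb.connect("az://my_blob/dataset")  # routed through AzureBlobFileSystem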
@@ -69,7 +69,7 @@ class _Events:
        self.throttled_event_names = ["search_table"]
        self.throttled_events = set()
        self.max_events = 5  # max events to store in memory
        self.rate_limit = 60.0 * 5  # rate limit (seconds)
        self.rate_limit = 60.0 * 60.0  # rate limit (seconds)
        self.time = 0.0

        if is_git_dir():
@@ -11,6 +11,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import re
from datetime import timedelta

import lancedb
import numpy as np
import pandas as pd
@@ -182,6 +185,10 @@ async def test_table_names_async(tmp_path):
    db = await lancedb.connect_async(tmp_path)
    assert await db.table_names() == ["test1", "test2", "test3"]

    assert await db.table_names(limit=1) == ["test1"]
    assert await db.table_names(start_after="test1", limit=1) == ["test2"]
    assert await db.table_names(start_after="test1") == ["test2", "test3"]


def test_create_mode(tmp_path):
    db = lancedb.connect(tmp_path)
@@ -250,6 +257,133 @@ def test_create_exist_ok(tmp_path):
        db.create_table("test", schema=bad_schema, exist_ok=True)


@pytest.mark.asyncio
async def test_connect(tmp_path):
    db = await lancedb.connect_async(tmp_path)
    assert str(db) == f"NativeDatabase(uri={tmp_path}, read_consistency_interval=None)"

    db = await lancedb.connect_async(
        tmp_path, read_consistency_interval=timedelta(seconds=5)
    )
    assert str(db) == f"NativeDatabase(uri={tmp_path}, read_consistency_interval=5s)"


@pytest.mark.asyncio
async def test_close(tmp_path):
    db = await lancedb.connect_async(tmp_path)
    assert db.is_open()
    db.close()
    assert not db.is_open()

    with pytest.raises(RuntimeError, match="is closed"):
        await db.table_names()


@pytest.mark.asyncio
async def test_create_mode_async(tmp_path):
    db = await lancedb.connect_async(tmp_path)
    data = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    await db.create_table("test", data=data)

    with pytest.raises(RuntimeError):
        await db.create_table("test", data=data)

    new_data = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["fizz", "buzz"],
            "price": [10.0, 20.0],
        }
    )
    _tbl = await db.create_table("test", data=new_data, mode="overwrite")

    # MIGRATION: to_pandas() is not available in async
    # assert tbl.to_pandas().item.tolist() == ["fizz", "buzz"]


@pytest.mark.asyncio
async def test_create_exist_ok_async(tmp_path):
    db = await lancedb.connect_async(tmp_path)
    data = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    tbl = await db.create_table("test", data=data)

    with pytest.raises(RuntimeError):
        await db.create_table("test", data=data)

    # open the table but don't add more rows
    tbl2 = await db.create_table("test", data=data, exist_ok=True)
    assert tbl.name == tbl2.name
    assert await tbl.schema() == await tbl2.schema()

    schema = pa.schema(
        [
            pa.field("vector", pa.list_(pa.float32(), list_size=2)),
            pa.field("item", pa.utf8()),
            pa.field("price", pa.float64()),
        ]
    )
    tbl3 = await db.create_table("test", schema=schema, exist_ok=True)
    assert await tbl3.schema() == schema

    # Migration: When creating a table that already exists but the schema is
    # different, it should raise an error.
    # bad_schema = pa.schema(
    #     [
    #         pa.field("vector", pa.list_(pa.float32(), list_size=2)),
    #         pa.field("item", pa.utf8()),
    #         pa.field("price", pa.float64()),
    #         pa.field("extra", pa.float32()),
    #     ]
    # )
    # with pytest.raises(ValueError):
    #     await db.create_table("test", schema=bad_schema, exist_ok=True)


@pytest.mark.asyncio
async def test_open_table(tmp_path):
    db = await lancedb.connect_async(tmp_path)
    data = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    await db.create_table("test", data=data)

    tbl = await db.open_table("test")
    assert tbl.name == "test"
    assert (
        re.search(
            r"NativeTable\(test, uri=.*test\.lance, read_consistency_interval=None\)",
            str(tbl),
        )
        is not None
    )
    assert await tbl.schema() == pa.schema(
        {
            "vector": pa.list_(pa.float32(), list_size=2),
            "item": pa.utf8(),
            "price": pa.float64(),
        }
    )

    with pytest.raises(ValueError, match="was not found"):
        await db.open_table("does_not_exist")


def test_delete_table(tmp_path):
    db = lancedb.connect(tmp_path)
    data = pd.DataFrame(
@@ -11,6 +11,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import List, Union

import lance
import lancedb
@@ -23,6 +24,8 @@ from lancedb.embeddings import (
    EmbeddingFunctionRegistry,
    with_embeddings,
)
from lancedb.embeddings.base import TextEmbeddingFunction
from lancedb.embeddings.registry import get_registry, register
from lancedb.pydantic import LanceModel, Vector

|
||||
table.add([{"text": "hello world"}])
|
||||
table.add([{"text": "hello world"}])
|
||||
assert len(table) == 2
|
||||
|
||||
|
||||
def test_add_optional_vector(tmp_path):
|
||||
@register("mock-embedding")
|
||||
class MockEmbeddingFunction(TextEmbeddingFunction):
|
||||
def ndims(self):
|
||||
return 128
|
||||
|
||||
def generate_embeddings(
|
||||
self, texts: Union[List[str], np.ndarray]
|
||||
) -> List[np.array]:
|
||||
"""
|
||||
Generate the embeddings for the given texts
|
||||
"""
|
||||
return [np.random.randn(self.ndims()).tolist() for _ in range(len(texts))]
|
||||
|
||||
registry = get_registry()
|
||||
model = registry.get("mock-embedding").create()
|
||||
|
||||
class LanceSchema(LanceModel):
|
||||
id: str
|
||||
vector: Vector(model.ndims()) = model.VectorField(default=None)
|
||||
text: str = model.SourceField()
|
||||
|
||||
db = lancedb.connect(tmp_path)
|
||||
tbl = db.create_table("optional_vector", schema=LanceSchema)
|
||||
|
||||
# add works
|
||||
expected = LanceSchema(id="id", text="text")
|
||||
tbl.add([expected])
|
||||
assert not (np.abs(tbl.to_pandas()["vector"][0]) < 1e-6).all()
|
||||
|
||||
@@ -137,7 +137,11 @@ def test_search_index_with_filter(table):

    # no duckdb
    with mock.patch("builtins.__import__", side_effect=import_mock):
        rs = table.search("puppy").where("id=1").limit(10).to_list()
        rs = table.search("puppy").where("id=1").limit(10)
        # test schema
        assert rs.to_arrow().drop("score").schema.equals(table.schema)

        rs = rs.to_list()
        for r in rs:
            assert r["id"] == 1

@@ -147,6 +151,10 @@ def test_search_index_with_filter(table):
        assert r["id"] == 1

    assert rs == rs2
    rs = table.search("puppy").where("id=1").with_row_id(True).limit(10).to_list()
    for r in rs:
        assert r["id"] == 1
        assert r["_rowid"] is not None


def test_null_input(table):
@@ -169,10 +177,18 @@ def test_syntax(table):
    table.create_fts_index("text")
    with pytest.raises(ValueError, match="Syntax Error"):
        table.search("they could have been dogs OR cats").limit(10).to_list()

    # these should work

    # terms queries
    table.search('"they could have been dogs" OR cats').limit(10).to_list()
    table.search("(they AND could) OR (have AND been AND dogs) OR cats").limit(
        10
    ).to_list()

    # phrase queries
    table.search("they could have been dogs OR cats").phrase_query().limit(10).to_list()
    # this should work
    table.search('"they could have been dogs OR cats"').limit(10).to_list()
    # this should work too
    table.search('''"the cats OR dogs were not really 'pets' at all"''').limit(
        10
    ).to_list()
69  python/python/tests/test_index.py  Normal file
@@ -0,0 +1,69 @@
from datetime import timedelta

import pyarrow as pa
import pytest
import pytest_asyncio
from lancedb import AsyncConnection, AsyncTable, connect_async
from lancedb.index import BTree, IvfPq


@pytest_asyncio.fixture
async def db_async(tmp_path) -> AsyncConnection:
    return await connect_async(tmp_path, read_consistency_interval=timedelta(seconds=0))


def sample_fixed_size_list_array(nrows, dim):
    vector_data = pa.array([float(i) for i in range(dim * nrows)], pa.float32())
    return pa.FixedSizeListArray.from_arrays(vector_data, dim)


DIM = 8
NROWS = 256


@pytest_asyncio.fixture
async def some_table(db_async):
    data = pa.Table.from_pydict(
        {
            "id": list(range(256)),
            "vector": sample_fixed_size_list_array(NROWS, DIM),
        }
    )
    return await db_async.create_table(
        "some_table",
        data,
    )


@pytest.mark.asyncio
async def test_create_scalar_index(some_table: AsyncTable):
    # Can create
    await some_table.create_index("id")
    # Can recreate if replace=True
    await some_table.create_index("id", replace=True)
    indices = await some_table.list_indices()
    assert len(indices) == 1
    assert indices[0].index_type == "BTree"
    assert indices[0].columns == ["id"]
    # Can't recreate if replace=False
    with pytest.raises(RuntimeError, match="already exists"):
        await some_table.create_index("id", replace=False)
    # can also specify index type
    await some_table.create_index("id", config=BTree())


@pytest.mark.asyncio
async def test_create_vector_index(some_table: AsyncTable):
    # Can create
    await some_table.create_index("vector")
    # Can recreate if replace=True
    await some_table.create_index("vector", replace=True)
    # Can't recreate if replace=False
    with pytest.raises(RuntimeError, match="already exists"):
        await some_table.create_index("vector", replace=False)
    # Can also specify index type
    await some_table.create_index("vector", config=IvfPq(num_partitions=100))
    indices = await some_table.list_indices()
    assert len(indices) == 1
    assert indices[0].index_type == "IvfPq"
    assert indices[0].columns == ["vector"]
@@ -16,16 +16,35 @@ import os
import lancedb
import pytest

# AWS:
# You need to set up AWS credentials and a base path to run this test. Example:
# AWS_PROFILE=default TEST_S3_BASE_URL=s3://my_bucket/dataset pytest tests/test_io.py
#
# Azure:
# You need to set up Azure credentials and a base path to run this test. Example:
# export AZURE_STORAGE_ACCOUNT_NAME="<account>"
# export AZURE_STORAGE_ACCOUNT_KEY="<key>"
# export REMOTE_BASE_URL=az://my_blob/dataset
# pytest tests/test_io.py


@pytest.fixture(autouse=True, scope="module")
def setup():
    yield

    if remote_url := os.environ.get("REMOTE_BASE_URL"):
        db = lancedb.connect(remote_url)

        for table in db.table_names():
            db.drop_table(table)


@pytest.mark.skipif(
    (os.environ.get("TEST_S3_BASE_URL") is None),
    reason="please setup s3 base url",
    (os.environ.get("REMOTE_BASE_URL") is None),
    reason="please setup remote base url",
)
def test_s3_io():
    db = lancedb.connect(os.environ.get("TEST_S3_BASE_URL"))
def test_remote_io():
    db = lancedb.connect(os.environ.get("REMOTE_BASE_URL"))
    assert db.table_names() == []

    table = db.create_table(
@@ -94,6 +94,17 @@ def test_query_builder(table):
    assert all(np.array(rs[0]["vector"]) == [1, 2])


def test_dynamic_projection(table):
    rs = (
        LanceVectorQueryBuilder(table, [0, 0], "vector")
        .limit(1)
        .select({"id": "id", "id2": "id * 2"})
        .to_list()
    )
    assert rs[0]["id"] == 1
    assert rs[0]["id2"] == 2


def test_query_builder_with_filter(table):
    rs = LanceVectorQueryBuilder(table, [0, 0], "vector").where("id = 2").to_list()
    assert rs[0]["id"] == 2
@@ -26,8 +26,9 @@ import pandas as pd
import polars as pl
import pyarrow as pa
import pytest
import pytest_asyncio
from lancedb.conftest import MockTextEmbeddingFunction
from lancedb.db import LanceDBConnection
from lancedb.db import AsyncConnection, LanceDBConnection
from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector
from lancedb.table import LanceTable
@@ -49,6 +50,13 @@ def db(tmp_path) -> MockDB:
    return MockDB(tmp_path)


@pytest_asyncio.fixture
async def db_async(tmp_path) -> AsyncConnection:
    return await lancedb.connect_async(
        tmp_path, read_consistency_interval=timedelta(seconds=0)
    )

def test_basic(db):
    ds = LanceTable.create(
        db,
@@ -65,6 +73,35 @@ def test_basic(db):
    assert table.to_lance().to_table() == ds.to_table()


@pytest.mark.asyncio
async def test_close(db_async: AsyncConnection):
    table = await db_async.create_table("some_table", data=[{"id": 0}])
    assert table.is_open()
    table.close()
    assert not table.is_open()

    with pytest.raises(Exception, match="Table some_table is closed"):
        await table.count_rows()
    assert str(table) == "ClosedTable(some_table)"


@pytest.mark.asyncio
async def test_update_async(db_async: AsyncConnection):
    table = await db_async.create_table("some_table", data=[{"id": 0}])
    assert await table.count_rows("id == 0") == 1
    assert await table.count_rows("id == 7") == 0
    await table.update({"id": 7})
    assert await table.count_rows("id == 7") == 1
    assert await table.count_rows("id == 0") == 0
    await table.add([{"id": 2}])
    await table.update(where="id % 2 == 0", updates_sql={"id": "5"})
    assert await table.count_rows("id == 7") == 1
    assert await table.count_rows("id == 2") == 0
    assert await table.count_rows("id == 5") == 1
    await table.update({"id": 10}, where="id == 5")
    assert await table.count_rows("id == 10") == 1

def test_create_table(db):
    schema = pa.schema(
        [
@@ -186,6 +223,25 @@ def test_add_pydantic_model(db):
    assert len(really_flattened.columns) == 7


@pytest.mark.asyncio
async def test_add_async(db_async: AsyncConnection):
    table = await db_async.create_table(
        "test",
        data=[
            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
            {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
        ],
    )
    assert await table.count_rows() == 2
    await table.add(
        data=[
            {"vector": [10.0, 11.0], "item": "baz", "price": 30.0},
        ],
    )
    table = await db_async.open_table("test")
    assert await table.count_rows() == 3

def test_polars(db):
    data = {
        "vector": [[3.1, 4.1], [5.9, 26.5]],
@@ -854,8 +910,17 @@ def test_hybrid_search(db, tmp_path):
    result3 = table.search(
        "Our father who art in heaven", query_type="hybrid"
    ).to_pydantic(MyTable)

    assert result1 == result3

    # with post filters
    result = (
        table.search("Arrrrggghhhhhhh", query_type="hybrid")
        .where("text='Arrrrggghhhhhhh'")
        .to_list()
    )
    assert len(result) == 1

@pytest.mark.parametrize(
    "consistency_interval", [None, timedelta(seconds=0), timedelta(seconds=0.1)]
@@ -926,3 +991,37 @@ def test_drop_columns(tmp_path):
    table = LanceTable.create(db, "my_table", data=data)
    table.drop_columns(["category"])
    assert table.to_arrow().column_names == ["id"]


@pytest.mark.asyncio
async def test_time_travel(db_async: AsyncConnection):
    # Setup
    table = await db_async.create_table("some_table", data=[{"id": 0}])
    version = await table.version()
    await table.add([{"id": 1}])
    assert await table.count_rows() == 2
    # Make sure we can rewind
    await table.checkout(version)
    assert await table.count_rows() == 1
    # Can't add data in time travel mode
    with pytest.raises(
        ValueError,
        match="table cannot be modified when a specific version is checked out",
    ):
        await table.add([{"id": 2}])
    # Can go back to normal mode
    await table.checkout_latest()
    assert await table.count_rows() == 2
    # Should be able to add data again
    await table.add([{"id": 3}])
    assert await table.count_rows() == 3
    # Now checkout and restore
    await table.checkout(version)
    await table.restore()
    assert await table.count_rows() == 1
    # Should be able to add data
    await table.add([{"id": 4}])
    assert await table.count_rows() == 2
    # Can't use restore if not checked out
    with pytest.raises(ValueError, match="checkout before running restore"):
        await table.restore()
@@ -12,25 +12,129 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::time::Duration;
use std::{sync::Arc, time::Duration};

use lancedb::connection::Connection as LanceConnection;
use pyo3::{pyclass, pyfunction, pymethods, PyAny, PyRef, PyResult, Python};
use arrow::{datatypes::Schema, ffi_stream::ArrowArrayStreamReader, pyarrow::FromPyArrow};
use lancedb::connection::{Connection as LanceConnection, CreateTableMode};
use pyo3::{
    exceptions::{PyRuntimeError, PyValueError},
    pyclass, pyfunction, pymethods, PyAny, PyRef, PyResult, Python,
};
use pyo3_asyncio::tokio::future_into_py;

use crate::error::PythonErrorExt;
use crate::{error::PythonErrorExt, table::Table};

#[pyclass]
pub struct Connection {
    inner: LanceConnection,
    inner: Option<LanceConnection>,
}

impl Connection {
    pub(crate) fn new(inner: LanceConnection) -> Self {
        Self { inner: Some(inner) }
    }

    fn get_inner(&self) -> PyResult<&LanceConnection> {
        self.inner
            .as_ref()
            .ok_or_else(|| PyRuntimeError::new_err("Connection is closed"))
    }
}

impl Connection {
    fn parse_create_mode_str(mode: &str) -> PyResult<CreateTableMode> {
        match mode {
            "create" => Ok(CreateTableMode::Create),
            "overwrite" => Ok(CreateTableMode::Overwrite),
            "exist_ok" => Ok(CreateTableMode::exist_ok(|builder| builder)),
            _ => Err(PyValueError::new_err(format!("Invalid mode {}", mode))),
        }
    }
}

#[pymethods]
impl Connection {
    pub fn table_names(self_: PyRef<'_, Self>) -> PyResult<&PyAny> {
        let inner = self_.inner.clone();
    fn __repr__(&self) -> String {
        match &self.inner {
            Some(inner) => inner.to_string(),
            None => "ClosedConnection".to_string(),
        }
    }

    fn is_open(&self) -> bool {
        self.inner.is_some()
    }

    fn close(&mut self) {
        self.inner.take();
    }

    pub fn table_names(
        self_: PyRef<'_, Self>,
        start_after: Option<String>,
        limit: Option<u32>,
    ) -> PyResult<&PyAny> {
        let inner = self_.get_inner()?.clone();
        let mut op = inner.table_names();
        if let Some(start_after) = start_after {
            op = op.start_after(start_after);
        }
        if let Some(limit) = limit {
            op = op.limit(limit);
        }
        future_into_py(self_.py(), async move { op.execute().await.infer_error() })
    }

    pub fn create_table<'a>(
        self_: PyRef<'a, Self>,
        name: String,
        mode: &str,
        data: &PyAny,
    ) -> PyResult<&'a PyAny> {
        let inner = self_.get_inner()?.clone();

        let mode = Self::parse_create_mode_str(mode)?;

        let batches = Box::new(ArrowArrayStreamReader::from_pyarrow(data)?);
        future_into_py(self_.py(), async move {
            inner.table_names().await.infer_error()
            let table = inner
                .create_table(name, batches)
                .mode(mode)
                .execute()
                .await
                .infer_error()?;
            Ok(Table::new(table))
        })
    }

    pub fn create_empty_table<'a>(
        self_: PyRef<'a, Self>,
        name: String,
        mode: &str,
        schema: &PyAny,
    ) -> PyResult<&'a PyAny> {
        let inner = self_.get_inner()?.clone();

        let mode = Self::parse_create_mode_str(mode)?;

        let schema = Schema::from_pyarrow(schema)?;

        future_into_py(self_.py(), async move {
            let table = inner
                .create_empty_table(name, Arc::new(schema))
                .mode(mode)
                .execute()
                .await
                .infer_error()?;
            Ok(Table::new(table))
        })
    }

    pub fn open_table(self_: PyRef<'_, Self>, name: String) -> PyResult<&PyAny> {
        let inner = self_.get_inner()?.clone();
        future_into_py(self_.py(), async move {
            let table = inner.open_table(&name).execute().await.infer_error()?;
            Ok(Table::new(table))
        })
    }
}
@@ -59,8 +163,6 @@ pub fn connect(
        let read_consistency_interval = Duration::from_secs_f64(read_consistency_interval);
        builder = builder.read_consistency_interval(read_consistency_interval);
    }
    Ok(Connection {
        inner: builder.execute().await.infer_error()?,
    })
    Ok(Connection::new(builder.execute().await.infer_error()?))
    })
}
@@ -13,7 +13,7 @@
// limitations under the License.

use pyo3::{
    exceptions::{PyOSError, PyRuntimeError, PyValueError},
    exceptions::{PyIOError, PyNotImplementedError, PyOSError, PyRuntimeError, PyValueError},
    PyResult,
};

@@ -35,14 +35,21 @@ impl<T> PythonErrorExt<T> for std::result::Result<T, LanceError> {
        match &self {
            Ok(_) => Ok(self.unwrap()),
            Err(err) => match err {
                LanceError::InvalidInput { .. } => self.value_error(),
                LanceError::InvalidTableName { .. } => self.value_error(),
                LanceError::TableNotFound { .. } => self.value_error(),
                LanceError::TableAlreadyExists { .. } => self.runtime_error(),
                LanceError::CreateDir { .. } => self.os_error(),
                LanceError::Store { .. } => self.runtime_error(),
                LanceError::Lance { .. } => self.runtime_error(),
                LanceError::Schema { .. } => self.value_error(),
                LanceError::CreateDir { .. } => self.os_error(),
                LanceError::TableAlreadyExists { .. } => self.runtime_error(),
                LanceError::ObjectStore { .. } => Err(PyIOError::new_err(err.to_string())),
                LanceError::Lance { .. } => self.runtime_error(),
                LanceError::Runtime { .. } => self.runtime_error(),
                LanceError::Http { .. } => self.runtime_error(),
                LanceError::Arrow { .. } => self.runtime_error(),
                LanceError::NotSupported { .. } => {
                    Err(PyNotImplementedError::new_err(err.to_string()))
                }
                LanceError::Other { .. } => self.runtime_error(),
            },
        }
    }
}
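A hedged sketch of how this error mapping surfaces on the Python side (consistent with the tests above: a duplicate create raises RuntimeError, a missing table raises ValueError); the table names are illustrative:

import asyncio
import lancedb

async def demo_errors():
    db = await lancedb.connect_async("./.lancedb")
    await db.create_table("t", data=[{"id": 1}])
    try:
        # TableAlreadyExists maps to RuntimeError per the match arm above
        await db.create_table("t", data=[{"id": 1}])
    except RuntimeError as e:
        print("already exists:", e)
    try:
        # TableNotFound maps to ValueError
        await db.open_table("missing")
    except ValueError as e:
        print("not found:", e)

asyncio.run(demo_errors())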
109  python/src/index.rs  Normal file
@@ -0,0 +1,109 @@
// Copyright 2024 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Mutex;

use lancedb::{
    index::{scalar::BTreeIndexBuilder, vector::IvfPqIndexBuilder, Index as LanceDbIndex},
    DistanceType,
};
use pyo3::{
    exceptions::{PyRuntimeError, PyValueError},
    pyclass, pymethods, PyResult,
};

#[pyclass]
pub struct Index {
    inner: Mutex<Option<LanceDbIndex>>,
}

impl Index {
    pub fn consume(&self) -> PyResult<LanceDbIndex> {
        self.inner
            .lock()
            .unwrap()
            .take()
            .ok_or_else(|| PyRuntimeError::new_err("cannot use an Index more than once"))
    }
}

#[pymethods]
impl Index {
    #[staticmethod]
    pub fn ivf_pq(
        distance_type: Option<String>,
        num_partitions: Option<u32>,
        num_sub_vectors: Option<u32>,
        max_iterations: Option<u32>,
        sample_rate: Option<u32>,
    ) -> PyResult<Self> {
        let mut ivf_pq_builder = IvfPqIndexBuilder::default();
        if let Some(distance_type) = distance_type {
            let distance_type = match distance_type.as_str() {
                "l2" => Ok(DistanceType::L2),
                "cosine" => Ok(DistanceType::Cosine),
                "dot" => Ok(DistanceType::Dot),
                _ => Err(PyValueError::new_err(format!(
                    "Invalid distance type '{}'. Must be one of l2, cosine, or dot",
                    distance_type
                ))),
            }?;
            ivf_pq_builder = ivf_pq_builder.distance_type(distance_type);
        }
        if let Some(num_partitions) = num_partitions {
            ivf_pq_builder = ivf_pq_builder.num_partitions(num_partitions);
        }
        if let Some(num_sub_vectors) = num_sub_vectors {
            ivf_pq_builder = ivf_pq_builder.num_sub_vectors(num_sub_vectors);
        }
        if let Some(max_iterations) = max_iterations {
            ivf_pq_builder = ivf_pq_builder.max_iterations(max_iterations);
        }
        if let Some(sample_rate) = sample_rate {
            ivf_pq_builder = ivf_pq_builder.sample_rate(sample_rate);
        }
        Ok(Self {
            inner: Mutex::new(Some(LanceDbIndex::IvfPq(ivf_pq_builder))),
        })
    }

    #[staticmethod]
    pub fn btree() -> PyResult<Self> {
        Ok(Self {
            inner: Mutex::new(Some(LanceDbIndex::BTree(BTreeIndexBuilder::default()))),
        })
    }
}

#[pyclass(get_all)]
/// A description of an index currently configured on a column
pub struct IndexConfig {
    /// The type of the index
    pub index_type: String,
    /// The columns in the index
    ///
    /// Currently this is always a list of size 1. In the future there may
    /// be more columns to represent composite indices.
    pub columns: Vec<String>,
}

impl From<lancedb::index::IndexConfig> for IndexConfig {
    fn from(value: lancedb::index::IndexConfig) -> Self {
        let index_type = format!("{:?}", value.index_type);
        Self {
            index_type,
            columns: value.columns,
        }
    }
}
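A hedged Python-side sketch of the options exposed above; the parameter names mirror the Rust ivf_pq staticmethod and should be treated as assumptions, and `tbl` is an AsyncTable with a "vector" column as in test_index.py:

from lancedb.index import BTree, IvfPq

await tbl.create_index("id", config=BTree())
await tbl.create_index(
    "vector",
    config=IvfPq(distance_type="cosine", num_partitions=100, num_sub_vectors=2),
)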
@@ -14,10 +14,15 @@

use connection::{connect, Connection};
use env_logger::Env;
use index::{Index, IndexConfig};
use pyo3::{pymodule, types::PyModule, wrap_pyfunction, PyResult, Python};
use table::Table;

pub mod connection;
pub(crate) mod error;
pub mod error;
pub mod index;
pub mod table;
pub mod util;

#[pymodule]
pub fn _lancedb(_py: Python, m: &PyModule) -> PyResult<()> {
@@ -26,6 +31,9 @@ pub fn _lancedb(_py: Python, m: &PyModule) -> PyResult<()> {
        .write_style("LANCEDB_LOG_STYLE");
    env_logger::init_from_env(env);
    m.add_class::<Connection>()?;
    m.add_class::<Table>()?;
    m.add_class::<Index>()?;
    m.add_class::<IndexConfig>()?;
    m.add_function(wrap_pyfunction!(connect, m)?)?;
    m.add("__version__", env!("CARGO_PKG_VERSION"))?;
    Ok(())
182  python/src/table.rs  Normal file
@@ -0,0 +1,182 @@
use arrow::{
    ffi_stream::ArrowArrayStreamReader,
    pyarrow::{FromPyArrow, ToPyArrow},
};
use lancedb::table::{AddDataMode, Table as LanceDbTable};
use pyo3::{
    exceptions::{PyRuntimeError, PyValueError},
    pyclass, pymethods,
    types::{PyDict, PyString},
    PyAny, PyRef, PyResult, Python,
};
use pyo3_asyncio::tokio::future_into_py;

use crate::{
    error::PythonErrorExt,
    index::{Index, IndexConfig},
};

#[pyclass]
pub struct Table {
    // We keep a copy of the name to use if the inner table is dropped
    name: String,
    inner: Option<LanceDbTable>,
}

impl Table {
    pub(crate) fn new(inner: LanceDbTable) -> Self {
        Self {
            name: inner.name().to_string(),
            inner: Some(inner),
        }
    }
}

impl Table {
    fn inner_ref(&self) -> PyResult<&LanceDbTable> {
        self.inner
            .as_ref()
            .ok_or_else(|| PyRuntimeError::new_err(format!("Table {} is closed", self.name)))
    }
}

#[pymethods]
impl Table {
    pub fn name(&self) -> String {
        self.name.clone()
    }

    pub fn is_open(&self) -> bool {
        self.inner.is_some()
    }

    pub fn close(&mut self) {
        self.inner.take();
    }

    pub fn schema(self_: PyRef<'_, Self>) -> PyResult<&PyAny> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            let schema = inner.schema().await.infer_error()?;
            Python::with_gil(|py| schema.to_pyarrow(py))
        })
    }

    pub fn add<'a>(self_: PyRef<'a, Self>, data: &PyAny, mode: String) -> PyResult<&'a PyAny> {
        let batches = Box::new(ArrowArrayStreamReader::from_pyarrow(data)?);
        let mut op = self_.inner_ref()?.add(batches);
        if mode == "append" {
            op = op.mode(AddDataMode::Append);
        } else if mode == "overwrite" {
            op = op.mode(AddDataMode::Overwrite);
        } else {
            return Err(PyValueError::new_err(format!("Invalid mode: {}", mode)));
        }

        future_into_py(self_.py(), async move {
            op.execute().await.infer_error()?;
            Ok(())
        })
    }

    pub fn update<'a>(
        self_: PyRef<'a, Self>,
        updates: &PyDict,
        r#where: Option<String>,
    ) -> PyResult<&'a PyAny> {
        let mut op = self_.inner_ref()?.update();
        if let Some(only_if) = r#where {
            op = op.only_if(only_if);
        }
        for (column_name, value) in updates.into_iter() {
            let column_name: &PyString = column_name.downcast()?;
            let column_name = column_name.to_str()?.to_string();
            let value: &PyString = value.downcast()?;
            let value = value.to_str()?.to_string();
            op = op.column(column_name, value);
        }
        future_into_py(self_.py(), async move {
            op.execute().await.infer_error()?;
            Ok(())
        })
    }

    pub fn count_rows(self_: PyRef<'_, Self>, filter: Option<String>) -> PyResult<&PyAny> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            inner.count_rows(filter).await.infer_error()
        })
    }

    pub fn create_index<'a>(
        self_: PyRef<'a, Self>,
        column: String,
        index: Option<&Index>,
        replace: Option<bool>,
    ) -> PyResult<&'a PyAny> {
        let index = if let Some(index) = index {
            index.consume()?
        } else {
            lancedb::index::Index::Auto
        };
        let mut op = self_.inner_ref()?.create_index(&[column], index);
        if let Some(replace) = replace {
            op = op.replace(replace);
        }

        future_into_py(self_.py(), async move {
            op.execute().await.infer_error()?;
            Ok(())
        })
    }

    pub fn list_indices(self_: PyRef<'_, Self>) -> PyResult<&PyAny> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            Ok(inner
                .list_indices()
                .await
                .infer_error()?
                .into_iter()
                .map(IndexConfig::from)
                .collect::<Vec<_>>())
        })
    }

    pub fn __repr__(&self) -> String {
        match &self.inner {
            None => format!("ClosedTable({})", self.name),
            Some(inner) => inner.to_string(),
        }
    }

    pub fn version(self_: PyRef<'_, Self>) -> PyResult<&PyAny> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(
            self_.py(),
            async move { inner.version().await.infer_error() },
        )
    }

    pub fn checkout(self_: PyRef<'_, Self>, version: u64) -> PyResult<&PyAny> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            inner.checkout(version).await.infer_error()
        })
    }

    pub fn checkout_latest(self_: PyRef<'_, Self>) -> PyResult<&PyAny> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            inner.checkout_latest().await.infer_error()
        })
    }

    pub fn restore(self_: PyRef<'_, Self>) -> PyResult<&PyAny> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(
            self_.py(),
            async move { inner.restore().await.infer_error() },
        )
    }
}
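A hedged sketch of the add() modes exposed by the binding above; whether the async Python wrapper forwards a `mode` keyword is an assumption, and `table` is an AsyncTable as in the earlier tests:

await table.add([{"id": 10}])                    # mode="append" is the default
await table.add([{"id": 99}], mode="overwrite")  # replaces the existing rows
# any other mode string raises ValueError("Invalid mode: ...") per the code above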
35  python/src/util.rs  Normal file
@@ -0,0 +1,35 @@
use std::sync::Mutex;

use pyo3::{exceptions::PyRuntimeError, PyResult};

/// A wrapper around a rust builder
///
/// Rust builders are often implemented so that the builder methods
/// consume the builder and return a new one. This is not compatible
/// with pyo3, which, being garbage collected, cannot easily obtain
/// ownership of an object.
///
/// This wrapper converts the compile-time safety of rust into runtime
/// errors if any attempt to use the builder happens after it is consumed.
pub struct BuilderWrapper<T> {
    name: String,
    inner: Mutex<Option<T>>,
}

impl<T> BuilderWrapper<T> {
    pub fn new(name: impl AsRef<str>, inner: T) -> Self {
        Self {
            name: name.as_ref().to_string(),
            inner: Mutex::new(Some(inner)),
        }
    }

    pub fn consume<O>(&self, mod_fn: impl FnOnce(T) -> O) -> PyResult<O> {
        let mut inner = self.inner.lock().unwrap();
        let inner_builder = inner.take().ok_or_else(|| {
            PyRuntimeError::new_err(format!("{} has already been consumed", self.name))
        })?;
        let result = mod_fn(inner_builder);
        Ok(result)
    }
}
@@ -1,6 +1,6 @@
[package]
name = "lancedb-node"
version = "0.4.11"
version = "0.4.13"
description = "Serverless, low-latency vector database for AI applications"
license.workspace = true
edition.workspace = true
@@ -8,6 +8,7 @@ repository.workspace = true
keywords.workspace = true
categories.workspace = true
exclude = ["index.node"]
rust-version = "1.75"

[lib]
crate-type = ["cdylib"]

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use lancedb::index::{scalar::BTreeIndexBuilder, Index};
use neon::{
    context::{Context, FunctionContext},
    result::JsResult,
@@ -19,7 +20,6 @@ use neon::{
};

use crate::{error::ResultExt, runtime, table::JsTable};
use lancedb::Table;

pub fn table_create_scalar_index(mut cx: FunctionContext) -> JsResult<JsPromise> {
    let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
@@ -34,11 +34,9 @@ pub fn table_create_scalar_index(mut cx: FunctionContext) -> JsResult<JsPromise>

    rt.spawn(async move {
        let idx_result = table
            .as_native()
            .unwrap()
            .create_index(&[&column])
            .create_index(&[column], Index::BTree(BTreeIndexBuilder::default()))
            .replace(replace)
            .build()
            .execute()
            .await;

        deferred.settle_with(&channel, move |mut cx| {
Some files were not shown because too many files have changed in this diff.