Compare commits

...

28 Commits

Author SHA1 Message Date
Lance Release
27404c8623 Bump version: 0.17.1-beta.7 → 0.17.1 2024-12-24 18:37:28 +00:00
Lance Release
f181c7e77f Bump version: 0.17.1-beta.6 → 0.17.1-beta.7 2024-12-24 18:37:27 +00:00
BubbleCal
e70fd4fecc feat: support IVF_FLAT, binary vectors and hamming distance (#1955)
Binary vectors and hamming distance work only with IVF_FLAT, so this PR
introduces them together.

---------

Signed-off-by: BubbleCal <bubble-cal@outlook.com>
2024-12-24 10:36:20 -08:00
verma nakul
ac0068b80e feat(python): add ignore_missing to the async drop_table() method (#1953)
- feat(db): add `ignore_missing` to async `drop_table` method

Fixes #1951

---------

Co-authored-by: Will Jones <willjones127@gmail.com>
2024-12-24 10:33:47 -08:00
Hezi Zisman
ebac960571 feat(python): add bypass_vector_index to sync api (#1947)
Hi lancedb team,

This PR adds the `bypass_vector_index` logic to the sync API, as
described in [Issue
#535](https://github.com/lancedb/lancedb/issues/535). (Closes #535).

I've implemented it only for the regular vector search. If you think it
should also be supported for FTS, Hybrid, or Empty queries and for the
cloud solution, please let me know, and I’ll be happy to extend it.

Since there’s no `CONTRIBUTING.md` or contribution guidelines, I opted
for the simplest implementation to get this started.

Looking forward to your feedback!

Thanks!

---------

Co-authored-by: Will Jones <willjones127@gmail.com>
2024-12-24 10:33:26 -08:00
Lance Release
59b57055e7 Updating package-lock.json 2024-12-19 19:40:28 +00:00
Lance Release
591c8de8fc Updating package-lock.json 2024-12-19 19:40:13 +00:00
Lance Release
f835ff310f Bump version: 0.14.1-beta.5 → 0.14.1-beta.6 2024-12-19 19:39:41 +00:00
Lance Release
cf8c2edaf4 Bump version: 0.17.1-beta.5 → 0.17.1-beta.6 2024-12-19 19:39:08 +00:00
Will Jones
61a714a459 docs: improve optimization docs (#1957)
* Add `See Also` section to `cleanup_old_files` and `compact_files` so
readers know they're linked to `optimize`.
* Fixes link to `compact_files` arguments
* Improves formatting of note.
2024-12-19 10:55:11 -08:00
Will Jones
5ddd84cec0 feat: upgrade lance to 0.21.0-beta.5 (#1961) 2024-12-19 10:54:59 -08:00
Will Jones
27ef0bb0a2 ci(rust): check MSRV and upgrade toolchain (#1960)
* Upgrades our toolchain file to v1.83.0, since many dependencies now
have MSRV of 1.81.0
* Reverts Rust changes from #1946 that were working around this in a
dumb way
* Adding an MSRV check
* Reduce MSRV back to 1.78.0
2024-12-19 08:43:25 -08:00
Will Jones
25402ba6ec chore: update lockfiles (#1946) 2024-12-18 08:43:33 -08:00
Lance Release
37c359ed40 Updating package-lock.json 2024-12-13 22:38:04 +00:00
Lance Release
06cdf00987 Bump version: 0.14.1-beta.4 → 0.14.1-beta.5 2024-12-13 22:37:41 +00:00
Lance Release
144b7f5d54 Bump version: 0.17.1-beta.4 → 0.17.1-beta.5 2024-12-13 22:37:13 +00:00
LuQQiu
edc9b9adec chore: bump Lance version to v0.21.0-beta.4 (#1939) 2024-12-13 14:36:13 -08:00
Will Jones
d11b2a6975 ci: fix python beta release to publish to fury (#1937)
We have been publishing all releases--even preview ones--to PyPI. This
was because of a faulty bash if statement. This PR fixes that
conditional.
2024-12-13 14:19:14 -08:00
Will Jones
980aa70e2d feat(python): async-sync feature parity on Table (#1914)
### Changes to sync API
* Updated `LanceTable` and `LanceDBConnection` reprs
* Add `storage_options`, `data_storage_version`, and
`enable_v2_manifest_paths` to sync create table API.
* Add `storage_options` to `open_table` in sync API.
* Add `list_indices()` and `index_stats()` to sync API
* `create_table()` will now create only 1 version when data is passed.
Previously it would always create two versions: 1 to create an empty
table and 1 to add data to it.

### Changes to async API
* Add `embedding_functions` to async `create_table()` API.
* Added `head()` to async API

### Refactors
* Refactor index parameters into dataclasses so they are easier to use
from Python
* Moved most tests to use an in-memory DB so we don't need to create so
many temp directories

Closes #1792
Closes #1932

---------

Co-authored-by: Weston Pace <weston.pace@gmail.com>
2024-12-13 12:56:44 -08:00
Lance Release
d83e5a0208 Updating package-lock.json 2024-12-13 05:34:30 +00:00
Lance Release
16a6b9ce8f Bump version: 0.14.1-beta.3 → 0.14.1-beta.4 2024-12-13 05:34:01 +00:00
Lance Release
e3c6213333 Bump version: 0.17.1-beta.3 → 0.17.1-beta.4 2024-12-13 05:33:34 +00:00
Weston Pace
00552439d9 feat: upgrade lance to 0.21.0b3 (#1936) 2024-12-12 21:32:59 -08:00
QianZhu
c0ee370f83 docs: improve schema evolution api examples (#1929) 2024-12-12 10:52:06 -08:00
QianZhu
17e4022045 docs: add faq to cloud doc (#1907)
Co-authored-by: Will Jones <willjones127@gmail.com>
2024-12-12 10:07:03 -08:00
BubbleCal
c3ebac1a92 feat(node): support FTS options in nodejs (#1934)
Closes #1790

---------

Signed-off-by: BubbleCal <bubble-cal@outlook.com>
2024-12-12 08:19:04 -08:00
Lance Release
10f919a0a9 Updating package-lock.json 2024-12-11 19:18:36 +00:00
Lance Release
8af5476395 Bump version: 0.14.1-beta.2 → 0.14.1-beta.3 2024-12-11 19:18:17 +00:00
71 changed files with 2113 additions and 1447 deletions

View File

@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.14.1-beta.2"
+current_version = "0.14.1-beta.6"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.

View File

@@ -97,3 +97,7 @@ jobs:
       if: ${{ !inputs.dry_run && inputs.other }}
       with:
         github_token: ${{ secrets.GITHUB_TOKEN }}
+    - uses: ./.github/workflows/update_package_lock_nodejs
+      if: ${{ !inputs.dry_run && inputs.other }}
+      with:
+        github_token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -571,7 +571,7 @@ jobs:
         uses: actions/checkout@v4
         with:
           ref: main
-          persist-credentials: false
+          token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
           fetch-depth: 0
           lfs: true
       - uses: ./.github/workflows/update_package_lock
@@ -589,7 +589,7 @@ jobs:
         uses: actions/checkout@v4
         with:
           ref: main
-          persist-credentials: false
+          token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
           fetch-depth: 0
           lfs: true
       - uses: ./.github/workflows/update_package_lock_nodejs

View File

@@ -185,7 +185,7 @@ jobs:
           Add-Content $env:GITHUB_PATH "C:\BuildTools\VC\Tools\Llvm\x64\bin"
           # Add MSVC runtime libraries to LIB
           $env:LIB = "C:\BuildTools\VC\Tools\MSVC\$latestVersion\lib\arm64;" +
                      "C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\um\arm64;" +
                      "C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\ucrt\arm64"
           Add-Content $env:GITHUB_ENV "LIB=$env:LIB"
@@ -238,3 +238,41 @@ jobs:
           $env:VCPKG_ROOT = $env:VCPKG_INSTALLATION_ROOT
           cargo build --target aarch64-pc-windows-msvc
           cargo test --target aarch64-pc-windows-msvc
+  msrv:
+    # Check the minimum supported Rust version
+    name: MSRV Check - Rust v${{ matrix.msrv }}
+    runs-on: ubuntu-24.04
+    strategy:
+      matrix:
+        msrv: ["1.78.0"] # This should match up with rust-version in Cargo.toml
+    env:
+      # Need up-to-date compilers for kernels
+      CC: clang-18
+      CXX: clang++-18
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: true
+      - name: Install dependencies
+        run: |
+          sudo apt update
+          sudo apt install -y protobuf-compiler libssl-dev
+      - name: Install ${{ matrix.msrv }}
+        uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ matrix.msrv }}
+      - name: Downgrade dependencies
+        # These packages have newer requirements for MSRV
+        run: |
+          cargo update -p aws-sdk-bedrockruntime --precise 1.64.0
+          cargo update -p aws-sdk-dynamodb --precise 1.55.0
+          cargo update -p aws-config --precise 1.5.10
+          cargo update -p aws-sdk-kms --precise 1.51.0
+          cargo update -p aws-sdk-s3 --precise 1.65.0
+          cargo update -p aws-sdk-sso --precise 1.50.0
+          cargo update -p aws-sdk-ssooidc --precise 1.51.0
+          cargo update -p aws-sdk-sts --precise 1.51.0
+          cargo update -p home --precise 0.5.9
+      - name: cargo +${{ matrix.msrv }} check
+        run: cargo check --workspace --tests --benches --all-features

View File

@@ -22,7 +22,7 @@ runs:
       shell: bash
       id: choose_repo
       run: |
-        if [ ${{ github.ref }} == "*beta*" ]; then
+        if [[ ${{ github.ref }} == *beta* ]]; then
           echo "repo=fury" >> $GITHUB_OUTPUT
         else
           echo "repo=pypi" >> $GITHUB_OUTPUT
@@ -33,7 +33,7 @@ runs:
         FURY_TOKEN: ${{ inputs.fury_token }}
         PYPI_TOKEN: ${{ inputs.pypi_token }}
       run: |
-        if [ ${{ steps.choose_repo.outputs.repo }} == "fury" ]; then
+        if [[ ${{ steps.choose_repo.outputs.repo }} == fury ]]; then
           WHEEL=$(ls target/wheels/lancedb-*.whl 2> /dev/null | head -n 1)
           echo "Uploading $WHEEL to Fury"
           curl -f -F package=@$WHEEL https://$FURY_TOKEN@push.fury.io/lancedb/

View File

@@ -18,19 +18,19 @@ repository = "https://github.com/lancedb/lancedb"
description = "Serverless, low-latency vector database for AI applications" description = "Serverless, low-latency vector database for AI applications"
keywords = ["lancedb", "lance", "database", "vector", "search"] keywords = ["lancedb", "lance", "database", "vector", "search"]
categories = ["database-implementations"] categories = ["database-implementations"]
rust-version = "1.80.0" # TODO: lower this once we upgrade Lance again. rust-version = "1.78.0"
[workspace.dependencies] [workspace.dependencies]
lance = { "version" = "=0.21.0", "features" = [ lance = { "version" = "=0.21.0", "features" = [
"dynamodb", "dynamodb",
], git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.2" } ], git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
lance-io = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.2" } lance-io = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
lance-index = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.2" } lance-index = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
lance-linalg = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.2" } lance-linalg = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
lance-table = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.2" } lance-table = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
lance-testing = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.2" } lance-testing = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
lance-datafusion = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.2" } lance-datafusion = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
lance-encoding = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.2" } lance-encoding = { version = "=0.21.0", git = "https://github.com/lancedb/lance.git", tag = "v0.21.0-beta.5" }
# Note that this one does not include pyarrow # Note that this one does not include pyarrow
arrow = { version = "53.2", optional = false } arrow = { version = "53.2", optional = false }
arrow-array = "53.2" arrow-array = "53.2"

View File

@@ -62,6 +62,7 @@ plugins:
# for cross references
       - https://arrow.apache.org/docs/objects.inv
       - https://pandas.pydata.org/docs/objects.inv
+      - https://lancedb.github.io/lance/objects.inv
   - mkdocs-jupyter
   - render_swagger:
       allow_arbitrary_locations: true
@@ -231,6 +232,7 @@ nav:
       - 🐍 Python: python/saas-python.md
       - 👾 JavaScript: javascript/modules.md
       - REST API: cloud/rest.md
+      - FAQs: cloud/cloud_faq.md
   - Quick start: basic.md
   - Concepts:
@@ -357,6 +359,7 @@ nav:
       - 🐍 Python: python/saas-python.md
      - 👾 JavaScript: javascript/modules.md
       - REST API: cloud/rest.md
+      - FAQs: cloud/cloud_faq.md

 extra_css:
   - styles/global.css

View File

@@ -141,14 +141,6 @@ recommend switching to stable releases.
--8<-- "python/python/tests/docs/test_basic.py:connect_async" --8<-- "python/python/tests/docs/test_basic.py:connect_async"
``` ```
!!! note "Asynchronous Python API"
The asynchronous Python API is new and has some slight differences compared
to the synchronous API. Feel free to start using the asynchronous version.
Once all features have migrated we will start to move the synchronous API to
use the same syntax as the asynchronous API. To help with this migration we
have created a [migration guide](migration.md) detailing the differences.
=== "Typescript[^1]" === "Typescript[^1]"
=== "@lancedb/lancedb" === "@lancedb/lancedb"

View File

@@ -0,0 +1,34 @@
This section provides answers to the most common questions asked about LanceDB Cloud. By following these guidelines, you can ensure a smooth, performant experience with LanceDB Cloud.
### Should I reuse the database connection?
Yes! It is recommended to establish a single database connection and reuse it for all subsequent interactions with the tables it contains.
LanceDB uses HTTP connections to communicate with the servers. By re-using the Connection object, you avoid the overhead of repeatedly establishing HTTP connections, significantly improving efficiency.
### Should I re-use the `Table` object?
`table = db.open_table()` should be called once and reused for all subsequent table operations. If the table is modified, `table` will always reflect the **latest version** of the data.
### What should I do if I need to search for rows by `id`?
LanceDB Cloud currently does not support an ID or primary key column. We recommend adding a
user-defined ID column. To significantly improve the performance of queries with SQL clauses, create a scalar BITMAP/BTREE index on this column.
### What are the vector indexing types supported by LanceDB Cloud?
We support `IVF_PQ` and `IVF_HNSW_SQ` as the `index_type` passed to `create_index`. LanceDB Cloud tunes the indexing parameters automatically to achieve the best tradeoff between query latency and query quality.
### When I add new rows to a table, do I need to manually update the index?
No! LanceDB Cloud triggers an asynchronous background job to index the new vectors.
Even though indexing is asynchronous, your vectors are still immediately searchable: LanceDB uses brute-force search over unindexed rows. This makes your new data immediately available, but does increase latency temporarily. To disable the brute-force part of the search, set the `fast_search` flag in your query to `true`.
### Do I need to reindex the whole dataset if only a small portion of the data is deleted or updated?
No! Similar to adding data to the table, LanceDB Cloud triggers an asynchronous background job to update the existing indices. Therefore, no action is needed from users, and no downtime is expected.
### How do I know whether an index has been created?
While index creation in LanceDB Cloud is generally fast, querying immediately after a `create_index` call may result in errors. It's recommended to use `list_indices` to verify index creation before querying.
### Why is my query latency higher than expected?
Multiple factors can impact query latency. To reduce it, consider the following:
- Send pre-warm queries: send a few queries to warm up the cache before an actual user query.
- Check network latency: LanceDB Cloud is hosted in the AWS `us-east-1` region. It is recommended to run queries from an EC2 instance in the same region.
- Create scalar indices: if you are filtering on metadata, it is recommended to create scalar indices on those columns. This will speed up searches with metadata filtering. See [here](../guides/scalar_index.md) for more details on creating a scalar index.
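The advice above maps to only a few calls in the Python client. A minimal sketch (the URI, API key, table name, and `id` column are placeholders, and exact signatures may differ slightly between client versions):

```python
import lancedb

# Connect once and reuse the connection for every table operation.
db = lancedb.connect("db://my_database", api_key="ldb_...")

# Open the table once; it always reflects the latest version of the data.
table = db.open_table("my_table")

# A scalar index on a user-defined ID column speeds up filters like `id = 123`.
table.create_scalar_index("id", index_type="BTREE")

# Verify that indices exist before relying on them in queries.
print(table.list_indices())
```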

View File

@@ -804,12 +804,13 @@ a table:
 You can add new columns to the table with the `add_columns` method. New columns
 are filled with values based on a SQL expression. For example, you can add a new
-column `y` to the table and fill it with the value of `x + 1`.
+column `y` to the table, fill it with the value of `x * 2` and set the expected
+data type for it.

 === "Python"

     ```python
-    table.add_columns({"double_price": "price * 2"})
+    --8<-- "python/python/tests/docs/test_basic.py:add_columns"
     ```

     **API Reference:** [lancedb.table.Table.add_columns][]

@@ -849,8 +850,7 @@ rewriting the column, which can be a heavy operation.

     ```python
     import pyarrow as pa
-    table.alter_column({"path": "double_price", "rename": "dbl_price",
-                        "data_type": pa.float32(), "nullable": False})
+    --8<-- "python/python/tests/docs/test_basic.py:alter_columns"
     ```

     **API Reference:** [lancedb.table.Table.alter_columns][]

@@ -873,7 +873,7 @@ will remove the column from the schema.

 === "Python"

     ```python
-    table.drop_columns(["dbl_price"])
+    --8<-- "python/python/tests/docs/test_basic.py:drop_columns"
     ```

     **API Reference:** [lancedb.table.Table.drop_columns][]
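Pieced together from the snippets this change replaces, the whole sequence looks roughly like this (a sketch against the synchronous Python API; the `price` column is illustrative):

```python
import pyarrow as pa

# Add a computed column, typed via a SQL cast.
table.add_columns({"double_price": "cast((price * 2) as float)"})

# Rename it, narrow its type, and mark it non-nullable.
table.alter_columns({"path": "double_price", "rename": "dbl_price",
                     "data_type": pa.float32(), "nullable": False})

# Drop it again.
table.drop_columns(["dbl_price"])
```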

View File

@@ -1,81 +1,14 @@
# Rust-backed Client Migration Guide

-In an effort to ensure all clients have the same set of capabilities we have begun migrating the
-python and node clients onto a common Rust base library. In python, this new client is part of
-the same lancedb package, exposed as an asynchronous client. Once the asynchronous client has
-reached full functionality we will begin migrating the synchronous library to be a thin wrapper
-around the asynchronous client.
+In an effort to ensure all clients have the same set of capabilities we have
+migrated the Python and Node clients onto a common Rust base library. In Python,
+both the synchronous and asynchronous clients are based on this implementation.
+In Node, the new client is available as `@lancedb/lancedb`, which replaces
+the existing `vectordb` package.

-This guide describes the differences between the two APIs and will hopefully assist users
+This guide describes the differences between the two Node APIs and will hopefully assist users
 that would like to migrate to the new API.

-## Python
-
-### Closeable Connections
-
-The Connection now has a `close` method. You can call this when
-you are done with the connection to eagerly free resources. Currently
-this is limited to freeing/closing the HTTP connection for remote
-connections. In the future we may add caching or other resources to
-native connections so this is probably a good practice even if you
-aren't using remote connections.
-
-In addition, the connection can be used as a context manager which may
-be a more convenient way to ensure the connection is closed.
-
-```python
-import lancedb
-
-async def my_async_fn():
-    with await lancedb.connect_async("my_uri") as db:
-        print(await db.table_names())
-```
-
-It is not mandatory to call the `close` method. If you do not call it
-then the connection will be closed when the object is garbage collected.
-
-### Closeable Table
-
-The Table now also has a `close` method, similar to the connection. This
-can be used to eagerly free the cache used by a Table object. Similar to
-the connection, it can be used as a context manager and it is not mandatory
-to call the `close` method.
-
-#### Changes to Table APIs
-
-- Previously `Table.schema` was a property. Now it is an async method.
-- The method `Table.__len__` was removed and `len(table)` will no longer
-  work. Use `Table.count_rows` instead.
-
-#### Creating Indices
-
-The `Table.create_index` method is now used for creating both vector indices
-and scalar indices. It currently requires a column name to be specified (the
-column to index). Vector index defaults are now smarter and scale better with
-the size of the data.
-
-To specify index configuration details you will need to specify which kind of
-index you are using.
-
-#### Querying
-
-The `Table.search` method has been renamed to `AsyncTable.vector_search` for
-clarity.
-
-### Features not yet supported
-
-The following features are not yet supported by the asynchronous API. However,
-we plan to support them soon.
-
-- You cannot specify an embedding function when creating or opening a table.
-  You must calculate embeddings yourself if using the asynchronous API
-- The merge insert operation is not supported in the asynchronous API
-- Cleanup / compact / optimize indices are not supported in the asynchronous API
-- add / alter columns is not supported in the asynchronous API
-- The asynchronous API does not yet support any full text search or reranking
-  search
-- Remote connections to LanceDb Cloud are not yet supported.
-- The method Table.head is not yet supported.

 ## TypeScript/JavaScript
 For JS/TS users, we offer a brand new SDK [@lancedb/lancedb](https://www.npmjs.com/package/@lancedb/lancedb)

View File

@@ -47,6 +47,8 @@ is also an [asynchronous API client](#connections-asynchronous).
 ::: lancedb.embeddings.registry.EmbeddingFunctionRegistry

+::: lancedb.embeddings.base.EmbeddingFunctionConfig
+
 ::: lancedb.embeddings.base.EmbeddingFunction

 ::: lancedb.embeddings.base.TextEmbeddingFunction

@@ -127,8 +129,12 @@ lists the indices that LanceDb supports.

 ::: lancedb.index.LabelList

+::: lancedb.index.FTS
+
 ::: lancedb.index.IvfPq

+::: lancedb.index.IvfFlat
+
 ## Querying (Asynchronous)

 Queries allow you to return data from your database. Basic queries can be

View File

@@ -17,4 +17,8 @@ pip install lancedb
 ## Table

 ::: lancedb.remote.table.RemoteTable
+    options:
+      filters:
+        - "!cleanup_old_versions"
+        - "!compact_files"
+        - "!optimize"

View File

@@ -13,11 +13,15 @@ A vector search finds the approximate or exact nearest neighbors to a given quer
 Distance metrics are a measure of the similarity between a pair of vectors.
 Currently, LanceDB supports the following metrics:

 | Metric    | Description                                                                 |
 | --------- | --------------------------------------------------------------------------- |
 | `l2`      | [Euclidean / L2 distance](https://en.wikipedia.org/wiki/Euclidean_distance) |
 | `cosine`  | [Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity)        |
 | `dot`     | [Dot Product](https://en.wikipedia.org/wiki/Dot_product)                    |
+| `hamming` | [Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance)          |
+
+!!! note
+    The `hamming` metric is only available for binary vectors.
## Exhaustive search (kNN) ## Exhaustive search (kNN)
@@ -107,6 +111,31 @@ an ANN search means that using an index often involves a trade-off between recal
 See the [IVF_PQ index](./concepts/index_ivfpq.md) for a deeper description of how `IVF_PQ`
 indexes work in LanceDB.
## Binary vector
LanceDB supports binary vectors as a data type and can search them with hamming distance. Binary vectors are stored as uint8 arrays (every 8 bits are packed into one byte):

!!! note
    The dimension of a binary vector must be a multiple of 8. A vector of dimension 128 will be stored as a uint8 array of size 16.
=== "Python"
=== "sync API"
```python
--8<-- "python/python/tests/docs/test_binary_vector.py:imports"
--8<-- "python/python/tests/docs/test_binary_vector.py:sync_binary_vector"
```
=== "async API"
```python
--8<-- "python/python/tests/docs/test_binary_vector.py:imports"
--8<-- "python/python/tests/docs/test_binary_vector.py:async_binary_vector"
```
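For reference, a minimal sketch of the packing and search flow described above, assuming the synchronous client's `create_index` and `search` signatures (the table name and data are made up):

```python
import lancedb
import numpy as np
import pyarrow as pa

db = lancedb.connect("data/binary-demo")

dim = 128  # must be a multiple of 8; packs into dim // 8 uint8 values
schema = pa.schema([
    pa.field("id", pa.int64()),
    pa.field("vector", pa.list_(pa.uint8(), dim // 8)),
])
data = [
    {"id": i, "vector": np.packbits(np.random.randint(0, 2, size=dim)).tolist()}
    for i in range(1024)
]
table = db.create_table("binary_demo", data, schema=schema)

# Binary vectors are indexed with IVF_FLAT and searched with hamming distance.
table.create_index(metric="hamming", vector_column_name="vector",
                   index_type="IVF_FLAT")

query = np.packbits(np.random.randint(0, 2, size=dim)).tolist()
results = table.search(query).metric("hamming").limit(10).to_pandas()
```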
 ## Output search results

 LanceDB returns vector search results via different formats commonly used in Python.

View File

@@ -16,6 +16,7 @@ excluded_globs = [
"../src/concepts/*.md", "../src/concepts/*.md",
"../src/ann_indexes.md", "../src/ann_indexes.md",
"../src/basic.md", "../src/basic.md",
"../src/search.md",
"../src/hybrid_search/hybrid_search.md", "../src/hybrid_search/hybrid_search.md",
"../src/reranking/*.md", "../src/reranking/*.md",
"../src/guides/tuning_retrievers/*.md", "../src/guides/tuning_retrievers/*.md",

View File

@@ -8,7 +8,7 @@
   <parent>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-parent</artifactId>
-    <version>0.14.1-beta.2</version>
+    <version>0.14.1-beta.6</version>
     <relativePath>../pom.xml</relativePath>
   </parent>

View File

@@ -6,7 +6,7 @@
   <groupId>com.lancedb</groupId>
   <artifactId>lancedb-parent</artifactId>
-  <version>0.14.1-beta.2</version>
+  <version>0.14.1-beta.6</version>
   <packaging>pom</packaging>
   <name>LanceDB Parent</name>

node/package-lock.json (generated)
View File

@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.14.1-beta.2",
+  "version": "0.14.1-beta.6",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.14.1-beta.2",
+      "version": "0.14.1-beta.6",
       "cpu": [
         "x64",
         "arm64"
@@ -52,14 +52,14 @@
       "uuid": "^9.0.0"
     },
     "optionalDependencies": {
-      "@lancedb/vectordb-darwin-arm64": "0.14.1-beta.2",
+      "@lancedb/vectordb-darwin-arm64": "0.14.1-beta.6",
-      "@lancedb/vectordb-darwin-x64": "0.14.1-beta.2",
+      "@lancedb/vectordb-darwin-x64": "0.14.1-beta.6",
-      "@lancedb/vectordb-linux-arm64-gnu": "0.14.1-beta.2",
+      "@lancedb/vectordb-linux-arm64-gnu": "0.14.1-beta.6",
-      "@lancedb/vectordb-linux-arm64-musl": "0.14.1-beta.2",
+      "@lancedb/vectordb-linux-arm64-musl": "0.14.1-beta.6",
-      "@lancedb/vectordb-linux-x64-gnu": "0.14.1-beta.2",
+      "@lancedb/vectordb-linux-x64-gnu": "0.14.1-beta.6",
-      "@lancedb/vectordb-linux-x64-musl": "0.14.1-beta.2",
+      "@lancedb/vectordb-linux-x64-musl": "0.14.1-beta.6",
-      "@lancedb/vectordb-win32-arm64-msvc": "0.14.1-beta.2",
+      "@lancedb/vectordb-win32-arm64-msvc": "0.14.1-beta.6",
-      "@lancedb/vectordb-win32-x64-msvc": "0.14.1-beta.2"
+      "@lancedb/vectordb-win32-x64-msvc": "0.14.1-beta.6"
     },
     "peerDependencies": {
       "@apache-arrow/ts": "^14.0.2",

View File

@@ -1,6 +1,6 @@
 {
   "name": "vectordb",
-  "version": "0.14.1-beta.2",
+  "version": "0.14.1-beta.6",
   "description": " Serverless, low-latency vector database for AI applications",
   "private": false,
   "main": "dist/index.js",
@@ -92,13 +92,13 @@
     }
   },
   "optionalDependencies": {
-    "@lancedb/vectordb-darwin-x64": "0.14.1-beta.2",
+    "@lancedb/vectordb-darwin-x64": "0.14.1-beta.6",
-    "@lancedb/vectordb-darwin-arm64": "0.14.1-beta.2",
+    "@lancedb/vectordb-darwin-arm64": "0.14.1-beta.6",
-    "@lancedb/vectordb-linux-x64-gnu": "0.14.1-beta.2",
+    "@lancedb/vectordb-linux-x64-gnu": "0.14.1-beta.6",
-    "@lancedb/vectordb-linux-arm64-gnu": "0.14.1-beta.2",
+    "@lancedb/vectordb-linux-arm64-gnu": "0.14.1-beta.6",
-    "@lancedb/vectordb-linux-x64-musl": "0.14.1-beta.2",
+    "@lancedb/vectordb-linux-x64-musl": "0.14.1-beta.6",
-    "@lancedb/vectordb-linux-arm64-musl": "0.14.1-beta.2",
+    "@lancedb/vectordb-linux-arm64-musl": "0.14.1-beta.6",
-    "@lancedb/vectordb-win32-x64-msvc": "0.14.1-beta.2",
+    "@lancedb/vectordb-win32-x64-msvc": "0.14.1-beta.6",
-    "@lancedb/vectordb-win32-arm64-msvc": "0.14.1-beta.2"
+    "@lancedb/vectordb-win32-arm64-msvc": "0.14.1-beta.6"
   }
 }

View File

@@ -1,7 +1,7 @@
 [package]
 name = "lancedb-nodejs"
 edition.workspace = true
-version = "0.14.1-beta.2"
+version = "0.14.1-beta.6"
 license.workspace = true
 description.workspace = true
 repository.workspace = true

View File

@@ -1058,6 +1058,26 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
     expect(results[0].text).toBe(data[0].text);
   });

+  test("full text search without lowercase", async () => {
+    const db = await connect(tmpDir.name);
+    const data = [
+      { text: "hello world", vector: [0.1, 0.2, 0.3] },
+      { text: "Hello World", vector: [0.4, 0.5, 0.6] },
+    ];
+    const table = await db.createTable("test", data);
+    await table.createIndex("text", {
+      config: Index.fts({ withPosition: false }),
+    });
+    const results = await table.search("hello").toArray();
+    expect(results.length).toBe(2);
+
+    await table.createIndex("text", {
+      config: Index.fts({ withPosition: false, lowercase: false }),
+    });
+    const results2 = await table.search("hello").toArray();
+    expect(results2.length).toBe(1);
+  });
+
   test("full text search phrase query", async () => {
     const db = await connect(tmpDir.name);
     const data = [

View File

@@ -119,7 +119,9 @@ test("basic table examples", async () => {
   {
     // --8<-- [start:add_columns]
-    await tbl.addColumns([{ name: "double_price", valueSql: "price * 2" }]);
+    await tbl.addColumns([
+      { name: "double_price", valueSql: "cast((price * 2) as Float)" },
+    ]);
     // --8<-- [end:add_columns]
     // --8<-- [start:alter_columns]
     await tbl.alterColumns([

View File

@@ -349,6 +349,52 @@ export interface FtsOptions {
   * which will make the index smaller and faster to build, but will not support phrase queries.
   */
  withPosition?: boolean;
/**
* The tokenizer to use when building the index.
* The default is "simple".
*
* The following tokenizers are available:
*
* "simple" - Simple tokenizer. This tokenizer splits the text into tokens using whitespace and punctuation as a delimiter.
*
* "whitespace" - Whitespace tokenizer. This tokenizer splits the text into tokens using whitespace as a delimiter.
*
* "raw" - Raw tokenizer. This tokenizer does not split the text into tokens and indexes the entire text as a single token.
*/
baseTokenizer?: "simple" | "whitespace" | "raw";
/**
* language for stemming and stop words
* this is only used when `stem` or `remove_stop_words` is true
*/
language?: string;
/**
* maximum token length
* tokens longer than this length will be ignored
*/
maxTokenLength?: number;
/**
* whether to lowercase tokens
*/
lowercase?: boolean;
/**
* whether to stem tokens
*/
stem?: boolean;
/**
* whether to remove stop words
*/
removeStopWords?: boolean;
/**
* whether to remove punctuation
*/
asciiFolding?: boolean;
}

export class Index {
@@ -450,7 +496,18 @@ export class Index {
   * For now, the full text search index only supports English, and doesn't support phrase search.
   */
  static fts(options?: Partial<FtsOptions>) {
-    return new Index(LanceDbIndex.fts(options?.withPosition));
+    return new Index(
LanceDbIndex.fts(
options?.withPosition,
options?.baseTokenizer,
options?.language,
options?.maxTokenLength,
options?.lowercase,
options?.stem,
options?.removeStopWords,
options?.asciiFolding,
),
);
  }

  /**

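The same tokenizer options surface in Python through the `FTS` dataclass shown later in this diff. A minimal async sketch mirroring the lowercase test above (the path and data are illustrative; field names follow the Python `FTS` dataclass):

```python
import asyncio

import lancedb
from lancedb.index import FTS

async def main():
    db = await lancedb.connect_async("data/fts-demo")
    table = await db.create_table("fts_demo", [
        {"text": "hello world", "vector": [0.1, 0.2, 0.3]},
        {"text": "Hello World", "vector": [0.4, 0.5, 0.6]},
    ])
    # With lower_case=False, tokens keep their case, so a query for
    # "hello" then matches only the first row.
    await table.create_index("text", config=FTS(with_position=False,
                                                lower_case=False))

asyncio.run(main())
```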
View File

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-arm64",
-  "version": "0.14.1-beta.2",
+  "version": "0.14.1-beta.6",
   "os": ["darwin"],
   "cpu": ["arm64"],
   "main": "lancedb.darwin-arm64.node",

View File

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-x64",
-  "version": "0.14.1-beta.2",
+  "version": "0.14.1-beta.6",
   "os": ["darwin"],
   "cpu": ["x64"],
   "main": "lancedb.darwin-x64.node",

View File

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-gnu",
-  "version": "0.14.1-beta.2",
+  "version": "0.14.1-beta.6",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-gnu.node",

View File

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-musl",
-  "version": "0.14.1-beta.2",
+  "version": "0.14.1-beta.6",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-musl.node",

View File

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-gnu",
-  "version": "0.14.1-beta.2",
+  "version": "0.14.1-beta.6",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-gnu.node",

View File

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-musl",
-  "version": "0.14.1-beta.2",
+  "version": "0.14.1-beta.6",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-musl.node",

View File

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-arm64-msvc",
-  "version": "0.14.1-beta.2",
+  "version": "0.14.1-beta.6",
   "os": [
     "win32"
   ],

View File

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-x64-msvc",
-  "version": "0.14.1-beta.2",
+  "version": "0.14.1-beta.6",
   "os": ["win32"],
   "cpu": ["x64"],
   "main": "lancedb.win32-x64-msvc.node",

View File

@@ -1,12 +1,12 @@
 {
   "name": "@lancedb/lancedb",
-  "version": "0.14.0",
+  "version": "0.14.1-beta.6",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@lancedb/lancedb",
-      "version": "0.14.0",
+      "version": "0.14.1-beta.6",
       "cpu": [
         "x64",
         "arm64"

View File

@@ -11,7 +11,7 @@
"ann" "ann"
], ],
"private": false, "private": false,
"version": "0.14.1-beta.2", "version": "0.14.1-beta.6",
"main": "dist/index.js", "main": "dist/index.js",
"exports": { "exports": {
".": "./dist/index.js", ".": "./dist/index.js",

View File

@@ -96,11 +96,45 @@ impl Index {
     }

     #[napi(factory)]
-    pub fn fts(with_position: Option<bool>) -> Self {
+    #[allow(clippy::too_many_arguments)]
pub fn fts(
with_position: Option<bool>,
base_tokenizer: Option<String>,
language: Option<String>,
max_token_length: Option<u32>,
lower_case: Option<bool>,
stem: Option<bool>,
remove_stop_words: Option<bool>,
ascii_folding: Option<bool>,
) -> Self {
         let mut opts = FtsIndexBuilder::default();
+        let mut tokenizer_configs = opts.tokenizer_configs.clone();
         if let Some(with_position) = with_position {
             opts = opts.with_position(with_position);
         }
if let Some(base_tokenizer) = base_tokenizer {
tokenizer_configs = tokenizer_configs.base_tokenizer(base_tokenizer);
}
if let Some(language) = language {
tokenizer_configs = tokenizer_configs.language(&language).unwrap();
}
if let Some(max_token_length) = max_token_length {
tokenizer_configs = tokenizer_configs.max_token_length(Some(max_token_length as usize));
}
if let Some(lower_case) = lower_case {
tokenizer_configs = tokenizer_configs.lower_case(lower_case);
}
if let Some(stem) = stem {
tokenizer_configs = tokenizer_configs.stem(stem);
}
if let Some(remove_stop_words) = remove_stop_words {
tokenizer_configs = tokenizer_configs.remove_stop_words(remove_stop_words);
}
if let Some(ascii_folding) = ascii_folding {
tokenizer_configs = tokenizer_configs.ascii_folding(ascii_folding);
}
opts.tokenizer_configs = tokenizer_configs;
         Self {
             inner: Mutex::new(Some(LanceDbIndex::FTS(opts))),
         }

View File

@@ -5,8 +5,9 @@ pub fn parse_distance_type(distance_type: impl AsRef<str>) -> napi::Result<Dista
"l2" => Ok(DistanceType::L2), "l2" => Ok(DistanceType::L2),
"cosine" => Ok(DistanceType::Cosine), "cosine" => Ok(DistanceType::Cosine),
"dot" => Ok(DistanceType::Dot), "dot" => Ok(DistanceType::Dot),
"hamming" => Ok(DistanceType::Hamming),
_ => Err(napi::Error::from_reason(format!( _ => Err(napi::Error::from_reason(format!(
"Invalid distance type '{}'. Must be one of l2, cosine, or dot", "Invalid distance type '{}'. Must be one of l2, cosine, dot, or hamming",
distance_type.as_ref() distance_type.as_ref()
))), ))),
} }

View File

@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.17.1-beta.3"
+current_version = "0.17.1"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.

View File

@@ -1,6 +1,6 @@
 [package]
 name = "lancedb-python"
-version = "0.17.1-beta.3"
+version = "0.17.1"
 edition.workspace = true
 description = "Python bindings for LanceDB"
 license.workspace = true

View File

@@ -3,7 +3,7 @@ name = "lancedb"
# version in Cargo.toml
 dependencies = [
     "deprecation",
-    "pylance==0.21.0b2",
+    "pylance==0.21.0b5",
     "tqdm>=4.27.0",
     "pydantic>=1.10",
     "packaging",

View File

@@ -70,7 +70,7 @@ def connect(
         default configuration is used.
     storage_options: dict, optional
         Additional options for the storage backend. See available options at
-        https://lancedb.github.io/lancedb/guides/storage/
+        <https://lancedb.github.io/lancedb/guides/storage/>

     Examples
     --------
@@ -82,11 +82,13 @@ def connect(

     For object storage, use a URI prefix:

-    >>> db = lancedb.connect("s3://my-bucket/lancedb")
+    >>> db = lancedb.connect("s3://my-bucket/lancedb",
+    ...                      storage_options={"aws_access_key_id": "***"})

     Connect to LanceDB cloud:

-    >>> db = lancedb.connect("db://my_database", api_key="ldb_...")
+    >>> db = lancedb.connect("db://my_database", api_key="ldb_...",
+    ...                      client_config={"retry_config": {"retries": 5}})

     Returns
     -------
@@ -164,7 +166,7 @@ async def connect_async(
         default configuration is used.
     storage_options: dict, optional
         Additional options for the storage backend. See available options at
-        https://lancedb.github.io/lancedb/guides/storage/
+        <https://lancedb.github.io/lancedb/guides/storage/>

     Examples
     --------

View File

@@ -2,19 +2,8 @@ from typing import Dict, List, Optional, Tuple
 import pyarrow as pa

-class Index:
-    @staticmethod
-    def ivf_pq(
-        distance_type: Optional[str],
-        num_partitions: Optional[int],
-        num_sub_vectors: Optional[int],
-        max_iterations: Optional[int],
-        sample_rate: Optional[int],
-    ) -> Index: ...
-    @staticmethod
-    def btree() -> Index: ...
-
 class Connection(object):
+    uri: str
     async def table_names(
         self, start_after: Optional[str], limit: Optional[int]
     ) -> list[str]: ...
@@ -46,9 +35,7 @@ class Table:
     async def add(self, data: pa.RecordBatchReader, mode: str) -> None: ...
     async def update(self, updates: Dict[str, str], where: Optional[str]) -> None: ...
     async def count_rows(self, filter: Optional[str]) -> int: ...
-    async def create_index(
-        self, column: str, config: Optional[Index], replace: Optional[bool]
-    ): ...
+    async def create_index(self, column: str, config, replace: Optional[bool]): ...
     async def version(self) -> int: ...
     async def checkout(self, version): ...
     async def checkout_latest(self): ...

View File

@@ -23,3 +23,6 @@ class BackgroundEventLoop:
     def run(self, future):
         return asyncio.run_coroutine_threadsafe(future, self.loop).result()

+LOOP = BackgroundEventLoop()

View File

@@ -17,12 +17,13 @@ from abc import abstractmethod
 from pathlib import Path
 from typing import TYPE_CHECKING, Dict, Iterable, List, Literal, Optional, Union

-from overrides import EnforceOverrides, override
+from lancedb.embeddings.registry import EmbeddingFunctionRegistry
+from overrides import EnforceOverrides, override  # type: ignore

 from lancedb.common import data_to_reader, sanitize_uri, validate_schema
-from lancedb.background_loop import BackgroundEventLoop
+from lancedb.background_loop import LOOP
-from ._lancedb import connect as lancedb_connect
+from ._lancedb import connect as lancedb_connect  # type: ignore
 from .table import (
     AsyncTable,
     LanceTable,
@@ -43,8 +44,6 @@ if TYPE_CHECKING:
     from .common import DATA, URI
     from .embeddings import EmbeddingFunctionConfig

-LOOP = BackgroundEventLoop()

 class DBConnection(EnforceOverrides):
     """An active LanceDB connection interface."""
@@ -82,6 +81,10 @@ class DBConnection(EnforceOverrides):
on_bad_vectors: str = "error", on_bad_vectors: str = "error",
fill_value: float = 0.0, fill_value: float = 0.0,
embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None, embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
*,
storage_options: Optional[Dict[str, str]] = None,
data_storage_version: Optional[str] = None,
enable_v2_manifest_paths: Optional[bool] = None,
) -> Table: ) -> Table:
"""Create a [Table][lancedb.table.Table] in the database. """Create a [Table][lancedb.table.Table] in the database.
@@ -119,6 +122,24 @@ class DBConnection(EnforceOverrides):
One of "error", "drop", "fill". One of "error", "drop", "fill".
fill_value: float fill_value: float
The value to use when filling vectors. Only used if on_bad_vectors="fill". The value to use when filling vectors. Only used if on_bad_vectors="fill".
storage_options: dict, optional
Additional options for the storage backend. Options already set on the
connection will be inherited by the table, but can be overridden here.
See available options at
<https://lancedb.github.io/lancedb/guides/storage/>
data_storage_version: optional, str, default "stable"
The version of the data storage format to use. Newer versions are more
efficient but require newer versions of lance to read. The default is
"stable" which will use the legacy v2 version. See the user guide
for more details.
enable_v2_manifest_paths: bool, optional, default False
Use the new V2 manifest paths. These paths provide more efficient
opening of datasets with many versions on object stores. WARNING:
turning this on will make the dataset unreadable for older versions
of LanceDB (prior to 0.13.0). To migrate an existing dataset, instead
use the
[Table.migrate_manifest_paths_v2][lancedb.table.Table.migrate_v2_manifest_paths]
method.
Returns Returns
------- -------
@@ -140,7 +161,7 @@ class DBConnection(EnforceOverrides):
>>> data = [{"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7}, >>> data = [{"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7},
... {"vector": [0.2, 1.8], "lat": 40.1, "long": -74.1}] ... {"vector": [0.2, 1.8], "lat": 40.1, "long": -74.1}]
>>> db.create_table("my_table", data) >>> db.create_table("my_table", data)
LanceTable(connection=..., name="my_table") LanceTable(name='my_table', version=1, ...)
>>> db["my_table"].head() >>> db["my_table"].head()
pyarrow.Table pyarrow.Table
vector: fixed_size_list<item: float>[2] vector: fixed_size_list<item: float>[2]
@@ -161,7 +182,7 @@ class DBConnection(EnforceOverrides):
... "long": [-122.7, -74.1] ... "long": [-122.7, -74.1]
... }) ... })
>>> db.create_table("table2", data) >>> db.create_table("table2", data)
LanceTable(connection=..., name="table2") LanceTable(name='table2', version=1, ...)
>>> db["table2"].head() >>> db["table2"].head()
pyarrow.Table pyarrow.Table
vector: fixed_size_list<item: float>[2] vector: fixed_size_list<item: float>[2]
@@ -184,7 +205,7 @@ class DBConnection(EnforceOverrides):
... pa.field("long", pa.float32()) ... pa.field("long", pa.float32())
... ]) ... ])
>>> db.create_table("table3", data, schema = custom_schema) >>> db.create_table("table3", data, schema = custom_schema)
LanceTable(connection=..., name="table3") LanceTable(name='table3', version=1, ...)
>>> db["table3"].head() >>> db["table3"].head()
pyarrow.Table pyarrow.Table
vector: fixed_size_list<item: float>[2] vector: fixed_size_list<item: float>[2]
@@ -218,7 +239,7 @@ class DBConnection(EnforceOverrides):
... pa.field("price", pa.float32()), ... pa.field("price", pa.float32()),
... ]) ... ])
>>> db.create_table("table4", make_batches(), schema=schema) >>> db.create_table("table4", make_batches(), schema=schema)
LanceTable(connection=..., name="table4") LanceTable(name='table4', version=1, ...)
""" """
raise NotImplementedError raise NotImplementedError
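For instance, the new parameters allow targeting object storage directly from the sync API (a sketch; the bucket and option values are placeholders):

```python
import lancedb

db = lancedb.connect("s3://my-bucket/lancedb")
table = db.create_table(
    "my_table",
    data=[{"vector": [1.1, 1.2], "b": 2}],
    storage_options={"aws_region": "us-east-1"},
    data_storage_version="stable",
)
```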
@@ -226,7 +247,13 @@ class DBConnection(EnforceOverrides):
     def __getitem__(self, name: str) -> LanceTable:
         return self.open_table(name)

-    def open_table(self, name: str, *, index_cache_size: Optional[int] = None) -> Table:
+    def open_table(
+        self,
+        name: str,
+        *,
+        storage_options: Optional[Dict[str, str]] = None,
+        index_cache_size: Optional[int] = None,
+    ) -> Table:
         """Open a Lance Table in the database.

         Parameters
@@ -243,6 +270,11 @@ class DBConnection(EnforceOverrides):
             This cache applies to the entire opened table, across all indices.
             Setting this value higher will increase performance on larger datasets
             at the expense of more RAM
+        storage_options: dict, optional
+            Additional options for the storage backend. Options already set on the
+            connection will be inherited by the table, but can be overridden here.
+            See available options at
+            <https://lancedb.github.io/lancedb/guides/storage/>

         Returns
         -------
@@ -309,15 +341,15 @@ class LanceDBConnection(DBConnection):
    >>> db = lancedb.connect("./.lancedb")
    >>> db.create_table("my_table", data=[{"vector": [1.1, 1.2], "b": 2},
    ...                                   {"vector": [0.5, 1.3], "b": 4}])
-   LanceTable(connection=..., name="my_table")
+   LanceTable(name='my_table', version=1, ...)
    >>> db.create_table("another_table", data=[{"vector": [0.4, 0.4], "b": 6}])
-   LanceTable(connection=..., name="another_table")
+   LanceTable(name='another_table', version=1, ...)
    >>> sorted(db.table_names())
    ['another_table', 'my_table']
    >>> len(db)
    2
    >>> db["my_table"]
-   LanceTable(connection=..., name="my_table")
+   LanceTable(name='my_table', version=1, ...)
    >>> "my_table" in db
    True
    >>> db.drop_table("my_table")
@@ -363,7 +395,7 @@ class LanceDBConnection(DBConnection):
         self._conn = AsyncConnection(LOOP.run(do_connect()))

     def __repr__(self) -> str:
-        val = f"{self.__class__.__name__}({self._uri}"
+        val = f"{self.__class__.__name__}(uri={self._uri!r}"
         if self.read_consistency_interval is not None:
             val += f", read_consistency_interval={repr(self.read_consistency_interval)}"
         val += ")"
@@ -403,6 +435,10 @@ class LanceDBConnection(DBConnection):
on_bad_vectors: str = "error", on_bad_vectors: str = "error",
fill_value: float = 0.0, fill_value: float = 0.0,
embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None, embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
*,
storage_options: Optional[Dict[str, str]] = None,
data_storage_version: Optional[str] = None,
enable_v2_manifest_paths: Optional[bool] = None,
) -> LanceTable: ) -> LanceTable:
"""Create a table in the database. """Create a table in the database.
@@ -424,12 +460,19 @@ class LanceDBConnection(DBConnection):
             on_bad_vectors=on_bad_vectors,
             fill_value=fill_value,
             embedding_functions=embedding_functions,
+            storage_options=storage_options,
+            data_storage_version=data_storage_version,
+            enable_v2_manifest_paths=enable_v2_manifest_paths,
         )
         return tbl

     @override
     def open_table(
-        self, name: str, *, index_cache_size: Optional[int] = None
+        self,
+        name: str,
+        *,
+        storage_options: Optional[Dict[str, str]] = None,
+        index_cache_size: Optional[int] = None,
     ) -> LanceTable:
         """Open a table in the database.
@@ -442,7 +485,12 @@ class LanceDBConnection(DBConnection):
         -------
         A LanceTable object representing the table.
         """
-        return LanceTable.open(self, name, index_cache_size=index_cache_size)
+        return LanceTable.open(
+            self,
+            name,
+            storage_options=storage_options,
+            index_cache_size=index_cache_size,
+        )

     @override
     def drop_table(self, name: str, ignore_missing: bool = False):
@@ -455,13 +503,7 @@ class LanceDBConnection(DBConnection):
         ignore_missing: bool, default False
             If True, ignore if the table does not exist.
         """
-        try:
-            LOOP.run(self._conn.drop_table(name))
-        except ValueError as e:
-            if not ignore_missing:
-                raise e
-            if f"Table '{name}' was not found" not in str(e):
-                raise e
+        LOOP.run(self._conn.drop_table(name, ignore_missing=ignore_missing))

     @override
     def drop_database(self):
@@ -524,6 +566,10 @@ class AsyncConnection(object):
         Any attempt to use the connection after it is closed will result in an error."""
         self._inner.close()

+    @property
+    def uri(self) -> str:
+        return self._inner.uri

     async def table_names(
         self, *, start_after: Optional[str] = None, limit: Optional[int] = None
     ) -> Iterable[str]:
@@ -557,6 +603,7 @@ class AsyncConnection(object):
         fill_value: Optional[float] = None,
         storage_options: Optional[Dict[str, str]] = None,
         *,
+        embedding_functions: List[EmbeddingFunctionConfig] = None,
         data_storage_version: Optional[str] = None,
         use_legacy_format: Optional[bool] = None,
         enable_v2_manifest_paths: Optional[bool] = None,
@@ -601,7 +648,7 @@ class AsyncConnection(object):
             Additional options for the storage backend. Options already set on the
             connection will be inherited by the table, but can be overridden here.
             See available options at
-            https://lancedb.github.io/lancedb/guides/storage/
+            <https://lancedb.github.io/lancedb/guides/storage/>
         data_storage_version: optional, str, default "stable"
             The version of the data storage format to use. Newer versions are more
             efficient but require newer versions of lance to read. The default is
@@ -730,6 +777,17 @@ class AsyncConnection(object):
""" """
metadata = None metadata = None
if embedding_functions is not None:
# If we passed in embedding functions explicitly
# then we'll override any schema metadata that
# may was implicitly specified by the LanceModel schema
registry = EmbeddingFunctionRegistry.get_instance()
metadata = registry.get_table_metadata(embedding_functions)
data, schema = sanitize_create_table(
data, schema, metadata, on_bad_vectors, fill_value
)
# Defining defaults here and not in function prototype. In the future # Defining defaults here and not in function prototype. In the future
# these defaults will move into rust so better to keep them as None. # these defaults will move into rust so better to keep them as None.
if on_bad_vectors is None: if on_bad_vectors is None:
@@ -791,7 +849,7 @@ class AsyncConnection(object):
             Additional options for the storage backend. Options already set on the
             connection will be inherited by the table, but can be overridden here.
             See available options at
-            https://lancedb.github.io/lancedb/guides/storage/
+            <https://lancedb.github.io/lancedb/guides/storage/>

         index_cache_size: int, default 256
             Set the size of the index cache, specified as a number of entries
@@ -822,15 +880,23 @@ class AsyncConnection(object):
""" """
await self._inner.rename_table(old_name, new_name) await self._inner.rename_table(old_name, new_name)
async def drop_table(self, name: str): async def drop_table(self, name: str, *, ignore_missing: bool = False):
"""Drop a table from the database. """Drop a table from the database.
Parameters Parameters
---------- ----------
name: str name: str
The name of the table. The name of the table.
ignore_missing: bool, default False
If True, ignore if the table does not exist.
""" """
await self._inner.drop_table(name) try:
await self._inner.drop_table(name)
except ValueError as e:
if not ignore_missing:
raise e
if f"Table '{name}' was not found" not in str(e):
raise e
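A usage sketch of the new flag on the async connection (table name illustrative):

db = await lancedb.connect_async("memory://")
# A no-op instead of a ValueError when the table is absent.
await db.drop_table("does_not_exist", ignore_missing=True)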
async def drop_database(self): async def drop_database(self):
""" """


@@ -1,8 +1,6 @@
from typing import Optional from dataclasses import dataclass
from typing import Literal, Optional
from ._lancedb import (
Index as LanceDbIndex,
)
from ._lancedb import ( from ._lancedb import (
IndexConfig, IndexConfig,
) )
@@ -29,6 +27,7 @@ lang_mapping = {
} }
@dataclass
class BTree: class BTree:
"""Describes a btree index configuration """Describes a btree index configuration
@@ -50,10 +49,10 @@ class BTree:
the block size may be added in the future. the block size may be added in the future.
""" """
def __init__(self): pass
self._inner = LanceDbIndex.btree()
@dataclass
class Bitmap: class Bitmap:
"""Describe a Bitmap index configuration. """Describe a Bitmap index configuration.
@@ -73,10 +72,10 @@ class Bitmap:
requires 128 / 8 * 1Bi bytes on disk. requires 128 / 8 * 1Bi bytes on disk.
""" """
def __init__(self): pass
self._inner = LanceDbIndex.bitmap()
@dataclass
class LabelList: class LabelList:
"""Describe a LabelList index configuration. """Describe a LabelList index configuration.
@@ -87,41 +86,57 @@ class LabelList:
For example, it works with `tags`, `categories`, `keywords`, etc. For example, it works with `tags`, `categories`, `keywords`, etc.
""" """
def __init__(self): pass
self._inner = LanceDbIndex.label_list()
@dataclass
class FTS: class FTS:
"""Describe a FTS index configuration. """Describe a FTS index configuration.
`FTS` is a full-text search index that can be used on `String` columns `FTS` is a full-text search index that can be used on `String` columns
For example, it works with `title`, `description`, `content`, etc. For example, it works with `title`, `description`, `content`, etc.
Attributes
----------
with_position : bool, default True
Whether to store the position of the token in the document. Setting this
to False can reduce the size of the index and improve indexing speed,
but it will disable support for phrase queries.
base_tokenizer : str, default "simple"
The base tokenizer to use for tokenization. Options are:
- "simple": Splits text by whitespace and punctuation.
- "whitespace": Split text by whitespace, but not punctuation.
- "raw": No tokenization. The entire text is treated as a single token.
language : str, default "English"
The language to use for tokenization.
max_token_length : int, default 40
The maximum token length to index. Tokens longer than this length will be
ignored.
lower_case : bool, default True
Whether to convert the token to lower case. This makes queries case-insensitive.
stem : bool, default False
Whether to stem the token. Stemming reduces words to their root form.
For example, in English "running" and "runs" would both be reduced to "run".
remove_stop_words : bool, default False
Whether to remove stop words. Stop words are common words that are often
removed from text before indexing. For example, in English "the" and "and".
ascii_folding : bool, default False
Whether to fold ASCII characters. This converts accented characters to
their ASCII equivalent. For example, "café" would be converted to "cafe".
""" """
def __init__( with_position: bool = True
self, base_tokenizer: Literal["simple", "raw", "whitespace"] = "simple"
with_position: bool = True, language: str = "English"
base_tokenizer: str = "simple", max_token_length: Optional[int] = 40
language: str = "English", lower_case: bool = True
max_token_length: Optional[int] = 40, stem: bool = False
lower_case: bool = True, remove_stop_words: bool = False
stem: bool = False, ascii_folding: bool = False
remove_stop_words: bool = False,
ascii_folding: bool = False,
):
self._inner = LanceDbIndex.fts(
with_position=with_position,
base_tokenizer=base_tokenizer,
language=language,
max_token_length=max_token_length,
lower_case=lower_case,
stem=stem,
remove_stop_words=remove_stop_words,
ascii_folding=ascii_folding,
)
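The sync API exposes the same knobs through create_fts_index; a sketch mirroring the French-language test later in this diff:

table.create_fts_index(
    "text",
    use_tantivy=False,
    language="French",
    stem=True,               # "route" also matches "routes"
    ascii_folding=True,      # "cafe" matches "café"
    remove_stop_words=True,  # searching "la" returns nothing
)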
@dataclass
class HnswPq: class HnswPq:
"""Describe a HNSW-PQ index configuration. """Describe a HNSW-PQ index configuration.
@@ -232,30 +247,17 @@ class HnswPq:
search phase. search phase.
""" """
def __init__( distance_type: Literal["l2", "cosine", "dot"] = "l2"
self, num_partitions: Optional[int] = None
*, num_sub_vectors: Optional[int] = None
distance_type: Optional[str] = None, num_bits: int = 8
num_partitions: Optional[int] = None, max_iterations: int = 50
num_sub_vectors: Optional[int] = None, sample_rate: int = 256
num_bits: Optional[int] = None, m: int = 20
max_iterations: Optional[int] = None, ef_construction: int = 300
sample_rate: Optional[int] = None,
m: Optional[int] = None,
ef_construction: Optional[int] = None,
):
self._inner = LanceDbIndex.hnsw_pq(
distance_type=distance_type,
num_partitions=num_partitions,
num_sub_vectors=num_sub_vectors,
num_bits=num_bits,
max_iterations=max_iterations,
sample_rate=sample_rate,
m=m,
ef_construction=ef_construction,
)
@dataclass
class HnswSq: class HnswSq:
"""Describe a HNSW-SQ index configuration. """Describe a HNSW-SQ index configuration.
@@ -345,26 +347,106 @@ class HnswSq:
""" """
def __init__( distance_type: Literal["l2", "cosine", "dot"] = "l2"
self, num_partitions: Optional[int] = None
*, max_iterations: int = 50
distance_type: Optional[str] = None, sample_rate: int = 256
num_partitions: Optional[int] = None, m: int = 20
max_iterations: Optional[int] = None, ef_construction: int = 300
sample_rate: Optional[int] = None,
m: Optional[int] = None,
ef_construction: Optional[int] = None,
):
self._inner = LanceDbIndex.hnsw_sq(
distance_type=distance_type,
num_partitions=num_partitions,
max_iterations=max_iterations,
sample_rate=sample_rate,
m=m,
ef_construction=ef_construction,
)
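With the dataclass form, overrides are plain keyword arguments. A sketch based on the async index tests later in this diff (the m override is illustrative):

# Unset fields keep the defaults documented above.
await table.create_index("vector", config=HnswSq(num_partitions=10, m=32))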
@dataclass
class IvfFlat:
"""Describes an IVF Flat Index
This index stores raw vectors.
These vectors are grouped into partitions of similar vectors.
Each partition keeps track of a centroid which is
the average value of all vectors in the group.
Attributes
----------
distance_type: str, default "L2"
The distance metric used to train the index
This is used when training the index to calculate the IVF partitions
(vectors are grouped in partitions with similar vectors according to this
distance type).
The distance type used to train an index MUST match the distance type used
to search the index. Failure to do so will yield inaccurate results.
The following distance types are available:
"l2" - Euclidean distance. This is a very common distance metric that
accounts for both magnitude and direction when determining the distance
between vectors. L2 distance has a range of [0, ∞).
"cosine" - Cosine distance. Cosine distance is a distance metric
calculated from the cosine similarity between two vectors. Cosine
similarity is a measure of similarity between two non-zero vectors of an
inner product space. It is defined to equal the cosine of the angle
between them. Unlike L2, the cosine distance is not affected by the
magnitude of the vectors. Cosine distance has a range of [0, 2].
Note: the cosine distance is undefined when one (or both) of the vectors
are all zeros (there is no direction). These vectors are invalid and may
never be returned from a vector search.
"dot" - Dot product. Dot distance is the dot product of two vectors. Dot
distance has a range of (-∞, ∞). If the vectors are normalized (i.e. their
L2 norm is 1), then dot distance is equivalent to the cosine distance.
"hamming" - Hamming distance. Hamming distance is a distance metric
calculated as the number of positions at which the corresponding bits are
different. Hamming distance has a range of [0, vector dimension].
num_partitions: int, default sqrt(num_rows)
The number of IVF partitions to create.
This value should generally scale with the number of rows in the dataset.
By default the number of partitions is the square root of the number of
rows.
If this value is too large then the first part of the search (picking the
right partition) will be slow. If this value is too small then the second
part of the search (searching within a partition) will be slow.
max_iterations: int, default 50
Maximum number of iterations used to train kmeans.
When training an IVF index we use kmeans to calculate the partitions.
This parameter controls how many iterations of kmeans to run.
Increasing this might improve the quality of the index but in most cases
these extra iterations have diminishing returns.
The default value is 50.
sample_rate: int, default 256
The rate used to calculate the number of training vectors for kmeans.
When an IVF index is trained, we need to calculate partitions. These
are groups of vectors that are similar to each other. To do this we use an
algorithm called kmeans.
Running kmeans on a large dataset can be slow. To speed this up we run
kmeans on a random sample of the data. This parameter controls the size of
the sample. The total number of vectors used to train the index is
`sample_rate * num_partitions`.
Increasing this value might improve the quality of the index but in most
cases the default should be sufficient.
The default value is 256.
"""
distance_type: Literal["l2", "cosine", "dot", "hamming"] = "l2"
num_partitions: Optional[int] = None
max_iterations: int = 50
sample_rate: int = 256
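A sketch of the new config in use, mirroring the binary-vector test later in this diff; IVF_FLAT is the only index type that accepts hamming distance:

# Binary vectors are stored as fixed-size lists of uint8.
await table.create_index(
    "vector", config=IvfFlat(distance_type="hamming", num_partitions=10)
)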
@dataclass
class IvfPq: class IvfPq:
"""Describes an IVF PQ Index """Describes an IVF PQ Index
@@ -387,120 +469,103 @@ class IvfPq:
Note that training an IVF PQ index on a large dataset is a slow operation and Note that training an IVF PQ index on a large dataset is a slow operation and
currently is also a memory intensive operation. currently is also a memory intensive operation.
Attributes
----------
distance_type: str, default "L2"
The distance metric used to train the index
This is used when training the index to calculate the IVF partitions
(vectors are grouped in partitions with similar vectors according to this
distance type) and to calculate a subvector's code during quantization.
The distance type used to train an index MUST match the distance type used
to search the index. Failure to do so will yield inaccurate results.
The following distance types are available:
"l2" - Euclidean distance. This is a very common distance metric that
accounts for both magnitude and direction when determining the distance
between vectors. L2 distance has a range of [0, ∞).
"cosine" - Cosine distance. Cosine distance is a distance metric
calculated from the cosine similarity between two vectors. Cosine
similarity is a measure of similarity between two non-zero vectors of an
inner product space. It is defined to equal the cosine of the angle
between them. Unlike L2, the cosine distance is not affected by the
magnitude of the vectors. Cosine distance has a range of [0, 2].
Note: the cosine distance is undefined when one (or both) of the vectors
are all zeros (there is no direction). These vectors are invalid and may
never be returned from a vector search.
"dot" - Dot product. Dot distance is the dot product of two vectors. Dot
distance has a range of (-∞, ∞). If the vectors are normalized (i.e. their
L2 norm is 1), then dot distance is equivalent to the cosine distance.
num_partitions: int, default sqrt(num_rows)
The number of IVF partitions to create.
This value should generally scale with the number of rows in the dataset.
By default the number of partitions is the square root of the number of
rows.
If this value is too large then the first part of the search (picking the
right partition) will be slow. If this value is too small then the second
part of the search (searching within a partition) will be slow.
num_sub_vectors: int, default is vector dimension / 16
Number of sub-vectors of PQ.
This value controls how much the vector is compressed during the
quantization step. The more sub vectors there are the less the vector is
compressed. The default is the dimension of the vector divided by 16. If
the dimension is not evenly divisible by 16 we use the dimension divided by
8.
The above two cases are highly preferred. Having 8 or 16 values per
subvector allows us to use efficient SIMD instructions.
If the dimension is not divisible by 8 then we use 1 subvector. This is not
ideal and will likely result in poor performance.
num_bits: int, default 8
Number of bits to encode each sub-vector.
This value controls how much the sub-vectors are compressed. The more bits
the more accurate the index but the slower search. The default is 8
bits. Only 4 and 8 are supported.
max_iterations: int, default 50
Maximum number of iterations used to train kmeans.
When training an IVF PQ index we use kmeans to calculate the partitions.
This parameter controls how many iterations of kmeans to run.
Increasing this might improve the quality of the index but in most cases
these extra iterations have diminishing returns.
The default value is 50.
sample_rate: int, default 256
The rate used to calculate the number of training vectors for kmeans.
When an IVF PQ index is trained, we need to calculate partitions. These
are groups of vectors that are similar to each other. To do this we use an
algorithm called kmeans.
Running kmeans on a large dataset can be slow. To speed this up we run
kmeans on a random sample of the data. This parameter controls the size of
the sample. The total number of vectors used to train the index is
`sample_rate * num_partitions`.
Increasing this value might improve the quality of the index but in most
cases the default should be sufficient.
The default value is 256.
""" """
def __init__( distance_type: Literal["l2", "cosine", "dot"] = "l2"
self, num_partitions: Optional[int] = None
*, num_sub_vectors: Optional[int] = None
distance_type: Optional[str] = None, num_bits: int = 8
num_partitions: Optional[int] = None, max_iterations: int = 50
num_sub_vectors: Optional[int] = None, sample_rate: int = 256
num_bits: Optional[int] = None,
max_iterations: Optional[int] = None,
sample_rate: Optional[int] = None,
):
"""
Create an IVF PQ index config
Parameters
----------
distance_type: str, default "L2"
The distance metric used to train the index
This is used when training the index to calculate the IVF partitions
(vectors are grouped in partitions with similar vectors according to this
distance type) and to calculate a subvector's code during quantization.
The distance type used to train an index MUST match the distance type used
to search the index. Failure to do so will yield inaccurate results.
The following distance types are available:
"l2" - Euclidean distance. This is a very common distance metric that
accounts for both magnitude and direction when determining the distance
between vectors. L2 distance has a range of [0, ∞).
"cosine" - Cosine distance. Cosine distance is a distance metric
calculated from the cosine similarity between two vectors. Cosine
similarity is a measure of similarity between two non-zero vectors of an
inner product space. It is defined to equal the cosine of the angle
between them. Unlike L2, the cosine distance is not affected by the
magnitude of the vectors. Cosine distance has a range of [0, 2].
Note: the cosine distance is undefined when one (or both) of the vectors
are all zeros (there is no direction). These vectors are invalid and may
never be returned from a vector search.
"dot" - Dot product. Dot distance is the dot product of two vectors. Dot
distance has a range of (-∞, ∞). If the vectors are normalized (i.e. their
L2 norm is 1), then dot distance is equivalent to the cosine distance.
num_partitions: int, default sqrt(num_rows)
The number of IVF partitions to create.
This value should generally scale with the number of rows in the dataset.
By default the number of partitions is the square root of the number of
rows.
If this value is too large then the first part of the search (picking the
right partition) will be slow. If this value is too small then the second
part of the search (searching within a partition) will be slow.
num_sub_vectors: int, default is vector dimension / 16
Number of sub-vectors of PQ.
This value controls how much the vector is compressed during the
quantization step. The more sub vectors there are the less the vector is
compressed. The default is the dimension of the vector divided by 16. If
the dimension is not evenly divisible by 16 we use the dimension divided by
8.
The above two cases are highly preferred. Having 8 or 16 values per
subvector allows us to use efficient SIMD instructions.
If the dimension is not divisible by 8 then we use 1 subvector. This is not
ideal and will likely result in poor performance.
num_bits: int, default 8
Number of bits to encode each sub-vector.
This value controls how much the sub-vectors are compressed. The more bits
the more accurate the index but the slower search. The default is 8
bits. Only 4 and 8 are supported.
max_iterations: int, default 50
Maximum number of iterations used to train kmeans.
When training an IVF PQ index we use kmeans to calculate the partitions.
This parameter controls how many iterations of kmeans to run.
Increasing this might improve the quality of the index but in most cases
these extra iterations have diminishing returns.
The default value is 50.
sample_rate: int, default 256
The rate used to calculate the number of training vectors for kmeans.
When an IVF PQ index is trained, we need to calculate partitions. These
are groups of vectors that are similar to each other. To do this we use an
algorithm called kmeans.
Running kmeans on a large dataset can be slow. To speed this up we run
kmeans on a random sample of the data. This parameter controls the size of
the sample. The total number of vectors used to train the index is
`sample_rate * num_partitions`.
Increasing this value might improve the quality of the index but in most
cases the default should be sufficient.
The default value is 256.
"""
if distance_type is not None:
distance_type = distance_type.lower()
self._inner = LanceDbIndex.ivf_pq(
distance_type=distance_type,
num_partitions=num_partitions,
num_sub_vectors=num_sub_vectors,
num_bits=num_bits,
max_iterations=max_iterations,
sample_rate=sample_rate,
)
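For comparison, the rewritten IvfPq in dataclass form; the values below are illustrative, not defaults from this diff:

# num_sub_vectors should evenly divide the vector dimension (ideally dim / 16).
config = IvfPq(distance_type="cosine", num_partitions=256, num_sub_vectors=16)
await table.create_index("vector", config=config)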
__all__ = ["BTree", "IvfPq", "IndexConfig"] __all__ = ["BTree", "IvfFlat", "IvfPq", "HnswPq", "HnswSq", "IndexConfig"]


@@ -126,6 +126,9 @@ class Query(pydantic.BaseModel):
ef: Optional[int] = None ef: Optional[int] = None
# Default is True. Set to False to force a brute-force (flat) search.
use_index: bool = True
class LanceQueryBuilder(ABC): class LanceQueryBuilder(ABC):
"""An abstract query builder. Subclasses are defined for vector search, """An abstract query builder. Subclasses are defined for vector search,
@@ -253,6 +256,7 @@ class LanceQueryBuilder(ABC):
self._vector = None self._vector = None
self._text = None self._text = None
self._ef = None self._ef = None
self._use_index = True
@deprecation.deprecated( @deprecation.deprecated(
deprecated_in="0.3.1", deprecated_in="0.3.1",
@@ -511,6 +515,7 @@ class LanceQueryBuilder(ABC):
"metric": self._metric, "metric": self._metric,
"nprobes": self._nprobes, "nprobes": self._nprobes,
"refine_factor": self._refine_factor, "refine_factor": self._refine_factor,
"use_index": self._use_index,
}, },
prefilter=self._prefilter, prefilter=self._prefilter,
filter=self._str_query, filter=self._str_query,
@@ -729,6 +734,7 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
offset=self._offset, offset=self._offset,
fast_search=self._fast_search, fast_search=self._fast_search,
ef=self._ef, ef=self._ef,
use_index=self._use_index,
) )
result_set = self._table._execute_query(query, batch_size) result_set = self._table._execute_query(query, batch_size)
if self._reranker is not None: if self._reranker is not None:
@@ -802,6 +808,24 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
self._str_query = query_string if query_string is not None else self._str_query self._str_query = query_string if query_string is not None else self._str_query
return self return self
def bypass_vector_index(self) -> LanceVectorQueryBuilder:
"""
If this is called then any vector index is skipped.
An exhaustive (flat) search will be performed. The query vector will
be compared to every vector in the table. At high scales this can be
expensive. However, this is often still useful. For example, skipping
the vector index can give you ground truth results which you can use to
calculate your recall to select an appropriate value for nprobes.
Returns
-------
LanceVectorQueryBuilder
The LanceVectorQueryBuilder object.
"""
self._use_index = False
return self
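A sketch mirroring the sync test added later in this diff: bypassing the index switches the query plan from an ANN scan to an exhaustive KNN scan.

assert "ANN" in table.search(query).explain_plan(verbose=True)
assert "KNN" in (
    table.search(query).bypass_vector_index().explain_plan(verbose=True)
)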
class LanceFtsQueryBuilder(LanceQueryBuilder): class LanceFtsQueryBuilder(LanceQueryBuilder):
"""A builder for full text search for LanceDB.""" """A builder for full text search for LanceDB."""
@@ -1108,6 +1132,8 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
self._vector_query.refine_factor(self._refine_factor) self._vector_query.refine_factor(self._refine_factor)
if self._ef: if self._ef:
self._vector_query.ef(self._ef) self._vector_query.ef(self._ef)
if not self._use_index:
self._vector_query.bypass_vector_index()
with ThreadPoolExecutor() as executor: with ThreadPoolExecutor() as executor:
fts_future = executor.submit(self._fts_query.with_row_id(True).to_arrow) fts_future = executor.submit(self._fts_query.with_row_id(True).to_arrow)
@@ -1323,6 +1349,24 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
self._text = text self._text = text
return self return self
def bypass_vector_index(self) -> LanceHybridQueryBuilder:
"""
If this is called then any vector index is skipped.
An exhaustive (flat) search will be performed. The query vector will
be compared to every vector in the table. At high scales this can be
expensive. However, this is often still useful. For example, skipping
the vector index can give you ground truth results which you can use to
calculate your recall to select an appropriate value for nprobes.
Returns
-------
LanceHybridQueryBuilder
The LanceHybridQueryBuilder object.
"""
self._use_index = False
return self
class AsyncQueryBase(object): class AsyncQueryBase(object):
def __init__(self, inner: Union[LanceQuery | LanceVectorQuery]): def __init__(self, inner: Union[LanceQuery | LanceVectorQuery]):


@@ -121,7 +121,13 @@ class RemoteDBConnection(DBConnection):
return LOOP.run(self._conn.table_names(start_after=page_token, limit=limit)) return LOOP.run(self._conn.table_names(start_after=page_token, limit=limit))
@override @override
def open_table(self, name: str, *, index_cache_size: Optional[int] = None) -> Table: def open_table(
self,
name: str,
*,
storage_options: Optional[Dict[str, str]] = None,
index_cache_size: Optional[int] = None,
) -> Table:
"""Open a Lance Table in the database. """Open a Lance Table in the database.
Parameters Parameters


@@ -15,7 +15,10 @@ from datetime import timedelta
import logging import logging
from functools import cached_property from functools import cached_property
from typing import Dict, Iterable, List, Optional, Union, Literal from typing import Dict, Iterable, List, Optional, Union, Literal
import warnings
from lancedb._lancedb import IndexConfig
from lancedb.embeddings.base import EmbeddingFunctionConfig
from lancedb.index import FTS, BTree, Bitmap, HnswPq, HnswSq, IvfPq, LabelList from lancedb.index import FTS, BTree, Bitmap, HnswPq, HnswSq, IvfPq, LabelList
from lancedb.remote.db import LOOP from lancedb.remote.db import LOOP
import pyarrow as pa import pyarrow as pa
@@ -25,7 +28,7 @@ from lancedb.merge import LanceMergeInsertBuilder
from lancedb.embeddings import EmbeddingFunctionRegistry from lancedb.embeddings import EmbeddingFunctionRegistry
from ..query import LanceVectorQueryBuilder, LanceQueryBuilder from ..query import LanceVectorQueryBuilder, LanceQueryBuilder
from ..table import AsyncTable, Query, Table from ..table import AsyncTable, IndexStatistics, Query, Table
class RemoteTable(Table): class RemoteTable(Table):
@@ -62,7 +65,7 @@ class RemoteTable(Table):
return LOOP.run(self._table.version()) return LOOP.run(self._table.version())
@cached_property @cached_property
def embedding_functions(self) -> dict: def embedding_functions(self) -> Dict[str, EmbeddingFunctionConfig]:
""" """
Get the embedding functions for the table Get the embedding functions for the table
@@ -94,11 +97,11 @@ class RemoteTable(Table):
def checkout_latest(self): def checkout_latest(self):
return LOOP.run(self._table.checkout_latest()) return LOOP.run(self._table.checkout_latest())
def list_indices(self): def list_indices(self) -> Iterable[IndexConfig]:
"""List all the indices on the table""" """List all the indices on the table"""
return LOOP.run(self._table.list_indices()) return LOOP.run(self._table.list_indices())
def index_stats(self, index_uuid: str): def index_stats(self, index_uuid: str) -> Optional[IndexStatistics]:
"""List all the stats of a specified index""" """List all the stats of a specified index"""
return LOOP.run(self._table.index_stats(index_uuid)) return LOOP.run(self._table.index_stats(index_uuid))
@@ -479,16 +482,28 @@ class RemoteTable(Table):
) )
def cleanup_old_versions(self, *_): def cleanup_old_versions(self, *_):
"""cleanup_old_versions() is not supported on the LanceDB cloud""" """
raise NotImplementedError( cleanup_old_versions() is a no-op on LanceDB Cloud.
"cleanup_old_versions() is not supported on the LanceDB cloud"
Tables are automatically cleaned up and optimized.
"""
warnings.warn(
"cleanup_old_versions() is a no-op on LanceDB Cloud. "
"Tables are automatically cleaned up and optimized."
) )
pass
def compact_files(self, *_): def compact_files(self, *_):
"""compact_files() is not supported on the LanceDB cloud""" """
raise NotImplementedError( compact_files() is a no-op on LanceDB Cloud.
"compact_files() is not supported on the LanceDB cloud"
Tables are automatically compacted and optimized.
"""
warnings.warn(
"compact_files() is a no-op on LanceDB Cloud. "
"Tables are automatically compacted and optimized."
) )
pass
def optimize( def optimize(
self, self,
@@ -496,12 +511,16 @@ class RemoteTable(Table):
cleanup_older_than: Optional[timedelta] = None, cleanup_older_than: Optional[timedelta] = None,
delete_unverified: bool = False, delete_unverified: bool = False,
): ):
"""optimize() is not supported on the LanceDB cloud. """
Indices are optimized automatically.""" optimize() is a no-op on LanceDB Cloud.
raise NotImplementedError(
"optimize() is not supported on the LanceDB cloud. " Indices are optimized automatically.
"""
warnings.warn(
"optimize() is a no-op on LanceDB Cloud. "
"Indices are optimized automatically." "Indices are optimized automatically."
) )
pass
def count_rows(self, filter: Optional[str] = None) -> int: def count_rows(self, filter: Optional[str] = None) -> int:
return LOOP.run(self._table.count_rows(filter)) return LOOP.run(self._table.count_rows(filter))
@@ -515,6 +534,16 @@ class RemoteTable(Table):
def drop_columns(self, columns: Iterable[str]): def drop_columns(self, columns: Iterable[str]):
return LOOP.run(self._table.drop_columns(columns)) return LOOP.run(self._table.drop_columns(columns))
def uses_v2_manifest_paths(self) -> bool:
raise NotImplementedError(
"uses_v2_manifest_paths() is not supported on the LanceDB Cloud"
)
def migrate_v2_manifest_paths(self):
raise NotImplementedError(
"migrate_v2_manifest_paths() is not supported on the LanceDB Cloud"
)
def add_index(tbl: pa.Table, i: int) -> pa.Table: def add_index(tbl: pa.Table, i: int) -> pa.Table:
return tbl.add_column( return tbl.add_column(

File diff suppressed because it is too large.


@@ -314,3 +314,15 @@ def deprecated(func):
def validate_table_name(name: str): def validate_table_name(name: str):
"""Verify the table name is valid.""" """Verify the table name is valid."""
native_validate_table_name(name) native_validate_table_name(name)
def add_note(base_exception: BaseException, note: str):
if hasattr(base_exception, "add_note"):
base_exception.add_note(note)
elif isinstance(base_exception.args[0], str):
base_exception.args = (
base_exception.args[0] + "\n" + note,
*base_exception.args[1:],
)
else:
raise ValueError("Cannot add note to exception")


@@ -0,0 +1,32 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors
from datetime import timedelta
from lancedb.db import AsyncConnection, DBConnection
import lancedb
import pytest
import pytest_asyncio
# Use an in-memory database for most tests.
@pytest.fixture
def mem_db() -> DBConnection:
return lancedb.connect("memory://")
# Use a temporary directory when we need to inspect the database files.
@pytest.fixture
def tmp_db(tmp_path) -> DBConnection:
return lancedb.connect(tmp_path)
@pytest_asyncio.fixture
async def mem_db_async() -> AsyncConnection:
return await lancedb.connect_async("memory://")
@pytest_asyncio.fixture
async def tmp_db_async(tmp_path) -> AsyncConnection:
return await lancedb.connect_async(
tmp_path, read_consistency_interval=timedelta(seconds=0)
)


@@ -75,6 +75,22 @@ def test_quickstart():
for _ in range(1000) for _ in range(1000)
] ]
) )
# --8<-- [start:add_columns]
tbl.add_columns({"double_price": "cast((price * 2) as float)"})
# --8<-- [end:add_columns]
# --8<-- [start:alter_columns]
tbl.alter_columns(
{
"path": "double_price",
"rename": "dbl_price",
"data_type": pa.float64(),
"nullable": True,
}
)
# --8<-- [end:alter_columns]
# --8<-- [start:drop_columns]
tbl.drop_columns(["dbl_price"])
# --8<-- [end:drop_columns]
# --8<-- [start:create_index] # --8<-- [start:create_index]
# Synchronous client # Synchronous client
tbl.create_index(num_sub_vectors=1) tbl.create_index(num_sub_vectors=1)


@@ -0,0 +1,44 @@
import shutil
# --8<-- [start:imports]
import lancedb
import numpy as np
import pytest
# --8<-- [end:imports]
shutil.rmtree("data/binary_lancedb", ignore_errors=True)
def test_binary_vector():
# --8<-- [start:sync_binary_vector]
db = lancedb.connect("data/binary_lancedb")
data = [
{
"id": i,
"vector": np.random.randint(0, 256, size=16),
}
for i in range(1024)
]
tbl = db.create_table("my_binary_vectors", data=data)
query = np.random.randint(0, 256, size=16)
tbl.search(query).to_arrow()
# --8<-- [end:sync_binary_vector]
db.drop_table("my_binary_vectors")
@pytest.mark.asyncio
async def test_binary_vector_async():
# --8<-- [start:async_binary_vector]
db = await lancedb.connect_async("data/binary_lancedb")
data = [
{
"id": i,
"vector": np.random.randint(0, 256, size=16),
}
for i in range(1024)
]
tbl = await db.create_table("my_binary_vectors", data=data)
query = np.random.randint(0, 256, size=16)
await tbl.query().nearest_to(query).to_arrow()
# --8<-- [end:async_binary_vector]
await db.drop_table("my_binary_vectors")


@@ -98,7 +98,7 @@ def test_ingest_pd(tmp_path):
assert db.open_table("test").name == db["test"].name assert db.open_table("test").name == db["test"].name
def test_ingest_iterator(tmp_path): def test_ingest_iterator(mem_db: lancedb.DBConnection):
class PydanticSchema(LanceModel): class PydanticSchema(LanceModel):
vector: Vector(2) vector: Vector(2)
item: str item: str
@@ -156,8 +156,7 @@ def test_ingest_iterator(tmp_path):
] ]
def run_tests(schema): def run_tests(schema):
db = lancedb.connect(tmp_path) tbl = mem_db.create_table("table2", make_batches(), schema=schema)
tbl = db.create_table("table2", make_batches(), schema=schema, mode="overwrite")
tbl.to_pandas() tbl.to_pandas()
assert tbl.search([3.1, 4.1]).limit(1).to_pandas()["_distance"][0] == 0.0 assert tbl.search([3.1, 4.1]).limit(1).to_pandas()["_distance"][0] == 0.0
assert tbl.search([5.9, 26.5]).limit(1).to_pandas()["_distance"][0] == 0.0 assert tbl.search([5.9, 26.5]).limit(1).to_pandas()["_distance"][0] == 0.0
@@ -165,15 +164,14 @@ def test_ingest_iterator(tmp_path):
tbl.add(make_batches()) tbl.add(make_batches())
assert tbl_len == 50 assert tbl_len == 50
assert len(tbl) == tbl_len * 2 assert len(tbl) == tbl_len * 2
assert len(tbl.list_versions()) == 3 assert len(tbl.list_versions()) == 2
db.drop_database() mem_db.drop_database()
run_tests(arrow_schema) run_tests(arrow_schema)
run_tests(PydanticSchema) run_tests(PydanticSchema)
def test_table_names(tmp_path): def test_table_names(tmp_db: lancedb.DBConnection):
db = lancedb.connect(tmp_path)
data = pd.DataFrame( data = pd.DataFrame(
{ {
"vector": [[3.1, 4.1], [5.9, 26.5]], "vector": [[3.1, 4.1], [5.9, 26.5]],
@@ -181,10 +179,10 @@ def test_table_names(tmp_path):
"price": [10.0, 20.0], "price": [10.0, 20.0],
} }
) )
db.create_table("test2", data=data) tmp_db.create_table("test2", data=data)
db.create_table("test1", data=data) tmp_db.create_table("test1", data=data)
db.create_table("test3", data=data) tmp_db.create_table("test3", data=data)
assert db.table_names() == ["test1", "test2", "test3"] assert tmp_db.table_names() == ["test1", "test2", "test3"]
@pytest.mark.asyncio @pytest.mark.asyncio
@@ -209,8 +207,7 @@ async def test_table_names_async(tmp_path):
assert await db.table_names(start_after="test1") == ["test2", "test3"] assert await db.table_names(start_after="test1") == ["test2", "test3"]
def test_create_mode(tmp_path): def test_create_mode(tmp_db: lancedb.DBConnection):
db = lancedb.connect(tmp_path)
data = pd.DataFrame( data = pd.DataFrame(
{ {
"vector": [[3.1, 4.1], [5.9, 26.5]], "vector": [[3.1, 4.1], [5.9, 26.5]],
@@ -218,10 +215,10 @@ def test_create_mode(tmp_path):
"price": [10.0, 20.0], "price": [10.0, 20.0],
} }
) )
db.create_table("test", data=data) tmp_db.create_table("test", data=data)
with pytest.raises(Exception): with pytest.raises(Exception):
db.create_table("test", data=data) tmp_db.create_table("test", data=data)
new_data = pd.DataFrame( new_data = pd.DataFrame(
{ {
@@ -230,13 +227,11 @@ def test_create_mode(tmp_path):
"price": [10.0, 20.0], "price": [10.0, 20.0],
} }
) )
tbl = db.create_table("test", data=new_data, mode="overwrite") tbl = tmp_db.create_table("test", data=new_data, mode="overwrite")
assert tbl.to_pandas().item.tolist() == ["fizz", "buzz"] assert tbl.to_pandas().item.tolist() == ["fizz", "buzz"]
def test_create_table_from_iterator(tmp_path): def test_create_table_from_iterator(mem_db: lancedb.DBConnection):
db = lancedb.connect(tmp_path)
def gen_data(): def gen_data():
for _ in range(10): for _ in range(10):
yield pa.RecordBatch.from_arrays( yield pa.RecordBatch.from_arrays(
@@ -248,14 +243,12 @@ def test_create_table_from_iterator(tmp_path):
["vector", "item", "price"], ["vector", "item", "price"],
) )
table = db.create_table("test", data=gen_data()) table = mem_db.create_table("test", data=gen_data())
assert table.count_rows() == 10 assert table.count_rows() == 10
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_create_table_from_iterator_async(tmp_path): async def test_create_table_from_iterator_async(mem_db_async: lancedb.AsyncConnection):
db = await lancedb.connect_async(tmp_path)
def gen_data(): def gen_data():
for _ in range(10): for _ in range(10):
yield pa.RecordBatch.from_arrays( yield pa.RecordBatch.from_arrays(
@@ -267,12 +260,11 @@ async def test_create_table_from_iterator_async(tmp_path):
["vector", "item", "price"], ["vector", "item", "price"],
) )
table = await db.create_table("test", data=gen_data()) table = await mem_db_async.create_table("test", data=gen_data())
assert await table.count_rows() == 10 assert await table.count_rows() == 10
def test_create_exist_ok(tmp_path): def test_create_exist_ok(tmp_db: lancedb.DBConnection):
db = lancedb.connect(tmp_path)
data = pd.DataFrame( data = pd.DataFrame(
{ {
"vector": [[3.1, 4.1], [5.9, 26.5]], "vector": [[3.1, 4.1], [5.9, 26.5]],
@@ -280,13 +272,13 @@ def test_create_exist_ok(tmp_path):
"price": [10.0, 20.0], "price": [10.0, 20.0],
} }
) )
tbl = db.create_table("test", data=data) tbl = tmp_db.create_table("test", data=data)
with pytest.raises(OSError): with pytest.raises(ValueError):
db.create_table("test", data=data) tmp_db.create_table("test", data=data)
# open the table but don't add more rows # open the table but don't add more rows
tbl2 = db.create_table("test", data=data, exist_ok=True) tbl2 = tmp_db.create_table("test", data=data, exist_ok=True)
assert tbl.name == tbl2.name assert tbl.name == tbl2.name
assert tbl.schema == tbl2.schema assert tbl.schema == tbl2.schema
assert len(tbl) == len(tbl2) assert len(tbl) == len(tbl2)
@@ -298,7 +290,7 @@ def test_create_exist_ok(tmp_path):
pa.field("price", pa.float64()), pa.field("price", pa.float64()),
] ]
) )
tbl3 = db.create_table("test", schema=schema, exist_ok=True) tbl3 = tmp_db.create_table("test", schema=schema, exist_ok=True)
assert tbl3.schema == schema assert tbl3.schema == schema
bad_schema = pa.schema( bad_schema = pa.schema(
@@ -310,7 +302,7 @@ def test_create_exist_ok(tmp_path):
] ]
) )
with pytest.raises(ValueError): with pytest.raises(ValueError):
db.create_table("test", schema=bad_schema, exist_ok=True) tmp_db.create_table("test", schema=bad_schema, exist_ok=True)
@pytest.mark.asyncio @pytest.mark.asyncio
@@ -325,26 +317,24 @@ async def test_connect(tmp_path):
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_close(tmp_path): async def test_close(mem_db_async: lancedb.AsyncConnection):
db = await lancedb.connect_async(tmp_path) assert mem_db_async.is_open()
assert db.is_open() mem_db_async.close()
db.close() assert not mem_db_async.is_open()
assert not db.is_open()
with pytest.raises(RuntimeError, match="is closed"): with pytest.raises(RuntimeError, match="is closed"):
await db.table_names() await mem_db_async.table_names()
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_context_manager(tmp_path): async def test_context_manager():
with await lancedb.connect_async(tmp_path) as db: with await lancedb.connect_async("memory://") as db:
assert db.is_open() assert db.is_open()
assert not db.is_open() assert not db.is_open()
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_create_mode_async(tmp_path): async def test_create_mode_async(tmp_db_async: lancedb.AsyncConnection):
db = await lancedb.connect_async(tmp_path)
data = pd.DataFrame( data = pd.DataFrame(
{ {
"vector": [[3.1, 4.1], [5.9, 26.5]], "vector": [[3.1, 4.1], [5.9, 26.5]],
@@ -352,10 +342,10 @@ async def test_create_mode_async(tmp_path):
"price": [10.0, 20.0], "price": [10.0, 20.0],
} }
) )
await db.create_table("test", data=data) await tmp_db_async.create_table("test", data=data)
with pytest.raises(ValueError, match="already exists"): with pytest.raises(ValueError, match="already exists"):
await db.create_table("test", data=data) await tmp_db_async.create_table("test", data=data)
new_data = pd.DataFrame( new_data = pd.DataFrame(
{ {
@@ -364,15 +354,14 @@ async def test_create_mode_async(tmp_path):
"price": [10.0, 20.0], "price": [10.0, 20.0],
} }
) )
_tbl = await db.create_table("test", data=new_data, mode="overwrite") _tbl = await tmp_db_async.create_table("test", data=new_data, mode="overwrite")
# MIGRATION: to_pandas() is not available in async # MIGRATION: to_pandas() is not available in async
# assert tbl.to_pandas().item.tolist() == ["fizz", "buzz"] # assert tbl.to_pandas().item.tolist() == ["fizz", "buzz"]
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_create_exist_ok_async(tmp_path): async def test_create_exist_ok_async(tmp_db_async: lancedb.AsyncConnection):
db = await lancedb.connect_async(tmp_path)
data = pd.DataFrame( data = pd.DataFrame(
{ {
"vector": [[3.1, 4.1], [5.9, 26.5]], "vector": [[3.1, 4.1], [5.9, 26.5]],
@@ -380,13 +369,13 @@ async def test_create_exist_ok_async(tmp_path):
"price": [10.0, 20.0], "price": [10.0, 20.0],
} }
) )
tbl = await db.create_table("test", data=data) tbl = await tmp_db_async.create_table("test", data=data)
with pytest.raises(ValueError, match="already exists"): with pytest.raises(ValueError, match="already exists"):
await db.create_table("test", data=data) await tmp_db_async.create_table("test", data=data)
# open the table but don't add more rows # open the table but don't add more rows
tbl2 = await db.create_table("test", data=data, exist_ok=True) tbl2 = await tmp_db_async.create_table("test", data=data, exist_ok=True)
assert tbl.name == tbl2.name assert tbl.name == tbl2.name
assert await tbl.schema() == await tbl2.schema() assert await tbl.schema() == await tbl2.schema()
@@ -397,7 +386,7 @@ async def test_create_exist_ok_async(tmp_path):
pa.field("price", pa.float64()), pa.field("price", pa.float64()),
] ]
) )
tbl3 = await db.create_table("test", schema=schema, exist_ok=True) tbl3 = await tmp_db_async.create_table("test", schema=schema, exist_ok=True)
assert await tbl3.schema() == schema assert await tbl3.schema() == schema
# Migration: When creating a table, but the table already exists, but # Migration: When creating a table, but the table already exists, but
@@ -448,13 +437,12 @@ async def test_create_table_v2_manifest_paths_async(tmp_path):
assert re.match(r"\d{20}\.manifest", manifest) assert re.match(r"\d{20}\.manifest", manifest)
def test_open_table_sync(tmp_path): def test_open_table_sync(tmp_db: lancedb.DBConnection):
db = lancedb.connect(tmp_path) tmp_db.create_table("test", data=[{"id": 0}])
db.create_table("test", data=[{"id": 0}]) assert tmp_db.open_table("test").count_rows() == 1
assert db.open_table("test").count_rows() == 1 assert tmp_db.open_table("test", index_cache_size=0).count_rows() == 1
assert db.open_table("test", index_cache_size=0).count_rows() == 1 with pytest.raises(ValueError, match="Table 'does_not_exist' was not found"):
with pytest.raises(FileNotFoundError, match="does not exist"): tmp_db.open_table("does_not_exist")
db.open_table("does_not_exist")
@pytest.mark.asyncio @pytest.mark.asyncio
@@ -494,8 +482,7 @@ async def test_open_table(tmp_path):
await db.open_table("does_not_exist") await db.open_table("does_not_exist")
def test_delete_table(tmp_path): def test_delete_table(tmp_db: lancedb.DBConnection):
db = lancedb.connect(tmp_path)
data = pd.DataFrame( data = pd.DataFrame(
{ {
"vector": [[3.1, 4.1], [5.9, 26.5]], "vector": [[3.1, 4.1], [5.9, 26.5]],
@@ -503,26 +490,51 @@ def test_delete_table(tmp_path):
"price": [10.0, 20.0], "price": [10.0, 20.0],
} }
) )
db.create_table("test", data=data) tmp_db.create_table("test", data=data)
with pytest.raises(Exception): with pytest.raises(Exception):
db.create_table("test", data=data) tmp_db.create_table("test", data=data)
assert db.table_names() == ["test"] assert tmp_db.table_names() == ["test"]
db.drop_table("test") tmp_db.drop_table("test")
assert db.table_names() == [] assert tmp_db.table_names() == []
db.create_table("test", data=data) tmp_db.create_table("test", data=data)
assert db.table_names() == ["test"] assert tmp_db.table_names() == ["test"]
# dropping a table that does not exist should pass # dropping a table that does not exist should pass
# if ignore_missing=True # if ignore_missing=True
db.drop_table("does_not_exist", ignore_missing=True) tmp_db.drop_table("does_not_exist", ignore_missing=True)
def test_drop_database(tmp_path): @pytest.mark.asyncio
db = lancedb.connect(tmp_path) async def test_delete_table_async(tmp_db: lancedb.DBConnection):
data = pd.DataFrame(
{
"vector": [[3.1, 4.1], [5.9, 26.5]],
"item": ["foo", "bar"],
"price": [10.0, 20.0],
}
)
tmp_db.create_table("test", data=data)
with pytest.raises(Exception):
tmp_db.create_table("test", data=data)
assert tmp_db.table_names() == ["test"]
tmp_db.drop_table("test")
assert tmp_db.table_names() == []
tmp_db.create_table("test", data=data)
assert tmp_db.table_names() == ["test"]
tmp_db.drop_table("does_not_exist", ignore_missing=True)
def test_drop_database(tmp_db: lancedb.DBConnection):
data = pd.DataFrame( data = pd.DataFrame(
{ {
"vector": [[3.1, 4.1], [5.9, 26.5]], "vector": [[3.1, 4.1], [5.9, 26.5]],
@@ -537,51 +549,50 @@ def test_drop_database(tmp_path):
"price": [12.0, 17.0], "price": [12.0, 17.0],
} }
) )
db.create_table("test", data=data) tmp_db.create_table("test", data=data)
with pytest.raises(Exception): with pytest.raises(Exception):
db.create_table("test", data=data) tmp_db.create_table("test", data=data)
assert db.table_names() == ["test"] assert tmp_db.table_names() == ["test"]
db.create_table("new_test", data=new_data) tmp_db.create_table("new_test", data=new_data)
db.drop_database() tmp_db.drop_database()
assert db.table_names() == [] assert tmp_db.table_names() == []
# it should pass when no tables are present # it should pass when no tables are present
db.create_table("test", data=new_data) tmp_db.create_table("test", data=new_data)
db.drop_table("test") tmp_db.drop_table("test")
assert db.table_names() == [] assert tmp_db.table_names() == []
db.drop_database() tmp_db.drop_database()
assert db.table_names() == [] assert tmp_db.table_names() == []
# creating an empty database with schema # creating an empty database with schema
schema = pa.schema([pa.field("vector", pa.list_(pa.float32(), list_size=2))]) schema = pa.schema([pa.field("vector", pa.list_(pa.float32(), list_size=2))])
db.create_table("empty_table", schema=schema) tmp_db.create_table("empty_table", schema=schema)
# dropping an empty database should pass # dropping an empty database should pass
db.drop_database() tmp_db.drop_database()
assert db.table_names() == [] assert tmp_db.table_names() == []
def test_empty_or_nonexistent_table(tmp_path): def test_empty_or_nonexistent_table(mem_db: lancedb.DBConnection):
db = lancedb.connect(tmp_path)
with pytest.raises(Exception): with pytest.raises(Exception):
db.create_table("test_with_no_data") mem_db.create_table("test_with_no_data")
with pytest.raises(Exception): with pytest.raises(Exception):
db.open_table("does_not_exist") mem_db.open_table("does_not_exist")
schema = pa.schema([pa.field("a", pa.int64(), nullable=False)]) schema = pa.schema([pa.field("a", pa.int64(), nullable=False)])
test = db.create_table("test", schema=schema) test = mem_db.create_table("test", schema=schema)
class TestModel(LanceModel): class TestModel(LanceModel):
a: int a: int
test2 = db.create_table("test2", schema=TestModel) test2 = mem_db.create_table("test2", schema=TestModel)
assert test.schema == test2.schema assert test.schema == test2.schema
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_create_in_v2_mode(tmp_path): async def test_create_in_v2_mode(mem_db_async: lancedb.AsyncConnection):
def make_data(): def make_data():
for i in range(10): for i in range(10):
yield pa.record_batch([pa.array([x for x in range(1024)])], names=["x"]) yield pa.record_batch([pa.array([x for x in range(1024)])], names=["x"])
@@ -591,10 +602,8 @@ async def test_create_in_v2_mode(tmp_path):
schema = pa.schema([pa.field("x", pa.int64())]) schema = pa.schema([pa.field("x", pa.int64())])
db = await lancedb.connect_async(tmp_path)
# Create table in v1 mode # Create table in v1 mode
tbl = await db.create_table( tbl = await mem_db_async.create_table(
"test", data=make_data(), schema=schema, data_storage_version="legacy" "test", data=make_data(), schema=schema, data_storage_version="legacy"
) )
@@ -610,7 +619,7 @@ async def test_create_in_v2_mode(tmp_path):
assert not await is_in_v2_mode(tbl) assert not await is_in_v2_mode(tbl)
# Create table in v2 mode # Create table in v2 mode
tbl = await db.create_table( tbl = await mem_db_async.create_table(
"test_v2", data=make_data(), schema=schema, use_legacy_format=False "test_v2", data=make_data(), schema=schema, use_legacy_format=False
) )
@@ -622,7 +631,7 @@ async def test_create_in_v2_mode(tmp_path):
assert await is_in_v2_mode(tbl) assert await is_in_v2_mode(tbl)
# Create empty table in v2 mode and add data # Create empty table in v2 mode and add data
tbl = await db.create_table( tbl = await mem_db_async.create_table(
"test_empty_v2", data=None, schema=schema, use_legacy_format=False "test_empty_v2", data=None, schema=schema, use_legacy_format=False
) )
await tbl.add(make_table()) await tbl.add(make_table())
@@ -630,7 +639,7 @@ async def test_create_in_v2_mode(tmp_path):
assert await is_in_v2_mode(tbl) assert await is_in_v2_mode(tbl)
# Create empty table uses v1 mode by default # Create empty table uses v1 mode by default
tbl = await db.create_table( tbl = await mem_db_async.create_table(
"test_empty_v2_default", data=None, schema=schema, data_storage_version="legacy" "test_empty_v2_default", data=None, schema=schema, data_storage_version="legacy"
) )
await tbl.add(make_table()) await tbl.add(make_table())
@@ -638,18 +647,17 @@ async def test_create_in_v2_mode(tmp_path):
assert not await is_in_v2_mode(tbl) assert not await is_in_v2_mode(tbl)
def test_replace_index(tmp_path): def test_replace_index(mem_db: lancedb.DBConnection):
db = lancedb.connect(uri=tmp_path) table = mem_db.create_table(
table = db.create_table(
"test", "test",
[ [
{"vector": np.random.rand(128), "item": "foo", "price": float(i)} {"vector": np.random.rand(32), "item": "foo", "price": float(i)}
for i in range(1000) for i in range(512)
], ],
) )
table.create_index( table.create_index(
num_partitions=2, num_partitions=2,
num_sub_vectors=4, num_sub_vectors=2,
) )
with pytest.raises(Exception): with pytest.raises(Exception):
@@ -660,27 +668,26 @@ def test_replace_index(tmp_path):
) )
table.create_index( table.create_index(
num_partitions=2, num_partitions=1,
num_sub_vectors=4, num_sub_vectors=2,
replace=True, replace=True,
index_cache_size=10, index_cache_size=10,
) )
def test_prefilter_with_index(tmp_path): def test_prefilter_with_index(mem_db: lancedb.DBConnection):
db = lancedb.connect(uri=tmp_path)
data = [ data = [
{"vector": np.random.rand(128), "item": "foo", "price": float(i)} {"vector": np.random.rand(32), "item": "foo", "price": float(i)}
for i in range(1000) for i in range(512)
] ]
sample_key = data[100]["vector"] sample_key = data[100]["vector"]
table = db.create_table( table = mem_db.create_table(
"test", "test",
data, data,
) )
table.create_index( table.create_index(
num_partitions=2, num_partitions=2,
num_sub_vectors=4, num_sub_vectors=2,
) )
table = ( table = (
table.search(sample_key) table.search(sample_key)
@@ -691,13 +698,34 @@ def test_prefilter_with_index(tmp_path):
assert table.num_rows == 1 assert table.num_rows == 1
def test_create_table_with_invalid_names(tmp_path): def test_create_table_with_invalid_names(tmp_db: lancedb.DBConnection):
db = lancedb.connect(uri=tmp_path)
data = [{"vector": np.random.rand(128), "item": "foo"} for i in range(10)] data = [{"vector": np.random.rand(128), "item": "foo"} for i in range(10)]
with pytest.raises(ValueError): with pytest.raises(ValueError):
db.create_table("foo/bar", data) tmp_db.create_table("foo/bar", data)
with pytest.raises(ValueError): with pytest.raises(ValueError):
db.create_table("foo bar", data) tmp_db.create_table("foo bar", data)
with pytest.raises(ValueError): with pytest.raises(ValueError):
db.create_table("foo$$bar", data) tmp_db.create_table("foo$$bar", data)
db.create_table("foo.bar", data) tmp_db.create_table("foo.bar", data)
def test_bypass_vector_index_sync(tmp_db: lancedb.DBConnection):
data = [{"vector": np.random.rand(32)} for _ in range(512)]
sample_key = data[100]["vector"]
table = tmp_db.create_table(
"test",
data,
)
table.create_index(
num_partitions=2,
num_sub_vectors=2,
)
plan_with_index = table.search(sample_key).explain_plan(verbose=True)
assert "ANN" in plan_with_index
plan_without_index = (
table.search(sample_key).bypass_vector_index().explain_plan(verbose=True)
)
assert "KNN" in plan_without_index


@@ -15,10 +15,12 @@ import random
from unittest import mock from unittest import mock
import lancedb as ldb import lancedb as ldb
from lancedb.db import DBConnection
from lancedb.index import FTS from lancedb.index import FTS
import numpy as np import numpy as np
import pandas as pd import pandas as pd
import pytest import pytest
from utils import exception_output
pytest.importorskip("lancedb.fts") pytest.importorskip("lancedb.fts")
tantivy = pytest.importorskip("tantivy") tantivy = pytest.importorskip("tantivy")
@@ -458,3 +460,44 @@ def test_syntax(table):
table.search('the cats OR dogs were not really "pets" at all').phrase_query().limit( table.search('the cats OR dogs were not really "pets" at all').phrase_query().limit(
10 10
).to_list() ).to_list()
def test_language(mem_db: DBConnection):
sentences = [
"Il n'y a que trois routes qui traversent la ville.",
"Je veux prendre la route vers l'est.",
"Je te retrouve au café au bout de la route.",
]
data = [{"text": s} for s in sentences]
table = mem_db.create_table("test", data=data)
with pytest.raises(ValueError) as e:
table.create_fts_index("text", use_tantivy=False, language="klingon")
assert exception_output(e) == (
"ValueError: LanceDB does not support the requested language: 'klingon'\n"
"Supported languages: Arabic, Danish, Dutch, English, Finnish, French, "
"German, Greek, Hungarian, Italian, Norwegian, Portuguese, Romanian, "
"Russian, Spanish, Swedish, Tamil, Turkish"
)
table.create_fts_index(
"text",
use_tantivy=False,
language="French",
stem=True,
ascii_folding=True,
remove_stop_words=True,
)
# Can get "routes" and "route" from the same root
results = table.search("route", query_type="fts").limit(5).to_list()
assert len(results) == 3
# Can find "café", without needing to provide accent
results = table.search("cafe", query_type="fts").limit(5).to_list()
assert len(results) == 1
# Stop words -> no results
results = table.search("la", query_type="fts").limit(5).to_list()
assert len(results) == 0


@@ -8,7 +8,7 @@ import pyarrow as pa
import pytest import pytest
import pytest_asyncio import pytest_asyncio
from lancedb import AsyncConnection, AsyncTable, connect_async from lancedb import AsyncConnection, AsyncTable, connect_async
from lancedb.index import BTree, IvfPq, Bitmap, LabelList, HnswPq, HnswSq from lancedb.index import BTree, IvfFlat, IvfPq, Bitmap, LabelList, HnswPq, HnswSq
@pytest_asyncio.fixture @pytest_asyncio.fixture
@@ -42,6 +42,27 @@ async def some_table(db_async):
) )
@pytest_asyncio.fixture
async def binary_table(db_async):
data = [
{
"id": i,
"vector": [i] * 128,
}
for i in range(NROWS)
]
return await db_async.create_table(
"binary_table",
data,
schema=pa.schema(
[
pa.field("id", pa.int64()),
pa.field("vector", pa.list_(pa.uint8(), 128)),
]
),
)
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_create_scalar_index(some_table: AsyncTable): async def test_create_scalar_index(some_table: AsyncTable):
# Can create # Can create
@@ -143,3 +164,27 @@ async def test_create_hnswsq_index(some_table: AsyncTable):
await some_table.create_index("vector", config=HnswSq(num_partitions=10)) await some_table.create_index("vector", config=HnswSq(num_partitions=10))
indices = await some_table.list_indices() indices = await some_table.list_indices()
assert len(indices) == 1 assert len(indices) == 1
@pytest.mark.asyncio
async def test_create_index_with_binary_vectors(binary_table: AsyncTable):
await binary_table.create_index(
"vector", config=IvfFlat(distance_type="hamming", num_partitions=10)
)
indices = await binary_table.list_indices()
assert len(indices) == 1
assert indices[0].index_type == "IvfFlat"
assert indices[0].columns == ["vector"]
assert indices[0].name == "vector_idx"
stats = await binary_table.index_stats("vector_idx")
assert stats.index_type == "IVF_FLAT"
assert stats.distance_type == "hamming"
assert stats.num_indexed_rows == await binary_table.count_rows()
assert stats.num_unindexed_rows == 0
assert stats.num_indices == 1
# the dataset contains vectors with all values from 0 to 255
for v in range(256):
res = await binary_table.query().nearest_to([v] * 128).to_arrow()
assert res["id"][0].as_py() == v

File diff suppressed because it is too large.


@@ -0,0 +1,11 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors
import pytest
def exception_output(e_info: pytest.ExceptionInfo):
import traceback
# skip traceback part, since it's not worth checking in tests
lines = traceback.format_exception_only(e_info.type, e_info.value)
return "".join(lines).strip()


@@ -58,6 +58,11 @@ impl Connection {
         self.inner.take();
     }

+    #[getter]
+    pub fn uri(&self) -> PyResult<String> {
+        self.get_inner().map(|inner| inner.uri().to_string())
+    }
+
     #[pyo3(signature = (start_after=None, limit=None))]
     pub fn table_names(
         self_: PyRef<'_, Self>,


@@ -12,224 +12,174 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::sync::Mutex;
-
-use lancedb::index::scalar::FtsIndexBuilder;
-use lancedb::{
-    index::{
-        scalar::BTreeIndexBuilder,
-        vector::{IvfHnswPqIndexBuilder, IvfHnswSqIndexBuilder, IvfPqIndexBuilder},
-        Index as LanceDbIndex,
-    },
-    DistanceType,
+use lancedb::index::vector::IvfFlatIndexBuilder;
+use lancedb::index::{
+    scalar::{BTreeIndexBuilder, FtsIndexBuilder, TokenizerConfig},
+    vector::{IvfHnswPqIndexBuilder, IvfHnswSqIndexBuilder, IvfPqIndexBuilder},
+    Index as LanceDbIndex,
 };
 use pyo3::{
-    exceptions::{PyKeyError, PyRuntimeError, PyValueError},
-    pyclass, pymethods, IntoPy, PyObject, PyResult, Python,
+    exceptions::{PyKeyError, PyValueError},
+    intern, pyclass, pymethods,
+    types::PyAnyMethods,
+    Bound, FromPyObject, IntoPy, PyAny, PyObject, PyResult, Python,
 };

 use crate::util::parse_distance_type;

-#[pyclass]
-pub struct Index {
-    inner: Mutex<Option<LanceDbIndex>>,
-}
-
-impl Index {
-    pub fn consume(&self) -> PyResult<LanceDbIndex> {
-        self.inner
-            .lock()
-            .unwrap()
-            .take()
-            .ok_or_else(|| PyRuntimeError::new_err("cannot use an Index more than once"))
-    }
-}
-
-#[pymethods]
-impl Index {
-    #[pyo3(signature = (distance_type=None, num_partitions=None, num_sub_vectors=None,num_bits=None, max_iterations=None, sample_rate=None))]
-    #[staticmethod]
-    pub fn ivf_pq(
-        distance_type: Option<String>,
-        num_partitions: Option<u32>,
-        num_sub_vectors: Option<u32>,
-        num_bits: Option<u32>,
-        max_iterations: Option<u32>,
-        sample_rate: Option<u32>,
-    ) -> PyResult<Self> {
-        let mut ivf_pq_builder = IvfPqIndexBuilder::default();
-        if let Some(distance_type) = distance_type {
-            let distance_type = match distance_type.as_str() {
-                "l2" => Ok(DistanceType::L2),
-                "cosine" => Ok(DistanceType::Cosine),
-                "dot" => Ok(DistanceType::Dot),
-                _ => Err(PyValueError::new_err(format!(
-                    "Invalid distance type '{}'. Must be one of l2, cosine, or dot",
-                    distance_type
-                ))),
-            }?;
-            ivf_pq_builder = ivf_pq_builder.distance_type(distance_type);
-        }
-        if let Some(num_partitions) = num_partitions {
-            ivf_pq_builder = ivf_pq_builder.num_partitions(num_partitions);
-        }
-        if let Some(num_sub_vectors) = num_sub_vectors {
-            ivf_pq_builder = ivf_pq_builder.num_sub_vectors(num_sub_vectors);
-        }
-        if let Some(num_bits) = num_bits {
-            ivf_pq_builder = ivf_pq_builder.num_bits(num_bits);
-        }
-        if let Some(max_iterations) = max_iterations {
-            ivf_pq_builder = ivf_pq_builder.max_iterations(max_iterations);
-        }
-        if let Some(sample_rate) = sample_rate {
-            ivf_pq_builder = ivf_pq_builder.sample_rate(sample_rate);
-        }
-        Ok(Self {
-            inner: Mutex::new(Some(LanceDbIndex::IvfPq(ivf_pq_builder))),
-        })
-    }
-
-    #[staticmethod]
-    pub fn btree() -> PyResult<Self> {
-        Ok(Self {
-            inner: Mutex::new(Some(LanceDbIndex::BTree(BTreeIndexBuilder::default()))),
-        })
-    }
-
-    #[staticmethod]
-    pub fn bitmap() -> PyResult<Self> {
-        Ok(Self {
-            inner: Mutex::new(Some(LanceDbIndex::Bitmap(Default::default()))),
-        })
-    }
-
-    #[staticmethod]
-    pub fn label_list() -> PyResult<Self> {
-        Ok(Self {
-            inner: Mutex::new(Some(LanceDbIndex::LabelList(Default::default()))),
-        })
-    }
-
-    #[pyo3(signature = (with_position=None, base_tokenizer=None, language=None, max_token_length=None, lower_case=None, stem=None, remove_stop_words=None, ascii_folding=None))]
-    #[allow(clippy::too_many_arguments)]
-    #[staticmethod]
-    pub fn fts(
-        with_position: Option<bool>,
-        base_tokenizer: Option<String>,
-        language: Option<String>,
-        max_token_length: Option<usize>,
-        lower_case: Option<bool>,
-        stem: Option<bool>,
-        remove_stop_words: Option<bool>,
-        ascii_folding: Option<bool>,
-    ) -> Self {
-        let mut opts = FtsIndexBuilder::default();
-        if let Some(with_position) = with_position {
-            opts = opts.with_position(with_position);
-        }
-        if let Some(base_tokenizer) = base_tokenizer {
-            opts.tokenizer_configs = opts.tokenizer_configs.base_tokenizer(base_tokenizer);
-        }
-        if let Some(language) = language {
-            opts.tokenizer_configs = opts.tokenizer_configs.language(&language).unwrap();
-        }
-        opts.tokenizer_configs = opts.tokenizer_configs.max_token_length(max_token_length);
-        if let Some(lower_case) = lower_case {
-            opts.tokenizer_configs = opts.tokenizer_configs.lower_case(lower_case);
-        }
-        if let Some(stem) = stem {
-            opts.tokenizer_configs = opts.tokenizer_configs.stem(stem);
-        }
-        if let Some(remove_stop_words) = remove_stop_words {
-            opts.tokenizer_configs = opts.tokenizer_configs.remove_stop_words(remove_stop_words);
-        }
-        if let Some(ascii_folding) = ascii_folding {
-            opts.tokenizer_configs = opts.tokenizer_configs.ascii_folding(ascii_folding);
-        }
-        Self {
-            inner: Mutex::new(Some(LanceDbIndex::FTS(opts))),
-        }
-    }
-
-    #[pyo3(signature = (distance_type=None, num_partitions=None, num_sub_vectors=None,num_bits=None, max_iterations=None, sample_rate=None, m=None, ef_construction=None))]
-    #[staticmethod]
-    #[allow(clippy::too_many_arguments)]
-    pub fn hnsw_pq(
-        distance_type: Option<String>,
-        num_partitions: Option<u32>,
-        num_sub_vectors: Option<u32>,
-        num_bits: Option<u32>,
-        max_iterations: Option<u32>,
-        sample_rate: Option<u32>,
-        m: Option<u32>,
-        ef_construction: Option<u32>,
-    ) -> PyResult<Self> {
-        let mut hnsw_pq_builder = IvfHnswPqIndexBuilder::default();
-        if let Some(distance_type) = distance_type {
-            let distance_type = parse_distance_type(distance_type)?;
-            hnsw_pq_builder = hnsw_pq_builder.distance_type(distance_type);
-        }
-        if let Some(num_partitions) = num_partitions {
-            hnsw_pq_builder = hnsw_pq_builder.num_partitions(num_partitions);
-        }
-        if let Some(num_sub_vectors) = num_sub_vectors {
-            hnsw_pq_builder = hnsw_pq_builder.num_sub_vectors(num_sub_vectors);
-        }
-        if let Some(num_bits) = num_bits {
-            hnsw_pq_builder = hnsw_pq_builder.num_bits(num_bits);
-        }
-        if let Some(max_iterations) = max_iterations {
-            hnsw_pq_builder = hnsw_pq_builder.max_iterations(max_iterations);
-        }
-        if let Some(sample_rate) = sample_rate {
-            hnsw_pq_builder = hnsw_pq_builder.sample_rate(sample_rate);
-        }
-        if let Some(m) = m {
-            hnsw_pq_builder = hnsw_pq_builder.num_edges(m);
-        }
-        if let Some(ef_construction) = ef_construction {
-            hnsw_pq_builder = hnsw_pq_builder.ef_construction(ef_construction);
-        }
-        Ok(Self {
-            inner: Mutex::new(Some(LanceDbIndex::IvfHnswPq(hnsw_pq_builder))),
-        })
-    }
-
-    #[pyo3(signature = (distance_type=None, num_partitions=None, max_iterations=None, sample_rate=None, m=None, ef_construction=None))]
-    #[staticmethod]
-    pub fn hnsw_sq(
-        distance_type: Option<String>,
-        num_partitions: Option<u32>,
-        max_iterations: Option<u32>,
-        sample_rate: Option<u32>,
-        m: Option<u32>,
-        ef_construction: Option<u32>,
-    ) -> PyResult<Self> {
-        let mut hnsw_sq_builder = IvfHnswSqIndexBuilder::default();
-        if let Some(distance_type) = distance_type {
-            let distance_type = parse_distance_type(distance_type)?;
-            hnsw_sq_builder = hnsw_sq_builder.distance_type(distance_type);
-        }
-        if let Some(num_partitions) = num_partitions {
-            hnsw_sq_builder = hnsw_sq_builder.num_partitions(num_partitions);
-        }
-        if let Some(max_iterations) = max_iterations {
-            hnsw_sq_builder = hnsw_sq_builder.max_iterations(max_iterations);
-        }
-        if let Some(sample_rate) = sample_rate {
-            hnsw_sq_builder = hnsw_sq_builder.sample_rate(sample_rate);
-        }
-        if let Some(m) = m {
-            hnsw_sq_builder = hnsw_sq_builder.num_edges(m);
-        }
-        if let Some(ef_construction) = ef_construction {
-            hnsw_sq_builder = hnsw_sq_builder.ef_construction(ef_construction);
-        }
-        Ok(Self {
-            inner: Mutex::new(Some(LanceDbIndex::IvfHnswSq(hnsw_sq_builder))),
-        })
-    }
-}
+pub fn class_name<'a>(ob: &'a Bound<'_, PyAny>) -> PyResult<&'a str> {
+    let full_name: &str = ob
+        .getattr(intern!(ob.py(), "__class__"))?
+        .getattr(intern!(ob.py(), "__name__"))?
+        .extract()?;
+    match full_name.rsplit_once('.') {
+        Some((_, name)) => Ok(name),
+        None => Ok(full_name),
+    }
+}
+
+pub fn extract_index_params(source: &Option<Bound<'_, PyAny>>) -> PyResult<LanceDbIndex> {
+    if let Some(source) = source {
+        match class_name(source)? {
+            "BTree" => Ok(LanceDbIndex::BTree(BTreeIndexBuilder::default())),
+            "Bitmap" => Ok(LanceDbIndex::Bitmap(Default::default())),
+            "LabelList" => Ok(LanceDbIndex::LabelList(Default::default())),
+            "FTS" => {
+                let params = source.extract::<FtsParams>()?;
+                let inner_opts = TokenizerConfig::default()
+                    .base_tokenizer(params.base_tokenizer)
+                    .language(&params.language)
+                    .map_err(|_| PyValueError::new_err(format!("LanceDB does not support the requested language: '{}'", params.language)))?
+                    .lower_case(params.lower_case)
+                    .max_token_length(params.max_token_length)
+                    .remove_stop_words(params.remove_stop_words)
+                    .stem(params.stem)
+                    .ascii_folding(params.ascii_folding);
+                let mut opts = FtsIndexBuilder::default()
+                    .with_position(params.with_position);
+                opts.tokenizer_configs = inner_opts;
+                Ok(LanceDbIndex::FTS(opts))
+            },
+            "IvfFlat" => {
+                let params = source.extract::<IvfFlatParams>()?;
+                let distance_type = parse_distance_type(params.distance_type)?;
+                let mut ivf_flat_builder = IvfFlatIndexBuilder::default()
+                    .distance_type(distance_type)
+                    .max_iterations(params.max_iterations)
+                    .sample_rate(params.sample_rate);
+                if let Some(num_partitions) = params.num_partitions {
+                    ivf_flat_builder = ivf_flat_builder.num_partitions(num_partitions);
+                }
+                Ok(LanceDbIndex::IvfFlat(ivf_flat_builder))
+            },
+            "IvfPq" => {
+                let params = source.extract::<IvfPqParams>()?;
+                let distance_type = parse_distance_type(params.distance_type)?;
+                let mut ivf_pq_builder = IvfPqIndexBuilder::default()
+                    .distance_type(distance_type)
+                    .max_iterations(params.max_iterations)
+                    .sample_rate(params.sample_rate)
+                    .num_bits(params.num_bits);
+                if let Some(num_partitions) = params.num_partitions {
+                    ivf_pq_builder = ivf_pq_builder.num_partitions(num_partitions);
+                }
+                if let Some(num_sub_vectors) = params.num_sub_vectors {
+                    ivf_pq_builder = ivf_pq_builder.num_sub_vectors(num_sub_vectors);
+                }
+                Ok(LanceDbIndex::IvfPq(ivf_pq_builder))
+            },
+            "HnswPq" => {
+                let params = source.extract::<IvfHnswPqParams>()?;
+                let distance_type = parse_distance_type(params.distance_type)?;
+                let mut hnsw_pq_builder = IvfHnswPqIndexBuilder::default()
+                    .distance_type(distance_type)
+                    .max_iterations(params.max_iterations)
+                    .sample_rate(params.sample_rate)
+                    .num_edges(params.m)
+                    .ef_construction(params.ef_construction)
+                    .num_bits(params.num_bits);
+                if let Some(num_partitions) = params.num_partitions {
+                    hnsw_pq_builder = hnsw_pq_builder.num_partitions(num_partitions);
+                }
+                if let Some(num_sub_vectors) = params.num_sub_vectors {
+                    hnsw_pq_builder = hnsw_pq_builder.num_sub_vectors(num_sub_vectors);
+                }
+                Ok(LanceDbIndex::IvfHnswPq(hnsw_pq_builder))
+            },
+            "HnswSq" => {
+                let params = source.extract::<IvfHnswSqParams>()?;
+                let distance_type = parse_distance_type(params.distance_type)?;
+                let mut hnsw_sq_builder = IvfHnswSqIndexBuilder::default()
+                    .distance_type(distance_type)
+                    .max_iterations(params.max_iterations)
+                    .sample_rate(params.sample_rate)
+                    .num_edges(params.m)
+                    .ef_construction(params.ef_construction);
+                if let Some(num_partitions) = params.num_partitions {
+                    hnsw_sq_builder = hnsw_sq_builder.num_partitions(num_partitions);
+                }
+                Ok(LanceDbIndex::IvfHnswSq(hnsw_sq_builder))
+            },
+            not_supported => Err(PyValueError::new_err(format!(
+                "Invalid index type '{}'. Must be one of BTree, Bitmap, LabelList, FTS, IvfPq, IvfHnswPq, or IvfHnswSq",
+                not_supported
+            ))),
+        }
+    } else {
+        Ok(LanceDbIndex::Auto)
+    }
+}
+
+#[derive(FromPyObject)]
+struct FtsParams {
+    with_position: bool,
+    base_tokenizer: String,
+    language: String,
+    max_token_length: Option<usize>,
+    lower_case: bool,
+    stem: bool,
+    remove_stop_words: bool,
+    ascii_folding: bool,
+}
+
+#[derive(FromPyObject)]
+struct IvfFlatParams {
+    distance_type: String,
+    num_partitions: Option<u32>,
+    max_iterations: u32,
+    sample_rate: u32,
+}
+
+#[derive(FromPyObject)]
+struct IvfPqParams {
+    distance_type: String,
+    num_partitions: Option<u32>,
+    num_sub_vectors: Option<u32>,
+    num_bits: u32,
+    max_iterations: u32,
+    sample_rate: u32,
+}
+
+#[derive(FromPyObject)]
+struct IvfHnswPqParams {
+    distance_type: String,
+    num_partitions: Option<u32>,
+    num_sub_vectors: Option<u32>,
+    num_bits: u32,
+    max_iterations: u32,
+    sample_rate: u32,
+    m: u32,
+    ef_construction: u32,
+}
+
+#[derive(FromPyObject)]
+struct IvfHnswSqParams {
+    distance_type: String,
+    num_partitions: Option<u32>,
+    max_iterations: u32,
+    sample_rate: u32,
+    m: u32,
+    ef_construction: u32,
+}

 #[pyclass(get_all)]
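The net effect of this rewrite: instead of a one-shot `Index` pyclass consumed on the Rust side, `create_index` now inspects the Python config object's class name and extracts its fields into the matching builder. A hedged sketch of what that looks like from the Python API (connection path assumed, and the table is assumed to already exist):

import asyncio

import lancedb
from lancedb.index import BTree, IvfPq

async def main():
    db = await lancedb.connect_async("/tmp/lancedb-index-demo")
    table = await db.open_table("my_table")  # assumes this table already exists
    # "IvfPq" dispatches to IvfPqIndexBuilder; field names line up with IvfPqParams.
    await table.create_index(
        "vector", config=IvfPq(distance_type="cosine", num_partitions=128)
    )
    # Scalar configs dispatch the same way ("BTree" -> BTreeIndexBuilder);
    # omitting config entirely selects Index::Auto on the Rust side.
    await table.create_index("id", config=BTree())

asyncio.run(main())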


@@ -15,7 +15,7 @@
 use arrow::RecordBatchStream;
 use connection::{connect, Connection};
 use env_logger::Env;
-use index::{Index, IndexConfig};
+use index::IndexConfig;
 use pyo3::{
     pymodule,
     types::{PyModule, PyModuleMethods},
@@ -40,7 +40,6 @@ pub fn _lancedb(_py: Python, m: &Bound<'_, PyModule>) -> PyResult<()> {
     env_logger::init_from_env(env);
     m.add_class::<Connection>()?;
     m.add_class::<Table>()?;
-    m.add_class::<Index>()?;
     m.add_class::<IndexConfig>()?;
     m.add_class::<Query>()?;
     m.add_class::<VectorQuery>()?;


@@ -19,7 +19,7 @@ use pyo3_async_runtimes::tokio::future_into_py;
 use crate::{
     error::PythonErrorExt,
-    index::{Index, IndexConfig},
+    index::{extract_index_params, IndexConfig},
     query::Query,
 };
@@ -177,14 +177,10 @@ impl Table {
     pub fn create_index<'a>(
         self_: PyRef<'a, Self>,
         column: String,
-        index: Option<&Index>,
+        index: Option<Bound<'_, PyAny>>,
         replace: Option<bool>,
     ) -> PyResult<Bound<'a, PyAny>> {
-        let index = if let Some(index) = index {
-            index.consume()?
-        } else {
-            lancedb::index::Index::Auto
-        };
+        let index = extract_index_params(&index)?;
         let mut op = self_.inner_ref()?.create_index(&[column], index);
         if let Some(replace) = replace {
             op = op.replace(replace);


@@ -43,8 +43,9 @@ pub fn parse_distance_type(distance_type: impl AsRef<str>) -> PyResult<DistanceT
         "l2" => Ok(DistanceType::L2),
         "cosine" => Ok(DistanceType::Cosine),
         "dot" => Ok(DistanceType::Dot),
+        "hamming" => Ok(DistanceType::Hamming),
         _ => Err(PyValueError::new_err(format!(
-            "Invalid distance type '{}'. Must be one of l2, cosine, or dot",
+            "Invalid distance type '{}'. Must be one of l2, cosine, dot, or hamming",
             distance_type.as_ref()
         ))),
     }
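With "hamming" added, the accepted distance strings are l2, cosine, dot, and hamming; validation still happens on the Rust side when the index is actually built. A small illustration:

from lancedb.index import IvfFlat

# Accepted; checked when the index is created rather than at construction time.
config = IvfFlat(distance_type="hamming", num_partitions=10)

# A typo like "haming" would surface as:
# ValueError: Invalid distance type 'haming'. Must be one of l2, cosine, dot, or hamming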


@@ -1,2 +1,2 @@
 [toolchain]
-channel = "1.80.0"
+channel = "1.83.0"


@@ -1,6 +1,6 @@
 [package]
 name = "lancedb-node"
-version = "0.14.1-beta.2"
+version = "0.14.1-beta.6"
 description = "Serverless, low-latency vector database for AI applications"
 license.workspace = true
 edition.workspace = true


@@ -1,13 +1,13 @@
 [package]
 name = "lancedb"
-version = "0.14.1-beta.2"
+version = "0.14.1-beta.6"
 edition.workspace = true
 description = "LanceDB: A serverless, low-latency vector database for AI applications"
 license.workspace = true
 repository.workspace = true
 keywords.workspace = true
 categories.workspace = true
-rust-version = "1.75"
+rust-version.workspace = true
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]


@@ -1050,6 +1050,8 @@ impl ConnectionInternal for Database {
         write_params.enable_v2_manifest_paths =
             options.enable_v2_manifest_paths.unwrap_or_default();

+        let data_schema = data.schema();
+
         match NativeTable::create(
             &table_uri,
             &options.name,
@@ -1069,7 +1071,18 @@ impl ConnectionInternal for Database {
             CreateTableMode::ExistOk(callback) => {
                 let builder = OpenTableBuilder::new(options.parent, options.name);
                 let builder = (callback)(builder);
-                builder.execute().await
+                let table = builder.execute().await?;
+                let table_schema = table.schema().await?;
+                if table_schema != data_schema {
+                    return Err(Error::Schema {
+                        message: "Provided schema does not match existing table schema"
+                            .to_string(),
+                    });
+                }
+                Ok(table)
             }
             CreateTableMode::Overwrite => unreachable!(),
         },
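For the Python async API this guard means an exist-ok create only succeeds when the caller's schema matches the stored one. A hedged sketch; the path and the `mode="exist_ok"` spelling are assumptions about the wrapper API:

import asyncio

import lancedb
import pyarrow as pa

async def main():
    db = await lancedb.connect_async("/tmp/lancedb-schema-demo")
    schema = pa.schema([pa.field("id", pa.int64())])
    await db.create_table("t", schema=schema, mode="exist_ok")
    # Re-running with a matching schema simply opens the table; a different
    # schema now fails: "Provided schema does not match existing table schema".
    await db.create_table("t", schema=schema, mode="exist_ok")

asyncio.run(main())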


@@ -17,6 +17,7 @@ use std::sync::Arc;
 use scalar::FtsIndexBuilder;
 use serde::Deserialize;
 use serde_with::skip_serializing_none;
+use vector::IvfFlatIndexBuilder;

 use crate::{table::TableInternal, DistanceType, Error, Result};
@@ -56,6 +57,9 @@ pub enum Index {
     /// Full text search index using bm25.
     FTS(FtsIndexBuilder),

+    /// IVF index
+    IvfFlat(IvfFlatIndexBuilder),
+
     /// IVF index with Product Quantization
     IvfPq(IvfPqIndexBuilder),
@@ -106,6 +110,8 @@ impl IndexBuilder {
 #[derive(Debug, Clone, PartialEq, Deserialize)]
 pub enum IndexType {
     // Vector
+    #[serde(alias = "IVF_FLAT")]
+    IvfFlat,
     #[serde(alias = "IVF_PQ")]
     IvfPq,
     #[serde(alias = "IVF_HNSW_PQ")]
@@ -127,6 +133,7 @@ impl std::fmt::Display for IndexType {
 impl std::fmt::Display for IndexType {
     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
         match self {
+            Self::IvfFlat => write!(f, "IVF_FLAT"),
             Self::IvfPq => write!(f, "IVF_PQ"),
             Self::IvfHnswPq => write!(f, "IVF_HNSW_PQ"),
             Self::IvfHnswSq => write!(f, "IVF_HNSW_SQ"),
@@ -147,6 +154,7 @@ impl std::str::FromStr for IndexType {
             "BITMAP" => Ok(Self::Bitmap),
             "LABEL_LIST" | "LABELLIST" => Ok(Self::LabelList),
             "FTS" | "INVERTED" => Ok(Self::FTS),
+            "IVF_FLAT" => Ok(Self::IvfFlat),
             "IVF_PQ" => Ok(Self::IvfPq),
             "IVF_HNSW_PQ" => Ok(Self::IvfHnswPq),
             "IVF_HNSW_SQ" => Ok(Self::IvfHnswSq),


@@ -77,5 +77,5 @@ impl FtsIndexBuilder {
     }
 }

-use lance_index::scalar::inverted::TokenizerConfig;
+pub use lance_index::scalar::inverted::TokenizerConfig;
 pub use lance_index::scalar::FullTextSearchQuery;


@@ -162,6 +162,43 @@ macro_rules! impl_hnsw_params_setter {
     };
 }

+/// Builder for an IVF Flat index.
+///
+/// This index stores raw vectors. These vectors are grouped into partitions of similar vectors.
+/// Each partition keeps track of a centroid which is the average value of all vectors in the group.
+///
+/// During a query the centroids are compared with the query vector to find the closest partitions.
+/// The raw vectors in these partitions are then searched to find the closest vectors.
+///
+/// The partitioning process is called IVF and the `num_partitions` parameter controls how many groups to create.
+///
+/// Note that training an IVF Flat index on a large dataset is a slow operation and currently is also a memory intensive operation.
+#[derive(Debug, Clone)]
+pub struct IvfFlatIndexBuilder {
+    pub(crate) distance_type: DistanceType,
+
+    // IVF
+    pub(crate) num_partitions: Option<u32>,
+    pub(crate) sample_rate: u32,
+    pub(crate) max_iterations: u32,
+}
+
+impl Default for IvfFlatIndexBuilder {
+    fn default() -> Self {
+        Self {
+            distance_type: DistanceType::L2,
+            num_partitions: None,
+            sample_rate: 256,
+            max_iterations: 50,
+        }
+    }
+}
+
+impl IvfFlatIndexBuilder {
+    impl_distance_type_setter!();
+    impl_ivf_params_setter!();
+}
+
 /// Builder for an IVF PQ index.
 ///
 /// This index stores a compressed (quantized) copy of every vector. These vectors
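Those builder defaults (L2 distance, sample_rate 256, max_iterations 50, partitions derived from row count when unset) are what the Python `IvfFlat` config falls back to; a brief sketch:

from lancedb.index import IvfFlat

# Defaults mirror the Rust builder above.
default_config = IvfFlat()
# Any field can be overridden per index.
tuned_config = IvfFlat(distance_type="hamming", num_partitions=10, sample_rate=512)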


@@ -339,7 +339,7 @@ pub trait QueryBase {
     fn limit(self, limit: usize) -> Self;

     /// Set the offset of the query.
-
+    ///
     /// By default, it fetches starting with the first row.
     /// This method can be used to skip the first `offset` rows.
     fn offset(self, offset: usize) -> Self;


@@ -18,9 +18,9 @@ use std::path::Path;
 use std::sync::Arc;

 use arrow::array::AsArray;
-use arrow::datatypes::Float32Type;
+use arrow::datatypes::{Float32Type, UInt8Type};
 use arrow_array::{RecordBatchIterator, RecordBatchReader};
-use arrow_schema::{Field, Schema, SchemaRef};
+use arrow_schema::{DataType, Field, Schema, SchemaRef};
 use async_trait::async_trait;
 use datafusion_physical_plan::display::DisplayableExecutionPlan;
 use datafusion_physical_plan::projection::ProjectionExec;
@@ -58,8 +58,8 @@ use crate::embeddings::{EmbeddingDefinition, EmbeddingRegistry, MaybeEmbedded, M
 use crate::error::{Error, Result};
 use crate::index::scalar::FtsIndexBuilder;
 use crate::index::vector::{
-    suggested_num_partitions_for_hnsw, IvfHnswPqIndexBuilder, IvfHnswSqIndexBuilder,
-    IvfPqIndexBuilder, VectorIndex,
+    suggested_num_partitions_for_hnsw, IvfFlatIndexBuilder, IvfHnswPqIndexBuilder,
+    IvfHnswSqIndexBuilder, IvfPqIndexBuilder, VectorIndex,
 };
 use crate::index::IndexStatistics;
 use crate::index::{
@@ -1306,6 +1306,44 @@ impl NativeTable {
             .collect())
     }

+    async fn create_ivf_flat_index(
+        &self,
+        index: IvfFlatIndexBuilder,
+        field: &Field,
+        replace: bool,
+    ) -> Result<()> {
+        if !supported_vector_data_type(field.data_type()) {
+            return Err(Error::InvalidInput {
+                message: format!(
+                    "An IVF Flat index cannot be created on the column `{}` which has data type {}",
+                    field.name(),
+                    field.data_type()
+                ),
+            });
+        }
+
+        let num_partitions = if let Some(n) = index.num_partitions {
+            n
+        } else {
+            suggested_num_partitions(self.count_rows(None).await?)
+        };
+
+        let mut dataset = self.dataset.get_mut().await?;
+        let lance_idx_params = lance::index::vector::VectorIndexParams::ivf_flat(
+            num_partitions as usize,
+            index.distance_type.into(),
+        );
+        dataset
+            .create_index(
+                &[field.name()],
+                IndexType::Vector,
+                None,
+                &lance_idx_params,
+                replace,
+            )
+            .await?;
+        Ok(())
+    }
+
     async fn create_ivf_pq_index(
         &self,
         index: IvfPqIndexBuilder,
@@ -1778,6 +1816,10 @@ impl TableInternal for NativeTable {
             Index::Bitmap(_) => self.create_bitmap_index(field, opts).await,
             Index::LabelList(_) => self.create_label_list_index(field, opts).await,
             Index::FTS(fts_opts) => self.create_fts_index(field, fts_opts, opts.replace).await,
+            Index::IvfFlat(ivf_flat) => {
+                self.create_ivf_flat_index(ivf_flat, field, opts.replace)
+                    .await
+            }
             Index::IvfPq(ivf_pq) => self.create_ivf_pq_index(ivf_pq, field, opts.replace).await,
             Index::IvfHnswPq(ivf_hnsw_pq) => {
                 self.create_ivf_hnsw_pq_index(ivf_hnsw_pq, field, opts.replace)
@@ -1848,14 +1890,21 @@ impl TableInternal for NativeTable {
                 message: format!("Column {} not found in dataset schema", column),
             })?;

-            if let arrow_schema::DataType::FixedSizeList(f, dim) = field.data_type() {
-                if !f.data_type().is_floating() {
-                    return Err(Error::InvalidInput {
-                        message: format!(
-                            "The data type of the vector column '{}' is not a floating point type",
-                            column
-                        ),
-                    });
+            let mut is_binary = false;
+            if let arrow_schema::DataType::FixedSizeList(element, dim) = field.data_type() {
+                match element.data_type() {
+                    e_type if e_type.is_floating() => {}
+                    e_type if *e_type == DataType::UInt8 => {
+                        is_binary = true;
+                    }
+                    _ => {
+                        return Err(Error::InvalidInput {
+                            message: format!(
+                                "The data type of the vector column '{}' is not a floating point type",
+                                column
+                            ),
+                        });
+                    }
                 }

                 if dim != query_vector.len() as i32 {
                     return Err(Error::InvalidInput {
@@ -1870,12 +1919,22 @@ impl TableInternal for NativeTable {
                 }
             }

-            let query_vector = query_vector.as_primitive::<Float32Type>();
-            scanner.nearest(
-                &column,
-                query_vector,
-                query.base.limit.unwrap_or(DEFAULT_TOP_K),
-            )?;
+            if is_binary {
+                let query_vector = arrow::compute::cast(&query_vector, &DataType::UInt8)?;
+                let query_vector = query_vector.as_primitive::<UInt8Type>();
+                scanner.nearest(
+                    &column,
+                    query_vector,
+                    query.base.limit.unwrap_or(DEFAULT_TOP_K),
+                )?;
+            } else {
+                let query_vector = query_vector.as_primitive::<Float32Type>();
+                scanner.nearest(
+                    &column,
+                    query_vector,
+                    query.base.limit.unwrap_or(DEFAULT_TOP_K),
+                )?;
+            }
         }

         scanner.limit(
             query.base.limit.map(|limit| limit as i64),


@@ -110,7 +110,7 @@ pub(crate) fn default_vector_column(schema: &Schema, dim: Option<i32>) -> Result
         .iter()
         .filter_map(|field| match field.data_type() {
             arrow_schema::DataType::FixedSizeList(f, d)
-                if f.data_type().is_floating()
+                if (f.data_type().is_floating() || f.data_type() == &DataType::UInt8)
                     && dim.map(|expect| *d == expect).unwrap_or(true) =>
             {
                 Some(field.name())
@@ -171,7 +171,9 @@ pub fn supported_fts_data_type(dtype: &DataType) -> bool {
 pub fn supported_vector_data_type(dtype: &DataType) -> bool {
     match dtype {
-        DataType::FixedSizeList(inner, _) => DataType::is_floating(inner.data_type()),
+        DataType::FixedSizeList(inner, _) => {
+            DataType::is_floating(inner.data_type()) || *inner.data_type() == DataType::UInt8
+        }
         _ => false,
     }
 }
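In schema terms, a vector column is now any fixed-size list of floats or of uint8 (binary); previously only the float case qualified. For example:

import pyarrow as pa

# Both columns are treated as vector columns after this change;
# before it, only float_vec was recognized.
schema = pa.schema(
    [
        pa.field("float_vec", pa.list_(pa.float32(), 128)),
        pa.field("binary_vec", pa.list_(pa.uint8(), 16)),
    ]
)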