Mirror of https://github.com/lancedb/lancedb.git (synced 2025-12-23 13:29:57 +00:00)

Compare commits (35): python-v0. ... python-v0.
Commits in this comparison (SHA1):
1096da09da, 683824f1e9, db7bdefe77, e41894b071, e1ae2bcbd8, ababc3f8ec, a1377afcaa, a26c8f3316, 88d8d7249e, 0eb7c9ea0c, 1db66c6980, c58da8fc8a, 448c4a835d, 850f80de99, a022368426, 8b815ef5a8, e4c3a9346c, 1d1f8964d2, d326146a40, 693bca1eba, 343e274ea5, a695fb8030, bc8670d7af, 74004161ff, 34ddb1de6d, 1029fc9cb0, 31c5df6d99, dbf37a0434, f20f19b804, 55207ce844, c21f9cdda0, bc38abb781, 731f86e44c, 31dad71c94, 9585f550b3
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.2.5
+current_version = 0.3.0
 commit = True
 message = Bump version: {current_version} → {new_version}
 tag = True
.github/workflows/node.yml (3 changes, vendored):

@@ -9,6 +9,7 @@ on:
       - node/**
       - rust/ffi/node/**
       - .github/workflows/node.yml
+      - docker-compose.yml
 
 env:
   # Disable full debug symbol generation to speed up CI build and keep memory down
@@ -133,7 +134,7 @@ jobs:
           cache: 'npm'
           cache-dependency-path: node/package-lock.json
       - name: start local stack
-        run: docker compose -f ../docker-compose.yml up -d
+        run: docker compose -f ../docker-compose.yml up -d --wait
       - name: create s3
         run: aws s3 mb s3://lancedb-integtest --endpoint $AWS_ENDPOINT
       - name: create ddb
.github/workflows/python.yml (34 changes, vendored):

@@ -38,7 +38,7 @@ jobs:
       - name: isort
         run: isort --check --diff --quiet .
       - name: Run tests
-        run: pytest -x -v --durations=30 tests
+        run: pytest -m "not slow" -x -v --durations=30 tests
       - name: doctest
         run: pytest --doctest-modules lancedb
   mac:
@@ -65,4 +65,34 @@ jobs:
       - name: Black
         run: black --check --diff --no-color --quiet .
       - name: Run tests
-        run: pytest -x -v --durations=30 tests
+        run: pytest -m "not slow" -x -v --durations=30 tests
+  pydantic1x:
+    timeout-minutes: 30
+    runs-on: "ubuntu-22.04"
+    defaults:
+      run:
+        shell: bash
+        working-directory: python
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          lfs: true
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: 3.9
+      - name: Install lancedb
+        run: |
+          pip install "pydantic<2"
+          pip install -e .[tests]
+          pip install tantivy@git+https://github.com/quickwit-oss/tantivy-py#164adc87e1a033117001cf70e38c82a53014d985
+          pip install pytest pytest-mock black isort
+      - name: Black
+        run: black --check --diff --no-color --quiet .
+      - name: isort
+        run: isort --check --diff --quiet .
+      - name: Run tests
+        run: pytest -m "not slow" -x -v --durations=30 tests
+      - name: doctest
+        run: pytest --doctest-modules lancedb
Cargo.toml:

@@ -5,8 +5,9 @@ exclude = ["python"]
 resolver = "2"
 
 [workspace.dependencies]
-lance = { "version" = "=0.7.4", "features" = ["dynamodb"] }
-lance-linalg = { "version" = "=0.7.4" }
+lance = { "version" = "=0.8.3", "features" = ["dynamodb"] }
+lance-linalg = { "version" = "=0.8.3" }
+lance-testing = { "version" = "=0.8.3" }
 # Note that this one does not include pyarrow
 arrow = { version = "43.0.0", optional = false }
 arrow-array = "43.0"
@@ -16,6 +17,7 @@ arrow-ord = "43.0"
 arrow-schema = "43.0"
 arrow-arith = "43.0"
 arrow-cast = "43.0"
+chrono = "0.4.23"
 half = { "version" = "=2.2.1", default-features = false, features = [
     "num-traits"
 ] }
README.md (157 changes):

@@ -1,78 +1,79 @@
 <div align="center">
 <p align="center">
 
 <img width="275" alt="LanceDB Logo" src="https://user-images.githubusercontent.com/917119/226205734-6063d87a-1ecc-45fe-85be-1dea6383a3d8.png">
 
 **Developer-friendly, serverless vector database for AI applications**
 
 <a href="https://lancedb.github.io/lancedb/">Documentation</a> •
 <a href="https://blog.lancedb.com/">Blog</a> •
 <a href="https://discord.gg/zMM32dvNtd">Discord</a> •
 <a href="https://twitter.com/lancedb">Twitter</a>
 
 </p>
 
 <img max-width="750px" alt="LanceDB Multimodal Search" src="https://github.com/lancedb/lancedb/assets/917119/09c5afc5-7816-4687-bae4-f2ca194426ec">
 
 </p>
 </div>
 
 <hr />
 
 LanceDB is an open-source database for vector search built with persistent storage, which greatly simplifies retrieval, filtering and management of embeddings.
 
 The key features of LanceDB include:
 
 * Production-scale vector search with no servers to manage.
 
 * Store, query and filter vectors, metadata and multi-modal data (text, images, videos, point clouds, and more).
 
 * Support for vector similarity search, full-text search and SQL.
 
 * Native Python and Javascript/Typescript support.
 
 * Zero-copy, automatic versioning, manage versions of your data without needing extra infrastructure.
 
+* GPU support in building vector index(*).
+
 * Ecosystem integrations with [LangChain 🦜️🔗](https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lanecdb.html), [LlamaIndex 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way.
 
 LanceDB's core is written in Rust 🦀 and is built using <a href="https://github.com/lancedb/lance">Lance</a>, an open-source columnar format designed for performant ML workloads.
 
 ## Quick Start
 
 **Javascript**
 ```shell
 npm install vectordb
 ```
 
 ```javascript
 const lancedb = require('vectordb');
 const db = await lancedb.connect('data/sample-lancedb');
 
 const table = await db.createTable('vectors',
     [{ id: 1, vector: [0.1, 0.2], item: "foo", price: 10 },
      { id: 2, vector: [1.1, 1.2], item: "bar", price: 50 }])
 
-const query = table.search([0.1, 0.3]);
-query.limit = 20;
+const query = table.search([0.1, 0.3]).limit(2);
 const results = await query.execute();
 ```
 
 **Python**
 ```shell
 pip install lancedb
 ```
 
 ```python
 import lancedb
 
 uri = "data/sample-lancedb"
 db = lancedb.connect(uri)
 table = db.create_table("my_table",
                         data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
                               {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}])
-result = table.search([100, 100]).limit(2).to_df()
+result = table.search([100, 100]).limit(2).to_pandas()
 ```
 
 ## Blogs, Tutorials & Videos
 * 📈 <a href="https://blog.eto.ai/benchmarking-random-access-in-lance-ed690757a826">2000x better performance with Lance over Parquet</a>
 * 🤖 <a href="https://github.com/lancedb/lancedb/blob/main/docs/src/notebooks/youtube_transcript_search.ipynb">Build a question and answer bot with LanceDB</a>
docker-compose.yml:

@@ -13,3 +13,6 @@ services:
       - AWS_SECRET_ACCESS_KEY=SECRETKEY
     healthcheck:
       test: [ "CMD", "curl", "-f", "http://localhost:4566/health" ]
+      interval: 5s
+      retries: 3
+      start_period: 10s
mkdocs.yml:

@@ -21,6 +21,7 @@ theme:
     - navigation.tracking
     - navigation.instant
     - navigation.indexes
+    - navigation.expand
   icon:
     repo: fontawesome/brands/github
 custom_dir: overrides
@@ -68,7 +69,7 @@ nav:
 - 🏢 Home: index.md
 - 💡 Basics: basic.md
 - 📚 Guides:
-  - Tables: guides/tables.md
+  - Create Ingest Update Delete: guides/tables.md
   - Vector Search: search.md
   - SQL filters: sql.md
   - Indexing: ann_indexes.md
@@ -96,9 +97,11 @@ nav:
   - Serverless Website Chatbot: examples/serverless_website_chatbot.md
   - YouTube Transcript Search: examples/youtube_transcript_bot_with_nodejs.md
   - TransformersJS Embedding Search: examples/transformerjs_embedding_search_nodejs.md
+- ⚙️ CLI & Config: cli_config.md
+
 - Basics: basic.md
 - Guides:
-  - Tables: guides/tables.md
+  - Create Ingest Update Delete: guides/tables.md
   - Vector Search: search.md
   - SQL filters: sql.md
   - Indexing: ann_indexes.md
docs/src/ann_indexes.md:

@@ -6,7 +6,7 @@ LanceDB provides many parameters to fine-tune the index's size and the speed of queries
 
 Currently, LanceDB does *not* automatically create the ANN index.
 LanceDB has optimized code for KNN as well. For many use cases, datasets under 100K vectors won't require index creation at all.
-If you can live with <100ms latency, skipping index creation is a simpler workflow while guaranteeing 100% recall.
+If you can live with < 100ms latency, skipping index creation is a simpler workflow while guaranteeing 100% recall.
 
 In the future we will look to automatically create and configure the ANN index.
@@ -68,6 +68,12 @@ a single PQ code.
 <figcaption>IVF_PQ index with <code>num_partitions=2, num_sub_vectors=4</code></figcaption>
 </figure>
 
+### Use GPU to build vector index
+
+The Lance Python SDK has experimental GPU support for creating IVF indices.
+You can specify the GPU device used to train IVF partitions via:
+
+- **accelerator**: set to `"cuda"` to enable GPU training.
 
 ## Querying an ANN Index
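For reference, a minimal sketch of how the new option could be used from Python (the table name and the other `create_index` parameter values here are illustrative, not taken from this diff):

```python
import lancedb

db = lancedb.connect("data/sample-lancedb")
tbl = db.open_table("my_vectors")  # hypothetical table with a fixed-size vector column

# Train the IVF partitions on a CUDA device. `accelerator` is the new knob
# documented in the hunk above; the other parameters keep their usual meaning.
tbl.create_index(
    num_partitions=256,
    num_sub_vectors=96,
    accelerator="cuda",
)
```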
@@ -91,7 +97,7 @@ There are a couple of parameters that can be used to fine-tune the search:
         .limit(2) \
         .nprobes(20) \
         .refine_factor(10) \
-        .to_df()
+        .to_pandas()
     ```
     ```
     vector  item  _distance
@@ -118,7 +124,7 @@ You can further filter the elements returned by a search using a where clause.
 
 === "Python"
     ```python
-    tbl.search(np.random.random((1536))).where("item != 'item 1141'").to_df()
+    tbl.search(np.random.random((1536))).where("item != 'item 1141'").to_pandas()
     ```
 
 === "Javascript"
@@ -135,7 +141,7 @@ You can select the columns returned by the query using a select clause.
 
 === "Python"
     ```python
-    tbl.search(np.random.random((1536))).select(["vector"]).to_df()
+    tbl.search(np.random.random((1536))).select(["vector"]).to_pandas()
     ```
     ```
     vector  _distance
@@ -154,28 +160,28 @@ You can select the columns returned by the query using a select clause.
 
 ## FAQ
 
-### When is it necessary to create an ANN vector index.
+### When is it necessary to create an ANN vector index?
 
-`LanceDB` has manually tuned SIMD code for computing vector distances.
-In our benchmarks, computing 100K pairs of 1K dimension vectors only take less than 20ms.
-For small dataset (<100K rows) or the applications which can accept 100ms latency, vector indices are usually not necessary.
+`LanceDB` has manually-tuned SIMD code for computing vector distances.
+In our benchmarks, computing 100K pairs of 1K dimension vectors takes **less than 20ms**.
+For small datasets (< 100K rows) or applications that can accept 100ms latency, vector indices are usually not necessary.
 
 For large-scale or higher dimension vectors, it is beneficial to create a vector index.
 
-### How big is my index, and how many memory will it take.
+### How big is my index, and how much memory will it take?
 
-In LanceDB, all vector indices are disk-based, meaning that when responding to a vector query, only the relevant pages from the index file are loaded from disk and cached in memory. Additionally, each sub-vector is usually encoded into 1 byte PQ code.
+In LanceDB, all vector indices are **disk-based**, meaning that when responding to a vector query, only the relevant pages from the index file are loaded from disk and cached in memory. Additionally, each sub-vector is usually encoded into a 1-byte PQ code.
 
 For example, with a 1024-dimension dataset, if we choose `num_sub_vectors=64`, each sub-vector has `1024 / 64 = 16` float32 numbers.
 Product quantization can lead to approximately `16 * sizeof(float32) / 1 = 64` times of space reduction.
 
-### How to choose `num_partitions` and `num_sub_vectors` for `IVF_PQ` index.
+### How to choose `num_partitions` and `num_sub_vectors` for an `IVF_PQ` index?
 
 `num_partitions` is used to decide how many partitions the first-level `IVF` index uses.
 A higher number of partitions could lead to more efficient I/O during queries and better accuracy, but it takes much more time to train.
 On the `SIFT-1M` dataset, our benchmark shows that keeping each partition at 1K-4K rows leads to a good latency / recall.
 
-`num_sub_vectors` decides how many Product Quantization code to generate on each vector. Because
-Product Quantization is a lossy compression of the original vector, the more `num_sub_vectors` usually results to
-less space distortion, and thus yield better accuracy. However, similarly, more `num_sub_vectors` causes heavier I/O and
-more PQ computation, thus, higher latency. `dimension / num_sub_vectors` should be aligned with 8 for better SIMD efficiency.
+`num_sub_vectors` specifies how many Product Quantization (PQ) short codes to generate on each vector. Because
+PQ is a lossy compression of the original vector, a higher `num_sub_vectors` usually results in
+less space distortion, and thus yields better accuracy. However, a higher `num_sub_vectors` also causes heavier I/O and
+more PQ computation, and thus, higher latency. `dimension / num_sub_vectors` should be a multiple of 8 for optimum SIMD efficiency.
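The sizing rules in this FAQ are easy to check numerically. A small sketch of the arithmetic for the 1024-dimension example above (plain Python, no LanceDB calls):

```python
dimension = 1024        # vector dimensionality from the example above
num_sub_vectors = 64    # PQ codes generated per vector

# Each sub-vector covers dimension / num_sub_vectors float32 values,
# and each one is compressed down to a single 1-byte PQ code.
floats_per_sub_vector = dimension // num_sub_vectors      # 16
raw_bytes = floats_per_sub_vector * 4                     # 64 (sizeof(float32) == 4)
space_reduction = raw_bytes // 1                          # roughly 64x smaller

# The SIMD guidance above: dimension / num_sub_vectors should be a multiple of 8.
assert dimension % num_sub_vectors == 0
assert floats_per_sub_vector % 8 == 0
```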
docs/src/basic.md:

@@ -123,9 +123,15 @@ After a table has been created, you can always add more data to it using
 
 === "Python"
     ```python
-    df = pd.DataFrame([{"vector": [1.3, 1.4], "item": "fizz", "price": 100.0},
-                       {"vector": [9.5, 56.2], "item": "buzz", "price": 200.0}])
-    tbl.add(df)
+    # Option 1: Add a list of dicts to a table
+    data = [{"vector": [1.3, 1.4], "item": "fizz", "price": 100.0},
+            {"vector": [9.5, 56.2], "item": "buzz", "price": 200.0}]
+    tbl.add(data)
+
+    # Option 2: Add a pandas DataFrame to a table
+    df = pd.DataFrame(data)
+    tbl.add(df)
     ```
 
 === "Javascript"
@@ -140,7 +146,7 @@ Once you've embedded the query, you can find its nearest neighbors using the following:
 
 === "Python"
     ```python
-    tbl.search([100, 100]).limit(2).to_df()
+    tbl.search([100, 100]).limit(2).to_pandas()
     ```
 
 This returns a pandas DataFrame with the results.
docs/src/cli_config.md (37 changes, new file):

@@ -0,0 +1,37 @@
+## LanceDB CLI
+Once LanceDB is installed, you can access the CLI using the `lancedb` command on the console:
+```
+lancedb
+```
+This lists all the available command-line options. You can get the usage or help for a particular command:
+```
+lancedb {command} --help
+```
+
+## LanceDB config
+LanceDB uses a global config file to store certain settings. These settings are configurable using the LanceDB CLI.
+To view your config settings, you can use:
+```
+lancedb config
+```
+These config parameters can be tuned using the CLI:
+```
+lancedb {config_name} --{argument}
+```
+
+## LanceDB Opt-in Diagnostics
+When enabled, LanceDB will send anonymous events to help us improve LanceDB. These diagnostics are used only for error reporting and no data is collected. Errors & stats allow us to automate certain aspects of bug reporting and the prioritization of fixes and feature requests.
+These diagnostics are opt-in and can be enabled or disabled using the `lancedb diagnostics` command. They are enabled by default.
+Get usage help:
+```
+lancedb diagnostics --help
+```
+Disable diagnostics:
+```
+lancedb diagnostics --disabled
+```
+Enable diagnostics:
+```
+lancedb diagnostics --enabled
+```
@@ -118,7 +118,7 @@ belong in the same latent space and your results will be nonsensical.
 ```python
 query = "What's the best pizza topping?"
 query_vector = embed_func([query])[0]
-tbl.search(query_vector).limit(10).to_df()
+tbl.search(query_vector).limit(10).to_pandas()
 ```
 
 The above snippet returns a pandas DataFrame with the 10 closest vectors to the query.
@@ -80,14 +80,14 @@ def handler(event, context):
     # Shape of SIFT is (128,1M), d=float32
     query_vector = np.array(event['query_vector'], dtype=np.float32)
 
-    rs = table.search(query_vector).limit(2).to_df()
+    rs = table.search(query_vector).limit(2).to_list()
 
     return {
         "statusCode": status_code,
         "headers": {
             "Content-Type": "application/json"
         },
-        "body": rs.to_json()
+        "body": json.dumps(rs)
     }
 ```
@@ -6,17 +6,19 @@ to make this available for JS as well.
 
 ## Installation
 
-To use full text search, you must install optional dependency tantivy-py:
+To use full text search, you must install the dependency `tantivy-py`:
 
-# tantivy 0.19.2
-pip install tantivy@git+https://github.com/quickwit-oss/tantivy-py#164adc87e1a033117001cf70e38c82a53014d985
+# tantivy 0.20.1
+```sh
+pip install tantivy==0.20.1
+```
 
 ## Quickstart
 
 Assume:
 1. `table` is a LanceDB Table
-2. `text` is the name of the Table column that we want to index
+2. `text` is the name of the `Table` column that we want to index
 
 For example,
 
@@ -41,7 +43,13 @@ table.create_fts_index("text")
 To search:
 
 ```python
-df = table.search("puppy").limit(10).select(["text"]).to_df()
+table.search("puppy").limit(10).select(["text"]).to_list()
 ```
 
+Which returns a list of dictionaries:
+
+```python
+[{'text': 'Frodo was a happy puppy', 'score': 0.6931471824645996}]
+```
+
 LanceDB automatically looks for an FTS index if the input is str.
docs/src/guides/tables.md:

@@ -42,7 +42,7 @@ A Table is a collection of Records in a LanceDB Database. You can follow along
     import pandas as pd
 
     data = pd.DataFrame({
-        "vector": [[1.1, 1.2], [0.2, 1.8]],
+        "vector": [[1.1, 1.2, 1.3, 1.4], [0.2, 1.8, 0.4, 3.6]],
         "lat": [45.5, 40.1],
         "long": [-122.7, -74.1]
     })
@@ -56,7 +56,7 @@ A Table is a collection of Records in a LanceDB Database. You can follow along
 
     ```python
     custom_schema = pa.schema([
-        pa.field("vector", pa.list_(pa.float32(), 2)),
+        pa.field("vector", pa.list_(pa.float32(), 4)),
         pa.field("lat", pa.float32()),
         pa.field("long", pa.float32())
     ])
@@ -70,8 +70,8 @@ A Table is a collection of Records in a LanceDB Database. You can follow along
     ```python
     table = pa.Table.from_arrays(
         [
-            pa.array([[3.1, 4.1], [5.9, 26.5]],
-                     pa.list_(pa.float32(), 2)),
+            pa.array([[3.1, 4.1, 5.1, 6.1], [5.9, 26.5, 4.7, 32.8]],
+                     pa.list_(pa.float32(), 4)),
             pa.array(["foo", "bar"]),
            pa.array([10.0, 20.0]),
         ],
@@ -84,7 +84,17 @@ A Table is a collection of Records in a LanceDB Database. You can follow along
     ```
 
 ### From Pydantic Models
-LanceDB supports to create Apache Arrow Schema from a Pydantic BaseModel via pydantic_to_schema() method.
+When you create an empty table without data, you must specify the table schema.
+LanceDB supports creating tables by specifying a pyarrow schema or a specialized
+pydantic model called `LanceModel`.
+
+For example, the following Content model specifies a table with 5 columns:
+movie_id, vector, genres, title, and imdb_id. When you create a table, you can
+pass the class as the value of the `schema` parameter to `create_table`.
+The `vector` column is a `Vector` type, which is a specialized pydantic type that
+can be configured with the vector dimensions. It is also important to note that
+LanceDB only understands subclasses of `lancedb.pydantic.LanceModel`
+(which itself derives from `pydantic.BaseModel`).
 
 ```python
 from lancedb.pydantic import Vector, LanceModel
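The hunk is cut off right after the import, so the `Content` model body is not visible here. A sketch of what a model with the five columns named above could look like (field types and the vector dimension are assumptions for illustration, not taken from this diff):

```python
import lancedb
from lancedb.pydantic import Vector, LanceModel

class Content(LanceModel):
    movie_id: int
    vector: Vector(128)   # dimension chosen for illustration only
    genres: str
    title: str
    imdb_id: int

db = lancedb.connect("data/sample-lancedb")
# Pass the class itself as the schema when creating an empty table.
tbl = db.create_table("movies", schema=Content)
```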
@@ -121,8 +131,8 @@ A Table is a collection of Records in a LanceDB Database. You can follow along
     for i in range(5):
         yield pa.RecordBatch.from_arrays(
             [
-                pa.array([[3.1, 4.1], [5.9, 26.5]],
-                         pa.list_(pa.float32(), 2)),
+                pa.array([[3.1, 4.1, 5.1, 6.1], [5.9, 26.5, 4.7, 32.8]],
+                         pa.list_(pa.float32(), 4)),
                 pa.array(["foo", "bar"]),
                 pa.array([10.0, 20.0]),
             ],
@@ -130,7 +140,7 @@ A Table is a collection of Records in a LanceDB Database. You can follow along
     )
 
 schema = pa.schema([
-    pa.field("vector", pa.list_(pa.float32(), 2)),
+    pa.field("vector", pa.list_(pa.float32(), 4)),
     pa.field("item", pa.utf8()),
     pa.field("price", pa.float32()),
 ])
@@ -354,6 +364,48 @@ Use the `delete()` method on tables to delete rows from a table. To choose which
     await tbl.countRows() // Returns 1
     ```
 
+### Updating a Table [Experimental]
+EXPERIMENTAL: Update rows in the table (not threadsafe).
+
+This can be used to update zero to all rows, depending on how many rows match the where clause.
+
+| Parameter | Type | Description |
+|---|---|---|
+| `where` | `str` | The SQL where clause to use when updating rows. For example, `'x = 2'` or `'x IN (1, 2, 3)'`. The filter must not be empty, or it will error. |
+| `values` | `dict` | The values to update. The keys are the column names and the values are the values to set. |
+
+=== "Python"
+
+    ```python
+    import lancedb
+    import pandas as pd
+
+    # Create a lancedb connection
+    db = lancedb.connect("./.lancedb")
+
+    # Create a table from a pandas DataFrame
+    data = pd.DataFrame({"x": [1, 2, 3], "vector": [[1, 2], [3, 4], [5, 6]]})
+    table = db.create_table("my_table", data)
+
+    # Update the table where x = 2
+    table.update(where="x = 2", values={"vector": [10, 10]})
+
+    # Get the updated table as a pandas DataFrame
+    df = table.to_pandas()
+
+    # Print the DataFrame
+    print(df)
+    ```
+
+    Output
+    ```shell
+       x       vector
+    0  1   [1.0, 2.0]
+    1  3   [5.0, 6.0]
+    2  2  [10.0, 10.0]
+    ```
+
 ## What's Next?
 
 Learn how to Query your tables and create indices
docs/src/index.md:

@@ -36,7 +36,7 @@ LanceDB's core is written in Rust 🦀 and is built using <a href="https://github.com/lancedb/lance">Lance</a>
     table = db.create_table("my_table",
                             data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
                                   {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}])
-    result = table.search([100, 100]).limit(2).to_df()
+    result = table.search([100, 100]).limit(2).to_list()
     ```
 
 === "Javascript"
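A note on the two result shapes these commits switch between: `to_list()` returns plain Python dicts, while `to_pandas()` returns a DataFrame; both replace the older `to_df()` throughout this comparison. A quick sketch:

```python
# to_list() returns plain dicts, one per result row.
rows = table.search([100, 100]).limit(2).to_list()
# to_pandas() returns the same rows as a pandas DataFrame.
df = table.search([100, 100]).limit(2).to_pandas()
```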
@@ -144,7 +144,7 @@
    "source": [
     "# Pre-processing and loading the documentation\n",
     "\n",
-    "Next, let's pre-process and load the documentation. To make sure we don't need to do this repeatedly if we were updating code, we're caching it using pickle so we can retrieve it again (this could take a few minutes to run the first time yyou do it). We'll also add some more metadata to the docs here such as the title and version of the code:"
+    "Next, let's pre-process and load the documentation. To make sure we don't need to do this repeatedly if we were updating code, we're caching it using pickle so we can retrieve it again (this could take a few minutes to run the first time you do it). We'll also add some more metadata to the docs here such as the title and version of the code:"
    ]
   },
   {
@@ -255,7 +255,7 @@
    "id": "28d93b85",
    "metadata": {},
    "source": [
-    "And thats it! We're all setup. The next step is to run some queries, let's try a few:"
+    "And that's it! We're all set up. The next step is to run some queries, let's try a few:"
    ]
   },
   {
@@ -19,11 +19,11 @@
    "output_type": "stream",
    "text": [
     "\n",
-    "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip available: \u001b[0m\u001b[31;49m22.3.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.1.2\u001b[0m\n",
-    "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
+    "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip available: \u001B[0m\u001B[31;49m22.3.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m23.1.2\u001B[0m\n",
+    "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpip install --upgrade pip\u001B[0m\n",
     "\n",
-    "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip available: \u001b[0m\u001b[31;49m22.3.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.1.2\u001b[0m\n",
-    "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
+    "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip available: \u001B[0m\u001B[31;49m22.3.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m23.1.2\u001B[0m\n",
+    "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpip install --upgrade pip\u001B[0m\n"
    ]
   }
  ],
@@ -39,6 +39,7 @@
   "outputs": [],
   "source": [
    "import io\n",
    "\n",
+   "import PIL\n",
    "import duckdb\n",
    "import lancedb"
@@ -158,18 +159,18 @@
    "        \"db = lancedb.connect('~/datasets/demo')\\n\"\n",
    "        \"tbl = db.open_table('diffusiondb')\\n\\n\"\n",
    "        f\"embedding = embed_func('{query}')\\n\"\n",
-   "        \"tbl.search(embedding).limit(9).to_df()\"\n",
+   "        \"tbl.search(embedding).limit(9).to_pandas()\"\n",
    "    )\n",
-   "    return (_extract(tbl.search(emb).limit(9).to_df()), code)\n",
+   "    return (_extract(tbl.search(emb).limit(9).to_pandas()), code)\n",
    "\n",
    "def find_image_keywords(query):\n",
    "    code = (\n",
    "        \"import lancedb\\n\"\n",
    "        \"db = lancedb.connect('~/datasets/demo')\\n\"\n",
    "        \"tbl = db.open_table('diffusiondb')\\n\\n\"\n",
-   "        f\"tbl.search('{query}').limit(9).to_df()\"\n",
+   "        f\"tbl.search('{query}').limit(9).to_pandas()\"\n",
    "    )\n",
-   "    return (_extract(tbl.search(query).limit(9).to_df()), code)\n",
+   "    return (_extract(tbl.search(query).limit(9).to_pandas()), code)\n",
    "\n",
    "def find_image_sql(query):\n",
    "    code = (\n",
@@ -27,11 +27,11 @@
    "output_type": "stream",
    "text": [
     "\n",
-    "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.1.1\u001b[0m\n",
-    "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
+    "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.0\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m23.1.1\u001B[0m\n",
+    "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpip install --upgrade pip\u001B[0m\n",
     "\n",
-    "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.1.1\u001b[0m\n",
-    "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
+    "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.0\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m23.1.1\u001B[0m\n",
+    "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpip install --upgrade pip\u001B[0m\n"
    ]
   }
  ],
@@ -184,7 +184,7 @@
   "df = (contextualize(data.to_pandas())\n",
   "      .groupby(\"title\").text_col(\"text\")\n",
   "      .window(20).stride(4)\n",
-  "      .to_df())\n",
+  "      .to_pandas())\n",
   "df.head(1)"
  ]
 },
@@ -603,7 +603,7 @@
  "outputs": [],
  "source": [
   "# Use LanceDB to get top 3 most relevant context\n",
-  "context = tbl.search(emb).limit(3).to_df()"
+  "context = tbl.search(emb).limit(3).to_pandas()"
  ]
 },
 {
@@ -74,7 +74,7 @@ table = db.open_table("pd_table")
 
 query_vector = [100, 100]
 # Pandas DataFrame
-df = table.search(query_vector).limit(1).to_df()
+df = table.search(query_vector).limit(1).to_pandas()
 print(df)
 ```
@@ -89,12 +89,12 @@ If you have more complex criteria, you can always apply the filter to the result
 ```python
 
 # Apply the filter via LanceDB
-results = table.search([100, 100]).where("price < 15").to_df()
+results = table.search([100, 100]).where("price < 15").to_pandas()
 assert len(results) == 1
 assert results["item"].iloc[0] == "foo"
 
 # Apply the filter via Pandas
-df = results = table.search([100, 100]).to_df()
+df = results = table.search([100, 100]).to_pandas()
 results = df[df.price < 15]
 assert len(results) == 1
 assert results["item"].iloc[0] == "foo"
@@ -26,15 +26,19 @@ pip install lancedb
 
 ## Embeddings
 
-::: lancedb.embeddings.with_embeddings
+::: lancedb.embeddings.functions.EmbeddingFunctionRegistry
 
-::: lancedb.embeddings.functions.EmbeddingFunctionModel
+::: lancedb.embeddings.functions.EmbeddingFunction
 
-::: lancedb.embeddings.functions.TextEmbeddingFunctionModel
+::: lancedb.embeddings.functions.TextEmbeddingFunction
 
-::: lancedb.embeddings.functions.SentenceTransformerEmbeddingFunction
+::: lancedb.embeddings.functions.SentenceTransformerEmbeddings
+
+::: lancedb.embeddings.functions.OpenAIEmbeddings
+
+::: lancedb.embeddings.functions.OpenClipEmbeddings
+
+::: lancedb.embeddings.with_embeddings
 
 ## Context
@@ -25,8 +25,8 @@ Currently, we support the following metrics:
 
 ### Flat Search
 
-If LanceDB does not create a vector index, LanceDB would need to scan (`Flat Search`) the entire vector column
-and compute the distance for each vector in order to find the closest matches.
+If you do not create a vector index, LanceDB would need to exhaustively scan the entire vector column (via `Flat Search`)
+and compute the distance for *every* vector in order to find the closest matches. This is effectively a KNN search.
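Conceptually, the flat search described above is just a brute-force distance computation. A minimal NumPy equivalent (illustrative only; this is not LanceDB's actual SIMD implementation):

```python
import numpy as np

def flat_search(vectors: np.ndarray, query: np.ndarray, k: int) -> np.ndarray:
    # L2 distance from the query to every vector in the column.
    distances = np.linalg.norm(vectors - query, axis=1)
    # Indices of the k closest matches (exact KNN, 100% recall).
    return np.argsort(distances)[:k]

vectors = np.random.random((100_000, 128)).astype(np.float32)
query = np.random.random(128).astype(np.float32)
top10 = flat_search(vectors, query, k=10)
```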
 <!-- Setup Code
@@ -67,7 +67,7 @@ await db_setup.createTable('my_vectors', data)
 
     df = tbl.search(np.random.random((1536))) \
         .limit(10) \
-        .to_df()
+        .to_list()
     ```
 
 === "JavaScript"
@@ -92,7 +92,7 @@ as well.
     df = tbl.search(np.random.random((1536))) \
         .metric("cosine") \
         .limit(10) \
-        .to_df()
+        .to_list()
     ```
@@ -110,7 +110,7 @@ as well.
 
 To accelerate vector retrievals, it is common to build vector indices.
 A vector index is a data structure specifically designed to efficiently organize and
-search vector data based on their similarity or distance metrics.
+search vector data based on their similarity via the chosen distance metric.
 By constructing a vector index, you can reduce the search space and avoid the need
 for brute-force scanning of the entire vector column.
@@ -1,5 +1,8 @@
-lancedb @ git+https://github.com/lancedb/lancedb.git#egg=subdir&subdirectory=python
+-e ../../python
 numpy
 pandas
 pylance
 duckdb
+--extra-index-url https://download.pytorch.org/whl/cpu
+torch
node/package-lock.json (74 changes, generated):

@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.2.5",
+  "version": "0.3.0",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.2.5",
+      "version": "0.3.0",
       "cpu": [
         "x64",
         "arm64"
@@ -53,11 +53,11 @@
         "uuid": "^9.0.0"
       },
       "optionalDependencies": {
-        "@lancedb/vectordb-darwin-arm64": "0.2.5",
-        "@lancedb/vectordb-darwin-x64": "0.2.5",
-        "@lancedb/vectordb-linux-arm64-gnu": "0.2.5",
-        "@lancedb/vectordb-linux-x64-gnu": "0.2.5",
-        "@lancedb/vectordb-win32-x64-msvc": "0.2.5"
+        "@lancedb/vectordb-darwin-arm64": "0.3.0",
+        "@lancedb/vectordb-darwin-x64": "0.3.0",
+        "@lancedb/vectordb-linux-arm64-gnu": "0.3.0",
+        "@lancedb/vectordb-linux-x64-gnu": "0.3.0",
+        "@lancedb/vectordb-win32-x64-msvc": "0.3.0"
       }
     },
     "node_modules/@apache-arrow/ts": {
@@ -317,9 +317,9 @@
       }
     },
     "node_modules/@lancedb/vectordb-darwin-arm64": {
-      "version": "0.2.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.2.5.tgz",
-      "integrity": "sha512-V4206SajkMN3o+bBFBAYJq5emlrjevitP0g8RFfVlmj/LS38i8k4uvSe1bICQ2amUrYkL/Jw4ktYn19NRfTU+g==",
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.3.0.tgz",
+      "integrity": "sha512-Fg+k/cSnqmNQlSWyDp0PpaAJ67kAISfZAD+zZ3mcE8/3ml2I/wM/GVjPy2zeiQX9aR93lG1mZXFSNTDUc74tWQ==",
       "cpu": [
         "arm64"
       ],
@@ -329,9 +329,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-darwin-x64": {
-      "version": "0.2.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.2.5.tgz",
-      "integrity": "sha512-orePizgXCbTJbDJ4bMMnYh/4OgmWDBbHShNxHKQobcX+NgWTexmR0lV1WNOG+DtczBiGH422e3gHJ+xhTO13vg==",
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.3.0.tgz",
+      "integrity": "sha512-CXp4b/brMbnBPZuGzKIOskd9uD90R73rWubaJ0du/Kt6fcyQX1dM1wEhWTLxI6eKf8IDL/R9QLL2cIahm1J86w==",
       "cpu": [
         "x64"
       ],
@@ -341,9 +341,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
-      "version": "0.2.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.2.5.tgz",
-      "integrity": "sha512-xIMNwsFGOHeY9EUWCHhUAcA2sCHZ5Lim0sc42uuUOeWayyH+HeR6ZWReptDQRuAoJHqQeag9qcqteE0AZPDTEw==",
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.3.0.tgz",
+      "integrity": "sha512-1bjaRzYcDsWIRUbO2K/f+ohNmNvCgKcrrOhmiXSHVlYY8kH1LUMFZj+BhqBC0Ea0Stt7/1rsRLMRXRtaeVOEHw==",
       "cpu": [
         "arm64"
       ],
@@ -353,9 +353,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-linux-x64-gnu": {
-      "version": "0.2.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.2.5.tgz",
-      "integrity": "sha512-Qr8dbHavtE+Zfd45kEORJQe01kRWhMF703gk8zhtZhskDUBCfqm3ap22JIux58tASxVcBqY8EtUFojfYGnQVvA==",
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.3.0.tgz",
+      "integrity": "sha512-BEDIJ6ReGAi+tLTS/RzxIw621yo1UUUiVNTzPGV2didyiJCr1chIGbES+39d/wiFQM43Xs3CBZLNzp+jKkv0/w==",
       "cpu": [
         "x64"
      ],
@@ -365,9 +365,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-win32-x64-msvc": {
-      "version": "0.2.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.2.5.tgz",
-      "integrity": "sha512-jTqkR9HRfbjxhUrlTfveNkJ78tlpVXeNn3BS4wBm4VIsPd75jminKBRYtrlQCWyHusqrUQedKny4hhG1CuNUkg==",
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.3.0.tgz",
+      "integrity": "sha512-7K2kbWbShuifQF/6L/tWSz2DhKfIreHKlBdVOuBTYYOReQMHn5cJxgwuFgQHqMubZ9zcagtHpmo+Wtqd034OKQ==",
       "cpu": [
         "x64"
       ],
@@ -4869,33 +4869,33 @@
       }
     },
     "@lancedb/vectordb-darwin-arm64": {
-      "version": "0.2.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.2.5.tgz",
-      "integrity": "sha512-V4206SajkMN3o+bBFBAYJq5emlrjevitP0g8RFfVlmj/LS38i8k4uvSe1bICQ2amUrYkL/Jw4ktYn19NRfTU+g==",
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.3.0.tgz",
+      "integrity": "sha512-Fg+k/cSnqmNQlSWyDp0PpaAJ67kAISfZAD+zZ3mcE8/3ml2I/wM/GVjPy2zeiQX9aR93lG1mZXFSNTDUc74tWQ==",
       "optional": true
     },
     "@lancedb/vectordb-darwin-x64": {
-      "version": "0.2.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.2.5.tgz",
-      "integrity": "sha512-orePizgXCbTJbDJ4bMMnYh/4OgmWDBbHShNxHKQobcX+NgWTexmR0lV1WNOG+DtczBiGH422e3gHJ+xhTO13vg==",
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.3.0.tgz",
+      "integrity": "sha512-CXp4b/brMbnBPZuGzKIOskd9uD90R73rWubaJ0du/Kt6fcyQX1dM1wEhWTLxI6eKf8IDL/R9QLL2cIahm1J86w==",
       "optional": true
     },
     "@lancedb/vectordb-linux-arm64-gnu": {
-      "version": "0.2.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.2.5.tgz",
-      "integrity": "sha512-xIMNwsFGOHeY9EUWCHhUAcA2sCHZ5Lim0sc42uuUOeWayyH+HeR6ZWReptDQRuAoJHqQeag9qcqteE0AZPDTEw==",
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.3.0.tgz",
+      "integrity": "sha512-1bjaRzYcDsWIRUbO2K/f+ohNmNvCgKcrrOhmiXSHVlYY8kH1LUMFZj+BhqBC0Ea0Stt7/1rsRLMRXRtaeVOEHw==",
       "optional": true
     },
     "@lancedb/vectordb-linux-x64-gnu": {
-      "version": "0.2.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.2.5.tgz",
-      "integrity": "sha512-Qr8dbHavtE+Zfd45kEORJQe01kRWhMF703gk8zhtZhskDUBCfqm3ap22JIux58tASxVcBqY8EtUFojfYGnQVvA==",
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.3.0.tgz",
+      "integrity": "sha512-BEDIJ6ReGAi+tLTS/RzxIw621yo1UUUiVNTzPGV2didyiJCr1chIGbES+39d/wiFQM43Xs3CBZLNzp+jKkv0/w==",
      "optional": true
     },
     "@lancedb/vectordb-win32-x64-msvc": {
-      "version": "0.2.5",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.2.5.tgz",
-      "integrity": "sha512-jTqkR9HRfbjxhUrlTfveNkJ78tlpVXeNn3BS4wBm4VIsPd75jminKBRYtrlQCWyHusqrUQedKny4hhG1CuNUkg==",
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.3.0.tgz",
+      "integrity": "sha512-7K2kbWbShuifQF/6L/tWSz2DhKfIreHKlBdVOuBTYYOReQMHn5cJxgwuFgQHqMubZ9zcagtHpmo+Wtqd034OKQ==",
       "optional": true
     },
     "@neon-rs/cli": {
node/package.json:

@@ -1,6 +1,6 @@
 {
   "name": "vectordb",
-  "version": "0.2.5",
+  "version": "0.3.0",
   "description": " Serverless, low-latency vector database for AI applications",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -81,10 +81,10 @@
     }
   },
   "optionalDependencies": {
-    "@lancedb/vectordb-darwin-arm64": "0.2.5",
-    "@lancedb/vectordb-darwin-x64": "0.2.5",
-    "@lancedb/vectordb-linux-arm64-gnu": "0.2.5",
-    "@lancedb/vectordb-linux-x64-gnu": "0.2.5",
-    "@lancedb/vectordb-win32-x64-msvc": "0.2.5"
+    "@lancedb/vectordb-darwin-arm64": "0.3.0",
+    "@lancedb/vectordb-darwin-x64": "0.3.0",
+    "@lancedb/vectordb-linux-arm64-gnu": "0.3.0",
+    "@lancedb/vectordb-linux-x64-gnu": "0.3.0",
+    "@lancedb/vectordb-win32-x64-msvc": "0.3.0"
   }
 }
node/src/index.ts:

@@ -23,7 +23,7 @@ import { Query } from './query'
 import { isEmbeddingFunction } from './embedding/embedding_function'
 
 // eslint-disable-next-line @typescript-eslint/no-var-requires
-const { databaseNew, databaseTableNames, databaseOpenTable, databaseDropTable, tableCreate, tableAdd, tableCreateVectorIndex, tableCountRows, tableDelete } = require('../native.js')
+const { databaseNew, databaseTableNames, databaseOpenTable, databaseDropTable, tableCreate, tableAdd, tableCreateVectorIndex, tableCountRows, tableDelete, tableCleanupOldVersions, tableCompactFiles } = require('../native.js')
 
 export { Query }
 export type { EmbeddingFunction }
@@ -459,6 +459,111 @@ export class LocalTable<T = number[]> implements Table<T> {
   async delete (filter: string): Promise<void> {
     return tableDelete.call(this._tbl, filter).then((newTable: any) => { this._tbl = newTable })
   }
+
+  /**
+   * Clean up old versions of the table, freeing disk space.
+   *
+   * @param olderThan The minimum age in minutes of the versions to delete. If not
+   *                  provided, defaults to two weeks.
+   * @param deleteUnverified Because they may be part of an in-progress
+   *                  transaction, uncommitted files newer than 7 days old are
+   *                  not deleted by default. This means that failed transactions
+   *                  can leave around data that takes up disk space for up to
+   *                  7 days. You can override this safety mechanism by setting
+   *                  this option to `true`, only if you promise there are no
+   *                  in-progress writes while you run this operation. Failure to
+   *                  uphold this promise can lead to corrupted tables.
+   * @returns Metrics about the cleanup operation.
+   */
+  async cleanupOldVersions (olderThan?: number, deleteUnverified?: boolean): Promise<CleanupStats> {
+    return tableCleanupOldVersions.call(this._tbl, olderThan, deleteUnverified)
+      .then((res: { newTable: any, metrics: CleanupStats }) => {
+        this._tbl = res.newTable
+        return res.metrics
+      })
+  }
+
+  /**
+   * Run the compaction process on the table.
+   *
+   * This can be run after making several small appends to optimize the table
+   * for faster reads.
+   *
+   * @param options Advanced options configuring compaction. In most cases, you
+   *                can omit this argument, as the default options are sensible
+   *                for most tables.
+   * @returns Metrics about the compaction operation.
+   */
+  async compactFiles (options?: CompactionOptions): Promise<CompactionMetrics> {
+    const optionsArg = options ?? {}
+    return tableCompactFiles.call(this._tbl, optionsArg)
+      .then((res: { newTable: any, metrics: CompactionMetrics }) => {
+        this._tbl = res.newTable
+        return res.metrics
+      })
+  }
 }
+
+export interface CleanupStats {
+  /**
+   * The number of bytes removed from disk.
+   */
+  bytesRemoved: number
+  /**
+   * The number of old table versions removed.
+   */
+  oldVersions: number
+}
+
+export interface CompactionOptions {
+  /**
+   * The number of rows per fragment to target. Fragments that have fewer rows
+   * will be compacted into adjacent fragments to produce larger fragments.
+   * Defaults to 1024 * 1024.
+   */
+  targetRowsPerFragment?: number
+  /**
+   * The maximum number of rows per group. Defaults to 1024.
+   */
+  maxRowsPerGroup?: number
+  /**
+   * If true, fragments that have deleted rows may be compacted to
+   * remove the deleted rows. This can improve the performance of queries.
+   * Default is true.
+   */
+  materializeDeletions?: boolean
+  /**
+   * A number between 0 and 1, representing the proportion of rows that must be
+   * marked deleted before a fragment is a candidate for compaction to remove
+   * the deleted rows. Default is 10%.
+   */
+  materializeDeletionsThreshold?: number
+  /**
+   * The number of threads to use for compaction. If not provided, defaults to
+   * the number of cores on the machine.
+   */
+  numThreads?: number
+}
+
+export interface CompactionMetrics {
+  /**
+   * The number of fragments that were removed.
+   */
+  fragmentsRemoved: number
+  /**
+   * The number of new fragments that were created.
+   */
+  fragmentsAdded: number
+  /**
+   * The number of files that were removed. Each fragment may have more than one
+   * file.
+   */
+  filesRemoved: number
+  /**
+   * The number of files added. This is typically equal to the number of
+   * fragments added.
+   */
+  filesAdded: number
+}
 
 /// Config to build IVF_PQ index.
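Taken together, the two new methods support a small maintenance routine like the following (a sketch against the API added above; the URI and table name are placeholders, and the cast mirrors the one used in the tests further down, since the methods live on `LocalTable`):

```typescript
import * as lancedb from 'vectordb'
import { type LocalTable } from 'vectordb'

async function maintain (): Promise<void> {
  const db = await lancedb.connect('data/sample-lancedb')
  const table = await db.openTable('vectors') as LocalTable

  // Merge the small fragments left behind by many small appends.
  const compaction = await table.compactFiles({ targetRowsPerFragment: 1024 * 1024 })
  console.log(`fragments removed: ${compaction.fragmentsRemoved}, added: ${compaction.fragmentsAdded}`)

  // Then drop table versions older than eight hours to reclaim disk space.
  const cleanup = await table.cleanupOldVersions(8 * 60)
  console.log(`freed ${cleanup.bytesRemoved} bytes from ${cleanup.oldVersions} old versions`)
}
```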
@@ -18,13 +18,16 @@ import * as chaiAsPromised from 'chai-as-promised'
 import { v4 as uuidv4 } from 'uuid'
 
 import * as lancedb from '../index'
+import { tmpdir } from 'os'
+import * as fs from 'fs'
+import * as path from 'path'
 
 const assert = chai.assert
 chai.use(chaiAsPromised)
 
 describe('LanceDB AWS Integration test', function () {
   it('s3+ddb schema is processed correctly', async function () {
-    this.timeout(5000)
+    this.timeout(15000)
 
     // WARNING: specifying engine is NOT a publicly supported feature in lancedb yet
     // THE API WILL CHANGE
@@ -41,3 +44,130 @@ describe('LanceDB AWS Integration test', function () {
     assert.equal(await table.countRows(), 6)
   })
 })
+
+describe('LanceDB Mirrored Store Integration test', function () {
+  it('s3://...?mirroredStore=... param is processed correctly', async function () {
+    this.timeout(600000)
+
+    const dir = tmpdir()
+    console.log(dir)
+    const conn = await lancedb.connect(`s3://lancedb-integtest?mirroredStore=${dir}`)
+    const data = Array(200).fill({ vector: Array(128).fill(1.0), id: 0 })
+    data.push(...Array(200).fill({ vector: Array(128).fill(1.0), id: 1 }))
+    data.push(...Array(200).fill({ vector: Array(128).fill(1.0), id: 2 }))
+    data.push(...Array(200).fill({ vector: Array(128).fill(1.0), id: 3 }))
+
+    const tableName = uuidv4()
+
+    // try to create a table and check that it is mirrored
+    const t = await conn.createTable(tableName, data, { writeMode: lancedb.WriteMode.Overwrite })
+
+    const mirroredPath = path.join(dir, `${tableName}.lance`)
+    fs.readdir(mirroredPath, { withFileTypes: true }, (err, files) => {
+      if (err != null) throw err
+      // there should be two dirs
+      assert.equal(files.length, 2)
+      assert.isTrue(files[0].isDirectory())
+      assert.isTrue(files[1].isDirectory())
+
+      fs.readdir(path.join(mirroredPath, '_transactions'), { withFileTypes: true }, (err, files) => {
+        if (err != null) throw err
+        assert.equal(files.length, 1)
+        assert.isTrue(files[0].name.endsWith('.txn'))
+      })
+
+      fs.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true }, (err, files) => {
+        if (err != null) throw err
+        assert.equal(files.length, 1)
+        assert.isTrue(files[0].name.endsWith('.lance'))
+      })
+    })
+
+    // try to create an index and check that it is mirrored
+    await t.createIndex({ column: 'vector', type: 'ivf_pq' })
+
+    fs.readdir(mirroredPath, { withFileTypes: true }, (err, files) => {
+      if (err != null) throw err
+      // there should be three dirs now
+      assert.equal(files.length, 3)
+      assert.isTrue(files[0].isDirectory())
+      assert.isTrue(files[1].isDirectory())
+      assert.isTrue(files[2].isDirectory())
+
+      // Two TXs now
+      fs.readdir(path.join(mirroredPath, '_transactions'), { withFileTypes: true }, (err, files) => {
+        if (err != null) throw err
+        assert.equal(files.length, 2)
+        assert.isTrue(files[0].name.endsWith('.txn'))
+        assert.isTrue(files[1].name.endsWith('.txn'))
+      })
+
+      fs.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true }, (err, files) => {
+        if (err != null) throw err
+        assert.equal(files.length, 1)
+        assert.isTrue(files[0].name.endsWith('.lance'))
+      })
+
+      fs.readdir(path.join(mirroredPath, '_indices'), { withFileTypes: true }, (err, files) => {
+        if (err != null) throw err
+        assert.equal(files.length, 1)
+        assert.isTrue(files[0].isDirectory())
+
+        fs.readdir(path.join(mirroredPath, '_indices', files[0].name), { withFileTypes: true }, (err, files) => {
+          if (err != null) throw err
+
+          assert.equal(files.length, 1)
+          assert.isTrue(files[0].isFile())
+          assert.isTrue(files[0].name.endsWith('.idx'))
+        })
+      })
+    })
+
+    // try a delete and check that it is mirrored
+    await t.delete('id = 0')
+
+    fs.readdir(mirroredPath, { withFileTypes: true }, (err, files) => {
+      if (err != null) throw err
+      // there should be four dirs now
+      assert.equal(files.length, 4)
+      assert.isTrue(files[0].isDirectory())
+      assert.isTrue(files[1].isDirectory())
+      assert.isTrue(files[2].isDirectory())
+      assert.isTrue(files[3].isDirectory())
+
+      // Three TXs now
+      fs.readdir(path.join(mirroredPath, '_transactions'), { withFileTypes: true }, (err, files) => {
+        if (err != null) throw err
+        assert.equal(files.length, 3)
+        assert.isTrue(files[0].name.endsWith('.txn'))
+        assert.isTrue(files[1].name.endsWith('.txn'))
+      })
+
+      fs.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true }, (err, files) => {
+        if (err != null) throw err
+        assert.equal(files.length, 1)
+        assert.isTrue(files[0].name.endsWith('.lance'))
+      })
+
+      fs.readdir(path.join(mirroredPath, '_indices'), { withFileTypes: true }, (err, files) => {
+        if (err != null) throw err
+        assert.equal(files.length, 1)
+        assert.isTrue(files[0].isDirectory())
+
+        fs.readdir(path.join(mirroredPath, '_indices', files[0].name), { withFileTypes: true }, (err, files) => {
+          if (err != null) throw err
+
+          assert.equal(files.length, 1)
+          assert.isTrue(files[0].isFile())
+          assert.isTrue(files[0].name.endsWith('.idx'))
+        })
+      })
+
+      fs.readdir(path.join(mirroredPath, '_deletions'), { withFileTypes: true }, (err, files) => {
+        if (err != null) throw err
+        assert.equal(files.length, 1)
+        assert.isTrue(files[0].name.endsWith('.arrow'))
+      })
+    })
+  })
+})
@@ -18,8 +18,8 @@ import * as chai from 'chai'
import * as chaiAsPromised from 'chai-as-promised'

import * as lancedb from '../index'
import { type AwsCredentials, type EmbeddingFunction, MetricType, Query, WriteMode, DefaultWriteOptions, isWriteOptions } from '../index'
import { Field, Int32, makeVector, Schema, Utf8, Table as ArrowTable, vectorFromArray } from 'apache-arrow'
import { type AwsCredentials, type EmbeddingFunction, MetricType, Query, WriteMode, DefaultWriteOptions, isWriteOptions, type LocalTable } from '../index'
import { FixedSizeList, Field, Int32, makeVector, Schema, Utf8, Table as ArrowTable, vectorFromArray, Float32 } from 'apache-arrow'

const expect = chai.expect
const assert = chai.assert
@@ -258,6 +258,36 @@ describe('LanceDB client', function () {
    })
  })

  describe('when searching an empty dataset', function () {
    it('should not fail', async function () {
      const dir = await track().mkdir('lancejs')
      const con = await lancedb.connect(dir)

      const schema = new Schema(
        [new Field('vector', new FixedSizeList(128, new Field('float32', new Float32())))]
      )
      const table = await con.createTable({ name: 'vectors', schema })
      const result = await table.search(Array(128).fill(0.1)).execute()
      assert.isEmpty(result)
    })
  })

  describe('when searching an empty-after-delete dataset', function () {
    it('should not fail', async function () {
      const dir = await track().mkdir('lancejs')
      const con = await lancedb.connect(dir)

      const schema = new Schema(
        [new Field('vector', new FixedSizeList(128, new Field('float32', new Float32())))]
      )
      const table = await con.createTable({ name: 'vectors', schema })
      await table.add([{ vector: Array(128).fill(0.1) }])
      await table.delete('vector IS NOT NULL')
      const result = await table.search(Array(128).fill(0.1)).execute()
      assert.isEmpty(result)
    })
  })

  describe('when creating a vector index', function () {
    it('overwrite all records in a table', async function () {
      const uri = await createTestDB(32, 300)
@@ -416,3 +446,45 @@ describe('WriteOptions', function () {
    })
  })
})

describe('Compact and cleanup', function () {
  it('can cleanup after compaction', async function () {
    const dir = await track().mkdir('lancejs')
    const con = await lancedb.connect(dir)

    const data = [
      { price: 10, name: 'foo', vector: [1, 2, 3] },
      { price: 50, name: 'bar', vector: [4, 5, 6] }
    ]
    const table = await con.createTable('t1', data) as LocalTable

    const newData = [
      { price: 30, name: 'baz', vector: [7, 8, 9] }
    ]
    await table.add(newData)

    const compactionMetrics = await table.compactFiles({
      numThreads: 2
    })
    assert.equal(compactionMetrics.fragmentsRemoved, 2)
    assert.equal(compactionMetrics.fragmentsAdded, 1)
    assert.equal(await table.countRows(), 3)

    await table.cleanupOldVersions()
    assert.equal(await table.countRows(), 3)

    // should have no effect, but this validates the arguments are parsed.
    await table.compactFiles({
      targetRowsPerFragment: 1024 * 10,
      maxRowsPerGroup: 1024,
      materializeDeletions: true,
      materializeDeletionsThreshold: 0.5,
      numThreads: 2
    })

    const cleanupMetrics = await table.cleanupOldVersions(0, true)
    assert.isAtLeast(cleanupMetrics.bytesRemoved, 1)
    assert.isAtLeast(cleanupMetrics.oldVersions, 1)
    assert.equal(await table.countRows(), 3)
  })
})
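The Python bindings expose the same maintenance pair; a minimal sketch, assuming the snake_case counterparts `compact_files` and `cleanup_old_versions` mirror the Node methods exercised above:

import lancedb

db = lancedb.connect("/tmp/lancedb-compact-demo")
table = db.create_table("t1", [
    {"price": 10, "name": "foo", "vector": [1.0, 2.0, 3.0]},
    {"price": 50, "name": "bar", "vector": [4.0, 5.0, 6.0]},
])
table.add([{"price": 30, "name": "baz", "vector": [7.0, 8.0, 9.0]}])

# Rewrite the two small fragments into one, then drop unreferenced versions.
table.compact_files()
table.cleanup_old_versions()
print(len(table))  # still 3: maintenance never changes the row count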
@@ -1,5 +1,5 @@
[bumpversion]
current_version = 0.2.4
current_version = 0.3.1
commit = True
message = [python] Bump version: {current_version} → {new_version}
tag = True
@@ -16,7 +16,7 @@ pip install lancedb
import lancedb
db = lancedb.connect('<PATH_TO_LANCEDB_DATASET>')
table = db.open_table('my_table')
results = table.search([0.1, 0.3]).limit(20).to_df()
results = table.search([0.1, 0.3]).limit(20).to_list()
print(results)
```
@@ -11,11 +11,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib.metadata
from typing import Optional

__version__ = importlib.metadata.version("lancedb")

from .db import URI, DBConnection, LanceDBConnection
from .remote.db import RemoteDBConnection
from .schema import vector
from .utils import sentry_log


def connect(
12 python/lancedb/cli/__init__.py Normal file
@@ -0,0 +1,12 @@
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
46 python/lancedb/cli/cli.py Normal file
@@ -0,0 +1,46 @@
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import click

from lancedb.utils import CONFIG


@click.group()
@click.version_option(help="LanceDB command line interface entry point")
def cli():
    "LanceDB command line interface"


diagnostics_help = """
Enable or disable LanceDB diagnostics. When enabled, LanceDB will send anonymous events to help us improve LanceDB.
These diagnostics are used only for error reporting and no data is collected. You can find more about diagnostics on
our docs: https://lancedb.github.io/lancedb/cli_config/
"""


@cli.command(help=diagnostics_help)
@click.option("--enabled/--disabled", default=True)
def diagnostics(enabled):
    CONFIG.update({"diagnostics": True if enabled else False})
    click.echo("LanceDB diagnostics is %s" % ("enabled" if enabled else "disabled"))


@cli.command(help="Show current LanceDB configuration")
def config():
    # TODO: pretty print as table with colors and formatting
    click.echo("Current LanceDB configuration:")
    cfg = CONFIG.copy()
    cfg.pop("uuid")  # Don't show uuid as it is not configurable
    for item, amount in cfg.items():
        click.echo("{} ({})".format(item, amount))
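The group above can be exercised without installing a console script by using click's own test runner; a sketch (the `lancedb.cli.cli` import path follows the new files in this diff):

from click.testing import CliRunner

from lancedb.cli.cli import cli

runner = CliRunner()
# Toggle diagnostics off, then print the stored configuration.
print(runner.invoke(cli, ["diagnostics", "--disabled"]).output)
print(runner.invoke(cli, ["config"]).output)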
@@ -1,9 +1,9 @@
import os

import pyarrow as pa
import numpy as np
import pytest

from lancedb.embeddings import EmbeddingFunctionModel, EmbeddingFunctionRegistry
from .embeddings import EmbeddingFunctionRegistry, TextEmbeddingFunction

# import lancedb so we don't have to in every example

@@ -22,17 +22,19 @@ def doctest_setup(monkeypatch, tmpdir):
registry = EmbeddingFunctionRegistry.get_instance()


@registry.register()
class MockEmbeddingFunction(EmbeddingFunctionModel):
    def __call__(self, data):
        if isinstance(data, str):
            data = [data]
        elif isinstance(data, pa.ChunkedArray):
            data = data.combine_chunks().to_pylist()
        elif isinstance(data, pa.Array):
            data = data.to_pylist()
@registry.register("test")
class MockTextEmbeddingFunction(TextEmbeddingFunction):
    """
    Return the hash of the first 10 characters
    """

        return [self.embed(row) for row in data]
    def generate_embeddings(self, texts):
        return [self._compute_one_embedding(row) for row in texts]

    def embed(self, row):
        return [float(hash(c)) for c in row[:10]]
    def _compute_one_embedding(self, row):
        emb = np.array([float(hash(c)) for c in row[:10]])
        emb /= np.linalg.norm(emb)
        return emb

    def ndims(self):
        return 10
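The mock doubles as a template for user-defined embedding functions. A minimal sketch of registering one, assuming (as the diff suggests) that `generate_embeddings` and `ndims` are the only abstract methods a `TextEmbeddingFunction` subclass must supply; the alias "char-hash" and the padding to 10 characters are illustrative choices, not part of this diff:

import numpy as np

from lancedb.embeddings import EmbeddingFunctionRegistry, TextEmbeddingFunction

registry = EmbeddingFunctionRegistry.get_instance()


@registry.register("char-hash")
class CharHashEmbeddings(TextEmbeddingFunction):
    # Hash the first 10 characters into a fixed-size, normalized vector.
    def generate_embeddings(self, texts):
        return [self._one(t) for t in texts]

    def _one(self, text):
        emb = np.array([float(hash(c)) for c in text[:10].ljust(10)])
        return emb / np.linalg.norm(emb)

    def ndims(self):
        return 10


func = registry.get("char-hash").create()
print(func.compute_query_embeddings("hello")[0].shape)  # (10,)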
@@ -12,6 +12,9 @@
# limitations under the License.
from __future__ import annotations

import deprecation

from . import __version__
from .exceptions import MissingColumnError, MissingValueError
from .util import safe_import_pandas
@@ -43,7 +46,7 @@ def contextualize(raw_df: "pd.DataFrame") -> Contextualizer:
    this how many tokens, but depending on the input data, it could be sentences,
    paragraphs, messages, etc.

    >>> contextualize(data).window(3).stride(1).text_col('token').to_df()
    >>> contextualize(data).window(3).stride(1).text_col('token').to_pandas()
                 token  document_id
    0  The quick brown            1
    1  quick brown fox            1
@@ -56,7 +59,7 @@ def contextualize(raw_df: "pd.DataFrame") -> Contextualizer:
    8          dog I love            1
    9   I love sandwiches            2
    10    love sandwiches            2
    >>> contextualize(data).window(7).stride(1).min_window_size(7).text_col('token').to_df()
    >>> contextualize(data).window(7).stride(1).min_window_size(7).text_col('token').to_pandas()
                                      token  document_id
    0   The quick brown fox jumped over the            1
    1  quick brown fox jumped over the lazy            1
@@ -68,7 +71,7 @@ def contextualize(raw_df: "pd.DataFrame") -> Contextualizer:
    ``stride`` determines how many rows to skip between each window start. This can
    be used to reduce the total number of windows generated.

    >>> contextualize(data).window(4).stride(2).text_col('token').to_df()
    >>> contextualize(data).window(4).stride(2).text_col('token').to_pandas()
                       token  document_id
    0    The quick brown fox            1
    2  brown fox jumped over            1
@@ -81,7 +84,7 @@ def contextualize(raw_df: "pd.DataFrame") -> Contextualizer:
    context windows that don't cross document boundaries. In this case, we can
    pass ``document_id`` as the group by.

    >>> contextualize(data).window(4).stride(2).text_col('token').groupby('document_id').to_df()
    >>> contextualize(data).window(4).stride(2).text_col('token').groupby('document_id').to_pandas()
                       token  document_id
    0    The quick brown fox            1
    2  brown fox jumped over            1
@@ -93,14 +96,14 @@ def contextualize(raw_df: "pd.DataFrame") -> Contextualizer:
    This can be used to trim the last few context windows which have size less than
    ``min_window_size``. By default context windows of size 1 are skipped.

    >>> contextualize(data).window(6).stride(3).text_col('token').groupby('document_id').to_df()
    >>> contextualize(data).window(6).stride(3).text_col('token').groupby('document_id').to_pandas()
                                 token  document_id
    0  The quick brown fox jumped over            1
    3     fox jumped over the lazy dog            1
    6                     the lazy dog            1
    9                I love sandwiches            2

    >>> contextualize(data).window(6).stride(3).min_window_size(4).text_col('token').groupby('document_id').to_df()
    >>> contextualize(data).window(6).stride(3).min_window_size(4).text_col('token').groupby('document_id').to_pandas()
                                 token  document_id
    0  The quick brown fox jumped over            1
    3     fox jumped over the lazy dog            1
@@ -176,7 +179,16 @@ class Contextualizer:
        self._min_window_size = min_window_size
        return self

    @deprecation.deprecated(
        deprecated_in="0.3.1",
        removed_in="0.4.0",
        current_version=__version__,
        details="Use to_pandas() instead",
    )
    def to_df(self) -> "pd.DataFrame":
        return self.to_pandas()

    def to_pandas(self) -> "pd.DataFrame":
        """Create the context windows and return a DataFrame."""
        if pd is None:
            raise ImportError(
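A quick way to confirm the shim behaves as intended; the four-token DataFrame below is made up for the check, and the `deprecation` library's warning is a `DeprecationWarning` subclass:

import warnings

import pandas as pd

from lancedb.context import contextualize

data = pd.DataFrame({"token": "The quick brown fox".split(), "document_id": [1] * 4})
builder = contextualize(data).window(2).stride(1).text_col("token")
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old = builder.to_df()  # deprecated alias
assert old.equals(builder.to_pandas())
assert any(issubclass(w.category, DeprecationWarning) for w in caught)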
@@ -22,7 +22,7 @@ import pyarrow as pa
from pyarrow import fs

from .common import DATA, URI
from .embeddings import EmbeddingFunctionModel
from .embeddings import EmbeddingFunctionConfig
from .pydantic import LanceModel
from .table import LanceTable, Table
from .util import fs_from_uri, get_uri_location, get_uri_scheme
@@ -290,7 +290,7 @@ class LanceDBConnection(DBConnection):
        mode: str = "create",
        on_bad_vectors: str = "error",
        fill_value: float = 0.0,
        embedding_functions: Optional[List[EmbeddingFunctionModel]] = None,
        embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
    ) -> LanceTable:
        """Create a table in the database.
@@ -12,11 +12,14 @@
# limitations under the License.


from .cohere import CohereEmbeddingFunction
from .functions import (
    REGISTRY,
    EmbeddingFunctionModel,
    EmbeddingFunction,
    EmbeddingFunctionConfig,
    EmbeddingFunctionRegistry,
    SentenceTransformerEmbeddingFunction,
    TextEmbeddingFunctionModel,
    OpenAIEmbeddings,
    OpenClipEmbeddings,
    SentenceTransformerEmbeddings,
    TextEmbeddingFunction,
)
from .utils import with_embeddings
86 python/lancedb/embeddings/cohere.py Normal file
@@ -0,0 +1,86 @@
# Copyright (c) 2023. LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from typing import ClassVar, List, Union

import numpy as np

from .functions import TextEmbeddingFunction, register
from .utils import api_key_not_found_help


@register("cohere")
class CohereEmbeddingFunction(TextEmbeddingFunction):
    """
    An embedding function that uses the Cohere API

    https://docs.cohere.com/docs/multilingual-language-models

    Parameters
    ----------
    name: str, default "embed-multilingual-v2.0"
        The name of the model to use. See the Cohere documentation for a list of available models.

    Examples
    --------
    import lancedb
    from lancedb.pydantic import LanceModel, Vector
    from lancedb.embeddings import EmbeddingFunctionRegistry

    cohere = EmbeddingFunctionRegistry.get_instance().get("cohere").create(name="embed-multilingual-v2.0")

    class TextModel(LanceModel):
        text: str = cohere.SourceField()
        vector: Vector(cohere.ndims()) = cohere.VectorField()

    data = [ { "text": "hello world" },
             { "text": "goodbye world" }]

    db = lancedb.connect("~/.lancedb")
    tbl = db.create_table("test", schema=TextModel, mode="overwrite")

    tbl.add(data)

    """

    name: str = "embed-multilingual-v2.0"
    client: ClassVar = None

    def ndims(self):
        # TODO: fix hardcoding
        return 768

    def generate_embeddings(
        self, texts: Union[List[str], np.ndarray]
    ) -> List[np.array]:
        """
        Get the embeddings for the given texts

        Parameters
        ----------
        texts: list[str] or np.ndarray (of str)
            The texts to embed
        """
        # TODO retry, rate limit, token limit
        self._init_client()
        rs = CohereEmbeddingFunction.client.embed(texts=texts, model=self.name)

        return [emb for emb in rs.embeddings]

    def _init_client(self):
        cohere = self.safe_import("cohere")
        if CohereEmbeddingFunction.client is None:
            if os.environ.get("COHERE_API_KEY") is None:
                api_key_not_found_help("cohere")
            CohereEmbeddingFunction.client = cohere.Client(os.environ["COHERE_API_KEY"])
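Continuing the docstring example, the stored configuration also drives query-time embedding: a string query against the table is embedded with the same Cohere model (this requires the cohere package and COHERE_API_KEY; "greetings" is an arbitrary query):

results = tbl.search("greetings").limit(1).to_list()
print(results[0]["text"])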
@@ -10,43 +10,79 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import importlib
import io
import json
import os
import socket
import urllib.error
import urllib.parse as urlparse
import urllib.request
from abc import ABC, abstractmethod
from typing import List, Optional, Union
from typing import Dict, List, Optional, Union

import numpy as np
import pyarrow as pa
from cachetools import cached
from pydantic import BaseModel
from pydantic import BaseModel, Field, PrivateAttr
from tqdm import tqdm


class EmbeddingFunctionRegistry:
    """
    This is a singleton class used to register embedding functions
    and fetch them by name. It also handles serializing and deserializing
    and fetch them by name. It also handles serializing and deserializing.
    You can implement your own embedding function by subclassing EmbeddingFunction
    or TextEmbeddingFunction and registering it with the registry.

    Examples
    --------
    >>> registry = EmbeddingFunctionRegistry.get_instance()
    >>> @registry.register("my-embedding-function")
    ... class MyEmbeddingFunction(EmbeddingFunction):
    ...     def ndims(self) -> int:
    ...         return 128
    ...
    ...     def compute_query_embeddings(self, query: str, *args, **kwargs) -> List[np.array]:
    ...         return self.compute_source_embeddings(query, *args, **kwargs)
    ...
    ...     def compute_source_embeddings(self, texts: TEXT, *args, **kwargs) -> List[np.array]:
    ...         return [np.random.rand(self.ndims()) for _ in range(len(texts))]
    ...
    >>> registry.get("my-embedding-function")
    <class 'lancedb.embeddings.functions.MyEmbeddingFunction'>
    """

    @classmethod
    def get_instance(cls):
        return REGISTRY
        return __REGISTRY__

    def __init__(self):
        self._functions = {}

    def register(self):
    def register(self, alias: str = None):
        """
        This creates a decorator that can be used to register
        an EmbeddingFunctionModel.
        an EmbeddingFunction.

        Parameters
        ----------
        alias : Optional[str]
            a human friendly name for the embedding function. If not
            provided, the class name will be used.
        """

        # This is a decorator for a class that inherits from BaseModel
        # It adds the class to the registry
        def decorator(cls):
            if not issubclass(cls, EmbeddingFunctionModel):
                raise TypeError("Must be a subclass of EmbeddingFunctionModel")
            if not issubclass(cls, EmbeddingFunction):
                raise TypeError("Must be a subclass of EmbeddingFunction")
            if cls.__name__ in self._functions:
                raise KeyError(f"{cls.__name__} was already registered")
            self._functions[cls.__name__] = cls
            key = alias or cls.__name__
            self._functions[key] = cls
            cls.__embedding_function_registry_alias__ = alias
            return cls

        return decorator
@@ -57,13 +93,22 @@ class EmbeddingFunctionRegistry:
        """
        self._functions = {}

    def load(self, name: str):
    def get(self, name: str):
        """
        Fetch an embedding function class by name

        Parameters
        ----------
        name : str
            The name of the embedding function to fetch
            Either the alias or the class name if no alias was provided
            during registration
        """
        return self._functions[name]

    def parse_functions(self, metadata: Optional[dict]) -> dict:
    def parse_functions(
        self, metadata: Optional[Dict[bytes, bytes]]
    ) -> Dict[str, "EmbeddingFunctionConfig"]:
        """
        Parse the metadata from an arrow table and
        return a mapping of the vector column to the
@@ -71,9 +116,9 @@ class EmbeddingFunctionRegistry:

        Parameters
        ----------
        metadata : Optional[dict]
        metadata : Optional[Dict[bytes, bytes]]
            The metadata from an arrow table. Note that
            the keys and values are bytes.
            the keys and values are bytes (pyarrow api)

        Returns
        -------
@@ -86,68 +131,94 @@ class EmbeddingFunctionRegistry:
            return {}
        serialized = metadata[b"embedding_functions"]
        raw_list = json.loads(serialized.decode("utf-8"))
        functions = {}
        for obj in raw_list:
            model = self.load(obj["schema"]["title"])
            functions[obj["model"]["vector_column"]] = model(**obj["model"])
        return functions
        return {
            obj["vector_column"]: EmbeddingFunctionConfig(
                vector_column=obj["vector_column"],
                source_column=obj["source_column"],
                function=self.get(obj["name"])(**obj["model"]),
            )
            for obj in raw_list
        }

    def function_to_metadata(self, func):
    def function_to_metadata(self, conf: "EmbeddingFunctionConfig"):
        """
        Convert the given embedding function and source / vector column configs
        into a config dictionary that can be serialized into arrow metadata
        """
        schema = func.model_json_schema()
        json_data = func.model_dump()
        func = conf.function
        name = getattr(
            func, "__embedding_function_registry_alias__", func.__class__.__name__
        )
        json_data = func.safe_model_dump()
        return {
            "schema": schema,
            "name": name,
            "model": json_data,
            "source_column": conf.source_column,
            "vector_column": conf.vector_column,
        }

    def get_table_metadata(self, func_list):
        """
        Convert a list of embedding functions and source / vector column configs
        Convert a list of embedding functions and source / vector configs
        into a config dictionary that can be serialized into arrow metadata
        """
        if func_list is None or len(func_list) == 0:
            return None
        json_data = [self.function_to_metadata(func) for func in func_list]
        # Note that metadata dictionary values must be bytes so we need to json dump then utf8 encode
        # Note that metadata dictionary values must be bytes
        # so we need to json dump then utf8 encode
        metadata = json.dumps(json_data, indent=2).encode("utf-8")
        return {"embedding_functions": metadata}


REGISTRY = EmbeddingFunctionRegistry()


class EmbeddingFunctionModel(BaseModel, ABC):
    """
    A callable ABC for embedding functions
    """

    source_column: Optional[str]
    vector_column: str

    @abstractmethod
    def __call__(self, *args, **kwargs) -> List[np.array]:
        pass
# Global instance
__REGISTRY__ = EmbeddingFunctionRegistry()
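The registry's serialization round-trip can be sanity-checked directly; a minimal sketch reusing the "test" mock registered in the conftest diff above (note that get_table_metadata returns str keys while parse_functions expects the bytes keys pyarrow would hand back):

from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry

registry = EmbeddingFunctionRegistry.get_instance()
func = registry.get("test").create()
conf = EmbeddingFunctionConfig(source_column="text", vector_column="vector", function=func)

# Serialize to arrow-compatible metadata, then parse it back.
metadata = registry.get_table_metadata([conf])
parsed = registry.parse_functions({k.encode(): v for k, v in metadata.items()})
print(parsed["vector"].source_column)  # text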
TEXT = Union[str, List[str], pa.Array, pa.ChunkedArray, np.ndarray]
IMAGES = Union[
    str, bytes, List[str], List[bytes], pa.Array, pa.ChunkedArray, np.ndarray
]


class TextEmbeddingFunctionModel(EmbeddingFunctionModel):
class EmbeddingFunction(BaseModel, ABC):
    """
    A callable ABC for embedding functions that take text as input
    An ABC for embedding functions.

    All concrete embedding functions must implement the following:
    1. compute_query_embeddings() which takes a query and returns a list of embeddings
    2. get_source_embeddings() which returns a list of embeddings for the source column
    For text data, the two will be the same. For multi-modal data, the source column
    might be images and the vector column might be text.
    3. ndims method which returns the number of dimensions of the vector column
    """

    def __call__(self, texts: TEXT, *args, **kwargs) -> List[np.array]:
        texts = self.sanitize_input(texts)
        return self.generate_embeddings(texts)
    _ndims: int = PrivateAttr()

    @classmethod
    def create(cls, **kwargs):
        """
        Create an instance of the embedding function
        """
        return cls(**kwargs)

    @abstractmethod
    def compute_query_embeddings(self, *args, **kwargs) -> List[np.array]:
        """
        Compute the embeddings for a given user query
        """
        pass

    @abstractmethod
    def compute_source_embeddings(self, *args, **kwargs) -> List[np.array]:
        """
        Compute the embeddings for the source column in the database
        """
        pass

    def sanitize_input(self, texts: TEXT) -> Union[List[str], np.ndarray]:
        """
        Sanitize the input to the embedding function. This is called
        before generate_embeddings() and is useful for stripping
        whitespace, lowercasing, etc.
        Sanitize the input to the embedding function.
        """
        if isinstance(texts, str):
            texts = [texts]
@@ -157,6 +228,78 @@ class TextEmbeddingFunctionModel(EmbeddingFunctionModel):
            texts = texts.combine_chunks().to_pylist()
        return texts

    @classmethod
    def safe_import(cls, module: str, mitigation=None):
        """
        Import the specified module. If the module is not installed,
        raise an ImportError with a helpful message.

        Parameters
        ----------
        module : str
            The name of the module to import
        mitigation : Optional[str]
            The package(s) to install to mitigate the error.
            If not provided then the module name will be used.
        """
        try:
            return importlib.import_module(module)
        except ImportError:
            raise ImportError(f"Please install {mitigation or module}")

    def safe_model_dump(self):
        from ..pydantic import PYDANTIC_VERSION

        if PYDANTIC_VERSION.major < 2:
            return dict(self)
        return self.model_dump()

    @abstractmethod
    def ndims(self):
        """
        Return the dimensions of the vector column
        """
        pass

    def SourceField(self, **kwargs):
        """
        Creates a pydantic Field that can automatically annotate
        the source column for this embedding function
        """
        return Field(json_schema_extra={"source_column_for": self}, **kwargs)

    def VectorField(self, **kwargs):
        """
        Creates a pydantic Field that can automatically annotate
        the target vector column for this embedding function
        """
        return Field(json_schema_extra={"vector_column_for": self}, **kwargs)


class EmbeddingFunctionConfig(BaseModel):
    """
    This model encapsulates the configuration for an embedding function
    in a lancedb table. It holds the embedding function, the source column,
    and the vector column
    """

    vector_column: str
    source_column: str
    function: EmbeddingFunction


class TextEmbeddingFunction(EmbeddingFunction):
    """
    A callable ABC for embedding functions that take text as input
    """

    def compute_query_embeddings(self, query: str, *args, **kwargs) -> List[np.array]:
        return self.compute_source_embeddings(query, *args, **kwargs)

    def compute_source_embeddings(self, texts: TEXT, *args, **kwargs) -> List[np.array]:
        texts = self.sanitize_input(texts)
        return self.generate_embeddings(texts)

    @abstractmethod
    def generate_embeddings(
        self, texts: Union[List[str], np.ndarray]
@@ -167,15 +310,25 @@ class TextEmbeddingFunctionModel(EmbeddingFunctionModel):
        pass


@REGISTRY.register()
class SentenceTransformerEmbeddingFunction(TextEmbeddingFunctionModel):
# @EmbeddingFunctionRegistry.get_instance().register(name) doesn't work in 3.8
register = lambda name: EmbeddingFunctionRegistry.get_instance().register(name)


@register("sentence-transformers")
class SentenceTransformerEmbeddings(TextEmbeddingFunction):
    """
    An embedding function that uses the sentence-transformers library

    https://huggingface.co/sentence-transformers
    """

    name: str = "all-MiniLM-L6-v2"
    device: str = "cpu"
    normalize: bool = False
    normalize: bool = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._ndims = None

    @property
    def embedding_model(self):
@@ -186,6 +339,11 @@ class SentenceTransformerEmbeddingFunction(TextEmbeddingFunctionModel):
        """
        return self.__class__.get_embedding_model(self.name, self.device)

    def ndims(self):
        if self._ndims is None:
            self._ndims = len(self.generate_embeddings("foo")[0])
        return self._ndims

    def generate_embeddings(
        self, texts: Union[List[str], np.ndarray]
    ) -> List[np.array]:
@@ -220,9 +378,201 @@ class SentenceTransformerEmbeddingFunction(TextEmbeddingFunctionModel):

    TODO: use lru_cache instead with a reasonable/configurable maxsize
    """
        try:
            from sentence_transformers import SentenceTransformer
        sentence_transformers = cls.safe_import(
            "sentence_transformers", "sentence-transformers"
        )
        return sentence_transformers.SentenceTransformer(name, device=device)

            return SentenceTransformer(name, device=device)
        except ImportError:
            raise ValueError("Please install sentence_transformers")
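A quick local test of the renamed class; this downloads the default all-MiniLM-L6-v2 model (a 384-dimension model) on first use and requires the sentence-transformers package:

from lancedb.embeddings import EmbeddingFunctionRegistry

st = EmbeddingFunctionRegistry.get_instance().get("sentence-transformers").create()
vecs = st.compute_source_embeddings(["hello world", "goodbye world"])
print(len(vecs), len(vecs[0]), st.ndims())  # 2 384 384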
@register("openai")
|
||||
class OpenAIEmbeddings(TextEmbeddingFunction):
|
||||
"""
|
||||
An embedding function that uses the OpenAI API
|
||||
|
||||
https://platform.openai.com/docs/guides/embeddings
|
||||
"""
|
||||
|
||||
name: str = "text-embedding-ada-002"
|
||||
|
||||
def ndims(self):
|
||||
# TODO don't hardcode this
|
||||
return 1536
|
||||
|
||||
def generate_embeddings(
|
||||
self, texts: Union[List[str], np.ndarray]
|
||||
) -> List[np.array]:
|
||||
"""
|
||||
Get the embeddings for the given texts
|
||||
|
||||
Parameters
|
||||
----------
|
||||
texts: list[str] or np.ndarray (of str)
|
||||
The texts to embed
|
||||
"""
|
||||
# TODO retry, rate limit, token limit
|
||||
openai = self.safe_import("openai")
|
||||
rs = openai.Embedding.create(input=texts, model=self.name)["data"]
|
||||
return [v["embedding"] for v in rs]
|
||||
|
||||
|
||||
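Usage mirrors the other registered functions; note the code targets the pre-1.0 openai package's Embedding.create API, and a sketch would need OPENAI_API_KEY set:

from lancedb.embeddings import OpenAIEmbeddings

oai = OpenAIEmbeddings.create()  # text-embedding-ada-002
[vec] = oai.compute_query_embeddings("hello world")
print(len(vec), oai.ndims())  # 1536 1536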
@register("open-clip")
|
||||
class OpenClipEmbeddings(EmbeddingFunction):
|
||||
"""
|
||||
An embedding function that uses the OpenClip API
|
||||
For multi-modal text-to-image search
|
||||
|
||||
https://github.com/mlfoundations/open_clip
|
||||
"""
|
||||
|
||||
name: str = "ViT-B-32"
|
||||
pretrained: str = "laion2b_s34b_b79k"
|
||||
device: str = "cpu"
|
||||
batch_size: int = 64
|
||||
normalize: bool = True
|
||||
_model = PrivateAttr()
|
||||
_preprocess = PrivateAttr()
|
||||
_tokenizer = PrivateAttr()
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
open_clip = self.safe_import("open_clip", "open-clip")
|
||||
model, _, preprocess = open_clip.create_model_and_transforms(
|
||||
self.name, pretrained=self.pretrained
|
||||
)
|
||||
model.to(self.device)
|
||||
self._model, self._preprocess = model, preprocess
|
||||
self._tokenizer = open_clip.get_tokenizer(self.name)
|
||||
self._ndims = None
|
||||
|
||||
def ndims(self):
|
||||
if self._ndims is None:
|
||||
self._ndims = self.generate_text_embeddings("foo").shape[0]
|
||||
return self._ndims
|
||||
|
||||
def compute_query_embeddings(
|
||||
self, query: Union[str, "PIL.Image.Image"], *args, **kwargs
|
||||
) -> List[np.ndarray]:
|
||||
"""
|
||||
Compute the embeddings for a given user query
|
||||
|
||||
Parameters
|
||||
----------
|
||||
query : Union[str, PIL.Image.Image]
|
||||
The query to embed. A query can be either text or an image.
|
||||
"""
|
||||
if isinstance(query, str):
|
||||
return [self.generate_text_embeddings(query)]
|
||||
else:
|
||||
PIL = self.safe_import("PIL", "pillow")
|
||||
if isinstance(query, PIL.Image.Image):
|
||||
return [self.generate_image_embedding(query)]
|
||||
else:
|
||||
raise TypeError("OpenClip supports str or PIL Image as query")
|
||||
|
||||
def generate_text_embeddings(self, text: str) -> np.ndarray:
|
||||
torch = self.safe_import("torch")
|
||||
text = self.sanitize_input(text)
|
||||
text = self._tokenizer(text)
|
||||
text.to(self.device)
|
||||
with torch.no_grad():
|
||||
text_features = self._model.encode_text(text.to(self.device))
|
||||
if self.normalize:
|
||||
text_features /= text_features.norm(dim=-1, keepdim=True)
|
||||
return text_features.cpu().numpy().squeeze()
|
||||
|
||||
def sanitize_input(self, images: IMAGES) -> Union[List[bytes], np.ndarray]:
|
||||
"""
|
||||
Sanitize the input to the embedding function.
|
||||
"""
|
||||
if isinstance(images, (str, bytes)):
|
||||
images = [images]
|
||||
elif isinstance(images, pa.Array):
|
||||
images = images.to_pylist()
|
||||
elif isinstance(images, pa.ChunkedArray):
|
||||
images = images.combine_chunks().to_pylist()
|
||||
return images
|
||||
|
||||
def compute_source_embeddings(
|
||||
self, images: IMAGES, *args, **kwargs
|
||||
) -> List[np.array]:
|
||||
"""
|
||||
Get the embeddings for the given images
|
||||
"""
|
||||
images = self.sanitize_input(images)
|
||||
embeddings = []
|
||||
for i in range(0, len(images), self.batch_size):
|
||||
j = min(i + self.batch_size, len(images))
|
||||
batch = images[i:j]
|
||||
embeddings.extend(self._parallel_get(batch))
|
||||
return embeddings
|
||||
|
||||
def _parallel_get(self, images: Union[List[str], List[bytes]]) -> List[np.ndarray]:
|
||||
"""
|
||||
Issue concurrent requests to retrieve the image data
|
||||
"""
|
||||
with concurrent.futures.ThreadPoolExecutor() as executor:
|
||||
futures = [
|
||||
executor.submit(self.generate_image_embedding, image)
|
||||
for image in images
|
||||
]
|
||||
return [future.result() for future in tqdm(futures)]
|
||||
|
||||
def generate_image_embedding(
|
||||
self, image: Union[str, bytes, "PIL.Image.Image"]
|
||||
) -> np.ndarray:
|
||||
"""
|
||||
Generate the embedding for a single image
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : Union[str, bytes, PIL.Image.Image]
|
||||
The image to embed. If the image is a str, it is treated as a uri.
|
||||
If the image is bytes, it is treated as the raw image bytes.
|
||||
"""
|
||||
torch = self.safe_import("torch")
|
||||
# TODO handle retry and errors for https
|
||||
image = self._to_pil(image)
|
||||
image = self._preprocess(image).unsqueeze(0)
|
||||
with torch.no_grad():
|
||||
return self._encode_and_normalize_image(image)
|
||||
|
||||
def _to_pil(self, image: Union[str, bytes]):
|
||||
PIL = self.safe_import("PIL", "pillow")
|
||||
if isinstance(image, bytes):
|
||||
return PIL.Image.open(io.BytesIO(image))
|
||||
if isinstance(image, PIL.Image.Image):
|
||||
return image
|
||||
elif isinstance(image, str):
|
||||
parsed = urlparse.urlparse(image)
|
||||
# TODO handle drive letter on windows.
|
||||
if parsed.scheme == "file":
|
||||
return PIL.Image.open(parsed.path)
|
||||
elif parsed.scheme == "":
|
||||
return PIL.Image.open(image if os.name == "nt" else parsed.path)
|
||||
elif parsed.scheme.startswith("http"):
|
||||
return PIL.Image.open(io.BytesIO(url_retrieve(image)))
|
||||
else:
|
||||
raise NotImplementedError("Only local and http(s) urls are supported")
|
||||
|
||||
def _encode_and_normalize_image(self, image_tensor: "torch.Tensor"):
|
||||
"""
|
||||
encode a single image tensor and optionally normalize the output
|
||||
"""
|
||||
image_features = self._model.encode_image(image_tensor.to(self.device))
|
||||
if self.normalize:
|
||||
image_features /= image_features.norm(dim=-1, keepdim=True)
|
||||
return image_features.cpu().numpy().squeeze()
|
||||
|
||||
|
||||
def url_retrieve(url: str):
|
||||
"""
|
||||
Parameters
|
||||
----------
|
||||
url: str
|
||||
URL to download from
|
||||
"""
|
||||
try:
|
||||
with urllib.request.urlopen(url) as conn:
|
||||
return conn.read()
|
||||
except (socket.gaierror, urllib.error.URLError) as err:
|
||||
raise ConnectionError("could not download {} due to {}".format(url, err))
|
||||
|
||||
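End to end, OpenClipEmbeddings enables text-to-image search. A sketch, with the caveats that the table name and image URL are placeholders and the open-clip, torch, and pillow packages (plus a model download) are required:

import lancedb
from lancedb.embeddings import EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector

clip = EmbeddingFunctionRegistry.get_instance().get("open-clip").create()


class Images(LanceModel):
    uri: str = clip.SourceField()  # images are fetched and embedded from this column
    vector: Vector(clip.ndims()) = clip.VectorField()


db = lancedb.connect("/tmp/clip-demo")
tbl = db.create_table("images", schema=Images, mode="overwrite")
tbl.add([{"uri": "http://example.com/cat.png"}])  # hypothetical URL

# Text-to-image search: the query string is embedded with the same model.
hits = tbl.search("a photo of a cat").limit(1).to_list()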
@@ -21,6 +21,7 @@ from lance.vector import vec_to_table
from retry import retry

from ..util import safe_import_pandas
from ..utils.general import LOGGER

pd = safe_import_pandas()
DATA = Union[pa.Table, "pd.DataFrame"]
@@ -152,3 +153,8 @@ class FunctionWrapper:
            yield from tqdm(_chunker(arr), total=math.ceil(length / self._batch_size))
        else:
            yield from _chunker(arr)


def api_key_not_found_help(provider):
    LOGGER.error(f"Could not find API key for {provider}.")
    raise ValueError(f"Please set the {provider.upper()}_API_KEY environment variable.")
@@ -26,6 +26,8 @@ import pyarrow as pa
import pydantic
import semver

from .embeddings import EmbeddingFunctionRegistry

PYDANTIC_VERSION = semver.Version.parse(pydantic.__version__)
try:
    from pydantic_core import CoreSchema, core_schema
@@ -126,7 +128,7 @@ def Vector(
        def validate(cls, v):
            if not isinstance(v, (list, range, np.ndarray)) or len(v) != dim:
                raise TypeError("A list of numbers or numpy.ndarray is needed")
            return v
            return cls(v)

if PYDANTIC_VERSION < (2, 0):
@@ -236,27 +238,18 @@ def pydantic_to_schema(model: Type[pydantic.BaseModel]) -> pa.Schema:
    >>> from typing import List, Optional
    >>> import pydantic
    >>> from lancedb.pydantic import pydantic_to_schema
    ...
    >>> class InnerModel(pydantic.BaseModel):
    ...     a: str
    ...     b: Optional[float]
    >>>
    >>> class FooModel(pydantic.BaseModel):
    ...     id: int
    ...     s: Optional[str] = None
    ...     s: str
    ...     vec: List[float]
    ...     li: List[int]
    ...     inner: InnerModel
    ...
    >>> schema = pydantic_to_schema(FooModel)
    >>> assert schema == pa.schema([
    ...     pa.field("id", pa.int64(), False),
    ...     pa.field("s", pa.utf8(), True),
    ...     pa.field("s", pa.utf8(), False),
    ...     pa.field("vec", pa.list_(pa.float64()), False),
    ...     pa.field("li", pa.list_(pa.int64()), False),
    ...     pa.field("inner", pa.struct([
    ...         pa.field("a", pa.utf8(), False),
    ...         pa.field("b", pa.float64(), True),
    ...     ]), False),
    ... ])
    """
    fields = _pydantic_model_to_fields(model)
@@ -290,13 +283,58 @@ class LanceModel(pydantic.BaseModel):
        """
        Get the Arrow Schema for this model.
        """
        return pydantic_to_schema(cls)
        schema = pydantic_to_schema(cls)
        functions = cls.parse_embedding_functions()
        if len(functions) > 0:
            metadata = EmbeddingFunctionRegistry.get_instance().get_table_metadata(
                functions
            )
            schema = schema.with_metadata(metadata)
        return schema

    @classmethod
    def field_names(cls) -> List[str]:
        """
        Get the field names of this model.
        """
        return list(cls.safe_get_fields().keys())

    @classmethod
    def safe_get_fields(cls):
        if PYDANTIC_VERSION.major < 2:
            return list(cls.__fields__.keys())
        return list(cls.model_fields.keys())
            return cls.__fields__
        return cls.model_fields

    @classmethod
    def parse_embedding_functions(cls) -> List["EmbeddingFunctionConfig"]:
        """
        Parse the embedding functions from this model.
        """
        from .embeddings import EmbeddingFunctionConfig

        vec_and_function = []
        for name, field_info in cls.safe_get_fields().items():
            func = get_extras(field_info, "vector_column_for")
            if func is not None:
                vec_and_function.append([name, func])

        configs = []
        for vec, func in vec_and_function:
            for source, field_info in cls.safe_get_fields().items():
                src_func = get_extras(field_info, "source_column_for")
                if src_func == func:
                    configs.append(
                        EmbeddingFunctionConfig(
                            source_column=source, vector_column=vec, function=func
                        )
                    )
        return configs


def get_extras(field_info: pydantic.fields.FieldInfo, key: str) -> Any:
    """
    Get the extra metadata from a Pydantic FieldInfo.
    """
    if PYDANTIC_VERSION.major >= 2:
        return (field_info.json_schema_extra or {}).get(key)
    return (field_info.field_info.extra or {}).get("json_schema_extra", {}).get(key)
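With parse_embedding_functions wired into the schema method, a model that pairs SourceField and VectorField carries its embedding configuration in the arrow schema metadata. A sketch reusing the conftest "test" mock from above (to_arrow_schema is assumed to be the LanceModel classmethod being patched in this hunk):

from lancedb.embeddings import EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector

func = EmbeddingFunctionRegistry.get_instance().get("test").create()


class Words(LanceModel):
    text: str = func.SourceField()
    vector: Vector(func.ndims()) = func.VectorField()


schema = Words.to_arrow_schema()
print(b"embedding_functions" in schema.metadata)  # True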
@@ -16,10 +16,12 @@ from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List, Literal, Optional, Type, Union

import deprecation
import numpy as np
import pyarrow as pa
import pydantic

from . import __version__
from .common import VECTOR_COLUMN_NAME
from .pydantic import LanceModel
from .util import safe_import_pandas
@@ -38,6 +40,9 @@ class Query(pydantic.BaseModel):
    # sql filter to refine the query with
    filter: Optional[str] = None

    # if True then apply the filter before vector search
    prefilter: bool = False

    # top k results to return
    k: int
@@ -60,13 +65,15 @@ class LanceQueryBuilder(ABC):
    def create(
        cls,
        table: "lancedb.table.Table",
        query: Optional[Union[np.ndarray, str]],
        query: Optional[Union[np.ndarray, str, "PIL.Image.Image"]],
        query_type: str,
        vector_column_name: str,
    ) -> LanceQueryBuilder:
        if query is None:
            return LanceEmptyQueryBuilder(table)

        # convert "auto" query_type to "vector" or "fts"
        # and convert the query to vector if needed
        query, query_type = cls._resolve_query(
            table, query, query_type, vector_column_name
        )
@@ -90,30 +97,27 @@ class LanceQueryBuilder(ABC):
        # otherwise raise TypeError
        if query_type == "fts":
            if not isinstance(query, str):
                raise TypeError(
                    f"Query type is 'fts' but query is not a string: {type(query)}"
                )
                raise TypeError(f"'fts' queries must be a string: {type(query)}")
            return query, query_type
        elif query_type == "vector":
            # If query_type is vector, then query must be a list or np.ndarray.
            # otherwise raise TypeError
            if not isinstance(query, (list, np.ndarray)):
                raise TypeError(
                    f"Query type is 'vector' but query is not a list or np.ndarray: {type(query)}"
                )
                conf = table.embedding_functions.get(vector_column_name)
                if conf is not None:
                    query = conf.function.compute_query_embeddings(query)[0]
                else:
                    msg = f"No embedding function for {vector_column_name}"
                    raise ValueError(msg)
            return query, query_type
        elif query_type == "auto":
            if isinstance(query, (list, np.ndarray)):
                return query, "vector"
            elif isinstance(query, str):
                func = table.embedding_functions.get(vector_column_name, None)
                if func is not None:
                    query = func(query)[0]
                else:
                conf = table.embedding_functions.get(vector_column_name)
                if conf is not None:
                    query = conf.function.compute_query_embeddings(query)[0]
                    return query, "vector"
                else:
                    return query, "fts"
            else:
                raise TypeError("Query must be a list, np.ndarray, or str")
        else:
            raise ValueError(
                f"Invalid query_type, must be 'vector', 'fts', or 'auto': {query_type}"
@@ -125,7 +129,24 @@ class LanceQueryBuilder(ABC):
        self._columns = None
        self._where = None

    @deprecation.deprecated(
        deprecated_in="0.3.1",
        removed_in="0.4.0",
        current_version=__version__,
        details="Use to_pandas() instead",
    )
    def to_df(self) -> "pd.DataFrame":
        """
        Deprecated alias for `to_pandas()`. Please use `to_pandas()` instead.

        Execute the query and return the results as a pandas DataFrame.
        In addition to the selected columns, LanceDB also returns a vector
        and also the "_distance" column which is the distance between the query
        vector and the returned vector.
        """
        return self.to_pandas()

    def to_pandas(self) -> "pd.DataFrame":
        """
        Execute the query and return the results as a pandas DataFrame.
        In addition to the selected columns, LanceDB also returns a vector
@@ -146,6 +167,16 @@ class LanceQueryBuilder(ABC):
        """
        raise NotImplementedError

    def to_list(self) -> List[dict]:
        """
        Execute the query and return the results as a list of dictionaries.

        Each list entry is a dictionary with the selected column names as keys,
        or all table columns if `select` is not called. The vector and the "_distance"
        fields are returned whether or not they're explicitly selected.
        """
        return self.to_arrow().to_pylist()

    def to_pydantic(self, model: Type[LanceModel]) -> List[LanceModel]:
        """Return the table as a list of pydantic models.
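The new to_list accessor rounds out the result conversions; reusing a table from the examples above:

rows = table.search([0.1, 0.3]).limit(2).select(["name"]).to_list()
for row in rows:
    print(row["name"], row["_distance"])  # vector and _distance come back too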
@@ -163,7 +194,7 @@ class LanceQueryBuilder(ABC):
            for row in self.to_arrow().to_pylist()
        ]

    def limit(self, limit: int) -> LanceVectorQueryBuilder:
    def limit(self, limit: int) -> LanceQueryBuilder:
        """Set the maximum number of results to return.

        Parameters
@@ -173,13 +204,13 @@ class LanceQueryBuilder(ABC):

        Returns
        -------
        LanceVectorQueryBuilder
        LanceQueryBuilder
            The LanceQueryBuilder object.
        """
        self._limit = limit
        return self

    def select(self, columns: list) -> LanceVectorQueryBuilder:
    def select(self, columns: list) -> LanceQueryBuilder:
        """Set the columns to return.

        Parameters
@@ -189,13 +220,13 @@ class LanceQueryBuilder(ABC):

        Returns
        -------
        LanceVectorQueryBuilder
        LanceQueryBuilder
            The LanceQueryBuilder object.
        """
        self._columns = columns
        return self

    def where(self, where: str) -> LanceVectorQueryBuilder:
    def where(self, where) -> LanceQueryBuilder:
        """Set the where clause.

        Parameters
@@ -205,7 +236,7 @@ class LanceQueryBuilder(ABC):

        Returns
        -------
        LanceVectorQueryBuilder
        LanceQueryBuilder
            The LanceQueryBuilder object.
        """
        self._where = where
@@ -230,7 +261,7 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
    ...           .where("b < 10")
    ...           .select(["b"])
    ...           .limit(2)
    ...           .to_df())
    ...           .to_pandas())
       b      vector  _distance
    0  6  [0.4, 0.4]        0.0
    """
@@ -238,7 +269,7 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
    def __init__(
        self,
        table: "lancedb.table.Table",
        query: Union[np.ndarray, list],
        query: Union[np.ndarray, list, "PIL.Image.Image"],
        vector_column: str = VECTOR_COLUMN_NAME,
    ):
        super().__init__(table)
@@ -247,6 +278,7 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
        self._nprobes = 20
        self._refine_factor = None
        self._vector_column = vector_column
        self._prefilter = False

    def metric(self, metric: Literal["L2", "cosine"]) -> LanceVectorQueryBuilder:
        """Set the distance metric to use.
@@ -321,6 +353,7 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
        query = Query(
            vector=vector,
            filter=self._where,
            prefilter=self._prefilter,
            k=self._limit,
            metric=self._metric,
            columns=self._columns,
@@ -330,6 +363,30 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
        )
        return self._table._execute_query(query)

    def where(self, where: str, prefilter: bool = False) -> LanceVectorQueryBuilder:
        """Set the where clause.

        Parameters
        ----------
        where: str
            The where clause.
        prefilter: bool, default False
            If True, apply the filter before vector search, otherwise the
            filter is applied on the result of vector search.
            This feature is **EXPERIMENTAL** and may be removed and modified
            without warning in the future. Currently this is only supported
            in OSS and can only be used with a table that does not have an ANN
            index.

        Returns
        -------
        LanceQueryBuilder
            The LanceQueryBuilder object.
        """
        self._where = where
        self._prefilter = prefilter
        return self


class LanceFtsQueryBuilder(LanceQueryBuilder):
    def __init__(self, table: "lancedb.table.Table", query: str):
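A usage sketch of the new experimental prefilter flag (per the docstring above: OSS only, and only on a table without an ANN index):

# Filter first, then run the flat vector search over the survivors.
hits = (
    table.search([0.4, 0.4])
    .where("b < 10", prefilter=True)
    .limit(2)
    .to_pandas()
)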
@@ -14,7 +14,7 @@
import abc
from typing import List, Optional

import attr
import attrs
import pyarrow as pa
from pydantic import BaseModel

@@ -44,7 +44,7 @@ class VectorQuery(BaseModel):
    refine_factor: Optional[int] = None


@attr.define
@attrs.define
class VectorQueryResult:
    # for now the response is directly serialized into a pandas dataframe
    tbl: pa.Table
@@ -16,7 +16,7 @@ import functools
from typing import Any, Callable, Dict, Optional, Union

import aiohttp
import attr
import attrs
import pyarrow as pa
from pydantic import BaseModel

@@ -43,14 +43,14 @@ async def _read_ipc(resp: aiohttp.ClientResponse) -> pa.Table:
    return reader.read_all()


@attr.define(slots=False)
@attrs.define(slots=False)
class RestfulLanceDBClient:
    db_name: str
    region: str
    api_key: Credential
    host_override: Optional[str] = attr.field(default=None)
    host_override: Optional[str] = attrs.field(default=None)

    closed: bool = attr.field(default=False, init=False)
    closed: bool = attrs.field(default=False, init=False)

    @functools.cached_property
    def session(self) -> aiohttp.ClientSession:
@@ -18,10 +18,9 @@ from urllib.parse import urlparse

import pyarrow as pa

from lancedb.common import DATA
from lancedb.db import DBConnection
from lancedb.table import Table, _sanitize_data

from ..common import DATA
from ..db import DBConnection
from ..table import Table, _sanitize_data
from .arrow import to_ipc_binary
from .client import ARROW_STREAM_CONTENT_TYPE, RestfulLanceDBClient
@@ -13,7 +13,7 @@

import uuid
from functools import cached_property
from typing import Union
from typing import Optional, Union

import pyarrow as pa
from lance import json_to_schema
@@ -62,6 +62,7 @@ class RemoteTable(Table):
        num_sub_vectors=96,
        vector_column_name: str = VECTOR_COLUMN_NAME,
        replace: bool = True,
        accelerator: Optional[str] = None,
    ):
        raise NotImplementedError

@@ -98,6 +99,8 @@ class RemoteTable(Table):
        return LanceVectorQueryBuilder(self, query, vector_column_name)

    def _execute_query(self, query: Query) -> pa.Table:
        if query.prefilter:
            raise NotImplementedError("Cloud support for prefiltering is coming soon")
        result = self._conn._client.query(self._name, query)
        return self._conn._loop.run_until_complete(result).to_arrow()
@@ -16,6 +16,7 @@ from __future__ import annotations
import inspect
import os
from abc import ABC, abstractmethod
from datetime import timedelta
from functools import cached_property
from typing import Any, Iterable, List, Optional, Union

@@ -24,14 +25,16 @@ import numpy as np
import pyarrow as pa
import pyarrow.compute as pc
from lance import LanceDataset
from lance.dataset import ReaderLike
from lance.dataset import CleanupStats, ReaderLike
from lance.vector import vec_to_table

from .common import DATA, VEC, VECTOR_COLUMN_NAME
from .embeddings import EmbeddingFunctionModel, EmbeddingFunctionRegistry
from .embeddings import EmbeddingFunctionRegistry
from .embeddings.functions import EmbeddingFunctionConfig
from .pydantic import LanceModel
from .query import LanceQueryBuilder, Query
from .util import fs_from_uri, safe_import_pandas
from .utils.events import register_event

pd = safe_import_pandas()

@@ -81,15 +84,16 @@ def _append_vector_col(data: pa.Table, metadata: dict, schema: Optional[pa.Schem
    vector column to the table.
    """
    functions = EmbeddingFunctionRegistry.get_instance().parse_functions(metadata)
    for vector_col, func in functions.items():
        if vector_col not in data.column_names:
            col_data = func(data[func.source_column])
    for vector_column, conf in functions.items():
        func = conf.function
        if vector_column not in data.column_names:
            col_data = func.compute_source_embeddings(data[conf.source_column])
            if schema is not None:
                dtype = schema.field(vector_col).type
                dtype = schema.field(vector_column).type
            else:
                dtype = pa.list_(pa.float32(), len(col_data[0]))
            data = data.append_column(
                pa.field(vector_col, type=dtype), pa.array(col_data, type=dtype)
                pa.field(vector_column, type=dtype), pa.array(col_data, type=dtype)
            )
    return data
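The practical effect of _append_vector_col is that rows may be added without a vector column at all; a sketch continuing the Words model and mock function from the pydantic example above:

import lancedb

db = lancedb.connect("/tmp/lancedb-demo")
tbl = db.create_table("words", schema=Words, mode="overwrite")
tbl.add([{"text": "hello world"}, {"text": "goodbye world"}])
# "vector" was computed from "text" via the config stored in the schema metadata.
assert "vector" in tbl.schema.names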
@@ -134,7 +138,7 @@ class Table(ABC):

    Can query the table with [Table.search][lancedb.table.Table.search].

    >>> table.search([0.4, 0.4]).select(["b"]).to_df()
    >>> table.search([0.4, 0.4]).select(["b"]).to_pandas()
       b      vector  _distance
    0  4  [0.5, 1.3]       0.82
    1  2  [1.1, 1.2]       1.13
@@ -178,6 +182,7 @@ class Table(ABC):
        num_sub_vectors=96,
        vector_column_name: str = VECTOR_COLUMN_NAME,
        replace: bool = True,
        accelerator: Optional[str] = None,
    ):
        """Create an index on the table.

@@ -198,6 +203,9 @@ class Table(ABC):
        replace: bool, default True
            If True, replace the existing index if it exists.
            If False, raise an error if duplicate index exists.
        accelerator: str, default None
            If set, use the given accelerator to create the index.
            Only support "cuda" for now.
        """
        raise NotImplementedError
@@ -230,7 +238,7 @@ class Table(ABC):
|
||||
@abstractmethod
|
||||
def search(
|
||||
self,
|
||||
query: Optional[Union[VEC, str]] = None,
|
||||
query: Optional[Union[VEC, str, "PIL.Image.Image"]] = None,
|
||||
vector_column_name: str = VECTOR_COLUMN_NAME,
|
||||
query_type: str = "auto",
|
||||
) -> LanceQueryBuilder:
|
||||
@@ -239,7 +247,7 @@ class Table(ABC):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
query: str, list, np.ndarray, default None
|
||||
query: str, list, np.ndarray, PIL.Image.Image, default None
|
||||
The query to search for. If None then
|
||||
the select/where/limit clauses are applied to filter
|
||||
the table
|
||||
@@ -249,6 +257,8 @@ class Table(ABC):
|
||||
"vector", "fts", or "auto"
|
||||
If "auto" then the query type is inferred from the query;
|
||||
If `query` is a list/np.ndarray then the query type is "vector";
|
||||
If `query` is a PIL.Image.Image then either do vector search
|
||||
or raise an error if no corresponding embedding function is found.
|
||||
If `query` is a string, then the query type is "vector" if the
|
||||
table has embedding functions else the query type is "fts"
|
||||
|
||||
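In practice, the inference rules above read like this; a minimal sketch, where `tbl` and the image path are illustrative and not part of this diff:

import PIL.Image

tbl.search([0.4, 0.4])                   # list/np.ndarray -> "vector"
tbl.search("puppy")                      # str -> "vector" if the table has embedding functions, else "fts"
tbl.search(PIL.Image.open("query.jpg"))  # image -> vector search, or an error if no matching embedding function exists
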
@@ -386,6 +396,17 @@ class LanceTable(Table):
            raise ValueError(f"Invalid version {version}")
        self._reset_dataset(version=version)

        try:
            # Accessing the property updates the cached value
            _ = self._dataset
        except Exception as e:
            if "not found" in str(e):
                raise ValueError(
                    f"Version {version} no longer exists. Was it cleaned up?"
                )
            else:
                raise e

    def restore(self, version: int = None):
        """Restore a version of the table. This is an in-place operation.

@@ -475,6 +496,7 @@ class LanceTable(Table):
        num_sub_vectors=96,
        vector_column_name=VECTOR_COLUMN_NAME,
        replace: bool = True,
        accelerator: Optional[str] = None,
    ):
        """Create an index on the table."""
        self._dataset.create_index(
@@ -484,8 +506,10 @@ class LanceTable(Table):
            num_partitions=num_partitions,
            num_sub_vectors=num_sub_vectors,
            replace=replace,
            accelerator=accelerator,
        )
        self._reset_dataset()
        register_event("create_index")

    def create_fts_index(self, field_names: Union[str, List[str]]):
        """Create a full-text search index on the table.
@@ -504,6 +528,7 @@ class LanceTable(Table):
            field_names = [field_names]
        index = create_index(self._get_fts_index_path(), field_names)
        populate_index(index, self, field_names)
        register_event("create_fts_index")

    def _get_fts_index_path(self):
        return os.path.join(self._dataset_uri, "_indices", "tantivy")
@@ -524,6 +549,9 @@ class LanceTable(Table):
        fill_value: float = 0.0,
    ):
        """Add data to the table.
        If vector columns are missing and the table
        has embedding functions, then the vector columns
        are automatically computed and added.

        Parameters
        ----------
@@ -553,6 +581,7 @@ class LanceTable(Table):
        )
        lance.write_dataset(data, self._dataset_uri, schema=self.schema, mode=mode)
        self._reset_dataset()
        register_event("add")

    def merge(
        self,
@@ -616,12 +645,7 @@ class LanceTable(Table):
            other_table, left_on=left_on, right_on=right_on, schema=schema
        )
        self._reset_dataset()

    def _get_embedding_function_for_source_col(self, column_name: str):
        for k, v in self.embedding_functions.items():
            if v.source_column == column_name:
                return v
        return None
        register_event("merge")

    @cached_property
    def embedding_functions(self) -> dict:
@@ -640,7 +664,7 @@ class LanceTable(Table):

    def search(
        self,
        query: Optional[Union[VEC, str]] = None,
        query: Optional[Union[VEC, str, "PIL.Image.Image"]] = None,
        vector_column_name: str = VECTOR_COLUMN_NAME,
        query_type: str = "auto",
    ) -> LanceQueryBuilder:
@@ -649,7 +673,7 @@ class LanceTable(Table):

        Parameters
        ----------
        query: str, list, np.ndarray, or None
        query: str, list, np.ndarray, a PIL Image or None
            The query to search for. If None then
            the select/where/limit clauses are applied to filter
            the table
@@ -658,9 +682,11 @@ class LanceTable(Table):
        query_type: str, default "auto"
            "vector", "fts", or "auto"
            If "auto" then the query type is inferred from the query;
            If the query is a list/np.ndarray then the query type is "vector";
            If `query` is a list/np.ndarray then the query type is "vector";
            If `query` is a PIL.Image.Image then either do vector search
            or raise an error if no corresponding embedding function is found.
            If the query is a string, then the query type is "vector" if the
            table has embedding functions else the query type is "fts"
            table has embedding functions, else the query type is "fts"

        Returns
        -------
@@ -670,6 +696,7 @@ class LanceTable(Table):
        and also the "_distance" column which is the distance between the query
        vector and the returned vector.
        """
        register_event("search")
        return LanceQueryBuilder.create(
            self, query, query_type, vector_column_name=vector_column_name
        )
@@ -684,7 +711,7 @@ class LanceTable(Table):
        mode="create",
        on_bad_vectors: str = "error",
        fill_value: float = 0.0,
        embedding_functions: List[EmbeddingFunctionModel] = None,
        embedding_functions: List[EmbeddingFunctionConfig] = None,
    ):
        """
        Create a new table.
@@ -727,10 +754,16 @@ class LanceTable(Table):
        """
        tbl = LanceTable(db, name)
        if inspect.isclass(schema) and issubclass(schema, LanceModel):
            # convert LanceModel to pyarrow schema
            # note that it's possible this contains
            # embedding function metadata already
            schema = schema.to_arrow_schema()

        metadata = None
        if embedding_functions is not None:
            # If we passed in embedding functions explicitly
            # then we'll override any schema metadata that
            # may have been implicitly specified by the LanceModel schema
            registry = EmbeddingFunctionRegistry.get_instance()
            metadata = registry.get_table_metadata(embedding_functions)

@@ -767,6 +800,7 @@ class LanceTable(Table):
        if data is not None:
            table.add(data)

        register_event("create_table")
        return table

    @classmethod
@@ -832,12 +866,20 @@ class LanceTable(Table):
        self.delete(where)
        self.add(orig_data, mode="append")
        self._reset_dataset()
        register_event("update")

    def _execute_query(self, query: Query) -> pa.Table:
        ds = self.to_lance()
        if query.prefilter:
            for idx in ds.list_indices():
                if query.vector_column in idx["fields"]:
                    raise NotImplementedError(
                        "Prefiltering for indexed vector column is coming soon."
                    )
        return ds.to_table(
            columns=query.columns,
            filter=query.filter,
            prefilter=query.prefilter,
            nearest={
                "column": query.vector_column,
                "q": query.vector,
@@ -848,6 +890,48 @@ class LanceTable(Table):
            },
        )
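The prefilter branch above pairs with the query builder's `where(..., prefilter=True)` flag, which the test changes later in this diff exercise. A minimal sketch, with `tbl` and the filter being illustrative:

# Prefilter applies the SQL filter before the vector search (unindexed vector columns only for now)
df = tbl.search([0.4, 0.4]).where("id = 2", prefilter=True).limit(1).to_pandas()
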
    def cleanup_old_versions(
        self,
        older_than: Optional[timedelta] = None,
        *,
        delete_unverified: bool = False,
    ) -> CleanupStats:
        """
        Clean up old versions of the table, freeing disk space.

        Parameters
        ----------
        older_than: timedelta, default None
            The minimum age of the version to delete. If None, then this defaults
            to two weeks.
        delete_unverified: bool, default False
            Because they may be part of an in-progress transaction, files newer
            than 7 days old are not deleted by default. If you are sure that
            there are no in-progress transactions, then you can set this to True
            to delete all files older than `older_than`.

        Returns
        -------
        CleanupStats
            The stats of the cleanup operation, including how many bytes were
            freed.
        """
        return self.to_lance().cleanup_old_versions(
            older_than, delete_unverified=delete_unverified
        )
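A hedged usage sketch for the new method; the `bytes_removed` attribute is assumed from pylance's `CleanupStats` and may differ:

from datetime import timedelta

stats = tbl.cleanup_old_versions(older_than=timedelta(days=30))
print(stats.bytes_removed)  # assumed CleanupStats attribute
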
    def compact_files(self, *args, **kwargs):
        """
        Run the compaction process on the table.

        This can be run after making several small appends to optimize the table
        for faster reads.

        Arguments are passed onto :meth:`lance.dataset.DatasetOptimizer.compact_files`.
        For most cases, the default should be fine.
        """
        return self.to_lance().optimize.compact_files(*args, **kwargs)
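A usage sketch; `target_rows_per_fragment` is assumed to be one of lance's `DatasetOptimizer.compact_files` options and is shown only for illustration:

tbl.compact_files(target_rows_per_fragment=1024 * 1024)  # assumed lance kwarg
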
def _sanitize_schema(
    data: pa.Table,
@@ -70,7 +70,11 @@ def fs_from_uri(uri: str) -> Tuple[pa_fs.FileSystem, str]:
    Get a PyArrow FileSystem from a URI, handling extra environment variables.
    """
    if get_uri_scheme(uri) == "s3":
        fs = pa_fs.S3FileSystem(endpoint_override=os.environ.get("AWS_ENDPOINT"))
        fs = pa_fs.S3FileSystem(
            endpoint_override=os.environ.get("AWS_ENDPOINT"),
            request_timeout=30,
            connect_timeout=30,
        )
    path = get_uri_location(uri)
    return fs, path

15
python/lancedb/utils/__init__.py
Normal file
@@ -0,0 +1,15 @@
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .config import Config

CONFIG = Config()
116
python/lancedb/utils/config.py
Normal file
@@ -0,0 +1,116 @@
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import hashlib
import os
import platform
import uuid
from pathlib import Path

from .general import LOGGER, is_dir_writeable, yaml_load, yaml_save


def get_user_config_dir(sub_dir="lancedb"):
    """
    Get the user config directory.

    Args:
        sub_dir (str): The name of the subdirectory to create.

    Returns:
        (Path): The path to the user config directory.
    """
    # Return the appropriate config directory for each operating system
    if platform.system() == "Windows":
        path = Path.home() / "AppData" / "Roaming" / sub_dir
    elif platform.system() == "Darwin":
        path = Path.home() / "Library" / "Application Support" / sub_dir
    elif platform.system() == "Linux":
        path = Path.home() / ".config" / sub_dir
    else:
        raise ValueError(f"Unsupported operating system: {platform.system()}")

    # GCP and AWS lambda fix, only /tmp is writeable
    if not is_dir_writeable(path.parent):
        LOGGER.warning(
            f"WARNING ⚠️ user config directory '{path}' is not writeable, defaulting to '/tmp' or CWD. "
            "Alternatively you can define a LANCEDB_CONFIG_DIR environment variable for this path."
        )
        path = (
            Path("/tmp") / sub_dir
            if is_dir_writeable("/tmp")
            else Path().cwd() / sub_dir
        )

    # Create the subdirectory if it does not exist
    path.mkdir(parents=True, exist_ok=True)

    return path


USER_CONFIG_DIR = Path(os.getenv("LANCEDB_CONFIG_DIR") or get_user_config_dir())
CONFIG_FILE = USER_CONFIG_DIR / "config.yaml"


class Config(dict):
    """
    Manages lancedb config stored in a YAML file.

    Args:
        file (str | Path): Path to the lancedb config YAML file. Default is USER_CONFIG_DIR / 'config.yaml'.
    """

    def __init__(self, file=CONFIG_FILE):
        self.file = Path(file)
        self.defaults = {  # Default global config values
            "diagnostics": True,
            "uuid": hashlib.sha256(str(uuid.getnode()).encode()).hexdigest(),
        }

        super().__init__(copy.deepcopy(self.defaults))

        if not self.file.exists():
            self.save()

        self.load()
        correct_keys = self.keys() == self.defaults.keys()
        correct_types = all(
            type(a) is type(b) for a, b in zip(self.values(), self.defaults.values())
        )
        if not (correct_keys and correct_types):
            LOGGER.warning(
                "WARNING ⚠️ LanceDB settings reset to default values. This may be due to a possible problem "
                "with your settings or a recent package update. "
                f"\nView settings & usage with 'lancedb settings' or at '{self.file}'"
            )
            self.reset()

    def load(self):
        """Loads settings from the YAML file."""
        super().update(yaml_load(self.file))

    def save(self):
        """Saves the current settings to the YAML file."""
        yaml_save(self.file, dict(self))

    def update(self, *args, **kwargs):
        """Updates a setting value in the current settings."""
        super().update(*args, **kwargs)
        self.save()

    def reset(self):
        """Resets the settings to default and saves them."""
        self.clear()
        self.update(self.defaults)
        self.save()
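A minimal usage sketch, based only on the methods shown above:

from lancedb.utils import CONFIG

CONFIG.update({"diagnostics": False})  # persisted to config.yaml via save()
print(CONFIG["uuid"])                  # stable SHA-256 anonymized machine id
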
161
python/lancedb/utils/events.py
Normal file
@@ -0,0 +1,161 @@
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import importlib.metadata
import platform
import random
import sys
import time

from lancedb.utils import CONFIG
from lancedb.utils.general import TryExcept

from .general import (
    PLATFORMS,
    get_git_origin_url,
    is_git_dir,
    is_github_actions_ci,
    is_online,
    is_pip_package,
    is_pytest_running,
    threaded_request,
)


class _Events:
    """
    A class for collecting anonymous event analytics. Event analytics are enabled when ``diagnostics=True`` in config and
    disabled when ``diagnostics=False``.

    You can enable or disable diagnostics by running ``lancedb diagnostics --enabled`` or ``lancedb diagnostics --disabled``.

    Attributes
    ----------
    url : str
        The URL to send anonymous events.
    rate_limit : float
        The rate limit in seconds for sending events.
    metadata : dict
        A dictionary containing metadata about the environment.
    enabled : bool
        A flag to enable or disable Events based on certain conditions.
    """

    _instance = None

    url = "https://app.posthog.com/capture/"
    headers = {"Content-Type": "application/json"}
    api_key = "phc_oENDjGgHtmIDrV6puUiFem2RB4JA8gGWulfdulmMdZP"
    # This api-key is write only and is safe to expose in the codebase.

    def __init__(self):
        """
        Initializes the Events object with default values for events, rate_limit, and metadata.
        """
        self.events = []  # events list
        self.max_events = 25  # max events to store in memory
        self.rate_limit = 60.0  # rate limit (seconds)
        self.time = 0.0

        if is_git_dir():
            install = "git"
        elif is_pip_package():
            install = "pip"
        else:
            install = "other"
        self.metadata = {
            "cli": sys.argv[0],
            "install": install,
            "python": ".".join(platform.python_version_tuple()[:2]),
            "version": importlib.metadata.version("lancedb"),
            "platforms": PLATFORMS,
            "session_id": round(random.random() * 1e15),
            # 'engagement_time_msec': 1000  # TODO: In future we might be interested in this metric
        }

        TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
        ONLINE = is_online()
        self.enabled = (
            CONFIG["diagnostics"]
            and not TESTS_RUNNING
            and ONLINE
            and (
                is_pip_package()
                or get_git_origin_url() == "https://github.com/lancedb/lancedb.git"
            )
        )

    def __call__(self, event_name, params={}):
        """
        Attempts to add a new event to the events list and send events if the rate limit is reached.

        Args
        ----
        event_name : str
            The name of the event to be logged.
        params : dict, optional
            A dictionary of additional parameters to be logged with the event.
        """
        ### NOTE: We might need a way to tag a session with a label to check usage from a source. Setting label should be exposed to the user.
        if not self.enabled:
            return
        if (
            len(self.events) < self.max_events
        ):  # Events list limited to 25 events (drop any events past this)
            params.update(self.metadata)
            self.events.append(
                {
                    "event": event_name,
                    "properties": params,
                    "timestamp": datetime.datetime.now(
                        tz=datetime.timezone.utc
                    ).isoformat(),
                    "distinct_id": CONFIG["uuid"],
                }
            )

        # Check rate limit
        t = time.time()
        if (t - self.time) < self.rate_limit:
            return
        # Time is over rate limiter, send now
        data = {
            "api_key": self.api_key,
            "distinct_id": CONFIG["uuid"],  # posthog needs this to accept the event
            "batch": self.events,
        }

        # POST equivalent to requests.post(self.url, json=data).
        # threaded request is used to avoid blocking, retries are disabled, and verbose is disabled
        # to avoid any possible disruption in the console.
        threaded_request(
            method="post",
            url=self.url,
            headers=self.headers,
            json=data,
            retry=0,
            verbose=False,
        )

        # Flush & Reset
        self.events = []
        self.time = t


@TryExcept(verbose=False)
def register_event(name: str, **kwargs):
    if _Events._instance is None:
        _Events._instance = _Events()

    _Events._instance(name, **kwargs)
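A minimal sketch of the call sites this diff adds throughout table.py:

from lancedb.utils.events import register_event

# No-op when diagnostics are disabled, tests are running, or the host is offline
register_event("create_table")
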
445
python/lancedb/utils/general.py
Normal file
@@ -0,0 +1,445 @@
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import importlib
import logging.config
import os
import platform
import subprocess
import sys
import threading
import time
from pathlib import Path
from typing import Union

import requests
import yaml

LOGGING_NAME = "lancedb"
VERBOSE = (
    str(os.getenv("LANCEDB_VERBOSE", True)).lower() == "true"
)  # global verbose mode


def set_logging(name=LOGGING_NAME, verbose=True):
    """Sets up logging for the given name.

    Parameters
    ----------
    name : str, optional
        The name of the logger. Default is 'lancedb'.
    verbose : bool, optional
        Whether to enable verbose logging. Default is True.
    """

    rank = int(os.getenv("RANK", -1))  # rank in world for Multi-GPU trainings
    level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR
    logging.config.dictConfig(
        {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {name: {"format": "%(message)s"}},
            "handlers": {
                name: {
                    "class": "logging.StreamHandler",
                    "formatter": name,
                    "level": level,
                }
            },
            "loggers": {name: {"level": level, "handlers": [name], "propagate": False}},
        }
    )


set_logging(LOGGING_NAME, verbose=VERBOSE)
LOGGER = logging.getLogger(LOGGING_NAME)


def is_pip_package(filepath: str = __name__) -> bool:
    """Determines if the file at the given filepath is part of a pip package.

    Parameters
    ----------
    filepath : str, optional
        The filepath to check. Default is the current file.

    Returns
    -------
    bool
        True if the file is part of a pip package, False otherwise.
    """
    # Get the spec for the module
    spec = importlib.util.find_spec(filepath)

    # Return whether the spec is not None and the origin is not None (indicating it is a package)
    return spec is not None and spec.origin is not None


def is_pytest_running():
    """Determines whether pytest is currently running or not.

    Returns
    -------
    bool
        True if pytest is running, False otherwise.
    """
    return (
        ("PYTEST_CURRENT_TEST" in os.environ)
        or ("pytest" in sys.modules)
        or ("pytest" in Path(sys.argv[0]).stem)
    )


def is_github_actions_ci() -> bool:
    """
    Determine if the current environment is a GitHub Actions CI Python runner.

    Returns
    -------
    bool
        True if the current environment is a GitHub Actions CI Python runner, False otherwise.
    """

    return (
        "GITHUB_ACTIONS" in os.environ
        and "RUNNER_OS" in os.environ
        and "RUNNER_TOOL_CACHE" in os.environ
    )


def is_git_dir():
    """
    Determines whether the current file is part of a git repository.
    If the current file is not part of a git repository, returns None.

    Returns
    -------
    bool
        True if current file is part of a git repository.
    """
    return get_git_dir() is not None


def is_online() -> bool:
    """
    Check internet connectivity by attempting to connect to a known online host.

    Returns
    -------
    bool
        True if connection is successful, False otherwise.
    """
    import socket

    for host in "1.1.1.1", "8.8.8.8", "223.5.5.5":  # Cloudflare, Google, AliDNS
        try:
            test_connection = socket.create_connection(address=(host, 53), timeout=2)
        except (socket.timeout, socket.gaierror, OSError):
            continue
        else:
            # If the connection was successful, close it to avoid a ResourceWarning
            test_connection.close()
            return True
    return False


def is_dir_writeable(dir_path: Union[str, Path]) -> bool:
    """Check if a directory is writeable.

    Parameters
    ----------
    dir_path : Union[str, Path]
        The path to the directory.

    Returns
    -------
    bool
        True if the directory is writeable, False otherwise.
    """
    return os.access(str(dir_path), os.W_OK)


def is_colab():
    """Check if the current script is running inside a Google Colab notebook.

    Returns
    -------
    bool
        True if running inside a Colab notebook, False otherwise.
    """
    return "COLAB_RELEASE_TAG" in os.environ or "COLAB_BACKEND_VERSION" in os.environ


def is_kaggle():
    """Check if the current script is running inside a Kaggle kernel.

    Returns
    -------
    bool
        True if running inside a Kaggle kernel, False otherwise.
    """
    return (
        os.environ.get("PWD") == "/kaggle/working"
        and os.environ.get("KAGGLE_URL_BASE") == "https://www.kaggle.com"
    )


def is_jupyter():
    """Check if the current script is running inside a Jupyter Notebook.

    Returns
    -------
    bool
        True if running inside a Jupyter Notebook, False otherwise.
    """
    with contextlib.suppress(Exception):
        from IPython import get_ipython

        return get_ipython() is not None
    return False


def is_docker() -> bool:
    """Determine if the script is running inside a Docker container.

    Returns
    -------
    bool
        True if the script is running inside a Docker container, False otherwise.
    """
    file = Path("/proc/self/cgroup")
    if file.exists():
        with open(file) as f:
            return "docker" in f.read()
    else:
        return False


def get_git_dir():
    """Determine whether the current file is part of a git repository and if so, returns the repository root directory.
    If the current file is not part of a git repository, returns None.

    Returns
    -------
    Path | None
        Git root directory if found or None if not found.
    """
    for d in Path(__file__).parents:
        if (d / ".git").is_dir():
            return d


def get_git_origin_url():
    """Retrieve the origin URL of a git repository.

    Returns
    -------
    str | None
        The origin URL of the git repository or None if not a git directory.
    """
    if is_git_dir():
        with contextlib.suppress(subprocess.CalledProcessError):
            origin = subprocess.check_output(
                ["git", "config", "--get", "remote.origin.url"]
            )
            return origin.decode().strip()


def yaml_save(file="data.yaml", data=None, header=""):
    """Save YAML data to a file.

    Parameters
    ----------
    file : str, optional
        File name, by default 'data.yaml'.
    data : dict, optional
        Data to save in YAML format, by default None.
    header : str, optional
        YAML header to add, by default "".
    """
    if data is None:
        data = {}
    file = Path(file)
    if not file.parent.exists():
        # Create parent directories if they don't exist
        file.parent.mkdir(parents=True, exist_ok=True)

    # Convert Path objects to strings
    for k, v in data.items():
        if isinstance(v, Path):
            data[k] = str(v)

    # Dump data to file in YAML format
    with open(file, "w", errors="ignore", encoding="utf-8") as f:
        if header:
            f.write(header)
        yaml.safe_dump(data, f, sort_keys=False, allow_unicode=True)


def yaml_load(file="data.yaml", append_filename=False):
    """
    Load YAML data from a file.

    Parameters
    ----------
    file : str, optional
        File name. Default is 'data.yaml'.
    append_filename : bool, optional
        Add the YAML filename to the YAML dictionary. Default is False.

    Returns
    -------
    dict
        YAML data and file name.
    """
    assert Path(file).suffix in (
        ".yaml",
        ".yml",
    ), f"Attempting to load non-YAML file {file} with yaml_load()"
    with open(file, errors="ignore", encoding="utf-8") as f:
        s = f.read()  # string

    # Add YAML filename to dict and return
    data = (
        yaml.safe_load(s) or {}
    )  # always return a dict (yaml.safe_load() may return None for empty files)
    if append_filename:
        data["yaml_file"] = str(file)
    return data
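A round-trip sketch of the two helpers above (file name illustrative):

yaml_save("settings.yaml", {"diagnostics": True})
assert yaml_load("settings.yaml") == {"diagnostics": True}
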
def yaml_print(yaml_file: Union[str, Path, dict]) -> None:
    """
    Pretty prints a YAML file or a YAML-formatted dictionary.

    Parameters
    ----------
    yaml_file : Union[str, Path, dict]
        The file path of the YAML file or a YAML-formatted dictionary.

    Returns
    -------
    None
    """
    yaml_dict = (
        yaml_load(yaml_file) if isinstance(yaml_file, (str, Path)) else yaml_file
    )
    dump = yaml.dump(yaml_dict, sort_keys=False, allow_unicode=True)
    LOGGER.info(f"Printing '{yaml_file}'\n\n{dump}")


PLATFORMS = [platform.system()]
if is_colab():
    PLATFORMS.append("Colab")
if is_kaggle():
    PLATFORMS.append("Kaggle")
if is_jupyter():
    PLATFORMS.append("Jupyter")
if is_docker():
    PLATFORMS.append("Docker")

PLATFORMS = "|".join(PLATFORMS)


class TryExcept(contextlib.ContextDecorator):
    """
    TryExcept context manager.
    Usage: @TryExcept() decorator or 'with TryExcept():' context manager.
    """

    def __init__(self, msg="", verbose=True):
        """
        Parameters
        ----------
        msg : str, optional
            Custom message to display in case of exception, by default "".
        verbose : bool, optional
            Whether to display the message, by default True.
        """
        self.msg = msg
        self.verbose = verbose

    def __enter__(self):
        pass

    def __exit__(self, exc_type, value, traceback):
        if self.verbose and value:
            LOGGER.info(f"{self.msg}{': ' if self.msg else ''}{value}")
        return True
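A minimal sketch of the decorator form, following the `__exit__` logic above:

@TryExcept(msg="non-fatal", verbose=True)
def flaky():
    raise RuntimeError("boom")

flaky()  # logs "non-fatal: boom" and swallows the exception instead of raising
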
def threaded_request(
    method, url, retry=3, timeout=30, thread=True, code=-1, verbose=True, **kwargs
):
    """
    Makes an HTTP request using the 'requests' library, with exponential backoff retries up to a specified timeout.

    Parameters
    ----------
    method : str
        The HTTP method to use for the request. Choices are 'post' and 'get'.
    url : str
        The URL to make the request to.
    retry : int, optional
        Number of retries to attempt before giving up, by default 3.
    timeout : int, optional
        Timeout in seconds after which the function will give up retrying, by default 30.
    thread : bool, optional
        Whether to execute the request in a separate daemon thread, by default True.
    code : int, optional
        An identifier for the request, used for logging purposes, by default -1.
    verbose : bool, optional
        A flag to determine whether to print out to console or not, by default True.

    Returns
    -------
    requests.Response
        The HTTP response object. If the request is executed in a separate thread,
        the call returns immediately instead.
    """
    retry_codes = ()  # retry only these codes TODO: add codes if needed in future (500, 408)

    @TryExcept(verbose=verbose)
    def func(method, url, **kwargs):
        """Make HTTP requests with retries and timeouts, with optional progress tracking."""
        response = None
        t0 = time.time()
        for i in range(retry + 1):
            if (time.time() - t0) > timeout:
                break
            response = requests.request(method, url, **kwargs)
            if response.status_code < 300:  # good return codes in the 2xx range
                break
            try:
                m = response.json().get("message", "No JSON message.")
            except AttributeError:
                m = "Unable to read JSON."
            if i == 0:
                if response.status_code in retry_codes:
                    m += f" Retrying {retry}x for {timeout}s." if retry else ""
                elif response.status_code == 429:  # rate limit
                    m = "Rate limit reached"
                if verbose:
                    LOGGER.warning(f"{response.status_code} #{code}")
                if response.status_code not in retry_codes:
                    return response
            time.sleep(2**i)  # exponential backoff
        return response

    args = method, url
    if thread:
        return threading.Thread(
            target=func, args=args, kwargs=kwargs, daemon=True
        ).start()
    else:
        return func(*args, **kwargs)
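A fire-and-forget sketch mirroring the call in events.py; the endpoint is illustrative:

threaded_request(
    method="post",
    url="https://example.com/capture",  # illustrative endpoint
    json={"event": "ping"},
    retry=0,
    verbose=False,
)  # returns immediately; the request runs on a daemon thread
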
112
python/lancedb/utils/sentry_log.py
Normal file
@@ -0,0 +1,112 @@
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bdb
import importlib.metadata
import logging
import sys
from pathlib import Path

from lancedb.utils import CONFIG

from .general import (
    PLATFORMS,
    TryExcept,
    is_git_dir,
    is_github_actions_ci,
    is_online,
    is_pip_package,
    is_pytest_running,
)


@TryExcept(verbose=False)
def set_sentry():
    """
    Initialize the Sentry SDK for error tracking and reporting. Only used if sentry_sdk package is installed and
    sync=True in settings. Run 'lancedb settings' to see and update settings YAML file.

    Conditions required to send errors (ALL conditions must be met or no errors will be reported):
    - sentry_sdk package is installed
    - sync=True in settings
    - pytest is not running
    - running in a pip package installation
    - running in a non-git directory
    - online environment

    The function also configures Sentry SDK to ignore KeyboardInterrupt and FileNotFoundError
    exceptions for now.

    Additionally, the function sets custom tags and user information for Sentry events.
    """

    def before_send(event, hint):
        """
        Modify the event before sending it to Sentry based on specific exception types and messages.

        Args:
            event (dict): The event dictionary containing information about the error.
            hint (dict): A dictionary containing additional information about the error.

        Returns:
            dict: The modified event or None if the event should not be sent to Sentry.
        """
        if "exc_info" in hint:
            exc_type, exc_value, tb = hint["exc_info"]
            if "out of memory" in str(exc_value).lower():
                return None

        if is_git_dir():
            install = "git"
        elif is_pip_package():
            install = "pip"
        else:
            install = "other"

        event["tags"] = {
            "sys_argv": sys.argv[0],
            "sys_argv_name": Path(sys.argv[0]).name,
            "install": install,
            "platforms": PLATFORMS,
            "version": importlib.metadata.version("lancedb"),
        }
        return event

    TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
    ONLINE = is_online()
    if CONFIG["diagnostics"] and not TESTS_RUNNING and ONLINE and is_pip_package():
        # and not is_git_dir():  # not running inside a git dir. Maybe too restrictive?

        # If sentry_sdk package is not installed then return and do not use Sentry
        try:
            import sentry_sdk  # noqa
        except ImportError:
            return

        sentry_sdk.init(
            dsn="https://c63ef8c64e05d1aa1a96513361f3ca2f@o4505950840946688.ingest.sentry.io/4505950933614592",
            debug=False,
            include_local_variables=False,
            traces_sample_rate=1.0,
            environment="production",  # 'dev' or 'production'
            before_send=before_send,
            ignore_errors=[KeyboardInterrupt, FileNotFoundError, bdb.BdbQuit],
        )
        sentry_sdk.set_user({"id": CONFIG["uuid"]})  # SHA-256 anonymized UUID hash

        # Disable all sentry logging
        for logger in "sentry_sdk", "sentry_sdk.errors":
            logging.getLogger(logger).setLevel(logging.CRITICAL)


set_sentry()
@@ -1,16 +1,20 @@
[project]
name = "lancedb"
version = "0.2.4"
version = "0.3.1"
dependencies = [
    "pylance==0.7.4",
    "ratelimiter",
    "retry",
    "tqdm",
    "deprecation",
    "pylance==0.8.3",
    "ratelimiter~=1.0",
    "retry>=0.9.2",
    "tqdm>=4.1.0",
    "aiohttp",
    "pydantic",
    "attr",
    "pydantic>=1.10",
    "attrs>=21.3.0",
    "semver>=3.0",
    "cachetools"
    "cachetools",
    "pyyaml>=6.0",
    "click>=8.1.7",
    "requests>=2.31.0"
]
description = "lancedb"
authors = [{ name = "LanceDB Devs", email = "dev@lancedb.com" }]
@@ -44,9 +48,14 @@ classifiers = [
repository = "https://github.com/lancedb/lancedb"

[project.optional-dependencies]
tests = ["pandas>=1.4", "pytest", "pytest-mock", "pytest-asyncio"]
tests = ["pandas>=1.4", "pytest", "pytest-mock", "pytest-asyncio", "requests"]
dev = ["ruff", "pre-commit", "black"]
docs = ["mkdocs", "mkdocs-jupyter", "mkdocs-material", "mkdocstrings[python]"]
clip = ["torch", "pillow", "open-clip"]
embeddings = ["openai", "sentence-transformers", "torch", "pillow", "open-clip", "cohere"]

[project.scripts]
lancedb = "lancedb.cli.cli:cli"

[build-system]
requires = ["setuptools", "wheel"]
@@ -54,3 +63,10 @@ build-backend = "setuptools.build_meta"

[tool.isort]
profile = "black"

[tool.pytest.ini_options]
addopts = "--strict-markers"
markers = [
    "slow: marks tests as slow (deselect with '-m \"not slow\"')",
    "asyncio"
]
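With `--strict-markers`, any test that downloads models or calls external APIs must carry the marker, and CI deselects them with `pytest -m "not slow"` as in the workflow change above. A minimal sketch (test name illustrative):

import pytest

@pytest.mark.slow
def test_downloads_model():
    ...
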
35
python/tests/test_cli.py
Normal file
@@ -0,0 +1,35 @@
from click.testing import CliRunner

from lancedb.cli.cli import cli
from lancedb.utils import CONFIG


def test_entry():
    runner = CliRunner()
    result = runner.invoke(cli)
    assert result.exit_code == 0  # Main check
    assert "lancedb" in result.output.lower()  # lazy check


def test_diagnostics():
    runner = CliRunner()
    result = runner.invoke(cli, ["diagnostics", "--disabled"])
    assert result.exit_code == 0  # Main check
    assert CONFIG["diagnostics"] == False

    result = runner.invoke(cli, ["diagnostics", "--enabled"])
    assert result.exit_code == 0  # Main check
    assert CONFIG["diagnostics"] == True


def test_config():
    runner = CliRunner()
    result = runner.invoke(cli, ["config"])
    assert result.exit_code == 0  # Main check
    cfg = CONFIG.copy()
    cfg.pop("uuid")
    for (
        item,
        _,
    ) in cfg.items():  # check for keys only as formatting is subject to change
        assert item in result.output
@@ -47,7 +47,7 @@ def test_contextualizer(raw_df: pd.DataFrame):
        .stride(3)
        .text_col("token")
        .groupby("document_id")
        .to_df()["token"]
        .to_pandas()["token"]
        .to_list()
    )

@@ -67,7 +67,7 @@ def test_contextualizer_with_threshold(raw_df: pd.DataFrame):
        .text_col("token")
        .groupby("document_id")
        .min_window_size(4)
        .to_df()["token"]
        .to_pandas()["token"]
        .to_list()
    )

@@ -33,11 +33,11 @@ def test_basic(tmp_path):
            {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
        ],
    )
    rs = table.search([100, 100]).limit(1).to_df()
    rs = table.search([100, 100]).limit(1).to_pandas()
    assert len(rs) == 1
    assert rs["item"].iloc[0] == "bar"

    rs = table.search([100, 100]).where("price < 15").limit(2).to_df()
    rs = table.search([100, 100]).where("price < 15").limit(2).to_pandas()
    assert len(rs) == 1
    assert rs["item"].iloc[0] == "foo"

@@ -62,11 +62,11 @@ def test_ingest_pd(tmp_path):
        }
    )
    table = db.create_table("test", data=data)
    rs = table.search([100, 100]).limit(1).to_df()
    rs = table.search([100, 100]).limit(1).to_pandas()
    assert len(rs) == 1
    assert rs["item"].iloc[0] == "bar"

    rs = table.search([100, 100]).where("price < 15").limit(2).to_df()
    rs = table.search([100, 100]).where("price < 15").limit(2).to_pandas()
    assert len(rs) == 1
    assert rs["item"].iloc[0] == "foo"

@@ -136,11 +136,9 @@ def test_ingest_iterator(tmp_path):
    def run_tests(schema):
        db = lancedb.connect(tmp_path)
        tbl = db.create_table("table2", make_batches(), schema=schema, mode="overwrite")

        tbl.to_pandas()
        assert tbl.search([3.1, 4.1]).limit(1).to_df()["_distance"][0] == 0.0
        assert tbl.search([5.9, 26.5]).limit(1).to_df()["_distance"][0] == 0.0

        assert tbl.search([3.1, 4.1]).limit(1).to_pandas()["_distance"][0] == 0.0
        assert tbl.search([5.9, 26.5]).limit(1).to_pandas()["_distance"][0] == 0.0
        tbl_len = len(tbl)
        tbl.add(make_batches())
        assert tbl_len == 50

@@ -23,5 +23,5 @@ from lancedb import LanceDBConnection
def test_against_local_server():
    conn = LanceDBConnection("lancedb+http://localhost:10024")
    table = conn.open_table("sift1m_ivf1024_pq16")
    df = table.search(np.random.rand(128)).to_df()
    df = table.search(np.random.rand(128)).to_pandas()
    assert len(df) == 10

@@ -16,8 +16,12 @@ import lance
import numpy as np
import pyarrow as pa

from lancedb.conftest import MockEmbeddingFunction
from lancedb.embeddings import EmbeddingFunctionRegistry, with_embeddings
from lancedb.conftest import MockTextEmbeddingFunction
from lancedb.embeddings import (
    EmbeddingFunctionConfig,
    EmbeddingFunctionRegistry,
    with_embeddings,
)


def mock_embed_func(input_data):
@@ -54,8 +58,12 @@ def test_embedding_function(tmp_path):
            "vector": [np.random.randn(10), np.random.randn(10)],
        }
    )
    func = MockEmbeddingFunction(source_column="text", vector_column="vector")
    metadata = registry.get_table_metadata([func])
    conf = EmbeddingFunctionConfig(
        source_column="text",
        vector_column="vector",
        function=MockTextEmbeddingFunction(),
    )
    metadata = registry.get_table_metadata([conf])
    table = table.replace_schema_metadata(metadata)

    # Write it to disk
@@ -65,14 +73,13 @@ def test_embedding_function(tmp_path):
    ds = lance.dataset(tmp_path / "test.lance")

    # can we get the serialized version back out?
    functions = registry.parse_functions(ds.schema.metadata)
    configs = registry.parse_functions(ds.schema.metadata)

    func = functions["vector"]
    actual = func("hello world")
    conf = configs["vector"]
    func = conf.function
    actual = func.compute_query_embeddings("hello world")

    # We create an instance
    expected_func = MockEmbeddingFunction(source_column="text", vector_column="vector")
    # And we make sure we can call it
    expected = expected_func("hello world")
    expected = func.compute_query_embeddings("hello world")

    assert np.allclose(actual, expected)

149
python/tests/test_embeddings_slow.py
Normal file
@@ -0,0 +1,149 @@
# Copyright (c) 2023. LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os

import numpy as np
import pandas as pd
import pytest
import requests

import lancedb
from lancedb.embeddings import EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector

# These are integration tests for embedding functions.
# They are slow because they require downloading models
# or a connection to an external API.


@pytest.mark.slow
@pytest.mark.parametrize("alias", ["sentence-transformers", "openai"])
def test_sentence_transformer(alias, tmp_path):
    db = lancedb.connect(tmp_path)
    registry = EmbeddingFunctionRegistry.get_instance()
    func = registry.get(alias).create()

    class Words(LanceModel):
        text: str = func.SourceField()
        vector: Vector(func.ndims()) = func.VectorField()

    table = db.create_table("words", schema=Words)
    table.add(
        pd.DataFrame(
            {
                "text": [
                    "hello world",
                    "goodbye world",
                    "fizz",
                    "buzz",
                    "foo",
                    "bar",
                    "baz",
                ]
            }
        )
    )

    query = "greetings"
    actual = table.search(query).limit(1).to_pydantic(Words)[0]

    vec = func.compute_query_embeddings(query)[0]
    expected = table.search(vec).limit(1).to_pydantic(Words)[0]
    assert actual.text == expected.text
    assert actual.text == "hello world"


@pytest.mark.slow
def test_openclip(tmp_path):
    from PIL import Image

    db = lancedb.connect(tmp_path)
    registry = EmbeddingFunctionRegistry.get_instance()
    func = registry.get("open-clip").create()

    class Images(LanceModel):
        label: str
        image_uri: str = func.SourceField()
        image_bytes: bytes = func.SourceField()
        vector: Vector(func.ndims()) = func.VectorField()
        vec_from_bytes: Vector(func.ndims()) = func.VectorField()

    table = db.create_table("images", schema=Images)
    labels = ["cat", "cat", "dog", "dog", "horse", "horse"]
    uris = [
        "http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg",
        "http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg",
        "http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg",
        "http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg",
        "http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg",
        "http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg",
    ]
    # get each uri as bytes
    image_bytes = [requests.get(uri).content for uri in uris]
    table.add(
        pd.DataFrame({"label": labels, "image_uri": uris, "image_bytes": image_bytes})
    )

    # text search
    actual = table.search("man's best friend").limit(1).to_pydantic(Images)[0]
    assert actual.label == "dog"
    frombytes = (
        table.search("man's best friend", vector_column_name="vec_from_bytes")
        .limit(1)
        .to_pydantic(Images)[0]
    )
    assert actual.label == frombytes.label
    assert np.allclose(actual.vector, frombytes.vector)

    # image search
    query_image_uri = "http://farm1.staticflickr.com/200/467715466_ed4a31801f_z.jpg"
    image_bytes = requests.get(query_image_uri).content
    query_image = Image.open(io.BytesIO(image_bytes))
    actual = table.search(query_image).limit(1).to_pydantic(Images)[0]
    assert actual.label == "dog"
    other = (
        table.search(query_image, vector_column_name="vec_from_bytes")
        .limit(1)
        .to_pydantic(Images)[0]
    )
    assert actual.label == other.label

    arrow_table = table.search().select(["vector", "vec_from_bytes"]).to_arrow()
    assert np.allclose(
        arrow_table["vector"].combine_chunks().values.to_numpy(),
        arrow_table["vec_from_bytes"].combine_chunks().values.to_numpy(),
    )


@pytest.mark.slow
@pytest.mark.skipif(
    os.environ.get("COHERE_API_KEY") is None, reason="COHERE_API_KEY not set"
)  # also skip if cohere not installed
def test_cohere_embedding_function():
    cohere = (
        EmbeddingFunctionRegistry.get_instance()
        .get("cohere")
        .create(name="embed-multilingual-v2.0")
    )

    class TextModel(LanceModel):
        text: str = cohere.SourceField()
        vector: Vector(cohere.ndims()) = cohere.VectorField()

    df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
    db = lancedb.connect("~/lancedb")
    tbl = db.create_table("test", schema=TextModel, mode="overwrite")

    tbl.add(df)
    assert len(tbl.to_pandas()["vector"][0]) == cohere.ndims()
@@ -71,14 +71,14 @@ def test_search_index(tmp_path, table):

def test_create_index_from_table(tmp_path, table):
    table.create_fts_index("text")
    df = table.search("puppy").limit(10).select(["text"]).to_df()
    df = table.search("puppy").limit(10).select(["text"]).to_pandas()
    assert len(df) == 10
    assert "text" in df.columns


def test_create_index_multiple_columns(tmp_path, table):
    table.create_fts_index(["text", "text2"])
    df = table.search("puppy").limit(10).to_df()
    df = table.search("puppy").limit(10).to_pandas()
    assert len(df) == 10
    assert "text" in df.columns
    assert "text2" in df.columns
@@ -87,5 +87,5 @@ def test_create_index_multiple_columns(tmp_path, table):
def test_empty_rs(tmp_path, table, mocker):
    table.create_fts_index(["text", "text2"])
    mocker.patch("lancedb.fts.search_index", return_value=([], []))
    df = table.search("puppy").limit(10).to_df()
    df = table.search("puppy").limit(10).to_pandas()
    assert len(df) == 0

@@ -36,11 +36,11 @@ def test_s3_io():
            {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
        ],
    )
    rs = table.search([100, 100]).limit(1).to_df()
    rs = table.search([100, 100]).limit(1).to_pandas()
    assert len(rs) == 1
    assert rs["item"].iloc[0] == "bar"

    rs = table.search([100, 100]).where("price < 15").limit(2).to_df()
    rs = table.search([100, 100]).where("price < 15").limit(2).to_pandas()
    assert len(rs) == 1
    assert rs["item"].iloc[0] == "foo"

@@ -38,6 +38,7 @@ class MockTable:
        return ds.to_table(
            columns=query.columns,
            filter=query.filter,
            prefilter=query.prefilter,
            nearest={
                "column": query.vector_column,
                "q": query.vector,
@@ -84,15 +85,37 @@ def test_cast(table):


def test_query_builder(table):
    df = (
        LanceVectorQueryBuilder(table, [0, 0], "vector").limit(1).select(["id"]).to_df()
    rs = (
        LanceVectorQueryBuilder(table, [0, 0], "vector")
        .limit(1)
        .select(["id"])
        .to_list()
    )
    assert df["id"].values[0] == 1
    assert all(df["vector"].values[0] == [1, 2])
    assert rs[0]["id"] == 1
    assert all(np.array(rs[0]["vector"]) == [1, 2])


def test_query_builder_with_filter(table):
    df = LanceVectorQueryBuilder(table, [0, 0], "vector").where("id = 2").to_df()
    rs = LanceVectorQueryBuilder(table, [0, 0], "vector").where("id = 2").to_list()
    assert rs[0]["id"] == 2
    assert all(np.array(rs[0]["vector"]) == [3, 4])


def test_query_builder_with_prefilter(table):
    df = (
        LanceVectorQueryBuilder(table, [0, 0], "vector")
        .where("id = 2")
        .limit(1)
        .to_pandas()
    )
    assert len(df) == 0

    df = (
        LanceVectorQueryBuilder(table, [0, 0], "vector")
        .where("id = 2", prefilter=True)
        .limit(1)
        .to_pandas()
    )
    assert df["id"].values[0] == 2
    assert all(df["vector"].values[0] == [3, 4])

@@ -100,9 +123,11 @@ def test_query_builder_with_filter(table):
def test_query_builder_with_metric(table):
    query = [4, 8]
    vector_column_name = "vector"
    df_default = LanceVectorQueryBuilder(table, query, vector_column_name).to_df()
    df_default = LanceVectorQueryBuilder(table, query, vector_column_name).to_pandas()
    df_l2 = (
        LanceVectorQueryBuilder(table, query, vector_column_name).metric("L2").to_df()
        LanceVectorQueryBuilder(table, query, vector_column_name)
        .metric("L2")
        .to_pandas()
    )
    tm.assert_frame_equal(df_default, df_l2)

@@ -110,7 +135,7 @@ def test_query_builder_with_metric(table):
        LanceVectorQueryBuilder(table, query, vector_column_name)
        .metric("cosine")
        .limit(1)
        .to_df()
        .to_pandas()
    )
    assert df_cosine._distance[0] == pytest.approx(
        cosine_distance(query, df_cosine.vector[0]),

@@ -11,7 +11,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import attr
|
||||
import attrs
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import pyarrow as pa
|
||||
@@ -21,10 +21,10 @@ from aiohttp import web
|
||||
from lancedb.remote.client import RestfulLanceDBClient, VectorQuery
|
||||
|
||||
|
||||
@attr.define
|
||||
@attrs.define
|
||||
class MockLanceDBServer:
|
||||
runner: web.AppRunner = attr.field(init=False)
|
||||
site: web.TCPSite = attr.field(init=False)
|
||||
runner: web.AppRunner = attrs.field(init=False)
|
||||
site: web.TCPSite = attrs.field(init=False)
|
||||
|
||||
async def query_handler(self, request: web.Request) -> web.Response:
|
||||
table_name = request.match_info["table_name"]
|
||||
@@ -86,7 +86,7 @@ async def test_e2e_with_mock_server():
|
||||
columns=["id", "vector"],
|
||||
),
|
||||
)
|
||||
).to_df()
|
||||
).to_pandas()
|
||||
|
||||
assert "vector" in df.columns
|
||||
assert "id" in df.columns
|
||||
|
||||
@@ -32,4 +32,4 @@ def test_remote_db():
|
||||
setattr(conn, "_client", FakeLanceDBClient())
|
||||
|
||||
table = conn["test"]
|
||||
table.search([1.0, 2.0]).to_df()
|
||||
table.search([1.0, 2.0]).to_pandas()
|
||||
|
||||
@@ -12,6 +12,7 @@
# limitations under the License.

import functools
from datetime import timedelta
from pathlib import Path
from typing import List
from unittest.mock import PropertyMock, patch
@@ -22,8 +23,9 @@ import pandas as pd
import pyarrow as pa
import pytest

from lancedb.conftest import MockEmbeddingFunction
from lancedb.conftest import MockTextEmbeddingFunction
from lancedb.db import LanceDBConnection
from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector
from lancedb.table import LanceTable

@@ -222,6 +224,7 @@ def test_create_index_method():
        num_partitions=256,
        num_sub_vectors=96,
        replace=True,
        accelerator=None,
    )


@@ -356,20 +359,23 @@ def test_create_with_embedding_function(db):
        text: str
        vector: Vector(10)

    func = MockEmbeddingFunction(source_column="text", vector_column="vector")
    func = MockTextEmbeddingFunction()
    texts = ["hello world", "goodbye world", "foo bar baz fizz buzz"]
    df = pd.DataFrame({"text": texts, "vector": func(texts)})
    df = pd.DataFrame({"text": texts, "vector": func.compute_source_embeddings(texts)})

    conf = EmbeddingFunctionConfig(
        source_column="text", vector_column="vector", function=func
    )
    table = LanceTable.create(
        db,
        "my_table",
        schema=MyTable,
        embedding_functions=[func],
        embedding_functions=[conf],
    )
    table.add(df)

    query_str = "hi how are you?"
    query_vector = func(query_str)[0]
    query_vector = func.compute_query_embeddings(query_str)[0]
    expected = table.search(query_vector).limit(2).to_arrow()

    actual = table.search(query_str).limit(2).to_arrow()
@@ -377,17 +383,13 @@ def test_create_with_embedding_function(db):


def test_add_with_embedding_function(db):
    class MyTable(LanceModel):
        text: str
        vector: Vector(10)
    emb = EmbeddingFunctionRegistry.get_instance().get("test")()

    func = MockEmbeddingFunction(source_column="text", vector_column="vector")
    table = LanceTable.create(
        db,
        "my_table",
        schema=MyTable,
        embedding_functions=[func],
    )
    class MyTable(LanceModel):
        text: str = emb.SourceField()
        vector: Vector(emb.ndims()) = emb.VectorField()

    table = LanceTable.create(db, "my_table", schema=MyTable)

    texts = ["hello world", "goodbye world", "foo bar baz fizz buzz"]
    df = pd.DataFrame({"text": texts})
@@ -397,7 +399,7 @@ def test_add_with_embedding_function(db):
    table.add([{"text": t} for t in texts])

    query_str = "hi how are you?"
    query_vector = func(query_str)[0]
    query_vector = emb.compute_query_embeddings(query_str)[0]
    expected = table.search(query_vector).limit(2).to_arrow()

    actual = table.search(query_str).limit(2).to_arrow()
@@ -426,8 +428,8 @@ def test_multiple_vector_columns(db):
    table.add(df)

    q = np.random.randn(10)
    result1 = table.search(q, vector_column_name="vector1").limit(1).to_df()
    result2 = table.search(q, vector_column_name="vector2").limit(1).to_df()
    result1 = table.search(q, vector_column_name="vector1").limit(1).to_pandas()
    result2 = table.search(q, vector_column_name="vector2").limit(1).to_pandas()

    assert result1["text"].iloc[0] != result2["text"].iloc[0]

@@ -438,6 +440,34 @@ def test_empty_query(db):
        "my_table",
        data=[{"text": "foo", "id": 0}, {"text": "bar", "id": 1}],
    )
    df = table.search().select(["id"]).where("text='bar'").limit(1).to_df()
    df = table.search().select(["id"]).where("text='bar'").limit(1).to_pandas()
    val = df.id.iloc[0]
    assert val == 1


def test_compact_cleanup(db):
    table = LanceTable.create(
        db,
        "my_table",
        data=[{"text": "foo", "id": 0}, {"text": "bar", "id": 1}],
    )

    table.add([{"text": "baz", "id": 2}])
    assert len(table) == 3
    assert table.version == 3

    stats = table.compact_files()
    assert len(table) == 3
    assert table.version == 4
    assert stats.fragments_removed > 0
    assert stats.fragments_added == 1

    stats = table.cleanup_old_versions()
    assert stats.bytes_removed == 0

    stats = table.cleanup_old_versions(older_than=timedelta(0), delete_unverified=True)
    assert stats.bytes_removed > 0
    assert table.version == 4

    with pytest.raises(Exception, match="Version 3 no longer exists"):
        table.checkout(3)
python/tests/test_telemetry.py (new file)
@@ -0,0 +1,60 @@
import json

import pytest

import lancedb
from lancedb.utils.events import _Events


@pytest.fixture(autouse=True)
def request_log_path(tmp_path):
    return tmp_path / "request.json"


def mock_register_event(name: str, **kwargs):
    if _Events._instance is None:
        _Events._instance = _Events()

    _Events._instance.enabled = True
    _Events._instance.rate_limit = 0
    _Events._instance(name, **kwargs)


def test_event_reporting(monkeypatch, request_log_path, tmp_path) -> None:
    def mock_request(**kwargs):
        json_data = kwargs.get("json", {})
        with open(request_log_path, "w") as f:
            json.dump(json_data, f)

    monkeypatch.setattr(
        lancedb.table, "register_event", mock_register_event
    )  # Force enable registering events and strip exception handling
    monkeypatch.setattr(lancedb.utils.events, "threaded_request", mock_request)

    db = lancedb.connect(tmp_path)
    db.create_table(
        "test",
        data=[
            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
            {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
        ],
        mode="overwrite",
    )

    assert request_log_path.exists()  # test if event was registered

    with open(request_log_path, "r") as f:
        json_data = json.load(f)

    # TODO: don't hardcode these here. Instead create a module-level JSON schema
    # in lancedb.utils.events for better evolvability
    batch_keys = ["api_key", "distinct_id", "batch"]
    event_keys = ["event", "properties", "timestamp", "distinct_id"]
    property_keys = ["cli", "install", "platforms", "version", "session_id"]

    assert all([key in json_data for key in batch_keys])
    assert all([key in json_data["batch"][0] for key in event_keys])
    assert all([key in json_data["batch"][0]["properties"] for key in property_keys])

    # cleanup & reset
    monkeypatch.undo()
    _Events._instance = None
@@ -1,6 +1,6 @@
[package]
name = "vectordb-node"
version = "0.2.5"
version = "0.3.0"
description = "Serverless, low-latency vector database for AI applications"
license = "Apache-2.0"
edition = "2018"
@@ -13,6 +13,7 @@ crate-type = ["cdylib"]
arrow-array = { workspace = true }
arrow-ipc = { workspace = true }
arrow-schema = { workspace = true }
chrono = { workspace = true }
conv = "0.3.3"
once_cell = "1"
futures = "0.3"
@@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use lance::index::vector::ivf::IvfBuildParams;
use lance::index::vector::pq::PQBuildParams;
use lance::index::vector::{ivf::IvfBuildParams, pq::PQBuildParams};
use lance_linalg::distance::MetricType;
use neon::context::FunctionContext;
use neon::prelude::*;
@@ -82,7 +81,7 @@ fn get_index_params_builder(
            let ivf_params = IvfBuildParams {
                num_partitions: np,
                max_iters,
                centroids: None,
                ..Default::default()
            };
            index_builder.ivf_params(ivf_params)
        });

@@ -195,7 +195,7 @@ fn database_open_table(mut cx: FunctionContext) -> JsResult<JsPromise> {

    let (deferred, promise) = cx.promise();
    rt.spawn(async move {
        let table_rst = database.open_table_with_params(&table_name, &params).await;
        let table_rst = database.open_table_with_params(&table_name, params).await;

        deferred.settle_with(&channel, move |mut cx| {
            let js_table = JsTable::from(table_rst.or_throw(&mut cx)?);
@@ -237,6 +237,8 @@ fn main(mut cx: ModuleContext) -> NeonResult<()> {
    cx.export_function("tableAdd", JsTable::js_add)?;
    cx.export_function("tableCountRows", JsTable::js_count_rows)?;
    cx.export_function("tableDelete", JsTable::js_delete)?;
    cx.export_function("tableCleanupOldVersions", JsTable::js_cleanup)?;
    cx.export_function("tableCompactFiles", JsTable::js_compact)?;
    cx.export_function(
        "tableCreateVectorIndex",
        index::vector::table_create_vector_index,

@@ -13,6 +13,7 @@
// limitations under the License.

use arrow_array::RecordBatchIterator;
use lance::dataset::optimize::CompactionOptions;
use lance::dataset::{WriteMode, WriteParams};
use lance::io::object_store::ObjectStoreParams;

@@ -163,4 +164,116 @@ impl JsTable {
        });
        Ok(promise)
    }

    pub(crate) fn js_cleanup(mut cx: FunctionContext) -> JsResult<JsPromise> {
        let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
        let rt = runtime(&mut cx)?;
        let (deferred, promise) = cx.promise();
        let table = js_table.table.clone();
        let channel = cx.channel();

        let older_than: i64 = cx
            .argument_opt(0)
            .and_then(|val| val.downcast::<JsNumber, _>(&mut cx).ok())
            .map(|val| val.value(&mut cx) as i64)
            .unwrap_or_else(|| 2 * 7 * 24 * 60); // 2 weeks
        let older_than = chrono::Duration::minutes(older_than);
        let delete_unverified: bool = cx
            .argument_opt(1)
            .and_then(|val| val.downcast::<JsBoolean, _>(&mut cx).ok())
            .map(|val| val.value(&mut cx))
            .unwrap_or_default();

        rt.spawn(async move {
            let stats = table
                .cleanup_old_versions(older_than, Some(delete_unverified))
                .await;

            deferred.settle_with(&channel, move |mut cx| {
                let stats = stats.or_throw(&mut cx)?;

                let output_metrics = JsObject::new(&mut cx);
                let bytes_removed = cx.number(stats.bytes_removed as f64);
                output_metrics.set(&mut cx, "bytesRemoved", bytes_removed)?;

                let old_versions = cx.number(stats.old_versions as f64);
                output_metrics.set(&mut cx, "oldVersions", old_versions)?;

                let output_table = cx.boxed(JsTable::from(table));

                let output = JsObject::new(&mut cx);
                output.set(&mut cx, "metrics", output_metrics)?;
                output.set(&mut cx, "newTable", output_table)?;

                Ok(output)
            })
        });
        Ok(promise)
    }

    pub(crate) fn js_compact(mut cx: FunctionContext) -> JsResult<JsPromise> {
        let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
        let rt = runtime(&mut cx)?;
        let (deferred, promise) = cx.promise();
        let mut table = js_table.table.clone();
        let channel = cx.channel();

        let js_options = cx.argument::<JsObject>(0)?;
        let mut options = CompactionOptions::default();

        if let Some(target_rows) =
            js_options.get_opt::<JsNumber, _, _>(&mut cx, "targetRowsPerFragment")?
        {
            options.target_rows_per_fragment = target_rows.value(&mut cx) as usize;
        }
        if let Some(max_per_group) =
            js_options.get_opt::<JsNumber, _, _>(&mut cx, "maxRowsPerGroup")?
        {
            options.max_rows_per_group = max_per_group.value(&mut cx) as usize;
        }
        if let Some(materialize_deletions) =
            js_options.get_opt::<JsBoolean, _, _>(&mut cx, "materializeDeletions")?
        {
            options.materialize_deletions = materialize_deletions.value(&mut cx);
        }
        if let Some(materialize_deletions_threshold) =
            js_options.get_opt::<JsNumber, _, _>(&mut cx, "materializeDeletionsThreshold")?
        {
            options.materialize_deletions_threshold =
                materialize_deletions_threshold.value(&mut cx) as f32;
        }
        if let Some(num_threads) = js_options.get_opt::<JsNumber, _, _>(&mut cx, "numThreads")? {
            options.num_threads = num_threads.value(&mut cx) as usize;
        }

        rt.spawn(async move {
            let stats = table.compact_files(options).await;

            deferred.settle_with(&channel, move |mut cx| {
                let stats = stats.or_throw(&mut cx)?;

                let output_metrics = JsObject::new(&mut cx);
                let fragments_removed = cx.number(stats.fragments_removed as f64);
                output_metrics.set(&mut cx, "fragmentsRemoved", fragments_removed)?;

                let fragments_added = cx.number(stats.fragments_added as f64);
                output_metrics.set(&mut cx, "fragmentsAdded", fragments_added)?;

                let files_removed = cx.number(stats.files_removed as f64);
                output_metrics.set(&mut cx, "filesRemoved", files_removed)?;

                let files_added = cx.number(stats.files_added as f64);
                output_metrics.set(&mut cx, "filesAdded", files_added)?;

                let output_table = cx.boxed(JsTable::from(table));

                let output = JsObject::new(&mut cx);
                output.set(&mut cx, "metrics", output_metrics)?;
                output.set(&mut cx, "newTable", output_table)?;

                Ok(output)
            })
        });
        Ok(promise)
    }
}
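The JS options object above is translated field by field onto lance's CompactionOptions. A minimal Rust-side sketch of the equivalent configuration (field names are taken from the diff above; the numeric value is an illustrative assumption, not a recommended default):

use lance::dataset::optimize::CompactionOptions;

fn example_compaction_options() -> CompactionOptions {
    CompactionOptions {
        // corresponds to the JS key "targetRowsPerFragment"
        target_rows_per_fragment: 1_000_000,
        // corresponds to the JS key "materializeDeletions"
        materialize_deletions: true,
        // "maxRowsPerGroup", the deletions threshold, and the thread count
        // keep their defaults
        ..Default::default()
    }
}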
@@ -1,6 +1,6 @@
[package]
name = "vectordb"
version = "0.2.5"
version = "0.3.0"
edition = "2021"
description = "LanceDB: A serverless, low-latency vector database for AI applications"
license = "Apache-2.0"
@@ -16,16 +16,22 @@ arrow-data = { workspace = true }
arrow-schema = { workspace = true }
arrow-ord = { workspace = true }
arrow-cast = { workspace = true }
chrono = { workspace = true }
object_store = { workspace = true }
snafu = { workspace = true }
half = { workspace = true }
lance = { workspace = true }
lance-linalg = { workspace = true }
lance-testing = { workspace = true }
tokio = { version = "1.23", features = ["rt-multi-thread"] }
log = { workspace = true }
async-trait = "0"
bytes = "1"
futures = "0"
num-traits = "0"
url = { workspace = true }

[dev-dependencies]
tempfile = "3.5.0"
rand = { version = "0.8.3", features = ["small_rng"] }
walkdir = "2"
@@ -14,13 +14,16 @@

use std::fs::create_dir_all;
use std::path::Path;
use std::sync::Arc;

use arrow_array::RecordBatchReader;
use lance::dataset::WriteParams;
use lance::io::object_store::ObjectStore;
use lance::io::object_store::{ObjectStore, WrappingObjectStore};
use object_store::local::LocalFileSystem;
use snafu::prelude::*;

use crate::error::{CreateDirSnafu, Error, InvalidTableNameSnafu, Result};
use crate::io::object_store::MirroringObjectStoreWrapper;
use crate::table::{ReadParams, Table};

pub const LANCE_FILE_EXTENSION: &str = "lance";
@@ -31,21 +34,14 @@ pub struct Database {

    pub(crate) uri: String,
    pub(crate) base_path: object_store::path::Path,

    // the object store wrapper to use on write path
    pub(crate) store_wrapper: Option<Arc<dyn WrappingObjectStore>>,
}

const LANCE_EXTENSION: &str = "lance";
const ENGINE: &str = "engine";

/// Parse a url, if it's not a valid url, assume it's a local file
/// and try to parse with file:// appended
fn parse_url(url: &str) -> Result<url::Url> {
    match url::Url::parse(url) {
        Ok(url) => Ok(url),
        Err(_) => url::Url::parse(format!("file://{}", url).as_str()).map_err(|e| Error::Lance {
            message: format!("Failed to parse uri: {}", e),
        }),
    }
}
const MIRRORED_STORE: &str = "mirroredStore";

/// A connection to LanceDB
impl Database {
@@ -59,73 +55,94 @@ impl Database {
    ///
    /// * A [Database] object.
    pub async fn connect(uri: &str) -> Result<Database> {
        // For a native (using lance directly) connection
        // The DB doesn't use any uri parameters, but lance does
        // So we need to parse the uri, extract the query string, and propagate it to lance
        let mut url = parse_url(uri)?;
        let parse_res = url::Url::parse(uri);

        // special handling for windows
        if url.scheme().len() == 1 && cfg!(windows) {
            let (object_store, base_path) = ObjectStore::from_uri(uri).await?;
            if object_store.is_local() {
                Self::try_create_dir(uri).context(CreateDirSnafu { path: uri })?;
        match parse_res {
            Ok(url) if url.scheme().len() == 1 && cfg!(windows) => Self::open_path(uri).await,
            Ok(mut url) => {
                // iter thru the query params and extract the commit store param
                let mut engine = None;
                let mut mirrored_store = None;
                let mut filtered_querys = vec![];

                // WARNING: specifying engine is NOT a publicly supported feature in lancedb yet
                // THE API WILL CHANGE
                for (key, value) in url.query_pairs() {
                    if key == ENGINE {
                        engine = Some(value.to_string());
                    } else if key == MIRRORED_STORE {
                        if cfg!(windows) {
                            return Err(Error::Lance {
                                message: "mirrored store is not supported on windows".into(),
                            });
                        }
                        mirrored_store = Some(value.to_string());
                    } else {
                        // to owned so we can modify the url
                        filtered_querys.push((key.to_string(), value.to_string()));
                    }
                }

                // Filter out the commit store query param -- it's a lancedb param
                url.query_pairs_mut().clear();
                url.query_pairs_mut().extend_pairs(filtered_querys);
                // Take a copy of the query string so we can propagate it to lance
                let query_string = url.query().map(|s| s.to_string());
                // clear the query string so we can use the url as the base uri
                // use .set_query(None) instead of .set_query("") because the latter
                // will add a trailing '?' to the url
                url.set_query(None);

                let table_base_uri = if let Some(store) = engine {
                    static WARN_ONCE: std::sync::Once = std::sync::Once::new();
                    WARN_ONCE.call_once(|| {
                        log::warn!("Specifying engine is not a publicly supported feature in lancedb yet. THE API WILL CHANGE");
                    });
                    let old_scheme = url.scheme().to_string();
                    let new_scheme = format!("{}+{}", old_scheme, store);
                    url.to_string().replacen(&old_scheme, &new_scheme, 1)
                } else {
                    url.to_string()
                };

                let plain_uri = url.to_string();
                let (object_store, base_path) = ObjectStore::from_uri(&plain_uri).await?;
                if object_store.is_local() {
                    Self::try_create_dir(&plain_uri).context(CreateDirSnafu { path: plain_uri })?;
                }

                let write_store_wrapper = match mirrored_store {
                    Some(path) => {
                        let mirrored_store = Arc::new(LocalFileSystem::new_with_prefix(path)?);
                        let wrapper = MirroringObjectStoreWrapper::new(mirrored_store);
                        Some(Arc::new(wrapper) as Arc<dyn WrappingObjectStore>)
                    }
                    None => None,
                };

                Ok(Database {
                    uri: table_base_uri,
                    query_string,
                    base_path,
                    object_store,
                    store_wrapper: write_store_wrapper,
                })
            }
            return Ok(Database {
                uri: uri.to_string(),
                query_string: None,
                base_path,
                object_store,
            });
            Err(_) => Self::open_path(uri).await,
        }
    }

        // iter thru the query params and extract the commit store param
        let mut engine = None;
        let mut filtered_querys = vec![];

        // WARNING: specifying engine is NOT a publicly supported feature in lancedb yet
        // THE API WILL CHANGE
        for (key, value) in url.query_pairs() {
            if key == ENGINE {
                engine = Some(value.to_string());
            } else {
                // to owned so we can modify the url
                filtered_querys.push((key.to_string(), value.to_string()));
            }
        }

        // Filter out the commit store query param -- it's a lancedb param
        url.query_pairs_mut().clear();
        url.query_pairs_mut().extend_pairs(filtered_querys);
        // Take a copy of the query string so we can propagate it to lance
        let query_string = url.query().map(|s| s.to_string());
        // clear the query string so we can use the url as the base uri
        // use .set_query(None) instead of .set_query("") because the latter
        // will add a trailing '?' to the url
        url.set_query(None);

        let table_base_uri = if let Some(store) = engine {
            static WARN_ONCE: std::sync::Once = std::sync::Once::new();
            WARN_ONCE.call_once(|| {
                log::warn!("Specifying engine is not a publicly supported feature in lancedb yet. THE API WILL CHANGE");
            });
            let old_scheme = url.scheme().to_string();
            let new_scheme = format!("{}+{}", old_scheme, store);
            url.to_string().replacen(&old_scheme, &new_scheme, 1)
        } else {
            url.to_string()
        };

        let plain_uri = url.to_string();
        let (object_store, base_path) = ObjectStore::from_uri(&plain_uri).await?;
    async fn open_path(path: &str) -> Result<Database> {
        let (object_store, base_path) = ObjectStore::from_uri(path).await?;
        if object_store.is_local() {
            Self::try_create_dir(&plain_uri).context(CreateDirSnafu { path: plain_uri })?;
            Self::try_create_dir(path).context(CreateDirSnafu { path: path })?;
        }

        Ok(Database {
            uri: table_base_uri,
            query_string,
        Ok(Self {
            uri: path.to_string(),
            query_string: None,
            base_path,
            object_store,
            store_wrapper: None,
        })
    }
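A hedged usage sketch of the new connection path (the mirroredStore query key comes from the constant above; the local paths are made up for illustration, and the sketch assumes the crate's snafu-derived error implements std::error::Error): the parameter is stripped from the URI and turned into a MirroringObjectStoreWrapper before the database is opened, so writes are mirrored while reads and manifests stay on the primary store.

use vectordb::Database;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Writes land on both stores; manifests and reads stay on the primary.
    let _db = Database::connect("file:///mnt/durable/db?mirroredStore=/mnt/fast-ssd").await?;
    Ok(())
}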
@@ -175,7 +192,15 @@ impl Database {
        params: Option<WriteParams>,
    ) -> Result<Table> {
        let table_uri = self.table_uri(name)?;
        Table::create(&table_uri, name, batches, params).await

        Table::create(
            &table_uri,
            name,
            batches,
            self.store_wrapper.clone(),
            params,
        )
        .await
    }

    /// Open a table in the database.
@@ -187,7 +212,7 @@ impl Database {
    ///
    /// * A [Table] object.
    pub async fn open_table(&self, name: &str) -> Result<Table> {
        self.open_table_with_params(name, &ReadParams::default())
        self.open_table_with_params(name, ReadParams::default())
            .await
    }

@@ -200,9 +225,9 @@ impl Database {
    /// # Returns
    ///
    /// * A [Table] object.
    pub async fn open_table_with_params(&self, name: &str, params: &ReadParams) -> Result<Table> {
    pub async fn open_table_with_params(&self, name: &str, params: ReadParams) -> Result<Table> {
        let table_uri = self.table_uri(name)?;
        Table::open_with_params(&table_uri, name, params).await
        Table::open_with_params(&table_uri, name, self.store_wrapper.clone(), params).await
    }

    /// Drop a table in the database.
@@ -240,6 +265,7 @@ impl Database {
#[cfg(test)]
mod tests {
    use std::fs::create_dir_all;

    use tempfile::tempdir;

    use crate::database::Database;
@@ -250,15 +276,29 @@ mod tests {
        let uri = tmp_dir.path().to_str().unwrap();
        let db = Database::connect(uri).await.unwrap();

        // file:// scheme should be automatically appended if not specified
        // windows paths come with a drive letter, so file:// won't be appended
        let expected = if cfg!(windows) {
            uri.to_string()
        } else {
            format!("file://{}", uri)
        };
        assert_eq!(db.uri, uri);
    }

        assert_eq!(db.uri, expected);
    #[cfg(not(windows))]
    #[tokio::test]
    async fn test_connect_relative() {
        let tmp_dir = tempdir().unwrap();
        let uri = std::fs::canonicalize(tmp_dir.path().to_str().unwrap()).unwrap();

        let mut relative_ancestors = vec![];
        let current_dir = std::env::current_dir().unwrap();
        let mut ancestors = current_dir.ancestors();
        while let Some(_) = ancestors.next() {
            relative_ancestors.push("..");
        }
        let relative_root = std::path::PathBuf::from(relative_ancestors.join("/"));
        let relative_uri = relative_root.join(&uri);

        let db = Database::connect(relative_uri.to_str().unwrap())
            .await
            .unwrap();

        assert_eq!(db.uri, relative_uri.to_str().unwrap().to_string());
    }

    #[tokio::test]
rust/vectordb/src/io.rs (new file)
@@ -0,0 +1 @@
pub mod object_store;

rust/vectordb/src/io/object_store.rs (new file)
@@ -0,0 +1,396 @@
// Copyright 2023 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! A mirroring object store that mirrors writes to a secondary object store

use std::{
    fmt::Formatter,
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
};

use bytes::Bytes;
use futures::{stream::BoxStream, FutureExt, StreamExt};
use lance::io::object_store::WrappingObjectStore;
use object_store::{
    path::Path, GetOptions, GetResult, ListResult, MultipartId, ObjectMeta, ObjectStore, Result,
};

use async_trait::async_trait;
use tokio::{
    io::{AsyncWrite, AsyncWriteExt},
    task::JoinHandle,
};

#[derive(Debug)]
struct MirroringObjectStore {
    primary: Arc<dyn ObjectStore>,
    secondary: Arc<dyn ObjectStore>,
}

impl std::fmt::Display for MirroringObjectStore {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        writeln!(f, "MirroringObjectStore")?;
        writeln!(f, "primary:")?;
        self.primary.fmt(f)?;
        writeln!(f, "secondary:")?;
        self.secondary.fmt(f)?;
        Ok(())
    }
}

trait PrimaryOnly {
    fn primary_only(&self) -> bool;
}

impl PrimaryOnly for Path {
    fn primary_only(&self) -> bool {
        self.to_string().contains("manifest")
    }
}

/// An object store that mirrors writes to a secondary object store first
/// and then commits to the primary object store.
///
/// This is meant to mirror writes to a less-durable but lower-latency
/// store. We have a primary store that is durable but slow, and a secondary
/// store that is fast but not as durable.
///
/// Note: this object store does not mirror writes to *.manifest files
#[async_trait]
impl ObjectStore for MirroringObjectStore {
    async fn put(&self, location: &Path, bytes: Bytes) -> Result<()> {
        if location.primary_only() {
            self.primary.put(location, bytes).await
        } else {
            self.secondary.put(location, bytes.clone()).await?;
            self.primary.put(location, bytes).await?;
            Ok(())
        }
    }

    async fn put_multipart(
        &self,
        location: &Path,
    ) -> Result<(MultipartId, Box<dyn AsyncWrite + Unpin + Send>)> {
        if location.primary_only() {
            return self.primary.put_multipart(location).await;
        }

        let (id, stream) = self.secondary.put_multipart(location).await?;

        let mirroring_upload = MirroringUpload::new(
            Pin::new(stream),
            self.primary.clone(),
            self.secondary.clone(),
            location.clone(),
        );

        Ok((id, Box::new(mirroring_upload)))
    }

    async fn abort_multipart(&self, location: &Path, multipart_id: &MultipartId) -> Result<()> {
        if location.primary_only() {
            return self.primary.abort_multipart(location, multipart_id).await;
        }

        self.secondary.abort_multipart(location, multipart_id).await
    }

    // Reads are routed to primary only
    async fn get_opts(&self, location: &Path, options: GetOptions) -> Result<GetResult> {
        self.primary.get_opts(location, options).await
    }

    async fn head(&self, location: &Path) -> Result<ObjectMeta> {
        self.primary.head(location).await
    }

    // garbage collection on secondary will happen async from other means
    async fn delete(&self, location: &Path) -> Result<()> {
        self.primary.delete(location).await
    }

    async fn list(&self, prefix: Option<&Path>) -> Result<BoxStream<'_, Result<ObjectMeta>>> {
        self.primary.list(prefix).await
    }

    async fn list_with_delimiter(&self, prefix: Option<&Path>) -> Result<ListResult> {
        self.primary.list_with_delimiter(prefix).await
    }

    async fn copy(&self, from: &Path, to: &Path) -> Result<()> {
        if from.primary_only() {
            self.primary.copy(from, to).await
        } else {
            self.secondary.copy(from, to).await?;
            self.primary.copy(from, to).await?;
            Ok(())
        }
    }

    async fn copy_if_not_exists(&self, from: &Path, to: &Path) -> Result<()> {
        self.primary.copy_if_not_exists(from, to).await
    }
}

struct MirroringUpload {
    secondary_stream: Pin<Box<dyn AsyncWrite + Unpin + Send>>,

    primary_store: Arc<dyn ObjectStore>,
    secondary_store: Arc<dyn ObjectStore>,
    location: Path,

    state: MirroringUploadShutdown,
}

// The state goes from
// None
// -> (secondary)ShuttingDown
// -> (secondary)ShutdownDone
// -> Uploading(to primary)
// -> Completed
#[derive(Debug)]
enum MirroringUploadShutdown {
    None,
    ShuttingDown,
    ShutdownDone,
    Uploading(Pin<Box<JoinHandle<()>>>),
    Completed,
}

impl MirroringUpload {
    pub fn new(
        secondary_stream: Pin<Box<dyn AsyncWrite + Unpin + Send>>,
        primary_store: Arc<dyn ObjectStore>,
        secondary_store: Arc<dyn ObjectStore>,
        location: Path,
    ) -> Self {
        Self {
            secondary_stream,
            primary_store,
            secondary_store,
            location,
            state: MirroringUploadShutdown::None,
        }
    }
}

impl AsyncWrite for MirroringUpload {
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        if !matches!(self.state, MirroringUploadShutdown::None) {
            return Poll::Ready(Err(std::io::Error::new(
                std::io::ErrorKind::Other,
                "already shutdown",
            )));
        }
        // Write to secondary first
        let mut_self = self.get_mut();
        mut_self.secondary_stream.as_mut().poll_write(cx, buf)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        if !matches!(self.state, MirroringUploadShutdown::None) {
            return Poll::Ready(Err(std::io::Error::new(
                std::io::ErrorKind::Other,
                "already shutdown",
            )));
        }

        let mut_self = self.get_mut();
        mut_self.secondary_stream.as_mut().poll_flush(cx)
    }

    fn poll_shutdown(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        let mut_self = self.get_mut();

        loop {
            // try to shutdown secondary first
            match &mut mut_self.state {
                MirroringUploadShutdown::None | MirroringUploadShutdown::ShuttingDown => {
                    match mut_self.secondary_stream.as_mut().poll_shutdown(cx) {
                        Poll::Ready(Ok(())) => {
                            mut_self.state = MirroringUploadShutdown::ShutdownDone;
                            // don't return, no waker is setup
                        }
                        Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
                        Poll::Pending => {
                            mut_self.state = MirroringUploadShutdown::ShuttingDown;
                            return Poll::Pending;
                        }
                    }
                }
                MirroringUploadShutdown::ShutdownDone => {
                    let primary_store = mut_self.primary_store.clone();
                    let secondary_store = mut_self.secondary_store.clone();
                    let location = mut_self.location.clone();

                    let upload_future =
                        Box::pin(tokio::runtime::Handle::current().spawn(async move {
                            let mut source =
                                secondary_store.get(&location).await.unwrap().into_stream();
                            let upload_stream = primary_store.put_multipart(&location).await;
                            let (_, mut stream) = upload_stream.unwrap();

                            while let Some(buf) = source.next().await {
                                let buf = buf.unwrap();
                                stream.write_all(&buf).await.unwrap();
                            }

                            stream.shutdown().await.unwrap();
                        }));
                    mut_self.state = MirroringUploadShutdown::Uploading(upload_future);
                    // don't return, no waker is setup
                }
                MirroringUploadShutdown::Uploading(ref mut join_handle) => {
                    match join_handle.poll_unpin(cx) {
                        Poll::Ready(Ok(())) => {
                            mut_self.state = MirroringUploadShutdown::Completed;
                            return Poll::Ready(Ok(()));
                        }
                        Poll::Ready(Err(e)) => {
                            mut_self.state = MirroringUploadShutdown::Completed;
                            return Poll::Ready(Err(e.into()));
                        }
                        Poll::Pending => {
                            return Poll::Pending;
                        }
                    }
                }
                MirroringUploadShutdown::Completed => {
                    return Poll::Ready(Err(std::io::Error::new(
                        std::io::ErrorKind::Other,
                        "shutdown already completed",
                    )))
                }
            }
        }
    }
}

#[derive(Debug)]
pub struct MirroringObjectStoreWrapper {
    secondary: Arc<dyn ObjectStore>,
}

impl MirroringObjectStoreWrapper {
    pub fn new(secondary: Arc<dyn ObjectStore>) -> Self {
        Self { secondary }
    }
}

impl WrappingObjectStore for MirroringObjectStoreWrapper {
    fn wrap(&self, primary: Arc<dyn ObjectStore>) -> Arc<dyn ObjectStore> {
        Arc::new(MirroringObjectStore {
            primary,
            secondary: self.secondary.clone(),
        })
    }
}

// windows pathing can't be simply concatenated
#[cfg(all(test, not(windows)))]
mod test {
    use super::*;
    use crate::Database;
    use arrow_array::PrimitiveArray;
    use futures::TryStreamExt;
    use lance::{dataset::WriteParams, io::object_store::ObjectStoreParams};
    use lance_testing::datagen::{BatchGenerator, IncrementingInt32, RandomVector};
    use object_store::local::LocalFileSystem;
    use tempfile;

    #[tokio::test]
    async fn test_e2e() {
        let dir1 = tempfile::tempdir().unwrap().into_path();
        let dir2 = tempfile::tempdir().unwrap().into_path();

        let secondary_store = LocalFileSystem::new_with_prefix(dir2.to_str().unwrap()).unwrap();
        let object_store_wrapper = Arc::new(MirroringObjectStoreWrapper {
            secondary: Arc::new(secondary_store),
        });

        let db = Database::connect(dir1.to_str().unwrap()).await.unwrap();

        let mut param = WriteParams::default();
        let mut store_params = ObjectStoreParams::default();
        store_params.object_store_wrapper = Some(object_store_wrapper);
        param.store_params = Some(store_params);

        let mut datagen = BatchGenerator::new();
        datagen = datagen.col(Box::new(IncrementingInt32::default()));
        datagen = datagen.col(Box::new(RandomVector::default().named("vector".into())));

        let res = db
            .create_table("test", datagen.batch(100), Some(param.clone()))
            .await;

        // leave this here for easy debugging
        let t = res.unwrap();

        assert_eq!(t.count_rows().await.unwrap(), 100);

        let q = t
            .search(PrimitiveArray::from_iter_values(vec![0.1, 0.1, 0.1, 0.1]))
            .limit(10)
            .execute()
            .await
            .unwrap();

        let batches = q.try_collect::<Vec<_>>().await.unwrap();
        assert_eq!(batches.len(), 1);
        assert_eq!(batches[0].num_rows(), 10);

        use walkdir::WalkDir;

        let primary_location = dir1.join("test.lance").canonicalize().unwrap();
        let secondary_location = dir2.join(primary_location.strip_prefix("/").unwrap());

        let mut primary_iter = WalkDir::new(&primary_location).into_iter();
        let mut secondary_iter = WalkDir::new(&secondary_location).into_iter();

        let mut primary_elem = primary_iter.next();
        let mut secondary_elem = secondary_iter.next();

        loop {
            if primary_elem.is_none() && secondary_elem.is_none() {
                break;
            }
            // primary has more data than secondary, should not run out before secondary
            let primary_f = primary_elem.unwrap().unwrap();
            // hit manifest, skip; _versions contains all the manifests and should not exist on the secondary
            let primary_raw_path = primary_f.file_name().to_str().unwrap();
            if primary_raw_path.contains("manifest") || primary_raw_path.contains("_versions") {
                primary_elem = primary_iter.next();
                continue;
            }
            let secondary_f = secondary_elem.unwrap().unwrap();
            assert_eq!(
                primary_f.path().strip_prefix(&primary_location),
                secondary_f.path().strip_prefix(&secondary_location)
            );

            primary_elem = primary_iter.next();
            secondary_elem = secondary_iter.next();
        }
    }
}
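For reference, the wrapper composes with any object_store implementation; a minimal sketch of wrapping a primary store by hand (the InMemory primary is only an illustrative stand-in, not how the crate wires it up):

use std::sync::Arc;
use lance::io::object_store::WrappingObjectStore;
use object_store::{local::LocalFileSystem, memory::InMemory, ObjectStore};

fn mirrored(secondary_dir: &str) -> Arc<dyn ObjectStore> {
    // Primary: an in-memory store for the sketch; secondary: a local directory.
    let primary: Arc<dyn ObjectStore> = Arc::new(InMemory::new());
    let secondary = Arc::new(LocalFileSystem::new_with_prefix(secondary_dir).unwrap());
    // All non-manifest writes will be mirrored to `secondary_dir`.
    MirroringObjectStoreWrapper::new(secondary).wrap(primary)
}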
@@ -16,8 +16,10 @@ pub mod data;
pub mod database;
pub mod error;
pub mod index;
pub mod io;
pub mod query;
pub mod table;
pub mod utils;

pub use database::Database;
pub use table::Table;
@@ -12,17 +12,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use chrono::Duration;
use std::sync::Arc;

use arrow_array::{Float32Array, RecordBatchReader};
use arrow_schema::SchemaRef;
use lance::dataset::cleanup::RemovalStats;
use lance::dataset::optimize::{compact_files, CompactionMetrics, CompactionOptions};
use lance::dataset::{Dataset, WriteParams};
use lance::index::IndexType;
use lance::io::object_store::WrappingObjectStore;
use std::path::Path;

use crate::error::{Error, Result};
use crate::index::vector::VectorIndexBuilder;
use crate::query::Query;
use crate::utils::{PatchReadParam, PatchWriteParam};
use crate::WriteMode;

pub use lance::dataset::ReadParams;
@@ -35,6 +40,9 @@ pub struct Table {
    name: String,
    uri: String,
    dataset: Arc<Dataset>,

    // the object store wrapper to use on write path
    store_wrapper: Option<Arc<dyn WrappingObjectStore>>,
}

impl std::fmt::Display for Table {
@@ -56,12 +64,12 @@ impl Table {
    /// * A [Table] object.
    pub async fn open(uri: &str) -> Result<Self> {
        let name = Self::get_table_name(uri)?;
        Self::open_with_params(uri, &name, &ReadParams::default()).await
        Self::open_with_params(uri, &name, None, ReadParams::default()).await
    }

    /// Open a Table with a given name.
    pub async fn open_with_name(uri: &str, name: &str) -> Result<Self> {
        Self::open_with_params(uri, name, &ReadParams::default()).await
        Self::open_with_params(uri, name, None, ReadParams::default()).await
    }

    /// Opens an existing Table
@@ -75,8 +83,18 @@ impl Table {
    /// # Returns
    ///
    /// * A [Table] object.
    pub async fn open_with_params(uri: &str, name: &str, params: &ReadParams) -> Result<Self> {
        let dataset = Dataset::open_with_params(uri, params)
    pub async fn open_with_params(
        uri: &str,
        name: &str,
        write_store_wrapper: Option<Arc<dyn WrappingObjectStore>>,
        params: ReadParams,
    ) -> Result<Self> {
        // patch the params if we have a write store wrapper
        let params = match write_store_wrapper.clone() {
            Some(wrapper) => params.patch_with_store_wrapper(wrapper)?,
            None => params,
        };
        let dataset = Dataset::open_with_params(uri, &params)
            .await
            .map_err(|e| match e {
                lance::Error::DatasetNotFound { .. } => Error::TableNotFound {
@@ -90,6 +108,7 @@ impl Table {
            name: name.to_string(),
            uri: uri.to_string(),
            dataset: Arc::new(dataset),
            store_wrapper: write_store_wrapper,
        })
    }

@@ -97,20 +116,26 @@ impl Table {
    ///
    pub async fn checkout(uri: &str, version: u64) -> Result<Self> {
        let name = Self::get_table_name(uri)?;
        Self::checkout_with_params(uri, &name, version, &ReadParams::default()).await
        Self::checkout_with_params(uri, &name, version, None, ReadParams::default()).await
    }

    pub async fn checkout_with_name(uri: &str, name: &str, version: u64) -> Result<Self> {
        Self::checkout_with_params(uri, name, version, &ReadParams::default()).await
        Self::checkout_with_params(uri, name, version, None, ReadParams::default()).await
    }

    pub async fn checkout_with_params(
        uri: &str,
        name: &str,
        version: u64,
        params: &ReadParams,
        write_store_wrapper: Option<Arc<dyn WrappingObjectStore>>,
        params: ReadParams,
    ) -> Result<Self> {
        let dataset = Dataset::checkout_with_params(uri, version, params)
        // patch the params if we have a write store wrapper
        let params = match write_store_wrapper.clone() {
            Some(wrapper) => params.patch_with_store_wrapper(wrapper)?,
            None => params,
        };
        let dataset = Dataset::checkout_with_params(uri, version, &params)
            .await
            .map_err(|e| match e {
                lance::Error::DatasetNotFound { .. } => Error::TableNotFound {
@@ -124,6 +149,7 @@ impl Table {
            name: name.to_string(),
            uri: uri.to_string(),
            dataset: Arc::new(dataset),
            store_wrapper: write_store_wrapper,
        })
    }

@@ -157,8 +183,15 @@ impl Table {
        uri: &str,
        name: &str,
        batches: impl RecordBatchReader + Send + 'static,
        write_store_wrapper: Option<Arc<dyn WrappingObjectStore>>,
        params: Option<WriteParams>,
    ) -> Result<Self> {
        // patch the params if we have a write store wrapper
        let params = match write_store_wrapper.clone() {
            Some(wrapper) => params.patch_with_store_wrapper(wrapper)?,
            None => params,
        };

        let dataset = Dataset::write(batches, uri, params)
            .await
            .map_err(|e| match e {
@@ -173,6 +206,7 @@ impl Table {
            name: name.to_string(),
            uri: uri.to_string(),
            dataset: Arc::new(dataset),
            store_wrapper: write_store_wrapper,
        })
    }

@@ -190,8 +224,8 @@ impl Table {
    pub async fn create_index(&mut self, index_builder: &impl VectorIndexBuilder) -> Result<()> {
        use lance::index::DatasetIndexExt;

        let dataset = self
            .dataset
        let mut dataset = self.dataset.as_ref().clone();
        dataset
            .create_index(
                &[index_builder
                    .get_column()
@@ -221,12 +255,18 @@ impl Table {
        batches: impl RecordBatchReader + Send + 'static,
        params: Option<WriteParams>,
    ) -> Result<()> {
        let params = params.unwrap_or(WriteParams {
        let params = Some(params.unwrap_or(WriteParams {
            mode: WriteMode::Append,
            ..WriteParams::default()
        });
        }));

        self.dataset = Arc::new(Dataset::write(batches, &self.uri, Some(params)).await?);
        // patch the params if we have a write store wrapper
        let params = match self.store_wrapper.clone() {
            Some(wrapper) => params.patch_with_store_wrapper(wrapper)?,
            None => params,
        };

        self.dataset = Arc::new(Dataset::write(batches, &self.uri, params).await?);
        Ok(())
    }

@@ -268,6 +308,41 @@ impl Table {
        self.dataset = Arc::new(dataset);
        Ok(())
    }

    /// Remove old versions of the dataset from disk.
    ///
    /// # Arguments
    /// * `older_than` - The duration of time to keep versions of the dataset.
    /// * `delete_unverified` - Because they may be part of an in-progress
    ///   transaction, files newer than 7 days old are not deleted by default.
    ///   If you are sure that there are no in-progress transactions, then you
    ///   can set this to True to delete all files older than `older_than`.
    ///
    /// This calls into [lance::dataset::Dataset::cleanup_old_versions] and
    /// returns the result.
    pub async fn cleanup_old_versions(
        &self,
        older_than: Duration,
        delete_unverified: Option<bool>,
    ) -> Result<RemovalStats> {
        Ok(self
            .dataset
            .cleanup_old_versions(older_than, delete_unverified)
            .await?)
    }

    /// Compact files in the dataset.
    ///
    /// This can be run after making several small appends to optimize the table
    /// for faster reads.
    ///
    /// This calls into [lance::dataset::optimize::compact_files].
    pub async fn compact_files(&mut self, options: CompactionOptions) -> Result<CompactionMetrics> {
        let mut dataset = self.dataset.as_ref().clone();
        let metrics = compact_files(&mut dataset, options).await?;
        self.dataset = Arc::new(dataset);
        Ok(metrics)
    }
}
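Taken together, the two new methods give a small maintenance loop; a hedged sketch of how a caller might use them (the two-week retention mirrors the node binding's default above, and the sketch assumes the crate's error::Result alias is reachable as shown):

use chrono::Duration;
use lance::dataset::optimize::CompactionOptions;

async fn maintain(table: &mut vectordb::Table) -> vectordb::error::Result<()> {
    // Rewrite small fragments into larger ones for faster scans.
    let metrics = table.compact_files(CompactionOptions::default()).await?;
    println!("fragments removed: {}", metrics.fragments_removed);

    // Then drop versions older than two weeks that compaction left behind.
    let stats = table.cleanup_old_versions(Duration::weeks(2), None).await?;
    println!("bytes removed: {}", stats.bytes_removed);
    Ok(())
}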
#[cfg(test)]
@@ -330,10 +405,12 @@ mod tests {

        let batches = make_test_batches();
        let _ = batches.schema().clone();
        Table::create(&uri, "test", batches, None).await.unwrap();
        Table::create(&uri, "test", batches, None, None)
            .await
            .unwrap();

        let batches = make_test_batches();
        let result = Table::create(&uri, "test", batches, None).await;
        let result = Table::create(&uri, "test", batches, None, None).await;
        assert!(matches!(
            result.unwrap_err(),
            Error::TableAlreadyExists { .. }
@@ -347,7 +424,9 @@ mod tests {

        let batches = make_test_batches();
        let schema = batches.schema().clone();
        let mut table = Table::create(&uri, "test", batches, None).await.unwrap();
        let mut table = Table::create(&uri, "test", batches, None, None)
            .await
            .unwrap();
        assert_eq!(table.count_rows().await.unwrap(), 10);

        let new_batches = RecordBatchIterator::new(
@@ -373,7 +452,9 @@ mod tests {

        let batches = make_test_batches();
        let schema = batches.schema().clone();
        let mut table = Table::create(uri, "test", batches, None).await.unwrap();
        let mut table = Table::create(uri, "test", batches, None, None)
            .await
            .unwrap();
        assert_eq!(table.count_rows().await.unwrap(), 10);

        let new_batches = RecordBatchIterator::new(
@@ -456,7 +537,9 @@ mod tests {
            ..Default::default()
        };
        assert!(!wrapper.called());
        let _ = Table::open_with_params(uri, "test", &param).await.unwrap();
        let _ = Table::open_with_params(uri, "test", None, param)
            .await
            .unwrap();
        assert!(wrapper.called());
    }

@@ -508,7 +591,9 @@ mod tests {
            schema,
        );

        let mut table = Table::create(uri, "test", batches, None).await.unwrap();
        let mut table = Table::create(uri, "test", batches, None, None)
            .await
            .unwrap();
        let mut i = IvfPQIndexBuilder::new();

        let index_builder = i
rust/vectordb/src/utils.rs (new file)
@@ -0,0 +1,67 @@
use std::sync::Arc;

use lance::{
    dataset::{ReadParams, WriteParams},
    io::object_store::{ObjectStoreParams, WrappingObjectStore},
};

use crate::error::{Error, Result};

pub trait PatchStoreParam {
    fn patch_with_store_wrapper(
        self,
        wrapper: Arc<dyn WrappingObjectStore>,
    ) -> Result<Option<ObjectStoreParams>>;
}

impl PatchStoreParam for Option<ObjectStoreParams> {
    fn patch_with_store_wrapper(
        self,
        wrapper: Arc<dyn WrappingObjectStore>,
    ) -> Result<Option<ObjectStoreParams>> {
        let mut params = self.unwrap_or_default();
        if params.object_store_wrapper.is_some() {
            return Err(Error::Lance {
                message: "can not patch param because object store is already set".into(),
            });
        }
        params.object_store_wrapper = Some(wrapper);

        Ok(Some(params))
    }
}

pub trait PatchWriteParam {
    fn patch_with_store_wrapper(
        self,
        wrapper: Arc<dyn WrappingObjectStore>,
    ) -> Result<Option<WriteParams>>;
}

impl PatchWriteParam for Option<WriteParams> {
    fn patch_with_store_wrapper(
        self,
        wrapper: Arc<dyn WrappingObjectStore>,
    ) -> Result<Option<WriteParams>> {
        let mut params = self.unwrap_or_default();
        params.store_params = params.store_params.patch_with_store_wrapper(wrapper)?;
        Ok(Some(params))
    }
}

// NOTE: we have some API inconsistency here.
// WriteParam is found in the form of Option<WriteParam> and ReadParam is found in the form of ReadParam

pub trait PatchReadParam {
    fn patch_with_store_wrapper(self, wrapper: Arc<dyn WrappingObjectStore>) -> Result<ReadParams>;
}

impl PatchReadParam for ReadParams {
    fn patch_with_store_wrapper(
        mut self,
        wrapper: Arc<dyn WrappingObjectStore>,
    ) -> Result<ReadParams> {
        self.store_options = self.store_options.patch_with_store_wrapper(wrapper)?;
        Ok(self)
    }
}
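A short sketch of how these traits are used on the write path (the same pattern as Table::create above; it assumes the utils module and trait are importable as shown, and the wrapper argument is whatever MirroringObjectStoreWrapper the connection built):

use std::sync::Arc;
use lance::dataset::WriteParams;
use lance::io::object_store::WrappingObjectStore;
use vectordb::utils::PatchWriteParam;

fn patch_write_params(
    params: Option<WriteParams>,
    wrapper: Option<Arc<dyn WrappingObjectStore>>,
) -> vectordb::error::Result<Option<WriteParams>> {
    match wrapper {
        // `None` params are promoted to defaults with the wrapper installed
        Some(w) => params.patch_with_store_wrapper(w),
        None => Ok(params),
    }
}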