Mirror of https://github.com/lancedb/lancedb.git (synced 2025-12-23 13:29:57 +00:00)

Compare commits: update-doc ... python-v0. (14 commits)
| SHA1 |
|---|
| 60260018cf |
| bb100c5c19 |
| eab9072bb5 |
| ee0f0611d9 |
| 34966312cb |
| 756188358c |
| dc5126d8d1 |
| 50c20af060 |
| 0965d7dd5a |
| 7bbb2872de |
| e81d2975da |
| 2c7f96ba4f |
| f9dd7a5d8a |
| 1d4943688d |
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.4.0
+current_version = 0.4.1
 commit = True
 message = Bump version: {current_version} → {new_version}
 tag = True
.github/ISSUE_TEMPLATE/bug-node.yml (vendored) — new file, 33 lines
@@ -0,0 +1,33 @@
+name: Bug Report - Node / Typescript
+description: File a bug report
+title: "bug(node): "
+labels: [bug, typescript]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for taking the time to fill out this bug report!
+  - type: input
+    id: version
+    attributes:
+      label: LanceDB version
+      description: What version of LanceDB are you using? `npm list | grep vectordb`.
+      placeholder: v0.3.2
+    validations:
+      required: false
+  - type: textarea
+    id: what-happened
+    attributes:
+      label: What happened?
+      description: Also tell us, what did you expect to happen?
+    validations:
+      required: true
+  - type: textarea
+    id: reproduction
+    attributes:
+      label: Are there known steps to reproduce?
+      description: |
+        Let us know how to reproduce the bug and we may be able to fix it more
+        quickly. This is not required, but it is helpful.
+    validations:
+      required: false
.github/ISSUE_TEMPLATE/bug-python.yml (vendored) — new file, 33 lines
@@ -0,0 +1,33 @@
+name: Bug Report - Python
+description: File a bug report
+title: "bug(python): "
+labels: [bug, python]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for taking the time to fill out this bug report!
+  - type: input
+    id: version
+    attributes:
+      label: LanceDB version
+      description: What version of LanceDB are you using? `python -c "import lancedb; print(lancedb.__version__)"`.
+      placeholder: v0.3.2
+    validations:
+      required: false
+  - type: textarea
+    id: what-happened
+    attributes:
+      label: What happened?
+      description: Also tell us, what did you expect to happen?
+    validations:
+      required: true
+  - type: textarea
+    id: reproduction
+    attributes:
+      label: Are there known steps to reproduce?
+      description: |
+        Let us know how to reproduce the bug and we may be able to fix it more
+        quickly. This is not required, but it is helpful.
+    validations:
+      required: false
.github/ISSUE_TEMPLATE/config.yml (vendored) — new file, 5 lines
@@ -0,0 +1,5 @@
+blank_issues_enabled: true
+contact_links:
+  - name: Discord Community Support
+    url: https://discord.com/invite/zMM32dvNtd
+    about: Please ask and answer questions here.
.github/ISSUE_TEMPLATE/documentation.yml (vendored) — new file, 23 lines
@@ -0,0 +1,23 @@
+name: 'Documentation improvement'
+description: Report an issue with the documentation.
+labels: [documentation]
+
+body:
+  - type: textarea
+    id: description
+    attributes:
+      label: Description
+      description: >
+        Describe the issue with the documentation and how it can be fixed or improved.
+    validations:
+      required: true
+
+  - type: input
+    id: link
+    attributes:
+      label: Link
+      description: >
+        Provide a link to the existing documentation, if applicable.
+      placeholder: ex. https://lancedb.github.io/lancedb/guides/tables/...
+    validations:
+      required: false
.github/ISSUE_TEMPLATE/feature.yml (vendored) — new file, 31 lines
@@ -0,0 +1,31 @@
+name: Feature suggestion
+description: Suggestion a new feature for LanceDB
+title: "Feature: "
+labels: [enhancement]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Share a new idea for a feature or improvement. Be sure to search existing
+        issues first to avoid duplicates.
+  - type: dropdown
+    id: sdk
+    attributes:
+      label: SDK
+      description: Which SDK are you using? This helps us prioritize.
+      options:
+        - Python
+        - Node
+        - Rust
+      default: 0
+    validations:
+      required: false
+  - type: textarea
+    id: description
+    attributes:
+      label: Description
+      description: |
+        Describe the feature and why it would be useful. If applicable, consider
+        providing a code example of what it might be like to use the feature.
+    validations:
+      required: true
.github/workflows/python.yml (vendored) — 13 changes
@@ -44,12 +44,19 @@ jobs:
        run: pytest -m "not slow" -x -v --durations=30 tests
      - name: doctest
        run: pytest --doctest-modules lancedb
-  mac:
+  platform:
+    name: "Platform: ${{ matrix.config.name }}"
     timeout-minutes: 30
     strategy:
       matrix:
-        mac-runner: [ "macos-13", "macos-13-xlarge" ]
-    runs-on: "${{ matrix.mac-runner }}"
+        config:
+          - name: x86 Mac
+            runner: macos-13
+          - name: Arm Mac
+            runner: macos-13-xlarge
+          - name: x86 Windows
+            runner: windows-latest
+    runs-on: "${{ matrix.config.runner }}"
     defaults:
       run:
         shell: bash
.github/workflows/rust.yml
vendored
23
.github/workflows/rust.yml
vendored
@@ -24,6 +24,29 @@ env:
|
|||||||
RUST_BACKTRACE: "1"
|
RUST_BACKTRACE: "1"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
lint:
|
||||||
|
timeout-minutes: 30
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
shell: bash
|
||||||
|
working-directory: rust
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
lfs: true
|
||||||
|
- uses: Swatinem/rust-cache@v2
|
||||||
|
with:
|
||||||
|
workspaces: rust
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y protobuf-compiler libssl-dev
|
||||||
|
- name: Run format
|
||||||
|
run: cargo fmt --all -- --check
|
||||||
|
- name: Run clippy
|
||||||
|
run: cargo clippy --all --all-features -- -D warnings
|
||||||
linux:
|
linux:
|
||||||
timeout-minutes: 30
|
timeout-minutes: 30
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-22.04
|
||||||
|
|||||||
Cargo.toml — 26 changes
@@ -5,24 +5,24 @@ exclude = ["python"]
 resolver = "2"
 
 [workspace.dependencies]
-lance = { "version" = "=0.9.0", "features" = ["dynamodb"] }
-lance-index = { "version" = "=0.9.0" }
-lance-linalg = { "version" = "=0.9.0" }
-lance-testing = { "version" = "=0.9.0" }
+lance = { "version" = "=0.9.1", "features" = ["dynamodb"] }
+lance-index = { "version" = "=0.9.1" }
+lance-linalg = { "version" = "=0.9.1" }
+lance-testing = { "version" = "=0.9.1" }
 # Note that this one does not include pyarrow
-arrow = { version = "47.0.0", optional = false }
-arrow-array = "47.0"
-arrow-data = "47.0"
-arrow-ipc = "47.0"
-arrow-ord = "47.0"
-arrow-schema = "47.0"
-arrow-arith = "47.0"
-arrow-cast = "47.0"
+arrow = { version = "49.0.0", optional = false }
+arrow-array = "49.0"
+arrow-data = "49.0"
+arrow-ipc = "49.0"
+arrow-ord = "49.0"
+arrow-schema = "49.0"
+arrow-arith = "49.0"
+arrow-cast = "49.0"
 chrono = "0.4.23"
 half = { "version" = "=2.3.1", default-features = false, features = [
     "num-traits",
 ] }
 log = "0.4"
-object_store = "0.7.1"
+object_store = "0.8.0"
 snafu = "0.7.4"
 url = "2"
@@ -64,18 +64,26 @@ We'll cover the basics of using LanceDB on your local machine in this section.
     tbl = db.create_table("table_from_df", data=df)
     ```
 
+    !!! warning
+
+        If the table already exists, LanceDB will raise an error by default.
+        If you want to overwrite the table, you can pass in `mode="overwrite"`
+        to the `createTable` function.
+
 === "Javascript"
     ```javascript
-    const tb = await db.createTable("my_table",
-      data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
+    const tb = await db.createTable(
+      "myTable",
+      [{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
       {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}])
     ```
 
     !!! warning
 
         If the table already exists, LanceDB will raise an error by default.
-        If you want to overwrite the table, you can pass in `mode="overwrite"`
-        to the `createTable` function.
+        If you want to overwrite the table, you can pass in `"overwrite"`
+        to the `createTable` function like this: `await con.createTable(tableName, data, { writeMode: WriteMode.Overwrite })`
+
 
 ??? info "Under the hood, LanceDB is converting the input data into an Apache Arrow table and persisting it to disk in [Lance format](https://www.github.com/lancedb/lance)."
 
@@ -108,7 +116,7 @@ Once created, you can open a table using the following code:
 
 === "Javascript"
     ```javascript
-    const tbl = await db.openTable("my_table");
+    const tbl = await db.openTable("myTable");
     ```
 
 If you forget the name of your table, you can always get a listing of all table names:
@@ -198,6 +206,13 @@ This permanently removes the table and is not recoverable, unlike deleting rows.
     By default, if the table does not exist an exception is raised. To suppress this,
     you can pass in `ignore_missing=True`.
 
+=== "JavaScript"
+    ```javascript
+    await db.dropTable('myTable')
+    ```
+
+    This permanently removes the table and is not recoverable, unlike deleting rows.
+    If the table does not exist an exception is raised.
 
 ## What's next
 
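Note: as a rough Python-side sketch of the create/overwrite/drop behaviour described in the guide above (the table name and path below are illustrative, not taken from the diff):

```python
import lancedb

db = lancedb.connect("data/sample-lancedb")  # illustrative local path

data = [
    {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
    {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
]

# First creation succeeds; creating the same table again raises by default.
tbl = db.create_table("my_table", data=data)

# mode="overwrite" replaces the existing table instead of raising.
tbl = db.create_table("my_table", data=data, mode="overwrite")

# Dropping is permanent; ignore_missing=True suppresses the error if the table is absent.
db.drop_table("my_table", ignore_missing=True)
```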
@@ -1,9 +1,9 @@
-There are various Embedding functions available out of the box with lancedb. We're working on supporting other popular embedding APIs.
+There are various Embedding functions available out of the box with LanceDB. We're working on supporting other popular embedding APIs.
 
 ## Text Embedding Functions
 Here are the text embedding functions registered by default.
-Embedding functions have inbuilt rate limit handler wrapper for source and query embedding function calls that retry with exponential standoff.
-Each `EmbeddingFunction` implementation automatically takes `max_retries` as an argument which has the deafult value of 7.
+Embedding functions have an inbuilt rate limit handler wrapper for source and query embedding function calls that retry with exponential standoff.
+Each `EmbeddingFunction` implementation automatically takes `max_retries` as an argument which has the default value of 7.
 
 ### Sentence Transformers
 Here are the parameters that you can set when registering a `sentence-transformers` object, and their default values:
@@ -69,15 +69,15 @@ print(actual.text)
 ```
 
 ### Instructor Embeddings
-Instructor is an instruction-finetuned text embedding model that can generate text embeddings tailored to any task (e.g., classification, retrieval, clustering, text evaluation, etc.) and domains (e.g., science, finance, etc.) by simply providing the task instruction, without any finetuning
+Instructor is an instruction-finetuned text embedding model that can generate text embeddings tailored to any task (e.g. classification, retrieval, clustering, text evaluation, etc.) and domains (e.g. science, finance, etc.) by simply providing the task instruction, without any finetuning.
 
 If you want to calculate customized embeddings for specific sentences, you may follow the unified template to write instructions:
 
 Represent the `domain` `text_type` for `task_objective`:
 
-* `domain` is optional, and it specifies the domain of the text, e.g., science, finance, medicine, etc.
-* `text_type` is required, and it specifies the encoding unit, e.g., sentence, document, paragraph, etc.
-* `task_objective` is optional, and it specifies the objective of embedding, e.g., retrieve a document, classify the sentence, etc.
+* `domain` is optional, and it specifies the domain of the text, e.g. science, finance, medicine, etc.
+* `text_type` is required, and it specifies the encoding unit, e.g. sentence, document, paragraph, etc.
+* `task_objective` is optional, and it specifies the objective of embedding, e.g. retrieve a document, classify the sentence, etc.
 
 More information about the model can be found here - https://github.com/xlang-ai/instructor-embedding
 
@@ -119,10 +119,10 @@ tbl.add(texts)
 ```
 
 ## Multi-modal embedding functions
-Multi-modal embedding functions allow you query your table using both images and text.
+Multi-modal embedding functions allow you to query your table using both images and text.
 
 ### OpenClipEmbeddings
-We support CLIP model embeddings using the open souce alternbative, open-clip which support various customizations. It is registered as `open-clip` and supports following customizations.
+We support CLIP model embeddings using the open source alternative, open-clip which supports various customizations. It is registered as `open-clip` and supports the following customizations:
 
 
 | Parameter | Type | Default Value | Description |
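Note: a minimal sketch of the `max_retries` argument mentioned in the embeddings doc above, assuming the registry-based API of this release; the model name is an illustrative assumption, not taken from the diff:

```python
from lancedb.embeddings import EmbeddingFunctionRegistry

registry = EmbeddingFunctionRegistry.get_instance()
# max_retries feeds the built-in rate-limit retry wrapper described above (default 7).
model = registry.get("sentence-transformers").create(
    name="all-MiniLM-L6-v2",  # assumed model name, for illustration only
    max_retries=7,
)
```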
@@ -203,7 +203,7 @@ This guide will show how to create tables, insert data into them, and update the
     ```javascript
     data
     const tb = await db.createTable("my_table",
-      data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
+      [{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
       {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}])
     ```
 
@@ -11,8 +11,13 @@ npm install vectordb
 ```
 
 This will download the appropriate native library for your platform. We currently
-support x86_64 Linux, aarch64 Linux, Intel MacOS, and ARM (M1/M2) MacOS. We do not
-yet support musl-based Linux (such as Alpine Linux).
+support:
+
+* Linux (x86_64 and aarch64)
+* MacOS (Intel and ARM/M1/M2)
+* Windows (x86_64 only)
+
+We do not yet support musl-based Linux (such as Alpine Linux) or aarch64 Windows.
 
 ## Usage
 
@@ -22,7 +22,7 @@ import numpy as np
     uri = "data/sample-lancedb"
     db = lancedb.connect(uri)
 
-    data = [{"vector": row, "item": f"item {i}"}
+    data = [{"vector": row, "item": f"item {i}", "id": i}
            for i, row in enumerate(np.random.random((10_000, 2)).astype('int'))]
 
     tbl = db.create_table("my_vectors", data=data)
@@ -35,33 +35,25 @@ const db = await vectordb.connect('data/sample-lancedb')
 
     let data = []
     for (let i = 0; i < 10_000; i++) {
-        data.push({vector: Array(1536).fill(i), id: `${i}`, content: "", longId: `${i}`},)
+        data.push({vector: Array(1536).fill(i), id: i, item: `item ${i}`, strId: `${i}`})
     }
-    const tbl = await db.createTable('my_vectors', data)
+    const tbl = await db.createTable('myVectors', data)
     ```
 -->
 === "Python"
 
     ```python
     tbl.search([100, 102]) \
-       .where("""(
-           (label IN [10, 20])
-           AND
-           (note.email IS NOT NULL)
-       ) OR NOT note.created
-       """)
+       .where("(item IN ('item 0', 'item 2')) AND (id > 10)") \
+       .to_arrow()
 
     ```
 
 === "Javascript"
 
     ```javascript
-    tbl.search([100, 102])
-       .where(`(
-           (label IN [10, 20])
-           AND
-           (note.email IS NOT NULL)
-       ) OR NOT note.created
-       `)
+    await tbl.search(Array(1536).fill(0))
+       .where("(item IN ('item 0', 'item 2')) AND (id > 10)")
+       .execute()
 
     ```
 
 
@@ -118,3 +110,22 @@ The mapping from SQL types to Arrow types is:
 
 [^1]: See precision mapping in previous table.
 
+
+## Filtering without Vector Search
+
+You can also filter your data without search.
+
+=== "Python"
+    ```python
+    tbl.search().where("id=10").limit(10).to_arrow()
+    ```
+
+=== "JavaScript"
+    ```javascript
+    await tbl.where('id=10').limit(10).execute()
+    ```
+
+!!! warning
+    If your table is large, this could potentially return a very large
+    amount of data. Please be sure to use a `limit` clause unless
+    you're sure you want to return the whole result set.
@@ -9,8 +9,13 @@ npm install vectordb
 ```
 
 This will download the appropriate native library for your platform. We currently
-support x86_64 Linux, aarch64 Linux, Intel MacOS, and ARM (M1/M2) MacOS. We do not
-yet support musl-based Linux (such as Alpine Linux).
+support:
+
+* Linux (x86_64 and aarch64)
+* MacOS (Intel and ARM/M1/M2)
+* Windows (x86_64 only)
+
+We do not yet support musl-based Linux (such as Alpine Linux) or aarch64 Windows.
 
 ## Usage
 
@@ -1,6 +1,6 @@
 {
   "name": "vectordb",
-  "version": "0.4.0",
+  "version": "0.4.1",
   "description": " Serverless, low-latency vector database for AI applications",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -81,10 +81,10 @@
     }
   },
   "optionalDependencies": {
-    "@lancedb/vectordb-darwin-arm64": "0.4.0",
-    "@lancedb/vectordb-darwin-x64": "0.4.0",
-    "@lancedb/vectordb-linux-arm64-gnu": "0.4.0",
-    "@lancedb/vectordb-linux-x64-gnu": "0.4.0",
-    "@lancedb/vectordb-win32-x64-msvc": "0.4.0"
+    "@lancedb/vectordb-darwin-arm64": "0.4.1",
+    "@lancedb/vectordb-darwin-x64": "0.4.1",
+    "@lancedb/vectordb-linux-arm64-gnu": "0.4.1",
+    "@lancedb/vectordb-linux-x64-gnu": "0.4.1",
+    "@lancedb/vectordb-win32-x64-msvc": "0.4.1"
   }
 }
@@ -24,7 +24,7 @@ import { isEmbeddingFunction } from './embedding/embedding_function'
 import { type Literal, toSQL } from './util'
 
 // eslint-disable-next-line @typescript-eslint/no-var-requires
-const { databaseNew, databaseTableNames, databaseOpenTable, databaseDropTable, tableCreate, tableAdd, tableCreateVectorIndex, tableCountRows, tableDelete, tableUpdate, tableCleanupOldVersions, tableCompactFiles, tableListIndices, tableIndexStats } = require('../native.js')
+const { databaseNew, databaseTableNames, databaseOpenTable, databaseDropTable, tableCreate, tableAdd, tableCreateScalarIndex, tableCreateVectorIndex, tableCountRows, tableDelete, tableUpdate, tableCleanupOldVersions, tableCompactFiles, tableListIndices, tableIndexStats } = require('../native.js')
 
 export { Query }
 export type { EmbeddingFunction }
@@ -223,6 +223,56 @@ export interface Table<T = number[]> {
    */
   createIndex: (indexParams: VectorIndexParams) => Promise<any>
 
+  /**
+   * Create a scalar index on this Table for the given column
+   *
+   * @param column The column to index
+   * @param replace If false, fail if an index already exists on the column
+   *
+   * Scalar indices, like vector indices, can be used to speed up scans. A scalar
+   * index can speed up scans that contain filter expressions on the indexed column.
+   * For example, the following scan will be faster if the column `my_col` has
+   * a scalar index:
+   *
+   * ```ts
+   * const con = await lancedb.connect('./.lancedb');
+   * const table = await con.openTable('images');
+   * const results = await table.where('my_col = 7').execute();
+   * ```
+   *
+   * Scalar indices can also speed up scans containing a vector search and a
+   * prefilter:
+   *
+   * ```ts
+   * const con = await lancedb.connect('././lancedb');
+   * const table = await con.openTable('images');
+   * const results = await table.search([1.0, 2.0]).where('my_col != 7').prefilter(true);
+   * ```
+   *
+   * Scalar indices can only speed up scans for basic filters using
+   * equality, comparison, range (e.g. `my_col BETWEEN 0 AND 100`), and set
+   * membership (e.g. `my_col IN (0, 1, 2)`)
+   *
+   * Scalar indices can be used if the filter contains multiple indexed columns and
+   * the filter criteria are AND'd or OR'd together
+   * (e.g. `my_col < 0 AND other_col> 100`)
+   *
+   * Scalar indices may be used if the filter contains non-indexed columns but,
+   * depending on the structure of the filter, they may not be usable. For example,
+   * if the column `not_indexed` does not have a scalar index then the filter
+   * `my_col = 0 OR not_indexed = 1` will not be able to use any scalar index on
+   * `my_col`.
+   *
+   * @examples
+   *
+   * ```ts
+   * const con = await lancedb.connect('././lancedb')
+   * const table = await con.openTable('images')
+   * await table.createScalarIndex('my_col')
+   * ```
+   */
+  createScalarIndex: (column: string, replace: boolean) => Promise<void>
+
   /**
    * Returns the number of rows in this table.
    */
@@ -281,8 +331,8 @@ export interface Table<T = number[]> {
   * const tbl = await con.createTable("my_table", data)
   *
   * await tbl.update({
-  *   filter: "id = 2",
-  *   updates: { vector: [2, 2], name: "Michael" },
+  *   where: "id = 2",
+  *   values: { vector: [2, 2], name: "Michael" },
   * })
   *
   * let results = await tbl.search([1, 1]).execute();
@@ -537,6 +587,10 @@ export class LocalTable<T = number[]> implements Table<T> {
     return tableCreateVectorIndex.call(this._tbl, indexParams).then((newTable: any) => { this._tbl = newTable })
   }
 
+  async createScalarIndex (column: string, replace: boolean): Promise<void> {
+    return tableCreateScalarIndex.call(this._tbl, column, replace)
+  }
+
   /**
    * Returns the number of rows in this table.
   */
@@ -57,8 +57,8 @@ export class RemoteConnection implements Connection {
     return 'db://' + this._client.uri
   }
 
-  async tableNames (): Promise<string[]> {
-    const response = await this._client.get('/v1/table/')
+  async tableNames (pageToken: string = '', limit: number = 10): Promise<string[]> {
+    const response = await this._client.get('/v1/table/', { limit, page_token: pageToken })
     return response.data.tables
   }
 
@@ -283,6 +283,10 @@ export class RemoteTable<T = number[]> implements Table<T> {
     }
   }
 
+  async createScalarIndex (column: string, replace: boolean): Promise<void> {
+    throw new Error('Not implemented')
+  }
+
   async countRows (): Promise<number> {
     const result = await this._client.post(`/v1/table/${this._name}/describe/`)
     return result.data?.stats?.num_rows
@@ -135,6 +135,17 @@ describe('LanceDB client', function () {
       assert.isTrue(results.length === 10)
     })
 
+    it('should allow creation and use of scalar indices', async function () {
+      const uri = await createTestDB(16, 300)
+      const con = await lancedb.connect(uri)
+      const table = await con.openTable('vectors')
+      await table.createScalarIndex('id', true)
+
+      // Prefiltering should still work the same
+      const results = await table.search(new Array(16).fill(0.1)).limit(10).filter('id >= 10').prefilter(true).execute()
+      assert.isTrue(results.length === 10)
+    })
+
     it('select only a subset of columns', async function () {
       const uri = await createTestDB()
       const con = await lancedb.connect(uri)
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.4.0
+current_version = 0.4.1
 commit = True
 message = [python] Bump version: {current_version} → {new_version}
 tag = True
@@ -23,7 +23,7 @@ from overrides import EnforceOverrides, override
 from pyarrow import fs
 
 from .table import LanceTable, Table
-from .util import fs_from_uri, get_uri_location, get_uri_scheme
+from .util import fs_from_uri, get_uri_location, get_uri_scheme, join_uri
 
 if TYPE_CHECKING:
     from .common import DATA, URI
@@ -288,14 +288,13 @@ class LanceDBConnection(DBConnection):
             A list of table names.
         """
         try:
-            filesystem, path = fs_from_uri(self.uri)
+            filesystem = fs_from_uri(self.uri)[0]
         except pa.ArrowInvalid:
             raise NotImplementedError("Unsupported scheme: " + self.uri)
 
         try:
-            paths = filesystem.get_file_info(
-                fs.FileSelector(get_uri_location(self.uri))
-            )
+            loc = get_uri_location(self.uri)
+            paths = filesystem.get_file_info(fs.FileSelector(loc))
         except FileNotFoundError:
             # It is ok if the file does not exist since it will be created
             paths = []
@@ -373,7 +372,7 @@ class LanceDBConnection(DBConnection):
         """
         try:
             filesystem, path = fs_from_uri(self.uri)
-            table_path = os.path.join(path, name + ".lance")
+            table_path = join_uri(path, name + ".lance")
            filesystem.delete_dir(table_path)
         except FileNotFoundError:
             if not ignore_missing:
@@ -64,6 +64,12 @@ class RemoteTable(Table):
         """to_pandas() is not supported on the LanceDB cloud"""
         return NotImplementedError("to_pandas() is not supported on the LanceDB cloud")
 
+    def create_scalar_index(self, *args, **kwargs):
+        """Creates a scalar index"""
+        return NotImplementedError(
+            "create_scalar_index() is not supported on the LanceDB cloud"
+        )
+
     def create_index(
         self,
         metric="L2",
@@ -23,6 +23,7 @@ import lance
 import numpy as np
 import pyarrow as pa
 import pyarrow.compute as pc
+import pyarrow.fs as pa_fs
 from lance import LanceDataset
 from lance.vector import vec_to_table
 
@@ -30,7 +31,7 @@ from .common import DATA, VEC, VECTOR_COLUMN_NAME
 from .embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry
 from .pydantic import LanceModel, model_to_dict
 from .query import LanceQueryBuilder, Query
-from .util import fs_from_uri, safe_import_pandas, value_to_sql
+from .util import fs_from_uri, safe_import_pandas, value_to_sql, join_uri
 from .utils.events import register_event
 
 if TYPE_CHECKING:
@@ -220,6 +221,77 @@ class Table(ABC):
         """
         raise NotImplementedError
 
+    @abstractmethod
+    def create_scalar_index(
+        self,
+        column: str,
+        *,
+        replace: bool = True,
+    ):
+        """Create a scalar index on a column.
+
+        Scalar indices, like vector indices, can be used to speed up scans. A scalar
+        index can speed up scans that contain filter expressions on the indexed column.
+        For example, the following scan will be faster if the column ``my_col`` has
+        a scalar index:
+
+        .. code-block:: python
+
+            import lancedb
+
+            db = lancedb.connect("/data/lance")
+            img_table = db.open_table("images")
+            my_df = img_table.search().where("my_col = 7", prefilter=True).to_pandas()
+
+        Scalar indices can also speed up scans containing a vector search and a
+        prefilter:
+
+        .. code-block::python
+
+            import lancedb
+
+            db = lancedb.connect("/data/lance")
+            img_table = db.open_table("images")
+            img_table.search([1, 2, 3, 4], vector_column_name="vector")
+                .where("my_col != 7", prefilter=True)
+                .to_pandas()
+
+        Scalar indices can only speed up scans for basic filters using
+        equality, comparison, range (e.g. ``my_col BETWEEN 0 AND 100``), and set
+        membership (e.g. `my_col IN (0, 1, 2)`)
+
+        Scalar indices can be used if the filter contains multiple indexed columns and
+        the filter criteria are AND'd or OR'd together
+        (e.g. ``my_col < 0 AND other_col> 100``)
+
+        Scalar indices may be used if the filter contains non-indexed columns but,
+        depending on the structure of the filter, they may not be usable. For example,
+        if the column ``not_indexed`` does not have a scalar index then the filter
+        ``my_col = 0 OR not_indexed = 1`` will not be able to use any scalar index on
+        ``my_col``.
+
+        **Experimental API**
+
+        Parameters
+        ----------
+        column : str
+            The column to be indexed. Must be a boolean, integer, float,
+            or string column.
+        replace : bool, default True
+            Replace the existing index if it exists.
+
+        Examples
+        --------
+
+        .. code-block:: python
+
+            import lance
+
+            dataset = lance.dataset("/tmp/images.lance")
+            dataset.create_scalar_index("category")
+        """
+        raise NotImplementedError
+
     @abstractmethod
     def add(
         self,
@@ -439,6 +511,7 @@ class Table(ABC):
         """
         raise NotImplementedError
 
+
 class LanceTable(Table):
     """
     A table in a LanceDB database.
@@ -606,7 +679,7 @@ class LanceTable(Table):
 
     @property
     def _dataset_uri(self) -> str:
-        return os.path.join(self._conn.uri, f"{self.name}.lance")
+        return join_uri(self._conn.uri, f"{self.name}.lance")
 
     def create_index(
         self,
@@ -632,7 +705,12 @@ class LanceTable(Table):
         self._reset_dataset()
         register_event("create_index")
 
-    def create_fts_index(self, field_names: Union[str, List[str]]):
+    def create_scalar_index(self, column: str, *, replace: bool = True):
+        self._dataset.create_scalar_index(column, index_type="BTREE", replace=replace)
+
+    def create_fts_index(
+        self, field_names: Union[str, List[str]], *, replace: bool = False
+    ):
         """Create a full-text search index on the table.
 
         Warning - this API is highly experimental and is highly likely to change
@@ -642,17 +720,31 @@ class LanceTable(Table):
         ----------
         field_names: str or list of str
             The name(s) of the field to index.
+        replace: bool, default False
+            If True, replace the existing index if it exists. Note that this is
+            not yet an atomic operation; the index will be temporarily
+            unavailable while the new index is being created.
         """
         from .fts import create_index, populate_index
 
         if isinstance(field_names, str):
             field_names = [field_names]
+
+        fs, path = fs_from_uri(self._get_fts_index_path())
+        index_exists = fs.get_file_info(path).type != pa_fs.FileType.NotFound
+        if index_exists:
+            if not replace:
+                raise ValueError(
+                    f"Index already exists. Use replace=True to overwrite."
+                )
+            fs.delete_dir(path)
+
         index = create_index(self._get_fts_index_path(), field_names)
         populate_index(index, self, field_names)
         register_event("create_fts_index")
 
     def _get_fts_index_path(self):
-        return os.path.join(self._dataset_uri, "_indices", "tantivy")
+        return join_uri(self._dataset_uri, "_indices", "tantivy")
 
     @cached_property
     def _dataset(self) -> LanceDataset:
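Note: taken together, the new Python APIs introduced in this file might be exercised roughly as follows (a sketch only; the path, table, and column names are illustrative assumptions, not from the diff):

```python
import lancedb

db = lancedb.connect("/tmp/lancedb")  # illustrative path
tbl = db.open_table("images")         # illustrative table name

# BTREE scalar index on a filter column; replace=True swaps out an existing index.
tbl.create_scalar_index("category", replace=True)

# The scalar index can then serve prefiltered scans and prefiltered vector searches.
df = tbl.search([1.0, 2.0]).where("category = 'cat'", prefilter=True).to_pandas()

# create_fts_index now accepts replace=; without it, a second call raises ValueError.
tbl.create_fts_index("text", replace=True)
```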
@@ -14,7 +14,8 @@
 import os
 from datetime import date, datetime
 from functools import singledispatch
-from typing import Tuple
+import pathlib
+from typing import Tuple, Union
 from urllib.parse import urlparse
 
 import numpy as np
@@ -62,6 +63,12 @@ def get_uri_location(uri: str) -> str:
         str: Location part of the URL, without scheme
     """
     parsed = urlparse(uri)
+    if len(parsed.scheme) == 1:
+        # Windows drive names are parsed as the scheme
+        # e.g. "c:\path" -> ParseResult(scheme="c", netloc="", path="/path", ...)
+        # So we add special handling here for schemes that are a single character
+        return uri
+
     if not parsed.netloc:
         return parsed.path
     else:
@@ -84,6 +91,29 @@ def fs_from_uri(uri: str) -> Tuple[pa_fs.FileSystem, str]:
     return pa_fs.FileSystem.from_uri(uri)
 
 
+def join_uri(base: Union[str, pathlib.Path], *parts: str) -> str:
+    """
+    Join a URI with multiple parts, handles both local and remote paths
+
+    Parameters
+    ----------
+    base : str
+        The base URI
+    parts : str
+        The parts to join to the base URI, each separated by the
+        appropriate path separator for the URI scheme and OS
+    """
+    if isinstance(base, pathlib.Path):
+        return base.joinpath(*parts)
+    base = str(base)
+    if get_uri_scheme(base) == "file":
+        # using pathlib for local paths make this windows compatible
+        # `get_uri_scheme` returns `file` for windows drive names (e.g. `c:\path`)
+        return str(pathlib.Path(base, *parts))
+    # for remote paths, just use os.path.join
+    return "/".join([p.rstrip("/") for p in [base, *parts]])
+
+
 def safe_import_pandas():
     try:
         import pandas as pd
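Note: for reference, the intended behaviour of the new `join_uri` helper (mirroring the tests added later in this change; the example inputs are illustrative):

```python
from lancedb.util import join_uri

# Remote URIs are joined with "/" regardless of the local OS separator.
join_uri("s3://bucket/path/to", "table.lance")   # -> "s3://bucket/path/to/table.lance"

# Local paths go through pathlib, so Windows drive paths such as "c:\\data" also work.
join_uri("/absolute/path", "table.lance")        # -> "/absolute/path/table.lance"
```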
@@ -1,9 +1,9 @@
 [project]
 name = "lancedb"
-version = "0.4.0"
+version = "0.4.1"
 dependencies = [
     "deprecation",
-    "pylance==0.9.0",
+    "pylance==0.9.1",
     "ratelimiter~=1.0",
     "retry>=0.9.2",
     "tqdm>=4.27.0",
@@ -83,6 +83,24 @@ def test_create_index_from_table(tmp_path, table):
     assert len(df) == 10
     assert "text" in df.columns
 
+    # Check whether it can be updated
+    table.add(
+        [
+            {
+                "vector": np.random.randn(128),
+                "text": "gorilla",
+                "text2": "gorilla",
+                "nested": {"text": "gorilla"},
+            }
+        ]
+    )
+
+    with pytest.raises(ValueError, match="already exists"):
+        table.create_fts_index("text")
+
+    table.create_fts_index("text", replace=True)
+    assert len(table.search("gorilla").limit(1).to_pandas()) == 1
+
 
 def test_create_index_multiple_columns(tmp_path, table):
     table.create_fts_index(["text", "text2"])
@@ -21,8 +21,8 @@ import lance
 import numpy as np
 import pandas as pd
 import pyarrow as pa
-from pydantic import BaseModel
 import pytest
+from pydantic import BaseModel
 
 from lancedb.conftest import MockTextEmbeddingFunction
 from lancedb.db import LanceDBConnection
@@ -532,6 +532,33 @@ def test_multiple_vector_columns(db):
     assert result1["text"].iloc[0] != result2["text"].iloc[0]
 
 
+def test_create_scalar_index(db):
+    vec_array = pa.array(
+        [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]], pa.list_(pa.float32(), 2)
+    )
+    test_data = pa.Table.from_pydict(
+        {"x": ["c", "b", "a", "e", "b"], "y": [1, 2, 3, 4, 5], "vector": vec_array}
+    )
+    table = LanceTable.create(
+        db,
+        "my_table",
+        data=test_data,
+    )
+    table.create_scalar_index("x")
+    indices = table.to_lance().list_indices()
+    assert len(indices) == 1
+    scalar_index = indices[0]
+    assert scalar_index["type"] == "Scalar"
+
+    # Confirm that prefiltering still works with the scalar index column
+    results = table.search().where("x = 'c'").to_arrow()
+    assert results == test_data.slice(0, 1)
+    results = table.search([5, 5]).to_arrow()
+    assert results["_distance"][0].as_py() == 0
+    results = table.search([5, 5]).where("x != 'b'").to_arrow()
+    assert results["_distance"][0].as_py() > 0
+
+
 def test_empty_query(db):
     table = LanceTable.create(
         db,
@@ -11,7 +11,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from lancedb.util import get_uri_scheme
+import os
+import pathlib
+
+import pytest
+
+from lancedb.util import get_uri_scheme, join_uri
 
 
 def test_normalize_uri():
@@ -28,3 +33,55 @@ def test_normalize_uri():
     for uri, expected_scheme in zip(uris, schemes):
         parsed_scheme = get_uri_scheme(uri)
         assert parsed_scheme == expected_scheme
+
+
+def test_join_uri_remote():
+    schemes = ["s3", "az", "gs"]
+    for scheme in schemes:
+        expected = f"{scheme}://bucket/path/to/table.lance"
+        base_uri = f"{scheme}://bucket/path/to/"
+        parts = ["table.lance"]
+        assert join_uri(base_uri, *parts) == expected
+
+        base_uri = f"{scheme}://bucket"
+        parts = ["path", "to", "table.lance"]
+        assert join_uri(base_uri, *parts) == expected
+
+
+# skip this test if on windows
+@pytest.mark.skipif(os.name == "nt", reason="Windows paths are not POSIX")
+def test_join_uri_posix():
+    for base in [
+        # relative path
+        "relative/path",
+        "relative/path/",
+        # an absolute path
+        "/absolute/path",
+        "/absolute/path/",
+        # a file URI
+        "file:///absolute/path",
+        "file:///absolute/path/",
+    ]:
+        joined = join_uri(base, "table.lance")
+        assert joined == str(pathlib.Path(base) / "table.lance")
+        joined = join_uri(pathlib.Path(base), "table.lance")
+        assert joined == pathlib.Path(base) / "table.lance"
+
+
+# skip this test if not on windows
+@pytest.mark.skipif(os.name != "nt", reason="Windows paths are not POSIX")
+def test_local_join_uri_windows():
+    # https://learn.microsoft.com/en-us/dotnet/standard/io/file-path-formats
+    for base in [
+        # windows relative path
+        "relative\\path",
+        "relative\\path\\",
+        # windows absolute path from current drive
+        "c:\\absolute\\path",
+        # relative path from root of current drive
+        "\\relative\\path",
+    ]:
+        joined = join_uri(base, "table.lance")
+        assert joined == str(pathlib.Path(base) / "table.lance")
+        joined = join_uri(pathlib.Path(base), "table.lance")
+        assert joined == pathlib.Path(base) / "table.lance"
@@ -1,6 +1,6 @@
 [package]
 name = "vectordb-node"
-version = "0.4.0"
+version = "0.4.1"
 description = "Serverless, low-latency vector database for AI applications"
 license = "Apache-2.0"
 edition = "2018"
@@ -23,7 +23,7 @@ pub enum Error {
     #[snafu(display("column '{name}' is missing"))]
     MissingColumn { name: String },
     #[snafu(display("{name}: {message}"))]
-    RangeError { name: String, message: String },
+    OutOfRange { name: String, message: String },
     #[snafu(display("{index_type} is not a valid index type"))]
     InvalidIndexType { index_type: String },
 
@@ -12,4 +12,5 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+pub mod scalar;
 pub mod vector;
rust/ffi/node/src/index/scalar.rs — new file, 43 lines
@@ -0,0 +1,43 @@
+// Copyright 2023 Lance Developers.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use neon::{
+    context::{Context, FunctionContext},
+    result::JsResult,
+    types::{JsBoolean, JsBox, JsPromise, JsString},
+};
+
+use crate::{error::ResultExt, runtime, table::JsTable};
+
+pub(crate) fn table_create_scalar_index(mut cx: FunctionContext) -> JsResult<JsPromise> {
+    let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
+    let column = cx.argument::<JsString>(0)?.value(&mut cx);
+    let replace = cx.argument::<JsBoolean>(1)?.value(&mut cx);
+
+    let rt = runtime(&mut cx)?;
+
+    let (deferred, promise) = cx.promise();
+    let channel = cx.channel();
+    let mut table = js_table.table.clone();
+
+    rt.spawn(async move {
+        let idx_result = table.create_scalar_index(&column, replace).await;
+
+        deferred.settle_with(&channel, move |mut cx| {
+            idx_result.or_throw(&mut cx)?;
+            Ok(cx.undefined())
+        });
+    });
+    Ok(promise)
+}
@@ -65,12 +65,10 @@ fn get_index_params_builder(
     obj.get_opt::<JsString, _, _>(cx, "index_name")?
         .map(|s| index_builder.index_name(s.value(cx)));
 
-    obj.get_opt::<JsString, _, _>(cx, "metric_type")?
-        .map(|s| MetricType::try_from(s.value(cx).as_str()))
-        .map(|mt| {
-            let metric_type = mt.unwrap();
-            index_builder.metric_type(metric_type);
-        });
+    if let Some(metric_type) = obj.get_opt::<JsString, _, _>(cx, "metric_type")? {
+        let metric_type = MetricType::try_from(metric_type.value(cx).as_str()).unwrap();
+        index_builder.metric_type(metric_type);
+    }
 
     let num_partitions = obj.get_opt_usize(cx, "num_partitions")?;
     let max_iters = obj.get_opt_usize(cx, "max_iters")?;
@@ -85,23 +83,29 @@ fn get_index_params_builder(
         index_builder.ivf_params(ivf_params)
     });
 
-    obj.get_opt::<JsBoolean, _, _>(cx, "use_opq")?
-        .map(|s| pq_params.use_opq = s.value(cx));
+    if let Some(use_opq) = obj.get_opt::<JsBoolean, _, _>(cx, "use_opq")? {
+        pq_params.use_opq = use_opq.value(cx);
+    }
 
-    obj.get_opt_usize(cx, "num_sub_vectors")?
-        .map(|s| pq_params.num_sub_vectors = s);
+    if let Some(num_sub_vectors) = obj.get_opt_usize(cx, "num_sub_vectors")? {
+        pq_params.num_sub_vectors = num_sub_vectors;
+    }
 
-    obj.get_opt_usize(cx, "num_bits")?
-        .map(|s| pq_params.num_bits = s);
+    if let Some(num_bits) = obj.get_opt_usize(cx, "num_bits")? {
+        pq_params.num_bits = num_bits;
+    }
 
-    obj.get_opt_usize(cx, "max_iters")?
-        .map(|s| pq_params.max_iters = s);
+    if let Some(max_iters) = obj.get_opt_usize(cx, "max_iters")? {
+        pq_params.max_iters = max_iters;
+    }
 
-    obj.get_opt_usize(cx, "max_opq_iters")?
-        .map(|s| pq_params.max_opq_iters = s);
+    if let Some(max_opq_iters) = obj.get_opt_usize(cx, "max_opq_iters")? {
+        pq_params.max_opq_iters = max_opq_iters;
+    }
 
-    obj.get_opt::<JsBoolean, _, _>(cx, "replace")?
-        .map(|s| index_builder.replace(s.value(cx)));
+    if let Some(replace) = obj.get_opt::<JsBoolean, _, _>(cx, "replace")? {
+        index_builder.replace(replace.value(cx));
+    }
 
     Ok(index_builder)
 }
@@ -242,6 +242,10 @@ fn main(mut cx: ModuleContext) -> NeonResult<()> {
     cx.export_function("tableCompactFiles", JsTable::js_compact)?;
     cx.export_function("tableListIndices", JsTable::js_list_indices)?;
     cx.export_function("tableIndexStats", JsTable::js_index_stats)?;
+    cx.export_function(
+        "tableCreateScalarIndex",
+        index::scalar::table_create_scalar_index,
+    )?;
     cx.export_function(
         "tableCreateVectorIndex",
         index::vector::table_create_vector_index,
|
|||||||
use conv::*;
|
use conv::*;
|
||||||
|
|
||||||
n.approx_as::<u32>().map_err(|e| match e {
|
n.approx_as::<u32>().map_err(|e| match e {
|
||||||
FloatError::NegOverflow(_) => Error::RangeError {
|
FloatError::NegOverflow(_) => Error::OutOfRange {
|
||||||
name: key.into(),
|
name: key.into(),
|
||||||
message: "must be > 0".to_string(),
|
message: "must be > 0".to_string(),
|
||||||
},
|
},
|
||||||
FloatError::PosOverflow(_) => Error::RangeError {
|
FloatError::PosOverflow(_) => Error::OutOfRange {
|
||||||
name: key.into(),
|
name: key.into(),
|
||||||
message: format!("must be < {}", u32::MAX),
|
message: format!("must be < {}", u32::MAX),
|
||||||
},
|
},
|
||||||
FloatError::NotANumber(_) => Error::RangeError {
|
FloatError::NotANumber(_) => Error::OutOfRange {
|
||||||
name: key.into(),
|
name: key.into(),
|
||||||
message: "not a valid number".to_string(),
|
message: "not a valid number".to_string(),
|
||||||
},
|
},
|
||||||
@@ -66,15 +66,15 @@ fn f64_to_usize_safe(n: f64, key: &str) -> Result<usize> {
     use conv::*;
 
     n.approx_as::<usize>().map_err(|e| match e {
-        FloatError::NegOverflow(_) => Error::RangeError {
+        FloatError::NegOverflow(_) => Error::OutOfRange {
             name: key.into(),
             message: "must be > 0".to_string(),
         },
-        FloatError::PosOverflow(_) => Error::RangeError {
+        FloatError::PosOverflow(_) => Error::OutOfRange {
             name: key.into(),
             message: format!("must be < {}", usize::MAX),
         },
-        FloatError::NotANumber(_) => Error::RangeError {
+        FloatError::NotANumber(_) => Error::OutOfRange {
             name: key.into(),
             message: "not a valid number".to_string(),
         },
@@ -25,11 +25,11 @@ impl JsQuery {
         let limit = query_obj
             .get_opt::<JsNumber, _, _>(&mut cx, "_limit")?
             .map(|value| {
-                let limit = value.value(&mut cx) as u64;
-                if limit <= 0 {
+                let limit = value.value(&mut cx);
+                if limit <= 0.0 {
                     panic!("Limit must be a positive integer");
                 }
-                limit
+                limit as u64
             });
         let select = query_obj
             .get_opt::<JsArray, _, _>(&mut cx, "_select")?
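Note: on the `u64` in the old code, `limit <= 0` can only ever mean `limit == 0`, and a negative JavaScript number has already been saturated to 0 by the `as u64` cast; checking the `f64` before casting, as the new code does, keeps the positivity check meaningful. A small sketch of that cast behavior (values are illustrative):

    fn cast_sketch() {
        // Rust float-to-integer `as` casts saturate and truncate toward zero,
        // so both a negative and a fractional limit collapse to 0 after the cast.
        assert_eq!((-5.0_f64) as u64, 0);
        assert_eq!((0.4_f64) as u64, 0);
    }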
@@ -73,7 +73,7 @@ impl JsQuery {
 
         rt.spawn(async move {
             let mut builder = table
-                .search(query.map(|q| Float32Array::from(q)))
+                .search(query.map(Float32Array::from))
                 .refine_factor(refine_factor)
                 .nprobes(nprobes)
                 .filter(filter)
@@ -45,7 +45,7 @@ impl JsTable {
         let table_name = cx.argument::<JsString>(0)?.value(&mut cx);
         let buffer = cx.argument::<JsBuffer>(1)?;
         let (batches, schema) =
-            arrow_buffer_to_record_batch(buffer.as_slice(&mut cx)).or_throw(&mut cx)?;
+            arrow_buffer_to_record_batch(buffer.as_slice(&cx)).or_throw(&mut cx)?;
 
         // Write mode
         let mode = match cx.argument::<JsString>(2)?.value(&mut cx).as_str() {
@@ -93,7 +93,7 @@ impl JsTable {
         let buffer = cx.argument::<JsBuffer>(0)?;
         let write_mode = cx.argument::<JsString>(1)?.value(&mut cx);
         let (batches, schema) =
-            arrow_buffer_to_record_batch(buffer.as_slice(&mut cx)).or_throw(&mut cx)?;
+            arrow_buffer_to_record_batch(buffer.as_slice(&cx)).or_throw(&mut cx)?;
         let rt = runtime(&mut cx)?;
         let channel = cx.channel();
         let mut table = js_table.table.clone();
@@ -186,7 +186,7 @@ impl JsTable {
                 .downcast_or_throw::<JsString, _>(&mut cx)?;
 
             let value = updates_arg
-                .get_value(&mut cx, property.clone())?
+                .get_value(&mut cx, property)?
                 .downcast_or_throw::<JsString, _>(&mut cx)?;
 
             let property = property.value(&mut cx);
@@ -216,7 +216,7 @@ impl JsTable {
                 .map(|(k, v)| (k.as_str(), v.as_str()))
                 .collect::<Vec<_>>();
 
-            let predicate = predicate.as_ref().map(|s| s.as_str());
+            let predicate = predicate.as_deref();
 
             let update_result = table.update(predicate, updates_arg).await;
             deferred.settle_with(&channel, move |mut cx| {
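Note: for an `Option<String>` the two spellings in this hunk are equivalent; `as_deref` is just the idiomatic form. A tiny sketch (the predicate string is illustrative):

    fn deref_sketch() {
        let predicate: Option<String> = Some("id > 10".to_string());
        // `as_deref` borrows the inner String as &str, exactly like the map over as_str.
        assert_eq!(predicate.as_deref(), predicate.as_ref().map(|s| s.as_str()));
    }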
@@ -1,6 +1,6 @@
 [package]
 name = "vectordb"
-version = "0.4.0"
+version = "0.4.1"
 edition = "2021"
 description = "LanceDB: A serverless, low-latency vector database for AI applications"
 license = "Apache-2.0"
@@ -26,7 +26,7 @@ use futures::{stream::BoxStream, FutureExt, StreamExt};
 use lance::io::object_store::WrappingObjectStore;
 use object_store::{
     path::Path, Error, GetOptions, GetResult, ListResult, MultipartId, ObjectMeta, ObjectStore,
-    Result,
+    PutOptions, PutResult, Result,
 };
 
 use async_trait::async_trait;
@@ -72,13 +72,28 @@ impl PrimaryOnly for Path {
 /// Note: this object store does not mirror writes to *.manifest files
 #[async_trait]
 impl ObjectStore for MirroringObjectStore {
-    async fn put(&self, location: &Path, bytes: Bytes) -> Result<()> {
+    async fn put(&self, location: &Path, bytes: Bytes) -> Result<PutResult> {
         if location.primary_only() {
             self.primary.put(location, bytes).await
         } else {
             self.secondary.put(location, bytes.clone()).await?;
-            self.primary.put(location, bytes).await?;
-            Ok(())
+            self.primary.put(location, bytes).await
+        }
+    }
+
+    async fn put_opts(
+        &self,
+        location: &Path,
+        bytes: Bytes,
+        options: PutOptions,
+    ) -> Result<PutResult> {
+        if location.primary_only() {
+            self.primary.put_opts(location, bytes, options).await
+        } else {
+            self.secondary
+                .put_opts(location, bytes.clone(), options.clone())
+                .await?;
+            self.primary.put_opts(location, bytes, options).await
         }
     }
 
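Note: both write paths above keep the same ordering, writing the secondary (mirror) store first and the primary only afterwards, so a successful primary write implies the mirrored copy already exists. A generic sketch of that ordering, independent of the `object_store` types (the helper name is hypothetical, not part of the diff):

    use std::future::Future;

    // Hypothetical helper: run the mirror write before the authoritative one so
    // readers of the primary never observe data that the mirror is missing.
    async fn mirrored_write<E>(
        write_secondary: impl Future<Output = Result<(), E>>,
        write_primary: impl Future<Output = Result<(), E>>,
    ) -> Result<(), E> {
        write_secondary.await?; // copy first
        write_primary.await // then the store that readers consult
    }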
@@ -129,8 +144,8 @@ impl ObjectStore for MirroringObjectStore {
         self.primary.delete(location).await
     }
 
-    async fn list(&self, prefix: Option<&Path>) -> Result<BoxStream<'_, Result<ObjectMeta>>> {
-        self.primary.list(prefix).await
+    fn list(&self, prefix: Option<&Path>) -> BoxStream<'_, Result<ObjectMeta>> {
+        self.primary.list(prefix)
     }
 
     async fn list_with_delimiter(&self, prefix: Option<&Path>) -> Result<ListResult> {
@@ -14,6 +14,7 @@
 
 use chrono::Duration;
 use lance::dataset::builder::DatasetBuilder;
+use lance::index::scalar::ScalarIndexParams;
 use lance_index::IndexType;
 use std::sync::Arc;
 
@@ -262,6 +263,16 @@ impl Table {
         Ok(())
     }
 
+    /// Create a scalar index on the table
+    pub async fn create_scalar_index(&mut self, column: &str, replace: bool) -> Result<()> {
+        let mut dataset = self.dataset.as_ref().clone();
+        let params = ScalarIndexParams::default();
+        dataset
+            .create_index(&[column], IndexType::Scalar, None, &params, replace)
+            .await?;
+        Ok(())
+    }
+
     pub async fn optimize_indices(&mut self) -> Result<()> {
         let mut dataset = self.dataset.as_ref().clone();
 
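Note: a minimal usage sketch of the new method, assuming an already-opened, mutable table; the helper name and the "id" column are illustrative only, and `Table`/`Result` refer to the types of the surrounding vectordb module:

    // Sketch only: build a scalar index on the "id" column, with `replace = true`
    // asking the build to overwrite an existing scalar index on that column.
    async fn index_id_column(table: &mut Table) -> Result<()> {
        table.create_scalar_index("id", true).await
    }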