Mirror of https://github.com/lancedb/lancedb.git (synced 2025-12-23 13:29:57 +00:00)

Compare commits: python-v0. ... python-v0. (42 commits)

04f962f6b0, 19e896ff69, 272e4103b2, 75c257ebb6, 9ee152eb42, c9ae1b1737,
89dc80c42a, 7b020ac799, 529e774bbb, 7c12239305, d83424d6b4, 8bf89f887c,
b2160b2304, 1bb82597be, e4eee38b3c, 64fc2be503, dc8054e90d, 1684940946,
695813463c, ed594b0f76, cee2b5ea42, f315f9665a, 5deb26bc8b, 3cc670ac38,
4ade3e31e2, a222d2cd91, 508e621f3d, a1a0472f3f, 3425a6d339, af54e0ce06,
089905fe8f, 554939e5d2, 7a13814922, e9f25f6a12, 419a433244, a9311c4dc0,
178bcf9c90, b9be092cb1, e8c0c52315, a60fa0d3b7, 726d629b9b, b493f56dee
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.19.0-beta.11"
+current_version = "0.19.1-beta.3"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
    (?P<minor>0|[1-9]\\d*)\\.
.github/workflows/python.yml (vendored): 1 line changed
@@ -228,6 +228,7 @@ jobs:
       - name: Install lancedb
         run: |
           pip install "pydantic<2"
+          pip install pyarrow==16
           pip install --extra-index-url https://pypi.fury.io/lancedb/ -e .[tests]
           pip install tantivy
       - name: Run tests
Cargo.lock (generated): 408 lines changed; diff suppressed because it is too large.
Cargo.toml: 16 lines changed
@@ -21,14 +21,14 @@ categories = ["database-implementations"]
 rust-version = "1.78.0"
 
 [workspace.dependencies]
-lance = { "version" = "=0.26.0", "features" = ["dynamodb"] }
-lance-io = "=0.26.0"
-lance-index = "=0.26.0"
-lance-linalg = "=0.26.0"
-lance-table = "=0.26.0"
-lance-testing = "=0.26.0"
-lance-datafusion = "=0.26.0"
-lance-encoding = "=0.26.0"
+lance = { "version" = "=0.27.0", "features" = ["dynamodb"], tag = "v0.27.0-beta.5", git="https://github.com/lancedb/lance.git" }
+lance-io = { version = "=0.27.0", tag = "v0.27.0-beta.5", git="https://github.com/lancedb/lance.git" }
+lance-index = { version = "=0.27.0", tag = "v0.27.0-beta.5", git="https://github.com/lancedb/lance.git" }
+lance-linalg = { version = "=0.27.0", tag = "v0.27.0-beta.5", git="https://github.com/lancedb/lance.git" }
+lance-table = { version = "=0.27.0", tag = "v0.27.0-beta.5", git="https://github.com/lancedb/lance.git" }
+lance-testing = { version = "=0.27.0", tag = "v0.27.0-beta.5", git="https://github.com/lancedb/lance.git" }
+lance-datafusion = { version = "=0.27.0", tag = "v0.27.0-beta.5", git="https://github.com/lancedb/lance.git" }
+lance-encoding = { version = "=0.27.0", tag = "v0.27.0-beta.5", git="https://github.com/lancedb/lance.git" }
 # Note that this one does not include pyarrow
 arrow = { version = "54.1", optional = false }
 arrow-array = "54.1"
@@ -33,20 +33,22 @@ Construct a MergeInsertBuilder. __Internal use only.__
 ### execute()
 
 ```ts
-execute(data): Promise<void>
+execute(data, execOptions?): Promise<MergeResult>
 ```
 
 Executes the merge insert operation
 
-Nothing is returned but the `Table` is updated
-
 #### Parameters
 
 * **data**: [`Data`](../type-aliases/Data.md)
 
+* **execOptions?**: `Partial`<[`WriteExecutionOptions`](../interfaces/WriteExecutionOptions.md)>
+
 #### Returns
 
-`Promise`<`void`>
+`Promise`<[`MergeResult`](../interfaces/MergeResult.md)>
 
+the merge result
 
 ***
 
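
A usage sketch of the updated `execute()` signature; the connection path, table name, and row values are hypothetical, while the builder methods and `MergeResult` fields come from this diff:

```ts
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("/tmp/sample-lancedb"); // hypothetical path
const table = await db.openTable("items"); // hypothetical table

// Merge on key "a"; enforce the timeout on every attempt, including the first.
const result = await table
  .mergeInsert("a")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute([{ a: 2, b: "x" }, { a: 4, b: "z" }], { timeoutMs: 10_000 });

// MergeResult reports what the merge did and the table version it produced.
console.log(result.numInsertedRows, result.numUpdatedRows, result.numDeletedRows, result.version);
```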
@@ -40,7 +40,7 @@ Returns the name of the table
 ### add()
 
 ```ts
-abstract add(data, options?): Promise<void>
+abstract add(data, options?): Promise<AddResult>
 ```
 
 Insert records into this Table.
@@ -54,14 +54,17 @@ Insert records into this Table.
 
 #### Returns
 
-`Promise`<`void`>
+`Promise`<[`AddResult`](../interfaces/AddResult.md)>
 
+A promise that resolves to an object
+containing the new version number of the table
+
 ***
 
 ### addColumns()
 
 ```ts
-abstract addColumns(newColumnTransforms): Promise<void>
+abstract addColumns(newColumnTransforms): Promise<AddColumnsResult>
 ```
 
 Add new columns with defined values.
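
A short sketch of the new `add()` return value (row contents hypothetical):

```ts
const addRes = await table.add([{ id: 1 }, { id: 2 }]);
// AddResult carries the table version created by this insert.
console.log(addRes.version);
```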
@@ -76,14 +79,17 @@ Add new columns with defined values.
 
 #### Returns
 
-`Promise`<`void`>
+`Promise`<[`AddColumnsResult`](../interfaces/AddColumnsResult.md)>
 
+A promise that resolves to an object
+containing the new version number of the table after adding the columns.
+
 ***
 
 ### alterColumns()
 
 ```ts
-abstract alterColumns(columnAlterations): Promise<void>
+abstract alterColumns(columnAlterations): Promise<AlterColumnsResult>
 ```
 
 Alter the name or nullability of columns.
@@ -96,7 +102,10 @@ Alter the name or nullability of columns.
 
 #### Returns
 
-`Promise`<`void`>
+`Promise`<[`AlterColumnsResult`](../interfaces/AlterColumnsResult.md)>
 
+A promise that resolves to an object
+containing the new version number of the table after altering the columns.
+
 ***
 
@@ -117,8 +126,8 @@ wish to return to standard mode, call `checkoutLatest`.
 
 #### Parameters
 
-* **version**: `number`
-  The version to checkout
+* **version**: `string` \| `number`
+  The version to checkout, could be version number or tag
 
 #### Returns
 
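
`checkout()` now accepts a tag name as well as a numeric version; a quick sketch (tag name hypothetical):

```ts
await table.checkout("v1");   // check out the version a tag points to
await table.checkout(3);      // or a numeric version, as before
await table.checkoutLatest(); // return to standard mode
```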
@@ -252,7 +261,7 @@ await table.createIndex("my_float_col");
 ### delete()
 
 ```ts
-abstract delete(predicate): Promise<void>
+abstract delete(predicate): Promise<DeleteResult>
 ```
 
 Delete the rows that satisfy the predicate.
@@ -263,7 +272,10 @@ Delete the rows that satisfy the predicate.
 
 #### Returns
 
-`Promise`<`void`>
+`Promise`<[`DeleteResult`](../interfaces/DeleteResult.md)>
 
+A promise that resolves to an object
+containing the new version number of the table
+
 ***
 
@@ -284,7 +296,7 @@ Return a brief description of the table
 ### dropColumns()
 
 ```ts
-abstract dropColumns(columnNames): Promise<void>
+abstract dropColumns(columnNames): Promise<DropColumnsResult>
 ```
 
 Drop one or more columns from the dataset
@@ -303,7 +315,10 @@ then call ``cleanup_files`` to remove the old files.
 
 #### Returns
 
-`Promise`<`void`>
+`Promise`<[`DropColumnsResult`](../interfaces/DropColumnsResult.md)>
 
+A promise that resolves to an object
+containing the new version number of the table after dropping the columns.
+
 ***
 
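
As with `add()`, the destructive write methods now report the version they created; a sketch for `delete()` and `dropColumns()` (predicate and column name hypothetical):

```ts
const delRes = await table.delete("id = 1");         // DeleteResult
console.log(delRes.version);                         // new table version

const dropRes = await table.dropColumns(["vector"]); // DropColumnsResult
console.log(dropRes.version);
```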
@@ -615,6 +630,50 @@ of the given query
 
 ***
 
+### stats()
+
+```ts
+abstract stats(): Promise<TableStatistics>
+```
+
+Returns table and fragment statistics
+
+#### Returns
+
+`Promise`<[`TableStatistics`](../interfaces/TableStatistics.md)>
+
+The table and fragment statistics
+
+***
+
+### tags()
+
+```ts
+abstract tags(): Promise<Tags>
+```
+
+Get a tags manager for this table.
+
+Tags allow you to label specific versions of a table with a human-readable name.
+The returned tags manager can be used to list, create, update, or delete tags.
+
+#### Returns
+
+`Promise`<[`Tags`](Tags.md)>
+
+A tags manager for this table
+
+#### Example
+
+```typescript
+const tagsManager = await table.tags();
+await tagsManager.create("v1", 1);
+const tags = await tagsManager.list();
+console.log(tags); // { "v1": { version: 1, manifestSize: ... } }
+```
+
+***
+
 ### toArrow()
 
 ```ts
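
A sketch of reading the new statistics; the field accesses follow the `TableStatistics`, `FragmentStatistics`, and `FragmentSummaryStats` interfaces added later in this diff:

```ts
const stats = await table.stats();
console.log(stats.numRows);                    // total rows in the table
console.log(stats.totalBytes);                 // total bytes in the table
console.log(stats.numIndices);                 // number of indices
console.log(stats.fragmentStats.numFragments); // fragment count
console.log(stats.fragmentStats.lengths.p50);  // median rows per fragment
```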
@@ -634,7 +693,7 @@ Return the table as an arrow table
 #### update(opts)
 
 ```ts
-abstract update(opts): Promise<void>
+abstract update(opts): Promise<UpdateResult>
 ```
 
 Update existing records in the Table
@@ -645,7 +704,10 @@ Update existing records in the Table
 
 ##### Returns
 
-`Promise`<`void`>
+`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>
 
+A promise that resolves to an object containing
+the number of rows updated and the new version number
+
 ##### Example
 
@@ -656,7 +718,7 @@ table.update({where:"x = 2", values:{"vector": [10, 10]}})
 #### update(opts)
 
 ```ts
-abstract update(opts): Promise<void>
+abstract update(opts): Promise<UpdateResult>
 ```
 
 Update existing records in the Table
@@ -667,7 +729,10 @@ Update existing records in the Table
 
 ##### Returns
 
-`Promise`<`void`>
+`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>
 
+A promise that resolves to an object containing
+the number of rows updated and the new version number
+
 ##### Example
 
@@ -678,7 +743,7 @@ table.update({where:"x = 2", valuesSql:{"x": "x + 1"}})
 #### update(updates, options)
 
 ```ts
-abstract update(updates, options?): Promise<void>
+abstract update(updates, options?): Promise<UpdateResult>
 ```
 
 Update existing records in the Table
@@ -701,10 +766,6 @@ repeatedly calilng this method.
 * **updates**: `Record`<`string`, `string`> \| `Map`<`string`, `string`>
 the
 columns to update
-Keys in the map should specify the name of the column to update.
-Values in the map provide the new value of the column. These can
-be SQL literal strings (e.g. "7" or "'foo'") or they can be expressions
-based on the row being updated (e.g. "my_col + 1")
 
 * **options?**: `Partial`<[`UpdateOptions`](../interfaces/UpdateOptions.md)>
 additional options to control
@@ -712,7 +773,15 @@ repeatedly calilng this method.
 
 ##### Returns
 
-`Promise`<`void`>
+`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>
 
+A promise that resolves to an object
+containing the number of rows updated and the new version number
+
+Keys in the map should specify the name of the column to update.
+Values in the map provide the new value of the column. These can
+be SQL literal strings (e.g. "7" or "'foo'") or they can be expressions
+based on the row being updated (e.g. "my_col + 1")
+
 ***
 
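
A sketch of the new `update()` result (column name and value hypothetical):

```ts
const updateRes = await table.update({ id: "7" }); // set column "id" to SQL literal 7
console.log(updateRes.rowsUpdated); // number of rows the update touched
console.log(updateRes.version);     // new table version
```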
docs/src/js/classes/TagContents.md (new file, 35 lines)
@@ -0,0 +1,35 @@
+[**@lancedb/lancedb**](../README.md) • **Docs**
+
+***
+
+[@lancedb/lancedb](../globals.md) / TagContents
+
+# Class: TagContents
+
+## Constructors
+
+### new TagContents()
+
+```ts
+new TagContents(): TagContents
+```
+
+#### Returns
+
+[`TagContents`](TagContents.md)
+
+## Properties
+
+### manifestSize
+
+```ts
+manifestSize: number;
+```
+
+***
+
+### version
+
+```ts
+version: number;
+```
docs/src/js/classes/Tags.md (new file, 99 lines)
@@ -0,0 +1,99 @@
+[**@lancedb/lancedb**](../README.md) • **Docs**
+
+***
+
+[@lancedb/lancedb](../globals.md) / Tags
+
+# Class: Tags
+
+## Constructors
+
+### new Tags()
+
+```ts
+new Tags(): Tags
+```
+
+#### Returns
+
+[`Tags`](Tags.md)
+
+## Methods
+
+### create()
+
+```ts
+create(tag, version): Promise<void>
+```
+
+#### Parameters
+
+* **tag**: `string`
+
+* **version**: `number`
+
+#### Returns
+
+`Promise`<`void`>
+
+***
+
+### delete()
+
+```ts
+delete(tag): Promise<void>
+```
+
+#### Parameters
+
+* **tag**: `string`
+
+#### Returns
+
+`Promise`<`void`>
+
+***
+
+### getVersion()
+
+```ts
+getVersion(tag): Promise<number>
+```
+
+#### Parameters
+
+* **tag**: `string`
+
+#### Returns
+
+`Promise`<`number`>
+
+***
+
+### list()
+
+```ts
+list(): Promise<Record<string, TagContents>>
+```
+
+#### Returns
+
+`Promise`<`Record`<`string`, [`TagContents`](TagContents.md)>>
+
+***
+
+### update()
+
+```ts
+update(tag, version): Promise<void>
+```
+
+#### Parameters
+
+* **tag**: `string`
+
+* **version**: `number`
+
+#### Returns
+
+`Promise`<`void`>
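
A sketch tying the `Tags` methods together (tag names and versions hypothetical):

```ts
const tags = await table.tags();
await tags.create("stable", 2);               // label version 2
console.log(await tags.getVersion("stable")); // 2
await tags.update("stable", 3);               // move the tag to version 3
await tags.delete("stable");                  // remove it
console.log(await tags.list());               // {} once all tags are gone
```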
@@ -27,19 +27,28 @@
 - [QueryBase](classes/QueryBase.md)
 - [RecordBatchIterator](classes/RecordBatchIterator.md)
 - [Table](classes/Table.md)
+- [TagContents](classes/TagContents.md)
+- [Tags](classes/Tags.md)
 - [VectorColumnOptions](classes/VectorColumnOptions.md)
 - [VectorQuery](classes/VectorQuery.md)
 
 ## Interfaces
 
+- [AddColumnsResult](interfaces/AddColumnsResult.md)
 - [AddColumnsSql](interfaces/AddColumnsSql.md)
 - [AddDataOptions](interfaces/AddDataOptions.md)
+- [AddResult](interfaces/AddResult.md)
+- [AlterColumnsResult](interfaces/AlterColumnsResult.md)
 - [ClientConfig](interfaces/ClientConfig.md)
 - [ColumnAlteration](interfaces/ColumnAlteration.md)
 - [CompactionStats](interfaces/CompactionStats.md)
 - [ConnectionOptions](interfaces/ConnectionOptions.md)
 - [CreateTableOptions](interfaces/CreateTableOptions.md)
+- [DeleteResult](interfaces/DeleteResult.md)
+- [DropColumnsResult](interfaces/DropColumnsResult.md)
 - [ExecutableQuery](interfaces/ExecutableQuery.md)
+- [FragmentStatistics](interfaces/FragmentStatistics.md)
+- [FragmentSummaryStats](interfaces/FragmentSummaryStats.md)
 - [FtsOptions](interfaces/FtsOptions.md)
 - [FullTextQuery](interfaces/FullTextQuery.md)
 - [FullTextSearchOptions](interfaces/FullTextSearchOptions.md)
@@ -50,6 +59,7 @@
 - [IndexStatistics](interfaces/IndexStatistics.md)
 - [IvfFlatOptions](interfaces/IvfFlatOptions.md)
 - [IvfPqOptions](interfaces/IvfPqOptions.md)
+- [MergeResult](interfaces/MergeResult.md)
 - [OpenTableOptions](interfaces/OpenTableOptions.md)
 - [OptimizeOptions](interfaces/OptimizeOptions.md)
 - [OptimizeStats](interfaces/OptimizeStats.md)
@@ -57,9 +67,12 @@
 - [RemovalStats](interfaces/RemovalStats.md)
 - [RetryConfig](interfaces/RetryConfig.md)
 - [TableNamesOptions](interfaces/TableNamesOptions.md)
+- [TableStatistics](interfaces/TableStatistics.md)
 - [TimeoutConfig](interfaces/TimeoutConfig.md)
 - [UpdateOptions](interfaces/UpdateOptions.md)
+- [UpdateResult](interfaces/UpdateResult.md)
 - [Version](interfaces/Version.md)
+- [WriteExecutionOptions](interfaces/WriteExecutionOptions.md)
 
 ## Type Aliases
 
docs/src/js/interfaces/AddColumnsResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
+[**@lancedb/lancedb**](../README.md) • **Docs**
+
+***
+
+[@lancedb/lancedb](../globals.md) / AddColumnsResult
+
+# Interface: AddColumnsResult
+
+## Properties
+
+### version
+
+```ts
+version: number;
+```
docs/src/js/interfaces/AddResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
+[**@lancedb/lancedb**](../README.md) • **Docs**
+
+***
+
+[@lancedb/lancedb](../globals.md) / AddResult
+
+# Interface: AddResult
+
+## Properties
+
+### version
+
+```ts
+version: number;
+```
docs/src/js/interfaces/AlterColumnsResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
+[**@lancedb/lancedb**](../README.md) • **Docs**
+
+***
+
+[@lancedb/lancedb](../globals.md) / AlterColumnsResult
+
+# Interface: AlterColumnsResult
+
+## Properties
+
+### version
+
+```ts
+version: number;
+```
docs/src/js/interfaces/DeleteResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
+[**@lancedb/lancedb**](../README.md) • **Docs**
+
+***
+
+[@lancedb/lancedb](../globals.md) / DeleteResult
+
+# Interface: DeleteResult
+
+## Properties
+
+### version
+
+```ts
+version: number;
+```
docs/src/js/interfaces/DropColumnsResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
+[**@lancedb/lancedb**](../README.md) • **Docs**
+
+***
+
+[@lancedb/lancedb](../globals.md) / DropColumnsResult
+
+# Interface: DropColumnsResult
+
+## Properties
+
+### version
+
+```ts
+version: number;
+```
docs/src/js/interfaces/FragmentStatistics.md (new file, 37 lines)
@@ -0,0 +1,37 @@
+[**@lancedb/lancedb**](../README.md) • **Docs**
+
+***
+
+[@lancedb/lancedb](../globals.md) / FragmentStatistics
+
+# Interface: FragmentStatistics
+
+## Properties
+
+### lengths
+
+```ts
+lengths: FragmentSummaryStats;
+```
+
+Statistics on the number of rows in the table fragments
+
+***
+
+### numFragments
+
+```ts
+numFragments: number;
+```
+
+The number of fragments in the table
+
+***
+
+### numSmallFragments
+
+```ts
+numSmallFragments: number;
+```
+
+The number of uncompacted fragments in the table
docs/src/js/interfaces/FragmentSummaryStats.md (new file, 77 lines)
@@ -0,0 +1,77 @@
+[**@lancedb/lancedb**](../README.md) • **Docs**
+
+***
+
+[@lancedb/lancedb](../globals.md) / FragmentSummaryStats
+
+# Interface: FragmentSummaryStats
+
+## Properties
+
+### max
+
+```ts
+max: number;
+```
+
+The number of rows in the fragment with the most rows
+
+***
+
+### mean
+
+```ts
+mean: number;
+```
+
+The mean number of rows in the fragments
+
+***
+
+### min
+
+```ts
+min: number;
+```
+
+The number of rows in the fragment with the fewest rows
+
+***
+
+### p25
+
+```ts
+p25: number;
+```
+
+The 25th percentile of number of rows in the fragments
+
+***
+
+### p50
+
+```ts
+p50: number;
+```
+
+The 50th percentile of number of rows in the fragments
+
+***
+
+### p75
+
+```ts
+p75: number;
+```
+
+The 75th percentile of number of rows in the fragments
+
+***
+
+### p99
+
+```ts
+p99: number;
+```
+
+The 99th percentile of number of rows in the fragments
docs/src/js/interfaces/MergeResult.md (new file, 39 lines)
@@ -0,0 +1,39 @@
+[**@lancedb/lancedb**](../README.md) • **Docs**
+
+***
+
+[@lancedb/lancedb](../globals.md) / MergeResult
+
+# Interface: MergeResult
+
+## Properties
+
+### numDeletedRows
+
+```ts
+numDeletedRows: number;
+```
+
+***
+
+### numInsertedRows
+
+```ts
+numInsertedRows: number;
+```
+
+***
+
+### numUpdatedRows
+
+```ts
+numUpdatedRows: number;
+```
+
+***
+
+### version
+
+```ts
+version: number;
+```
docs/src/js/interfaces/TableStatistics.md (new file, 47 lines)
@@ -0,0 +1,47 @@
+[**@lancedb/lancedb**](../README.md) • **Docs**
+
+***
+
+[@lancedb/lancedb](../globals.md) / TableStatistics
+
+# Interface: TableStatistics
+
+## Properties
+
+### fragmentStats
+
+```ts
+fragmentStats: FragmentStatistics;
+```
+
+Statistics on table fragments
+
+***
+
+### numIndices
+
+```ts
+numIndices: number;
+```
+
+The number of indices in the table
+
+***
+
+### numRows
+
+```ts
+numRows: number;
+```
+
+The number of rows in the table
+
+***
+
+### totalBytes
+
+```ts
+totalBytes: number;
+```
+
+The total number of bytes in the table
docs/src/js/interfaces/UpdateResult.md (new file, 23 lines)
@@ -0,0 +1,23 @@
+[**@lancedb/lancedb**](../README.md) • **Docs**
+
+***
+
+[@lancedb/lancedb](../globals.md) / UpdateResult
+
+# Interface: UpdateResult
+
+## Properties
+
+### rowsUpdated
+
+```ts
+rowsUpdated: number;
+```
+
+***
+
+### version
+
+```ts
+version: number;
+```
docs/src/js/interfaces/WriteExecutionOptions.md (new file, 26 lines)
@@ -0,0 +1,26 @@
+[**@lancedb/lancedb**](../README.md) • **Docs**
+
+***
+
+[@lancedb/lancedb](../globals.md) / WriteExecutionOptions
+
+# Interface: WriteExecutionOptions
+
+## Properties
+
+### timeoutMs?
+
+```ts
+optional timeoutMs: number;
+```
+
+Maximum time to run the operation before cancelling it.
+
+By default, there is a 30-second timeout that is only enforced after the
+first attempt. This is to prevent spending too long retrying to resolve
+conflicts. For example, if a write attempt takes 20 seconds and fails,
+the second attempt will be cancelled after 10 seconds, hitting the
+30-second timeout. However, a write that takes one hour and succeeds on the
+first attempt will not be cancelled.
+
+When this is set, the timeout is enforced on all attempts, including the first.
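
The effect is easiest to see at the extreme; this sketch mirrors the timeout test added later in this diff (table and data names hypothetical):

```ts
// A zero timeout cancels every attempt immediately, so the merge rejects.
await expect(
  table
    .mergeInsert("a")
    .whenMatchedUpdateAll()
    .whenNotMatchedInsertAll()
    .execute(newData, { timeoutMs: 0 }),
).rejects.toThrow("merge insert timed out");
```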
@@ -8,7 +8,7 @@
   <parent>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-parent</artifactId>
-    <version>0.19.0-beta.11</version>
+    <version>0.19.1-beta.3</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -6,7 +6,7 @@
 
   <groupId>com.lancedb</groupId>
   <artifactId>lancedb-parent</artifactId>
-  <version>0.19.0-beta.11</version>
+  <version>0.19.1-beta.3</version>
   <packaging>pom</packaging>
 
   <name>LanceDB Parent</name>
node/package-lock.json (generated): 44 lines changed
@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.19.0-beta.11",
+  "version": "0.19.1-beta.3",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.19.0-beta.11",
+      "version": "0.19.1-beta.3",
       "cpu": [
         "x64",
         "arm64"
@@ -52,11 +52,11 @@
         "uuid": "^9.0.0"
       },
       "optionalDependencies": {
-        "@lancedb/vectordb-darwin-arm64": "0.19.0-beta.11",
-        "@lancedb/vectordb-darwin-x64": "0.19.0-beta.11",
-        "@lancedb/vectordb-linux-arm64-gnu": "0.19.0-beta.11",
-        "@lancedb/vectordb-linux-x64-gnu": "0.19.0-beta.11",
-        "@lancedb/vectordb-win32-x64-msvc": "0.19.0-beta.11"
+        "@lancedb/vectordb-darwin-arm64": "0.19.1-beta.3",
+        "@lancedb/vectordb-darwin-x64": "0.19.1-beta.3",
+        "@lancedb/vectordb-linux-arm64-gnu": "0.19.1-beta.3",
+        "@lancedb/vectordb-linux-x64-gnu": "0.19.1-beta.3",
+        "@lancedb/vectordb-win32-x64-msvc": "0.19.1-beta.3"
       },
       "peerDependencies": {
         "@apache-arrow/ts": "^14.0.2",
@@ -327,9 +327,9 @@
       }
     },
     "node_modules/@lancedb/vectordb-darwin-arm64": {
-      "version": "0.19.0-beta.11",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.19.0-beta.11.tgz",
-      "integrity": "sha512-fLefGJYdlIRIjrJj8MU1r5Zix5LpKktpCYilA7tZrfvBWkubGceJ+U6RPsWz7VGBfWcETo3g5CBooUPhbtSMlQ==",
+      "version": "0.19.1-beta.3",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.19.1-beta.3.tgz",
+      "integrity": "sha512-TglTNkvgxxHHhh8YbEwj5t9XuInNVUNeFN34Zyk+7ab/rDdMASiKv6ZvDkwacVm7aXeBbLw39/6+IegStJfFCg==",
       "cpu": [
         "arm64"
       ],
@@ -340,9 +340,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-darwin-x64": {
-      "version": "0.19.0-beta.11",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.19.0-beta.11.tgz",
-      "integrity": "sha512-FkCa1TbPLDXAGhlRI4tafcltzApCsyvgi+I+kX07u5DKPNQVALpQ3R6X6GLlbiFsAFBdyv9t2fqQ9DlgjJIZpA==",
+      "version": "0.19.1-beta.3",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.19.1-beta.3.tgz",
+      "integrity": "sha512-mwBbOVgeUT3xyegzga0gTBJ+DXI3dP1zPKcOQRQDRJk+GkfHk1CblGXT3h/YL18NWfR1FGMe9s59PNJR6r6l8A==",
      "cpu": [
         "x64"
       ],
@@ -353,9 +353,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
-      "version": "0.19.0-beta.11",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.19.0-beta.11.tgz",
-      "integrity": "sha512-iZkL/01HNUNQ8pGK0+hoNyrM7P1YtShsyIQVzJMfo41SAofCBf9qvi9YaYyd49sDb+dQXeRn1+cfaJ9siz1OHw==",
+      "version": "0.19.1-beta.3",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.19.1-beta.3.tgz",
+      "integrity": "sha512-amihspQ5ThSKRJsPpeAte/edWDGAN5ZjdqhtX8AUuuOmoJ5EekfsgXZc+fyFNwl6RzGT7PKqpL7SQzOdLKMijQ==",
       "cpu": [
         "arm64"
       ],
@@ -366,9 +366,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-linux-x64-gnu": {
-      "version": "0.19.0-beta.11",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.19.0-beta.11.tgz",
-      "integrity": "sha512-MdKRHxe2tRQqmExNLv3f6Wvx1mEi98eFtD0ysm4tNrQdaS1MJbTp+DUehrRKkfDWsooalHkIi9d02BVw5qseUQ==",
+      "version": "0.19.1-beta.3",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.19.1-beta.3.tgz",
+      "integrity": "sha512-mZzOETBii+UUu7D2TOohhukXNjjOfldbNADRB20FF2a3hYzrVteiFudCQRYtbVunpHE0qvNRTkyuRqM7DwOygw==",
       "cpu": [
         "x64"
       ],
@@ -379,9 +379,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-win32-x64-msvc": {
-      "version": "0.19.0-beta.11",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.19.0-beta.11.tgz",
-      "integrity": "sha512-KWy+t9jr0feJAW9NkmM/w9kfdpp78+7mkeh9lb0g3xI3OdYU1yizNqFjbIQqJf7/L4sou4wmOjAC+FcP8qCtzg==",
+      "version": "0.19.1-beta.3",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.19.1-beta.3.tgz",
+      "integrity": "sha512-LHsKFtJZRRZ4MVa6uSeWqPJ9vfw0atmp6bvVDByxgouVN4CwdlnAxOu69YJtwDPxnfg8Pn+eQ5txIFvhFtCAnA==",
       "cpu": [
         "x64"
       ],
@@ -1,6 +1,6 @@
 {
   "name": "vectordb",
-  "version": "0.19.0-beta.11",
+  "version": "0.19.1-beta.3",
   "description": " Serverless, low-latency vector database for AI applications",
   "private": false,
   "main": "dist/index.js",
@@ -89,10 +89,10 @@
     }
   },
   "optionalDependencies": {
-    "@lancedb/vectordb-darwin-x64": "0.19.0-beta.11",
-    "@lancedb/vectordb-darwin-arm64": "0.19.0-beta.11",
-    "@lancedb/vectordb-linux-x64-gnu": "0.19.0-beta.11",
-    "@lancedb/vectordb-linux-arm64-gnu": "0.19.0-beta.11",
-    "@lancedb/vectordb-win32-x64-msvc": "0.19.0-beta.11"
+    "@lancedb/vectordb-darwin-x64": "0.19.1-beta.3",
+    "@lancedb/vectordb-darwin-arm64": "0.19.1-beta.3",
+    "@lancedb/vectordb-linux-x64-gnu": "0.19.1-beta.3",
+    "@lancedb/vectordb-linux-arm64-gnu": "0.19.1-beta.3",
+    "@lancedb/vectordb-win32-x64-msvc": "0.19.1-beta.3"
   }
 }
@@ -1,7 +1,7 @@
 [package]
 name = "lancedb-nodejs"
 edition.workspace = true
-version = "0.19.0-beta.11"
+version = "0.19.1-beta.3"
 license.workspace = true
 description.workspace = true
 repository.workspace = true
@@ -374,6 +374,71 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
     expect(table2.numRows).toBe(4);
     expect(table2.schema).toEqual(schema);
   });
+
+  it("should correctly retain values in nested struct fields", async function () {
+    // Define test data with nested struct
+    const testData = [
+      {
+        id: "doc1",
+        vector: [1, 2, 3],
+        metadata: {
+          filePath: "/path/to/file1.ts",
+          startLine: 10,
+          endLine: 20,
+          text: "function test() { return true; }",
+        },
+      },
+      {
+        id: "doc2",
+        vector: [4, 5, 6],
+        metadata: {
+          filePath: "/path/to/file2.ts",
+          startLine: 30,
+          endLine: 40,
+          text: "function test2() { return false; }",
+        },
+      },
+    ];
+
+    // Create Arrow table from the data
+    const table = makeArrowTable(testData);
+
+    // Verify schema has the nested struct fields
+    const metadataField = table.schema.fields.find(
+      (f) => f.name === "metadata",
+    );
+    expect(metadataField).toBeDefined();
+    // biome-ignore lint/suspicious/noExplicitAny: accessing fields in different Arrow versions
+    const childNames = metadataField?.type.children.map((c: any) => c.name);
+    expect(childNames).toEqual([
+      "filePath",
+      "startLine",
+      "endLine",
+      "text",
+    ]);
+
+    // Convert to buffer and back (simulating storage and retrieval)
+    const buf = await fromTableToBuffer(table);
+    const retrievedTable = tableFromIPC(buf);
+
+    // Verify the retrieved table has the same structure
+    const rows = [];
+    for (let i = 0; i < retrievedTable.numRows; i++) {
+      rows.push(retrievedTable.get(i));
+    }
+
+    // Check values in the first row
+    const firstRow = rows[0];
+    expect(firstRow.id).toBe("doc1");
+    expect(firstRow.vector.toJSON()).toEqual([1, 2, 3]);
+
+    // Verify metadata values are preserved (this is where the bug is)
+    expect(firstRow.metadata).toBeDefined();
+    expect(firstRow.metadata.filePath).toBe("/path/to/file1.ts");
+    expect(firstRow.metadata.startLine).toBe(10);
+    expect(firstRow.metadata.endLine).toBe(20);
+    expect(firstRow.metadata.text).toBe("function test() { return true; }");
+  });
 });
 
 class DummyEmbedding extends EmbeddingFunction<string> {
@@ -34,6 +34,7 @@ import {
 } from "../lancedb/embedding";
 import { Index } from "../lancedb/indices";
 import { instanceOfFullTextQuery } from "../lancedb/query";
+import exp = require("constants");
 
 describe.each([arrow15, arrow16, arrow17, arrow18])(
   "Given a table",
@@ -71,8 +72,33 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
     await expect(table.countRows()).resolves.toBe(3);
   });
 
-  it("should overwrite data if asked", async () => {
+  it("should show table stats", async () => {
     await table.add([{ id: 1 }, { id: 2 }]);
+    await table.add([{ id: 1 }]);
+    await expect(table.stats()).resolves.toEqual({
+      fragmentStats: {
+        lengths: {
+          max: 2,
+          mean: 1,
+          min: 1,
+          p25: 1,
+          p50: 2,
+          p75: 2,
+          p99: 2,
+        },
+        numFragments: 2,
+        numSmallFragments: 2,
+      },
+      numIndices: 0,
+      numRows: 3,
+      totalBytes: 24,
+    });
+  });
+
+  it("should overwrite data if asked", async () => {
+    const addRes = await table.add([{ id: 1 }, { id: 2 }]);
+    expect(addRes).toHaveProperty("version");
+    expect(addRes.version).toBe(2);
     await table.add([{ id: 1 }], { mode: "overwrite" });
     await expect(table.countRows()).resolves.toBe(1);
   });
@@ -88,7 +114,11 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
     await table.add([{ id: 1 }]);
     expect(await table.countRows("id == 1")).toBe(1);
     expect(await table.countRows("id == 7")).toBe(0);
-    await table.update({ id: "7" });
+    const updateRes = await table.update({ id: "7" });
+    expect(updateRes).toHaveProperty("version");
+    expect(updateRes.version).toBe(3);
+    expect(updateRes).toHaveProperty("rowsUpdated");
+    expect(updateRes.rowsUpdated).toBe(1);
     expect(await table.countRows("id == 1")).toBe(0);
     expect(await table.countRows("id == 7")).toBe(1);
     await table.add([{ id: 2 }]);
@@ -315,11 +345,17 @@ describe("merge insert", () => {
       { a: 3, b: "y" },
       { a: 4, b: "z" },
     ];
-    await table
+    const mergeInsertRes = await table
       .mergeInsert("a")
       .whenMatchedUpdateAll()
       .whenNotMatchedInsertAll()
-      .execute(newData);
+      .execute(newData, { timeoutMs: 10_000 });
+    expect(mergeInsertRes).toHaveProperty("version");
+    expect(mergeInsertRes.version).toBe(2);
+    expect(mergeInsertRes.numInsertedRows).toBe(1);
+    expect(mergeInsertRes.numUpdatedRows).toBe(2);
+    expect(mergeInsertRes.numDeletedRows).toBe(0);
 
     const expected = [
       { a: 1, b: "a" },
       { a: 2, b: "x" },
@@ -337,10 +373,12 @@ describe("merge insert", () => {
       { a: 3, b: "y" },
       { a: 4, b: "z" },
     ];
-    await table
+    const mergeInsertRes = await table
       .mergeInsert("a")
       .whenMatchedUpdateAll({ where: "target.b = 'b'" })
       .execute(newData);
+    expect(mergeInsertRes).toHaveProperty("version");
+    expect(mergeInsertRes.version).toBe(2);
 
     const expected = [
       { a: 1, b: "a" },
@@ -425,6 +463,20 @@ describe("merge insert", () => {
     res = res.sort((a, b) => a.a - b.a);
     expect(res).toEqual(expected);
   });
+
+  test("timeout", async () => {
+    const newData = [
+      { a: 2, b: "x" },
+      { a: 4, b: "z" },
+    ];
+    await expect(
+      table
+        .mergeInsert("a")
+        .whenMatchedUpdateAll()
+        .whenNotMatchedInsertAll()
+        .execute(newData, { timeoutMs: 0 }),
+    ).rejects.toThrow("merge insert timed out");
+  });
 });
 
 describe("When creating an index", () => {
@@ -1000,15 +1052,19 @@ describe("schema evolution", function () {
       { id: 1n, vector: [0.1, 0.2] },
     ]);
     // Can create a non-nullable column only through addColumns at the moment.
-    await table.addColumns([
+    const addColumnsRes = await table.addColumns([
       { name: "price", valueSql: "cast(10.0 as double)" },
     ]);
+    expect(addColumnsRes).toHaveProperty("version");
+    expect(addColumnsRes.version).toBe(2);
     expect(await table.schema()).toEqual(schema);
 
-    await table.alterColumns([
+    const alterColumnsRes = await table.alterColumns([
       { path: "id", rename: "new_id" },
       { path: "price", nullable: true },
     ]);
+    expect(alterColumnsRes).toHaveProperty("version");
+    expect(alterColumnsRes.version).toBe(3);
 
     const expectedSchema = new Schema([
       new Field("new_id", new Int64(), true),
@@ -1126,7 +1182,9 @@ describe("schema evolution", function () {
     const table = await con.createTable("vectors", [
       { id: 1n, vector: [0.1, 0.2] },
    ]);
-    await table.dropColumns(["vector"]);
+    const dropColumnsRes = await table.dropColumns(["vector"]);
+    expect(dropColumnsRes).toHaveProperty("version");
+    expect(dropColumnsRes.version).toBe(2);
 
     const expectedSchema = new Schema([new Field("id", new Int64(), true)]);
     expect(await table.schema()).toEqual(expectedSchema);
@@ -1178,6 +1236,99 @@ describe("when dealing with versioning", () => {
   });
 });
+
+describe("when dealing with tags", () => {
+  let tmpDir: tmp.DirResult;
+  beforeEach(() => {
+    tmpDir = tmp.dirSync({ unsafeCleanup: true });
+  });
+  afterEach(() => {
+    tmpDir.removeCallback();
+  });
+
+  it("can manage tags", async () => {
+    const conn = await connect(tmpDir.name, {
+      readConsistencyInterval: 0,
+    });
+
+    const table = await conn.createTable("my_table", [
+      { id: 1n, vector: [0.1, 0.2] },
+    ]);
+    expect(await table.version()).toBe(1);
+
+    await table.add([{ id: 2n, vector: [0.3, 0.4] }]);
+    expect(await table.version()).toBe(2);
+
+    const tagsManager = await table.tags();
+
+    const initialTags = await tagsManager.list();
+    expect(Object.keys(initialTags).length).toBe(0);
+
+    const tag1 = "tag1";
+    await tagsManager.create(tag1, 1);
+    expect(await tagsManager.getVersion(tag1)).toBe(1);
+
+    const tagsAfterFirst = await tagsManager.list();
+    expect(Object.keys(tagsAfterFirst).length).toBe(1);
+    expect(tagsAfterFirst).toHaveProperty(tag1);
+    expect(tagsAfterFirst[tag1].version).toBe(1);
+
+    await tagsManager.create("tag2", 2);
+    expect(await tagsManager.getVersion("tag2")).toBe(2);
+
+    const tagsAfterSecond = await tagsManager.list();
+    expect(Object.keys(tagsAfterSecond).length).toBe(2);
+    expect(tagsAfterSecond).toHaveProperty(tag1);
+    expect(tagsAfterSecond[tag1].version).toBe(1);
+    expect(tagsAfterSecond).toHaveProperty("tag2");
+    expect(tagsAfterSecond["tag2"].version).toBe(2);
+
+    await table.add([{ id: 3n, vector: [0.5, 0.6] }]);
+    await tagsManager.update(tag1, 3);
+    expect(await tagsManager.getVersion(tag1)).toBe(3);
+
+    await tagsManager.delete("tag2");
+    const tagsAfterDelete = await tagsManager.list();
+    expect(Object.keys(tagsAfterDelete).length).toBe(1);
+    expect(tagsAfterDelete).toHaveProperty(tag1);
+    expect(tagsAfterDelete[tag1].version).toBe(3);
+
+    await table.add([{ id: 4n, vector: [0.7, 0.8] }]);
+    expect(await table.version()).toBe(4);
+
+    await table.checkout(tag1);
+    expect(await table.version()).toBe(3);
+
+    await table.checkoutLatest();
+    expect(await table.version()).toBe(4);
+  });
+
+  it("can checkout and restore tags", async () => {
+    const conn = await connect(tmpDir.name, {
+      readConsistencyInterval: 0,
+    });
+
+    const table = await conn.createTable("my_table", [
+      { id: 1n, vector: [0.1, 0.2] },
+    ]);
+    expect(await table.version()).toBe(1);
+    expect(await table.countRows()).toBe(1);
+    const tagsManager = await table.tags();
+    const tag1 = "tag1";
+    await tagsManager.create(tag1, 1);
+    await table.add([{ id: 2n, vector: [0.3, 0.4] }]);
+    const tag2 = "tag2";
+    await tagsManager.create(tag2, 2);
+    expect(await table.version()).toBe(2);
+    await table.checkout(tag1);
+    expect(await table.version()).toBe(1);
+    await table.restore();
+    expect(await table.version()).toBe(3);
+    expect(await table.countRows()).toBe(1);
+    await table.add([{ id: 3n, vector: [0.5, 0.6] }]);
+    expect(await table.countRows()).toBe(2);
+  });
+});
 
 describe("when optimizing a dataset", () => {
   let tmpDir: tmp.DirResult;
   let table: Table;
@@ -639,8 +639,9 @@ function transposeData(
 ): Vector {
   if (field.type instanceof Struct) {
     const childFields = field.type.children;
+    const fullPath = [...path, field.name];
     const childVectors = childFields.map((child) => {
-      return transposeData(data, child, [...path, child.name]);
+      return transposeData(data, child, fullPath);
     });
     const structData = makeData({
       type: field.type,
@@ -652,7 +653,14 @@ function transposeData(
     const values = data.map((datum) => {
       let current: unknown = datum;
       for (const key of valuesPath) {
-        if (isObject(current) && Object.hasOwn(current, key)) {
+        if (current == null) {
+          return null;
+        }
+
+        if (
+          isObject(current) &&
+          (Object.hasOwn(current, key) || key in current)
+        ) {
           current = current[key];
         } else {
           return null;
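
The second hunk above makes the nested-path lookup null-safe and also accepts values whose properties live on a prototype, where `Object.hasOwn` alone returns false. A standalone sketch of the same traversal logic, with hypothetical names:

```ts
// Hypothetical standalone version of the lookup the patch implements.
function isObject(value: unknown): value is Record<string, unknown> {
  return typeof value === "object" && value !== null;
}

function getNestedValue(datum: unknown, path: string[]): unknown {
  let current: unknown = datum;
  for (const key of path) {
    if (current == null) {
      return null; // a missing intermediate struct yields null, not a crash
    }
    if (isObject(current) && (Object.hasOwn(current, key) || key in current)) {
      current = current[key]; // descend one level of the struct path
    } else {
      return null;
    }
  }
  return current;
}

// getNestedValue({ metadata: { startLine: 10 } }, ["metadata", "startLine"]) === 10
```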
@@ -23,6 +23,18 @@ export {
   OptimizeStats,
   CompactionStats,
   RemovalStats,
+  TableStatistics,
+  FragmentStatistics,
+  FragmentSummaryStats,
+  Tags,
+  TagContents,
+  MergeResult,
+  AddResult,
+  AddColumnsResult,
+  AlterColumnsResult,
+  DeleteResult,
+  DropColumnsResult,
+  UpdateResult,
 } from "./native.js";
 
 export {
@@ -74,7 +86,7 @@ export {
   ColumnAlteration,
 } from "./table";
 
-export { MergeInsertBuilder } from "./merge";
+export { MergeInsertBuilder, WriteExecutionOptions } from "./merge";
 
 export * as embedding from "./embedding";
 export * as rerankers from "./rerankers";
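
With these re-exports, downstream code can type against the new result interfaces directly; a small sketch (the helper is hypothetical, and the row type assumes plain records are acceptable `Data`):

```ts
import { AddResult, Table } from "@lancedb/lancedb";

// Hypothetical helper typed against the newly exported AddResult interface.
async function addAndReport(
  table: Table,
  rows: Record<string, unknown>[],
): Promise<number> {
  const res: AddResult = await table.add(rows);
  return res.version; // the table version written by this insert
}
```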
@@ -1,7 +1,7 @@
|
|||||||
// SPDX-License-Identifier: Apache-2.0
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
||||||
import { Data, Schema, fromDataToBuffer } from "./arrow";
|
import { Data, Schema, fromDataToBuffer } from "./arrow";
|
||||||
import { NativeMergeInsertBuilder } from "./native";
|
import { MergeResult, NativeMergeInsertBuilder } from "./native";
|
||||||
|
|
||||||
/** A builder used to create and run a merge insert operation */
|
/** A builder used to create and run a merge insert operation */
|
||||||
export class MergeInsertBuilder {
|
export class MergeInsertBuilder {
|
||||||
@@ -73,9 +73,12 @@ export class MergeInsertBuilder {
|
|||||||
/**
|
/**
|
||||||
* Executes the merge insert operation
|
* Executes the merge insert operation
|
||||||
*
|
*
|
||||||
* Nothing is returned but the `Table` is updated
|
* @returns {Promise<MergeResult>} the merge result
|
||||||
*/
|
*/
|
||||||
async execute(data: Data): Promise<void> {
|
async execute(
|
||||||
|
data: Data,
|
||||||
|
execOptions?: Partial<WriteExecutionOptions>,
|
||||||
|
): Promise<MergeResult> {
|
||||||
let schema: Schema;
|
let schema: Schema;
|
||||||
if (this.#schema instanceof Promise) {
|
if (this.#schema instanceof Promise) {
|
||||||
schema = await this.#schema;
|
schema = await this.#schema;
|
||||||
@@ -83,7 +86,28 @@ export class MergeInsertBuilder {
|
|||||||
} else {
|
} else {
|
||||||
schema = this.#schema;
|
schema = this.#schema;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (execOptions?.timeoutMs !== undefined) {
|
||||||
|
this.#native.setTimeout(execOptions.timeoutMs);
|
||||||
|
}
|
||||||
|
|
||||||
const buffer = await fromDataToBuffer(data, undefined, schema);
|
const buffer = await fromDataToBuffer(data, undefined, schema);
|
||||||
await this.#native.execute(buffer);
|
return await this.#native.execute(buffer);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export interface WriteExecutionOptions {
|
||||||
|
/**
|
||||||
|
* Maximum time to run the operation before cancelling it.
|
||||||
|
*
|
||||||
|
* By default, there is a 30-second timeout that is only enforced after the
|
||||||
|
* first attempt. This is to prevent spending too long retrying to resolve
|
||||||
|
* conflicts. For example, if a write attempt takes 20 seconds and fails,
|
||||||
|
* the second attempt will be cancelled after 10 seconds, hitting the
|
||||||
|
* 30-second timeout. However, a write that takes one hour and succeeds on the
|
||||||
|
* first attempt will not be cancelled.
|
||||||
|
*
|
||||||
|
* When this is set, the timeout is enforced on all attempts, including the first.
|
||||||
|
*/
|
||||||
|
timeoutMs?: number;
|
||||||
|
}
|
||||||
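As a quick illustration of the new call shape, a minimal, hypothetical sketch follows (the connection path, table name, and record fields are invented, and the camelCased result fields assume napi-rs's usual snake_case-to-camelCase mapping):

import * as lancedb from "@lancedb/lancedb";

async function mergeWithTimeout() {
  const db = await lancedb.connect("data/sample-lancedb"); // hypothetical path
  const table = await db.openTable("users"); // hypothetical table
  // execute() now resolves to a MergeResult instead of void, and the
  // optional second argument enforces the timeout on every attempt.
  const result = await table
    .mergeInsert("id")
    .whenMatchedUpdateAll()
    .whenNotMatchedInsertAll()
    .execute([{ id: 1, name: "alice" }], { timeoutMs: 5000 });
  console.log(result.version, result.numInsertedRows, result.numUpdatedRows);
}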
@@ -16,10 +16,18 @@ import { EmbeddingFunctionConfig, getRegistry } from "./embedding/registry";
 import { IndexOptions } from "./indices";
 import { MergeInsertBuilder } from "./merge";
 import {
+  AddColumnsResult,
   AddColumnsSql,
+  AddResult,
+  AlterColumnsResult,
+  DeleteResult,
+  DropColumnsResult,
   IndexConfig,
   IndexStatistics,
   OptimizeStats,
+  TableStatistics,
+  Tags,
+  UpdateResult,
   Table as _NativeTable,
 } from "./native";
 import {
@@ -124,12 +132,19 @@ export abstract class Table {
   /**
    * Insert records into this Table.
    * @param {Data} data Records to be inserted into the Table
+   * @returns {Promise<AddResult>} A promise that resolves to an object
+   * containing the new version number of the table
    */
-  abstract add(data: Data, options?: Partial<AddDataOptions>): Promise<void>;
+  abstract add(
+    data: Data,
+    options?: Partial<AddDataOptions>,
+  ): Promise<AddResult>;
   /**
    * Update existing records in the Table
    * @param opts.values The values to update. The keys are the column names and the values
    * are the values to set.
+   * @returns {Promise<UpdateResult>} A promise that resolves to an object containing
+   * the number of rows updated and the new version number
    * @example
    * ```ts
    * table.update({where:"x = 2", values:{"vector": [10, 10]}})
@@ -139,11 +154,13 @@ export abstract class Table {
     opts: {
       values: Map<string, IntoSql> | Record<string, IntoSql>;
     } & Partial<UpdateOptions>,
-  ): Promise<void>;
+  ): Promise<UpdateResult>;
   /**
    * Update existing records in the Table
    * @param opts.valuesSql The values to update. The keys are the column names and the values
    * are the values to set. The values are SQL expressions.
+   * @returns {Promise<UpdateResult>} A promise that resolves to an object containing
+   * the number of rows updated and the new version number
    * @example
    * ```ts
    * table.update({where:"x = 2", valuesSql:{"x": "x + 1"}})
@@ -153,7 +170,7 @@ export abstract class Table {
     opts: {
       valuesSql: Map<string, string> | Record<string, string>;
     } & Partial<UpdateOptions>,
-  ): Promise<void>;
+  ): Promise<UpdateResult>;
   /**
    * Update existing records in the Table
    *
@@ -171,6 +188,8 @@ export abstract class Table {
    * repeatedly calling this method.
    * @param {Map<string, string> | Record<string, string>} updates - the
    * columns to update
+   * @returns {Promise<UpdateResult>} A promise that resolves to an object
+   * containing the number of rows updated and the new version number
    *
    * Keys in the map should specify the name of the column to update.
    * Values in the map provide the new value of the column. These can
@@ -182,12 +201,16 @@ export abstract class Table {
   abstract update(
     updates: Map<string, string> | Record<string, string>,
     options?: Partial<UpdateOptions>,
-  ): Promise<void>;
+  ): Promise<UpdateResult>;
 
   /** Count the total number of rows in the dataset. */
   abstract countRows(filter?: string): Promise<number>;
-  /** Delete the rows that satisfy the predicate. */
-  abstract delete(predicate: string): Promise<void>;
+  /**
+   * Delete the rows that satisfy the predicate.
+   * @returns {Promise<DeleteResult>} A promise that resolves to an object
+   * containing the new version number of the table
+   */
+  abstract delete(predicate: string): Promise<DeleteResult>;
   /**
    * Create an index to speed up queries.
    *
@@ -341,15 +364,23 @@ export abstract class Table {
    * the SQL expression to use to calculate the value of the new column. These
    * expressions will be evaluated for each row in the table, and can
    * reference existing columns in the table.
+   * @returns {Promise<AddColumnsResult>} A promise that resolves to an object
+   * containing the new version number of the table after adding the columns.
    */
-  abstract addColumns(newColumnTransforms: AddColumnsSql[]): Promise<void>;
+  abstract addColumns(
+    newColumnTransforms: AddColumnsSql[],
+  ): Promise<AddColumnsResult>;
 
   /**
    * Alter the name or nullability of columns.
    * @param {ColumnAlteration[]} columnAlterations One or more alterations to
    * apply to columns.
+   * @returns {Promise<AlterColumnsResult>} A promise that resolves to an object
+   * containing the new version number of the table after altering the columns.
    */
-  abstract alterColumns(columnAlterations: ColumnAlteration[]): Promise<void>;
+  abstract alterColumns(
+    columnAlterations: ColumnAlteration[],
+  ): Promise<AlterColumnsResult>;
   /**
    * Drop one or more columns from the dataset
    *
@@ -360,8 +391,10 @@ export abstract class Table {
    * @param {string[]} columnNames The names of the columns to drop. These can
    * be nested column references (e.g. "a.b.c") or top-level column names
    * (e.g. "a").
+   * @returns {Promise<DropColumnsResult>} A promise that resolves to an object
+   * containing the new version number of the table after dropping the columns.
    */
-  abstract dropColumns(columnNames: string[]): Promise<void>;
+  abstract dropColumns(columnNames: string[]): Promise<DropColumnsResult>;
 
   /** Retrieve the version of the table */
   abstract version(): Promise<number>;
@@ -374,7 +407,7 @@ export abstract class Table {
    *
    * Calling this method will set the table into time-travel mode. If you
    * wish to return to standard mode, call `checkoutLatest`.
-   * @param {number} version The version to checkout
+   * @param {number | string} version The version to checkout, could be version number or tag
    * @example
    * ```typescript
    * import * as lancedb from "@lancedb/lancedb"
@@ -390,7 +423,8 @@ export abstract class Table {
    * console.log(await table.version()); // 2
    * ```
    */
-  abstract checkout(version: number): Promise<void>;
+  abstract checkout(version: number | string): Promise<void>;
 
   /**
    * Checkout the latest version of the table. _This is an in-place operation._
    *
@@ -404,6 +438,23 @@ export abstract class Table {
    */
   abstract listVersions(): Promise<Version[]>;
 
+  /**
+   * Get a tags manager for this table.
+   *
+   * Tags allow you to label specific versions of a table with a human-readable name.
+   * The returned tags manager can be used to list, create, update, or delete tags.
+   *
+   * @returns {Tags} A tags manager for this table
+   * @example
+   * ```typescript
+   * const tagsManager = await table.tags();
+   * await tagsManager.create("v1", 1);
+   * const tags = await tagsManager.list();
+   * console.log(tags); // { "v1": { version: 1, manifestSize: ... } }
+   * ```
+   */
+  abstract tags(): Promise<Tags>;
+
   /**
    * Restore the table to the currently checked out version
    *
@@ -463,6 +514,13 @@ export abstract class Table {
    * Use {@link Table.listIndices} to find the names of the indices.
    */
   abstract indexStats(name: string): Promise<IndexStatistics | undefined>;
+
+  /** Returns table and fragment statistics
+   *
+   * @returns {TableStatistics} The table and fragment statistics
+   *
+   */
+  abstract stats(): Promise<TableStatistics>;
 }
 
 export class LocalTable extends Table {
@@ -502,12 +560,12 @@ export class LocalTable extends Table {
     return tbl.schema;
   }
 
-  async add(data: Data, options?: Partial<AddDataOptions>): Promise<void> {
+  async add(data: Data, options?: Partial<AddDataOptions>): Promise<AddResult> {
     const mode = options?.mode ?? "append";
     const schema = await this.schema();
 
     const buffer = await fromDataToBuffer(data, undefined, schema);
-    await this.inner.add(buffer, mode);
+    return await this.inner.add(buffer, mode);
   }
 
   async update(
@@ -520,7 +578,7 @@ export class LocalTable extends Table {
       valuesSql: Map<string, string> | Record<string, string>;
     } & Partial<UpdateOptions>),
     options?: Partial<UpdateOptions>,
-  ) {
+  ): Promise<UpdateResult> {
     const isValues =
       "values" in optsOrUpdates && typeof optsOrUpdates.values !== "string";
     const isValuesSql =
@@ -567,15 +625,15 @@ export class LocalTable extends Table {
       columns = Object.entries(optsOrUpdates as Record<string, string>);
       predicate = options?.where;
     }
-    await this.inner.update(predicate, columns);
+    return await this.inner.update(predicate, columns);
   }
 
   async countRows(filter?: string): Promise<number> {
     return await this.inner.countRows(filter);
   }
 
-  async delete(predicate: string): Promise<void> {
-    await this.inner.delete(predicate);
+  async delete(predicate: string): Promise<DeleteResult> {
+    return await this.inner.delete(predicate);
   }
 
   async createIndex(column: string, options?: Partial<IndexOptions>) {
@@ -663,11 +721,15 @@ export class LocalTable extends Table {
 
   // TODO: Support BatchUDF
 
-  async addColumns(newColumnTransforms: AddColumnsSql[]): Promise<void> {
-    await this.inner.addColumns(newColumnTransforms);
+  async addColumns(
+    newColumnTransforms: AddColumnsSql[],
+  ): Promise<AddColumnsResult> {
+    return await this.inner.addColumns(newColumnTransforms);
   }
 
-  async alterColumns(columnAlterations: ColumnAlteration[]): Promise<void> {
+  async alterColumns(
+    columnAlterations: ColumnAlteration[],
+  ): Promise<AlterColumnsResult> {
     const processedAlterations = columnAlterations.map((alteration) => {
       if (typeof alteration.dataType === "string") {
         return {
@@ -688,19 +750,22 @@ export class LocalTable extends Table {
       }
     });
 
-    await this.inner.alterColumns(processedAlterations);
+    return await this.inner.alterColumns(processedAlterations);
   }
 
-  async dropColumns(columnNames: string[]): Promise<void> {
-    await this.inner.dropColumns(columnNames);
+  async dropColumns(columnNames: string[]): Promise<DropColumnsResult> {
+    return await this.inner.dropColumns(columnNames);
   }
 
   async version(): Promise<number> {
     return await this.inner.version();
   }
 
-  async checkout(version: number): Promise<void> {
-    await this.inner.checkout(version);
+  async checkout(version: number | string): Promise<void> {
+    if (typeof version === "string") {
+      return this.inner.checkoutTag(version);
+    }
+    return this.inner.checkout(version);
   }
 
   async checkoutLatest(): Promise<void> {
@@ -719,6 +784,10 @@ export class LocalTable extends Table {
     await this.inner.restore();
   }
 
+  async tags(): Promise<Tags> {
+    return await this.inner.tags();
+  }
+
   async optimize(options?: Partial<OptimizeOptions>): Promise<OptimizeStats> {
     let cleanupOlderThanMs;
     if (
@@ -749,6 +818,11 @@ export class LocalTable extends Table {
     }
     return stats;
   }
+
+  async stats(): Promise<TableStatistics> {
+    return await this.inner.stats();
+  }
 
   mergeInsert(on: string | string[]): MergeInsertBuilder {
     on = Array.isArray(on) ? on : [on];
     return new MergeInsertBuilder(this.inner.mergeInsert(on), this.schema());
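Taken together, the new Table surface can be exercised roughly as follows (a hypothetical sketch: the connection path, table name, and tag are invented, and camelCased stats fields assume napi-rs's usual field mapping):

import * as lancedb from "@lancedb/lancedb";

async function tagAndInspect() {
  const db = await lancedb.connect("data/sample-lancedb"); // hypothetical path
  const table = await db.openTable("users"); // hypothetical table

  // add() now resolves to an AddResult carrying the new table version.
  const { version } = await table.add([{ id: 2, name: "bob" }]);

  // Label that version with a human-readable tag, then check it out;
  // checkout() accepts a tag name as well as a version number.
  const tags = await table.tags();
  await tags.create("v1", version);
  await table.checkout("v1");
  await table.checkoutLatest();

  // stats() reports table- and fragment-level statistics.
  const stats = await table.stats();
  console.log(stats.numRows, stats.fragmentStats.lengths.p50);
}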
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-arm64",
-  "version": "0.19.0-beta.11",
+  "version": "0.19.1-beta.3",
   "os": ["darwin"],
   "cpu": ["arm64"],
   "main": "lancedb.darwin-arm64.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-x64",
-  "version": "0.19.0-beta.11",
+  "version": "0.19.1-beta.3",
   "os": ["darwin"],
   "cpu": ["x64"],
   "main": "lancedb.darwin-x64.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-gnu",
-  "version": "0.19.0-beta.11",
+  "version": "0.19.1-beta.3",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-gnu.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-musl",
-  "version": "0.19.0-beta.11",
+  "version": "0.19.1-beta.3",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-musl.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-gnu",
-  "version": "0.19.0-beta.11",
+  "version": "0.19.1-beta.3",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-gnu.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-musl",
-  "version": "0.19.0-beta.11",
+  "version": "0.19.1-beta.3",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-musl.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-arm64-msvc",
-  "version": "0.19.0-beta.11",
+  "version": "0.19.1-beta.3",
   "os": [
     "win32"
   ],
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-x64-msvc",
-  "version": "0.19.0-beta.11",
+  "version": "0.19.1-beta.3",
   "os": ["win32"],
   "cpu": ["x64"],
   "main": "lancedb.win32-x64-msvc.node",
nodejs/package-lock.json (generated)
@@ -1,12 +1,12 @@
 {
   "name": "@lancedb/lancedb",
-  "version": "0.19.0-beta.11",
+  "version": "0.19.1-beta.3",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@lancedb/lancedb",
-      "version": "0.19.0-beta.11",
+      "version": "0.19.1-beta.3",
       "cpu": [
         "x64",
         "arm64"
@@ -11,7 +11,7 @@
     "ann"
   ],
   "private": false,
-  "version": "0.19.0-beta.11",
+  "version": "0.19.1-beta.3",
   "main": "dist/index.js",
   "exports": {
     ".": "./dist/index.js",
@@ -1,11 +1,13 @@
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
+use std::time::Duration;
+
 use lancedb::{arrow::IntoArrow, ipc::ipc_file_to_batches, table::merge::MergeInsertBuilder};
 use napi::bindgen_prelude::*;
 use napi_derive::napi;
 
-use crate::error::convert_error;
+use crate::{error::convert_error, table::MergeResult};
 
 #[napi]
 #[derive(Clone)]
@@ -36,8 +38,13 @@ impl NativeMergeInsertBuilder {
         this
     }
 
+    #[napi]
+    pub fn set_timeout(&mut self, timeout: u32) {
+        self.inner.timeout(Duration::from_millis(timeout as u64));
+    }
+
     #[napi(catch_unwind)]
-    pub async fn execute(&self, buf: Buffer) -> napi::Result<()> {
+    pub async fn execute(&self, buf: Buffer) -> napi::Result<MergeResult> {
         let data = ipc_file_to_batches(buf.to_vec())
             .and_then(IntoArrow::into_arrow)
             .map_err(|e| {
@@ -46,12 +53,13 @@ impl NativeMergeInsertBuilder {
 
         let this = self.clone();
 
-        this.inner.execute(data).await.map_err(|e| {
+        let res = this.inner.execute(data).await.map_err(|e| {
             napi::Error::from_reason(format!(
                 "Failed to execute merge insert: {}",
                 convert_error(&e)
             ))
-        })
+        })?;
+        Ok(res.into())
     }
 }
 
@@ -75,7 +75,7 @@ impl Table {
     }
 
     #[napi(catch_unwind)]
-    pub async fn add(&self, buf: Buffer, mode: String) -> napi::Result<()> {
+    pub async fn add(&self, buf: Buffer, mode: String) -> napi::Result<AddResult> {
        let batches = ipc_file_to_batches(buf.to_vec())
            .map_err(|e| napi::Error::from_reason(format!("Failed to read IPC file: {}", e)))?;
        let mut op = self.inner_ref()?.add(batches);
@@ -88,7 +88,8 @@ impl Table {
            return Err(napi::Error::from_reason(format!("Invalid mode: {}", mode)));
        };
 
-        op.execute().await.default_error()
+        let res = op.execute().await.default_error()?;
+        Ok(res.into())
     }
 
     #[napi(catch_unwind)]
@@ -101,8 +102,9 @@ impl Table {
     }
 
     #[napi(catch_unwind)]
-    pub async fn delete(&self, predicate: String) -> napi::Result<()> {
-        self.inner_ref()?.delete(&predicate).await.default_error()
+    pub async fn delete(&self, predicate: String) -> napi::Result<DeleteResult> {
+        let res = self.inner_ref()?.delete(&predicate).await.default_error()?;
+        Ok(res.into())
     }
 
     #[napi(catch_unwind)]
@@ -157,12 +159,18 @@ impl Table {
             .default_error()
     }
 
+    #[napi(catch_unwind)]
+    pub async fn stats(&self) -> Result<TableStatistics> {
+        let stats = self.inner_ref()?.stats().await.default_error()?;
+        Ok(stats.into())
+    }
+
     #[napi(catch_unwind)]
     pub async fn update(
         &self,
         only_if: Option<String>,
         columns: Vec<(String, String)>,
-    ) -> napi::Result<u64> {
+    ) -> napi::Result<UpdateResult> {
         let mut op = self.inner_ref()?.update();
         if let Some(only_if) = only_if {
             op = op.only_if(only_if);
@@ -170,7 +178,8 @@ impl Table {
         for (column_name, value) in columns {
             op = op.column(column_name, value);
         }
-        op.execute().await.default_error()
+        let res = op.execute().await.default_error()?;
+        Ok(res.into())
     }
 
     #[napi(catch_unwind)]
@@ -184,21 +193,28 @@ impl Table {
     }
 
     #[napi(catch_unwind)]
-    pub async fn add_columns(&self, transforms: Vec<AddColumnsSql>) -> napi::Result<()> {
+    pub async fn add_columns(
+        &self,
+        transforms: Vec<AddColumnsSql>,
+    ) -> napi::Result<AddColumnsResult> {
         let transforms = transforms
             .into_iter()
             .map(|sql| (sql.name, sql.value_sql))
             .collect::<Vec<_>>();
         let transforms = NewColumnTransform::SqlExpressions(transforms);
-        self.inner_ref()?
+        let res = self
+            .inner_ref()?
             .add_columns(transforms, None)
             .await
             .default_error()?;
-        Ok(())
+        Ok(res.into())
     }
 
     #[napi(catch_unwind)]
-    pub async fn alter_columns(&self, alterations: Vec<ColumnAlteration>) -> napi::Result<()> {
+    pub async fn alter_columns(
+        &self,
+        alterations: Vec<ColumnAlteration>,
+    ) -> napi::Result<AlterColumnsResult> {
         for alteration in &alterations {
             if alteration.rename.is_none()
                 && alteration.nullable.is_none()
@@ -215,21 +231,23 @@ impl Table {
         .collect::<std::result::Result<Vec<_>, String>>()
         .map_err(napi::Error::from_reason)?;
 
-        self.inner_ref()?
+        let res = self
+            .inner_ref()?
             .alter_columns(&alterations)
             .await
             .default_error()?;
-        Ok(())
+        Ok(res.into())
     }
 
     #[napi(catch_unwind)]
-    pub async fn drop_columns(&self, columns: Vec<String>) -> napi::Result<()> {
+    pub async fn drop_columns(&self, columns: Vec<String>) -> napi::Result<DropColumnsResult> {
         let col_refs = columns.iter().map(String::as_str).collect::<Vec<_>>();
-        self.inner_ref()?
+        let res = self
+            .inner_ref()?
             .drop_columns(&col_refs)
             .await
             .default_error()?;
-        Ok(())
+        Ok(res.into())
     }
 
     #[napi(catch_unwind)]
@@ -249,6 +267,14 @@ impl Table {
             .default_error()
     }
 
+    #[napi(catch_unwind)]
+    pub async fn checkout_tag(&self, tag: String) -> napi::Result<()> {
+        self.inner_ref()?
+            .checkout_tag(tag.as_str())
+            .await
+            .default_error()
+    }
+
     #[napi(catch_unwind)]
     pub async fn checkout_latest(&self) -> napi::Result<()> {
         self.inner_ref()?.checkout_latest().await.default_error()
@@ -281,6 +307,13 @@ impl Table {
         self.inner_ref()?.restore().await.default_error()
     }
 
+    #[napi(catch_unwind)]
+    pub async fn tags(&self) -> napi::Result<Tags> {
+        Ok(Tags {
+            inner: self.inner_ref()?.clone(),
+        })
+    }
+
     #[napi(catch_unwind)]
     pub async fn optimize(
         &self,
@@ -540,9 +573,257 @@ impl From<lancedb::index::IndexStatistics> for IndexStatistics {
     }
 }
 
+#[napi(object)]
+pub struct TableStatistics {
+    /// The total number of bytes in the table
+    pub total_bytes: i64,
+
+    /// The number of rows in the table
+    pub num_rows: i64,
+
+    /// The number of indices in the table
+    pub num_indices: i64,
+
+    /// Statistics on table fragments
+    pub fragment_stats: FragmentStatistics,
+}
+
+#[napi(object)]
+pub struct FragmentStatistics {
+    /// The number of fragments in the table
+    pub num_fragments: i64,
+
+    /// The number of uncompacted fragments in the table
+    pub num_small_fragments: i64,
+
+    /// Statistics on the number of rows in the table fragments
+    pub lengths: FragmentSummaryStats,
+}
+
+#[napi(object)]
+pub struct FragmentSummaryStats {
+    /// The number of rows in the fragment with the fewest rows
+    pub min: i64,
+
+    /// The number of rows in the fragment with the most rows
+    pub max: i64,
+
+    /// The mean number of rows in the fragments
+    pub mean: i64,
+
+    /// The 25th percentile of number of rows in the fragments
+    pub p25: i64,
+
+    /// The 50th percentile of number of rows in the fragments
+    pub p50: i64,
+
+    /// The 75th percentile of number of rows in the fragments
+    pub p75: i64,
+
+    /// The 99th percentile of number of rows in the fragments
+    pub p99: i64,
+}
+
+impl From<lancedb::table::TableStatistics> for TableStatistics {
+    fn from(v: lancedb::table::TableStatistics) -> Self {
+        Self {
+            total_bytes: v.total_bytes as i64,
+            num_rows: v.num_rows as i64,
+            num_indices: v.num_indices as i64,
+            fragment_stats: FragmentStatistics {
+                num_fragments: v.fragment_stats.num_fragments as i64,
+                num_small_fragments: v.fragment_stats.num_small_fragments as i64,
+                lengths: FragmentSummaryStats {
+                    min: v.fragment_stats.lengths.min as i64,
+                    max: v.fragment_stats.lengths.max as i64,
+                    mean: v.fragment_stats.lengths.mean as i64,
+                    p25: v.fragment_stats.lengths.p25 as i64,
+                    p50: v.fragment_stats.lengths.p50 as i64,
+                    p75: v.fragment_stats.lengths.p75 as i64,
+                    p99: v.fragment_stats.lengths.p99 as i64,
+                },
+            },
+        }
+    }
+}
+
 #[napi(object)]
 pub struct Version {
     pub version: i64,
     pub timestamp: i64,
     pub metadata: HashMap<String, String>,
 }
+
+#[napi(object)]
+pub struct UpdateResult {
+    pub rows_updated: i64,
+    pub version: i64,
+}
+
+impl From<lancedb::table::UpdateResult> for UpdateResult {
+    fn from(value: lancedb::table::UpdateResult) -> Self {
+        Self {
+            rows_updated: value.rows_updated as i64,
+            version: value.version as i64,
+        }
+    }
+}
+
+#[napi(object)]
+pub struct AddResult {
+    pub version: i64,
+}
+
+impl From<lancedb::table::AddResult> for AddResult {
+    fn from(value: lancedb::table::AddResult) -> Self {
+        Self {
+            version: value.version as i64,
+        }
+    }
+}
+
+#[napi(object)]
+pub struct DeleteResult {
+    pub version: i64,
+}
+
+impl From<lancedb::table::DeleteResult> for DeleteResult {
+    fn from(value: lancedb::table::DeleteResult) -> Self {
+        Self {
+            version: value.version as i64,
+        }
+    }
+}
+
+#[napi(object)]
+pub struct MergeResult {
+    pub version: i64,
+    pub num_inserted_rows: i64,
+    pub num_updated_rows: i64,
+    pub num_deleted_rows: i64,
+}
+
+impl From<lancedb::table::MergeResult> for MergeResult {
+    fn from(value: lancedb::table::MergeResult) -> Self {
+        Self {
+            version: value.version as i64,
+            num_inserted_rows: value.num_inserted_rows as i64,
+            num_updated_rows: value.num_updated_rows as i64,
+            num_deleted_rows: value.num_deleted_rows as i64,
+        }
+    }
+}
+
+#[napi(object)]
+pub struct AddColumnsResult {
+    pub version: i64,
+}
+
+impl From<lancedb::table::AddColumnsResult> for AddColumnsResult {
+    fn from(value: lancedb::table::AddColumnsResult) -> Self {
+        Self {
+            version: value.version as i64,
+        }
+    }
+}
+
+#[napi(object)]
+pub struct AlterColumnsResult {
+    pub version: i64,
+}
+
+impl From<lancedb::table::AlterColumnsResult> for AlterColumnsResult {
+    fn from(value: lancedb::table::AlterColumnsResult) -> Self {
+        Self {
+            version: value.version as i64,
+        }
+    }
+}
+
+#[napi(object)]
+pub struct DropColumnsResult {
+    pub version: i64,
+}
+
+impl From<lancedb::table::DropColumnsResult> for DropColumnsResult {
+    fn from(value: lancedb::table::DropColumnsResult) -> Self {
+        Self {
+            version: value.version as i64,
+        }
+    }
+}
+
+#[napi]
+pub struct TagContents {
+    pub version: i64,
+    pub manifest_size: i64,
+}
+
+#[napi]
+pub struct Tags {
+    inner: LanceDbTable,
+}
+
+#[napi]
+impl Tags {
+    #[napi]
+    pub async fn list(&self) -> napi::Result<HashMap<String, TagContents>> {
+        let rust_tags = self.inner.tags().await.default_error()?;
+        let tag_list = rust_tags.as_ref().list().await.default_error()?;
+        let tag_contents = tag_list
+            .into_iter()
+            .map(|(k, v)| {
+                (
+                    k,
+                    TagContents {
+                        version: v.version as i64,
+                        manifest_size: v.manifest_size as i64,
+                    },
+                )
+            })
+            .collect();
+
+        Ok(tag_contents)
+    }
+
+    #[napi]
+    pub async fn get_version(&self, tag: String) -> napi::Result<i64> {
+        let rust_tags = self.inner.tags().await.default_error()?;
+        rust_tags
+            .as_ref()
+            .get_version(tag.as_str())
+            .await
+            .map(|v| v as i64)
+            .default_error()
+    }
+
+    #[napi]
+    pub async unsafe fn create(&mut self, tag: String, version: i64) -> napi::Result<()> {
+        let mut rust_tags = self.inner.tags().await.default_error()?;
+        rust_tags
+            .as_mut()
+            .create(tag.as_str(), version as u64)
+            .await
+            .default_error()
+    }
+
+    #[napi]
+    pub async unsafe fn delete(&mut self, tag: String) -> napi::Result<()> {
+        let mut rust_tags = self.inner.tags().await.default_error()?;
+        rust_tags
+            .as_mut()
+            .delete(tag.as_str())
+            .await
+            .default_error()
+    }
+
+    #[napi]
+    pub async unsafe fn update(&mut self, tag: String, version: i64) -> napi::Result<()> {
+        let mut rust_tags = self.inner.tags().await.default_error()?;
+        rust_tags
+            .as_mut()
+            .update(tag.as_str(), version as u64)
+            .await
+            .default_error()
+    }
+}
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.22.0"
+current_version = "0.22.1-beta.4"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
@@ -1,6 +1,6 @@
 [package]
 name = "lancedb-python"
-version = "0.22.0"
+version = "0.22.1-beta.4"
 edition.workspace = true
 description = "Python bindings for LanceDB"
 license.workspace = true
@@ -7,7 +7,7 @@ dependencies = [
     "numpy",
     "overrides>=0.7",
     "packaging",
-    "pyarrow>=14",
+    "pyarrow>=16",
     "pydantic>=1.10",
    "tqdm>=4.27.0",
 ]
@@ -1,5 +1,5 @@
 from datetime import timedelta
-from typing import Dict, List, Optional, Tuple, Any, Union, Literal
+from typing import Dict, List, Optional, Tuple, Any, TypedDict, Union, Literal
 
 import pyarrow as pa
 
@@ -36,8 +36,10 @@ class Table:
     async def schema(self) -> pa.Schema: ...
     async def add(
         self, data: pa.RecordBatchReader, mode: Literal["append", "overwrite"]
-    ) -> None: ...
-    async def update(self, updates: Dict[str, str], where: Optional[str]) -> None: ...
+    ) -> AddResult: ...
+    async def update(
+        self, updates: Dict[str, str], where: Optional[str]
+    ) -> UpdateResult: ...
     async def count_rows(self, filter: Optional[str]) -> int: ...
     async def create_index(
         self,
@@ -47,23 +49,34 @@ class Table:
     ): ...
     async def list_versions(self) -> List[Dict[str, Any]]: ...
     async def version(self) -> int: ...
-    async def checkout(self, version: int): ...
+    async def checkout(self, version: Union[int, str]): ...
     async def checkout_latest(self): ...
-    async def restore(self, version: Optional[int] = None): ...
+    async def restore(self, version: Optional[Union[int, str]] = None): ...
     async def list_indices(self) -> list[IndexConfig]: ...
-    async def delete(self, filter: str): ...
-    async def add_columns(self, columns: list[tuple[str, str]]) -> None: ...
-    async def add_columns_with_schema(self, schema: pa.Schema) -> None: ...
-    async def alter_columns(self, columns: list[dict[str, Any]]) -> None: ...
+    async def delete(self, filter: str) -> DeleteResult: ...
+    async def add_columns(self, columns: list[tuple[str, str]]) -> AddColumnsResult: ...
+    async def add_columns_with_schema(self, schema: pa.Schema) -> AddColumnsResult: ...
+    async def alter_columns(
+        self, columns: list[dict[str, Any]]
+    ) -> AlterColumnsResult: ...
     async def optimize(
         self,
         *,
         cleanup_since_ms: Optional[int] = None,
         delete_unverified: Optional[bool] = None,
     ) -> OptimizeStats: ...
+    @property
+    def tags(self) -> Tags: ...
     def query(self) -> Query: ...
     def vector_search(self) -> VectorQuery: ...
 
+class Tags:
+    async def list(self) -> Dict[str, Tag]: ...
+    async def get_version(self, tag: str) -> int: ...
+    async def create(self, tag: str, version: int): ...
+    async def delete(self, tag: str): ...
+    async def update(self, tag: str, version: int): ...
+
 class IndexConfig:
     index_type: str
     columns: List[str]
@@ -195,3 +208,32 @@ class RemovalStats:
 class OptimizeStats:
     compaction: CompactionStats
     prune: RemovalStats
+
+class Tag(TypedDict):
+    version: int
+    manifest_size: int
+
+class AddResult:
+    version: int
+
+class DeleteResult:
+    version: int
+
+class UpdateResult:
+    rows_updated: int
+    version: int
+
+class MergeResult:
+    version: int
+    num_updated_rows: int
+    num_inserted_rows: int
+    num_deleted_rows: int
+
+class AddColumnsResult:
+    version: int
+
+class AlterColumnsResult:
+    version: int
+
+class DropColumnsResult:
+    version: int
@@ -4,10 +4,14 @@
 
 from __future__ import annotations
 
+from datetime import timedelta
 from typing import TYPE_CHECKING, List, Optional
 
 if TYPE_CHECKING:
     from .common import DATA
+    from ._lancedb import (
+        MergeInsertResult,
+    )
 
 
 class LanceMergeInsertBuilder(object):
@@ -28,6 +32,7 @@ class LanceMergeInsertBuilder(object):
         self._when_not_matched_insert_all = False
         self._when_not_matched_by_source_delete = False
         self._when_not_matched_by_source_condition = None
+        self._timeout = None
 
     def when_matched_update_all(
         self, *, where: Optional[str] = None
@@ -78,7 +83,8 @@ class LanceMergeInsertBuilder(object):
         new_data: DATA,
         on_bad_vectors: str = "error",
         fill_value: float = 0.0,
-    ):
+        timeout: Optional[timedelta] = None,
+    ) -> MergeInsertResult:
         """
         Executes the merge insert operation
 
@@ -95,5 +101,24 @@ class LanceMergeInsertBuilder(object):
             One of "error", "drop", "fill".
         fill_value: float, default 0.
             The value to use when filling vectors. Only used if on_bad_vectors="fill".
+        timeout: Optional[timedelta], default None
+            Maximum time to run the operation before cancelling it.
+
+            By default, there is a 30-second timeout that is only enforced after the
+            first attempt. This is to prevent spending too long retrying to resolve
+            conflicts. For example, if a write attempt takes 20 seconds and fails,
+            the second attempt will be cancelled after 10 seconds, hitting the
+            30-second timeout. However, a write that takes one hour and succeeds on the
+            first attempt will not be cancelled.
+
+            When this is set, the timeout is enforced on all attempts, including
+            the first.
+
+        Returns
+        -------
+        MergeInsertResult
+            version: the new version number of the table after doing merge insert.
         """
+        if timeout is not None:
+            self._timeout = timeout
         return self._table._do_merge(self, new_data, on_bad_vectors, fill_value)
@@ -415,6 +415,7 @@ class LanceModel(pydantic.BaseModel):
     >>> table.add([
     ...     TestModel(name="test", vector=[1.0, 2.0])
     ... ])
+    AddResult(version=2)
     >>> table.search([0., 0.]).limit(1).to_pydantic(TestModel)
     [TestModel(name='test', vector=FixedSizeList(dim=2))]
     """
@@ -1636,51 +1636,7 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
         raise NotImplementedError("to_query_object not yet supported on a hybrid query")
 
     def to_arrow(self, *, timeout: Optional[timedelta] = None) -> pa.Table:
-        vector_query, fts_query = self._validate_query(
-            self._query, self._vector, self._text
-        )
-        self._fts_query = LanceFtsQueryBuilder(
-            self._table, fts_query, fts_columns=self._fts_columns
-        )
-        vector_query = self._query_to_vector(
-            self._table, vector_query, self._vector_column
-        )
-        self._vector_query = LanceVectorQueryBuilder(
-            self._table, vector_query, self._vector_column
-        )
-
-        if self._limit:
-            self._vector_query.limit(self._limit)
-            self._fts_query.limit(self._limit)
-        if self._columns:
-            self._vector_query.select(self._columns)
-            self._fts_query.select(self._columns)
-        if self._where:
-            self._vector_query.where(self._where, self._postfilter)
-            self._fts_query.where(self._where, self._postfilter)
-        if self._with_row_id:
-            self._vector_query.with_row_id(True)
-            self._fts_query.with_row_id(True)
-        if self._phrase_query:
-            self._fts_query.phrase_query(True)
-        if self._distance_type:
-            self._vector_query.metric(self._distance_type)
-        if self._nprobes:
-            self._vector_query.nprobes(self._nprobes)
-        if self._refine_factor:
-            self._vector_query.refine_factor(self._refine_factor)
-        if self._ef:
-            self._vector_query.ef(self._ef)
-        if self._bypass_vector_index:
-            self._vector_query.bypass_vector_index()
-        if self._lower_bound or self._upper_bound:
-            self._vector_query.distance_range(
-                lower_bound=self._lower_bound, upper_bound=self._upper_bound
-            )
-
-        if self._reranker is None:
-            self._reranker = RRFReranker()
-
+        self._create_query_builders()
 
         with ThreadPoolExecutor() as executor:
             fts_future = executor.submit(
                 self._fts_query.with_row_id(True).to_arrow, timeout=timeout
@@ -2003,6 +1959,112 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
         self._bypass_vector_index = True
         return self
 
+    def explain_plan(self, verbose: Optional[bool] = False) -> str:
+        """Return the execution plan for this query.
+
+        Examples
+        --------
+        >>> import lancedb
+        >>> db = lancedb.connect("./.lancedb")
+        >>> table = db.create_table("my_table", [{"vector": [99.0, 99]}])
+        >>> query = [100, 100]
+        >>> plan = table.search(query).explain_plan(True)
+        >>> print(plan) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
+        ProjectionExec: expr=[vector@0 as vector, _distance@2 as _distance]
+          GlobalLimitExec: skip=0, fetch=10
+            FilterExec: _distance@2 IS NOT NULL
+              SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], preserve_partitioning=[false]
+                KNNVectorDistance: metric=l2
+                  LanceScan: uri=..., projection=[vector], row_id=true, row_addr=false, ordered=false
+
+        Parameters
+        ----------
+        verbose : bool, default False
+            Use a verbose output format.
+
+        Returns
+        -------
+        plan : str
+        """  # noqa: E501
+        self._create_query_builders()
+
+        results = ["Vector Search Plan:"]
+        results.append(
+            self._table._explain_plan(
+                self._vector_query.to_query_object(), verbose=verbose
+            )
+        )
+        results.append("FTS Search Plan:")
+        results.append(
+            self._table._explain_plan(
+                self._fts_query.to_query_object(), verbose=verbose
+            )
+        )
+        return "\n".join(results)
+
+    def analyze_plan(self):
+        """Execute the query and display with runtime metrics.
+
+        Returns
+        -------
+        plan : str
+        """
+        self._create_query_builders()
+
+        results = ["Vector Search Plan:"]
+        results.append(self._table._analyze_plan(self._vector_query.to_query_object()))
+        results.append("FTS Search Plan:")
+        results.append(self._table._analyze_plan(self._fts_query.to_query_object()))
+        return "\n".join(results)
+
+    def _create_query_builders(self):
+        """Set up and configure the vector and FTS query builders."""
+        vector_query, fts_query = self._validate_query(
+            self._query, self._vector, self._text
+        )
+        self._fts_query = LanceFtsQueryBuilder(
+            self._table, fts_query, fts_columns=self._fts_columns
+        )
+        vector_query = self._query_to_vector(
+            self._table, vector_query, self._vector_column
+        )
+        self._vector_query = LanceVectorQueryBuilder(
+            self._table, vector_query, self._vector_column
+        )
+
+        # Apply common configurations
+        if self._limit:
+            self._vector_query.limit(self._limit)
+            self._fts_query.limit(self._limit)
+        if self._columns:
+            self._vector_query.select(self._columns)
+            self._fts_query.select(self._columns)
+        if self._where:
+            self._vector_query.where(self._where, self._postfilter)
+            self._fts_query.where(self._where, self._postfilter)
+        if self._with_row_id:
+            self._vector_query.with_row_id(True)
+            self._fts_query.with_row_id(True)
+        if self._phrase_query:
+            self._fts_query.phrase_query(True)
+        if self._distance_type:
+            self._vector_query.metric(self._distance_type)
+        if self._nprobes:
+            self._vector_query.nprobes(self._nprobes)
+        if self._refine_factor:
+            self._vector_query.refine_factor(self._refine_factor)
+        if self._ef:
+            self._vector_query.ef(self._ef)
+        if self._bypass_vector_index:
+            self._vector_query.bypass_vector_index()
+        if self._lower_bound or self._upper_bound:
+            self._vector_query.distance_range(
+                lower_bound=self._lower_bound, upper_bound=self._upper_bound
+            )
+
+        if self._reranker is None:
+            self._reranker = RRFReranker()
+
 
 class AsyncQueryBase(object):
     def __init__(self, inner: Union[LanceQuery, LanceVectorQuery]):
@@ -7,7 +7,16 @@ from functools import cached_property
 from typing import Dict, Iterable, List, Optional, Union, Literal
 import warnings
 
-from lancedb._lancedb import IndexConfig
+from lancedb._lancedb import (
+    AddColumnsResult,
+    AddResult,
+    AlterColumnsResult,
+    DeleteResult,
+    DropColumnsResult,
+    IndexConfig,
+    MergeResult,
+    UpdateResult,
+)
 from lancedb.embeddings.base import EmbeddingFunctionConfig
 from lancedb.index import FTS, BTree, Bitmap, HnswPq, HnswSq, IvfFlat, IvfPq, LabelList
 from lancedb.remote.db import LOOP
@@ -18,7 +27,7 @@ from lancedb.merge import LanceMergeInsertBuilder
 from lancedb.embeddings import EmbeddingFunctionRegistry
 
 from ..query import LanceVectorQueryBuilder, LanceQueryBuilder
-from ..table import AsyncTable, IndexStatistics, Query, Table
+from ..table import AsyncTable, IndexStatistics, Query, Table, Tags
 
 
 class RemoteTable(Table):
@@ -38,9 +47,6 @@ class RemoteTable(Table):
     def __repr__(self) -> str:
         return f"RemoteTable({self.db_name}.{self.name})"
 
-    def __len__(self) -> int:
-        self.count_rows(None)
-
     @property
     def schema(self) -> pa.Schema:
         """The [Arrow Schema](https://arrow.apache.org/docs/python/api/datatypes.html#)
@@ -54,6 +60,10 @@ class RemoteTable(Table):
         """Get the current version of the table"""
         return LOOP.run(self._table.version())
 
+    @property
+    def tags(self) -> Tags:
+        return Tags(self._table)
+
     @cached_property
     def embedding_functions(self) -> Dict[str, EmbeddingFunctionConfig]:
         """
@@ -81,13 +91,13 @@ class RemoteTable(Table):
         """to_pandas() is not yet supported on LanceDB cloud."""
         return NotImplementedError("to_pandas() is not yet supported on LanceDB cloud.")
 
-    def checkout(self, version: int):
+    def checkout(self, version: Union[int, str]):
         return LOOP.run(self._table.checkout(version))
 
     def checkout_latest(self):
         return LOOP.run(self._table.checkout_latest())
 
-    def restore(self, version: Optional[int] = None):
+    def restore(self, version: Optional[Union[int, str]] = None):
         return LOOP.run(self._table.restore(version))
 
     def list_indices(self) -> Iterable[IndexConfig]:
@@ -259,7 +269,7 @@ class RemoteTable(Table):
         mode: str = "append",
         on_bad_vectors: str = "error",
         fill_value: float = 0.0,
-    ) -> int:
+    ) -> AddResult:
         """Add more data to the [Table](Table). It has the same API signature as
         the OSS version.
 
@@ -282,8 +292,12 @@ class RemoteTable(Table):
         fill_value: float, default 0.
             The value to use when filling vectors. Only used if on_bad_vectors="fill".
 
+        Returns
+        -------
+        AddResult
+            An object containing the new version number of the table after adding data.
         """
-        LOOP.run(
+        return LOOP.run(
             self._table.add(
                 data, mode=mode, on_bad_vectors=on_bad_vectors, fill_value=fill_value
             )
@@ -409,10 +423,12 @@ class RemoteTable(Table):
         new_data: DATA,
         on_bad_vectors: str,
         fill_value: float,
-    ):
-        LOOP.run(self._table._do_merge(merge, new_data, on_bad_vectors, fill_value))
+    ) -> MergeResult:
+        return LOOP.run(
+            self._table._do_merge(merge, new_data, on_bad_vectors, fill_value)
+        )
 
-    def delete(self, predicate: str):
+    def delete(self, predicate: str) -> DeleteResult:
         """Delete rows from the table.
 
         This can be used to delete a single row, many rows, all rows, or
@@ -427,6 +443,11 @@ class RemoteTable(Table):
 
         The filter must not be empty, or it will error.
 
+        Returns
+        -------
+        DeleteResult
+            An object containing the new version number of the table after deletion.
+
         Examples
         --------
         >>> import lancedb
@@ -459,7 +480,7 @@ class RemoteTable(Table):
            x      vector  _distance  # doctest: +SKIP
         0  2  [3.0, 4.0]       85.0  # doctest: +SKIP
         """
-        LOOP.run(self._table.delete(predicate))
+        return LOOP.run(self._table.delete(predicate))
 
     def update(
         self,
@@ -467,7 +488,7 @@ class RemoteTable(Table):
         values: Optional[dict] = None,
         *,
         values_sql: Optional[Dict[str, str]] = None,
-    ):
+    ) -> UpdateResult:
         """
         This can be used to update zero to all rows depending on how many
         rows match the where clause.
@@ -485,6 +506,12 @@ class RemoteTable(Table):
             reference existing columns. For example, {"x": "x + 1"} will increment
             the x column by 1.
 
+        Returns
+        -------
+        UpdateResult
+            - rows_updated: The number of rows that were updated
+            - version: The new version number of the table after the update
+
         Examples
         --------
         >>> import lancedb
@@ -509,7 +536,7 @@ class RemoteTable(Table):
         2  2  [10.0, 10.0]  # doctest: +SKIP
 
         """
-        LOOP.run(
+        return LOOP.run(
             self._table.update(where=where, updates=values, updates_sql=values_sql)
         )
 
@@ -557,13 +584,15 @@ class RemoteTable(Table):
     def count_rows(self, filter: Optional[str] = None) -> int:
         return LOOP.run(self._table.count_rows(filter))
 
-    def add_columns(self, transforms: Dict[str, str]):
+    def add_columns(self, transforms: Dict[str, str]) -> AddColumnsResult:
         return LOOP.run(self._table.add_columns(transforms))
 
-    def alter_columns(self, *alterations: Iterable[Dict[str, str]]):
+    def alter_columns(
+        self, *alterations: Iterable[Dict[str, str]]
+    ) -> AlterColumnsResult:
         return LOOP.run(self._table.alter_columns(*alterations))
 
-    def drop_columns(self, columns: Iterable[str]):
+    def drop_columns(self, columns: Iterable[str]) -> DropColumnsResult:
         return LOOP.run(self._table.drop_columns(columns))
 
     def drop_index(self, index_name: str):
@@ -574,6 +603,9 @@ class RemoteTable(Table):
     ):
         return LOOP.run(self._table.wait_for_index(index_names, timeout))
 
+    def stats(self):
+        return LOOP.run(self._table.stats())
+
     def uses_v2_manifest_paths(self) -> bool:
         raise NotImplementedError(
             "uses_v2_manifest_paths() is not supported on the LanceDB Cloud"
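With this file's changes, every mutating call on a remote table now surfaces the result object produced by the underlying async table instead of discarding it. A sketch of what calling code can now rely on — the connection arguments and table name are placeholders:

import lancedb

db = lancedb.connect("db://my-project", api_key="...", region="us-east-1")  # placeholders
tbl = db.open_table("users")

add_res = tbl.add([{"id": 1, "vector": [0.1, 0.2]}])
print(add_res.version)  # AddResult: new table version after the append

upd_res = tbl.update(where="id = 1", values={"id": 2})
print(upd_res.rows_updated, upd_res.version)  # UpdateResult fields

del_res = tbl.delete("id = 2")
print(del_res.version)  # DeleteResult: version after the delete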
@@ -77,6 +77,14 @@ if TYPE_CHECKING:
         OptimizeStats,
         CleanupStats,
         CompactionStats,
+        Tag,
+        AddColumnsResult,
+        AddResult,
+        AlterColumnsResult,
+        DeleteResult,
+        DropColumnsResult,
+        MergeResult,
+        UpdateResult,
     )
     from .db import LanceDBConnection
     from .index import IndexConfig
@@ -549,6 +557,7 @@ class Table(ABC):
     Can append new data with [Table.add()][lancedb.table.Table.add].
 
     >>> table.add([{"vector": [0.5, 1.3], "b": 4}])
+    AddResult(version=2)
 
     Can query the table with [Table.search][lancedb.table.Table.search].
 
@@ -582,6 +591,39 @@ class Table(ABC):
         """
         raise NotImplementedError
 
+    @property
+    @abstractmethod
+    def tags(self) -> Tags:
+        """Tag management for the table.
+
+        Similar to Git, tags are a way to add metadata to a specific version of the
+        table.
+
+        .. warning::
+
+            Tagged versions are exempted from the :py:meth:`cleanup_old_versions()`
+            process.
+
+            To remove a version that has been tagged, you must first
+            :py:meth:`~Tags.delete` the associated tag.
+
+        Examples
+        --------
+
+        .. code-block:: python
+
+            table = db.open_table("my_table")
+            table.tags.create("v2-prod-20250203", 10)
+
+            tags = table.tags.list()
+
+        """
+        raise NotImplementedError
+
+    def __len__(self) -> int:
+        """The number of rows in this Table"""
+        return self.count_rows(None)
+
     @property
     @abstractmethod
     def embedding_functions(self) -> Dict[str, EmbeddingFunctionConfig]:
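Two base-class additions land in the hunk above: an abstract `tags` property, and a default `__len__` that delegates to `count_rows`, so `len(table)` now works on every `Table` subclass (the per-subclass `__len__` overrides are deleted later in this diff). A sketch of the tag workflow the property exposes, assuming a local table:

import lancedb

db = lancedb.connect("./.lancedb")
tbl = db.create_table("events", [{"vector": [1.0, 2.0], "kind": "a"}])

tbl.tags.create("baseline", tbl.version)       # pin the current version
tbl.add([{"vector": [0.5, 0.2], "kind": "b"}])
print(len(tbl))                                # 2, via the new base-class __len__

tbl.checkout("baseline")                       # tags resolve wherever versions do
print(len(tbl))                                # 1
tbl.checkout_latest()
tbl.tags.delete("baseline")  # required before the tagged version can be cleaned up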
@@ -709,6 +751,13 @@ class Table(ABC):
         """
         raise NotImplementedError
 
+    @abstractmethod
+    def stats(self) -> TableStatistics:
+        """
+        Retrieve table and fragment statistics.
+        """
+        raise NotImplementedError
+
     @abstractmethod
     def create_scalar_index(
         self,
@@ -857,7 +906,7 @@ class Table(ABC):
         mode: AddMode = "append",
         on_bad_vectors: OnBadVectorsType = "error",
         fill_value: float = 0.0,
-    ):
+    ) -> AddResult:
         """Add more data to the [Table](Table).
 
         Parameters
@@ -879,6 +928,10 @@ class Table(ABC):
         fill_value: float, default 0.
             The value to use when filling vectors. Only used if on_bad_vectors="fill".
 
+        Returns
+        -------
+        AddResult
+            An object containing the new version number of the table after adding data.
         """
         raise NotImplementedError
 
@@ -925,10 +978,12 @@ class Table(ABC):
        >>> table = db.create_table("my_table", data)
        >>> new_data = pa.table({"a": [2, 3, 4], "b": ["x", "y", "z"]})
        >>> # Perform a "upsert" operation
-        >>> table.merge_insert("a")     \\
+        >>> res = table.merge_insert("a")     \\
        ...       .when_matched_update_all()     \\
        ...       .when_not_matched_insert_all() \\
        ...       .execute(new_data)
+        >>> res
+        MergeResult(version=2, num_updated_rows=2, num_inserted_rows=1, num_deleted_rows=0)
        >>> # The order of new rows is non-deterministic since we use
        >>> # a hash-join as part of this operation and so we sort here
        >>> table.to_arrow().sort_by("a").to_pandas()
@@ -937,7 +992,7 @@ class Table(ABC):
        1  2  x
        2  3  y
        3  4  z
-        """
+        """  # noqa: E501
        on = [on] if isinstance(on, str) else list(iter(on))
 
        return LanceMergeInsertBuilder(self, on)
@@ -1052,10 +1107,10 @@ class Table(ABC):
         new_data: DATA,
         on_bad_vectors: OnBadVectorsType,
         fill_value: float,
-    ): ...
+    ) -> MergeResult: ...
 
     @abstractmethod
-    def delete(self, where: str):
+    def delete(self, where: str) -> DeleteResult:
         """Delete rows from the table.
 
         This can be used to delete a single row, many rows, all rows, or
@@ -1070,6 +1125,11 @@ class Table(ABC):
 
         The filter must not be empty, or it will error.
 
+        Returns
+        -------
+        DeleteResult
+            An object containing the new version number of the table after deletion.
+
         Examples
         --------
         >>> import lancedb
@@ -1086,6 +1146,7 @@ class Table(ABC):
        1  2  [3.0, 4.0]
        2  3  [5.0, 6.0]
        >>> table.delete("x = 2")
+        DeleteResult(version=2)
        >>> table.to_pandas()
           x      vector
        0  1  [1.0, 2.0]
@@ -1099,6 +1160,7 @@ class Table(ABC):
        >>> to_remove
        '1, 5'
        >>> table.delete(f"x IN ({to_remove})")
+        DeleteResult(version=3)
        >>> table.to_pandas()
           x      vector
        0  3  [5.0, 6.0]
@@ -1112,7 +1174,7 @@ class Table(ABC):
         values: Optional[dict] = None,
         *,
         values_sql: Optional[Dict[str, str]] = None,
-    ):
+    ) -> UpdateResult:
         """
         This can be used to update zero to all rows depending on how many
         rows match the where clause. If no where clause is provided, then
@@ -1134,6 +1196,12 @@ class Table(ABC):
             reference existing columns. For example, {"x": "x + 1"} will increment
             the x column by 1.
 
+        Returns
+        -------
+        UpdateResult
+            - rows_updated: The number of rows that were updated
+            - version: The new version number of the table after the update
+
         Examples
         --------
         >>> import lancedb
@@ -1147,12 +1215,14 @@ class Table(ABC):
        1  2  [3.0, 4.0]
        2  3  [5.0, 6.0]
        >>> table.update(where="x = 2", values={"vector": [10.0, 10]})
+        UpdateResult(rows_updated=1, version=2)
        >>> table.to_pandas()
           x       vector
        0  1   [1.0, 2.0]
        1  3   [5.0, 6.0]
        2  2  [10.0, 10.0]
        >>> table.update(values_sql={"x": "x + 1"})
+        UpdateResult(rows_updated=3, version=3)
        >>> table.to_pandas()
           x       vector
        0  2   [1.0, 2.0]
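The `Returns` sections and doctest updates above make the mutation results part of the documented contract: `add` and `delete` report the new version, `update` additionally reports `rows_updated`, and `merge_insert(...).execute(...)` reports per-category row counts. A sketch of consuming them together — the table contents are illustrative:

import lancedb

db = lancedb.connect("./.lancedb")
tbl = db.create_table("items", [{"id": 1, "vector": [0.1, 0.2]}])

res = (
    tbl.merge_insert("id")
    .when_matched_update_all()
    .when_not_matched_insert_all()
    .execute([{"id": 1, "vector": [0.9, 0.8]}])
)
assert res.num_updated_rows + res.num_inserted_rows == 1  # exactly one row touched

upd = tbl.update(values_sql={"id": "id + 100"})
print(f"updated {upd.rows_updated} rows, table is now at version {upd.version}")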
@@ -1315,6 +1385,11 @@ class Table(ABC):
             Alternatively, a pyarrow Field or Schema can be provided to add
             new columns with the specified data types. The new columns will
             be initialized with null values.
+
+        Returns
+        -------
+        AddColumnsResult
+            version: the new version number of the table after adding columns.
         """
 
     @abstractmethod
@@ -1340,10 +1415,15 @@ class Table(ABC):
             nullability is not changed. Only non-nullable columns can be changed
             to nullable. Currently, you cannot change a nullable column to
             non-nullable.
+
+        Returns
+        -------
+        AlterColumnsResult
+            version: the new version number of the table after the alteration.
         """
 
     @abstractmethod
-    def drop_columns(self, columns: Iterable[str]):
+    def drop_columns(self, columns: Iterable[str]) -> DropColumnsResult:
         """
         Drop columns from the table.
 
@@ -1351,10 +1431,15 @@ class Table(ABC):
         ----------
         columns : Iterable[str]
             The names of the columns to drop.
+
+        Returns
+        -------
+        DropColumnsResult
+            version: the new version number of the table dropping the columns.
         """
 
     @abstractmethod
-    def checkout(self, version: int):
+    def checkout(self, version: Union[int, str]):
         """
         Checks out a specific version of the Table
 
@@ -1369,6 +1454,12 @@ class Table(ABC):
         Any operation that modifies the table will fail while the table is in a checked
         out state.
 
+        Parameters
+        ----------
+        version: int | str,
+            The version to check out. A version number (`int`) or a tag
+            (`str`) can be provided.
+
         To return the table to a normal state use `[Self::checkout_latest]`
         """
 
@@ -1383,7 +1474,7 @@ class Table(ABC):
         """
 
     @abstractmethod
-    def restore(self, version: Optional[int] = None):
+    def restore(self, version: Optional[Union[int, str]] = None):
         """Restore a version of the table. This is an in-place operation.
 
         This creates a new version where the data is equivalent to the
@@ -1391,9 +1482,10 @@ class Table(ABC):
 
         Parameters
         ----------
-        version : int, default None
-            The version to restore. If unspecified then restores the currently
-            checked out version. If the currently checked out version is the
+        version : int or str, default None
+            The version number or version tag to restore.
+            If unspecified then restores the currently checked out version.
+            If the currently checked out version is the
             latest version then this is a no-op.
         """
 
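`checkout` and `restore` now accept either a numeric version or a tag name, so a pinned state can be addressed symbolically rather than by remembering a version number. A brief sketch:

import lancedb

db = lancedb.connect("./.lancedb")
tbl = db.create_table("snapshots", [{"id": 1}])  # version 1

tbl.tags.create("v1-release", 1)

tbl.checkout(1)              # by version number, as before
tbl.checkout("v1-release")   # new: by tag name

tbl.restore("v1-release")    # restore resolves tags the same way
tbl.checkout_latest()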
@@ -1538,7 +1630,46 @@ class LanceTable(Table):
         """Get the current version of the table"""
         return LOOP.run(self._table.version())
 
-    def checkout(self, version: int):
+    @property
+    def tags(self) -> Tags:
+        """Tag management for the table.
+
+        Similar to Git, tags are a way to add metadata to a specific version of the
+        table.
+
+        .. warning::
+
+            Tagged versions are exempted from the :py:meth:`cleanup_old_versions()`
+            process.
+
+            To remove a version that has been tagged, you must first
+            :py:meth:`~Tags.delete` the associated tag.
+
+        Returns
+        -------
+        Tags
+            The tag manager for managing tags for the table.
+
+        Examples
+        --------
+        >>> import lancedb
+        >>> db = lancedb.connect("./.lancedb")
+        >>> table = db.create_table("my_table",
+        ...     [{"vector": [1.1, 0.9], "type": "vector"}])
+        >>> table.tags.create("v1", table.version)
+        >>> table.add([{"vector": [0.5, 0.2], "type": "vector"}])
+        AddResult(version=2)
+        >>> tags = table.tags.list()
+        >>> print(tags["v1"]["version"])
+        1
+        >>> table.checkout("v1")
+        >>> table.to_pandas()
+               vector    type
+        0  [1.1, 0.9]  vector
+        """
+        return Tags(self._table)
+
+    def checkout(self, version: Union[int, str]):
         """Checkout a version of the table. This is an in-place operation.
 
         This allows viewing previous versions of the table. If you wish to
@@ -1550,8 +1681,9 @@ class LanceTable(Table):
 
         Parameters
         ----------
-        version : int
-            The version to checkout.
+        version: int | str,
+            The version to check out. A version number (`int`) or a tag
+            (`str`) can be provided.
 
         Examples
         --------
@@ -1565,6 +1697,7 @@ class LanceTable(Table):
               vector    type
        0  [1.1, 0.9]  vector
        >>> table.add([{"vector": [0.5, 0.2], "type": "vector"}])
+        AddResult(version=2)
        >>> table.version
        2
        >>> table.checkout(1)
@@ -1582,7 +1715,7 @@ class LanceTable(Table):
         """
         LOOP.run(self._table.checkout_latest())
 
-    def restore(self, version: Optional[int] = None):
+    def restore(self, version: Optional[Union[int, str]] = None):
         """Restore a version of the table. This is an in-place operation.
 
         This creates a new version where the data is equivalent to the
@@ -1590,9 +1723,10 @@ class LanceTable(Table):
 
         Parameters
         ----------
-        version : int, default None
-            The version to restore. If unspecified then restores the currently
-            checked out version. If the currently checked out version is the
+        version : int or str, default None
+            The version number or version tag to restore.
+            If unspecified then restores the currently checked out version.
+            If the currently checked out version is the
             latest version then this is a no-op.
 
         Examples
@@ -1607,14 +1741,23 @@ class LanceTable(Table):
               vector    type
        0  [1.1, 0.9]  vector
        >>> table.add([{"vector": [0.5, 0.2], "type": "vector"}])
+        AddResult(version=2)
        >>> table.version
        2
+        >>> table.tags.create("v2", 2)
        >>> table.restore(1)
        >>> table.to_pandas()
               vector    type
        0  [1.1, 0.9]  vector
        >>> len(table.list_versions())
        3
+        >>> table.restore("v2")
+        >>> table.to_pandas()
+               vector    type
+        0  [1.1, 0.9]  vector
+        1  [0.5, 0.2]  vector
+        >>> len(table.list_versions())
+        4
         """
         if version is not None:
             LOOP.run(self._table.checkout(version))
@@ -1623,9 +1766,6 @@ class LanceTable(Table):
     def count_rows(self, filter: Optional[str] = None) -> int:
         return LOOP.run(self._table.count_rows(filter))
 
-    def __len__(self) -> int:
-        return self.count_rows()
-
     def __repr__(self) -> str:
         val = f"{self.__class__.__name__}(name={self.name!r}, version={self.version}"
         if self._conn.read_consistency_interval is not None:
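One `Tags` capability the doctests above do not exercise is retargeting an existing tag with `update`. A short sketch, building a table with two versions first:

import lancedb

db = lancedb.connect("./.lancedb")
tbl = db.create_table("tagged", [{"x": 1}])  # version 1
tbl.add([{"x": 2}])                          # version 2

tbl.tags.create("prod", 1)
tbl.tags.update("prod", 2)                # move the tag from version 1 to 2
assert tbl.tags.get_version("prod") == 2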
@@ -1801,6 +1941,9 @@ class LanceTable(Table):
     ) -> None:
         return LOOP.run(self._table.wait_for_index(index_names, timeout))
 
+    def stats(self) -> TableStatistics:
+        return LOOP.run(self._table.stats())
+
     def create_scalar_index(
         self,
         column: str,
@@ -1968,7 +2111,7 @@ class LanceTable(Table):
         mode: AddMode = "append",
         on_bad_vectors: OnBadVectorsType = "error",
         fill_value: float = 0.0,
-    ):
+    ) -> AddResult:
         """Add data to the table.
         If vector columns are missing and the table
         has embedding functions, then the vector columns
@@ -1992,7 +2135,7 @@ class LanceTable(Table):
         int
             The number of vectors in the table.
         """
-        LOOP.run(
+        return LOOP.run(
             self._table.add(
                 data, mode=mode, on_bad_vectors=on_bad_vectors, fill_value=fill_value
             )
@@ -2322,8 +2465,8 @@ class LanceTable(Table):
         )
         return self
 
-    def delete(self, where: str):
-        LOOP.run(self._table.delete(where))
+    def delete(self, where: str) -> DeleteResult:
+        return LOOP.run(self._table.delete(where))
 
     def update(
         self,
@@ -2331,7 +2474,7 @@ class LanceTable(Table):
         values: Optional[dict] = None,
         *,
         values_sql: Optional[Dict[str, str]] = None,
-    ):
+    ) -> UpdateResult:
         """
         This can be used to update zero to all rows depending on how many
         rows match the where clause.
@@ -2349,6 +2492,12 @@ class LanceTable(Table):
             reference existing columns. For example, {"x": "x + 1"} will increment
             the x column by 1.
 
+        Returns
+        -------
+        UpdateResult
+            - rows_updated: The number of rows that were updated
+            - version: The new version number of the table after the update
+
         Examples
         --------
         >>> import lancedb
@@ -2362,6 +2511,7 @@ class LanceTable(Table):
        1  2  [3.0, 4.0]
        2  3  [5.0, 6.0]
        >>> table.update(where="x = 2", values={"vector": [10.0, 10]})
+        UpdateResult(rows_updated=1, version=2)
        >>> table.to_pandas()
           x       vector
        0  1   [1.0, 2.0]
@@ -2369,7 +2519,7 @@ class LanceTable(Table):
        2  2  [10.0, 10.0]
 
        """
-        LOOP.run(self._table.update(values, where=where, updates_sql=values_sql))
+        return LOOP.run(self._table.update(values, where=where, updates_sql=values_sql))
 
     def _execute_query(
         self,
@@ -2403,8 +2553,10 @@ class LanceTable(Table):
         new_data: DATA,
         on_bad_vectors: OnBadVectorsType,
         fill_value: float,
-    ):
-        LOOP.run(self._table._do_merge(merge, new_data, on_bad_vectors, fill_value))
+    ) -> MergeResult:
+        return LOOP.run(
+            self._table._do_merge(merge, new_data, on_bad_vectors, fill_value)
+        )
 
     @deprecation.deprecated(
         deprecated_in="0.21.0",
@@ -2546,14 +2698,16 @@ class LanceTable(Table):
 
     def add_columns(
         self, transforms: Dict[str, str] | pa.field | List[pa.field] | pa.Schema
-    ):
-        LOOP.run(self._table.add_columns(transforms))
+    ) -> AddColumnsResult:
+        return LOOP.run(self._table.add_columns(transforms))
 
-    def alter_columns(self, *alterations: Iterable[Dict[str, str]]):
-        LOOP.run(self._table.alter_columns(*alterations))
+    def alter_columns(
+        self, *alterations: Iterable[Dict[str, str]]
+    ) -> AlterColumnsResult:
+        return LOOP.run(self._table.alter_columns(*alterations))
 
-    def drop_columns(self, columns: Iterable[str]):
-        LOOP.run(self._table.drop_columns(columns))
+    def drop_columns(self, columns: Iterable[str]) -> DropColumnsResult:
+        return LOOP.run(self._table.drop_columns(columns))
 
     def uses_v2_manifest_paths(self) -> bool:
         """
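Because `LanceTable` now returns the produced version from every mutation, callers can pin or revisit an exact state without scanning `list_versions()`. A sketch under the same local-table assumptions as above:

import lancedb

db = lancedb.connect("./.lancedb")
tbl = db.create_table("audit", [{"id": 1, "vector": [0.0, 0.0]}])

add_res = tbl.add([{"id": 10, "vector": [0.3, 0.4]}])
tbl.tags.create(f"after-batch-{add_res.version}", add_res.version)

tbl.delete("id = 10")
tbl.checkout(add_res.version)  # inspect the state just before the delete
assert tbl.count_rows() == 2
tbl.checkout_latest()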
@@ -3095,6 +3249,12 @@ class AsyncTable:
         """
         await self._inner.wait_for_index(index_names, timeout)
 
+    async def stats(self) -> TableStatistics:
+        """
+        Retrieve table and fragment statistics.
+        """
+        return await self._inner.stats()
+
     async def add(
         self,
         data: DATA,
@@ -3102,7 +3262,7 @@ class AsyncTable:
         mode: Optional[Literal["append", "overwrite"]] = "append",
         on_bad_vectors: Optional[OnBadVectorsType] = None,
         fill_value: Optional[float] = None,
-    ):
+    ) -> AddResult:
         """Add more data to the [Table](Table).
 
         Parameters
@@ -3141,7 +3301,7 @@ class AsyncTable:
         if isinstance(data, pa.Table):
             data = data.to_reader()
 
-        await self._inner.add(data, mode or "append")
+        return await self._inner.add(data, mode or "append")
 
     def merge_insert(self, on: Union[str, Iterable[str]]) -> LanceMergeInsertBuilder:
         """
@@ -3186,10 +3346,12 @@ class AsyncTable:
        >>> table = db.create_table("my_table", data)
        >>> new_data = pa.table({"a": [2, 3, 4], "b": ["x", "y", "z"]})
        >>> # Perform a "upsert" operation
-        >>> table.merge_insert("a")     \\
+        >>> res = table.merge_insert("a")     \\
        ...       .when_matched_update_all()     \\
        ...       .when_not_matched_insert_all() \\
        ...       .execute(new_data)
+        >>> res
+        MergeResult(version=2, num_updated_rows=2, num_inserted_rows=1, num_deleted_rows=0)
        >>> # The order of new rows is non-deterministic since we use
        >>> # a hash-join as part of this operation and so we sort here
        >>> table.to_arrow().sort_by("a").to_pandas()
@@ -3198,7 +3360,7 @@ class AsyncTable:
        1  2  x
        2  3  y
        3  4  z
-        """
+        """  # noqa: E501
        on = [on] if isinstance(on, str) else list(iter(on))
 
        return LanceMergeInsertBuilder(self, on)
@@ -3529,7 +3691,7 @@ class AsyncTable:
         new_data: DATA,
         on_bad_vectors: OnBadVectorsType,
         fill_value: float,
-    ):
+    ) -> MergeResult:
         schema = await self.schema()
         if on_bad_vectors is None:
             on_bad_vectors = "error"
@@ -3545,7 +3707,7 @@ class AsyncTable:
         )
         if isinstance(data, pa.Table):
             data = pa.RecordBatchReader.from_batches(data.schema, data.to_batches())
-        await self._inner.execute_merge_insert(
+        return await self._inner.execute_merge_insert(
             data,
             dict(
                 on=merge._on,
@@ -3554,10 +3716,11 @@ class AsyncTable:
                 when_not_matched_insert_all=merge._when_not_matched_insert_all,
                 when_not_matched_by_source_delete=merge._when_not_matched_by_source_delete,
                 when_not_matched_by_source_condition=merge._when_not_matched_by_source_condition,
+                timeout=merge._timeout,
             ),
         )
 
-    async def delete(self, where: str):
+    async def delete(self, where: str) -> DeleteResult:
         """Delete rows from the table.
 
         This can be used to delete a single row, many rows, all rows, or
@@ -3588,6 +3751,7 @@ class AsyncTable:
        1  2  [3.0, 4.0]
        2  3  [5.0, 6.0]
        >>> table.delete("x = 2")
+        DeleteResult(version=2)
        >>> table.to_pandas()
           x      vector
        0  1  [1.0, 2.0]
@@ -3601,6 +3765,7 @@ class AsyncTable:
        >>> to_remove
        '1, 5'
        >>> table.delete(f"x IN ({to_remove})")
+        DeleteResult(version=3)
        >>> table.to_pandas()
           x      vector
        0  3  [5.0, 6.0]
@@ -3613,7 +3778,7 @@ class AsyncTable:
         *,
         where: Optional[str] = None,
         updates_sql: Optional[Dict[str, str]] = None,
-    ):
+    ) -> UpdateResult:
         """
         This can be used to update zero to all rows in the table.
 
@@ -3635,6 +3800,13 @@ class AsyncTable:
             literals (e.g. "7" or "'foo'") or they can be expressions based on the
             previous value of the row (e.g. "x + 1" to increment the x column by 1)
 
+        Returns
+        -------
+        UpdateResult
+            An object containing:
+            - rows_updated: The number of rows that were updated
+            - version: The new version number of the table after the update
+
         Examples
         --------
         >>> import asyncio
@@ -3663,7 +3835,7 @@ class AsyncTable:
 
     async def add_columns(
         self, transforms: dict[str, str] | pa.field | List[pa.field] | pa.Schema
-    ):
+    ) -> AddColumnsResult:
         """
         Add new columns with defined values.
 
@@ -3675,6 +3847,12 @@ class AsyncTable:
             each row in the table, and can reference existing columns.
             Alternatively, you can pass a pyarrow field or schema to add
             new columns with NULLs.
+
+        Returns
+        -------
+        AddColumnsResult
+            version: the new version number of the table after adding columns.
+
         """
         if isinstance(transforms, pa.Field):
             transforms = [transforms]
@@ -3683,11 +3861,13 @@ class AsyncTable:
         ):
             transforms = pa.schema(transforms)
         if isinstance(transforms, pa.Schema):
-            await self._inner.add_columns_with_schema(transforms)
+            return await self._inner.add_columns_with_schema(transforms)
         else:
-            await self._inner.add_columns(list(transforms.items()))
+            return await self._inner.add_columns(list(transforms.items()))
 
-    async def alter_columns(self, *alterations: Iterable[dict[str, Any]]):
+    async def alter_columns(
+        self, *alterations: Iterable[dict[str, Any]]
+    ) -> AlterColumnsResult:
         """
         Alter column names and nullability.
 
@@ -3707,8 +3887,13 @@ class AsyncTable:
             nullability is not changed. Only non-nullable columns can be changed
             to nullable. Currently, you cannot change a nullable column to
             non-nullable.
+
+        Returns
+        -------
+        AlterColumnsResult
+            version: the new version number of the table after the alteration.
         """
-        await self._inner.alter_columns(alterations)
+        return await self._inner.alter_columns(alterations)
 
     async def drop_columns(self, columns: Iterable[str]):
         """
@@ -3719,7 +3904,7 @@ class AsyncTable:
         columns : Iterable[str]
             The names of the columns to drop.
         """
-        await self._inner.drop_columns(columns)
+        return await self._inner.drop_columns(columns)
 
     async def version(self) -> int:
         """
@@ -3746,7 +3931,7 @@ class AsyncTable:
 
         return versions
 
-    async def checkout(self, version: int):
+    async def checkout(self, version: int | str):
         """
         Checks out a specific version of the Table
 
@@ -3761,6 +3946,12 @@ class AsyncTable:
         Any operation that modifies the table will fail while the table is in a checked
         out state.
 
+        Parameters
+        ----------
+        version: int | str,
+            The version to check out. A version number (`int`) or a tag
+            (`str`) can be provided.
+
         To return the table to a normal state use `[Self::checkout_latest]`
         """
         try:
@@ -3783,7 +3974,7 @@ class AsyncTable:
         """
         await self._inner.checkout_latest()
 
-    async def restore(self, version: Optional[int] = None):
+    async def restore(self, version: Optional[int | str] = None):
         """
         Restore the table to the currently checked out version
 
@@ -3798,6 +3989,24 @@ class AsyncTable:
         """
         await self._inner.restore(version)
 
+    @property
+    def tags(self) -> AsyncTags:
+        """Tag management for the dataset.
+
+        Similar to Git, tags are a way to add metadata to a specific version of the
+        dataset.
+
+        .. warning::
+
+            Tagged versions are exempted from the
+            :py:meth:`optimize(cleanup_older_than)` process.
+
+            To remove a version that has been tagged, you must first
+            :py:meth:`~Tags.delete` the associated tag.
+
+        """
+        return AsyncTags(self._inner)
+
     async def optimize(
         self,
         *,
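The async table mirrors all of the above: `stats()`, a `tags` property returning `AsyncTags`, tag-aware `checkout`/`restore`, a merge-insert `timeout`, and result objects from every mutation. A sketch of the async flow — the table name is illustrative and assumed to exist:

import asyncio
import lancedb

async def main():
    db = await lancedb.connect_async("./.lancedb")
    tbl = await db.open_table("events")  # assumed to exist

    stats = await tbl.stats()  # TableStatistics dataclass, defined below
    print(stats.num_rows, stats.fragment_stats.num_fragments)

    version = await tbl.version()
    await tbl.tags.create("snapshot", version)
    await tbl.checkout("snapshot")
    await tbl.checkout_latest()

asyncio.run(main())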
@@ -3967,3 +4176,217 @@ class IndexStatistics:
     # a dictionary instead of a class.
     def __getitem__(self, key):
         return getattr(self, key)
+
+
+@dataclass
+class TableStatistics:
+    """
+    Statistics about a table and fragments.
+
+    Attributes
+    ----------
+    total_bytes: int
+        The total number of bytes in the table.
+    num_rows: int
+        The total number of rows in the table.
+    num_indices: int
+        The total number of indices in the table.
+    fragment_stats: FragmentStatistics
+        Statistics about fragments in the table.
+    """
+
+    total_bytes: int
+    num_rows: int
+    num_indices: int
+    fragment_stats: FragmentStatistics
+
+
+@dataclass
+class FragmentStatistics:
+    """
+    Statistics about fragments.
+
+    Attributes
+    ----------
+    num_fragments: int
+        The total number of fragments in the table.
+    num_small_fragments: int
+        The total number of small fragments in the table.
+        Small fragments have low row counts and may need to be compacted.
+    lengths: FragmentSummaryStats
+        Statistics about the number of rows in the table fragments.
+    """
+
+    num_fragments: int
+    num_small_fragments: int
+    lengths: FragmentSummaryStats
+
+
+@dataclass
+class FragmentSummaryStats:
+    """
+    Statistics about fragments sizes
+
+    Attributes
+    ----------
+    min: int
+        The number of rows in the fragment with the fewest rows.
+    max: int
+        The number of rows in the fragment with the most rows.
+    mean: int
+        The mean number of rows in the fragments.
+    p25: int
+        The 25th percentile of number of rows in the fragments.
+    p50: int
+        The 50th percentile of number of rows in the fragments.
+    p75: int
+        The 75th percentile of number of rows in the fragments.
+    p99: int
+        The 99th percentile of number of rows in the fragments.
+    """
+
+    min: int
+    max: int
+    mean: int
+    p25: int
+    p50: int
+    p75: int
+    p99: int
+
+
+class Tags:
+    """
+    Table tag manager.
+    """
+
+    def __init__(self, table):
+        self._table = table
+
+    def list(self) -> Dict[str, Tag]:
+        """
+        List all table tags.
+
+        Returns
+        -------
+        dict[str, Tag]
+            A dictionary mapping tag names to version numbers.
+        """
+        return LOOP.run(self._table.tags.list())
+
+    def get_version(self, tag: str) -> int:
+        """
+        Get the version of a tag.
+
+        Parameters
+        ----------
+        tag: str,
+            The name of the tag to get the version for.
+        """
+        return LOOP.run(self._table.tags.get_version(tag))
+
+    def create(self, tag: str, version: int) -> None:
+        """
+        Create a tag for a given table version.
+
+        Parameters
+        ----------
+        tag: str,
+            The name of the tag to create. This name must be unique among all tag
+            names for the table.
+        version: int,
+            The table version to tag.
+        """
+        LOOP.run(self._table.tags.create(tag, version))
+
+    def delete(self, tag: str) -> None:
+        """
+        Delete tag from the table.
+
+        Parameters
+        ----------
+        tag: str,
+            The name of the tag to delete.
+        """
+        LOOP.run(self._table.tags.delete(tag))
+
+    def update(self, tag: str, version: int) -> None:
+        """
+        Update tag to a new version.
+
+        Parameters
+        ----------
+        tag: str,
+            The name of the tag to update.
+        version: int,
+            The new table version to tag.
+        """
+        LOOP.run(self._table.tags.update(tag, version))
+
+
+class AsyncTags:
+    """
+    Async table tag manager.
+    """
+
+    def __init__(self, table):
+        self._table = table
+
+    async def list(self) -> Dict[str, Tag]:
+        """
+        List all table tags.
+
+        Returns
+        -------
+        dict[str, Tag]
+            A dictionary mapping tag names to version numbers.
+        """
+        return await self._table.tags.list()
+
+    async def get_version(self, tag: str) -> int:
+        """
+        Get the version of a tag.
+
+        Parameters
+        ----------
+        tag: str,
+            The name of the tag to get the version for.
+        """
+        return await self._table.tags.get_version(tag)
+
+    async def create(self, tag: str, version: int) -> None:
+        """
+        Create a tag for a given table version.
+
+        Parameters
+        ----------
+        tag: str,
+            The name of the tag to create. This name must be unique among all tag
+            names for the table.
+        version: int,
+            The table version to tag.
+        """
+        await self._table.tags.create(tag, version)
+
+    async def delete(self, tag: str) -> None:
+        """
+        Delete tag from the table.
+
+        Parameters
+        ----------
+        tag: str,
+            The name of the tag to delete.
+        """
+        await self._table.tags.delete(tag)
+
+    async def update(self, tag: str, version: int) -> None:
+        """
+        Update tag to a new version.
+
+        Parameters
+        ----------
+        tag: str,
+            The name of the tag to update.
+        version: int,
+            The new table version to tag.
+        """
+        await self._table.tags.update(tag, version)
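The statistics dataclasses above nest: `TableStatistics` carries a `FragmentStatistics`, which carries a `FragmentSummaryStats` of row-count percentiles. A sketch of reading them to decide whether compaction is worthwhile — the half-small-fragments threshold here is an arbitrary illustration, not guidance from this change:

import lancedb

db = lancedb.connect("./.lancedb")
tbl = db.open_table("events")  # assumed to exist

stats = tbl.stats()
print(f"{stats.total_bytes} bytes across {stats.num_rows} rows")

frags = stats.fragment_stats
print(frags.lengths.p50)  # median rows per fragment

if frags.num_small_fragments > frags.num_fragments // 2:
    tbl.optimize()  # compaction may help when most fragments are small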
@@ -18,15 +18,19 @@ def test_upsert(mem_db):
|
|||||||
{"id": 1, "name": "Bobby"},
|
{"id": 1, "name": "Bobby"},
|
||||||
{"id": 2, "name": "Charlie"},
|
{"id": 2, "name": "Charlie"},
|
||||||
]
|
]
|
||||||
(
|
res = (
|
||||||
table.merge_insert("id")
|
table.merge_insert("id")
|
||||||
.when_matched_update_all()
|
.when_matched_update_all()
|
||||||
.when_not_matched_insert_all()
|
.when_not_matched_insert_all()
|
||||||
.execute(new_users)
|
.execute(new_users)
|
||||||
)
|
)
|
||||||
table.count_rows() # 3
|
table.count_rows() # 3
|
||||||
|
res # {'num_inserted_rows': 1, 'num_updated_rows': 1, 'num_deleted_rows': 0}
|
||||||
# --8<-- [end:upsert_basic]
|
# --8<-- [end:upsert_basic]
|
||||||
assert table.count_rows() == 3
|
assert table.count_rows() == 3
|
||||||
|
assert res.num_inserted_rows == 1
|
||||||
|
assert res.num_deleted_rows == 0
|
||||||
|
assert res.num_updated_rows == 1
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
@pytest.mark.asyncio
|
||||||
@@ -44,15 +48,22 @@ async def test_upsert_async(mem_db_async):
|
|||||||
{"id": 1, "name": "Bobby"},
|
{"id": 1, "name": "Bobby"},
|
||||||
{"id": 2, "name": "Charlie"},
|
{"id": 2, "name": "Charlie"},
|
||||||
]
|
]
|
||||||
await (
|
res = await (
|
||||||
table.merge_insert("id")
|
table.merge_insert("id")
|
||||||
.when_matched_update_all()
|
.when_matched_update_all()
|
||||||
.when_not_matched_insert_all()
|
.when_not_matched_insert_all()
|
||||||
.execute(new_users)
|
.execute(new_users)
|
||||||
)
|
)
|
||||||
await table.count_rows() # 3
|
await table.count_rows() # 3
|
||||||
|
res
|
||||||
|
# MergeResult(version=2, num_updated_rows=1,
|
||||||
|
+    # num_inserted_rows=1, num_deleted_rows=0)
     # --8<-- [end:upsert_basic_async]
     assert await table.count_rows() == 3
+    assert res.version == 2
+    assert res.num_inserted_rows == 1
+    assert res.num_deleted_rows == 0
+    assert res.num_updated_rows == 1
 
 
 def test_insert_if_not_exists(mem_db):
@@ -69,10 +80,19 @@ def test_insert_if_not_exists(mem_db):
         {"domain": "google.com", "name": "Google"},
         {"domain": "facebook.com", "name": "Facebook"},
     ]
-    (table.merge_insert("domain").when_not_matched_insert_all().execute(new_domains))
+    res = (
+        table.merge_insert("domain").when_not_matched_insert_all().execute(new_domains)
+    )
     table.count_rows()  # 3
+    res
+    # MergeResult(version=2, num_updated_rows=0,
+    # num_inserted_rows=1, num_deleted_rows=0)
     # --8<-- [end:insert_if_not_exists]
     assert table.count_rows() == 3
+    assert res.version == 2
+    assert res.num_inserted_rows == 1
+    assert res.num_deleted_rows == 0
+    assert res.num_updated_rows == 0
 
 
 @pytest.mark.asyncio
@@ -90,12 +110,19 @@ async def test_insert_if_not_exists_async(mem_db_async):
         {"domain": "google.com", "name": "Google"},
         {"domain": "facebook.com", "name": "Facebook"},
     ]
-    await (
+    res = await (
         table.merge_insert("domain").when_not_matched_insert_all().execute(new_domains)
     )
    await table.count_rows()  # 3
-    # --8<-- [end:insert_if_not_exists_async]
+    res
+    # MergeResult(version=2, num_updated_rows=0,
+    # num_inserted_rows=1, num_deleted_rows=0)
+    # --8<-- [end:insert_if_not_exists]
     assert await table.count_rows() == 3
+    assert res.version == 2
+    assert res.num_inserted_rows == 1
+    assert res.num_deleted_rows == 0
+    assert res.num_updated_rows == 0
 
 
 def test_replace_range(mem_db):
@@ -113,7 +140,7 @@ def test_replace_range(mem_db):
     new_chunks = [
         {"doc_id": 1, "chunk_id": 0, "text": "Baz"},
     ]
-    (
+    res = (
         table.merge_insert(["doc_id", "chunk_id"])
         .when_matched_update_all()
         .when_not_matched_insert_all()
@@ -121,8 +148,15 @@ def test_replace_range(mem_db):
         .execute(new_chunks)
     )
     table.count_rows("doc_id = 1")  # 1
-    # --8<-- [end:replace_range]
+    res
+    # MergeResult(version=2, num_updated_rows=1,
+    # num_inserted_rows=0, num_deleted_rows=1)
+    # --8<-- [end:insert_if_not_exists]
     assert table.count_rows("doc_id = 1") == 1
+    assert res.version == 2
+    assert res.num_inserted_rows == 0
+    assert res.num_deleted_rows == 1
+    assert res.num_updated_rows == 1
 
 
 @pytest.mark.asyncio
@@ -141,7 +175,7 @@ async def test_replace_range_async(mem_db_async):
     new_chunks = [
         {"doc_id": 1, "chunk_id": 0, "text": "Baz"},
     ]
-    await (
+    res = await (
         table.merge_insert(["doc_id", "chunk_id"])
         .when_matched_update_all()
         .when_not_matched_insert_all()
@@ -149,5 +183,12 @@ async def test_replace_range_async(mem_db_async):
         .execute(new_chunks)
     )
     await table.count_rows("doc_id = 1")  # 1
-    # --8<-- [end:replace_range_async]
+    res
+    # MergeResult(version=2, num_updated_rows=1,
+    # num_inserted_rows=0, num_deleted_rows=1)
+    # --8<-- [end:insert_if_not_exists]
     assert await table.count_rows("doc_id = 1") == 1
+    assert res.version == 2
+    assert res.num_inserted_rows == 0
+    assert res.num_deleted_rows == 1
+    assert res.num_updated_rows == 1
|||||||
@@ -149,6 +149,24 @@ async def test_async_checkout():
|
|||||||
assert await table.count_rows() == 300
|
assert await table.count_rows() == 300
|
||||||
|
|
||||||
|
|
||||||
|
def test_table_len_sync():
|
||||||
|
def handler(request):
|
||||||
|
if request.path == "/v1/table/test/create/?mode=create":
|
||||||
|
request.send_response(200)
|
||||||
|
request.send_header("Content-Type", "application/json")
|
||||||
|
request.end_headers()
|
||||||
|
request.wfile.write(b"{}")
|
||||||
|
|
||||||
|
request.send_response(200)
|
||||||
|
request.send_header("Content-Type", "application/json")
|
||||||
|
request.end_headers()
|
||||||
|
request.wfile.write(json.dumps(1).encode())
|
||||||
|
|
||||||
|
with mock_lancedb_connection(handler) as db:
|
||||||
|
table = db.create_table("test", [{"id": 1}])
|
||||||
|
assert len(table) == 1
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
@pytest.mark.asyncio
|
||||||
async def test_http_error():
|
async def test_http_error():
|
||||||
request_id_holder = {"request_id": None}
|
request_id_holder = {"request_id": None}
|
||||||
@@ -389,6 +407,50 @@ def test_table_wait_for_index_timeout():
|
|||||||
table.wait_for_index(["id_idx"], timedelta(seconds=1))
|
table.wait_for_index(["id_idx"], timedelta(seconds=1))
|
||||||
|
|
||||||
|
|
||||||
|
def test_stats():
|
||||||
|
stats = {
|
||||||
|
"total_bytes": 38,
|
||||||
|
"num_rows": 2,
|
||||||
|
"num_indices": 0,
|
||||||
|
"fragment_stats": {
|
||||||
|
"num_fragments": 1,
|
||||||
|
"num_small_fragments": 1,
|
||||||
|
"lengths": {
|
||||||
|
"min": 2,
|
||||||
|
"max": 2,
|
||||||
|
"mean": 2,
|
||||||
|
"p25": 2,
|
||||||
|
"p50": 2,
|
||||||
|
"p75": 2,
|
||||||
|
"p99": 2,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
def handler(request):
|
||||||
|
if request.path == "/v1/table/test/create/?mode=create":
|
||||||
|
request.send_response(200)
|
||||||
|
request.send_header("Content-Type", "application/json")
|
||||||
|
request.end_headers()
|
||||||
|
request.wfile.write(b"{}")
|
||||||
|
elif request.path == "/v1/table/test/stats/":
|
||||||
|
request.send_response(200)
|
||||||
|
request.send_header("Content-Type", "application/json")
|
||||||
|
request.end_headers()
|
||||||
|
payload = json.dumps(stats)
|
||||||
|
request.wfile.write(payload.encode())
|
||||||
|
else:
|
||||||
|
print(request.path)
|
||||||
|
request.send_response(404)
|
||||||
|
request.end_headers()
|
||||||
|
|
||||||
|
with mock_lancedb_connection(handler) as db:
|
||||||
|
table = db.create_table("test", [{"id": 1}])
|
||||||
|
res = table.stats()
|
||||||
|
print(f"{res=}")
|
||||||
|
assert res == stats
|
||||||
|
|
||||||
|
|
||||||
@contextlib.contextmanager
|
@contextlib.contextmanager
|
||||||
def query_test_table(query_handler, *, server_version=Version("0.1.0")):
|
def query_test_table(query_handler, *, server_version=Version("0.1.0")):
|
||||||
def handler(request):
|
def handler(request):
|
||||||
|
|||||||
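The `test_stats` handler above pins down the JSON shape returned by the new stats endpoint. A hedged sketch of consuming it from the client side (the fragment-count heuristic is illustrative, not part of the API, and `table` is assumed to be an open sync table):

```python
stats = table.stats()
frag = stats["fragment_stats"]
print(stats["total_bytes"], stats["num_rows"], frag["lengths"]["p50"])

# Illustrative heuristic: many small fragments suggest compaction would help,
# assuming the sync optimize() helper is available on this table type.
if frag["num_small_fragments"] > frag["num_fragments"] // 2:
    table.optimize()
```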
@@ -106,15 +106,22 @@ async def test_update_async(mem_db_async: AsyncConnection):
     table = await mem_db_async.create_table("some_table", data=[{"id": 0}])
     assert await table.count_rows("id == 0") == 1
     assert await table.count_rows("id == 7") == 0
-    await table.update({"id": 7})
+    update_res = await table.update({"id": 7})
+    assert update_res.rows_updated == 1
+    assert update_res.version == 2
     assert await table.count_rows("id == 7") == 1
     assert await table.count_rows("id == 0") == 0
-    await table.add([{"id": 2}])
-    await table.update(where="id % 2 == 0", updates_sql={"id": "5"})
+    add_res = await table.add([{"id": 2}])
+    assert add_res.version == 3
+    update_res = await table.update(where="id % 2 == 0", updates_sql={"id": "5"})
+    assert update_res.rows_updated == 1
+    assert update_res.version == 4
     assert await table.count_rows("id == 7") == 1
     assert await table.count_rows("id == 2") == 0
     assert await table.count_rows("id == 5") == 1
-    await table.update({"id": 10}, where="id == 5")
+    update_res = await table.update({"id": 10}, where="id == 5")
+    assert update_res.rows_updated == 1
+    assert update_res.version == 5
     assert await table.count_rows("id == 10") == 1
 
 
@@ -437,7 +444,8 @@ def test_add_pydantic_model(mem_db: DBConnection):
             content="foo", meta=Metadata(source="bar", timestamp=datetime.now())
         ),
     )
-    tbl.add([expected])
+    add_res = tbl.add([expected])
+    assert add_res.version == 2
 
     result = tbl.search([0.0, 0.0]).limit(1).to_pydantic(LanceSchema)[0]
     assert result == expected
@@ -459,11 +467,12 @@ async def test_add_async(mem_db_async: AsyncConnection):
         ],
     )
     assert await table.count_rows() == 2
-    await table.add(
+    add_res = await table.add(
         data=[
             {"vector": [10.0, 11.0], "item": "baz", "price": 30.0},
         ],
     )
+    assert add_res.version == 2
     assert await table.count_rows() == 3
 
 
@@ -529,6 +538,113 @@ def test_versioning(mem_db: DBConnection):
     assert len(table) == 2
 
 
+def test_tags(mem_db: DBConnection):
+    table = mem_db.create_table(
+        "test",
+        data=[
+            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
+            {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
+        ],
+    )
+
+    table.tags.create("tag1", 1)
+    tags = table.tags.list()
+    assert "tag1" in tags
+    assert tags["tag1"]["version"] == 1
+
+    table.add(
+        data=[
+            {"vector": [10.0, 11.0], "item": "baz", "price": 30.0},
+        ],
+    )
+
+    table.tags.create("tag2", 2)
+    tags = table.tags.list()
+    assert "tag1" in tags
+    assert "tag2" in tags
+    assert tags["tag1"]["version"] == 1
+    assert tags["tag2"]["version"] == 2
+
+    table.tags.delete("tag2")
+    table.tags.update("tag1", 2)
+    tags = table.tags.list()
+    assert "tag1" in tags
+    assert tags["tag1"]["version"] == 2
+
+    table.tags.update("tag1", 1)
+    tags = table.tags.list()
+    assert "tag1" in tags
+    assert tags["tag1"]["version"] == 1
+
+    table.checkout("tag1")
+    assert table.version == 1
+    assert table.count_rows() == 2
+    table.tags.create("tag2", 2)
+    table.checkout("tag2")
+    assert table.version == 2
+    assert table.count_rows() == 3
+    table.checkout_latest()
+    table.add(
+        data=[
+            {"vector": [12.0, 13.0], "item": "baz", "price": 40.0},
+        ],
+    )
+
+
+@pytest.mark.asyncio
+async def test_async_tags(mem_db_async: AsyncConnection):
+    table = await mem_db_async.create_table(
+        "test",
+        data=[
+            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
+            {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
+        ],
+    )
+
+    await table.tags.create("tag1", 1)
+    tags = await table.tags.list()
+    assert "tag1" in tags
+    assert tags["tag1"]["version"] == 1
+
+    await table.add(
+        data=[
+            {"vector": [10.0, 11.0], "item": "baz", "price": 30.0},
+        ],
+    )
+
+    await table.tags.create("tag2", 2)
+    tags = await table.tags.list()
+    assert "tag1" in tags
+    assert "tag2" in tags
+    assert tags["tag1"]["version"] == 1
+    assert tags["tag2"]["version"] == 2
+
+    await table.tags.delete("tag2")
+    await table.tags.update("tag1", 2)
+    tags = await table.tags.list()
+    assert "tag1" in tags
+    assert tags["tag1"]["version"] == 2
+
+    await table.tags.update("tag1", 1)
+    tags = await table.tags.list()
+    assert "tag1" in tags
+    assert tags["tag1"]["version"] == 1
+
+    await table.checkout("tag1")
+    assert await table.version() == 1
+    assert await table.count_rows() == 2
+    await table.tags.create("tag2", 2)
+    await table.checkout("tag2")
+    assert await table.version() == 2
+    assert await table.count_rows() == 3
+    await table.checkout_latest()
+    await table.add(
+        data=[
+            {"vector": [12.0, 13.0], "item": "baz", "price": 40.0},
+        ],
+    )
+
+
 @patch("lancedb.table.AsyncTable.create_index")
 def test_create_index_method(mock_create_index, mem_db: DBConnection):
     table = mem_db.create_table(
@@ -653,6 +769,29 @@ def test_restore(mem_db: DBConnection):
     table.restore(0)
 
 
+def test_restore_with_tags(mem_db: DBConnection):
+    table = mem_db.create_table(
+        "my_table",
+        data=[{"vector": [1.1, 0.9], "type": "vector"}],
+    )
+    tag = "tag1"
+    table.tags.create(tag, 1)
+    table.add([{"vector": [0.5, 0.2], "type": "vector"}])
+    table.restore(tag)
+    assert len(table.list_versions()) == 3
+    assert len(table) == 1
+    expected = table.to_arrow()
+
+    table.add([{"vector": [0.3, 0.3], "type": "vector"}])
+    table.checkout("tag1")
+    table.restore()
+    assert len(table.list_versions()) == 5
+    assert table.to_arrow() == expected
+
+    with pytest.raises(ValueError):
+        table.restore("tag_unknown")
+
+
 def test_merge(tmp_db: DBConnection, tmp_path):
     pytest.importorskip("lance")
     import lance
@@ -688,7 +827,8 @@ def test_delete(mem_db: DBConnection):
     )
     assert len(table) == 2
     assert len(table.list_versions()) == 1
-    table.delete("id=0")
+    delete_res = table.delete("id=0")
+    assert delete_res.version == 2
     assert len(table.list_versions()) == 2
     assert table.version == 2
     assert len(table) == 1
@@ -702,7 +842,9 @@ def test_update(mem_db: DBConnection):
     )
     assert len(table) == 2
     assert len(table.list_versions()) == 1
-    table.update(where="id=0", values={"vector": [1.1, 1.1]})
+    update_res = table.update(where="id=0", values={"vector": [1.1, 1.1]})
+    assert update_res.version == 2
+    assert update_res.rows_updated == 1
     assert len(table.list_versions()) == 2
     assert table.version == 2
     assert len(table) == 2
@@ -791,9 +933,16 @@ def test_merge_insert(mem_db: DBConnection):
     new_data = pa.table({"a": [2, 3, 4], "b": ["x", "y", "z"]})
 
     # upsert
-    table.merge_insert(
-        "a"
-    ).when_matched_update_all().when_not_matched_insert_all().execute(new_data)
+    merge_insert_res = (
+        table.merge_insert("a")
+        .when_matched_update_all()
+        .when_not_matched_insert_all()
+        .execute(new_data, timeout=timedelta(seconds=10))
+    )
+    assert merge_insert_res.version == 2
+    assert merge_insert_res.num_inserted_rows == 1
+    assert merge_insert_res.num_updated_rows == 2
+    assert merge_insert_res.num_deleted_rows == 0
 
     expected = pa.table({"a": [1, 2, 3, 4], "b": ["a", "x", "y", "z"]})
     assert table.to_arrow().sort_by("a") == expected
@@ -801,17 +950,28 @@ def test_merge_insert(mem_db: DBConnection):
     table.restore(version)
 
     # conditional update
-    table.merge_insert("a").when_matched_update_all(where="target.b = 'b'").execute(
-        new_data
+    merge_insert_res = (
+        table.merge_insert("a")
+        .when_matched_update_all(where="target.b = 'b'")
+        .execute(new_data)
     )
+    assert merge_insert_res.version == 4
+    assert merge_insert_res.num_inserted_rows == 0
+    assert merge_insert_res.num_updated_rows == 1
+    assert merge_insert_res.num_deleted_rows == 0
     expected = pa.table({"a": [1, 2, 3], "b": ["a", "x", "c"]})
     assert table.to_arrow().sort_by("a") == expected
 
     table.restore(version)
 
     # insert-if-not-exists
-    table.merge_insert("a").when_not_matched_insert_all().execute(new_data)
+    merge_insert_res = (
+        table.merge_insert("a").when_not_matched_insert_all().execute(new_data)
+    )
+    assert merge_insert_res.version == 6
+    assert merge_insert_res.num_inserted_rows == 1
+    assert merge_insert_res.num_updated_rows == 0
+    assert merge_insert_res.num_deleted_rows == 0
     expected = pa.table({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "z"]})
     assert table.to_arrow().sort_by("a") == expected
 
@@ -820,13 +980,17 @@ def test_merge_insert(mem_db: DBConnection):
     new_data = pa.table({"a": [2, 4], "b": ["x", "z"]})
 
     # replace-range
-    (
+    merge_insert_res = (
         table.merge_insert("a")
         .when_matched_update_all()
         .when_not_matched_insert_all()
        .when_not_matched_by_source_delete("a > 2")
         .execute(new_data)
     )
+    assert merge_insert_res.version == 8
+    assert merge_insert_res.num_inserted_rows == 1
+    assert merge_insert_res.num_updated_rows == 1
+    assert merge_insert_res.num_deleted_rows == 1
 
     expected = pa.table({"a": [1, 2, 4], "b": ["a", "x", "z"]})
     assert table.to_arrow().sort_by("a") == expected
@@ -834,15 +998,27 @@ def test_merge_insert(mem_db: DBConnection):
     table.restore(version)
 
     # replace-range no condition
-    table.merge_insert(
-        "a"
-    ).when_matched_update_all().when_not_matched_insert_all().when_not_matched_by_source_delete().execute(
-        new_data
+    merge_insert_res = (
+        table.merge_insert("a")
+        .when_matched_update_all()
+        .when_not_matched_insert_all()
+        .when_not_matched_by_source_delete()
+        .execute(new_data)
     )
+    assert merge_insert_res.version == 10
+    assert merge_insert_res.num_inserted_rows == 1
+    assert merge_insert_res.num_updated_rows == 1
+    assert merge_insert_res.num_deleted_rows == 2
 
     expected = pa.table({"a": [2, 4], "b": ["x", "z"]})
     assert table.to_arrow().sort_by("a") == expected
 
+    # timeout
+    with pytest.raises(Exception, match="merge insert timed out"):
+        table.merge_insert("a").when_matched_update_all().execute(
+            new_data, timeout=timedelta(0)
+        )
+
 
 # We vary the data format because there are slight differences in how
 # subschemas are handled in different formats
@@ -1371,11 +1547,13 @@ def test_restore_consistency(tmp_path):
 def test_add_columns(mem_db: DBConnection):
     data = pa.table({"id": [0, 1]})
     table = LanceTable.create(mem_db, "my_table", data=data)
-    table.add_columns({"new_col": "id + 2"})
+    add_columns_res = table.add_columns({"new_col": "id + 2"})
+    assert add_columns_res.version == 2
     assert table.to_arrow().column_names == ["id", "new_col"]
     assert table.to_arrow()["new_col"].to_pylist() == [2, 3]
 
-    table.add_columns({"null_int": "cast(null as bigint)"})
+    add_columns_res = table.add_columns({"null_int": "cast(null as bigint)"})
+    assert add_columns_res.version == 3
     assert table.schema.field("null_int").type == pa.int64()
 
 
@@ -1383,7 +1561,8 @@ def test_add_columns(mem_db: DBConnection):
 async def test_add_columns_async(mem_db_async: AsyncConnection):
     data = pa.table({"id": [0, 1]})
     table = await mem_db_async.create_table("my_table", data=data)
-    await table.add_columns({"new_col": "id + 2"})
+    add_columns_res = await table.add_columns({"new_col": "id + 2"})
+    assert add_columns_res.version == 2
     data = await table.to_arrow()
     assert data.column_names == ["id", "new_col"]
     assert data["new_col"].to_pylist() == [2, 3]
@@ -1393,9 +1572,10 @@ async def test_add_columns_async(mem_db_async: AsyncConnection):
 async def test_add_columns_with_schema(mem_db_async: AsyncConnection):
     data = pa.table({"id": [0, 1]})
     table = await mem_db_async.create_table("my_table", data=data)
-    await table.add_columns(
+    add_columns_res = await table.add_columns(
         [pa.field("x", pa.int64()), pa.field("vector", pa.list_(pa.float32(), 8))]
     )
+    assert add_columns_res.version == 2
 
     assert await table.schema() == pa.schema(
         [
@@ -1406,11 +1586,12 @@ async def test_add_columns_with_schema(mem_db_async: AsyncConnection):
     )
 
     table = await mem_db_async.create_table("table2", data=data)
-    await table.add_columns(
+    add_columns_res = await table.add_columns(
         pa.schema(
             [pa.field("y", pa.int64()), pa.field("emb", pa.list_(pa.float32(), 8))]
         )
     )
+    assert add_columns_res.version == 2
     assert await table.schema() == pa.schema(
         [
             pa.field("id", pa.int64()),
@@ -1423,7 +1604,8 @@ async def test_add_columns_with_schema(mem_db_async: AsyncConnection):
 def test_alter_columns(mem_db: DBConnection):
     data = pa.table({"id": [0, 1]})
     table = mem_db.create_table("my_table", data=data)
-    table.alter_columns({"path": "id", "rename": "new_id"})
+    alter_columns_res = table.alter_columns({"path": "id", "rename": "new_id"})
+    assert alter_columns_res.version == 2
     assert table.to_arrow().column_names == ["new_id"]
 
 
@@ -1431,9 +1613,13 @@ def test_alter_columns(mem_db: DBConnection):
 async def test_alter_columns_async(mem_db_async: AsyncConnection):
     data = pa.table({"id": [0, 1]})
     table = await mem_db_async.create_table("my_table", data=data)
-    await table.alter_columns({"path": "id", "rename": "new_id"})
+    alter_columns_res = await table.alter_columns({"path": "id", "rename": "new_id"})
+    assert alter_columns_res.version == 2
     assert (await table.to_arrow()).column_names == ["new_id"]
-    await table.alter_columns(dict(path="new_id", data_type=pa.int16(), nullable=True))
+    alter_columns_res = await table.alter_columns(
+        dict(path="new_id", data_type=pa.int16(), nullable=True)
+    )
+    assert alter_columns_res.version == 3
     data = await table.to_arrow()
     assert data.column(0).type == pa.int16()
     assert data.schema.field(0).nullable
@@ -1442,7 +1628,8 @@ async def test_alter_columns_async(mem_db_async: AsyncConnection):
 def test_drop_columns(mem_db: DBConnection):
     data = pa.table({"id": [0, 1], "category": ["a", "b"]})
     table = mem_db.create_table("my_table", data=data)
-    table.drop_columns(["category"])
+    drop_columns_res = table.drop_columns(["category"])
+    assert drop_columns_res.version == 2
     assert table.to_arrow().column_names == ["id"]
 
 
@@ -1450,7 +1637,8 @@ def test_drop_columns(mem_db: DBConnection):
 async def test_drop_columns_async(mem_db_async: AsyncConnection):
     data = pa.table({"id": [0, 1], "category": ["a", "b"]})
     table = await mem_db_async.create_table("my_table", data=data)
-    await table.drop_columns(["category"])
+    drop_columns_res = await table.drop_columns(["category"])
+    assert drop_columns_res.version == 2
     assert (await table.to_arrow()).column_names == ["id"]
 
 
@@ -1588,3 +1776,31 @@ def test_replace_field_metadata(tmp_path):
     schema = table.schema
     field = schema[0].metadata
     assert field == {b"foo": b"bar"}
+
+
+def test_stats(mem_db: DBConnection):
+    table = mem_db.create_table(
+        "my_table",
+        data=[{"text": "foo", "id": 0}, {"text": "bar", "id": 1}],
+    )
+    assert len(table) == 2
+    stats = table.stats()
+    print(f"{stats=}")
+    assert stats == {
+        "total_bytes": 38,
+        "num_rows": 2,
+        "num_indices": 0,
+        "fragment_stats": {
+            "num_fragments": 1,
+            "num_small_fragments": 1,
+            "lengths": {
+                "min": 2,
+                "max": 2,
+                "mean": 2,
+                "p25": 2,
+                "p50": 2,
+                "p75": 2,
+                "p99": 2,
+            },
+        },
+    }
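The new tag tests above exercise a small, self-contained API. A sketch of the sync surface under the same semantics (the table name, data, and tag name are illustrative):

```python
import lancedb

db = lancedb.connect("data/example-db")  # illustrative path
table = db.create_table("docs", [{"id": 1}, {"id": 2}])

table.tags.create("v1-baseline", table.version)  # pin the current version
table.add([{"id": 3}])

# A tag behaves like a named version: check it out, then return to latest.
table.checkout("v1-baseline")
assert table.count_rows() == 2
table.checkout_latest()

# list() maps each tag name to a {"version": ..., "manifest_size": ...} dict.
print(table.tags.list())
```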
@@ -11,7 +11,10 @@ use pyo3::{
     wrap_pyfunction, Bound, PyResult, Python,
 };
 use query::{FTSQuery, HybridQuery, Query, VectorQuery};
-use table::Table;
+use table::{
+    AddColumnsResult, AddResult, AlterColumnsResult, DeleteResult, DropColumnsResult, MergeResult,
+    Table, UpdateResult,
+};
 
 pub mod arrow;
 pub mod connection;
@@ -35,6 +38,13 @@ pub fn _lancedb(_py: Python, m: &Bound<'_, PyModule>) -> PyResult<()> {
     m.add_class::<HybridQuery>()?;
     m.add_class::<VectorQuery>()?;
     m.add_class::<RecordBatchStream>()?;
+    m.add_class::<AddColumnsResult>()?;
+    m.add_class::<AlterColumnsResult>()?;
+    m.add_class::<AddResult>()?;
+    m.add_class::<MergeResult>()?;
+    m.add_class::<DeleteResult>()?;
+    m.add_class::<DropColumnsResult>()?;
+    m.add_class::<UpdateResult>()?;
     m.add_function(wrap_pyfunction!(connect, m)?)?;
     m.add_function(wrap_pyfunction!(util::validate_table_name, m)?)?;
     m.add("__version__", env!("CARGO_PKG_VERSION"))?;
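With these result classes registered on the `_lancedb` extension module, each table mutation surfaces a typed object whose `__repr__` matches the formats defined in `table.rs` below. A short illustration (the version numbers are examples, and `table` is assumed from the earlier sketches):

```python
res = table.delete("id = 0")
print(repr(res))  # DeleteResult(version=2)

res = table.update(where="id = 1", values={"id": 5})
print(repr(res))  # UpdateResult(rows_updated=1, version=3)
```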
@@ -2,6 +2,11 @@
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 use std::{collections::HashMap, sync::Arc};
 
+use crate::{
+    error::PythonErrorExt,
+    index::{extract_index_params, IndexConfig},
+    query::Query,
+};
 use arrow::{
     datatypes::{DataType, Schema},
     ffi_stream::ArrowArrayStreamReader,
@@ -19,12 +24,6 @@ use pyo3::{
 };
 use pyo3_async_runtimes::tokio::future_into_py;
-
-use crate::{
-    error::PythonErrorExt,
-    index::{extract_index_params, IndexConfig},
-    query::Query,
-};
 
 /// Statistics about a compaction operation.
 #[pyclass(get_all)]
 #[derive(Clone, Debug)]
@@ -59,6 +58,170 @@ pub struct OptimizeStats {
     pub prune: RemovalStats,
 }
+
+#[pyclass(get_all)]
+#[derive(Clone, Debug)]
+pub struct UpdateResult {
+    pub rows_updated: u64,
+    pub version: u64,
+}
+
+#[pymethods]
+impl UpdateResult {
+    pub fn __repr__(&self) -> String {
+        format!(
+            "UpdateResult(rows_updated={}, version={})",
+            self.rows_updated, self.version
+        )
+    }
+}
+
+impl From<lancedb::table::UpdateResult> for UpdateResult {
+    fn from(result: lancedb::table::UpdateResult) -> Self {
+        Self {
+            rows_updated: result.rows_updated,
+            version: result.version,
+        }
+    }
+}
+
+#[pyclass(get_all)]
+#[derive(Clone, Debug)]
+pub struct AddResult {
+    pub version: u64,
+}
+
+#[pymethods]
+impl AddResult {
+    pub fn __repr__(&self) -> String {
+        format!("AddResult(version={})", self.version)
+    }
+}
+
+impl From<lancedb::table::AddResult> for AddResult {
+    fn from(result: lancedb::table::AddResult) -> Self {
+        Self {
+            version: result.version,
+        }
+    }
+}
+
+#[pyclass(get_all)]
+#[derive(Clone, Debug)]
+pub struct DeleteResult {
+    pub version: u64,
+}
+
+#[pymethods]
+impl DeleteResult {
+    pub fn __repr__(&self) -> String {
+        format!("DeleteResult(version={})", self.version)
+    }
+}
+
+impl From<lancedb::table::DeleteResult> for DeleteResult {
+    fn from(result: lancedb::table::DeleteResult) -> Self {
+        Self {
+            version: result.version,
+        }
+    }
+}
+
+#[pyclass(get_all)]
+#[derive(Clone, Debug)]
+pub struct MergeResult {
+    pub version: u64,
+    pub num_updated_rows: u64,
+    pub num_inserted_rows: u64,
+    pub num_deleted_rows: u64,
+}
+
+#[pymethods]
+impl MergeResult {
+    pub fn __repr__(&self) -> String {
+        format!(
+            "MergeResult(version={}, num_updated_rows={}, num_inserted_rows={}, num_deleted_rows={})",
+            self.version,
+            self.num_updated_rows,
+            self.num_inserted_rows,
+            self.num_deleted_rows
+        )
+    }
+}
+
+impl From<lancedb::table::MergeResult> for MergeResult {
+    fn from(result: lancedb::table::MergeResult) -> Self {
+        Self {
+            version: result.version,
+            num_updated_rows: result.num_updated_rows,
+            num_inserted_rows: result.num_inserted_rows,
+            num_deleted_rows: result.num_deleted_rows,
+        }
+    }
+}
+
+#[pyclass(get_all)]
+#[derive(Clone, Debug)]
+pub struct AddColumnsResult {
+    pub version: u64,
+}
+
+#[pymethods]
+impl AddColumnsResult {
+    pub fn __repr__(&self) -> String {
+        format!("AddColumnsResult(version={})", self.version)
+    }
+}
+
+impl From<lancedb::table::AddColumnsResult> for AddColumnsResult {
+    fn from(result: lancedb::table::AddColumnsResult) -> Self {
+        Self {
+            version: result.version,
+        }
+    }
+}
+
+#[pyclass(get_all)]
+#[derive(Clone, Debug)]
+pub struct AlterColumnsResult {
+    pub version: u64,
+}
+
+#[pymethods]
+impl AlterColumnsResult {
+    pub fn __repr__(&self) -> String {
+        format!("AlterColumnsResult(version={})", self.version)
+    }
+}
+
+impl From<lancedb::table::AlterColumnsResult> for AlterColumnsResult {
+    fn from(result: lancedb::table::AlterColumnsResult) -> Self {
+        Self {
+            version: result.version,
+        }
+    }
+}
+
+#[pyclass(get_all)]
+#[derive(Clone, Debug)]
+pub struct DropColumnsResult {
+    pub version: u64,
+}
+
+#[pymethods]
+impl DropColumnsResult {
+    pub fn __repr__(&self) -> String {
+        format!("DropColumnsResult(version={})", self.version)
+    }
+}
+
+impl From<lancedb::table::DropColumnsResult> for DropColumnsResult {
+    fn from(result: lancedb::table::DropColumnsResult) -> Self {
+        Self {
+            version: result.version,
+        }
+    }
+}
+
 #[pyclass]
 pub struct Table {
     // We keep a copy of the name to use if the inner table is dropped
@@ -133,15 +296,16 @@ impl Table {
         }
 
         future_into_py(self_.py(), async move {
-            op.execute().await.infer_error()?;
-            Ok(())
+            let result = op.execute().await.infer_error()?;
+            Ok(AddResult::from(result))
         })
     }
 
     pub fn delete(self_: PyRef<'_, Self>, condition: String) -> PyResult<Bound<'_, PyAny>> {
         let inner = self_.inner_ref()?.clone();
         future_into_py(self_.py(), async move {
-            inner.delete(&condition).await.infer_error()
+            let result = inner.delete(&condition).await.infer_error()?;
+            Ok(DeleteResult::from(result))
         })
     }
 
@@ -161,8 +325,8 @@ impl Table {
             op = op.column(column_name, value);
         }
         future_into_py(self_.py(), async move {
-            op.execute().await.infer_error()?;
-            Ok(())
+            let result = op.execute().await.infer_error()?;
+            Ok(UpdateResult::from(result))
         })
     }
 
@@ -280,6 +444,40 @@ impl Table {
         })
     }
 
+    pub fn stats(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
+        let inner = self_.inner_ref()?.clone();
+        future_into_py(self_.py(), async move {
+            let stats = inner.stats().await.infer_error()?;
+            Python::with_gil(|py| {
+                let dict = PyDict::new(py);
+                dict.set_item("total_bytes", stats.total_bytes)?;
+                dict.set_item("num_rows", stats.num_rows)?;
+                dict.set_item("num_indices", stats.num_indices)?;
+
+                let fragment_stats = PyDict::new(py);
+                fragment_stats.set_item("num_fragments", stats.fragment_stats.num_fragments)?;
+                fragment_stats.set_item(
+                    "num_small_fragments",
+                    stats.fragment_stats.num_small_fragments,
+                )?;
+
+                let fragment_lengths = PyDict::new(py);
+                fragment_lengths.set_item("min", stats.fragment_stats.lengths.min)?;
+                fragment_lengths.set_item("max", stats.fragment_stats.lengths.max)?;
+                fragment_lengths.set_item("mean", stats.fragment_stats.lengths.mean)?;
+                fragment_lengths.set_item("p25", stats.fragment_stats.lengths.p25)?;
+                fragment_lengths.set_item("p50", stats.fragment_stats.lengths.p50)?;
+                fragment_lengths.set_item("p75", stats.fragment_stats.lengths.p75)?;
+                fragment_lengths.set_item("p99", stats.fragment_stats.lengths.p99)?;
+
+                fragment_stats.set_item("lengths", fragment_lengths)?;
+                dict.set_item("fragment_stats", fragment_stats)?;
+
+                Ok(Some(dict.unbind()))
+            })
+        })
+    }
+
     pub fn __repr__(&self) -> String {
         match &self.inner {
             None => format!("ClosedTable({})", self.name),
@@ -322,10 +520,16 @@ impl Table {
         })
     }
 
-    pub fn checkout(self_: PyRef<'_, Self>, version: u64) -> PyResult<Bound<'_, PyAny>> {
+    pub fn checkout(self_: PyRef<'_, Self>, version: LanceVersion) -> PyResult<Bound<'_, PyAny>> {
         let inner = self_.inner_ref()?.clone();
-        future_into_py(self_.py(), async move {
-            inner.checkout(version).await.infer_error()
+        let py = self_.py();
+        future_into_py(py, async move {
+            match version {
+                LanceVersion::Version(version_num) => {
+                    inner.checkout(version_num).await.infer_error()
+                }
+                LanceVersion::Tag(tag) => inner.checkout_tag(&tag).await.infer_error(),
+            }
         })
     }
 
@@ -337,12 +541,19 @@ impl Table {
     }
 
     #[pyo3(signature = (version=None))]
-    pub fn restore(self_: PyRef<'_, Self>, version: Option<u64>) -> PyResult<Bound<'_, PyAny>> {
+    pub fn restore(
+        self_: PyRef<'_, Self>,
+        version: Option<LanceVersion>,
+    ) -> PyResult<Bound<'_, PyAny>> {
         let inner = self_.inner_ref()?.clone();
-        future_into_py(self_.py(), async move {
+        let py = self_.py();
+
+        future_into_py(py, async move {
             if let Some(version) = version {
-                inner.checkout(version).await.infer_error()?;
+                match version {
+                    LanceVersion::Version(num) => inner.checkout(num).await.infer_error()?,
+                    LanceVersion::Tag(tag) => inner.checkout_tag(&tag).await.infer_error()?,
+                }
             }
             inner.restore().await.infer_error()
         })
@@ -352,6 +563,11 @@ impl Table {
         Query::new(self.inner_ref().unwrap().query())
     }
 
+    #[getter]
+    pub fn tags(&self) -> PyResult<Tags> {
+        Ok(Tags::new(self.inner_ref()?.clone()))
+    }
+
     /// Optimize the on-disk data by compacting and pruning old data, for better performance.
     #[pyo3(signature = (cleanup_since_ms=None, delete_unverified=None, retrain=None))]
     pub fn optimize(
@@ -433,10 +649,13 @@ impl Table {
             builder
                 .when_not_matched_by_source_delete(parameters.when_not_matched_by_source_condition);
         }
+        if let Some(timeout) = parameters.timeout {
+            builder.timeout(timeout);
+        }
 
         future_into_py(self_.py(), async move {
-            builder.execute(Box::new(batches)).await.infer_error()?;
-            Ok(())
+            let res = builder.execute(Box::new(batches)).await.infer_error()?;
+            Ok(MergeResult::from(res))
         })
     }
 
@@ -472,8 +691,8 @@ impl Table {
 
         let inner = self_.inner_ref()?.clone();
         future_into_py(self_.py(), async move {
-            inner.add_columns(definitions, None).await.infer_error()?;
-            Ok(())
+            let result = inner.add_columns(definitions, None).await.infer_error()?;
+            Ok(AddColumnsResult::from(result))
         })
     }
 
@@ -486,8 +705,8 @@ impl Table {
 
         let inner = self_.inner_ref()?.clone();
         future_into_py(self_.py(), async move {
-            inner.add_columns(transform, None).await.infer_error()?;
-            Ok(())
+            let result = inner.add_columns(transform, None).await.infer_error()?;
+            Ok(AddColumnsResult::from(result))
        })
     }
 
@@ -530,8 +749,8 @@ impl Table {
 
         let inner = self_.inner_ref()?.clone();
         future_into_py(self_.py(), async move {
-            inner.alter_columns(&alterations).await.infer_error()?;
-            Ok(())
+            let result = inner.alter_columns(&alterations).await.infer_error()?;
+            Ok(AlterColumnsResult::from(result))
         })
     }
 
@@ -539,8 +758,8 @@ impl Table {
         let inner = self_.inner_ref()?.clone();
         future_into_py(self_.py(), async move {
            let column_refs = columns.iter().map(String::as_str).collect::<Vec<&str>>();
-            inner.drop_columns(&column_refs).await.infer_error()?;
-            Ok(())
+            let result = inner.drop_columns(&column_refs).await.infer_error()?;
+            Ok(DropColumnsResult::from(result))
         })
     }
 
@@ -576,6 +795,12 @@ impl Table {
     }
 }
 
+#[derive(FromPyObject)]
+pub enum LanceVersion {
+    Version(u64),
+    Tag(String),
+}
+
 #[derive(FromPyObject)]
 #[pyo3(from_item_all)]
 pub struct MergeInsertParams {
@@ -585,4 +810,74 @@ pub struct MergeInsertParams {
     when_not_matched_insert_all: bool,
     when_not_matched_by_source_delete: bool,
     when_not_matched_by_source_condition: Option<String>,
+    timeout: Option<std::time::Duration>,
+}
+
+#[pyclass]
+pub struct Tags {
+    inner: LanceDbTable,
+}
+
+impl Tags {
+    pub fn new(table: LanceDbTable) -> Self {
+        Self { inner: table }
+    }
+}
+
+#[pymethods]
+impl Tags {
+    pub fn list(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
+        let inner = self_.inner.clone();
+        future_into_py(self_.py(), async move {
+            let tags = inner.tags().await.infer_error()?;
+            let res = tags.list().await.infer_error()?;
+
+            Python::with_gil(|py| {
+                let py_dict = PyDict::new(py);
+                for (key, contents) in res {
+                    let value_dict = PyDict::new(py);
+                    value_dict.set_item("version", contents.version)?;
+                    value_dict.set_item("manifest_size", contents.manifest_size)?;
+                    py_dict.set_item(key, value_dict)?;
+                }
+                Ok(py_dict.unbind())
+            })
+        })
+    }
+
+    pub fn get_version(self_: PyRef<'_, Self>, tag: String) -> PyResult<Bound<'_, PyAny>> {
+        let inner = self_.inner.clone();
+        future_into_py(self_.py(), async move {
+            let tags = inner.tags().await.infer_error()?;
+            let res = tags.get_version(tag.as_str()).await.infer_error()?;
+            Ok(res)
+        })
+    }
+
+    pub fn create(self_: PyRef<Self>, tag: String, version: u64) -> PyResult<Bound<PyAny>> {
+        let inner = self_.inner.clone();
+        future_into_py(self_.py(), async move {
+            let mut tags = inner.tags().await.infer_error()?;
+            tags.create(tag.as_str(), version).await.infer_error()?;
+            Ok(())
+        })
+    }
+
+    pub fn delete(self_: PyRef<Self>, tag: String) -> PyResult<Bound<PyAny>> {
+        let inner = self_.inner.clone();
+        future_into_py(self_.py(), async move {
+            let mut tags = inner.tags().await.infer_error()?;
+            tags.delete(tag.as_str()).await.infer_error()?;
+            Ok(())
+        })
+    }
+
+    pub fn update(self_: PyRef<Self>, tag: String, version: u64) -> PyResult<Bound<PyAny>> {
+        let inner = self_.inner.clone();
+        future_into_py(self_.py(), async move {
+            let mut tags = inner.tags().await.infer_error()?;
+            tags.update(tag.as_str(), version).await.infer_error()?;
+            Ok(())
+        })
+    }
 }
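Because `LanceVersion` derives `FromPyObject` over both variants, `checkout` and `restore` now accept either an integer version or a tag string from Python. A sketch (the tag name continues the earlier example):

```python
table.checkout(2)              # numeric version, as before
table.checkout("v1-baseline")  # tag name; dispatched to checkout_tag internally
table.restore("v1-baseline")   # restore accepts either form as well
```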
@@ -1,6 +1,6 @@
 [package]
 name = "lancedb-node"
-version = "0.19.0-beta.11"
+version = "0.19.1-beta.3"
 description = "Serverless, low-latency vector database for AI applications"
 license.workspace = true
 edition.workspace = true
@@ -1,6 +1,6 @@
 [package]
 name = "lancedb"
-version = "0.19.0-beta.11"
+version = "0.19.1-beta.3"
 edition.workspace = true
 description = "LanceDB: A serverless, low-latency vector database for AI applications"
 license.workspace = true
File diff suppressed because it is too large
@@ -14,7 +14,7 @@ use datafusion_physical_plan::projection::ProjectionExec;
|
|||||||
use datafusion_physical_plan::repartition::RepartitionExec;
|
use datafusion_physical_plan::repartition::RepartitionExec;
|
||||||
use datafusion_physical_plan::union::UnionExec;
|
use datafusion_physical_plan::union::UnionExec;
|
||||||
use datafusion_physical_plan::ExecutionPlan;
|
use datafusion_physical_plan::ExecutionPlan;
|
||||||
use futures::{StreamExt, TryStreamExt};
|
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt};
|
||||||
use lance::dataset::builder::DatasetBuilder;
|
use lance::dataset::builder::DatasetBuilder;
|
||||||
use lance::dataset::cleanup::RemovalStats;
|
use lance::dataset::cleanup::RemovalStats;
|
||||||
use lance::dataset::optimize::{compact_files, CompactionMetrics, IndexRemapperOptions};
|
use lance::dataset::optimize::{compact_files, CompactionMetrics, IndexRemapperOptions};
|
||||||
@@ -80,9 +80,13 @@ pub mod merge;
|
|||||||
|
|
||||||
use crate::index::waiter::wait_for_index;
|
use crate::index::waiter::wait_for_index;
|
||||||
pub use chrono::Duration;
|
pub use chrono::Duration;
|
||||||
|
use futures::future::{join_all, Either};
|
||||||
pub use lance::dataset::optimize::CompactionOptions;
|
pub use lance::dataset::optimize::CompactionOptions;
|
||||||
|
pub use lance::dataset::refs::{TagContents, Tags as LanceTags};
|
||||||
pub use lance::dataset::scanner::DatasetRecordBatchStream;
|
pub use lance::dataset::scanner::DatasetRecordBatchStream;
|
||||||
|
use lance::dataset::statistics::DatasetStatisticsExt;
|
||||||
pub use lance_index::optimize::OptimizeOptions;
|
pub use lance_index::optimize::OptimizeOptions;
|
||||||
|
use serde_with::skip_serializing_none;
|
||||||
|
|
||||||
/// Defines the type of column
|
/// Defines the type of column
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
@@ -307,7 +311,7 @@ impl<T: IntoArrow> AddDataBuilder<T> {
|
|||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn execute(self) -> Result<()> {
|
pub async fn execute(self) -> Result<AddResult> {
|
||||||
let parent = self.parent.clone();
|
let parent = self.parent.clone();
|
||||||
let data = self.data.into_arrow()?;
|
let data = self.data.into_arrow()?;
|
||||||
let without_data = AddDataBuilder::<NoData> {
|
let without_data = AddDataBuilder::<NoData> {
|
||||||
@@ -375,8 +379,8 @@ impl UpdateBuilder {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Executes the update operation.
|
/// Executes the update operation.
|
||||||
/// Returns the number of rows that were updated.
|
/// Returns the update result
|
||||||
pub async fn execute(self) -> Result<u64> {
|
pub async fn execute(self) -> Result<UpdateResult> {
|
||||||
if self.columns.is_empty() {
|
if self.columns.is_empty() {
|
||||||
Err(Error::InvalidInput {
|
Err(Error::InvalidInput {
|
||||||
message: "at least one column must be specified in an update operation".to_string(),
|
message: "at least one column must be specified in an update operation".to_string(),
|
||||||
@@ -401,6 +405,100 @@ pub enum AnyQuery {
|
|||||||
VectorQuery(VectorQueryRequest),
|
VectorQuery(VectorQueryRequest),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
pub trait Tags: Send + Sync {
|
||||||
|
/// List the tags of the table.
|
||||||
|
async fn list(&self) -> Result<HashMap<String, TagContents>>;
|
||||||
|
|
||||||
|
/// Get the version of the table referenced by a tag.
|
||||||
|
async fn get_version(&self, tag: &str) -> Result<u64>;
|
||||||
|
|
||||||
|
/// Create a new tag for the given version of the table.
|
||||||
|
async fn create(&mut self, tag: &str, version: u64) -> Result<()>;
|
||||||
|
|
||||||
|
/// Delete a tag from the table.
|
||||||
|
async fn delete(&mut self, tag: &str) -> Result<()>;
|
||||||
|
|
||||||
|
/// Update an existing tag to point to a new version of the table.
|
||||||
|
async fn update(&mut self, tag: &str, version: u64) -> Result<()>;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||||
|
pub struct UpdateResult {
|
||||||
|
#[serde(default)]
|
||||||
|
pub rows_updated: u64,
|
||||||
|
// The commit version associated with the operation.
|
||||||
|
// A version of `0` indicates compatibility with legacy servers that do not return
|
||||||
|
/// a commit version.
|
||||||
|
#[serde(default)]
|
||||||
|
pub version: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||||
|
pub struct AddResult {
|
||||||
|
// The commit version associated with the operation.
|
||||||
|
// A version of `0` indicates compatibility with legacy servers that do not return
|
||||||
|
/// a commit version.
|
||||||
|
#[serde(default)]
|
||||||
|
pub version: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||||
|
pub struct DeleteResult {
|
||||||
|
// The commit version associated with the operation.
|
||||||
|
// A version of `0` indicates compatibility with legacy servers that do not return
|
||||||
|
/// a commit version.
|
||||||
|
#[serde(default)]
|
||||||
|
pub version: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||||
|
pub struct MergeResult {
|
||||||
|
// The commit version associated with the operation.
|
||||||
|
// A version of `0` indicates compatibility with legacy servers that do not return
|
||||||
|
/// a commit version.
|
||||||
|
#[serde(default)]
|
||||||
|
pub version: u64,
|
||||||
|
/// Number of inserted rows (for user statistics)
|
||||||
|
#[serde(default)]
|
||||||
|
pub num_inserted_rows: u64,
|
||||||
|
/// Number of updated rows (for user statistics)
|
||||||
|
#[serde(default)]
|
||||||
|
pub num_updated_rows: u64,
|
||||||
|
/// Number of deleted rows (for user statistics)
|
||||||
|
/// Note: This is different from internal references to 'deleted_rows', since we technically "delete" updated rows during processing.
|
||||||
|
/// However those rows are not shared with the user.
|
||||||
|
#[serde(default)]
|
||||||
|
pub num_deleted_rows: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||||
|
pub struct AddColumnsResult {
|
||||||
|
// The commit version associated with the operation.
|
||||||
|
// A version of `0` indicates compatibility with legacy servers that do not return
|
||||||
|
/// a commit version.
|
||||||
|
#[serde(default)]
|
||||||
|
pub version: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||||
|
pub struct AlterColumnsResult {
|
||||||
|
// The commit version associated with the operation.
|
||||||
|
// A version of `0` indicates compatibility with legacy servers that do not return
|
||||||
|
/// a commit version.
|
||||||
|
#[serde(default)]
|
||||||
|
pub version: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||||
|
pub struct DropColumnsResult {
|
||||||
|
// The commit version associated with the operation.
|
||||||
|
// A version of `0` indicates compatibility with legacy servers that do not return
|
||||||
|
/// a commit version.
|
||||||
|
#[serde(default)]
|
||||||
|
pub version: u64,
|
||||||
|
}
|
||||||
|
|
||||||
/// A trait for anything "table-like". This is used for both native tables (which target
|
/// A trait for anything "table-like". This is used for both native tables (which target
|
||||||
/// Lance datasets) and remote tables (which target LanceDB cloud)
|
/// Lance datasets) and remote tables (which target LanceDB cloud)
|
||||||
///
|
///
|
||||||
@@ -445,11 +543,11 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {
         &self,
         add: AddDataBuilder<NoData>,
         data: Box<dyn arrow_array::RecordBatchReader + Send>,
-    ) -> Result<()>;
+    ) -> Result<AddResult>;
     /// Delete rows from the table.
-    async fn delete(&self, predicate: &str) -> Result<()>;
+    async fn delete(&self, predicate: &str) -> Result<DeleteResult>;
     /// Update rows in the table.
-    async fn update(&self, update: UpdateBuilder) -> Result<u64>;
+    async fn update(&self, update: UpdateBuilder) -> Result<UpdateResult>;
     /// Create an index on the provided column(s).
     async fn create_index(&self, index: IndexBuilder) -> Result<()>;
     /// List the indices on the table.
@@ -465,7 +563,9 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {
         &self,
         params: MergeInsertBuilder,
         new_data: Box<dyn RecordBatchReader + Send>,
-    ) -> Result<()>;
+    ) -> Result<MergeResult>;
+    /// Gets the table tag manager.
+    async fn tags(&self) -> Result<Box<dyn Tags + '_>>;
     /// Optimize the dataset.
     async fn optimize(&self, action: OptimizeAction) -> Result<OptimizeStats>;
     /// Add columns to the table.
@@ -473,15 +573,18 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {
         &self,
         transforms: NewColumnTransform,
         read_columns: Option<Vec<String>>,
-    ) -> Result<()>;
+    ) -> Result<AddColumnsResult>;
     /// Alter columns in the table.
-    async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<()>;
+    async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<AlterColumnsResult>;
     /// Drop columns from the table.
-    async fn drop_columns(&self, columns: &[&str]) -> Result<()>;
+    async fn drop_columns(&self, columns: &[&str]) -> Result<DropColumnsResult>;
     /// Get the version of the table.
     async fn version(&self) -> Result<u64>;
     /// Checkout a specific version of the table.
     async fn checkout(&self, version: u64) -> Result<()>;
+    /// Checkout a table version referenced by a tag.
+    ///
+    /// Tags provide a human-readable way to reference specific versions of the table.
+    async fn checkout_tag(&self, tag: &str) -> Result<()>;
     /// Checkout the latest version of the table.
     async fn checkout_latest(&self) -> Result<()>;
     /// Restore the table to the currently checked out version.
@@ -499,6 +602,8 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {
         index_names: &[&str],
         timeout: std::time::Duration,
     ) -> Result<()>;
+    /// Get statistics on the table.
+    async fn stats(&self) -> Result<TableStatistics>;
 }
 
 /// A Table is a collection of strongly typed Rows.
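
With these signatures, every mutating trait method now reports the commit version it produced instead of `()`. A minimal caller-side sketch (assuming an already-opened `lancedb::Table` handle; the function name is illustrative):

    // Sketch only: `tbl` is assumed to be a connected lancedb::Table.
    async fn delete_and_report(tbl: &lancedb::Table) -> lancedb::Result<()> {
        let res = tbl.delete("id > 5").await?;
        // A version of 0 indicates a legacy server that does not report one.
        println!("delete committed as table version {}", res.version);
        Ok(())
    }
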
@@ -701,7 +806,7 @@ impl Table {
     /// tbl.delete("id > 5").await.unwrap();
     /// # });
     /// ```
-    pub async fn delete(&self, predicate: &str) -> Result<()> {
+    pub async fn delete(&self, predicate: &str) -> Result<DeleteResult> {
         self.inner.delete(predicate).await
     }
 
@@ -1016,17 +1121,20 @@ impl Table {
         &self,
         transforms: NewColumnTransform,
         read_columns: Option<Vec<String>>,
-    ) -> Result<()> {
+    ) -> Result<AddColumnsResult> {
         self.inner.add_columns(transforms, read_columns).await
     }
 
     /// Change a column's name or nullability.
-    pub async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<()> {
+    pub async fn alter_columns(
+        &self,
+        alterations: &[ColumnAlteration],
+    ) -> Result<AlterColumnsResult> {
         self.inner.alter_columns(alterations).await
     }
 
     /// Remove columns from the table.
-    pub async fn drop_columns(&self, columns: &[&str]) -> Result<()> {
+    pub async fn drop_columns(&self, columns: &[&str]) -> Result<DropColumnsResult> {
         self.inner.drop_columns(columns).await
     }
 
@@ -1058,6 +1166,24 @@ impl Table {
         self.inner.checkout(version).await
     }
 
+    /// Checks out a specific version of the Table by tag
+    ///
+    /// Any read operation on the table will now access the data at the version referenced by the tag.
+    /// As a consequence, calling this method will disable any read consistency interval
+    /// that was previously set.
+    ///
+    /// This is a read-only operation that turns the table into a sort of "view"
+    /// or "detached head". Other table instances will not be affected. To make the change
+    /// permanent you can use the [`Self::restore`] method.
+    ///
+    /// Any operation that modifies the table will fail while the table is in a checked
+    /// out state.
+    ///
+    /// To return the table to a normal state use [`Self::checkout_latest`].
+    pub async fn checkout_tag(&self, tag: &str) -> Result<()> {
+        self.inner.checkout_tag(tag).await
+    }
+
     /// Ensures the table is pointing at the latest version
     ///
     /// This can be used to manually update a table when the read_consistency_interval is None
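
A usage sketch of the checkout flow described above (tag a known-good version, pin reads to it, then return to the writable head; the function name and tag name are illustrative):

    async fn pin_to_stable(table: &lancedb::Table) -> lancedb::Result<()> {
        let current = table.version().await?;
        let mut tags = table.tags().await?;
        tags.create("stable", current).await?;

        table.checkout_tag("stable").await?; // read-only "detached head"
        // ... run queries against the pinned version ...
        table.checkout_latest().await?; // back to a normal, writable state
        Ok(())
    }
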
@@ -1144,6 +1270,11 @@ impl Table {
         self.inner.wait_for_index(index_names, timeout).await
     }
 
+    /// Get the tags manager.
+    pub async fn tags(&self) -> Result<Box<dyn Tags + '_>> {
+        self.inner.tags().await
+    }
+
     // Take many execution plans and map them into a single plan that adds
     // a query_index column and unions them.
     pub(crate) fn multi_vector_plan(
@@ -1194,6 +1325,40 @@ impl Table {
         .unwrap();
         Ok(Arc::new(repartitioned))
     }
+
+    /// Retrieve statistics on the table.
+    pub async fn stats(&self) -> Result<TableStatistics> {
+        self.inner.stats().await
+    }
+}
+
+pub struct NativeTags {
+    inner: LanceTags,
+}
+
+#[async_trait]
+impl Tags for NativeTags {
+    async fn list(&self) -> Result<HashMap<String, TagContents>> {
+        Ok(self.inner.list().await?)
+    }
+
+    async fn get_version(&self, tag: &str) -> Result<u64> {
+        Ok(self.inner.get_version(tag).await?)
+    }
+
+    async fn create(&mut self, tag: &str, version: u64) -> Result<()> {
+        self.inner.create(tag, version).await?;
+        Ok(())
+    }
+
+    async fn delete(&mut self, tag: &str) -> Result<()> {
+        self.inner.delete(tag).await?;
+        Ok(())
+    }
+
+    async fn update(&mut self, tag: &str, version: u64) -> Result<()> {
+        self.inner.update(tag, version).await?;
+        Ok(())
+    }
 }
 
 impl From<NativeTable> for Table {
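
`NativeTags` is a thin delegation layer over the Lance tags API; the CRUD surface it exposes mirrors the `test_tags` test further down. A condensed sketch of that surface (tag names and versions are illustrative):

    async fn manage_tags(table: &lancedb::Table) -> lancedb::Result<()> {
        let mut tags = table.tags().await?;
        tags.create("v1", 1).await?;
        assert_eq!(tags.get_version("v1").await?, 1);

        tags.update("v1", 3).await?; // repoint the tag at a newer commit
        tags.delete("v1").await?;
        assert!(tags.list().await?.is_empty());
        Ok(())
    }
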
@@ -1849,7 +2014,7 @@ impl NativeTable {
     /// more information.
     pub async fn uses_v2_manifest_paths(&self) -> Result<bool> {
         let dataset = self.dataset.get().await?;
-        Ok(dataset.manifest_naming_scheme == ManifestNamingScheme::V2)
+        Ok(dataset.manifest_location().naming_scheme == ManifestNamingScheme::V2)
     }
 
     /// Migrate the table to use the new manifest path scheme.
@@ -1940,6 +2105,10 @@ impl BaseTable for NativeTable {
         self.dataset.as_time_travel(version).await
     }
 
+    async fn checkout_tag(&self, tag: &str) -> Result<()> {
+        self.dataset.as_time_travel(tag).await
+    }
+
     async fn checkout_latest(&self) -> Result<()> {
         self.dataset
             .as_latest(self.read_consistency_interval)
@@ -1998,7 +2167,7 @@ impl BaseTable for NativeTable {
         &self,
         add: AddDataBuilder<NoData>,
         data: Box<dyn RecordBatchReader + Send>,
-    ) -> Result<()> {
+    ) -> Result<AddResult> {
         let data = Box::new(MaybeEmbedded::try_new(
             data,
             self.table_definition().await?,
@@ -2021,9 +2190,9 @@ impl BaseTable for NativeTable {
             .execute_stream(data)
             .await?
         };
+        let version = dataset.manifest().version;
         self.dataset.set_latest(dataset).await;
-        Ok(())
+        Ok(AddResult { version })
     }
 
     async fn create_index(&self, opts: IndexBuilder) -> Result<()> {
@@ -2069,7 +2238,7 @@ impl BaseTable for NativeTable {
         Ok(dataset.prewarm_index(index_name).await?)
     }
 
-    async fn update(&self, update: UpdateBuilder) -> Result<u64> {
+    async fn update(&self, update: UpdateBuilder) -> Result<UpdateResult> {
         let dataset = self.dataset.get().await?.clone();
         let mut builder = LanceUpdateBuilder::new(Arc::new(dataset));
         if let Some(predicate) = update.filter {
@@ -2085,7 +2254,10 @@ impl BaseTable for NativeTable {
         self.dataset
             .set_latest(res.new_dataset.as_ref().clone())
             .await;
-        Ok(res.rows_updated)
+        Ok(UpdateResult {
+            rows_updated: res.rows_updated,
+            version: res.new_dataset.version().version,
+        })
     }
 
     async fn create_plan(
@@ -2277,7 +2449,7 @@ impl BaseTable for NativeTable {
         &self,
         params: MergeInsertBuilder,
         new_data: Box<dyn RecordBatchReader + Send>,
-    ) -> Result<()> {
+    ) -> Result<MergeResult> {
         let dataset = Arc::new(self.dataset.get().await?.clone());
         let mut builder = LanceMergeInsertBuilder::try_new(dataset.clone(), params.on)?;
         match (
@@ -2303,16 +2475,51 @@ impl BaseTable for NativeTable {
         } else {
             builder.when_not_matched_by_source(WhenNotMatchedBySource::Keep);
         }
-        let job = builder.try_build()?;
-        let (new_dataset, _stats) = job.execute_reader(new_data).await?;
+        let future = if let Some(timeout) = params.timeout {
+            // The default retry timeout is 30s, so we pass the full timeout down
+            // as well in case it is longer than that.
+            let future = builder
+                .retry_timeout(timeout)
+                .try_build()?
+                .execute_reader(new_data);
+            Either::Left(tokio::time::timeout(timeout, future).map(|res| match res {
+                Ok(Ok((new_dataset, stats))) => Ok((new_dataset, stats)),
+                Ok(Err(e)) => Err(e.into()),
+                Err(_) => Err(Error::Runtime {
+                    message: "merge insert timed out".to_string(),
+                }),
+            }))
+        } else {
+            let job = builder.try_build()?;
+            Either::Right(job.execute_reader(new_data).map_err(|e| e.into()))
+        };
+        let (new_dataset, stats) = future.await?;
+        let version = new_dataset.manifest().version;
         self.dataset.set_latest(new_dataset.as_ref().clone()).await;
-        Ok(())
+        Ok(MergeResult {
+            version,
+            num_updated_rows: stats.num_updated_rows,
+            num_inserted_rows: stats.num_inserted_rows,
+            num_deleted_rows: stats.num_deleted_rows,
+        })
     }
 
     /// Delete rows from the table
-    async fn delete(&self, predicate: &str) -> Result<()> {
-        self.dataset.get_mut().await?.delete(predicate).await?;
-        Ok(())
+    async fn delete(&self, predicate: &str) -> Result<DeleteResult> {
+        let mut dataset = self.dataset.get_mut().await?;
+        dataset.delete(predicate).await?;
+        Ok(DeleteResult {
+            version: dataset.version().version,
+        })
+    }
+
+    async fn tags(&self) -> Result<Box<dyn Tags + '_>> {
+        let dataset = self.dataset.get().await?;
+
+        Ok(Box::new(NativeTags {
+            inner: dataset.tags.clone(),
+        }))
     }
 
     async fn optimize(&self, action: OptimizeAction) -> Result<OptimizeStats> {
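
The merge path above uses `futures::future::Either` to give the timeout and no-timeout branches a single concrete future type, and `tokio::time::timeout` to bound the whole job. A self-contained sketch of the same shape (assumes the `tokio` and `futures` crates; the work being timed is a stand-in sleep):

    use std::time::Duration;

    use futures::future::Either;
    use futures::FutureExt;

    async fn run(work_ms: u64, timeout: Option<Duration>) -> Result<u64, String> {
        let work = async move {
            tokio::time::sleep(Duration::from_millis(work_ms)).await;
            Ok::<u64, String>(work_ms)
        };
        let fut = if let Some(t) = timeout {
            // Left arm: the work wrapped in a timeout, flattened to the same output type.
            Either::Left(tokio::time::timeout(t, work).map(|res| match res {
                Ok(inner) => inner,
                Err(_) => Err("timed out".to_string()),
            }))
        } else {
            // Right arm: the bare work future.
            Either::Right(work)
        };
        fut.await
    }

    #[tokio::main]
    async fn main() {
        assert_eq!(run(10, None).await, Ok(10));
        assert!(run(500, Some(Duration::from_millis(5))).await.is_err());
    }
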
@@ -2371,27 +2578,28 @@ impl BaseTable for NativeTable {
         &self,
         transforms: NewColumnTransform,
         read_columns: Option<Vec<String>>,
-    ) -> Result<()> {
-        self.dataset
-            .get_mut()
-            .await?
-            .add_columns(transforms, read_columns, None)
-            .await?;
-        Ok(())
+    ) -> Result<AddColumnsResult> {
+        let mut dataset = self.dataset.get_mut().await?;
+        dataset.add_columns(transforms, read_columns, None).await?;
+        Ok(AddColumnsResult {
+            version: dataset.version().version,
+        })
     }
 
-    async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<()> {
-        self.dataset
-            .get_mut()
-            .await?
-            .alter_columns(alterations)
-            .await?;
-        Ok(())
+    async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<AlterColumnsResult> {
+        let mut dataset = self.dataset.get_mut().await?;
+        dataset.alter_columns(alterations).await?;
+        Ok(AlterColumnsResult {
+            version: dataset.version().version,
+        })
     }
 
-    async fn drop_columns(&self, columns: &[&str]) -> Result<()> {
-        self.dataset.get_mut().await?.drop_columns(columns).await?;
-        Ok(())
+    async fn drop_columns(&self, columns: &[&str]) -> Result<DropColumnsResult> {
+        let mut dataset = self.dataset.get_mut().await?;
+        dataset.drop_columns(columns).await?;
+        Ok(DropColumnsResult {
+            version: dataset.version().version,
+        })
     }
 
     async fn list_indices(&self) -> Result<Vec<IndexConfig>> {
@@ -2480,6 +2688,108 @@ impl BaseTable for NativeTable {
     ) -> Result<()> {
         wait_for_index(self, index_names, timeout).await
     }
+
+    async fn stats(&self) -> Result<TableStatistics> {
+        let num_rows = self.count_rows(None).await?;
+        let num_indices = self.list_indices().await?.len();
+        let ds = self.dataset.get().await?;
+        let ds_clone = (*ds).clone();
+        let ds_stats = Arc::new(ds_clone).calculate_data_stats().await?;
+        let total_bytes = ds_stats.fields.iter().map(|f| f.bytes_on_disk).sum::<u64>() as usize;
+
+        let frags = ds.get_fragments();
+        let mut sorted_sizes = join_all(
+            frags
+                .iter()
+                .map(|frag| async move { frag.physical_rows().await.unwrap_or(0) }),
+        )
+        .await;
+        sorted_sizes.sort();
+
+        let small_frag_threshold = 100000;
+        let num_fragments = sorted_sizes.len();
+        let num_small_fragments = sorted_sizes
+            .iter()
+            .filter(|&&size| size < small_frag_threshold)
+            .count();
+
+        let p25 = *sorted_sizes.get(num_fragments / 4).unwrap_or(&0);
+        let p50 = *sorted_sizes.get(num_fragments / 2).unwrap_or(&0);
+        let p75 = *sorted_sizes.get(num_fragments * 3 / 4).unwrap_or(&0);
+        let p99 = *sorted_sizes.get(num_fragments * 99 / 100).unwrap_or(&0);
+        let min = sorted_sizes.first().copied().unwrap_or(0);
+        let max = sorted_sizes.last().copied().unwrap_or(0);
+        let mean = if num_fragments == 0 {
+            0
+        } else {
+            sorted_sizes.iter().copied().sum::<usize>() / num_fragments
+        };
+
+        let frag_stats = FragmentStatistics {
+            num_fragments,
+            num_small_fragments,
+            lengths: FragmentSummaryStats {
+                min,
+                max,
+                mean,
+                p25,
+                p50,
+                p75,
+                p99,
+            },
+        };
+        let stats = TableStatistics {
+            total_bytes,
+            num_rows,
+            num_indices,
+            fragment_stats: frag_stats,
+        };
+        Ok(stats)
+    }
+}
+
+#[skip_serializing_none]
+#[derive(Debug, Deserialize, PartialEq)]
+pub struct TableStatistics {
+    /// The total number of bytes in the table
+    pub total_bytes: usize,
+
+    /// The number of rows in the table
+    pub num_rows: usize,
+
+    /// The number of indices in the table
+    pub num_indices: usize,
+
+    /// Statistics on table fragments
+    pub fragment_stats: FragmentStatistics,
+}
+
+#[skip_serializing_none]
+#[derive(Debug, Deserialize, PartialEq)]
+pub struct FragmentStatistics {
+    /// The number of fragments in the table
+    pub num_fragments: usize,
+
+    /// The number of uncompacted fragments in the table
+    pub num_small_fragments: usize,
+
+    /// Statistics on the number of rows in the table fragments
+    pub lengths: FragmentSummaryStats,
+    // todo: add size statistics
+    // /// Statistics on the number of bytes in the table fragments
+    // sizes: FragmentStats,
+}
+
+#[skip_serializing_none]
+#[derive(Debug, Deserialize, PartialEq)]
+pub struct FragmentSummaryStats {
+    pub min: usize,
+    pub max: usize,
+    pub mean: usize,
+    pub p25: usize,
+    pub p50: usize,
+    pub p75: usize,
+    pub p99: usize,
+}
 }
 
 #[cfg(test)]
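
The percentile reads in `stats()` are nearest-rank lookups into the sorted per-fragment row counts, with `unwrap_or(&0)` covering the empty-table case. A standalone check of the same indexing, using the fragment sizes asserted by `test_stats` below (ten fragments of 15 rows plus the initial 100-row fragment):

    fn percentiles(mut sizes: Vec<usize>) -> (usize, usize, usize, usize, usize) {
        sizes.sort();
        let n = sizes.len();
        let at = |i: usize| sizes.get(i).copied().unwrap_or(0);
        let mean = if n == 0 { 0 } else { sizes.iter().sum::<usize>() / n };
        (at(n / 4), at(n / 2), at(n * 3 / 4), at(n * 99 / 100), mean)
    }

    fn main() {
        let mut sizes = vec![15; 10];
        sizes.push(100);
        // Matches test_stats: p25=15, p50=15, p75=15, p99=100, mean=22.
        assert_eq!(percentiles(sizes), (15, 15, 15, 100, 22));
        // An empty table degrades to all zeros.
        assert_eq!(percentiles(vec![]), (0, 0, 0, 0, 0));
    }
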
@@ -3081,6 +3391,60 @@ mod tests {
         )
     }
 
+    #[tokio::test]
+    async fn test_tags() {
+        let tmp_dir = tempdir().unwrap();
+        let uri = tmp_dir.path().to_str().unwrap();
+
+        let conn = ConnectBuilder::new(uri)
+            .read_consistency_interval(Duration::from_secs(0))
+            .execute()
+            .await
+            .unwrap();
+        let table = conn
+            .create_table("my_table", some_sample_data())
+            .execute()
+            .await
+            .unwrap();
+        assert_eq!(table.version().await.unwrap(), 1);
+        table.add(some_sample_data()).execute().await.unwrap();
+        assert_eq!(table.version().await.unwrap(), 2);
+        let mut tags_manager = table.tags().await.unwrap();
+        let tags = tags_manager.list().await.unwrap();
+        assert!(tags.is_empty(), "Tags should be empty initially");
+        let tag1 = "tag1";
+        tags_manager.create(tag1, 1).await.unwrap();
+        assert_eq!(tags_manager.get_version(tag1).await.unwrap(), 1);
+        let tags = tags_manager.list().await.unwrap();
+        assert_eq!(tags.len(), 1);
+        assert!(tags.contains_key(tag1));
+        assert_eq!(tags.get(tag1).unwrap().version, 1);
+        tags_manager.create("tag2", 2).await.unwrap();
+        assert_eq!(tags_manager.get_version("tag2").await.unwrap(), 2);
+        let tags = tags_manager.list().await.unwrap();
+        assert_eq!(tags.len(), 2);
+        assert!(tags.contains_key(tag1));
+        assert_eq!(tags.get(tag1).unwrap().version, 1);
+        assert!(tags.contains_key("tag2"));
+        assert_eq!(tags.get("tag2").unwrap().version, 2);
+
+        // Test update and delete
+        table.add(some_sample_data()).execute().await.unwrap();
+        tags_manager.update(tag1, 3).await.unwrap();
+        assert_eq!(tags_manager.get_version(tag1).await.unwrap(), 3);
+        tags_manager.delete("tag2").await.unwrap();
+        let tags = tags_manager.list().await.unwrap();
+        assert_eq!(tags.len(), 1);
+        assert!(tags.contains_key(tag1));
+        assert_eq!(tags.get(tag1).unwrap().version, 3);
+
+        // Test checkout tag
+        table.add(some_sample_data()).execute().await.unwrap();
+        assert_eq!(table.version().await.unwrap(), 4);
+        table.checkout_tag(tag1).await.unwrap();
+        assert_eq!(table.version().await.unwrap(), 3);
+        table.checkout_latest().await.unwrap();
+        assert_eq!(table.version().await.unwrap(), 4);
+    }
+
     #[tokio::test]
     async fn test_create_index() {
         use arrow_array::RecordBatch;
@@ -3803,4 +4167,108 @@ mod tests {
             Some(&"test_field_val1".to_string())
         );
     }
+
+    #[tokio::test]
+    pub async fn test_stats() {
+        let tmp_dir = tempdir().unwrap();
+        let uri = tmp_dir.path().to_str().unwrap();
+
+        let conn = ConnectBuilder::new(uri).execute().await.unwrap();
+
+        let schema = Arc::new(Schema::new(vec![
+            Field::new("id", DataType::Int32, false),
+            Field::new("foo", DataType::Int32, true),
+        ]));
+        let batch = RecordBatch::try_new(
+            schema.clone(),
+            vec![
+                Arc::new(Int32Array::from_iter_values(0..100)),
+                Arc::new(Int32Array::from_iter_values(0..100)),
+            ],
+        )
+        .unwrap();
+
+        let table = conn
+            .create_table(
+                "test_stats",
+                RecordBatchIterator::new(vec![Ok(batch.clone())], batch.schema()),
+            )
+            .execute()
+            .await
+            .unwrap();
+        for _ in 0..10 {
+            let batch = RecordBatch::try_new(
+                schema.clone(),
+                vec![
+                    Arc::new(Int32Array::from_iter_values(0..15)),
+                    Arc::new(Int32Array::from_iter_values(0..15)),
+                ],
+            )
+            .unwrap();
+            table
+                .add(RecordBatchIterator::new(
+                    vec![Ok(batch.clone())],
+                    batch.schema(),
+                ))
+                .execute()
+                .await
+                .unwrap();
+        }
+
+        let empty_table = conn
+            .create_table(
+                "test_stats_empty",
+                RecordBatchIterator::new(vec![], batch.schema()),
+            )
+            .execute()
+            .await
+            .unwrap();
+
+        let res = table.stats().await.unwrap();
+        println!("{:#?}", res);
+        assert_eq!(
+            res,
+            TableStatistics {
+                num_rows: 250,
+                num_indices: 0,
+                total_bytes: 2000,
+                fragment_stats: FragmentStatistics {
+                    num_fragments: 11,
+                    num_small_fragments: 11,
+                    lengths: FragmentSummaryStats {
+                        min: 15,
+                        max: 100,
+                        mean: 22,
+                        p25: 15,
+                        p50: 15,
+                        p75: 15,
+                        p99: 100,
+                    },
+                },
+            }
+        );
+        let res = empty_table.stats().await.unwrap();
+        println!("{:#?}", res);
+        assert_eq!(
+            res,
+            TableStatistics {
+                num_rows: 0,
+                num_indices: 0,
+                total_bytes: 0,
+                fragment_stats: FragmentStatistics {
+                    num_fragments: 0,
+                    num_small_fragments: 0,
+                    lengths: FragmentSummaryStats {
+                        min: 0,
+                        max: 0,
+                        mean: 0,
+                        p25: 0,
+                        p50: 0,
+                        p75: 0,
+                        p99: 0,
+                    },
+                },
+            }
+        )
+    }
 }
@@ -7,7 +7,7 @@ use std::{
     time::{self, Duration, Instant},
 };
 
-use lance::Dataset;
+use lance::{dataset::refs, Dataset};
 use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
 
 use crate::error::Result;
@@ -83,19 +83,32 @@ impl DatasetRef {
         }
     }
 
-    async fn as_time_travel(&mut self, target_version: u64) -> Result<()> {
+    async fn as_time_travel(&mut self, target_version: impl Into<refs::Ref>) -> Result<()> {
+        let target_ref = target_version.into();
+
         match self {
             Self::Latest { dataset, .. } => {
+                let new_dataset = dataset.checkout_version(target_ref.clone()).await?;
+                let version_value = new_dataset.version().version;
+
                 *self = Self::TimeTravel {
-                    dataset: dataset.checkout_version(target_version).await?,
-                    version: target_version,
+                    dataset: new_dataset,
+                    version: version_value,
                 };
             }
             Self::TimeTravel { dataset, version } => {
-                if *version != target_version {
+                let should_checkout = match &target_ref {
+                    refs::Ref::Version(target_ver) => version != target_ver,
+                    refs::Ref::Tag(_) => true, // Always checkout for tags
+                };
+
+                if should_checkout {
+                    let new_dataset = dataset.checkout_version(target_ref).await?;
+                    let version_value = new_dataset.version().version;
+
                     *self = Self::TimeTravel {
-                        dataset: dataset.checkout_version(target_version).await?,
-                        version: target_version,
+                        dataset: new_dataset,
+                        version: version_value,
                     };
                 }
             }
@@ -175,7 +188,7 @@ impl DatasetConsistencyWrapper {
         write_guard.as_latest(read_consistency_interval).await
     }
 
-    pub async fn as_time_travel(&self, target_version: u64) -> Result<()> {
+    pub async fn as_time_travel(&self, target_version: impl Into<refs::Ref>) -> Result<()> {
         self.0.write().await.as_time_travel(target_version).await
     }
 
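
Widening `as_time_travel` to `impl Into<refs::Ref>` is what lets the same entry point serve both `checkout(version: u64)` and `checkout_tag(tag: &str)`. A toy reconstruction of the pattern (the `Ref` enum here is a local stand-in, not the lance API):

    #[derive(Debug, Clone)]
    enum Ref {
        Version(u64),
        Tag(String),
    }

    impl From<u64> for Ref {
        fn from(v: u64) -> Self {
            Ref::Version(v)
        }
    }

    impl From<&str> for Ref {
        fn from(t: &str) -> Self {
            Ref::Tag(t.to_string())
        }
    }

    fn checkout(target: impl Into<Ref>) {
        match target.into() {
            Ref::Version(v) => println!("checkout version {v}"),
            Ref::Tag(t) => println!("checkout tag {t}"),
        }
    }

    fn main() {
        checkout(42u64); // the numeric-version path
        checkout("stable"); // the tag path
    }

Note that in the change above a `Tag` target always forces a fresh checkout: the version a tag points at can be repointed between calls, so a cached time-travel version cannot be trusted for tags.
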
@@ -1,13 +1,13 @@
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
-use std::sync::Arc;
+use std::{sync::Arc, time::Duration};
 
 use arrow_array::RecordBatchReader;
 
 use crate::Result;
 
-use super::BaseTable;
+use super::{BaseTable, MergeResult};
 
 /// A builder used to create and run a merge insert operation
 ///
@@ -21,6 +21,7 @@ pub struct MergeInsertBuilder {
     pub(crate) when_not_matched_insert_all: bool,
     pub(crate) when_not_matched_by_source_delete: bool,
     pub(crate) when_not_matched_by_source_delete_filt: Option<String>,
+    pub(crate) timeout: Option<Duration>,
 }
 
 impl MergeInsertBuilder {
@@ -33,6 +34,7 @@ impl MergeInsertBuilder {
             when_not_matched_insert_all: false,
             when_not_matched_by_source_delete: false,
             when_not_matched_by_source_delete_filt: None,
+            timeout: None,
         }
     }
 
@@ -84,10 +86,26 @@ impl MergeInsertBuilder {
         self
     }
 
+    /// Maximum time to run the operation before cancelling it.
+    ///
+    /// By default, there is a 30-second timeout that is only enforced after the
+    /// first attempt. This is to prevent spending too long retrying to resolve
+    /// conflicts. For example, if a write attempt takes 20 seconds and fails,
+    /// the second attempt will be cancelled after 10 seconds, hitting the
+    /// 30-second timeout. However, a write that takes one hour and succeeds on the
+    /// first attempt will not be cancelled.
+    ///
+    /// When this is set, the timeout is enforced on all attempts, including the first.
+    pub fn timeout(&mut self, timeout: Duration) -> &mut Self {
+        self.timeout = Some(timeout);
+        self
+    }
+
     /// Executes the merge insert operation
     ///
-    /// Nothing is returned but the [`super::Table`] is updated
-    pub async fn execute(self, new_data: Box<dyn RecordBatchReader + Send>) -> Result<()> {
+    /// Returns the commit version and statistics about the merge operation,
+    /// including the number of rows inserted, updated, and deleted.
+    pub async fn execute(self, new_data: Box<dyn RecordBatchReader + Send>) -> Result<MergeResult> {
         self.table.clone().merge_insert(self, new_data).await
     }
 }
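
A usage sketch of the new knob (the `merge_insert` and `when_*` builder calls are assumed from the wider lancedb API; only `timeout` and the `MergeResult` fields come from this change):

    use std::time::Duration;

    async fn upsert(
        table: &lancedb::Table,
        new_data: Box<dyn arrow_array::RecordBatchReader + Send>,
    ) -> lancedb::Result<()> {
        let mut builder = table.merge_insert(&["id"]);
        builder
            .when_matched_update_all(None)
            .when_not_matched_insert_all()
            // Enforced on every attempt, including the first.
            .timeout(Duration::from_secs(60));
        let res = builder.execute(new_data).await?;
        println!(
            "version {}: {} inserted, {} updated, {} deleted",
            res.version, res.num_inserted_rows, res.num_updated_rows, res.num_deleted_rows
        );
        Ok(())
    }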