mirror of
https://github.com/lancedb/lancedb.git
synced 2026-04-03 14:30:41 +00:00
Compare commits
29 Commits
codex/upda
...
python-v0.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
590c0c1e77 | ||
|
|
382ecd65e3 | ||
|
|
e26b22bcca | ||
|
|
3ba46135a5 | ||
|
|
f903d07887 | ||
|
|
5d550124bd | ||
|
|
c57cb310a2 | ||
|
|
97754f5123 | ||
|
|
7b1c063848 | ||
|
|
c7f189f27b | ||
|
|
a0a2942ad5 | ||
|
|
e3d53dd185 | ||
|
|
66804e99fc | ||
|
|
9f85d4c639 | ||
|
|
1ba19d728e | ||
|
|
4c44587af0 | ||
|
|
1d1cafb59c | ||
|
|
4714598155 | ||
|
|
74f457a0f2 | ||
|
|
cca6a7c989 | ||
|
|
ad96489114 | ||
|
|
76429730c0 | ||
|
|
874b74dd3c | ||
|
|
61de47f3a5 | ||
|
|
f4d613565e | ||
|
|
410ab9b6fe | ||
|
|
1d6e00b902 | ||
|
|
a0228036ae | ||
|
|
d8fc071a7d |
@@ -1,5 +1,5 @@
|
||||
[tool.bumpversion]
|
||||
current_version = "0.27.1"
|
||||
current_version = "0.27.2"
|
||||
parse = """(?x)
|
||||
(?P<major>0|[1-9]\\d*)\\.
|
||||
(?P<minor>0|[1-9]\\d*)\\.
|
||||
|
||||
@@ -23,8 +23,10 @@ runs:
|
||||
steps:
|
||||
- name: CONFIRM ARM BUILD
|
||||
shell: bash
|
||||
env:
|
||||
ARM_BUILD: ${{ inputs.arm-build }}
|
||||
run: |
|
||||
echo "ARM BUILD: ${{ inputs.arm-build }}"
|
||||
echo "ARM BUILD: $ARM_BUILD"
|
||||
- name: Build x86_64 Manylinux wheel
|
||||
if: ${{ inputs.arm-build == 'false' }}
|
||||
uses: PyO3/maturin-action@v1
|
||||
|
||||
16
.github/workflows/rust.yml
vendored
16
.github/workflows/rust.yml
vendored
@@ -207,14 +207,14 @@ jobs:
|
||||
- name: Downgrade dependencies
|
||||
# These packages have newer requirements for MSRV
|
||||
run: |
|
||||
cargo update -p aws-sdk-bedrockruntime --precise 1.64.0
|
||||
cargo update -p aws-sdk-dynamodb --precise 1.55.0
|
||||
cargo update -p aws-config --precise 1.5.10
|
||||
cargo update -p aws-sdk-kms --precise 1.51.0
|
||||
cargo update -p aws-sdk-s3 --precise 1.65.0
|
||||
cargo update -p aws-sdk-sso --precise 1.50.0
|
||||
cargo update -p aws-sdk-ssooidc --precise 1.51.0
|
||||
cargo update -p aws-sdk-sts --precise 1.51.0
|
||||
cargo update -p aws-sdk-bedrockruntime --precise 1.77.0
|
||||
cargo update -p aws-sdk-dynamodb --precise 1.68.0
|
||||
cargo update -p aws-config --precise 1.6.0
|
||||
cargo update -p aws-sdk-kms --precise 1.63.0
|
||||
cargo update -p aws-sdk-s3 --precise 1.79.0
|
||||
cargo update -p aws-sdk-sso --precise 1.62.0
|
||||
cargo update -p aws-sdk-ssooidc --precise 1.63.0
|
||||
cargo update -p aws-sdk-sts --precise 1.63.0
|
||||
cargo update -p home --precise 0.5.9
|
||||
- name: cargo +${{ matrix.msrv }} check
|
||||
env:
|
||||
|
||||
2134
Cargo.lock
generated
2134
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
28
Cargo.toml
28
Cargo.toml
@@ -15,20 +15,20 @@ categories = ["database-implementations"]
|
||||
rust-version = "1.91.0"
|
||||
|
||||
[workspace.dependencies]
|
||||
lance = { version = "=3.0.1", default-features = false }
|
||||
lance-core = { version = "=3.0.1" }
|
||||
lance-datagen = { version = "=3.0.1" }
|
||||
lance-file = { version = "=3.0.1" }
|
||||
lance-io = { version = "=3.0.1", default-features = false }
|
||||
lance-index = { version = "=3.0.1" }
|
||||
lance-linalg = { version = "=3.0.1" }
|
||||
lance-namespace = { version = "=3.0.1" }
|
||||
lance-namespace-impls = { version = "=3.0.1", default-features = false }
|
||||
lance-table = { version = "=3.0.1" }
|
||||
lance-testing = { version = "=3.0.1" }
|
||||
lance-datafusion = { version = "=3.0.1" }
|
||||
lance-encoding = { version = "=3.0.1" }
|
||||
lance-arrow = { version = "=3.0.1" }
|
||||
lance = { "version" = "=5.0.0-beta.4", default-features = false, "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-core = { "version" = "=5.0.0-beta.4", "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-datagen = { "version" = "=5.0.0-beta.4", "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-file = { "version" = "=5.0.0-beta.4", "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-io = { "version" = "=5.0.0-beta.4", default-features = false, "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-index = { "version" = "=5.0.0-beta.4", "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-linalg = { "version" = "=5.0.0-beta.4", "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-namespace = { "version" = "=5.0.0-beta.4", "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-namespace-impls = { "version" = "=5.0.0-beta.4", default-features = false, "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-table = { "version" = "=5.0.0-beta.4", "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-testing = { "version" = "=5.0.0-beta.4", "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-datafusion = { "version" = "=5.0.0-beta.4", "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-encoding = { "version" = "=5.0.0-beta.4", "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
lance-arrow = { "version" = "=5.0.0-beta.4", "tag" = "v5.0.0-beta.4", "git" = "https://github.com/lance-format/lance.git" }
|
||||
ahash = "0.8"
|
||||
# Note that this one does not include pyarrow
|
||||
arrow = { version = "57.2", optional = false }
|
||||
|
||||
@@ -14,7 +14,7 @@ Add the following dependency to your `pom.xml`:
|
||||
<dependency>
|
||||
<groupId>com.lancedb</groupId>
|
||||
<artifactId>lancedb-core</artifactId>
|
||||
<version>0.27.1</version>
|
||||
<version>0.27.2</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
@@ -57,32 +57,32 @@ LanceNamespace namespaceClient = LanceDbNamespaceClientBuilder.newBuilder()
|
||||
|
||||
## Metadata Operations
|
||||
|
||||
### Creating a Namespace
|
||||
### Creating a Namespace Path
|
||||
|
||||
Namespaces organize tables hierarchically. Create a namespace before creating tables within it:
|
||||
Namespace paths organize tables hierarchically. Create the desired namespace path before creating tables within it:
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.CreateNamespaceRequest;
|
||||
import org.lance.namespace.model.CreateNamespaceResponse;
|
||||
|
||||
// Create a child namespace
|
||||
// Create a child namespace path
|
||||
CreateNamespaceRequest request = new CreateNamespaceRequest();
|
||||
request.setId(Arrays.asList("my_namespace"));
|
||||
|
||||
CreateNamespaceResponse response = namespaceClient.createNamespace(request);
|
||||
```
|
||||
|
||||
You can also create nested namespaces:
|
||||
You can also create nested namespace paths:
|
||||
|
||||
```java
|
||||
// Create a nested namespace: parent/child
|
||||
// Create a nested namespace path: parent/child
|
||||
CreateNamespaceRequest request = new CreateNamespaceRequest();
|
||||
request.setId(Arrays.asList("parent_namespace", "child_namespace"));
|
||||
|
||||
CreateNamespaceResponse response = namespaceClient.createNamespace(request);
|
||||
```
|
||||
|
||||
### Describing a Namespace
|
||||
### Describing a Namespace Path
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.DescribeNamespaceRequest;
|
||||
@@ -95,22 +95,22 @@ DescribeNamespaceResponse response = namespaceClient.describeNamespace(request);
|
||||
System.out.println("Namespace properties: " + response.getProperties());
|
||||
```
|
||||
|
||||
### Listing Namespaces
|
||||
### Listing Namespace Paths
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.ListNamespacesRequest;
|
||||
import org.lance.namespace.model.ListNamespacesResponse;
|
||||
|
||||
// List all namespaces at root level
|
||||
// List all namespace paths at the root level
|
||||
ListNamespacesRequest request = new ListNamespacesRequest();
|
||||
request.setId(Arrays.asList()); // Empty for root
|
||||
|
||||
ListNamespacesResponse response = namespaceClient.listNamespaces(request);
|
||||
for (String ns : response.getNamespaces()) {
|
||||
System.out.println("Namespace: " + ns);
|
||||
System.out.println("Namespace path: " + ns);
|
||||
}
|
||||
|
||||
// List child namespaces under a parent
|
||||
// List child namespace paths under a parent path
|
||||
ListNamespacesRequest childRequest = new ListNamespacesRequest();
|
||||
childRequest.setId(Arrays.asList("parent_namespace"));
|
||||
|
||||
@@ -123,7 +123,7 @@ ListNamespacesResponse childResponse = namespaceClient.listNamespaces(childReque
|
||||
import org.lance.namespace.model.ListTablesRequest;
|
||||
import org.lance.namespace.model.ListTablesResponse;
|
||||
|
||||
// List tables in a namespace
|
||||
// List tables in a namespace path
|
||||
ListTablesRequest request = new ListTablesRequest();
|
||||
request.setId(Arrays.asList("my_namespace"));
|
||||
|
||||
@@ -133,7 +133,7 @@ for (String table : response.getTables()) {
|
||||
}
|
||||
```
|
||||
|
||||
### Dropping a Namespace
|
||||
### Dropping a Namespace Path
|
||||
|
||||
```java
|
||||
import org.lance.namespace.model.DropNamespaceRequest;
|
||||
@@ -175,7 +175,7 @@ DropTableResponse response = namespaceClient.dropTable(request);
|
||||
|
||||
### Creating a Table
|
||||
|
||||
Tables are created within a namespace by providing data in Apache Arrow IPC format:
|
||||
Tables are created within a namespace path by providing data in Apache Arrow IPC format:
|
||||
|
||||
```java
|
||||
import org.lance.namespace.LanceNamespace;
|
||||
@@ -242,7 +242,7 @@ try (BufferAllocator allocator = new RootAllocator();
|
||||
}
|
||||
byte[] tableData = out.toByteArray();
|
||||
|
||||
// Create table in a namespace
|
||||
// Create a table in a namespace path
|
||||
CreateTableRequest request = new CreateTableRequest();
|
||||
request.setId(Arrays.asList("my_namespace", "my_table"));
|
||||
CreateTableResponse response = namespaceClient.createTable(request, tableData);
|
||||
|
||||
@@ -61,8 +61,8 @@ sharing the same data, deletion, and index files.
|
||||
* **options.sourceVersion?**: `number`
|
||||
The version of the source table to clone.
|
||||
|
||||
* **options.targetNamespace?**: `string`[]
|
||||
The namespace for the target table (defaults to root namespace).
|
||||
* **options.targetNamespacePath?**: `string`[]
|
||||
The namespace path for the target table (defaults to root namespace).
|
||||
|
||||
#### Returns
|
||||
|
||||
@@ -116,13 +116,13 @@ Creates a new empty Table
|
||||
|
||||
`Promise`<[`Table`](Table.md)>
|
||||
|
||||
#### createEmptyTable(name, schema, namespace, options)
|
||||
#### createEmptyTable(name, schema, namespacePath, options)
|
||||
|
||||
```ts
|
||||
abstract createEmptyTable(
|
||||
name,
|
||||
schema,
|
||||
namespace?,
|
||||
namespacePath?,
|
||||
options?): Promise<Table>
|
||||
```
|
||||
|
||||
@@ -136,8 +136,8 @@ Creates a new empty Table
|
||||
* **schema**: [`SchemaLike`](../type-aliases/SchemaLike.md)
|
||||
The schema of the table
|
||||
|
||||
* **namespace?**: `string`[]
|
||||
The namespace to create the table in (defaults to root namespace)
|
||||
* **namespacePath?**: `string`[]
|
||||
The namespace path to create the table in (defaults to root namespace)
|
||||
|
||||
* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
|
||||
Additional options
|
||||
@@ -150,10 +150,10 @@ Creates a new empty Table
|
||||
|
||||
### createTable()
|
||||
|
||||
#### createTable(options, namespace)
|
||||
#### createTable(options, namespacePath)
|
||||
|
||||
```ts
|
||||
abstract createTable(options, namespace?): Promise<Table>
|
||||
abstract createTable(options, namespacePath?): Promise<Table>
|
||||
```
|
||||
|
||||
Creates a new Table and initialize it with new data.
|
||||
@@ -163,8 +163,8 @@ Creates a new Table and initialize it with new data.
|
||||
* **options**: `object` & `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
|
||||
The options object.
|
||||
|
||||
* **namespace?**: `string`[]
|
||||
The namespace to create the table in (defaults to root namespace)
|
||||
* **namespacePath?**: `string`[]
|
||||
The namespace path to create the table in (defaults to root namespace)
|
||||
|
||||
##### Returns
|
||||
|
||||
@@ -197,13 +197,13 @@ Creates a new Table and initialize it with new data.
|
||||
|
||||
`Promise`<[`Table`](Table.md)>
|
||||
|
||||
#### createTable(name, data, namespace, options)
|
||||
#### createTable(name, data, namespacePath, options)
|
||||
|
||||
```ts
|
||||
abstract createTable(
|
||||
name,
|
||||
data,
|
||||
namespace?,
|
||||
namespacePath?,
|
||||
options?): Promise<Table>
|
||||
```
|
||||
|
||||
@@ -218,8 +218,8 @@ Creates a new Table and initialize it with new data.
|
||||
Non-empty Array of Records
|
||||
to be inserted into the table
|
||||
|
||||
* **namespace?**: `string`[]
|
||||
The namespace to create the table in (defaults to root namespace)
|
||||
* **namespacePath?**: `string`[]
|
||||
The namespace path to create the table in (defaults to root namespace)
|
||||
|
||||
* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
|
||||
Additional options
|
||||
@@ -247,15 +247,15 @@ Return a brief description of the connection
|
||||
### dropAllTables()
|
||||
|
||||
```ts
|
||||
abstract dropAllTables(namespace?): Promise<void>
|
||||
abstract dropAllTables(namespacePath?): Promise<void>
|
||||
```
|
||||
|
||||
Drop all tables in the database.
|
||||
|
||||
#### Parameters
|
||||
|
||||
* **namespace?**: `string`[]
|
||||
The namespace to drop tables from (defaults to root namespace).
|
||||
* **namespacePath?**: `string`[]
|
||||
The namespace path to drop tables from (defaults to root namespace).
|
||||
|
||||
#### Returns
|
||||
|
||||
@@ -266,7 +266,7 @@ Drop all tables in the database.
|
||||
### dropTable()
|
||||
|
||||
```ts
|
||||
abstract dropTable(name, namespace?): Promise<void>
|
||||
abstract dropTable(name, namespacePath?): Promise<void>
|
||||
```
|
||||
|
||||
Drop an existing table.
|
||||
@@ -276,8 +276,8 @@ Drop an existing table.
|
||||
* **name**: `string`
|
||||
The name of the table to drop.
|
||||
|
||||
* **namespace?**: `string`[]
|
||||
The namespace of the table (defaults to root namespace).
|
||||
* **namespacePath?**: `string`[]
|
||||
The namespace path of the table (defaults to root namespace).
|
||||
|
||||
#### Returns
|
||||
|
||||
@@ -304,7 +304,7 @@ Return true if the connection has not been closed
|
||||
```ts
|
||||
abstract openTable(
|
||||
name,
|
||||
namespace?,
|
||||
namespacePath?,
|
||||
options?): Promise<Table>
|
||||
```
|
||||
|
||||
@@ -315,8 +315,8 @@ Open a table in the database.
|
||||
* **name**: `string`
|
||||
The name of the table
|
||||
|
||||
* **namespace?**: `string`[]
|
||||
The namespace of the table (defaults to root namespace)
|
||||
* **namespacePath?**: `string`[]
|
||||
The namespace path of the table (defaults to root namespace)
|
||||
|
||||
* **options?**: `Partial`<[`OpenTableOptions`](../interfaces/OpenTableOptions.md)>
|
||||
Additional options
|
||||
@@ -349,10 +349,10 @@ Tables will be returned in lexicographical order.
|
||||
|
||||
`Promise`<`string`[]>
|
||||
|
||||
#### tableNames(namespace, options)
|
||||
#### tableNames(namespacePath, options)
|
||||
|
||||
```ts
|
||||
abstract tableNames(namespace?, options?): Promise<string[]>
|
||||
abstract tableNames(namespacePath?, options?): Promise<string[]>
|
||||
```
|
||||
|
||||
List all the table names in this database.
|
||||
@@ -361,8 +361,8 @@ Tables will be returned in lexicographical order.
|
||||
|
||||
##### Parameters
|
||||
|
||||
* **namespace?**: `string`[]
|
||||
The namespace to list tables from (defaults to root namespace)
|
||||
* **namespacePath?**: `string`[]
|
||||
The namespace path to list tables from (defaults to root namespace)
|
||||
|
||||
* **options?**: `Partial`<[`TableNamesOptions`](../interfaces/TableNamesOptions.md)>
|
||||
options to control the
|
||||
|
||||
@@ -52,7 +52,7 @@ new EmbeddingFunction<T, M>(): EmbeddingFunction<T, M>
|
||||
### computeQueryEmbeddings()
|
||||
|
||||
```ts
|
||||
computeQueryEmbeddings(data): Promise<number[] | Float32Array | Float64Array>
|
||||
computeQueryEmbeddings(data): Promise<number[] | Uint8Array | Float32Array | Float64Array>
|
||||
```
|
||||
|
||||
Compute the embeddings for a single query
|
||||
@@ -63,7 +63,7 @@ Compute the embeddings for a single query
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`number`[] \| `Float32Array` \| `Float64Array`>
|
||||
`Promise`<`number`[] \| `Uint8Array` \| `Float32Array` \| `Float64Array`>
|
||||
|
||||
***
|
||||
|
||||
|
||||
@@ -37,7 +37,7 @@ new TextEmbeddingFunction<M>(): TextEmbeddingFunction<M>
|
||||
### computeQueryEmbeddings()
|
||||
|
||||
```ts
|
||||
computeQueryEmbeddings(data): Promise<number[] | Float32Array | Float64Array>
|
||||
computeQueryEmbeddings(data): Promise<number[] | Uint8Array | Float32Array | Float64Array>
|
||||
```
|
||||
|
||||
Compute the embeddings for a single query
|
||||
@@ -48,7 +48,7 @@ Compute the embeddings for a single query
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<`number`[] \| `Float32Array` \| `Float64Array`>
|
||||
`Promise`<`number`[] \| `Uint8Array` \| `Float32Array` \| `Float64Array`>
|
||||
|
||||
#### Overrides
|
||||
|
||||
|
||||
@@ -7,5 +7,10 @@
|
||||
# Type Alias: IntoVector
|
||||
|
||||
```ts
|
||||
type IntoVector: Float32Array | Float64Array | number[] | Promise<Float32Array | Float64Array | number[]>;
|
||||
type IntoVector:
|
||||
| Float32Array
|
||||
| Float64Array
|
||||
| Uint8Array
|
||||
| number[]
|
||||
| Promise<Float32Array | Float64Array | Uint8Array | number[]>;
|
||||
```
|
||||
|
||||
@@ -36,6 +36,20 @@ is also an [asynchronous API client](#connections-asynchronous).
|
||||
|
||||
::: lancedb.table.Tags
|
||||
|
||||
## Expressions
|
||||
|
||||
Type-safe expression builder for filters and projections. Use these instead
|
||||
of raw SQL strings with [where][lancedb.query.LanceQueryBuilder.where] and
|
||||
[select][lancedb.query.LanceQueryBuilder.select].
|
||||
|
||||
::: lancedb.expr.Expr
|
||||
|
||||
::: lancedb.expr.col
|
||||
|
||||
::: lancedb.expr.lit
|
||||
|
||||
::: lancedb.expr.func
|
||||
|
||||
## Querying (Synchronous)
|
||||
|
||||
::: lancedb.query.Query
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
<parent>
|
||||
<groupId>com.lancedb</groupId>
|
||||
<artifactId>lancedb-parent</artifactId>
|
||||
<version>0.27.1-final.0</version>
|
||||
<version>0.27.2-final.0</version>
|
||||
<relativePath>../pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
<groupId>com.lancedb</groupId>
|
||||
<artifactId>lancedb-parent</artifactId>
|
||||
<version>0.27.1-final.0</version>
|
||||
<version>0.27.2-final.0</version>
|
||||
<packaging>pom</packaging>
|
||||
<name>${project.artifactId}</name>
|
||||
<description>LanceDB Java SDK Parent POM</description>
|
||||
@@ -28,7 +28,7 @@
|
||||
<properties>
|
||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||
<arrow.version>15.0.0</arrow.version>
|
||||
<lance-core.version>3.0.1</lance-core.version>
|
||||
<lance-core.version>5.0.0-beta.4</lance-core.version>
|
||||
<spotless.skip>false</spotless.skip>
|
||||
<spotless.version>2.30.0</spotless.version>
|
||||
<spotless.java.googlejavaformat.version>1.7</spotless.java.googlejavaformat.version>
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "lancedb-nodejs"
|
||||
edition.workspace = true
|
||||
version = "0.27.1"
|
||||
version = "0.27.2"
|
||||
license.workspace = true
|
||||
description.workspace = true
|
||||
repository.workspace = true
|
||||
@@ -15,6 +15,8 @@ crate-type = ["cdylib"]
|
||||
async-trait.workspace = true
|
||||
arrow-ipc.workspace = true
|
||||
arrow-array.workspace = true
|
||||
arrow-buffer = "57.2"
|
||||
half.workspace = true
|
||||
arrow-schema.workspace = true
|
||||
env_logger.workspace = true
|
||||
futures.workspace = true
|
||||
|
||||
@@ -103,7 +103,7 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
|
||||
},
|
||||
numIndices: 0,
|
||||
numRows: 3,
|
||||
totalBytes: 24,
|
||||
totalBytes: 44,
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
110
nodejs/__test__/vector_types.test.ts
Normal file
110
nodejs/__test__/vector_types.test.ts
Normal file
@@ -0,0 +1,110 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
||||
|
||||
import * as tmp from "tmp";
|
||||
|
||||
import { type Table, connect } from "../lancedb";
|
||||
import {
|
||||
Field,
|
||||
FixedSizeList,
|
||||
Float32,
|
||||
Int64,
|
||||
Schema,
|
||||
makeArrowTable,
|
||||
} from "../lancedb/arrow";
|
||||
|
||||
describe("Vector query with different typed arrays", () => {
|
||||
let tmpDir: tmp.DirResult;
|
||||
|
||||
afterEach(() => {
|
||||
tmpDir?.removeCallback();
|
||||
});
|
||||
|
||||
async function createFloat32Table(): Promise<Table> {
|
||||
tmpDir = tmp.dirSync({ unsafeCleanup: true });
|
||||
const db = await connect(tmpDir.name);
|
||||
const schema = new Schema([
|
||||
new Field("id", new Int64(), true),
|
||||
new Field(
|
||||
"vec",
|
||||
new FixedSizeList(2, new Field("item", new Float32())),
|
||||
true,
|
||||
),
|
||||
]);
|
||||
const data = makeArrowTable(
|
||||
[
|
||||
{ id: 1n, vec: [1.0, 0.0] },
|
||||
{ id: 2n, vec: [0.0, 1.0] },
|
||||
{ id: 3n, vec: [1.0, 1.0] },
|
||||
],
|
||||
{ schema },
|
||||
);
|
||||
return db.createTable("test_f32", data);
|
||||
}
|
||||
|
||||
it("should search with Float32Array (baseline)", async () => {
|
||||
const table = await createFloat32Table();
|
||||
const results = await table
|
||||
.query()
|
||||
.nearestTo(new Float32Array([1.0, 0.0]))
|
||||
.limit(1)
|
||||
.toArray();
|
||||
|
||||
expect(results.length).toBe(1);
|
||||
expect(Number(results[0].id)).toBe(1);
|
||||
});
|
||||
|
||||
it("should search with number[] (backward compat)", async () => {
|
||||
const table = await createFloat32Table();
|
||||
const results = await table
|
||||
.query()
|
||||
.nearestTo([1.0, 0.0])
|
||||
.limit(1)
|
||||
.toArray();
|
||||
|
||||
expect(results.length).toBe(1);
|
||||
expect(Number(results[0].id)).toBe(1);
|
||||
});
|
||||
|
||||
it("should search with Float64Array via raw path", async () => {
|
||||
const table = await createFloat32Table();
|
||||
const results = await table
|
||||
.query()
|
||||
.nearestTo(new Float64Array([1.0, 0.0]))
|
||||
.limit(1)
|
||||
.toArray();
|
||||
|
||||
expect(results.length).toBe(1);
|
||||
expect(Number(results[0].id)).toBe(1);
|
||||
});
|
||||
|
||||
it("should add multiple query vectors with Float64Array", async () => {
|
||||
const table = await createFloat32Table();
|
||||
const results = await table
|
||||
.query()
|
||||
.nearestTo(new Float64Array([1.0, 0.0]))
|
||||
.addQueryVector(new Float64Array([0.0, 1.0]))
|
||||
.limit(2)
|
||||
.toArray();
|
||||
|
||||
expect(results.length).toBeGreaterThanOrEqual(2);
|
||||
});
|
||||
|
||||
// Float16Array is only available in Node 22+; not in TypeScript's standard lib yet
|
||||
const float16ArrayCtor = (globalThis as unknown as Record<string, unknown>)
|
||||
.Float16Array as (new (values: number[]) => unknown) | undefined;
|
||||
const hasFloat16 = float16ArrayCtor !== undefined;
|
||||
const f16it = hasFloat16 ? it : it.skip;
|
||||
|
||||
f16it("should search with Float16Array via raw path", async () => {
|
||||
const table = await createFloat32Table();
|
||||
const results = await table
|
||||
.query()
|
||||
.nearestTo(new float16ArrayCtor!([1.0, 0.0]) as Float32Array)
|
||||
.limit(1)
|
||||
.toArray();
|
||||
|
||||
expect(results.length).toBe(1);
|
||||
expect(Number(results[0].id)).toBe(1);
|
||||
});
|
||||
});
|
||||
@@ -117,8 +117,9 @@ export type TableLike =
|
||||
export type IntoVector =
|
||||
| Float32Array
|
||||
| Float64Array
|
||||
| Uint8Array
|
||||
| number[]
|
||||
| Promise<Float32Array | Float64Array | number[]>;
|
||||
| Promise<Float32Array | Float64Array | Uint8Array | number[]>;
|
||||
|
||||
export type MultiVector = IntoVector[];
|
||||
|
||||
@@ -126,14 +127,48 @@ export function isMultiVector(value: unknown): value is MultiVector {
|
||||
return Array.isArray(value) && isIntoVector(value[0]);
|
||||
}
|
||||
|
||||
// Float16Array is not in TypeScript's standard lib yet; access dynamically
|
||||
type Float16ArrayCtor = new (
|
||||
...args: unknown[]
|
||||
) => { buffer: ArrayBuffer; byteOffset: number; byteLength: number };
|
||||
const float16ArrayCtor = (globalThis as unknown as Record<string, unknown>)
|
||||
.Float16Array as Float16ArrayCtor | undefined;
|
||||
|
||||
export function isIntoVector(value: unknown): value is IntoVector {
|
||||
return (
|
||||
value instanceof Float32Array ||
|
||||
value instanceof Float64Array ||
|
||||
value instanceof Uint8Array ||
|
||||
(float16ArrayCtor !== undefined && value instanceof float16ArrayCtor) ||
|
||||
(Array.isArray(value) && !Array.isArray(value[0]))
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract the underlying byte buffer and data type from a typed array
|
||||
* for passing to the Rust NAPI layer without precision loss.
|
||||
*/
|
||||
export function extractVectorBuffer(
|
||||
vector: Float32Array | Float64Array | Uint8Array,
|
||||
): { data: Uint8Array; dtype: string } | null {
|
||||
if (float16ArrayCtor !== undefined && vector instanceof float16ArrayCtor) {
|
||||
return {
|
||||
data: new Uint8Array(vector.buffer, vector.byteOffset, vector.byteLength),
|
||||
dtype: "float16",
|
||||
};
|
||||
}
|
||||
if (vector instanceof Float64Array) {
|
||||
return {
|
||||
data: new Uint8Array(vector.buffer, vector.byteOffset, vector.byteLength),
|
||||
dtype: "float64",
|
||||
};
|
||||
}
|
||||
if (vector instanceof Uint8Array && !(vector instanceof Float32Array)) {
|
||||
return { data: vector, dtype: "uint8" };
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
export function isArrowTable(value: object): value is TableLike {
|
||||
if (value instanceof ArrowTable) return true;
|
||||
return "schema" in value && "batches" in value;
|
||||
|
||||
@@ -166,25 +166,25 @@ export abstract class Connection {
|
||||
* List all the table names in this database.
|
||||
*
|
||||
* Tables will be returned in lexicographical order.
|
||||
* @param {string[]} namespace - The namespace to list tables from (defaults to root namespace)
|
||||
* @param {string[]} namespacePath - The namespace path to list tables from (defaults to root namespace)
|
||||
* @param {Partial<TableNamesOptions>} options - options to control the
|
||||
* paging / start point
|
||||
*
|
||||
*/
|
||||
abstract tableNames(
|
||||
namespace?: string[],
|
||||
namespacePath?: string[],
|
||||
options?: Partial<TableNamesOptions>,
|
||||
): Promise<string[]>;
|
||||
|
||||
/**
|
||||
* Open a table in the database.
|
||||
* @param {string} name - The name of the table
|
||||
* @param {string[]} namespace - The namespace of the table (defaults to root namespace)
|
||||
* @param {string[]} namespacePath - The namespace path of the table (defaults to root namespace)
|
||||
* @param {Partial<OpenTableOptions>} options - Additional options
|
||||
*/
|
||||
abstract openTable(
|
||||
name: string,
|
||||
namespace?: string[],
|
||||
namespacePath?: string[],
|
||||
options?: Partial<OpenTableOptions>,
|
||||
): Promise<Table>;
|
||||
|
||||
@@ -193,7 +193,7 @@ export abstract class Connection {
|
||||
* @param {object} options - The options object.
|
||||
* @param {string} options.name - The name of the table.
|
||||
* @param {Data} options.data - Non-empty Array of Records to be inserted into the table
|
||||
* @param {string[]} namespace - The namespace to create the table in (defaults to root namespace)
|
||||
* @param {string[]} namespacePath - The namespace path to create the table in (defaults to root namespace)
|
||||
*
|
||||
*/
|
||||
abstract createTable(
|
||||
@@ -201,7 +201,7 @@ export abstract class Connection {
|
||||
name: string;
|
||||
data: Data;
|
||||
} & Partial<CreateTableOptions>,
|
||||
namespace?: string[],
|
||||
namespacePath?: string[],
|
||||
): Promise<Table>;
|
||||
/**
|
||||
* Creates a new Table and initialize it with new data.
|
||||
@@ -220,13 +220,13 @@ export abstract class Connection {
|
||||
* @param {string} name - The name of the table.
|
||||
* @param {Record<string, unknown>[] | TableLike} data - Non-empty Array of Records
|
||||
* to be inserted into the table
|
||||
* @param {string[]} namespace - The namespace to create the table in (defaults to root namespace)
|
||||
* @param {string[]} namespacePath - The namespace path to create the table in (defaults to root namespace)
|
||||
* @param {Partial<CreateTableOptions>} options - Additional options
|
||||
*/
|
||||
abstract createTable(
|
||||
name: string,
|
||||
data: Record<string, unknown>[] | TableLike,
|
||||
namespace?: string[],
|
||||
namespacePath?: string[],
|
||||
options?: Partial<CreateTableOptions>,
|
||||
): Promise<Table>;
|
||||
|
||||
@@ -245,28 +245,28 @@ export abstract class Connection {
|
||||
* Creates a new empty Table
|
||||
* @param {string} name - The name of the table.
|
||||
* @param {Schema} schema - The schema of the table
|
||||
* @param {string[]} namespace - The namespace to create the table in (defaults to root namespace)
|
||||
* @param {string[]} namespacePath - The namespace path to create the table in (defaults to root namespace)
|
||||
* @param {Partial<CreateTableOptions>} options - Additional options
|
||||
*/
|
||||
abstract createEmptyTable(
|
||||
name: string,
|
||||
schema: import("./arrow").SchemaLike,
|
||||
namespace?: string[],
|
||||
namespacePath?: string[],
|
||||
options?: Partial<CreateTableOptions>,
|
||||
): Promise<Table>;
|
||||
|
||||
/**
|
||||
* Drop an existing table.
|
||||
* @param {string} name The name of the table to drop.
|
||||
* @param {string[]} namespace The namespace of the table (defaults to root namespace).
|
||||
* @param {string[]} namespacePath The namespace path of the table (defaults to root namespace).
|
||||
*/
|
||||
abstract dropTable(name: string, namespace?: string[]): Promise<void>;
|
||||
abstract dropTable(name: string, namespacePath?: string[]): Promise<void>;
|
||||
|
||||
/**
|
||||
* Drop all tables in the database.
|
||||
* @param {string[]} namespace The namespace to drop tables from (defaults to root namespace).
|
||||
* @param {string[]} namespacePath The namespace path to drop tables from (defaults to root namespace).
|
||||
*/
|
||||
abstract dropAllTables(namespace?: string[]): Promise<void>;
|
||||
abstract dropAllTables(namespacePath?: string[]): Promise<void>;
|
||||
|
||||
/**
|
||||
* Clone a table from a source table.
|
||||
@@ -279,7 +279,7 @@ export abstract class Connection {
|
||||
* @param {string} targetTableName - The name of the target table to create.
|
||||
* @param {string} sourceUri - The URI of the source table to clone from.
|
||||
* @param {object} options - Clone options.
|
||||
* @param {string[]} options.targetNamespace - The namespace for the target table (defaults to root namespace).
|
||||
* @param {string[]} options.targetNamespacePath - The namespace path for the target table (defaults to root namespace).
|
||||
* @param {number} options.sourceVersion - The version of the source table to clone.
|
||||
* @param {string} options.sourceTag - The tag of the source table to clone.
|
||||
* @param {boolean} options.isShallow - Whether to perform a shallow clone (defaults to true).
|
||||
@@ -288,7 +288,7 @@ export abstract class Connection {
|
||||
targetTableName: string,
|
||||
sourceUri: string,
|
||||
options?: {
|
||||
targetNamespace?: string[];
|
||||
targetNamespacePath?: string[];
|
||||
sourceVersion?: number;
|
||||
sourceTag?: string;
|
||||
isShallow?: boolean;
|
||||
@@ -319,25 +319,25 @@ export class LocalConnection extends Connection {
|
||||
}
|
||||
|
||||
async tableNames(
|
||||
namespaceOrOptions?: string[] | Partial<TableNamesOptions>,
|
||||
namespacePathOrOptions?: string[] | Partial<TableNamesOptions>,
|
||||
options?: Partial<TableNamesOptions>,
|
||||
): Promise<string[]> {
|
||||
// Detect if first argument is namespace array or options object
|
||||
let namespace: string[] | undefined;
|
||||
// Detect if first argument is namespacePath array or options object
|
||||
let namespacePath: string[] | undefined;
|
||||
let tableNamesOptions: Partial<TableNamesOptions> | undefined;
|
||||
|
||||
if (Array.isArray(namespaceOrOptions)) {
|
||||
// First argument is namespace array
|
||||
namespace = namespaceOrOptions;
|
||||
if (Array.isArray(namespacePathOrOptions)) {
|
||||
// First argument is namespacePath array
|
||||
namespacePath = namespacePathOrOptions;
|
||||
tableNamesOptions = options;
|
||||
} else {
|
||||
// First argument is options object (backwards compatibility)
|
||||
namespace = undefined;
|
||||
tableNamesOptions = namespaceOrOptions;
|
||||
namespacePath = undefined;
|
||||
tableNamesOptions = namespacePathOrOptions;
|
||||
}
|
||||
|
||||
return this.inner.tableNames(
|
||||
namespace ?? [],
|
||||
namespacePath ?? [],
|
||||
tableNamesOptions?.startAfter,
|
||||
tableNamesOptions?.limit,
|
||||
);
|
||||
@@ -345,12 +345,12 @@ export class LocalConnection extends Connection {
|
||||
|
||||
async openTable(
|
||||
name: string,
|
||||
namespace?: string[],
|
||||
namespacePath?: string[],
|
||||
options?: Partial<OpenTableOptions>,
|
||||
): Promise<Table> {
|
||||
const innerTable = await this.inner.openTable(
|
||||
name,
|
||||
namespace ?? [],
|
||||
namespacePath ?? [],
|
||||
cleanseStorageOptions(options?.storageOptions),
|
||||
options?.indexCacheSize,
|
||||
);
|
||||
@@ -362,7 +362,7 @@ export class LocalConnection extends Connection {
|
||||
targetTableName: string,
|
||||
sourceUri: string,
|
||||
options?: {
|
||||
targetNamespace?: string[];
|
||||
targetNamespacePath?: string[];
|
||||
sourceVersion?: number;
|
||||
sourceTag?: string;
|
||||
isShallow?: boolean;
|
||||
@@ -371,7 +371,7 @@ export class LocalConnection extends Connection {
|
||||
const innerTable = await this.inner.cloneTable(
|
||||
targetTableName,
|
||||
sourceUri,
|
||||
options?.targetNamespace ?? [],
|
||||
options?.targetNamespacePath ?? [],
|
||||
options?.sourceVersion ?? null,
|
||||
options?.sourceTag ?? null,
|
||||
options?.isShallow ?? true,
|
||||
@@ -406,42 +406,42 @@ export class LocalConnection extends Connection {
|
||||
nameOrOptions:
|
||||
| string
|
||||
| ({ name: string; data: Data } & Partial<CreateTableOptions>),
|
||||
dataOrNamespace?: Record<string, unknown>[] | TableLike | string[],
|
||||
namespaceOrOptions?: string[] | Partial<CreateTableOptions>,
|
||||
dataOrNamespacePath?: Record<string, unknown>[] | TableLike | string[],
|
||||
namespacePathOrOptions?: string[] | Partial<CreateTableOptions>,
|
||||
options?: Partial<CreateTableOptions>,
|
||||
): Promise<Table> {
|
||||
if (typeof nameOrOptions !== "string" && "name" in nameOrOptions) {
|
||||
// First overload: createTable(options, namespace?)
|
||||
// First overload: createTable(options, namespacePath?)
|
||||
const { name, data, ...createOptions } = nameOrOptions;
|
||||
const namespace = dataOrNamespace as string[] | undefined;
|
||||
return this._createTableImpl(name, data, namespace, createOptions);
|
||||
const namespacePath = dataOrNamespacePath as string[] | undefined;
|
||||
return this._createTableImpl(name, data, namespacePath, createOptions);
|
||||
}
|
||||
|
||||
// Second overload: createTable(name, data, namespace?, options?)
|
||||
// Second overload: createTable(name, data, namespacePath?, options?)
|
||||
const name = nameOrOptions;
|
||||
const data = dataOrNamespace as Record<string, unknown>[] | TableLike;
|
||||
const data = dataOrNamespacePath as Record<string, unknown>[] | TableLike;
|
||||
|
||||
// Detect if third argument is namespace array or options object
|
||||
let namespace: string[] | undefined;
|
||||
// Detect if third argument is namespacePath array or options object
|
||||
let namespacePath: string[] | undefined;
|
||||
let createOptions: Partial<CreateTableOptions> | undefined;
|
||||
|
||||
if (Array.isArray(namespaceOrOptions)) {
|
||||
// Third argument is namespace array
|
||||
namespace = namespaceOrOptions;
|
||||
if (Array.isArray(namespacePathOrOptions)) {
|
||||
// Third argument is namespacePath array
|
||||
namespacePath = namespacePathOrOptions;
|
||||
createOptions = options;
|
||||
} else {
|
||||
// Third argument is options object (backwards compatibility)
|
||||
namespace = undefined;
|
||||
createOptions = namespaceOrOptions;
|
||||
namespacePath = undefined;
|
||||
createOptions = namespacePathOrOptions;
|
||||
}
|
||||
|
||||
return this._createTableImpl(name, data, namespace, createOptions);
|
||||
return this._createTableImpl(name, data, namespacePath, createOptions);
|
||||
}
|
||||
|
||||
private async _createTableImpl(
|
||||
name: string,
|
||||
data: Data,
|
||||
namespace?: string[],
|
||||
namespacePath?: string[],
|
||||
options?: Partial<CreateTableOptions>,
|
||||
): Promise<Table> {
|
||||
if (data === undefined) {
|
||||
@@ -455,7 +455,7 @@ export class LocalConnection extends Connection {
|
||||
name,
|
||||
buf,
|
||||
mode,
|
||||
namespace ?? [],
|
||||
namespacePath ?? [],
|
||||
storageOptions,
|
||||
);
|
||||
|
||||
@@ -465,21 +465,21 @@ export class LocalConnection extends Connection {
|
||||
async createEmptyTable(
|
||||
name: string,
|
||||
schema: import("./arrow").SchemaLike,
|
||||
namespaceOrOptions?: string[] | Partial<CreateTableOptions>,
|
||||
namespacePathOrOptions?: string[] | Partial<CreateTableOptions>,
|
||||
options?: Partial<CreateTableOptions>,
|
||||
): Promise<Table> {
|
||||
// Detect if third argument is namespace array or options object
|
||||
let namespace: string[] | undefined;
|
||||
// Detect if third argument is namespacePath array or options object
|
||||
let namespacePath: string[] | undefined;
|
||||
let createOptions: Partial<CreateTableOptions> | undefined;
|
||||
|
||||
if (Array.isArray(namespaceOrOptions)) {
|
||||
// Third argument is namespace array
|
||||
namespace = namespaceOrOptions;
|
||||
if (Array.isArray(namespacePathOrOptions)) {
|
||||
// Third argument is namespacePath array
|
||||
namespacePath = namespacePathOrOptions;
|
||||
createOptions = options;
|
||||
} else {
|
||||
// Third argument is options object (backwards compatibility)
|
||||
namespace = undefined;
|
||||
createOptions = namespaceOrOptions;
|
||||
namespacePath = undefined;
|
||||
createOptions = namespacePathOrOptions;
|
||||
}
|
||||
|
||||
let mode: string = createOptions?.mode ?? "create";
|
||||
@@ -502,18 +502,18 @@ export class LocalConnection extends Connection {
|
||||
name,
|
||||
buf,
|
||||
mode,
|
||||
namespace ?? [],
|
||||
namespacePath ?? [],
|
||||
storageOptions,
|
||||
);
|
||||
return new LocalTable(innerTable);
|
||||
}
|
||||
|
||||
async dropTable(name: string, namespace?: string[]): Promise<void> {
|
||||
return this.inner.dropTable(name, namespace ?? []);
|
||||
async dropTable(name: string, namespacePath?: string[]): Promise<void> {
|
||||
return this.inner.dropTable(name, namespacePath ?? []);
|
||||
}
|
||||
|
||||
async dropAllTables(namespace?: string[]): Promise<void> {
|
||||
return this.inner.dropAllTables(namespace ?? []);
|
||||
async dropAllTables(namespacePath?: string[]): Promise<void> {
|
||||
return this.inner.dropAllTables(namespacePath ?? []);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ import {
|
||||
Table as ArrowTable,
|
||||
type IntoVector,
|
||||
RecordBatch,
|
||||
extractVectorBuffer,
|
||||
fromBufferToRecordBatch,
|
||||
fromRecordBatchToBuffer,
|
||||
tableFromIPC,
|
||||
@@ -661,10 +662,8 @@ export class VectorQuery extends StandardQueryBase<NativeVectorQuery> {
|
||||
const res = (async () => {
|
||||
try {
|
||||
const v = await vector;
|
||||
const arr = Float32Array.from(v);
|
||||
//
|
||||
// biome-ignore lint/suspicious/noExplicitAny: we need to get the `inner`, but js has no package scoping
|
||||
const value: any = this.addQueryVector(arr);
|
||||
const value: any = this.addQueryVector(v);
|
||||
const inner = value.inner as
|
||||
| NativeVectorQuery
|
||||
| Promise<NativeVectorQuery>;
|
||||
@@ -676,7 +675,12 @@ export class VectorQuery extends StandardQueryBase<NativeVectorQuery> {
|
||||
return new VectorQuery(res);
|
||||
} else {
|
||||
super.doCall((inner) => {
|
||||
inner.addQueryVector(Float32Array.from(vector));
|
||||
const raw = Array.isArray(vector) ? null : extractVectorBuffer(vector);
|
||||
if (raw) {
|
||||
inner.addQueryVectorRaw(raw.data, raw.dtype);
|
||||
} else {
|
||||
inner.addQueryVector(Float32Array.from(vector as number[]));
|
||||
}
|
||||
});
|
||||
return this;
|
||||
}
|
||||
@@ -765,14 +769,23 @@ export class Query extends StandardQueryBase<NativeQuery> {
|
||||
* a default `limit` of 10 will be used. @see {@link Query#limit}
|
||||
*/
|
||||
nearestTo(vector: IntoVector): VectorQuery {
|
||||
const callNearestTo = (
|
||||
inner: NativeQuery,
|
||||
resolved: Float32Array | Float64Array | Uint8Array | number[],
|
||||
): NativeVectorQuery => {
|
||||
const raw = Array.isArray(resolved)
|
||||
? null
|
||||
: extractVectorBuffer(resolved);
|
||||
if (raw) {
|
||||
return inner.nearestToRaw(raw.data, raw.dtype);
|
||||
}
|
||||
return inner.nearestTo(Float32Array.from(resolved as number[]));
|
||||
};
|
||||
|
||||
if (this.inner instanceof Promise) {
|
||||
const nativeQuery = this.inner.then(async (inner) => {
|
||||
if (vector instanceof Promise) {
|
||||
const arr = await vector.then((v) => Float32Array.from(v));
|
||||
return inner.nearestTo(arr);
|
||||
} else {
|
||||
return inner.nearestTo(Float32Array.from(vector));
|
||||
}
|
||||
const resolved = vector instanceof Promise ? await vector : vector;
|
||||
return callNearestTo(inner, resolved);
|
||||
});
|
||||
return new VectorQuery(nativeQuery);
|
||||
}
|
||||
@@ -780,10 +793,8 @@ export class Query extends StandardQueryBase<NativeQuery> {
|
||||
const res = (async () => {
|
||||
try {
|
||||
const v = await vector;
|
||||
const arr = Float32Array.from(v);
|
||||
//
|
||||
// biome-ignore lint/suspicious/noExplicitAny: we need to get the `inner`, but js has no package scoping
|
||||
const value: any = this.nearestTo(arr);
|
||||
const value: any = this.nearestTo(v);
|
||||
const inner = value.inner as
|
||||
| NativeVectorQuery
|
||||
| Promise<NativeVectorQuery>;
|
||||
@@ -794,7 +805,7 @@ export class Query extends StandardQueryBase<NativeQuery> {
|
||||
})();
|
||||
return new VectorQuery(res);
|
||||
} else {
|
||||
const vectorQuery = this.inner.nearestTo(Float32Array.from(vector));
|
||||
const vectorQuery = callNearestTo(this.inner, vector);
|
||||
return new VectorQuery(vectorQuery);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-darwin-arm64",
|
||||
"version": "0.27.1",
|
||||
"version": "0.27.2",
|
||||
"os": ["darwin"],
|
||||
"cpu": ["arm64"],
|
||||
"main": "lancedb.darwin-arm64.node",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-linux-arm64-gnu",
|
||||
"version": "0.27.1",
|
||||
"version": "0.27.2",
|
||||
"os": ["linux"],
|
||||
"cpu": ["arm64"],
|
||||
"main": "lancedb.linux-arm64-gnu.node",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-linux-arm64-musl",
|
||||
"version": "0.27.1",
|
||||
"version": "0.27.2",
|
||||
"os": ["linux"],
|
||||
"cpu": ["arm64"],
|
||||
"main": "lancedb.linux-arm64-musl.node",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-linux-x64-gnu",
|
||||
"version": "0.27.1",
|
||||
"version": "0.27.2",
|
||||
"os": ["linux"],
|
||||
"cpu": ["x64"],
|
||||
"main": "lancedb.linux-x64-gnu.node",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-linux-x64-musl",
|
||||
"version": "0.27.1",
|
||||
"version": "0.27.2",
|
||||
"os": ["linux"],
|
||||
"cpu": ["x64"],
|
||||
"main": "lancedb.linux-x64-musl.node",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-win32-arm64-msvc",
|
||||
"version": "0.27.1",
|
||||
"version": "0.27.2",
|
||||
"os": [
|
||||
"win32"
|
||||
],
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-win32-x64-msvc",
|
||||
"version": "0.27.1",
|
||||
"version": "0.27.2",
|
||||
"os": ["win32"],
|
||||
"cpu": ["x64"],
|
||||
"main": "lancedb.win32-x64-msvc.node",
|
||||
|
||||
4
nodejs/package-lock.json
generated
4
nodejs/package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb",
|
||||
"version": "0.27.1",
|
||||
"version": "0.27.2",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@lancedb/lancedb",
|
||||
"version": "0.27.1",
|
||||
"version": "0.27.2",
|
||||
"cpu": [
|
||||
"x64",
|
||||
"arm64"
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"ann"
|
||||
],
|
||||
"private": false,
|
||||
"version": "0.27.1",
|
||||
"version": "0.27.2",
|
||||
"main": "dist/index.js",
|
||||
"exports": {
|
||||
".": "./dist/index.js",
|
||||
|
||||
@@ -119,12 +119,12 @@ impl Connection {
|
||||
#[napi(catch_unwind)]
|
||||
pub async fn table_names(
|
||||
&self,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Option<Vec<String>>,
|
||||
start_after: Option<String>,
|
||||
limit: Option<u32>,
|
||||
) -> napi::Result<Vec<String>> {
|
||||
let mut op = self.get_inner()?.table_names();
|
||||
op = op.namespace(namespace);
|
||||
op = op.namespace(namespace_path.unwrap_or_default());
|
||||
if let Some(start_after) = start_after {
|
||||
op = op.start_after(start_after);
|
||||
}
|
||||
@@ -146,7 +146,7 @@ impl Connection {
|
||||
name: String,
|
||||
buf: Buffer,
|
||||
mode: String,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Option<Vec<String>>,
|
||||
storage_options: Option<HashMap<String, String>>,
|
||||
) -> napi::Result<Table> {
|
||||
let batches = ipc_file_to_batches(buf.to_vec())
|
||||
@@ -154,7 +154,7 @@ impl Connection {
|
||||
let mode = Self::parse_create_mode_str(&mode)?;
|
||||
let mut builder = self.get_inner()?.create_table(&name, batches).mode(mode);
|
||||
|
||||
builder = builder.namespace(namespace);
|
||||
builder = builder.namespace(namespace_path.unwrap_or_default());
|
||||
|
||||
if let Some(storage_options) = storage_options {
|
||||
for (key, value) in storage_options {
|
||||
@@ -171,7 +171,7 @@ impl Connection {
|
||||
name: String,
|
||||
schema_buf: Buffer,
|
||||
mode: String,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Option<Vec<String>>,
|
||||
storage_options: Option<HashMap<String, String>>,
|
||||
) -> napi::Result<Table> {
|
||||
let schema = ipc_file_to_schema(schema_buf.to_vec()).map_err(|e| {
|
||||
@@ -183,7 +183,7 @@ impl Connection {
|
||||
.create_empty_table(&name, schema)
|
||||
.mode(mode);
|
||||
|
||||
builder = builder.namespace(namespace);
|
||||
builder = builder.namespace(namespace_path.unwrap_or_default());
|
||||
|
||||
if let Some(storage_options) = storage_options {
|
||||
for (key, value) in storage_options {
|
||||
@@ -198,13 +198,13 @@ impl Connection {
|
||||
pub async fn open_table(
|
||||
&self,
|
||||
name: String,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Option<Vec<String>>,
|
||||
storage_options: Option<HashMap<String, String>>,
|
||||
index_cache_size: Option<u32>,
|
||||
) -> napi::Result<Table> {
|
||||
let mut builder = self.get_inner()?.open_table(&name);
|
||||
|
||||
builder = builder.namespace(namespace);
|
||||
builder = builder.namespace(namespace_path.unwrap_or_default());
|
||||
|
||||
if let Some(storage_options) = storage_options {
|
||||
for (key, value) in storage_options {
|
||||
@@ -223,7 +223,7 @@ impl Connection {
|
||||
&self,
|
||||
target_table_name: String,
|
||||
source_uri: String,
|
||||
target_namespace: Vec<String>,
|
||||
target_namespace_path: Option<Vec<String>>,
|
||||
source_version: Option<i64>,
|
||||
source_tag: Option<String>,
|
||||
is_shallow: bool,
|
||||
@@ -232,7 +232,7 @@ impl Connection {
|
||||
.get_inner()?
|
||||
.clone_table(&target_table_name, &source_uri);
|
||||
|
||||
builder = builder.target_namespace(target_namespace);
|
||||
builder = builder.target_namespace(target_namespace_path.unwrap_or_default());
|
||||
|
||||
if let Some(version) = source_version {
|
||||
builder = builder.source_version(version as u64);
|
||||
@@ -250,18 +250,21 @@ impl Connection {
|
||||
|
||||
/// Drop table with the name. Or raise an error if the table does not exist.
|
||||
#[napi(catch_unwind)]
|
||||
pub async fn drop_table(&self, name: String, namespace: Vec<String>) -> napi::Result<()> {
|
||||
pub async fn drop_table(
|
||||
&self,
|
||||
name: String,
|
||||
namespace_path: Option<Vec<String>>,
|
||||
) -> napi::Result<()> {
|
||||
let ns = namespace_path.unwrap_or_default();
|
||||
self.get_inner()?
|
||||
.drop_table(&name, &namespace)
|
||||
.drop_table(&name, &ns)
|
||||
.await
|
||||
.default_error()
|
||||
}
|
||||
|
||||
#[napi(catch_unwind)]
|
||||
pub async fn drop_all_tables(&self, namespace: Vec<String>) -> napi::Result<()> {
|
||||
self.get_inner()?
|
||||
.drop_all_tables(&namespace)
|
||||
.await
|
||||
.default_error()
|
||||
pub async fn drop_all_tables(&self, namespace_path: Option<Vec<String>>) -> napi::Result<()> {
|
||||
let ns = namespace_path.unwrap_or_default();
|
||||
self.get_inner()?.drop_all_tables(&ns).await.default_error()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,12 @@
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use arrow_array::{
|
||||
Array, Float16Array as ArrowFloat16Array, Float32Array as ArrowFloat32Array,
|
||||
Float64Array as ArrowFloat64Array, UInt8Array as ArrowUInt8Array,
|
||||
};
|
||||
use arrow_buffer::ScalarBuffer;
|
||||
use half::f16;
|
||||
use lancedb::index::scalar::{
|
||||
BooleanQuery, BoostQuery, FtsQuery, FullTextSearchQuery, MatchQuery, MultiMatchQuery, Occur,
|
||||
Operator, PhraseQuery,
|
||||
@@ -24,6 +30,33 @@ use crate::rerankers::RerankHybridCallbackArgs;
|
||||
use crate::rerankers::Reranker;
|
||||
use crate::util::{parse_distance_type, schema_to_buffer};
|
||||
|
||||
fn bytes_to_arrow_array(data: Uint8Array, dtype: String) -> napi::Result<Arc<dyn Array>> {
|
||||
let buf = arrow_buffer::Buffer::from(data.to_vec());
|
||||
let num_bytes = buf.len();
|
||||
match dtype.as_str() {
|
||||
"float16" => {
|
||||
let scalar_buf = ScalarBuffer::<f16>::new(buf, 0, num_bytes / 2);
|
||||
Ok(Arc::new(ArrowFloat16Array::new(scalar_buf, None)))
|
||||
}
|
||||
"float32" => {
|
||||
let scalar_buf = ScalarBuffer::<f32>::new(buf, 0, num_bytes / 4);
|
||||
Ok(Arc::new(ArrowFloat32Array::new(scalar_buf, None)))
|
||||
}
|
||||
"float64" => {
|
||||
let scalar_buf = ScalarBuffer::<f64>::new(buf, 0, num_bytes / 8);
|
||||
Ok(Arc::new(ArrowFloat64Array::new(scalar_buf, None)))
|
||||
}
|
||||
"uint8" => {
|
||||
let scalar_buf = ScalarBuffer::<u8>::new(buf, 0, num_bytes);
|
||||
Ok(Arc::new(ArrowUInt8Array::new(scalar_buf, None)))
|
||||
}
|
||||
_ => Err(napi::Error::from_reason(format!(
|
||||
"Unsupported vector dtype: {}. Expected one of: float16, float32, float64, uint8",
|
||||
dtype
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub struct Query {
|
||||
inner: LanceDbQuery,
|
||||
@@ -78,6 +111,13 @@ impl Query {
|
||||
Ok(VectorQuery { inner })
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn nearest_to_raw(&mut self, data: Uint8Array, dtype: String) -> Result<VectorQuery> {
|
||||
let array = bytes_to_arrow_array(data, dtype)?;
|
||||
let inner = self.inner.clone().nearest_to(array).default_error()?;
|
||||
Ok(VectorQuery { inner })
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn fast_search(&mut self) {
|
||||
self.inner = self.inner.clone().fast_search();
|
||||
@@ -163,6 +203,13 @@ impl VectorQuery {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn add_query_vector_raw(&mut self, data: Uint8Array, dtype: String) -> Result<()> {
|
||||
let array = bytes_to_arrow_array(data, dtype)?;
|
||||
self.inner = self.inner.clone().add_query_vector(array).default_error()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub fn distance_type(&mut self, distance_type: String) -> napi::Result<()> {
|
||||
let distance_type = parse_distance_type(distance_type)?;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
[tool.bumpversion]
|
||||
current_version = "0.30.1"
|
||||
current_version = "0.31.0-beta.0"
|
||||
parse = """(?x)
|
||||
(?P<major>0|[1-9]\\d*)\\.
|
||||
(?P<minor>0|[1-9]\\d*)\\.
|
||||
|
||||
2
python/.gitignore
vendored
2
python/.gitignore
vendored
@@ -1,3 +1,5 @@
|
||||
# Test data created by some example tests
|
||||
data/
|
||||
_lancedb.pyd
|
||||
# macOS debug symbols bundle generated during build
|
||||
*.dSYM/
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lancedb-python"
|
||||
version = "0.30.1"
|
||||
version = "0.31.0-beta.0"
|
||||
edition.workspace = true
|
||||
description = "Python bindings for LanceDB"
|
||||
license.workspace = true
|
||||
@@ -23,6 +23,7 @@ lance-namespace.workspace = true
|
||||
lance-namespace-impls.workspace = true
|
||||
lance-io.workspace = true
|
||||
env_logger.workspace = true
|
||||
log.workspace = true
|
||||
pyo3 = { version = "0.26", features = ["extension-module", "abi3-py39"] }
|
||||
pyo3-async-runtimes = { version = "0.26", features = [
|
||||
"attributes",
|
||||
|
||||
@@ -45,7 +45,7 @@ repository = "https://github.com/lancedb/lancedb"
|
||||
|
||||
[project.optional-dependencies]
|
||||
pylance = [
|
||||
"pylance>=4.0.0b7",
|
||||
"pylance>=5.0.0b3",
|
||||
]
|
||||
tests = [
|
||||
"aiohttp>=3.9.0",
|
||||
@@ -59,7 +59,7 @@ tests = [
|
||||
"polars>=0.19, <=1.3.0",
|
||||
"tantivy>=0.20.0",
|
||||
"pyarrow-stubs>=16.0",
|
||||
"pylance>=4.0.0b7",
|
||||
"pylance>=5.0.0b3",
|
||||
"requests>=2.31.0",
|
||||
"datafusion>=52,<53",
|
||||
]
|
||||
|
||||
@@ -6,7 +6,7 @@ import importlib.metadata
|
||||
import os
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from datetime import timedelta
|
||||
from typing import Dict, Optional, Union, Any
|
||||
from typing import Dict, Optional, Union, Any, List
|
||||
import warnings
|
||||
|
||||
__version__ = importlib.metadata.version("lancedb")
|
||||
@@ -15,9 +15,9 @@ from ._lancedb import connect as lancedb_connect
|
||||
from .common import URI, sanitize_uri
|
||||
from urllib.parse import urlparse
|
||||
from .db import AsyncConnection, DBConnection, LanceDBConnection
|
||||
from .io import StorageOptionsProvider
|
||||
from .remote import ClientConfig
|
||||
from .remote.db import RemoteDBConnection
|
||||
from .expr import Expr, col, lit, func
|
||||
from .schema import vector
|
||||
from .table import AsyncTable, Table
|
||||
from ._lancedb import Session
|
||||
@@ -63,7 +63,7 @@ def _check_s3_bucket_with_dots(
|
||||
|
||||
|
||||
def connect(
|
||||
uri: URI,
|
||||
uri: Optional[URI] = None,
|
||||
*,
|
||||
api_key: Optional[str] = None,
|
||||
region: str = "us-east-1",
|
||||
@@ -73,14 +73,18 @@ def connect(
|
||||
client_config: Union[ClientConfig, Dict[str, Any], None] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
session: Optional[Session] = None,
|
||||
namespace_client_impl: Optional[str] = None,
|
||||
namespace_client_properties: Optional[Dict[str, str]] = None,
|
||||
namespace_client_pushdown_operations: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> DBConnection:
|
||||
"""Connect to a LanceDB database.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
uri: str or Path
|
||||
The uri of the database.
|
||||
uri: str or Path, optional
|
||||
The uri of the database. When ``namespace_client_impl`` is provided you may
|
||||
omit ``uri`` and connect through a namespace client instead.
|
||||
api_key: str, optional
|
||||
If presented, connect to LanceDB cloud.
|
||||
Otherwise, connect to a database on file system or cloud storage.
|
||||
@@ -113,6 +117,18 @@ def connect(
|
||||
cache sizes for index and metadata caches, which can significantly
|
||||
impact memory use and performance. They can also be re-used across
|
||||
multiple connections to share the same cache state.
|
||||
namespace_client_impl : str, optional
|
||||
When provided along with ``namespace_client_properties``, ``connect``
|
||||
returns a namespace-backed connection by delegating to
|
||||
:func:`connect_namespace`. The value identifies which namespace
|
||||
implementation to load (e.g., ``"dir"`` or ``"rest"``).
|
||||
namespace_client_properties : dict, optional
|
||||
Configuration to pass to the namespace client implementation. Required
|
||||
when ``namespace_client_impl`` is set.
|
||||
namespace_client_pushdown_operations : list[str], optional
|
||||
Only used when ``namespace_client_properties`` is provided. Forwards to
|
||||
:func:`connect_namespace` to control which operations are executed on the
|
||||
namespace service (e.g., ``["QueryTable", "CreateTable"]``).
|
||||
|
||||
Examples
|
||||
--------
|
||||
@@ -132,11 +148,42 @@ def connect(
|
||||
>>> db = lancedb.connect("db://my_database", api_key="ldb_...",
|
||||
... client_config={"retry_config": {"retries": 5}})
|
||||
|
||||
Connect to a namespace-backed database:
|
||||
|
||||
>>> db = lancedb.connect(namespace_client_impl="dir",
|
||||
... namespace_client_properties={"root": "/tmp/ns"})
|
||||
|
||||
Returns
|
||||
-------
|
||||
conn : DBConnection
|
||||
A connection to a LanceDB database.
|
||||
"""
|
||||
if namespace_client_impl is not None or namespace_client_properties is not None:
|
||||
if namespace_client_impl is None or namespace_client_properties is None:
|
||||
raise ValueError(
|
||||
"Both namespace_client_impl and "
|
||||
"namespace_client_properties must be provided"
|
||||
)
|
||||
if kwargs:
|
||||
raise ValueError(f"Unknown keyword arguments: {kwargs}")
|
||||
return connect_namespace(
|
||||
namespace_client_impl,
|
||||
namespace_client_properties,
|
||||
read_consistency_interval=read_consistency_interval,
|
||||
storage_options=storage_options,
|
||||
session=session,
|
||||
namespace_client_pushdown_operations=namespace_client_pushdown_operations,
|
||||
)
|
||||
|
||||
if namespace_client_pushdown_operations is not None:
|
||||
raise ValueError(
|
||||
"namespace_client_pushdown_operations is only valid when "
|
||||
"connecting through a namespace"
|
||||
)
|
||||
if uri is None:
|
||||
raise ValueError(
|
||||
"uri is required when not connecting through a namespace client"
|
||||
)
|
||||
if isinstance(uri, str) and uri.startswith("db://"):
|
||||
if api_key is None:
|
||||
api_key = os.environ.get("LANCEDB_API_KEY")
|
||||
@@ -271,6 +318,10 @@ __all__ = [
|
||||
"AsyncConnection",
|
||||
"AsyncLanceNamespaceDBConnection",
|
||||
"AsyncTable",
|
||||
"col",
|
||||
"Expr",
|
||||
"func",
|
||||
"lit",
|
||||
"URI",
|
||||
"sanitize_uri",
|
||||
"vector",
|
||||
@@ -279,7 +330,6 @@ __all__ = [
|
||||
"LanceNamespaceDBConnection",
|
||||
"RemoteDBConnection",
|
||||
"Session",
|
||||
"StorageOptionsProvider",
|
||||
"Table",
|
||||
"__version__",
|
||||
]
|
||||
|
||||
@@ -14,7 +14,6 @@ from .index import (
|
||||
HnswSq,
|
||||
FTS,
|
||||
)
|
||||
from .io import StorageOptionsProvider
|
||||
from lance_namespace import (
|
||||
ListNamespacesResponse,
|
||||
CreateNamespaceResponse,
|
||||
@@ -27,6 +26,32 @@ from .remote import ClientConfig
|
||||
IvfHnswPq: type[HnswPq] = HnswPq
|
||||
IvfHnswSq: type[HnswSq] = HnswSq
|
||||
|
||||
class PyExpr:
|
||||
"""A type-safe DataFusion expression node (Rust-side handle)."""
|
||||
|
||||
def eq(self, other: "PyExpr") -> "PyExpr": ...
|
||||
def ne(self, other: "PyExpr") -> "PyExpr": ...
|
||||
def lt(self, other: "PyExpr") -> "PyExpr": ...
|
||||
def lte(self, other: "PyExpr") -> "PyExpr": ...
|
||||
def gt(self, other: "PyExpr") -> "PyExpr": ...
|
||||
def gte(self, other: "PyExpr") -> "PyExpr": ...
|
||||
def and_(self, other: "PyExpr") -> "PyExpr": ...
|
||||
def or_(self, other: "PyExpr") -> "PyExpr": ...
|
||||
def not_(self) -> "PyExpr": ...
|
||||
def add(self, other: "PyExpr") -> "PyExpr": ...
|
||||
def sub(self, other: "PyExpr") -> "PyExpr": ...
|
||||
def mul(self, other: "PyExpr") -> "PyExpr": ...
|
||||
def div(self, other: "PyExpr") -> "PyExpr": ...
|
||||
def lower(self) -> "PyExpr": ...
|
||||
def upper(self) -> "PyExpr": ...
|
||||
def contains(self, substr: "PyExpr") -> "PyExpr": ...
|
||||
def cast(self, data_type: pa.DataType) -> "PyExpr": ...
|
||||
def to_sql(self) -> str: ...
|
||||
|
||||
def expr_col(name: str) -> PyExpr: ...
|
||||
def expr_lit(value: Union[bool, int, float, str]) -> PyExpr: ...
|
||||
def expr_func(name: str, args: List[PyExpr]) -> PyExpr: ...
|
||||
|
||||
class Session:
|
||||
def __init__(
|
||||
self,
|
||||
@@ -46,35 +71,35 @@ class Connection(object):
|
||||
async def close(self): ...
|
||||
async def list_namespaces(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListNamespacesResponse: ...
|
||||
async def create_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
namespace_path: List[str],
|
||||
mode: Optional[str] = None,
|
||||
properties: Optional[Dict[str, str]] = None,
|
||||
) -> CreateNamespaceResponse: ...
|
||||
async def drop_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
namespace_path: List[str],
|
||||
mode: Optional[str] = None,
|
||||
behavior: Optional[str] = None,
|
||||
) -> DropNamespaceResponse: ...
|
||||
async def describe_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
namespace_path: List[str],
|
||||
) -> DescribeNamespaceResponse: ...
|
||||
async def list_tables(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListTablesResponse: ...
|
||||
async def table_names(
|
||||
self,
|
||||
namespace: Optional[List[str]],
|
||||
namespace_path: Optional[List[str]],
|
||||
start_after: Optional[str],
|
||||
limit: Optional[int],
|
||||
) -> list[str]: ... # Deprecated: Use list_tables instead
|
||||
@@ -83,9 +108,8 @@ class Connection(object):
|
||||
name: str,
|
||||
mode: str,
|
||||
data: pa.RecordBatchReader,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
storage_options_provider: Optional[StorageOptionsProvider] = None,
|
||||
location: Optional[str] = None,
|
||||
) -> Table: ...
|
||||
async def create_empty_table(
|
||||
@@ -93,17 +117,15 @@ class Connection(object):
|
||||
name: str,
|
||||
mode: str,
|
||||
schema: pa.Schema,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
storage_options_provider: Optional[StorageOptionsProvider] = None,
|
||||
location: Optional[str] = None,
|
||||
) -> Table: ...
|
||||
async def open_table(
|
||||
self,
|
||||
name: str,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
storage_options_provider: Optional[StorageOptionsProvider] = None,
|
||||
index_cache_size: Optional[int] = None,
|
||||
location: Optional[str] = None,
|
||||
) -> Table: ...
|
||||
@@ -111,7 +133,7 @@ class Connection(object):
|
||||
self,
|
||||
target_table_name: str,
|
||||
source_uri: str,
|
||||
target_namespace: Optional[List[str]] = None,
|
||||
target_namespace_path: Optional[List[str]] = None,
|
||||
source_version: Optional[int] = None,
|
||||
source_tag: Optional[str] = None,
|
||||
is_shallow: bool = True,
|
||||
@@ -120,13 +142,15 @@ class Connection(object):
|
||||
self,
|
||||
cur_name: str,
|
||||
new_name: str,
|
||||
cur_namespace: Optional[List[str]] = None,
|
||||
new_namespace: Optional[List[str]] = None,
|
||||
cur_namespace_path: Optional[List[str]] = None,
|
||||
new_namespace_path: Optional[List[str]] = None,
|
||||
) -> None: ...
|
||||
async def drop_table(
|
||||
self, name: str, namespace: Optional[List[str]] = None
|
||||
self, name: str, namespace_path: Optional[List[str]] = None
|
||||
) -> None: ...
|
||||
async def drop_all_tables(
|
||||
self, namespace_path: Optional[List[str]] = None
|
||||
) -> None: ...
|
||||
async def drop_all_tables(self, namespace: Optional[List[str]] = None) -> None: ...
|
||||
|
||||
class Table:
|
||||
def name(self) -> str: ...
|
||||
@@ -135,7 +159,10 @@ class Table:
|
||||
def close(self) -> None: ...
|
||||
async def schema(self) -> pa.Schema: ...
|
||||
async def add(
|
||||
self, data: pa.RecordBatchReader, mode: Literal["append", "overwrite"]
|
||||
self,
|
||||
data: pa.RecordBatchReader,
|
||||
mode: Literal["append", "overwrite"],
|
||||
progress: Optional[Any] = None,
|
||||
) -> AddResult: ...
|
||||
async def update(
|
||||
self, updates: Dict[str, str], where: Optional[str]
|
||||
@@ -222,7 +249,9 @@ class RecordBatchStream:
|
||||
|
||||
class Query:
|
||||
def where(self, filter: str): ...
|
||||
def select(self, columns: Tuple[str, str]): ...
|
||||
def where_expr(self, expr: PyExpr): ...
|
||||
def select(self, columns: List[Tuple[str, str]]): ...
|
||||
def select_expr(self, columns: List[Tuple[str, PyExpr]]): ...
|
||||
def select_columns(self, columns: List[str]): ...
|
||||
def limit(self, limit: int): ...
|
||||
def offset(self, offset: int): ...
|
||||
@@ -248,7 +277,9 @@ class TakeQuery:
|
||||
|
||||
class FTSQuery:
|
||||
def where(self, filter: str): ...
|
||||
def select(self, columns: List[str]): ...
|
||||
def where_expr(self, expr: PyExpr): ...
|
||||
def select(self, columns: List[Tuple[str, str]]): ...
|
||||
def select_expr(self, columns: List[Tuple[str, PyExpr]]): ...
|
||||
def limit(self, limit: int): ...
|
||||
def offset(self, offset: int): ...
|
||||
def fast_search(self): ...
|
||||
@@ -267,7 +298,9 @@ class VectorQuery:
|
||||
async def output_schema(self) -> pa.Schema: ...
|
||||
async def execute(self) -> RecordBatchStream: ...
|
||||
def where(self, filter: str): ...
|
||||
def select(self, columns: List[str]): ...
|
||||
def where_expr(self, expr: PyExpr): ...
|
||||
def select(self, columns: List[Tuple[str, str]]): ...
|
||||
def select_expr(self, columns: List[Tuple[str, PyExpr]]): ...
|
||||
def select_with_projection(self, columns: Tuple[str, str]): ...
|
||||
def limit(self, limit: int): ...
|
||||
def offset(self, offset: int): ...
|
||||
@@ -284,7 +317,9 @@ class VectorQuery:
|
||||
|
||||
class HybridQuery:
|
||||
def where(self, filter: str): ...
|
||||
def select(self, columns: List[str]): ...
|
||||
def where_expr(self, expr: PyExpr): ...
|
||||
def select(self, columns: List[Tuple[str, str]]): ...
|
||||
def select_expr(self, columns: List[Tuple[str, PyExpr]]): ...
|
||||
def limit(self, limit: int): ...
|
||||
def offset(self, offset: int): ...
|
||||
def fast_search(self): ...
|
||||
|
||||
@@ -52,7 +52,6 @@ if TYPE_CHECKING:
|
||||
from ._lancedb import Connection as LanceDbConnection
|
||||
from .common import DATA, URI
|
||||
from .embeddings import EmbeddingFunctionConfig
|
||||
from .io import StorageOptionsProvider
|
||||
from ._lancedb import Session
|
||||
|
||||
from .namespace_utils import (
|
||||
@@ -67,7 +66,7 @@ class DBConnection(EnforceOverrides):
|
||||
|
||||
def list_namespaces(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListNamespacesResponse:
|
||||
@@ -75,7 +74,7 @@ class DBConnection(EnforceOverrides):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], default []
|
||||
namespace_path: List[str], default []
|
||||
The parent namespace to list namespaces in.
|
||||
Empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
@@ -89,13 +88,13 @@ class DBConnection(EnforceOverrides):
|
||||
ListNamespacesResponse
|
||||
Response containing namespace names and optional page_token for pagination.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
return ListNamespacesResponse(namespaces=[], page_token=None)
|
||||
|
||||
def create_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
namespace_path: List[str],
|
||||
mode: Optional[str] = None,
|
||||
properties: Optional[Dict[str, str]] = None,
|
||||
) -> CreateNamespaceResponse:
|
||||
@@ -103,7 +102,7 @@ class DBConnection(EnforceOverrides):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
namespace_path: List[str]
|
||||
The namespace identifier to create.
|
||||
mode: str, optional
|
||||
Creation mode - "create" (fail if exists), "exist_ok" (skip if exists),
|
||||
@@ -122,7 +121,7 @@ class DBConnection(EnforceOverrides):
|
||||
|
||||
def drop_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
namespace_path: List[str],
|
||||
mode: Optional[str] = None,
|
||||
behavior: Optional[str] = None,
|
||||
) -> DropNamespaceResponse:
|
||||
@@ -130,7 +129,7 @@ class DBConnection(EnforceOverrides):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
namespace_path: List[str]
|
||||
The namespace identifier to drop.
|
||||
mode: str, optional
|
||||
Whether to skip if not exists ("SKIP") or fail ("FAIL"). Case insensitive.
|
||||
@@ -147,12 +146,14 @@ class DBConnection(EnforceOverrides):
|
||||
"Namespace operations are not supported for this connection type"
|
||||
)
|
||||
|
||||
def describe_namespace(self, namespace: List[str]) -> DescribeNamespaceResponse:
|
||||
def describe_namespace(
|
||||
self, namespace_path: List[str]
|
||||
) -> DescribeNamespaceResponse:
|
||||
"""Describe a namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
namespace_path: List[str]
|
||||
The namespace identifier to describe.
|
||||
|
||||
Returns
|
||||
@@ -166,7 +167,7 @@ class DBConnection(EnforceOverrides):
|
||||
|
||||
def list_tables(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListTablesResponse:
|
||||
@@ -174,7 +175,7 @@ class DBConnection(EnforceOverrides):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to list tables in.
|
||||
None or empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
@@ -198,13 +199,13 @@ class DBConnection(EnforceOverrides):
|
||||
page_token: Optional[str] = None,
|
||||
limit: int = 10,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
) -> Iterable[str]:
|
||||
"""List all tables in this database, in sorted order
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], default []
|
||||
namespace_path: List[str], default []
|
||||
The namespace to list tables in.
|
||||
Empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
@@ -231,9 +232,8 @@ class DBConnection(EnforceOverrides):
|
||||
fill_value: float = 0.0,
|
||||
embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
storage_options_provider: Optional["StorageOptionsProvider"] = None,
|
||||
data_storage_version: Optional[str] = None,
|
||||
enable_v2_manifest_paths: Optional[bool] = None,
|
||||
) -> Table:
|
||||
@@ -243,7 +243,7 @@ class DBConnection(EnforceOverrides):
|
||||
----------
|
||||
name: str
|
||||
The name of the table.
|
||||
namespace: List[str], default []
|
||||
namespace_path: List[str], default []
|
||||
The namespace to create the table in.
|
||||
Empty list represents root namespace.
|
||||
data: The data to initialize the table, *optional*
|
||||
@@ -401,9 +401,8 @@ class DBConnection(EnforceOverrides):
|
||||
self,
|
||||
name: str,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
storage_options_provider: Optional["StorageOptionsProvider"] = None,
|
||||
index_cache_size: Optional[int] = None,
|
||||
) -> Table:
|
||||
"""Open a Lance Table in the database.
|
||||
@@ -412,7 +411,7 @@ class DBConnection(EnforceOverrides):
|
||||
----------
|
||||
name: str
|
||||
The name of the table.
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to open the table from.
|
||||
None or empty list represents root namespace.
|
||||
index_cache_size: int, default 256
|
||||
@@ -440,27 +439,27 @@ class DBConnection(EnforceOverrides):
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def drop_table(self, name: str, namespace: Optional[List[str]] = None):
|
||||
def drop_table(self, name: str, namespace_path: Optional[List[str]] = None):
|
||||
"""Drop a table from the database.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
name: str
|
||||
The name of the table.
|
||||
namespace: List[str], default []
|
||||
namespace_path: List[str], default []
|
||||
The namespace to drop the table from.
|
||||
Empty list represents root namespace.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
raise NotImplementedError
|
||||
|
||||
def rename_table(
|
||||
self,
|
||||
cur_name: str,
|
||||
new_name: str,
|
||||
cur_namespace: Optional[List[str]] = None,
|
||||
new_namespace: Optional[List[str]] = None,
|
||||
cur_namespace_path: Optional[List[str]] = None,
|
||||
new_namespace_path: Optional[List[str]] = None,
|
||||
):
|
||||
"""Rename a table in the database.
|
||||
|
||||
@@ -470,17 +469,17 @@ class DBConnection(EnforceOverrides):
|
||||
The current name of the table.
|
||||
new_name: str
|
||||
The new name of the table.
|
||||
cur_namespace: List[str], optional
|
||||
cur_namespace_path: List[str], optional
|
||||
The namespace of the current table.
|
||||
None or empty list represents root namespace.
|
||||
new_namespace: List[str], optional
|
||||
new_namespace_path: List[str], optional
|
||||
The namespace to move the table to.
|
||||
If not specified, defaults to the same as cur_namespace.
|
||||
"""
|
||||
if cur_namespace is None:
|
||||
cur_namespace = []
|
||||
if new_namespace is None:
|
||||
new_namespace = []
|
||||
if cur_namespace_path is None:
|
||||
cur_namespace_path = []
|
||||
if new_namespace_path is None:
|
||||
new_namespace_path = []
|
||||
raise NotImplementedError
|
||||
|
||||
def drop_database(self):
|
||||
@@ -490,18 +489,18 @@ class DBConnection(EnforceOverrides):
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def drop_all_tables(self, namespace: Optional[List[str]] = None):
|
||||
def drop_all_tables(self, namespace_path: Optional[List[str]] = None):
|
||||
"""
|
||||
Drop all tables from the database
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to drop all tables from.
|
||||
None or empty list represents root namespace.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
@@ -642,7 +641,7 @@ class LanceDBConnection(DBConnection):
|
||||
@override
|
||||
def list_namespaces(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListNamespacesResponse:
|
||||
@@ -650,7 +649,7 @@ class LanceDBConnection(DBConnection):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The parent namespace to list namespaces in.
|
||||
None or empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
@@ -664,18 +663,18 @@ class LanceDBConnection(DBConnection):
|
||||
ListNamespacesResponse
|
||||
Response containing namespace names and optional page_token for pagination.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
return LOOP.run(
|
||||
self._conn.list_namespaces(
|
||||
namespace=namespace, page_token=page_token, limit=limit
|
||||
namespace_path=namespace_path, page_token=page_token, limit=limit
|
||||
)
|
||||
)
|
||||
|
||||
@override
|
||||
def create_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
namespace_path: List[str],
|
||||
mode: Optional[str] = None,
|
||||
properties: Optional[Dict[str, str]] = None,
|
||||
) -> CreateNamespaceResponse:
|
||||
@@ -683,7 +682,7 @@ class LanceDBConnection(DBConnection):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
namespace_path: List[str]
|
||||
The namespace identifier to create.
|
||||
mode: str, optional
|
||||
Creation mode - "create" (fail if exists), "exist_ok" (skip if exists),
|
||||
@@ -698,14 +697,14 @@ class LanceDBConnection(DBConnection):
|
||||
"""
|
||||
return LOOP.run(
|
||||
self._conn.create_namespace(
|
||||
namespace=namespace, mode=mode, properties=properties
|
||||
namespace_path=namespace_path, mode=mode, properties=properties
|
||||
)
|
||||
)
|
||||
|
||||
@override
|
||||
def drop_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
namespace_path: List[str],
|
||||
mode: Optional[str] = None,
|
||||
behavior: Optional[str] = None,
|
||||
) -> DropNamespaceResponse:
|
||||
@@ -713,7 +712,7 @@ class LanceDBConnection(DBConnection):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
namespace_path: List[str]
|
||||
The namespace identifier to drop.
|
||||
mode: str, optional
|
||||
Whether to skip if not exists ("SKIP") or fail ("FAIL"). Case insensitive.
|
||||
@@ -727,16 +726,20 @@ class LanceDBConnection(DBConnection):
|
||||
Response containing properties and transaction_id if applicable.
|
||||
"""
|
||||
return LOOP.run(
|
||||
self._conn.drop_namespace(namespace=namespace, mode=mode, behavior=behavior)
|
||||
self._conn.drop_namespace(
|
||||
namespace_path=namespace_path, mode=mode, behavior=behavior
|
||||
)
|
||||
)
|
||||
|
||||
@override
|
||||
def describe_namespace(self, namespace: List[str]) -> DescribeNamespaceResponse:
|
||||
def describe_namespace(
|
||||
self, namespace_path: List[str]
|
||||
) -> DescribeNamespaceResponse:
|
||||
"""Describe a namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
namespace_path: List[str]
|
||||
The namespace identifier to describe.
|
||||
|
||||
Returns
|
||||
@@ -744,12 +747,12 @@ class LanceDBConnection(DBConnection):
|
||||
DescribeNamespaceResponse
|
||||
Response containing the namespace properties.
|
||||
"""
|
||||
return LOOP.run(self._conn.describe_namespace(namespace=namespace))
|
||||
return LOOP.run(self._conn.describe_namespace(namespace_path=namespace_path))
|
||||
|
||||
@override
|
||||
def list_tables(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListTablesResponse:
|
||||
@@ -757,7 +760,7 @@ class LanceDBConnection(DBConnection):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to list tables in.
|
||||
None or empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
@@ -771,11 +774,11 @@ class LanceDBConnection(DBConnection):
|
||||
ListTablesResponse
|
||||
Response containing table names and optional page_token for pagination.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
return LOOP.run(
|
||||
self._conn.list_tables(
|
||||
namespace=namespace, page_token=page_token, limit=limit
|
||||
namespace_path=namespace_path, page_token=page_token, limit=limit
|
||||
)
|
||||
)
|
||||
|
||||
@@ -785,7 +788,7 @@ class LanceDBConnection(DBConnection):
|
||||
page_token: Optional[str] = None,
|
||||
limit: int = 10,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
) -> Iterable[str]:
|
||||
"""Get the names of all tables in the database. The names are sorted.
|
||||
|
||||
@@ -794,7 +797,7 @@ class LanceDBConnection(DBConnection):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to list tables in.
|
||||
page_token: str, optional
|
||||
The token to use for pagination.
|
||||
@@ -813,11 +816,11 @@ class LanceDBConnection(DBConnection):
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
return LOOP.run(
|
||||
self._conn.table_names(
|
||||
namespace=namespace, start_after=page_token, limit=limit
|
||||
namespace_path=namespace_path, start_after=page_token, limit=limit
|
||||
)
|
||||
)
|
||||
|
||||
@@ -839,9 +842,8 @@ class LanceDBConnection(DBConnection):
|
||||
fill_value: float = 0.0,
|
||||
embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
storage_options_provider: Optional["StorageOptionsProvider"] = None,
|
||||
data_storage_version: Optional[str] = None,
|
||||
enable_v2_manifest_paths: Optional[bool] = None,
|
||||
) -> LanceTable:
|
||||
@@ -849,15 +851,15 @@ class LanceDBConnection(DBConnection):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to create the table in.
|
||||
|
||||
See
|
||||
---
|
||||
DBConnection.create_table
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
if mode.lower() not in ["create", "overwrite"]:
|
||||
raise ValueError("mode must be either 'create' or 'overwrite'")
|
||||
validate_table_name(name)
|
||||
@@ -872,9 +874,8 @@ class LanceDBConnection(DBConnection):
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
fill_value=fill_value,
|
||||
embedding_functions=embedding_functions,
|
||||
namespace=namespace,
|
||||
namespace_path=namespace_path,
|
||||
storage_options=storage_options,
|
||||
storage_options_provider=storage_options_provider,
|
||||
)
|
||||
return tbl
|
||||
|
||||
@@ -883,9 +884,8 @@ class LanceDBConnection(DBConnection):
|
||||
self,
|
||||
name: str,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
storage_options_provider: Optional["StorageOptionsProvider"] = None,
|
||||
index_cache_size: Optional[int] = None,
|
||||
) -> LanceTable:
|
||||
"""Open a table in the database.
|
||||
@@ -894,15 +894,15 @@ class LanceDBConnection(DBConnection):
|
||||
----------
|
||||
name: str
|
||||
The name of the table.
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to open the table from.
|
||||
|
||||
Returns
|
||||
-------
|
||||
A LanceTable object representing the table.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
if index_cache_size is not None:
|
||||
import warnings
|
||||
|
||||
@@ -917,9 +917,8 @@ class LanceDBConnection(DBConnection):
|
||||
return LanceTable.open(
|
||||
self,
|
||||
name,
|
||||
namespace=namespace,
|
||||
namespace_path=namespace_path,
|
||||
storage_options=storage_options,
|
||||
storage_options_provider=storage_options_provider,
|
||||
index_cache_size=index_cache_size,
|
||||
)
|
||||
|
||||
@@ -928,7 +927,7 @@ class LanceDBConnection(DBConnection):
|
||||
target_table_name: str,
|
||||
source_uri: str,
|
||||
*,
|
||||
target_namespace: Optional[List[str]] = None,
|
||||
target_namespace_path: Optional[List[str]] = None,
|
||||
source_version: Optional[int] = None,
|
||||
source_tag: Optional[str] = None,
|
||||
is_shallow: bool = True,
|
||||
@@ -946,7 +945,7 @@ class LanceDBConnection(DBConnection):
|
||||
The name of the target table to create.
|
||||
source_uri: str
|
||||
The URI of the source table to clone from.
|
||||
target_namespace: List[str], optional
|
||||
target_namespace_path: List[str], optional
|
||||
The namespace for the target table.
|
||||
None or empty list represents root namespace.
|
||||
source_version: int, optional
|
||||
@@ -961,13 +960,13 @@ class LanceDBConnection(DBConnection):
|
||||
-------
|
||||
A LanceTable object representing the cloned table.
|
||||
"""
|
||||
if target_namespace is None:
|
||||
target_namespace = []
|
||||
if target_namespace_path is None:
|
||||
target_namespace_path = []
|
||||
LOOP.run(
|
||||
self._conn.clone_table(
|
||||
target_table_name,
|
||||
source_uri,
|
||||
target_namespace=target_namespace,
|
||||
target_namespace_path=target_namespace_path,
|
||||
source_version=source_version,
|
||||
source_tag=source_tag,
|
||||
is_shallow=is_shallow,
|
||||
@@ -976,14 +975,14 @@ class LanceDBConnection(DBConnection):
|
||||
return LanceTable.open(
|
||||
self,
|
||||
target_table_name,
|
||||
namespace=target_namespace,
|
||||
namespace_path=target_namespace_path,
|
||||
)
|
||||
|
||||
@override
|
||||
def drop_table(
|
||||
self,
|
||||
name: str,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
ignore_missing: bool = False,
|
||||
):
|
||||
"""Drop a table from the database.
|
||||
@@ -992,32 +991,32 @@ class LanceDBConnection(DBConnection):
|
||||
----------
|
||||
name: str
|
||||
The name of the table.
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to drop the table from.
|
||||
ignore_missing: bool, default False
|
||||
If True, ignore if the table does not exist.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
LOOP.run(
|
||||
self._conn.drop_table(
|
||||
name, namespace=namespace, ignore_missing=ignore_missing
|
||||
name, namespace_path=namespace_path, ignore_missing=ignore_missing
|
||||
)
|
||||
)
|
||||
|
||||
@override
|
||||
def drop_all_tables(self, namespace: Optional[List[str]] = None):
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
LOOP.run(self._conn.drop_all_tables(namespace=namespace))
|
||||
def drop_all_tables(self, namespace_path: Optional[List[str]] = None):
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
LOOP.run(self._conn.drop_all_tables(namespace_path=namespace_path))
|
||||
|
||||
@override
|
||||
def rename_table(
|
||||
self,
|
||||
cur_name: str,
|
||||
new_name: str,
|
||||
cur_namespace: Optional[List[str]] = None,
|
||||
new_namespace: Optional[List[str]] = None,
|
||||
cur_namespace_path: Optional[List[str]] = None,
|
||||
new_namespace_path: Optional[List[str]] = None,
|
||||
):
|
||||
"""Rename a table in the database.
|
||||
|
||||
@@ -1027,21 +1026,21 @@ class LanceDBConnection(DBConnection):
|
||||
The current name of the table.
|
||||
new_name: str
|
||||
The new name of the table.
|
||||
cur_namespace: List[str], optional
|
||||
cur_namespace_path: List[str], optional
|
||||
The namespace of the current table.
|
||||
new_namespace: List[str], optional
|
||||
new_namespace_path: List[str], optional
|
||||
The namespace to move the table to.
|
||||
"""
|
||||
if cur_namespace is None:
|
||||
cur_namespace = []
|
||||
if new_namespace is None:
|
||||
new_namespace = []
|
||||
if cur_namespace_path is None:
|
||||
cur_namespace_path = []
|
||||
if new_namespace_path is None:
|
||||
new_namespace_path = []
|
||||
LOOP.run(
|
||||
self._conn.rename_table(
|
||||
cur_name,
|
||||
new_name,
|
||||
cur_namespace=cur_namespace,
|
||||
new_namespace=new_namespace,
|
||||
cur_namespace_path=cur_namespace_path,
|
||||
new_namespace_path=new_namespace_path,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -1125,7 +1124,7 @@ class AsyncConnection(object):
|
||||
|
||||
async def list_namespaces(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListNamespacesResponse:
|
||||
@@ -1133,7 +1132,7 @@ class AsyncConnection(object):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The parent namespace to list namespaces in.
|
||||
None or empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
@@ -1146,16 +1145,16 @@ class AsyncConnection(object):
|
||||
ListNamespacesResponse
|
||||
Response containing namespace names and optional pagination token
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
result = await self._inner.list_namespaces(
|
||||
namespace=namespace, page_token=page_token, limit=limit
|
||||
namespace_path=namespace_path, page_token=page_token, limit=limit
|
||||
)
|
||||
return ListNamespacesResponse(**result)
|
||||
|
||||
async def create_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
namespace_path: List[str],
|
||||
mode: Optional[str] = None,
|
||||
properties: Optional[Dict[str, str]] = None,
|
||||
) -> CreateNamespaceResponse:
|
||||
@@ -1163,7 +1162,7 @@ class AsyncConnection(object):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
namespace_path: List[str]
|
||||
The namespace identifier to create.
|
||||
mode: str, optional
|
||||
Creation mode - "create", "exist_ok", or "overwrite". Case insensitive.
|
||||
@@ -1176,7 +1175,7 @@ class AsyncConnection(object):
|
||||
Response containing namespace properties
|
||||
"""
|
||||
result = await self._inner.create_namespace(
|
||||
namespace,
|
||||
namespace_path,
|
||||
mode=_normalize_create_namespace_mode(mode),
|
||||
properties=properties,
|
||||
)
|
||||
@@ -1184,7 +1183,7 @@ class AsyncConnection(object):
|
||||
|
||||
async def drop_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
namespace_path: List[str],
|
||||
mode: Optional[str] = None,
|
||||
behavior: Optional[str] = None,
|
||||
) -> DropNamespaceResponse:
|
||||
@@ -1192,7 +1191,7 @@ class AsyncConnection(object):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
namespace_path: List[str]
|
||||
The namespace identifier to drop.
|
||||
mode: str, optional
|
||||
Whether to skip if not exists ("SKIP") or fail ("FAIL"). Case insensitive.
|
||||
@@ -1206,20 +1205,20 @@ class AsyncConnection(object):
|
||||
Response containing properties and transaction_id if applicable.
|
||||
"""
|
||||
result = await self._inner.drop_namespace(
|
||||
namespace,
|
||||
namespace_path,
|
||||
mode=_normalize_drop_namespace_mode(mode),
|
||||
behavior=_normalize_drop_namespace_behavior(behavior),
|
||||
)
|
||||
return DropNamespaceResponse(**result)
|
||||
|
||||
async def describe_namespace(
|
||||
self, namespace: List[str]
|
||||
self, namespace_path: List[str]
|
||||
) -> DescribeNamespaceResponse:
|
||||
"""Describe a namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
namespace_path: List[str]
|
||||
The namespace identifier to describe.
|
||||
|
||||
Returns
|
||||
@@ -1227,12 +1226,12 @@ class AsyncConnection(object):
|
||||
DescribeNamespaceResponse
|
||||
Response containing the namespace properties.
|
||||
"""
|
||||
result = await self._inner.describe_namespace(namespace)
|
||||
result = await self._inner.describe_namespace(namespace_path)
|
||||
return DescribeNamespaceResponse(**result)
|
||||
|
||||
async def list_tables(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListTablesResponse:
|
||||
@@ -1240,7 +1239,7 @@ class AsyncConnection(object):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to list tables in.
|
||||
None or empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
@@ -1254,17 +1253,17 @@ class AsyncConnection(object):
|
||||
ListTablesResponse
|
||||
Response containing table names and optional page_token for pagination.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
result = await self._inner.list_tables(
|
||||
namespace=namespace, page_token=page_token, limit=limit
|
||||
namespace_path=namespace_path, page_token=page_token, limit=limit
|
||||
)
|
||||
return ListTablesResponse(**result)
|
||||
|
||||
async def table_names(
|
||||
self,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
start_after: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> Iterable[str]:
|
||||
@@ -1275,7 +1274,7 @@ class AsyncConnection(object):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to list tables in.
|
||||
None or empty list represents root namespace.
|
||||
start_after: str, optional
|
||||
@@ -1298,10 +1297,10 @@ class AsyncConnection(object):
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
return await self._inner.table_names(
|
||||
namespace=namespace, start_after=start_after, limit=limit
|
||||
namespace_path=namespace_path, start_after=start_after, limit=limit
|
||||
)
|
||||
|
||||
async def create_table(
|
||||
@@ -1314,9 +1313,8 @@ class AsyncConnection(object):
|
||||
on_bad_vectors: Optional[str] = None,
|
||||
fill_value: Optional[float] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
storage_options_provider: Optional["StorageOptionsProvider"] = None,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
|
||||
location: Optional[str] = None,
|
||||
) -> AsyncTable:
|
||||
@@ -1326,7 +1324,7 @@ class AsyncConnection(object):
|
||||
----------
|
||||
name: str
|
||||
The name of the table.
|
||||
namespace: List[str], default []
|
||||
namespace_path: List[str], default []
|
||||
The namespace to create the table in.
|
||||
Empty list represents root namespace.
|
||||
data: The data to initialize the table, *optional*
|
||||
@@ -1477,8 +1475,8 @@ class AsyncConnection(object):
|
||||
... await db.create_table("table4", make_batches(), schema=schema)
|
||||
>>> asyncio.run(iterable_example())
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
metadata = None
|
||||
|
||||
if embedding_functions is not None:
|
||||
@@ -1513,9 +1511,8 @@ class AsyncConnection(object):
|
||||
name,
|
||||
mode,
|
||||
schema,
|
||||
namespace=namespace,
|
||||
namespace_path=namespace_path,
|
||||
storage_options=storage_options,
|
||||
storage_options_provider=storage_options_provider,
|
||||
location=location,
|
||||
)
|
||||
else:
|
||||
@@ -1524,9 +1521,8 @@ class AsyncConnection(object):
|
||||
name,
|
||||
mode,
|
||||
data,
|
||||
namespace=namespace,
|
||||
namespace_path=namespace_path,
|
||||
storage_options=storage_options,
|
||||
storage_options_provider=storage_options_provider,
|
||||
location=location,
|
||||
)
|
||||
|
||||
@@ -1536,9 +1532,8 @@ class AsyncConnection(object):
|
||||
self,
|
||||
name: str,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
storage_options_provider: Optional["StorageOptionsProvider"] = None,
|
||||
index_cache_size: Optional[int] = None,
|
||||
location: Optional[str] = None,
|
||||
namespace_client: Optional[Any] = None,
|
||||
@@ -1550,7 +1545,7 @@ class AsyncConnection(object):
|
||||
----------
|
||||
name: str
|
||||
The name of the table.
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to open the table from.
|
||||
None or empty list represents root namespace.
|
||||
storage_options: dict, optional
|
||||
@@ -1583,13 +1578,12 @@ class AsyncConnection(object):
|
||||
-------
|
||||
A LanceTable object representing the table.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
table = await self._inner.open_table(
|
||||
name,
|
||||
namespace=namespace,
|
||||
namespace_path=namespace_path,
|
||||
storage_options=storage_options,
|
||||
storage_options_provider=storage_options_provider,
|
||||
index_cache_size=index_cache_size,
|
||||
location=location,
|
||||
namespace_client=namespace_client,
|
||||
@@ -1602,7 +1596,7 @@ class AsyncConnection(object):
|
||||
target_table_name: str,
|
||||
source_uri: str,
|
||||
*,
|
||||
target_namespace: Optional[List[str]] = None,
|
||||
target_namespace_path: Optional[List[str]] = None,
|
||||
source_version: Optional[int] = None,
|
||||
source_tag: Optional[str] = None,
|
||||
is_shallow: bool = True,
|
||||
@@ -1620,7 +1614,7 @@ class AsyncConnection(object):
|
||||
The name of the target table to create.
|
||||
source_uri: str
|
||||
The URI of the source table to clone from.
|
||||
target_namespace: List[str], optional
|
||||
target_namespace_path: List[str], optional
|
||||
The namespace for the target table.
|
||||
None or empty list represents root namespace.
|
||||
source_version: int, optional
|
||||
@@ -1635,12 +1629,12 @@ class AsyncConnection(object):
|
||||
-------
|
||||
An AsyncTable object representing the cloned table.
|
||||
"""
|
||||
if target_namespace is None:
|
||||
target_namespace = []
|
||||
if target_namespace_path is None:
|
||||
target_namespace_path = []
|
||||
table = await self._inner.clone_table(
|
||||
target_table_name,
|
||||
source_uri,
|
||||
target_namespace=target_namespace,
|
||||
target_namespace_path=target_namespace_path,
|
||||
source_version=source_version,
|
||||
source_tag=source_tag,
|
||||
is_shallow=is_shallow,
|
||||
@@ -1651,8 +1645,8 @@ class AsyncConnection(object):
|
||||
self,
|
||||
cur_name: str,
|
||||
new_name: str,
|
||||
cur_namespace: Optional[List[str]] = None,
|
||||
new_namespace: Optional[List[str]] = None,
|
||||
cur_namespace_path: Optional[List[str]] = None,
|
||||
new_namespace_path: Optional[List[str]] = None,
|
||||
):
|
||||
"""Rename a table in the database.
|
||||
|
||||
@@ -1662,26 +1656,29 @@ class AsyncConnection(object):
|
||||
The current name of the table.
|
||||
new_name: str
|
||||
The new name of the table.
|
||||
cur_namespace: List[str], optional
|
||||
cur_namespace_path: List[str], optional
|
||||
The namespace of the current table.
|
||||
None or empty list represents root namespace.
|
||||
new_namespace: List[str], optional
|
||||
new_namespace_path: List[str], optional
|
||||
The namespace to move the table to.
|
||||
If not specified, defaults to the same as cur_namespace.
|
||||
"""
|
||||
if cur_namespace is None:
|
||||
cur_namespace = []
|
||||
if new_namespace is None:
|
||||
new_namespace = []
|
||||
if cur_namespace_path is None:
|
||||
cur_namespace_path = []
|
||||
if new_namespace_path is None:
|
||||
new_namespace_path = []
|
||||
await self._inner.rename_table(
|
||||
cur_name, new_name, cur_namespace=cur_namespace, new_namespace=new_namespace
|
||||
cur_name,
|
||||
new_name,
|
||||
cur_namespace_path=cur_namespace_path,
|
||||
new_namespace_path=new_namespace_path,
|
||||
)
|
||||
|
||||
async def drop_table(
|
||||
self,
|
||||
name: str,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
ignore_missing: bool = False,
|
||||
):
|
||||
"""Drop a table from the database.
|
||||
@@ -1690,34 +1687,34 @@ class AsyncConnection(object):
|
||||
----------
|
||||
name: str
|
||||
The name of the table.
|
||||
namespace: List[str], default []
|
||||
namespace_path: List[str], default []
|
||||
The namespace to drop the table from.
|
||||
Empty list represents root namespace.
|
||||
ignore_missing: bool, default False
|
||||
If True, ignore if the table does not exist.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
try:
|
||||
await self._inner.drop_table(name, namespace=namespace)
|
||||
await self._inner.drop_table(name, namespace_path=namespace_path)
|
||||
except ValueError as e:
|
||||
if not ignore_missing:
|
||||
raise e
|
||||
if f"Table '{name}' was not found" not in str(e):
|
||||
raise e
|
||||
|
||||
async def drop_all_tables(self, namespace: Optional[List[str]] = None):
|
||||
async def drop_all_tables(self, namespace_path: Optional[List[str]] = None):
|
||||
"""Drop all tables from the database.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to drop all tables from.
|
||||
None or empty list represents root namespace.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
await self._inner.drop_all_tables(namespace=namespace)
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
await self._inner.drop_all_tables(namespace_path=namespace_path)
|
||||
|
||||
@deprecation.deprecated(
|
||||
deprecated_in="0.15.1",
|
||||
|
||||
@@ -10,6 +10,7 @@ import sys
|
||||
import threading
|
||||
import time
|
||||
import urllib.error
|
||||
import urllib.request
|
||||
import weakref
|
||||
import logging
|
||||
from functools import wraps
|
||||
|
||||
298
python/python/lancedb/expr.py
Normal file
298
python/python/lancedb/expr.py
Normal file
@@ -0,0 +1,298 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
||||
|
||||
"""Type-safe expression builder for filters and projections.
|
||||
|
||||
Instead of writing raw SQL strings you can build expressions with Python
|
||||
operators::
|
||||
|
||||
from lancedb.expr import col, lit
|
||||
|
||||
# filter: age > 18 AND status = 'active'
|
||||
filt = (col("age") > lit(18)) & (col("status") == lit("active"))
|
||||
|
||||
# projection: compute a derived column
|
||||
proj = {"score": col("raw_score") * lit(1.5)}
|
||||
|
||||
table.search().where(filt).select(proj).to_list()
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Union
|
||||
|
||||
import pyarrow as pa
|
||||
|
||||
from lancedb._lancedb import PyExpr, expr_col, expr_lit, expr_func
|
||||
|
||||
__all__ = ["Expr", "col", "lit", "func"]
|
||||
|
||||
# Mapping from user-facing type-name strings to PyArrow DataType instances.
# Used by ``Expr.cast`` when the target type is given as a string; several
# aliases map to the same type (e.g. "str" / "string" / "utf8").
_STR_TO_PA_TYPE: dict = {
    "bool": pa.bool_(),
    "boolean": pa.bool_(),
    "int8": pa.int8(),
    "int16": pa.int16(),
    "int32": pa.int32(),
    "int64": pa.int64(),
    "uint8": pa.uint8(),
    "uint16": pa.uint16(),
    "uint32": pa.uint32(),
    "uint64": pa.uint64(),
    "float16": pa.float16(),
    "float32": pa.float32(),
    "float": pa.float32(),
    "float64": pa.float64(),
    "double": pa.float64(),
    "string": pa.string(),
    "utf8": pa.string(),
    "str": pa.string(),
    "large_string": pa.large_utf8(),
    "large_utf8": pa.large_utf8(),
    "date32": pa.date32(),
    "date": pa.date32(),
    "date64": pa.date64(),
}
|
||||
|
||||
|
||||
def _coerce(value: "ExprLike") -> "Expr":
    """Wrap *value* in an :class:`Expr` unless it already is one.

    Plain Python values (``bool``/``int``/``float``/``str``) are converted
    with :func:`lit`; existing :class:`Expr` instances pass through untouched.
    """
    return value if isinstance(value, Expr) else lit(value)


# Values accepted wherever an expression is expected (coerced by ``_coerce``).
ExprLike = Union["Expr", bool, int, float, str]
|
||||
|
||||
|
||||
class Expr:
    """A type-safe expression node.

    Construct instances with :func:`col` and :func:`lit`, then combine them
    using Python operators or the named methods below.

    Examples
    --------
    >>> from lancedb.expr import col, lit
    >>> filt = (col("age") > lit(18)) & (col("name").lower() == lit("alice"))
    >>> proj = {"double": col("x") * lit(2)}
    """

    # Make Expr unhashable so that == returns an Expr rather than being used
    # for dict keys / set membership.
    __hash__ = None  # type: ignore[assignment]

    def __init__(self, inner: PyExpr) -> None:
        self._inner = inner

    # ── comparisons ──────────────────────────────────────────────────────────

    def __eq__(self, other: ExprLike) -> "Expr":  # type: ignore[override]
        """Equal to (``col("x") == 1``)."""
        return Expr(self._inner.eq(_coerce(other)._inner))

    def __ne__(self, other: ExprLike) -> "Expr":  # type: ignore[override]
        """Not equal to (``col("x") != 1``)."""
        return Expr(self._inner.ne(_coerce(other)._inner))

    def __lt__(self, other: ExprLike) -> "Expr":
        """Less than (``col("x") < 1``)."""
        return Expr(self._inner.lt(_coerce(other)._inner))

    def __le__(self, other: ExprLike) -> "Expr":
        """Less than or equal to (``col("x") <= 1``)."""
        return Expr(self._inner.lte(_coerce(other)._inner))

    def __gt__(self, other: ExprLike) -> "Expr":
        """Greater than (``col("x") > 1``)."""
        return Expr(self._inner.gt(_coerce(other)._inner))

    def __ge__(self, other: ExprLike) -> "Expr":
        """Greater than or equal to (``col("x") >= 1``)."""
        return Expr(self._inner.gte(_coerce(other)._inner))

    # ── logical ──────────────────────────────────────────────────────────────
    # NOTE: these coerce plain values just like the arithmetic operators, so
    # the annotations use ExprLike (the old ``"Expr"`` annotation was narrower
    # than the implementation).

    def __and__(self, other: ExprLike) -> "Expr":
        """Logical AND (``expr_a & expr_b``)."""
        return Expr(self._inner.and_(_coerce(other)._inner))

    def __rand__(self, other: ExprLike) -> "Expr":
        """Right-hand logical AND (``value & expr``), mirroring ``__radd__``."""
        return Expr(_coerce(other)._inner.and_(self._inner))

    def __or__(self, other: ExprLike) -> "Expr":
        """Logical OR (``expr_a | expr_b``)."""
        return Expr(self._inner.or_(_coerce(other)._inner))

    def __ror__(self, other: ExprLike) -> "Expr":
        """Right-hand logical OR (``value | expr``), mirroring ``__radd__``."""
        return Expr(_coerce(other)._inner.or_(self._inner))

    def __invert__(self) -> "Expr":
        """Logical NOT (``~expr``)."""
        return Expr(self._inner.not_())

    # ── arithmetic ───────────────────────────────────────────────────────────

    def __add__(self, other: ExprLike) -> "Expr":
        """Add (``col("x") + 1``)."""
        return Expr(self._inner.add(_coerce(other)._inner))

    def __radd__(self, other: ExprLike) -> "Expr":
        """Right-hand add (``1 + col("x")``)."""
        return Expr(_coerce(other)._inner.add(self._inner))

    def __sub__(self, other: ExprLike) -> "Expr":
        """Subtract (``col("x") - 1``)."""
        return Expr(self._inner.sub(_coerce(other)._inner))

    def __rsub__(self, other: ExprLike) -> "Expr":
        """Right-hand subtract (``1 - col("x")``)."""
        return Expr(_coerce(other)._inner.sub(self._inner))

    def __mul__(self, other: ExprLike) -> "Expr":
        """Multiply (``col("x") * 2``)."""
        return Expr(self._inner.mul(_coerce(other)._inner))

    def __rmul__(self, other: ExprLike) -> "Expr":
        """Right-hand multiply (``2 * col("x")``)."""
        return Expr(_coerce(other)._inner.mul(self._inner))

    def __truediv__(self, other: ExprLike) -> "Expr":
        """Divide (``col("x") / 2``)."""
        return Expr(self._inner.div(_coerce(other)._inner))

    def __rtruediv__(self, other: ExprLike) -> "Expr":
        """Right-hand divide (``1 / col("x")``)."""
        return Expr(_coerce(other)._inner.div(self._inner))

    # ── string methods ───────────────────────────────────────────────────────

    def lower(self) -> "Expr":
        """Convert string column values to lowercase."""
        return Expr(self._inner.lower())

    def upper(self) -> "Expr":
        """Convert string column values to uppercase."""
        return Expr(self._inner.upper())

    def contains(self, substr: "ExprLike") -> "Expr":
        """Return True where the string contains *substr*."""
        return Expr(self._inner.contains(_coerce(substr)._inner))

    # ── type cast ────────────────────────────────────────────────────────────

    def cast(self, data_type: Union[str, "pa.DataType"]) -> "Expr":
        """Cast values to *data_type*.

        Parameters
        ----------
        data_type:
            A PyArrow ``DataType`` (e.g. ``pa.int32()``) or one of the type
            name strings: ``"bool"``, ``"int8"``, ``"int16"``, ``"int32"``,
            ``"int64"``, ``"uint8"``–``"uint64"``, ``"float32"``,
            ``"float64"``, ``"string"``, ``"date32"``, ``"date64"``.

        Raises
        ------
        ValueError
            If *data_type* is a string that is not a supported type name.
        """
        if isinstance(data_type, str):
            try:
                data_type = _STR_TO_PA_TYPE[data_type]
            except KeyError:
                # ``from None`` hides the internal KeyError; the ValueError
                # message already carries all the useful information.
                raise ValueError(
                    f"unsupported data type: '{data_type}'. Supported: "
                    f"{', '.join(_STR_TO_PA_TYPE)}"
                ) from None
        return Expr(self._inner.cast(data_type))

    # ── named comparison helpers (alternative to operators) ──────────────────

    def eq(self, other: ExprLike) -> "Expr":
        """Equal to."""
        return self.__eq__(other)

    def ne(self, other: ExprLike) -> "Expr":
        """Not equal to."""
        return self.__ne__(other)

    def lt(self, other: ExprLike) -> "Expr":
        """Less than."""
        return self.__lt__(other)

    def lte(self, other: ExprLike) -> "Expr":
        """Less than or equal to."""
        return self.__le__(other)

    def gt(self, other: ExprLike) -> "Expr":
        """Greater than."""
        return self.__gt__(other)

    def gte(self, other: ExprLike) -> "Expr":
        """Greater than or equal to."""
        return self.__ge__(other)

    def and_(self, other: ExprLike) -> "Expr":
        """Logical AND."""
        return self.__and__(other)

    def or_(self, other: ExprLike) -> "Expr":
        """Logical OR."""
        return self.__or__(other)

    # ── utilities ────────────────────────────────────────────────────────────

    def to_sql(self) -> str:
        """Render the expression as a SQL string (useful for debugging)."""
        return self._inner.to_sql()

    def __repr__(self) -> str:
        return f"Expr({self._inner.to_sql()})"
||||
|
||||
|
||||
# ── free functions ────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def col(name: str) -> Expr:
    """Build an expression that references the table column *name*.

    Parameters
    ----------
    name:
        The column name.

    Examples
    --------
    >>> from lancedb.expr import col, lit
    >>> col("age") > lit(18)
    Expr((age > 18))
    """
    inner = expr_col(name)
    return Expr(inner)
|
||||
|
||||
|
||||
def lit(value: Union[bool, int, float, str]) -> Expr:
    """Build a literal (constant) value expression.

    Parameters
    ----------
    value:
        A Python ``bool``, ``int``, ``float``, or ``str``.

    Examples
    --------
    >>> from lancedb.expr import col, lit
    >>> col("price") * lit(1.1)
    Expr((price * 1.1))
    """
    inner = expr_lit(value)
    return Expr(inner)
|
||||
|
||||
|
||||
def func(name: str, *args: ExprLike) -> Expr:
    """Build a call to an arbitrary SQL function.

    Parameters
    ----------
    name:
        The SQL function name (e.g. ``"lower"``, ``"upper"``).
    *args:
        The function arguments, given as :class:`Expr` objects or plain
        Python literals (which are coerced via :func:`lit`).

    Examples
    --------
    >>> from lancedb.expr import col, func
    >>> func("lower", col("name"))
    Expr(lower(name))
    """
    coerced = [_coerce(arg)._inner for arg in args]
    return Expr(expr_func(name, coerced))
|
||||
@@ -2,70 +2,3 @@
|
||||
# SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
||||
|
||||
"""I/O utilities and interfaces for LanceDB."""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Dict
|
||||
|
||||
|
||||
class StorageOptionsProvider(ABC):
    """Base class for objects that supply storage options to LanceDB tables.

    A provider enables automatic credential refresh for cloud storage
    backends (AWS S3, Azure Blob Storage, GCS, ...). Whenever the current
    credentials carry an expiration time, LanceDB calls
    ``fetch_storage_options()`` again shortly before that time to obtain a
    fresh set.

    Example
    -------
    >>> class MyProvider(StorageOptionsProvider):
    ...     def fetch_storage_options(self) -> Dict[str, str]:
    ...         # Fetch fresh credentials from your credential manager
    ...         return {
    ...             "aws_access_key_id": "...",
    ...             "aws_secret_access_key": "...",
    ...             "expires_at_millis": "1234567890000"  # Optional
    ...         }
    """

    @abstractmethod
    def fetch_storage_options(self) -> Dict[str, str]:
        """Return a fresh set of storage credentials.

        LanceDB invokes this whenever credentials must be (re)loaded. When
        the returned mapping includes an ``"expires_at_millis"`` entry — a
        Unix timestamp in milliseconds — LanceDB schedules another refresh
        before that moment; without the entry, the credentials are treated
        as non-expiring.

        Returns
        -------
        Dict[str, str]
            Cloud storage credentials, plus optionally:

            - "expires_at_millis" (optional): Unix timestamp in milliseconds
              at which the credentials expire
            - Provider-specific credential keys (e.g., aws_access_key_id,
              aws_secret_access_key, etc.)

        Raises
        ------
        RuntimeError
            If credentials cannot be fetched or are invalid
        """
        pass

    def provider_id(self) -> str:
        """Return a human-readable unique identifier for this provider.

        The identifier drives caching and equality: two providers that
        report the same ID share one cached object store connection.

        By default the ID combines the class name with ``str(self)``;
        override this method when custom identification logic is needed.

        Returns
        -------
        str
            A unique identifier for this provider instance
        """
        return "{} {{ repr: {!r} }}".format(type(self).__name__, str(self))
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -38,6 +38,7 @@ from .rerankers.base import Reranker
|
||||
from .rerankers.rrf import RRFReranker
|
||||
from .rerankers.util import check_reranker_result
|
||||
from .util import flatten_columns
|
||||
from .expr import Expr
|
||||
from lancedb._lancedb import fts_query_to_json
|
||||
from typing_extensions import Annotated
|
||||
|
||||
@@ -70,7 +71,7 @@ def ensure_vector_query(
|
||||
) -> Union[List[float], List[List[float]], pa.Array, List[pa.Array]]:
|
||||
if isinstance(val, list):
|
||||
if len(val) == 0:
|
||||
return ValueError("Vector query must be a non-empty list")
|
||||
raise ValueError("Vector query must be a non-empty list")
|
||||
sample = val[0]
|
||||
else:
|
||||
if isinstance(val, float):
|
||||
@@ -83,7 +84,7 @@ def ensure_vector_query(
|
||||
return val
|
||||
if isinstance(sample, list):
|
||||
if len(sample) == 0:
|
||||
return ValueError("Vector query must be a non-empty list")
|
||||
raise ValueError("Vector query must be a non-empty list")
|
||||
if isinstance(sample[0], float):
|
||||
# val is list of list of floats
|
||||
return val
|
||||
@@ -449,8 +450,8 @@ class Query(pydantic.BaseModel):
|
||||
ensure_vector_query,
|
||||
] = None
|
||||
|
||||
# sql filter to refine the query with
|
||||
filter: Optional[str] = None
|
||||
# sql filter or type-safe Expr to refine the query with
|
||||
filter: Optional[Union[str, Expr]] = None
|
||||
|
||||
# if True then apply the filter after vector search
|
||||
postfilter: Optional[bool] = None
|
||||
@@ -464,8 +465,8 @@ class Query(pydantic.BaseModel):
|
||||
# distance type to use for vector search
|
||||
distance_type: Optional[str] = None
|
||||
|
||||
# which columns to return in the results
|
||||
columns: Optional[Union[List[str], Dict[str, str]]] = None
|
||||
# which columns to return in the results (dict values may be str or Expr)
|
||||
columns: Optional[Union[List[str], Dict[str, Union[str, Expr]]]] = None
|
||||
|
||||
# minimum number of IVF partitions to search
|
||||
#
|
||||
@@ -856,14 +857,15 @@ class LanceQueryBuilder(ABC):
|
||||
self._offset = offset
|
||||
return self
|
||||
|
||||
def select(self, columns: Union[list[str], dict[str, str]]) -> Self:
|
||||
def select(self, columns: Union[list[str], dict[str, Union[str, Expr]]]) -> Self:
|
||||
"""Set the columns to return.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
columns: list of str, or dict of str to str default None
|
||||
columns: list of str, or dict of str to str or Expr
|
||||
List of column names to be fetched.
|
||||
Or a dictionary of column names to SQL expressions.
|
||||
Or a dictionary of column names to SQL expressions or
|
||||
:class:`~lancedb.expr.Expr` objects.
|
||||
All columns are fetched if None or unspecified.
|
||||
|
||||
Returns
|
||||
@@ -877,15 +879,15 @@ class LanceQueryBuilder(ABC):
|
||||
raise ValueError("columns must be a list or a dictionary")
|
||||
return self
|
||||
|
||||
def where(self, where: str, prefilter: bool = True) -> Self:
|
||||
def where(self, where: Union[str, Expr], prefilter: bool = True) -> Self:
|
||||
"""Set the where clause.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
where: str
|
||||
The where clause which is a valid SQL where clause. See
|
||||
`Lance filter pushdown <https://lance.org/guide/read_and_write#filter-push-down>`_
|
||||
for valid SQL expressions.
|
||||
where: str or :class:`~lancedb.expr.Expr`
|
||||
The filter condition. Can be a SQL string or a type-safe
|
||||
:class:`~lancedb.expr.Expr` built with :func:`~lancedb.expr.col`
|
||||
and :func:`~lancedb.expr.lit`.
|
||||
prefilter: bool, default True
|
||||
If True, apply the filter before vector search, otherwise the
|
||||
filter is applied on the result of vector search.
|
||||
@@ -1355,15 +1357,17 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
|
||||
|
||||
return result_set
|
||||
|
||||
def where(self, where: str, prefilter: bool = None) -> LanceVectorQueryBuilder:
|
||||
def where(
|
||||
self, where: Union[str, Expr], prefilter: bool = None
|
||||
) -> LanceVectorQueryBuilder:
|
||||
"""Set the where clause.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
where: str
|
||||
The where clause which is a valid SQL where clause. See
|
||||
`Lance filter pushdown <https://lance.org/guide/read_and_write#filter-push-down>`_
|
||||
for valid SQL expressions.
|
||||
where: str or :class:`~lancedb.expr.Expr`
|
||||
The filter condition. Can be a SQL string or a type-safe
|
||||
:class:`~lancedb.expr.Expr` built with :func:`~lancedb.expr.col`
|
||||
and :func:`~lancedb.expr.lit`.
|
||||
prefilter: bool, default True
|
||||
If True, apply the filter before vector search, otherwise the
|
||||
filter is applied on the result of vector search.
|
||||
@@ -2286,10 +2290,20 @@ class AsyncQueryBase(object):
|
||||
"""
|
||||
if isinstance(columns, list) and all(isinstance(c, str) for c in columns):
|
||||
self._inner.select_columns(columns)
|
||||
elif isinstance(columns, dict) and all(
|
||||
isinstance(k, str) and isinstance(v, str) for k, v in columns.items()
|
||||
):
|
||||
self._inner.select(list(columns.items()))
|
||||
elif isinstance(columns, dict) and all(isinstance(k, str) for k in columns):
|
||||
if any(isinstance(v, Expr) for v in columns.values()):
|
||||
# At least one value is an Expr — use the type-safe path.
|
||||
from .expr import _coerce
|
||||
|
||||
pairs = [(k, _coerce(v)._inner) for k, v in columns.items()]
|
||||
self._inner.select_expr(pairs)
|
||||
elif all(isinstance(v, str) for v in columns.values()):
|
||||
self._inner.select(list(columns.items()))
|
||||
else:
|
||||
raise TypeError(
|
||||
"dict values must be str or Expr, got "
|
||||
+ str({k: type(v) for k, v in columns.items()})
|
||||
)
|
||||
else:
|
||||
raise TypeError("columns must be a list of column names or a dict")
|
||||
return self
|
||||
@@ -2529,11 +2543,13 @@ class AsyncStandardQuery(AsyncQueryBase):
|
||||
"""
|
||||
super().__init__(inner)
|
||||
|
||||
def where(self, predicate: str) -> Self:
|
||||
def where(self, predicate: Union[str, Expr]) -> Self:
|
||||
"""
|
||||
Only return rows matching the given predicate
|
||||
|
||||
The predicate should be supplied as an SQL query string.
|
||||
The predicate can be a SQL string or a type-safe
|
||||
:class:`~lancedb.expr.Expr` built with :func:`~lancedb.expr.col`
|
||||
and :func:`~lancedb.expr.lit`.
|
||||
|
||||
Examples
|
||||
--------
|
||||
@@ -2545,7 +2561,10 @@ class AsyncStandardQuery(AsyncQueryBase):
|
||||
Filtering performance can often be improved by creating a scalar index
|
||||
on the filter column(s).
|
||||
"""
|
||||
self._inner.where(predicate)
|
||||
if isinstance(predicate, Expr):
|
||||
self._inner.where_expr(predicate._inner)
|
||||
else:
|
||||
self._inner.where(predicate)
|
||||
return self
|
||||
|
||||
def limit(self, limit: int) -> Self:
|
||||
|
||||
@@ -111,7 +111,7 @@ class RemoteDBConnection(DBConnection):
|
||||
@override
|
||||
def list_namespaces(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListNamespacesResponse:
|
||||
@@ -119,7 +119,7 @@ class RemoteDBConnection(DBConnection):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The parent namespace to list namespaces in.
|
||||
None or empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
@@ -133,18 +133,18 @@ class RemoteDBConnection(DBConnection):
|
||||
ListNamespacesResponse
|
||||
Response containing namespace names and optional page_token for pagination.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
return LOOP.run(
|
||||
self._conn.list_namespaces(
|
||||
namespace=namespace, page_token=page_token, limit=limit
|
||||
namespace_path=namespace_path, page_token=page_token, limit=limit
|
||||
)
|
||||
)
|
||||
|
||||
@override
|
||||
def create_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
namespace_path: List[str],
|
||||
mode: Optional[str] = None,
|
||||
properties: Optional[Dict[str, str]] = None,
|
||||
) -> CreateNamespaceResponse:
|
||||
@@ -152,7 +152,7 @@ class RemoteDBConnection(DBConnection):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
namespace_path: List[str]
|
||||
The namespace identifier to create.
|
||||
mode: str, optional
|
||||
Creation mode - "create" (fail if exists), "exist_ok" (skip if exists),
|
||||
@@ -167,14 +167,14 @@ class RemoteDBConnection(DBConnection):
|
||||
"""
|
||||
return LOOP.run(
|
||||
self._conn.create_namespace(
|
||||
namespace=namespace, mode=mode, properties=properties
|
||||
namespace_path=namespace_path, mode=mode, properties=properties
|
||||
)
|
||||
)
|
||||
|
||||
@override
|
||||
def drop_namespace(
|
||||
self,
|
||||
namespace: List[str],
|
||||
namespace_path: List[str],
|
||||
mode: Optional[str] = None,
|
||||
behavior: Optional[str] = None,
|
||||
) -> DropNamespaceResponse:
|
||||
@@ -182,7 +182,7 @@ class RemoteDBConnection(DBConnection):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
namespace_path: List[str]
|
||||
The namespace identifier to drop.
|
||||
mode: str, optional
|
||||
Whether to skip if not exists ("SKIP") or fail ("FAIL"). Case insensitive.
|
||||
@@ -196,16 +196,20 @@ class RemoteDBConnection(DBConnection):
|
||||
Response containing properties and transaction_id if applicable.
|
||||
"""
|
||||
return LOOP.run(
|
||||
self._conn.drop_namespace(namespace=namespace, mode=mode, behavior=behavior)
|
||||
self._conn.drop_namespace(
|
||||
namespace_path=namespace_path, mode=mode, behavior=behavior
|
||||
)
|
||||
)
|
||||
|
||||
@override
|
||||
def describe_namespace(self, namespace: List[str]) -> DescribeNamespaceResponse:
|
||||
def describe_namespace(
|
||||
self, namespace_path: List[str]
|
||||
) -> DescribeNamespaceResponse:
|
||||
"""Describe a namespace.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str]
|
||||
namespace_path: List[str]
|
||||
The namespace identifier to describe.
|
||||
|
||||
Returns
|
||||
@@ -213,12 +217,12 @@ class RemoteDBConnection(DBConnection):
|
||||
DescribeNamespaceResponse
|
||||
Response containing the namespace properties.
|
||||
"""
|
||||
return LOOP.run(self._conn.describe_namespace(namespace=namespace))
|
||||
return LOOP.run(self._conn.describe_namespace(namespace_path=namespace_path))
|
||||
|
||||
@override
|
||||
def list_tables(
|
||||
self,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
page_token: Optional[str] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> ListTablesResponse:
|
||||
@@ -226,7 +230,7 @@ class RemoteDBConnection(DBConnection):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to list tables in.
|
||||
None or empty list represents root namespace.
|
||||
page_token: str, optional
|
||||
@@ -240,11 +244,11 @@ class RemoteDBConnection(DBConnection):
|
||||
ListTablesResponse
|
||||
Response containing table names and optional page_token for pagination.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
return LOOP.run(
|
||||
self._conn.list_tables(
|
||||
namespace=namespace, page_token=page_token, limit=limit
|
||||
namespace_path=namespace_path, page_token=page_token, limit=limit
|
||||
)
|
||||
)
|
||||
|
||||
@@ -254,7 +258,7 @@ class RemoteDBConnection(DBConnection):
|
||||
page_token: Optional[str] = None,
|
||||
limit: int = 10,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
) -> Iterable[str]:
|
||||
"""List the names of all tables in the database.
|
||||
|
||||
@@ -263,7 +267,7 @@ class RemoteDBConnection(DBConnection):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
namespace: List[str], default []
|
||||
namespace_path: List[str], default []
|
||||
The namespace to list tables in.
|
||||
Empty list represents root namespace.
|
||||
page_token: str
|
||||
@@ -282,11 +286,11 @@ class RemoteDBConnection(DBConnection):
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
return LOOP.run(
|
||||
self._conn.table_names(
|
||||
namespace=namespace, start_after=page_token, limit=limit
|
||||
namespace_path=namespace_path, start_after=page_token, limit=limit
|
||||
)
|
||||
)
|
||||
|
||||
@@ -295,7 +299,7 @@ class RemoteDBConnection(DBConnection):
|
||||
self,
|
||||
name: str,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
index_cache_size: Optional[int] = None,
|
||||
) -> Table:
|
||||
@@ -305,7 +309,7 @@ class RemoteDBConnection(DBConnection):
|
||||
----------
|
||||
name: str
|
||||
The name of the table.
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to open the table from.
|
||||
None or empty list represents root namespace.
|
||||
|
||||
@@ -315,15 +319,15 @@ class RemoteDBConnection(DBConnection):
|
||||
"""
|
||||
from .table import RemoteTable
|
||||
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
if index_cache_size is not None:
|
||||
logging.info(
|
||||
"index_cache_size is ignored in LanceDb Cloud"
|
||||
" (there is no local cache to configure)"
|
||||
)
|
||||
|
||||
table = LOOP.run(self._conn.open_table(name, namespace=namespace))
|
||||
table = LOOP.run(self._conn.open_table(name, namespace_path=namespace_path))
|
||||
return RemoteTable(table, self.db_name)
|
||||
|
||||
def clone_table(
|
||||
@@ -331,7 +335,7 @@ class RemoteDBConnection(DBConnection):
|
||||
target_table_name: str,
|
||||
source_uri: str,
|
||||
*,
|
||||
target_namespace: Optional[List[str]] = None,
|
||||
target_namespace_path: Optional[List[str]] = None,
|
||||
source_version: Optional[int] = None,
|
||||
source_tag: Optional[str] = None,
|
||||
is_shallow: bool = True,
|
||||
@@ -344,7 +348,7 @@ class RemoteDBConnection(DBConnection):
|
||||
The name of the target table to create.
|
||||
source_uri: str
|
||||
The URI of the source table to clone from.
|
||||
target_namespace: List[str], optional
|
||||
target_namespace_path: List[str], optional
|
||||
The namespace for the target table.
|
||||
None or empty list represents root namespace.
|
||||
source_version: int, optional
|
||||
@@ -361,13 +365,13 @@ class RemoteDBConnection(DBConnection):
|
||||
"""
|
||||
from .table import RemoteTable
|
||||
|
||||
if target_namespace is None:
|
||||
target_namespace = []
|
||||
if target_namespace_path is None:
|
||||
target_namespace_path = []
|
||||
table = LOOP.run(
|
||||
self._conn.clone_table(
|
||||
target_table_name,
|
||||
source_uri,
|
||||
target_namespace=target_namespace,
|
||||
target_namespace_path=target_namespace_path,
|
||||
source_version=source_version,
|
||||
source_tag=source_tag,
|
||||
is_shallow=is_shallow,
|
||||
@@ -387,7 +391,7 @@ class RemoteDBConnection(DBConnection):
|
||||
exist_ok: bool = False,
|
||||
embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
) -> Table:
|
||||
"""Create a [Table][lancedb.table.Table] in the database.
|
||||
|
||||
@@ -395,7 +399,7 @@ class RemoteDBConnection(DBConnection):
|
||||
----------
|
||||
name: str
|
||||
The name of the table.
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to create the table in.
|
||||
None or empty list represents root namespace.
|
||||
data: The data to initialize the table, *optional*
|
||||
@@ -495,8 +499,8 @@ class RemoteDBConnection(DBConnection):
|
||||
mode = "exist_ok"
|
||||
elif not mode:
|
||||
mode = "exist_ok"
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
validate_table_name(name)
|
||||
if embedding_functions is not None:
|
||||
logging.warning(
|
||||
@@ -511,7 +515,7 @@ class RemoteDBConnection(DBConnection):
|
||||
self._conn.create_table(
|
||||
name,
|
||||
data,
|
||||
namespace=namespace,
|
||||
namespace_path=namespace_path,
|
||||
mode=mode,
|
||||
schema=schema,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
@@ -521,28 +525,28 @@ class RemoteDBConnection(DBConnection):
|
||||
return RemoteTable(table, self.db_name)
|
||||
|
||||
@override
|
||||
def drop_table(self, name: str, namespace: Optional[List[str]] = None):
|
||||
def drop_table(self, name: str, namespace_path: Optional[List[str]] = None):
|
||||
"""Drop a table from the database.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
name: str
|
||||
The name of the table.
|
||||
namespace: List[str], optional
|
||||
namespace_path: List[str], optional
|
||||
The namespace to drop the table from.
|
||||
None or empty list represents root namespace.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
LOOP.run(self._conn.drop_table(name, namespace=namespace))
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
LOOP.run(self._conn.drop_table(name, namespace_path=namespace_path))
|
||||
|
||||
@override
|
||||
def rename_table(
|
||||
self,
|
||||
cur_name: str,
|
||||
new_name: str,
|
||||
cur_namespace: Optional[List[str]] = None,
|
||||
new_namespace: Optional[List[str]] = None,
|
||||
cur_namespace_path: Optional[List[str]] = None,
|
||||
new_namespace_path: Optional[List[str]] = None,
|
||||
):
|
||||
"""Rename a table in the database.
|
||||
|
||||
@@ -553,19 +557,19 @@ class RemoteDBConnection(DBConnection):
|
||||
new_name: str
|
||||
The new name of the table.
|
||||
"""
|
||||
if cur_namespace is None:
|
||||
cur_namespace = []
|
||||
if new_namespace is None:
|
||||
new_namespace = []
|
||||
if cur_namespace_path is None:
|
||||
cur_namespace_path = []
|
||||
if new_namespace_path is None:
|
||||
new_namespace_path = []
|
||||
LOOP.run(
|
||||
self._conn.rename_table(
|
||||
cur_name,
|
||||
new_name,
|
||||
cur_namespace=cur_namespace,
|
||||
new_namespace=new_namespace,
|
||||
cur_namespace_path=cur_namespace_path,
|
||||
new_namespace_path=new_namespace_path,
|
||||
)
|
||||
)
|
||||
|
||||
async def close(self):
|
||||
"""Close the connection to the database."""
|
||||
self._client.close()
|
||||
self._conn.close()
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
from datetime import timedelta
|
||||
import logging
|
||||
from functools import cached_property
|
||||
from typing import Dict, Iterable, List, Optional, Union, Literal
|
||||
from typing import Any, Callable, Dict, Iterable, List, Optional, Union, Literal
|
||||
import warnings
|
||||
|
||||
from lancedb._lancedb import (
|
||||
@@ -35,6 +35,7 @@ import pyarrow as pa
|
||||
from lancedb.common import DATA, VEC, VECTOR_COLUMN_NAME
|
||||
from lancedb.merge import LanceMergeInsertBuilder
|
||||
from lancedb.embeddings import EmbeddingFunctionRegistry
|
||||
from lancedb.table import _normalize_progress
|
||||
|
||||
from ..query import LanceVectorQueryBuilder, LanceQueryBuilder, LanceTakeQueryBuilder
|
||||
from ..table import AsyncTable, IndexStatistics, Query, Table, Tags
|
||||
@@ -308,6 +309,7 @@ class RemoteTable(Table):
|
||||
mode: str = "append",
|
||||
on_bad_vectors: str = "error",
|
||||
fill_value: float = 0.0,
|
||||
progress: Optional[Union[bool, Callable, Any]] = None,
|
||||
) -> AddResult:
|
||||
"""Add more data to the [Table](Table). It has the same API signature as
|
||||
the OSS version.
|
||||
@@ -330,17 +332,29 @@ class RemoteTable(Table):
|
||||
One of "error", "drop", "fill".
|
||||
fill_value: float, default 0.
|
||||
The value to use when filling vectors. Only used if on_bad_vectors="fill".
|
||||
progress: bool, callable, or tqdm-like, optional
|
||||
A callback or tqdm-compatible progress bar. See
|
||||
:meth:`Table.add` for details.
|
||||
|
||||
Returns
|
||||
-------
|
||||
AddResult
|
||||
An object containing the new version number of the table after adding data.
|
||||
"""
|
||||
return LOOP.run(
|
||||
self._table.add(
|
||||
data, mode=mode, on_bad_vectors=on_bad_vectors, fill_value=fill_value
|
||||
progress, owns = _normalize_progress(progress)
|
||||
try:
|
||||
return LOOP.run(
|
||||
self._table.add(
|
||||
data,
|
||||
mode=mode,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
fill_value=fill_value,
|
||||
progress=progress,
|
||||
)
|
||||
)
|
||||
)
|
||||
finally:
|
||||
if owns:
|
||||
progress.close()
|
||||
|
||||
def search(
|
||||
self,
|
||||
|
||||
@@ -14,6 +14,7 @@ from functools import cached_property
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
Iterable,
|
||||
List,
|
||||
@@ -88,7 +89,6 @@ from .index import lang_mapping
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .db import LanceDBConnection
|
||||
from .io import StorageOptionsProvider
|
||||
from ._lancedb import (
|
||||
Table as LanceDBTable,
|
||||
OptimizeStats,
|
||||
@@ -277,7 +277,7 @@ def _sanitize_data(
|
||||
|
||||
if metadata:
|
||||
new_metadata = target_schema.metadata or {}
|
||||
new_metadata = new_metadata.update(metadata)
|
||||
new_metadata.update(metadata)
|
||||
target_schema = target_schema.with_metadata(new_metadata)
|
||||
|
||||
_validate_schema(target_schema)
|
||||
@@ -556,6 +556,21 @@ def _table_uri(base: str, table_name: str) -> str:
|
||||
return join_uri(base, f"{table_name}.lance")
|
||||
|
||||
|
||||
def _normalize_progress(progress):
|
||||
"""Normalize a ``progress`` parameter for :meth:`Table.add`.
|
||||
|
||||
Returns ``(progress_obj, owns)`` where *owns* is True when we created a
|
||||
tqdm bar that the caller must close.
|
||||
"""
|
||||
if progress is True:
|
||||
from tqdm.auto import tqdm
|
||||
|
||||
return tqdm(unit=" rows"), True
|
||||
if progress is False or progress is None:
|
||||
return None, False
|
||||
return progress, False
|
||||
|
||||
|
||||
class Table(ABC):
|
||||
"""
|
||||
A Table is a collection of Records in a LanceDB Database.
|
||||
@@ -974,6 +989,7 @@ class Table(ABC):
|
||||
mode: AddMode = "append",
|
||||
on_bad_vectors: OnBadVectorsType = "error",
|
||||
fill_value: float = 0.0,
|
||||
progress: Optional[Union[bool, Callable, Any]] = None,
|
||||
) -> AddResult:
|
||||
"""Add more data to the [Table](Table).
|
||||
|
||||
@@ -995,6 +1011,29 @@ class Table(ABC):
|
||||
One of "error", "drop", "fill".
|
||||
fill_value: float, default 0.
|
||||
The value to use when filling vectors. Only used if on_bad_vectors="fill".
|
||||
progress: bool, callable, or tqdm-like, optional
|
||||
Progress reporting during the add operation. Can be:
|
||||
|
||||
- ``True`` to automatically create and display a tqdm progress
|
||||
bar (requires ``tqdm`` to be installed)::
|
||||
|
||||
table.add(data, progress=True)
|
||||
|
||||
- A **callable** that receives a dict with keys ``output_rows``,
|
||||
``output_bytes``, ``total_rows``, ``elapsed_seconds``,
|
||||
``active_tasks``, ``total_tasks``, and ``done``::
|
||||
|
||||
def on_progress(p):
|
||||
print(f"{p['output_rows']}/{p['total_rows']} rows, "
|
||||
f"{p['active_tasks']}/{p['total_tasks']} workers")
|
||||
table.add(data, progress=on_progress)
|
||||
|
||||
- A **tqdm-compatible** progress bar whose ``total`` and
|
||||
``update()`` will be called automatically. The postfix shows
|
||||
write throughput (MB/s) and active worker count::
|
||||
|
||||
with tqdm() as pbar:
|
||||
table.add(data, progress=pbar)
|
||||
|
||||
Returns
|
||||
-------
|
||||
@@ -1736,30 +1775,30 @@ class LanceTable(Table):
|
||||
connection: "LanceDBConnection",
|
||||
name: str,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
storage_options_provider: Optional["StorageOptionsProvider"] = None,
|
||||
index_cache_size: Optional[int] = None,
|
||||
location: Optional[str] = None,
|
||||
namespace_client: Optional[Any] = None,
|
||||
managed_versioning: Optional[bool] = None,
|
||||
pushdown_operations: Optional[set] = None,
|
||||
_async: AsyncTable = None,
|
||||
):
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
self._conn = connection
|
||||
self._namespace = namespace
|
||||
self._namespace_path = namespace_path
|
||||
self._location = location # Store location for use in _dataset_path
|
||||
self._namespace_client = namespace_client
|
||||
self._pushdown_operations = pushdown_operations or set()
|
||||
if _async is not None:
|
||||
self._table = _async
|
||||
else:
|
||||
self._table = LOOP.run(
|
||||
connection._conn.open_table(
|
||||
name,
|
||||
namespace=namespace,
|
||||
namespace_path=namespace_path,
|
||||
storage_options=storage_options,
|
||||
storage_options_provider=storage_options_provider,
|
||||
index_cache_size=index_cache_size,
|
||||
location=location,
|
||||
namespace_client=namespace_client,
|
||||
@@ -1774,13 +1813,13 @@ class LanceTable(Table):
|
||||
@property
|
||||
def namespace(self) -> List[str]:
|
||||
"""Return the namespace path of the table."""
|
||||
return self._namespace
|
||||
return self._namespace_path
|
||||
|
||||
@property
|
||||
def id(self) -> str:
|
||||
"""Return the full identifier of the table (namespace$name)."""
|
||||
if self._namespace:
|
||||
return "$".join(self._namespace + [self.name])
|
||||
if self._namespace_path:
|
||||
return "$".join(self._namespace_path + [self.name])
|
||||
return self.name
|
||||
|
||||
@classmethod
|
||||
@@ -1801,26 +1840,26 @@ class LanceTable(Table):
|
||||
db,
|
||||
name,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
storage_options_provider: Optional["StorageOptionsProvider"] = None,
|
||||
index_cache_size: Optional[int] = None,
|
||||
location: Optional[str] = None,
|
||||
namespace_client: Optional[Any] = None,
|
||||
managed_versioning: Optional[bool] = None,
|
||||
pushdown_operations: Optional[set] = None,
|
||||
):
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
tbl = cls(
|
||||
db,
|
||||
name,
|
||||
namespace=namespace,
|
||||
namespace_path=namespace_path,
|
||||
storage_options=storage_options,
|
||||
storage_options_provider=storage_options_provider,
|
||||
index_cache_size=index_cache_size,
|
||||
location=location,
|
||||
namespace_client=namespace_client,
|
||||
managed_versioning=managed_versioning,
|
||||
pushdown_operations=pushdown_operations,
|
||||
)
|
||||
|
||||
# check the dataset exists
|
||||
@@ -1853,11 +1892,11 @@ class LanceTable(Table):
|
||||
)
|
||||
|
||||
if self._namespace_client is not None:
|
||||
table_id = self._namespace + [self.name]
|
||||
table_id = self._namespace_path + [self.name]
|
||||
return lance.dataset(
|
||||
version=self.version,
|
||||
storage_options=self._conn.storage_options,
|
||||
namespace=self._namespace_client,
|
||||
namespace_client=self._namespace_client,
|
||||
table_id=table_id,
|
||||
**kwargs,
|
||||
)
|
||||
@@ -2492,6 +2531,7 @@ class LanceTable(Table):
|
||||
mode: AddMode = "append",
|
||||
on_bad_vectors: OnBadVectorsType = "error",
|
||||
fill_value: float = 0.0,
|
||||
progress: Optional[Union[bool, Callable, Any]] = None,
|
||||
) -> AddResult:
|
||||
"""Add data to the table.
|
||||
If vector columns are missing and the table
|
||||
@@ -2510,17 +2550,29 @@ class LanceTable(Table):
|
||||
One of "error", "drop", "fill", "null".
|
||||
fill_value: float, default 0.
|
||||
The value to use when filling vectors. Only used if on_bad_vectors="fill".
|
||||
progress: bool, callable, or tqdm-like, optional
|
||||
A callback or tqdm-compatible progress bar. See
|
||||
:meth:`Table.add` for details.
|
||||
|
||||
Returns
|
||||
-------
|
||||
int
|
||||
The number of vectors in the table.
|
||||
"""
|
||||
return LOOP.run(
|
||||
self._table.add(
|
||||
data, mode=mode, on_bad_vectors=on_bad_vectors, fill_value=fill_value
|
||||
progress, owns = _normalize_progress(progress)
|
||||
try:
|
||||
return LOOP.run(
|
||||
self._table.add(
|
||||
data,
|
||||
mode=mode,
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
fill_value=fill_value,
|
||||
progress=progress,
|
||||
)
|
||||
)
|
||||
)
|
||||
finally:
|
||||
if owns:
|
||||
progress.close()
|
||||
|
||||
def merge(
|
||||
self,
|
||||
@@ -2750,13 +2802,13 @@ class LanceTable(Table):
|
||||
fill_value: float = 0.0,
|
||||
embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
|
||||
*,
|
||||
namespace: Optional[List[str]] = None,
|
||||
namespace_path: Optional[List[str]] = None,
|
||||
storage_options: Optional[Dict[str, str | bool]] = None,
|
||||
storage_options_provider: Optional["StorageOptionsProvider"] = None,
|
||||
data_storage_version: Optional[str] = None,
|
||||
enable_v2_manifest_paths: Optional[bool] = None,
|
||||
location: Optional[str] = None,
|
||||
namespace_client: Optional[Any] = None,
|
||||
pushdown_operations: Optional[set] = None,
|
||||
):
|
||||
"""
|
||||
Create a new table.
|
||||
@@ -2811,13 +2863,14 @@ class LanceTable(Table):
|
||||
Deprecated. Set `storage_options` when connecting to the database and set
|
||||
`new_table_enable_v2_manifest_paths` in the options.
|
||||
"""
|
||||
if namespace is None:
|
||||
namespace = []
|
||||
if namespace_path is None:
|
||||
namespace_path = []
|
||||
self = cls.__new__(cls)
|
||||
self._conn = db
|
||||
self._namespace = namespace
|
||||
self._namespace_path = namespace_path
|
||||
self._location = location
|
||||
self._namespace_client = namespace_client
|
||||
self._pushdown_operations = pushdown_operations or set()
|
||||
|
||||
if data_storage_version is not None:
|
||||
warnings.warn(
|
||||
@@ -2850,9 +2903,8 @@ class LanceTable(Table):
|
||||
on_bad_vectors=on_bad_vectors,
|
||||
fill_value=fill_value,
|
||||
embedding_functions=embedding_functions,
|
||||
namespace=namespace,
|
||||
namespace_path=namespace_path,
|
||||
storage_options=storage_options,
|
||||
storage_options_provider=storage_options_provider,
|
||||
location=location,
|
||||
)
|
||||
)
|
||||
@@ -2921,6 +2973,15 @@ class LanceTable(Table):
|
||||
batch_size: Optional[int] = None,
|
||||
timeout: Optional[timedelta] = None,
|
||||
) -> pa.RecordBatchReader:
|
||||
if (
|
||||
"QueryTable" in self._pushdown_operations
|
||||
and self._namespace_client is not None
|
||||
):
|
||||
from lancedb.namespace import _execute_server_side_query
|
||||
|
||||
table_id = self._namespace_path + [self.name]
|
||||
return _execute_server_side_query(self._namespace_client, table_id, query)
|
||||
|
||||
async_iter = LOOP.run(
|
||||
self._table._execute_query(query, batch_size=batch_size, timeout=timeout)
|
||||
)
|
||||
@@ -3769,6 +3830,7 @@ class AsyncTable:
|
||||
mode: Optional[Literal["append", "overwrite"]] = "append",
|
||||
on_bad_vectors: Optional[OnBadVectorsType] = None,
|
||||
fill_value: Optional[float] = None,
|
||||
progress: Optional[Union[bool, Callable, Any]] = None,
|
||||
) -> AddResult:
|
||||
"""Add more data to the [Table](Table).
|
||||
|
||||
@@ -3790,6 +3852,9 @@ class AsyncTable:
|
||||
One of "error", "drop", "fill", "null".
|
||||
fill_value: float, default 0.
|
||||
The value to use when filling vectors. Only used if on_bad_vectors="fill".
|
||||
progress: callable or tqdm-like, optional
|
||||
A callback or tqdm-compatible progress bar. See
|
||||
:meth:`Table.add` for details.
|
||||
|
||||
"""
|
||||
schema = await self.schema()
|
||||
@@ -3800,7 +3865,13 @@ class AsyncTable:
|
||||
|
||||
# _santitize_data is an old code path, but we will use it until the
|
||||
# new code path is ready.
|
||||
if on_bad_vectors != "error" or (
|
||||
if mode == "overwrite":
|
||||
# For overwrite, apply the same preprocessing as create_table
|
||||
# so vector columns are inferred as FixedSizeList.
|
||||
data, _ = sanitize_create_table(
|
||||
data, None, on_bad_vectors=on_bad_vectors, fill_value=fill_value
|
||||
)
|
||||
elif on_bad_vectors != "error" or (
|
||||
schema.metadata is not None and b"embedding_functions" in schema.metadata
|
||||
):
|
||||
data = _sanitize_data(
|
||||
@@ -3813,8 +3884,9 @@ class AsyncTable:
|
||||
)
|
||||
_register_optional_converters()
|
||||
data = to_scannable(data)
|
||||
progress, owns = _normalize_progress(progress)
|
||||
try:
|
||||
return await self._inner.add(data, mode or "append")
|
||||
return await self._inner.add(data, mode or "append", progress=progress)
|
||||
except RuntimeError as e:
|
||||
if "Cast error" in str(e):
|
||||
raise ValueError(e)
|
||||
@@ -3822,6 +3894,9 @@ class AsyncTable:
|
||||
raise ValueError(e)
|
||||
else:
|
||||
raise
|
||||
finally:
|
||||
if owns:
|
||||
progress.close()
|
||||
|
||||
def merge_insert(self, on: Union[str, Iterable[str]]) -> LanceMergeInsertBuilder:
|
||||
"""
|
||||
@@ -4144,7 +4219,7 @@ class AsyncTable:
|
||||
async_query = async_query.offset(query.offset)
|
||||
if query.columns:
|
||||
async_query = async_query.select(query.columns)
|
||||
if query.filter:
|
||||
if query.filter is not None:
|
||||
async_query = async_query.where(query.filter)
|
||||
if query.fast_search:
|
||||
async_query = async_query.fast_search()
|
||||
|
||||
@@ -183,8 +183,8 @@ def test_table_names(tmp_db: lancedb.DBConnection):
|
||||
result = list(tmp_db.table_names("test2", limit=2))
|
||||
assert result == ["test3"], f"Expected ['test3'], got {result}"
|
||||
|
||||
# Test that namespace parameter can be passed as keyword
|
||||
result = list(tmp_db.table_names(namespace=[]))
|
||||
# Test that namespace_path parameter can be passed as keyword
|
||||
result = list(tmp_db.table_names(namespace_path=[]))
|
||||
assert len(result) == 3
|
||||
|
||||
|
||||
@@ -909,7 +909,7 @@ def test_local_namespace_operations(tmp_path):
|
||||
NotImplementedError,
|
||||
match="Namespace operations are not supported for listing database",
|
||||
):
|
||||
db.list_namespaces(namespace=["test"])
|
||||
db.list_namespaces(namespace_path=["test"])
|
||||
|
||||
|
||||
def test_local_create_namespace_not_supported(tmp_path):
|
||||
|
||||
@@ -546,3 +546,24 @@ def test_openai_no_retry_on_401(mock_sleep):
|
||||
assert mock_func.call_count == 1
|
||||
# Verify that sleep was never called (no retries)
|
||||
assert mock_sleep.call_count == 0
|
||||
|
||||
|
||||
def test_url_retrieve_downloads_image():
|
||||
"""
|
||||
Embedding functions like open-clip, siglip, and jinaai use url_retrieve()
|
||||
to download images from HTTP URLs. For example, open_clip._to_pil() calls:
|
||||
|
||||
PIL_Image.open(io.BytesIO(url_retrieve(image)))
|
||||
|
||||
Verify that url_retrieve() can download an image and open it as PIL Image,
|
||||
matching the real usage pattern in embedding functions.
|
||||
"""
|
||||
import io
|
||||
|
||||
Image = pytest.importorskip("PIL.Image")
|
||||
from lancedb.embeddings.utils import url_retrieve
|
||||
|
||||
image_url = "http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg"
|
||||
image_bytes = url_retrieve(image_url)
|
||||
img = Image.open(io.BytesIO(image_bytes))
|
||||
assert img.size[0] > 0 and img.size[1] > 0
|
||||
|
||||
@@ -8,6 +8,7 @@ import shutil
|
||||
import pytest
|
||||
import pyarrow as pa
|
||||
import lancedb
|
||||
from lance_namespace.errors import NamespaceNotEmptyError, TableNotFoundError
|
||||
|
||||
|
||||
class TestNamespaceConnection:
|
||||
@@ -32,6 +33,16 @@ class TestNamespaceConnection:
|
||||
# Initially no tables in root
|
||||
assert len(list(db.table_names())) == 0
|
||||
|
||||
def test_connect_via_connect_helper(self):
|
||||
"""Connecting via lancedb.connect should delegate to namespace connection."""
|
||||
db = lancedb.connect(
|
||||
namespace_client_impl="dir",
|
||||
namespace_client_properties={"root": self.temp_dir},
|
||||
)
|
||||
|
||||
assert isinstance(db, lancedb.LanceNamespaceDBConnection)
|
||||
assert len(list(db.table_names())) == 0
|
||||
|
||||
def test_create_table_through_namespace(self):
|
||||
"""Test creating a table through namespace."""
|
||||
db = lancedb.connect_namespace("dir", {"root": self.temp_dir})
|
||||
@@ -49,14 +60,14 @@ class TestNamespaceConnection:
|
||||
)
|
||||
|
||||
# Create empty table in child namespace
|
||||
table = db.create_table("test_table", schema=schema, namespace=["test_ns"])
|
||||
table = db.create_table("test_table", schema=schema, namespace_path=["test_ns"])
|
||||
assert table is not None
|
||||
assert table.name == "test_table"
|
||||
assert table.namespace == ["test_ns"]
|
||||
assert table.id == "test_ns$test_table"
|
||||
|
||||
# Table should appear in child namespace
|
||||
table_names = list(db.table_names(namespace=["test_ns"]))
|
||||
table_names = list(db.table_names(namespace_path=["test_ns"]))
|
||||
assert "test_table" in table_names
|
||||
assert len(table_names) == 1
|
||||
|
||||
@@ -79,10 +90,10 @@ class TestNamespaceConnection:
|
||||
pa.field("vector", pa.list_(pa.float32(), 2)),
|
||||
]
|
||||
)
|
||||
db.create_table("test_table", schema=schema, namespace=["test_ns"])
|
||||
db.create_table("test_table", schema=schema, namespace_path=["test_ns"])
|
||||
|
||||
# Open the table
|
||||
table = db.open_table("test_table", namespace=["test_ns"])
|
||||
table = db.open_table("test_table", namespace_path=["test_ns"])
|
||||
assert table is not None
|
||||
assert table.name == "test_table"
|
||||
assert table.namespace == ["test_ns"]
|
||||
@@ -107,31 +118,31 @@ class TestNamespaceConnection:
|
||||
pa.field("vector", pa.list_(pa.float32(), 2)),
|
||||
]
|
||||
)
|
||||
db.create_table("table1", schema=schema, namespace=["test_ns"])
|
||||
db.create_table("table2", schema=schema, namespace=["test_ns"])
|
||||
db.create_table("table1", schema=schema, namespace_path=["test_ns"])
|
||||
db.create_table("table2", schema=schema, namespace_path=["test_ns"])
|
||||
|
||||
# Verify both tables exist in child namespace
|
||||
table_names = list(db.table_names(namespace=["test_ns"]))
|
||||
table_names = list(db.table_names(namespace_path=["test_ns"]))
|
||||
assert "table1" in table_names
|
||||
assert "table2" in table_names
|
||||
assert len(table_names) == 2
|
||||
|
||||
# Drop one table
|
||||
db.drop_table("table1", namespace=["test_ns"])
|
||||
db.drop_table("table1", namespace_path=["test_ns"])
|
||||
|
||||
# Verify only table2 remains
|
||||
table_names = list(db.table_names(namespace=["test_ns"]))
|
||||
table_names = list(db.table_names(namespace_path=["test_ns"]))
|
||||
assert "table1" not in table_names
|
||||
assert "table2" in table_names
|
||||
assert len(table_names) == 1
|
||||
|
||||
# Drop the second table
|
||||
db.drop_table("table2", namespace=["test_ns"])
|
||||
assert len(list(db.table_names(namespace=["test_ns"]))) == 0
|
||||
db.drop_table("table2", namespace_path=["test_ns"])
|
||||
assert len(list(db.table_names(namespace_path=["test_ns"]))) == 0
|
||||
|
||||
# Should not be able to open dropped table
|
||||
with pytest.raises(RuntimeError):
|
||||
db.open_table("table1", namespace=["test_ns"])
|
||||
with pytest.raises(TableNotFoundError):
|
||||
db.open_table("table1", namespace_path=["test_ns"])
|
||||
|
||||
def test_create_table_with_schema(self):
|
||||
"""Test creating a table with explicit schema through namespace."""
|
||||
@@ -150,7 +161,7 @@ class TestNamespaceConnection:
|
||||
)
|
||||
|
||||
# Create table with schema in child namespace
|
||||
table = db.create_table("test_table", schema=schema, namespace=["test_ns"])
|
||||
table = db.create_table("test_table", schema=schema, namespace_path=["test_ns"])
|
||||
assert table is not None
|
||||
assert table.namespace == ["test_ns"]
|
||||
|
||||
@@ -174,7 +185,7 @@ class TestNamespaceConnection:
|
||||
pa.field("vector", pa.list_(pa.float32(), 2)),
|
||||
]
|
||||
)
|
||||
db.create_table("old_name", schema=schema, namespace=["test_ns"])
|
||||
db.create_table("old_name", schema=schema, namespace_path=["test_ns"])
|
||||
|
||||
# Rename should raise NotImplementedError
|
||||
with pytest.raises(NotImplementedError, match="rename_table is not supported"):
|
||||
@@ -195,20 +206,20 @@ class TestNamespaceConnection:
|
||||
]
|
||||
)
|
||||
for i in range(3):
|
||||
db.create_table(f"table{i}", schema=schema, namespace=["test_ns"])
|
||||
db.create_table(f"table{i}", schema=schema, namespace_path=["test_ns"])
|
||||
|
||||
# Verify tables exist in child namespace
|
||||
assert len(list(db.table_names(namespace=["test_ns"]))) == 3
|
||||
assert len(list(db.table_names(namespace_path=["test_ns"]))) == 3
|
||||
|
||||
# Drop all tables in child namespace
|
||||
db.drop_all_tables(namespace=["test_ns"])
|
||||
db.drop_all_tables(namespace_path=["test_ns"])
|
||||
|
||||
# Verify all tables are gone from child namespace
|
||||
assert len(list(db.table_names(namespace=["test_ns"]))) == 0
|
||||
assert len(list(db.table_names(namespace_path=["test_ns"]))) == 0
|
||||
|
||||
# Test that table_names works with keyword-only namespace parameter
|
||||
db.create_table("test_table", schema=schema, namespace=["test_ns"])
|
||||
result = list(db.table_names(namespace=["test_ns"]))
|
||||
db.create_table("test_table", schema=schema, namespace_path=["test_ns"])
|
||||
result = list(db.table_names(namespace_path=["test_ns"]))
|
||||
assert "test_table" in result
|
||||
|
||||
def test_table_operations(self):
|
||||
@@ -226,7 +237,7 @@ class TestNamespaceConnection:
|
||||
pa.field("text", pa.string()),
|
||||
]
|
||||
)
|
||||
table = db.create_table("test_table", schema=schema, namespace=["test_ns"])
|
||||
table = db.create_table("test_table", schema=schema, namespace_path=["test_ns"])
|
||||
|
||||
# Verify empty table was created
|
||||
result = table.to_pandas()
|
||||
@@ -297,25 +308,25 @@ class TestNamespaceConnection:
|
||||
]
|
||||
)
|
||||
table = db.create_table(
|
||||
"test_table", schema=schema, namespace=["test_namespace"]
|
||||
"test_table", schema=schema, namespace_path=["test_namespace"]
|
||||
)
|
||||
assert table is not None
|
||||
|
||||
# Verify table exists in namespace
|
||||
tables_in_namespace = list(db.table_names(namespace=["test_namespace"]))
|
||||
tables_in_namespace = list(db.table_names(namespace_path=["test_namespace"]))
|
||||
assert "test_table" in tables_in_namespace
|
||||
assert len(tables_in_namespace) == 1
|
||||
|
||||
# Open table from namespace
|
||||
table = db.open_table("test_table", namespace=["test_namespace"])
|
||||
table = db.open_table("test_table", namespace_path=["test_namespace"])
|
||||
assert table is not None
|
||||
assert table.name == "test_table"
|
||||
|
||||
# Drop table from namespace
|
||||
db.drop_table("test_table", namespace=["test_namespace"])
|
||||
db.drop_table("test_table", namespace_path=["test_namespace"])
|
||||
|
||||
# Verify table no longer exists in namespace
|
||||
tables_in_namespace = list(db.table_names(namespace=["test_namespace"]))
|
||||
tables_in_namespace = list(db.table_names(namespace_path=["test_namespace"]))
|
||||
assert len(tables_in_namespace) == 0
|
||||
|
||||
# Drop namespace
|
||||
@@ -337,14 +348,14 @@ class TestNamespaceConnection:
|
||||
pa.field("vector", pa.list_(pa.float32(), 2)),
|
||||
]
|
||||
)
|
||||
db.create_table("test_table", schema=schema, namespace=["test_namespace"])
|
||||
db.create_table("test_table", schema=schema, namespace_path=["test_namespace"])
|
||||
|
||||
# Try to drop namespace with tables - should fail
|
||||
with pytest.raises(RuntimeError, match="is not empty"):
|
||||
with pytest.raises(NamespaceNotEmptyError):
|
||||
db.drop_namespace(["test_namespace"])
|
||||
|
||||
# Drop table first
|
||||
db.drop_table("test_table", namespace=["test_namespace"])
|
||||
db.drop_table("test_table", namespace_path=["test_namespace"])
|
||||
|
||||
# Now dropping namespace should work
|
||||
db.drop_namespace(["test_namespace"])
|
||||
@@ -367,10 +378,10 @@ class TestNamespaceConnection:
|
||||
|
||||
# Create table with same name in both namespaces
|
||||
table_a = db.create_table(
|
||||
"same_name_table", schema=schema, namespace=["namespace_a"]
|
||||
"same_name_table", schema=schema, namespace_path=["namespace_a"]
|
||||
)
|
||||
table_b = db.create_table(
|
||||
"same_name_table", schema=schema, namespace=["namespace_b"]
|
||||
"same_name_table", schema=schema, namespace_path=["namespace_b"]
|
||||
)
|
||||
|
||||
# Add different data to each table
|
||||
@@ -388,7 +399,9 @@ class TestNamespaceConnection:
|
||||
table_b.add(data_b)
|
||||
|
||||
# Verify data in namespace_a table
|
||||
opened_table_a = db.open_table("same_name_table", namespace=["namespace_a"])
|
||||
opened_table_a = db.open_table(
|
||||
"same_name_table", namespace_path=["namespace_a"]
|
||||
)
|
||||
result_a = opened_table_a.to_pandas().sort_values("id").reset_index(drop=True)
|
||||
assert len(result_a) == 2
|
||||
assert result_a["id"].tolist() == [1, 2]
|
||||
@@ -399,7 +412,9 @@ class TestNamespaceConnection:
|
||||
assert [v.tolist() for v in result_a["vector"]] == [[1.0, 2.0], [3.0, 4.0]]
|
||||
|
||||
# Verify data in namespace_b table
|
||||
opened_table_b = db.open_table("same_name_table", namespace=["namespace_b"])
|
||||
opened_table_b = db.open_table(
|
||||
"same_name_table", namespace_path=["namespace_b"]
|
||||
)
|
||||
result_b = opened_table_b.to_pandas().sort_values("id").reset_index(drop=True)
|
||||
assert len(result_b) == 3
|
||||
assert result_b["id"].tolist() == [10, 20, 30]
|
||||
@@ -419,8 +434,8 @@ class TestNamespaceConnection:
|
||||
assert "same_name_table" not in root_tables
|
||||
|
||||
# Clean up
|
||||
db.drop_table("same_name_table", namespace=["namespace_a"])
|
||||
db.drop_table("same_name_table", namespace=["namespace_b"])
|
||||
db.drop_table("same_name_table", namespace_path=["namespace_a"])
|
||||
db.drop_table("same_name_table", namespace_path=["namespace_b"])
|
||||
db.drop_namespace(["namespace_a"])
|
||||
db.drop_namespace(["namespace_b"])
|
||||
|
||||
@@ -448,6 +463,8 @@ class TestAsyncNamespaceConnection:
|
||||
table_names = await db.table_names()
|
||||
assert len(list(table_names)) == 0
|
||||
|
||||
# Async connect via namespace helper is not enabled yet.
|
||||
|
||||
async def test_create_table_async(self):
|
||||
"""Test creating a table asynchronously through namespace."""
|
||||
db = lancedb.connect_namespace_async("dir", {"root": self.temp_dir})
|
||||
@@ -466,13 +483,13 @@ class TestAsyncNamespaceConnection:
|
||||
|
||||
# Create empty table in child namespace
|
||||
table = await db.create_table(
|
||||
"test_table", schema=schema, namespace=["test_ns"]
|
||||
"test_table", schema=schema, namespace_path=["test_ns"]
|
||||
)
|
||||
assert table is not None
|
||||
assert isinstance(table, lancedb.AsyncTable)
|
||||
|
||||
# Table should appear in child namespace
|
||||
table_names = await db.table_names(namespace=["test_ns"])
|
||||
table_names = await db.table_names(namespace_path=["test_ns"])
|
||||
assert "test_table" in list(table_names)
|
||||
|
||||
async def test_open_table_async(self):
|
||||
@@ -489,10 +506,10 @@ class TestAsyncNamespaceConnection:
|
||||
pa.field("vector", pa.list_(pa.float32(), 2)),
|
||||
]
|
||||
)
|
||||
await db.create_table("test_table", schema=schema, namespace=["test_ns"])
|
||||
await db.create_table("test_table", schema=schema, namespace_path=["test_ns"])
|
||||
|
||||
# Open the table
|
||||
table = await db.open_table("test_table", namespace=["test_ns"])
|
||||
table = await db.open_table("test_table", namespace_path=["test_ns"])
|
||||
assert table is not None
|
||||
assert isinstance(table, lancedb.AsyncTable)
|
||||
|
||||
@@ -546,20 +563,20 @@ class TestAsyncNamespaceConnection:
|
||||
pa.field("vector", pa.list_(pa.float32(), 2)),
|
||||
]
|
||||
)
|
||||
await db.create_table("table1", schema=schema, namespace=["test_ns"])
|
||||
await db.create_table("table2", schema=schema, namespace=["test_ns"])
|
||||
await db.create_table("table1", schema=schema, namespace_path=["test_ns"])
|
||||
await db.create_table("table2", schema=schema, namespace_path=["test_ns"])
|
||||
|
||||
# Verify both tables exist in child namespace
|
||||
table_names = list(await db.table_names(namespace=["test_ns"]))
|
||||
table_names = list(await db.table_names(namespace_path=["test_ns"]))
|
||||
assert "table1" in table_names
|
||||
assert "table2" in table_names
|
||||
assert len(table_names) == 2
|
||||
|
||||
# Drop one table
|
||||
await db.drop_table("table1", namespace=["test_ns"])
|
||||
await db.drop_table("table1", namespace_path=["test_ns"])
|
||||
|
||||
# Verify only table2 remains
|
||||
table_names = list(await db.table_names(namespace=["test_ns"]))
|
||||
table_names = list(await db.table_names(namespace_path=["test_ns"]))
|
||||
assert "table1" not in table_names
|
||||
assert "table2" in table_names
|
||||
assert len(table_names) == 1
|
||||
@@ -588,20 +605,24 @@ class TestAsyncNamespaceConnection:
|
||||
]
|
||||
)
|
||||
table = await db.create_table(
|
||||
"test_table", schema=schema, namespace=["test_namespace"]
|
||||
"test_table", schema=schema, namespace_path=["test_namespace"]
|
||||
)
|
||||
assert table is not None
|
||||
|
||||
# Verify table exists in namespace
|
||||
tables_in_namespace = list(await db.table_names(namespace=["test_namespace"]))
|
||||
tables_in_namespace = list(
|
||||
await db.table_names(namespace_path=["test_namespace"])
|
||||
)
|
||||
assert "test_table" in tables_in_namespace
|
||||
assert len(tables_in_namespace) == 1
|
||||
|
||||
# Drop table from namespace
|
||||
await db.drop_table("test_table", namespace=["test_namespace"])
|
||||
await db.drop_table("test_table", namespace_path=["test_namespace"])
|
||||
|
||||
# Verify table no longer exists in namespace
|
||||
tables_in_namespace = list(await db.table_names(namespace=["test_namespace"]))
|
||||
tables_in_namespace = list(
|
||||
await db.table_names(namespace_path=["test_namespace"])
|
||||
)
|
||||
assert len(tables_in_namespace) == 0
|
||||
|
||||
# Drop namespace
|
||||
@@ -626,15 +647,98 @@ class TestAsyncNamespaceConnection:
|
||||
]
|
||||
)
|
||||
for i in range(3):
|
||||
await db.create_table(f"table{i}", schema=schema, namespace=["test_ns"])
|
||||
await db.create_table(
|
||||
f"table{i}", schema=schema, namespace_path=["test_ns"]
|
||||
)
|
||||
|
||||
# Verify tables exist in child namespace
|
||||
table_names = await db.table_names(namespace=["test_ns"])
|
||||
table_names = await db.table_names(namespace_path=["test_ns"])
|
||||
assert len(list(table_names)) == 3
|
||||
|
||||
# Drop all tables in child namespace
|
||||
await db.drop_all_tables(namespace=["test_ns"])
|
||||
await db.drop_all_tables(namespace_path=["test_ns"])
|
||||
|
||||
# Verify all tables are gone from child namespace
|
||||
table_names = await db.table_names(namespace=["test_ns"])
|
||||
table_names = await db.table_names(namespace_path=["test_ns"])
|
||||
assert len(list(table_names)) == 0
|
||||
|
||||
|
||||
class TestPushdownOperations:
|
||||
"""Test pushdown operations on namespace connections."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Set up test fixtures."""
|
||||
self.temp_dir = tempfile.mkdtemp()
|
||||
|
||||
def teardown_method(self):
|
||||
"""Clean up test fixtures."""
|
||||
shutil.rmtree(self.temp_dir, ignore_errors=True)
|
||||
|
||||
def test_query_table_pushdown_stored(self):
|
||||
"""Test that QueryTable pushdown is stored on sync connection."""
|
||||
db = lancedb.connect_namespace(
|
||||
"dir",
|
||||
{"root": self.temp_dir},
|
||||
namespace_client_pushdown_operations=["QueryTable"],
|
||||
)
|
||||
assert "QueryTable" in db._pushdown_operations
|
||||
|
||||
def test_create_table_pushdown_stored(self):
|
||||
"""Test that CreateTable pushdown is stored on sync connection."""
|
||||
db = lancedb.connect_namespace(
|
||||
"dir",
|
||||
{"root": self.temp_dir},
|
||||
namespace_client_pushdown_operations=["CreateTable"],
|
||||
)
|
||||
assert "CreateTable" in db._pushdown_operations
|
||||
|
||||
def test_both_pushdowns_stored(self):
|
||||
"""Test that both pushdown operations can be set together."""
|
||||
db = lancedb.connect_namespace(
|
||||
"dir",
|
||||
{"root": self.temp_dir},
|
||||
namespace_client_pushdown_operations=["QueryTable", "CreateTable"],
|
||||
)
|
||||
assert "QueryTable" in db._pushdown_operations
|
||||
assert "CreateTable" in db._pushdown_operations
|
||||
|
||||
def test_pushdown_defaults_to_empty(self):
|
||||
"""Test that pushdown operations default to empty."""
|
||||
db = lancedb.connect_namespace("dir", {"root": self.temp_dir})
|
||||
assert len(db._pushdown_operations) == 0
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
class TestAsyncPushdownOperations:
|
||||
"""Test pushdown operations on async namespace connections."""
|
||||
|
||||
def setup_method(self):
|
||||
"""Set up test fixtures."""
|
||||
self.temp_dir = tempfile.mkdtemp()
|
||||
|
||||
def teardown_method(self):
|
||||
"""Clean up test fixtures."""
|
||||
shutil.rmtree(self.temp_dir, ignore_errors=True)
|
||||
|
||||
async def test_async_query_table_pushdown_stored(self):
|
||||
"""Test that QueryTable pushdown is stored on async connection."""
|
||||
db = lancedb.connect_namespace_async(
|
||||
"dir",
|
||||
{"root": self.temp_dir},
|
||||
namespace_client_pushdown_operations=["QueryTable"],
|
||||
)
|
||||
assert "QueryTable" in db._pushdown_operations
|
||||
|
||||
async def test_async_create_table_pushdown_stored(self):
|
||||
"""Test that CreateTable pushdown is stored on async connection."""
|
||||
db = lancedb.connect_namespace_async(
|
||||
"dir",
|
||||
{"root": self.temp_dir},
|
||||
namespace_client_pushdown_operations=["CreateTable"],
|
||||
)
|
||||
assert "CreateTable" in db._pushdown_operations
|
||||
|
||||
async def test_async_pushdown_defaults_to_empty(self):
|
||||
"""Test that pushdown operations default to empty on async connection."""
|
||||
db = lancedb.connect_namespace_async("dir", {"root": self.temp_dir})
|
||||
assert len(db._pushdown_operations) == 0
|
||||
|
||||
@@ -4,9 +4,11 @@
|
||||
"""
|
||||
Integration tests for LanceDB Namespace with S3 and credential refresh.
|
||||
|
||||
This test simulates a namespace server that returns incrementing credentials
|
||||
and verifies that the credential refresh mechanism works correctly for both
|
||||
create_table and open_table operations.
|
||||
This test uses DirectoryNamespace with native ops_metrics and vend_input_storage_options
|
||||
features to track API calls and test credential refresh mechanisms.
|
||||
|
||||
Tests are parameterized to run with both DirectoryNamespace and a CustomNamespace
|
||||
wrapper to verify Python-Rust binding works correctly for custom implementations.
|
||||
|
||||
Tests verify:
|
||||
- Storage options provider is auto-created and used
|
||||
@@ -18,22 +20,136 @@ Tests verify:
|
||||
import copy
|
||||
import time
|
||||
import uuid
|
||||
from threading import Lock
|
||||
from typing import Dict
|
||||
from typing import Dict, Optional
|
||||
|
||||
import pyarrow as pa
|
||||
import pytest
|
||||
from lance_namespace import (
|
||||
CreateEmptyTableRequest,
|
||||
CreateEmptyTableResponse,
|
||||
from lance.namespace import (
|
||||
DeclareTableRequest,
|
||||
DeclareTableResponse,
|
||||
DescribeTableRequest,
|
||||
DescribeTableResponse,
|
||||
DirectoryNamespace,
|
||||
LanceNamespace,
|
||||
)
|
||||
from lance_namespace import (
|
||||
CreateNamespaceRequest,
|
||||
CreateNamespaceResponse,
|
||||
CreateTableRequest,
|
||||
CreateTableResponse,
|
||||
CreateTableVersionRequest,
|
||||
CreateTableVersionResponse,
|
||||
DeregisterTableRequest,
|
||||
DeregisterTableResponse,
|
||||
DescribeNamespaceRequest,
|
||||
DescribeNamespaceResponse,
|
||||
DescribeTableVersionRequest,
|
||||
DescribeTableVersionResponse,
|
||||
DropNamespaceRequest,
|
||||
DropNamespaceResponse,
|
||||
DropTableRequest,
|
||||
DropTableResponse,
|
||||
ListNamespacesRequest,
|
||||
ListNamespacesResponse,
|
||||
ListTablesRequest,
|
||||
ListTablesResponse,
|
||||
ListTableVersionsRequest,
|
||||
ListTableVersionsResponse,
|
||||
NamespaceExistsRequest,
|
||||
RegisterTableRequest,
|
||||
RegisterTableResponse,
|
||||
TableExistsRequest,
|
||||
)
|
||||
from lancedb.namespace import LanceNamespaceDBConnection
|
||||
|
||||
|
||||
class CustomNamespace(LanceNamespace):
|
||||
"""A custom namespace wrapper that delegates to DirectoryNamespace.
|
||||
|
||||
This class verifies that the Python-Rust binding works correctly for
|
||||
custom namespace implementations that wrap the native DirectoryNamespace.
|
||||
All methods simply delegate to the underlying DirectoryNamespace instance.
|
||||
"""
|
||||
|
||||
def __init__(self, inner: DirectoryNamespace):
|
||||
self._inner = inner
|
||||
|
||||
def namespace_id(self) -> str:
|
||||
return f"CustomNamespace[{self._inner.namespace_id()}]"
|
||||
|
||||
def create_namespace(
|
||||
self, request: CreateNamespaceRequest
|
||||
) -> CreateNamespaceResponse:
|
||||
return self._inner.create_namespace(request)
|
||||
|
||||
def describe_namespace(
|
||||
self, request: DescribeNamespaceRequest
|
||||
) -> DescribeNamespaceResponse:
|
||||
return self._inner.describe_namespace(request)
|
||||
|
||||
def namespace_exists(self, request: NamespaceExistsRequest) -> None:
|
||||
return self._inner.namespace_exists(request)
|
||||
|
||||
def drop_namespace(self, request: DropNamespaceRequest) -> DropNamespaceResponse:
|
||||
return self._inner.drop_namespace(request)
|
||||
|
||||
def list_namespaces(self, request: ListNamespacesRequest) -> ListNamespacesResponse:
|
||||
return self._inner.list_namespaces(request)
|
||||
|
||||
def create_table(
|
||||
self, request: CreateTableRequest, data: bytes
|
||||
) -> CreateTableResponse:
|
||||
return self._inner.create_table(request, data)
|
||||
|
||||
def declare_table(self, request: DeclareTableRequest) -> DeclareTableResponse:
|
||||
return self._inner.declare_table(request)
|
||||
|
||||
def describe_table(self, request: DescribeTableRequest) -> DescribeTableResponse:
|
||||
return self._inner.describe_table(request)
|
||||
|
||||
def table_exists(self, request: TableExistsRequest) -> None:
|
||||
return self._inner.table_exists(request)
|
||||
|
||||
def drop_table(self, request: DropTableRequest) -> DropTableResponse:
|
||||
return self._inner.drop_table(request)
|
||||
|
||||
def list_tables(self, request: ListTablesRequest) -> ListTablesResponse:
|
||||
return self._inner.list_tables(request)
|
||||
|
||||
def register_table(self, request: RegisterTableRequest) -> RegisterTableResponse:
|
||||
return self._inner.register_table(request)
|
||||
|
||||
def deregister_table(
|
||||
self, request: DeregisterTableRequest
|
||||
) -> DeregisterTableResponse:
|
||||
return self._inner.deregister_table(request)
|
||||
|
||||
def list_table_versions(
|
||||
self, request: ListTableVersionsRequest
|
||||
) -> ListTableVersionsResponse:
|
||||
return self._inner.list_table_versions(request)
|
||||
|
||||
def describe_table_version(
|
||||
self, request: DescribeTableVersionRequest
|
||||
) -> DescribeTableVersionResponse:
|
||||
return self._inner.describe_table_version(request)
|
||||
|
||||
def create_table_version(
|
||||
self, request: CreateTableVersionRequest
|
||||
) -> CreateTableVersionResponse:
|
||||
return self._inner.create_table_version(request)
|
||||
|
||||
def retrieve_ops_metrics(self) -> Optional[Dict[str, int]]:
|
||||
return self._inner.retrieve_ops_metrics()
|
||||
|
||||
|
||||
def _wrap_if_custom(ns_client: DirectoryNamespace, use_custom: bool):
|
||||
"""Wrap namespace client in CustomNamespace if use_custom is True."""
|
||||
if use_custom:
|
||||
return CustomNamespace(ns_client)
|
||||
return ns_client
|
||||
|
||||
|
||||
# LocalStack S3 configuration
|
||||
CONFIG = {
|
||||
"allow_http": "true",
|
||||
@@ -89,162 +205,88 @@ def delete_bucket(s3, bucket_name):
|
||||
pass
|
||||
|
||||
|
||||
class TrackingNamespace(LanceNamespace):
|
||||
def create_tracking_namespace(
|
||||
bucket_name: str,
|
||||
storage_options: dict,
|
||||
credential_expires_in_seconds: int = 60,
|
||||
use_custom: bool = False,
|
||||
):
|
||||
"""Create a DirectoryNamespace with ops metrics and credential vending enabled.
|
||||
|
||||
Uses native DirectoryNamespace features:
|
||||
- ops_metrics_enabled=true: Tracks API call counts via retrieve_ops_metrics()
|
||||
- vend_input_storage_options=true: Returns input storage options in responses
|
||||
- vend_input_storage_options_refresh_interval_millis: Adds expires_at_millis
|
||||
|
||||
Args:
|
||||
bucket_name: S3 bucket name or local path
|
||||
storage_options: Storage options to pass through (credentials, endpoint, etc.)
|
||||
credential_expires_in_seconds: Interval in seconds for credential expiration
|
||||
use_custom: If True, wrap in CustomNamespace for testing custom implementations
|
||||
|
||||
Returns:
|
||||
Tuple of (namespace_client, inner_namespace_client) where inner is always
|
||||
the DirectoryNamespace (used for metrics retrieval)
|
||||
"""
|
||||
Mock namespace that wraps DirectoryNamespace and tracks API calls.
|
||||
# Add refresh_offset_millis to storage options so that credentials are not
|
||||
# considered expired immediately. Set to 1 second (1000ms) so that refresh
|
||||
# checks work correctly with short-lived credentials in tests.
|
||||
storage_options_with_refresh = dict(storage_options)
|
||||
storage_options_with_refresh["refresh_offset_millis"] = "1000"
|
||||
|
||||
This namespace returns incrementing credentials with each API call to simulate
|
||||
credential rotation. It also tracks the number of times each API is called
|
||||
to verify caching behavior.
|
||||
"""
|
||||
dir_props = {f"storage.{k}": v for k, v in storage_options_with_refresh.items()}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
bucket_name: str,
|
||||
storage_options: Dict[str, str],
|
||||
credential_expires_in_seconds: int = 60,
|
||||
):
|
||||
from lance.namespace import DirectoryNamespace
|
||||
if bucket_name.startswith("/") or bucket_name.startswith("file://"):
|
||||
dir_props["root"] = f"{bucket_name}/namespace_root"
|
||||
else:
|
||||
dir_props["root"] = f"s3://{bucket_name}/namespace_root"
|
||||
|
||||
self.bucket_name = bucket_name
|
||||
self.base_storage_options = storage_options
|
||||
self.credential_expires_in_seconds = credential_expires_in_seconds
|
||||
self.describe_call_count = 0
|
||||
self.create_call_count = 0
|
||||
self.lock = Lock()
|
||||
# Enable ops metrics tracking
|
||||
dir_props["ops_metrics_enabled"] = "true"
|
||||
# Enable storage options vending
|
||||
dir_props["vend_input_storage_options"] = "true"
|
||||
# Set refresh interval in milliseconds
|
||||
dir_props["vend_input_storage_options_refresh_interval_millis"] = str(
|
||||
credential_expires_in_seconds * 1000
|
||||
)
|
||||
|
||||
# Create underlying DirectoryNamespace with storage options
|
||||
dir_props = {f"storage.{k}": v for k, v in storage_options.items()}
|
||||
inner_ns_client = DirectoryNamespace(**dir_props)
|
||||
ns_client = _wrap_if_custom(inner_ns_client, use_custom)
|
||||
return ns_client, inner_ns_client
|
||||
|
||||
# Use S3 path for bucket name, local path for file paths
|
||||
if bucket_name.startswith("/") or bucket_name.startswith("file://"):
|
||||
dir_props["root"] = f"{bucket_name}/namespace_root"
|
||||
else:
|
||||
dir_props["root"] = f"s3://{bucket_name}/namespace_root"
|
||||
|
||||
self.inner = DirectoryNamespace(**dir_props)
|
||||
def get_describe_call_count(namespace_client) -> int:
|
||||
"""Get the number of describe_table calls made to the namespace client."""
|
||||
return namespace_client.retrieve_ops_metrics().get("describe_table", 0)
|
||||
|
||||
def get_describe_call_count(self) -> int:
|
||||
"""Thread-safe getter for describe call count."""
|
||||
with self.lock:
|
||||
return self.describe_call_count
|
||||
|
||||
def get_create_call_count(self) -> int:
|
||||
"""Thread-safe getter for create call count."""
|
||||
with self.lock:
|
||||
return self.create_call_count
|
||||
|
||||
def namespace_id(self) -> str:
|
||||
"""Return namespace identifier."""
|
||||
return f"TrackingNamespace {{ inner: {self.inner.namespace_id()} }}"
|
||||
|
||||
def _modify_storage_options(
|
||||
self, storage_options: Dict[str, str], count: int
|
||||
) -> Dict[str, str]:
|
||||
"""
|
||||
Add incrementing credentials with expiration timestamp.
|
||||
|
||||
This simulates a credential rotation system where each call returns
|
||||
new credentials that expire after credential_expires_in_seconds.
|
||||
"""
|
||||
# Start from base storage options (endpoint, region, allow_http, etc.)
|
||||
# because DirectoryNamespace returns None for storage_options from
|
||||
# describe_table/declare_table when no credential vendor is configured.
|
||||
modified = copy.deepcopy(self.base_storage_options)
|
||||
if storage_options:
|
||||
modified.update(storage_options)
|
||||
|
||||
# Increment credentials to simulate rotation
|
||||
modified["aws_access_key_id"] = f"AKID_{count}"
|
||||
modified["aws_secret_access_key"] = f"SECRET_{count}"
|
||||
modified["aws_session_token"] = f"TOKEN_{count}"
|
||||
|
||||
# Set expiration time
|
||||
expires_at_millis = int(
|
||||
(time.time() + self.credential_expires_in_seconds) * 1000
|
||||
)
|
||||
modified["expires_at_millis"] = str(expires_at_millis)
|
||||
|
||||
return modified
|
||||
|
||||
def declare_table(self, request: DeclareTableRequest) -> DeclareTableResponse:
|
||||
"""Track declare_table calls and inject rotating credentials."""
|
||||
with self.lock:
|
||||
self.create_call_count += 1
|
||||
count = self.create_call_count
|
||||
|
||||
response = self.inner.declare_table(request)
|
||||
response.storage_options = self._modify_storage_options(
|
||||
response.storage_options, count
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
def create_empty_table(
|
||||
self, request: CreateEmptyTableRequest
|
||||
) -> CreateEmptyTableResponse:
|
||||
"""Track create_empty_table calls and inject rotating credentials."""
|
||||
with self.lock:
|
||||
self.create_call_count += 1
|
||||
count = self.create_call_count
|
||||
|
||||
response = self.inner.create_empty_table(request)
|
||||
response.storage_options = self._modify_storage_options(
|
||||
response.storage_options, count
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
def describe_table(self, request: DescribeTableRequest) -> DescribeTableResponse:
|
||||
"""Track describe_table calls and inject rotating credentials."""
|
||||
with self.lock:
|
||||
self.describe_call_count += 1
|
||||
count = self.describe_call_count
|
||||
|
||||
response = self.inner.describe_table(request)
|
||||
response.storage_options = self._modify_storage_options(
|
||||
response.storage_options, count
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
# Pass through other methods to inner namespace
|
||||
def list_tables(self, request):
|
||||
return self.inner.list_tables(request)
|
||||
|
||||
def drop_table(self, request):
|
||||
return self.inner.drop_table(request)
|
||||
|
||||
def list_namespaces(self, request):
|
||||
return self.inner.list_namespaces(request)
|
||||
|
||||
def create_namespace(self, request):
|
||||
return self.inner.create_namespace(request)
|
||||
|
||||
def drop_namespace(self, request):
|
||||
return self.inner.drop_namespace(request)
|
||||
def get_declare_call_count(namespace_client) -> int:
|
||||
"""Get the number of declare_table calls made to the namespace client."""
|
||||
return namespace_client.retrieve_ops_metrics().get("declare_table", 0)
|
||||
|
||||
|
||||
@pytest.mark.s3_test
|
||||
def test_namespace_create_table_with_provider(s3_bucket: str):
|
||||
@pytest.mark.parametrize("use_custom", [False, True], ids=["DirectoryNS", "CustomNS"])
|
||||
def test_namespace_create_table_with_provider(s3_bucket: str, use_custom: bool):
|
||||
"""
|
||||
Test creating a table through namespace with storage options provider.
|
||||
|
||||
Verifies:
|
||||
- create_empty_table is called once to reserve location
|
||||
- declare_table is called once to reserve location
|
||||
- Storage options provider is auto-created
|
||||
- Table can be written successfully
|
||||
- Credentials are cached during write operations
|
||||
"""
|
||||
storage_options = copy.deepcopy(CONFIG)
|
||||
|
||||
namespace = TrackingNamespace(
|
||||
ns_client, inner_ns_client = create_tracking_namespace(
|
||||
bucket_name=s3_bucket,
|
||||
storage_options=storage_options,
|
||||
credential_expires_in_seconds=3600, # 1 hour
|
||||
use_custom=use_custom,
|
||||
)
|
||||
|
||||
db = LanceNamespaceDBConnection(namespace)
|
||||
db = LanceNamespaceDBConnection(ns_client)
|
||||
|
||||
# Create unique namespace for this test
|
||||
namespace_name = f"test_ns_{uuid.uuid4().hex[:8]}"
|
||||
@@ -254,8 +296,8 @@ def test_namespace_create_table_with_provider(s3_bucket: str):
|
||||
namespace_path = [namespace_name]
|
||||
|
||||
# Verify initial state
|
||||
assert namespace.get_create_call_count() == 0
|
||||
assert namespace.get_describe_call_count() == 0
|
||||
assert get_declare_call_count(inner_ns_client) == 0
|
||||
assert get_describe_call_count(inner_ns_client) == 0
|
||||
|
||||
# Create table with data
|
||||
data = pa.table(
|
||||
@@ -266,12 +308,12 @@ def test_namespace_create_table_with_provider(s3_bucket: str):
|
||||
}
|
||||
)
|
||||
|
||||
table = db.create_table(table_name, data, namespace=namespace_path)
|
||||
table = db.create_table(table_name, data, namespace_path=namespace_path)
|
||||
|
||||
# Verify create_empty_table was called exactly once
|
||||
assert namespace.get_create_call_count() == 1
|
||||
# Verify declare_table was called exactly once
|
||||
assert get_declare_call_count(inner_ns_client) == 1
|
||||
# describe_table should NOT be called during create in create mode
|
||||
assert namespace.get_describe_call_count() == 0
|
||||
assert get_describe_call_count(inner_ns_client) == 0
|
||||
|
||||
# Verify table was created successfully
|
||||
assert table.name == table_name
|
||||
@@ -281,7 +323,8 @@ def test_namespace_create_table_with_provider(s3_bucket: str):
|
||||
|
||||
|
||||
@pytest.mark.s3_test
|
||||
def test_namespace_open_table_with_provider(s3_bucket: str):
|
||||
@pytest.mark.parametrize("use_custom", [False, True], ids=["DirectoryNS", "CustomNS"])
|
||||
def test_namespace_open_table_with_provider(s3_bucket: str, use_custom: bool):
|
||||
"""
|
||||
Test opening a table through namespace with storage options provider.
|
||||
|
||||
@@ -293,13 +336,14 @@ def test_namespace_open_table_with_provider(s3_bucket: str):
|
||||
"""
|
||||
storage_options = copy.deepcopy(CONFIG)
|
||||
|
||||
namespace = TrackingNamespace(
|
||||
ns_client, inner_ns_client = create_tracking_namespace(
|
||||
bucket_name=s3_bucket,
|
||||
storage_options=storage_options,
|
||||
credential_expires_in_seconds=3600,
|
||||
use_custom=use_custom,
|
||||
)
|
||||
|
||||
db = LanceNamespaceDBConnection(namespace)
|
||||
db = LanceNamespaceDBConnection(ns_client)
|
||||
|
||||
# Create unique namespace for this test
|
||||
namespace_name = f"test_ns_{uuid.uuid4().hex[:8]}"
|
||||
@@ -317,21 +361,21 @@ def test_namespace_open_table_with_provider(s3_bucket: str):
|
||||
}
|
||||
)
|
||||
|
||||
db.create_table(table_name, data, namespace=namespace_path)
|
||||
db.create_table(table_name, data, namespace_path=namespace_path)
|
||||
|
||||
initial_create_count = namespace.get_create_call_count()
|
||||
assert initial_create_count == 1
|
||||
initial_declare_count = get_declare_call_count(inner_ns_client)
|
||||
assert initial_declare_count == 1
|
||||
|
||||
# Open the table
|
||||
opened_table = db.open_table(table_name, namespace=namespace_path)
|
||||
opened_table = db.open_table(table_name, namespace_path=namespace_path)
|
||||
|
||||
# Verify describe_table was called exactly once
|
||||
assert namespace.get_describe_call_count() == 1
|
||||
# create_empty_table should not be called again
|
||||
assert namespace.get_create_call_count() == initial_create_count
|
||||
assert get_describe_call_count(inner_ns_client) == 1
|
||||
# declare_table should not be called again
|
||||
assert get_declare_call_count(inner_ns_client) == initial_declare_count
|
||||
|
||||
# Perform multiple read operations
|
||||
describe_count_after_open = namespace.get_describe_call_count()
|
||||
describe_count_after_open = get_describe_call_count(inner_ns_client)
|
||||
|
||||
for _ in range(3):
|
||||
result = opened_table.to_pandas()
|
||||
@@ -340,11 +384,12 @@ def test_namespace_open_table_with_provider(s3_bucket: str):
|
||||
assert count == 5
|
||||
|
||||
# Verify credentials were cached (no additional describe_table calls)
|
||||
assert namespace.get_describe_call_count() == describe_count_after_open
|
||||
assert get_describe_call_count(inner_ns_client) == describe_count_after_open
|
||||
|
||||
|
||||
@pytest.mark.s3_test
|
||||
def test_namespace_credential_refresh_on_read(s3_bucket: str):
|
||||
@pytest.mark.parametrize("use_custom", [False, True], ids=["DirectoryNS", "CustomNS"])
|
||||
def test_namespace_credential_refresh_on_read(s3_bucket: str, use_custom: bool):
|
||||
"""
|
||||
Test credential refresh when credentials expire during read operations.
|
||||
|
||||
@@ -355,13 +400,14 @@ def test_namespace_credential_refresh_on_read(s3_bucket: str):
|
||||
"""
|
||||
storage_options = copy.deepcopy(CONFIG)
|
||||
|
||||
namespace = TrackingNamespace(
|
||||
ns_client, inner_ns_client = create_tracking_namespace(
|
||||
bucket_name=s3_bucket,
|
||||
storage_options=storage_options,
|
||||
credential_expires_in_seconds=3, # Short expiration for testing
|
||||
use_custom=use_custom,
|
||||
)
|
||||
|
||||
db = LanceNamespaceDBConnection(namespace)
|
||||
db = LanceNamespaceDBConnection(ns_client)
|
||||
|
||||
# Create unique namespace for this test
|
||||
namespace_name = f"test_ns_{uuid.uuid4().hex[:8]}"
|
||||
@@ -378,16 +424,16 @@ def test_namespace_credential_refresh_on_read(s3_bucket: str):
|
||||
}
|
||||
)
|
||||
|
||||
db.create_table(table_name, data, namespace=namespace_path)
|
||||
db.create_table(table_name, data, namespace_path=namespace_path)
|
||||
|
||||
# Open table (triggers describe_table)
|
||||
opened_table = db.open_table(table_name, namespace=namespace_path)
|
||||
opened_table = db.open_table(table_name, namespace_path=namespace_path)
|
||||
|
||||
# Perform an immediate read (should use credentials from open)
|
||||
result = opened_table.to_pandas()
|
||||
assert len(result) == 3
|
||||
|
||||
describe_count_after_first_read = namespace.get_describe_call_count()
|
||||
describe_count_after_first_read = get_describe_call_count(inner_ns_client)
|
||||
|
||||
# Wait for credentials to expire (3 seconds + buffer)
|
||||
time.sleep(5)
|
||||
@@ -396,7 +442,7 @@ def test_namespace_credential_refresh_on_read(s3_bucket: str):
|
||||
result = opened_table.to_pandas()
|
||||
assert len(result) == 3
|
||||
|
||||
describe_count_after_refresh = namespace.get_describe_call_count()
|
||||
describe_count_after_refresh = get_describe_call_count(inner_ns_client)
|
||||
# Verify describe_table was called again (credential refresh)
|
||||
refresh_delta = describe_count_after_refresh - describe_count_after_first_read
|
||||
|
||||
@@ -409,7 +455,8 @@ def test_namespace_credential_refresh_on_read(s3_bucket: str):
|
||||
|
||||
|
||||
@pytest.mark.s3_test
|
||||
def test_namespace_credential_refresh_on_write(s3_bucket: str):
|
||||
@pytest.mark.parametrize("use_custom", [False, True], ids=["DirectoryNS", "CustomNS"])
|
||||
def test_namespace_credential_refresh_on_write(s3_bucket: str, use_custom: bool):
|
||||
"""
|
||||
Test credential refresh when credentials expire during write operations.
|
||||
|
||||
@@ -420,13 +467,14 @@ def test_namespace_credential_refresh_on_write(s3_bucket: str):
|
||||
"""
|
||||
storage_options = copy.deepcopy(CONFIG)
|
||||
|
||||
namespace = TrackingNamespace(
|
||||
ns_client, inner_ns_client = create_tracking_namespace(
|
||||
bucket_name=s3_bucket,
|
||||
storage_options=storage_options,
|
||||
credential_expires_in_seconds=3, # Short expiration
|
||||
use_custom=use_custom,
|
||||
)
|
||||
|
||||
db = LanceNamespaceDBConnection(namespace)
|
||||
db = LanceNamespaceDBConnection(ns_client)
|
||||
|
||||
# Create unique namespace for this test
|
||||
namespace_name = f"test_ns_{uuid.uuid4().hex[:8]}"
|
||||
@@ -443,7 +491,7 @@ def test_namespace_credential_refresh_on_write(s3_bucket: str):
|
||||
}
|
||||
)
|
||||
|
||||
table = db.create_table(table_name, initial_data, namespace=namespace_path)
|
||||
table = db.create_table(table_name, initial_data, namespace_path=namespace_path)
|
||||
|
||||
# Add more data (should use cached credentials)
|
||||
new_data = pa.table(
|
||||
@@ -471,24 +519,26 @@ def test_namespace_credential_refresh_on_write(s3_bucket: str):
|
||||
|
||||
|
||||
@pytest.mark.s3_test
|
||||
def test_namespace_overwrite_mode(s3_bucket: str):
|
||||
@pytest.mark.parametrize("use_custom", [False, True], ids=["DirectoryNS", "CustomNS"])
|
||||
def test_namespace_overwrite_mode(s3_bucket: str, use_custom: bool):
|
||||
"""
|
||||
Test creating table in overwrite mode with credential tracking.
|
||||
|
||||
Verifies:
|
||||
- First create calls create_empty_table exactly once
|
||||
- First create calls declare_table exactly once
|
||||
- Overwrite mode calls describe_table exactly once to check existence
|
||||
- Storage options provider works in overwrite mode
|
||||
"""
|
||||
storage_options = copy.deepcopy(CONFIG)
|
||||
|
||||
namespace = TrackingNamespace(
|
||||
ns_client, inner_ns_client = create_tracking_namespace(
|
||||
bucket_name=s3_bucket,
|
||||
storage_options=storage_options,
|
||||
credential_expires_in_seconds=3600,
|
||||
use_custom=use_custom,
|
||||
)
|
||||
|
||||
db = LanceNamespaceDBConnection(namespace)
|
||||
db = LanceNamespaceDBConnection(ns_client)
|
||||
|
||||
# Create unique namespace for this test
|
||||
namespace_name = f"test_ns_{uuid.uuid4().hex[:8]}"
|
||||
@@ -505,11 +555,11 @@ def test_namespace_overwrite_mode(s3_bucket: str):
|
||||
}
|
||||
)
|
||||
|
||||
table = db.create_table(table_name, data1, namespace=namespace_path)
|
||||
# Exactly one create_empty_table call for initial create
|
||||
assert namespace.get_create_call_count() == 1
|
||||
table = db.create_table(table_name, data1, namespace_path=namespace_path)
|
||||
# Exactly one declare_table call for initial create
|
||||
assert get_declare_call_count(inner_ns_client) == 1
|
||||
# No describe_table calls in create mode
|
||||
assert namespace.get_describe_call_count() == 0
|
||||
assert get_describe_call_count(inner_ns_client) == 0
|
||||
assert table.count_rows() == 2
|
||||
|
||||
# Overwrite the table
|
||||
@@ -521,14 +571,14 @@ def test_namespace_overwrite_mode(s3_bucket: str):
|
||||
)
|
||||
|
||||
table2 = db.create_table(
|
||||
table_name, data2, namespace=namespace_path, mode="overwrite"
|
||||
table_name, data2, namespace_path=namespace_path, mode="overwrite"
|
||||
)
|
||||
|
||||
# Should still have only 1 create_empty_table call
|
||||
# Should still have only 1 declare_table call
|
||||
# (overwrite reuses location from describe_table)
|
||||
assert namespace.get_create_call_count() == 1
|
||||
assert get_declare_call_count(inner_ns_client) == 1
|
||||
# Should have called describe_table exactly once to get existing table location
|
||||
assert namespace.get_describe_call_count() == 1
|
||||
assert get_describe_call_count(inner_ns_client) == 1
|
||||
|
||||
# Verify new data
|
||||
assert table2.count_rows() == 3
|
||||
@@ -537,7 +587,8 @@ def test_namespace_overwrite_mode(s3_bucket: str):
|
||||
|
||||
|
||||
@pytest.mark.s3_test
|
||||
def test_namespace_multiple_tables(s3_bucket: str):
|
||||
@pytest.mark.parametrize("use_custom", [False, True], ids=["DirectoryNS", "CustomNS"])
|
||||
def test_namespace_multiple_tables(s3_bucket: str, use_custom: bool):
|
||||
"""
|
||||
Test creating and opening multiple tables in the same namespace.
|
||||
|
||||
@@ -548,13 +599,14 @@ def test_namespace_multiple_tables(s3_bucket: str):
|
||||
"""
|
||||
storage_options = copy.deepcopy(CONFIG)
|
||||
|
||||
namespace = TrackingNamespace(
|
||||
ns_client, inner_ns_client = create_tracking_namespace(
|
||||
bucket_name=s3_bucket,
|
||||
storage_options=storage_options,
|
||||
credential_expires_in_seconds=3600,
|
||||
use_custom=use_custom,
|
||||
)
|
||||
|
||||
db = LanceNamespaceDBConnection(namespace)
|
||||
db = LanceNamespaceDBConnection(ns_client)
|
||||
|
||||
# Create unique namespace for this test
|
||||
namespace_name = f"test_ns_{uuid.uuid4().hex[:8]}"
|
||||
@@ -564,22 +616,22 @@ def test_namespace_multiple_tables(s3_bucket: str):
|
||||
# Create first table
|
||||
table1_name = f"table1_{uuid.uuid4().hex}"
|
||||
data1 = pa.table({"id": [1, 2], "value": [10, 20]})
|
||||
db.create_table(table1_name, data1, namespace=namespace_path)
|
||||
db.create_table(table1_name, data1, namespace_path=namespace_path)
|
||||
|
||||
# Create second table
|
||||
table2_name = f"table2_{uuid.uuid4().hex}"
|
||||
data2 = pa.table({"id": [3, 4], "value": [30, 40]})
|
||||
db.create_table(table2_name, data2, namespace=namespace_path)
|
||||
db.create_table(table2_name, data2, namespace_path=namespace_path)
|
||||
|
||||
# Should have 2 create calls (one per table)
|
||||
assert namespace.get_create_call_count() == 2
|
||||
# Should have 2 declare calls (one per table)
|
||||
assert get_declare_call_count(inner_ns_client) == 2
|
||||
|
||||
# Open both tables
|
||||
opened1 = db.open_table(table1_name, namespace=namespace_path)
|
||||
opened2 = db.open_table(table2_name, namespace=namespace_path)
|
||||
opened1 = db.open_table(table1_name, namespace_path=namespace_path)
|
||||
opened2 = db.open_table(table2_name, namespace_path=namespace_path)
|
||||
|
||||
# Should have 2 describe calls (one per open)
|
||||
assert namespace.get_describe_call_count() == 2
|
||||
assert get_describe_call_count(inner_ns_client) == 2
|
||||
|
||||
# Verify both tables work independently
|
||||
assert opened1.count_rows() == 2
|
||||
@@ -593,7 +645,8 @@ def test_namespace_multiple_tables(s3_bucket: str):
|
||||
|
||||
|
||||
@pytest.mark.s3_test
|
||||
def test_namespace_with_schema_only(s3_bucket: str):
|
||||
@pytest.mark.parametrize("use_custom", [False, True], ids=["DirectoryNS", "CustomNS"])
|
||||
def test_namespace_with_schema_only(s3_bucket: str, use_custom: bool):
|
||||
"""
|
||||
Test creating empty table with schema only (no data).
|
||||
|
||||
@@ -604,13 +657,14 @@ def test_namespace_with_schema_only(s3_bucket: str):
|
||||
"""
|
||||
storage_options = copy.deepcopy(CONFIG)
|
||||
|
||||
namespace = TrackingNamespace(
|
||||
ns_client, inner_ns_client = create_tracking_namespace(
|
||||
bucket_name=s3_bucket,
|
||||
storage_options=storage_options,
|
||||
credential_expires_in_seconds=3600,
|
||||
use_custom=use_custom,
|
||||
)
|
||||
|
||||
db = LanceNamespaceDBConnection(namespace)
|
||||
db = LanceNamespaceDBConnection(ns_client)
|
||||
|
||||
# Create unique namespace for this test
|
||||
namespace_name = f"test_ns_{uuid.uuid4().hex[:8]}"
|
||||
@@ -628,12 +682,12 @@ def test_namespace_with_schema_only(s3_bucket: str):
|
||||
]
|
||||
)
|
||||
|
||||
table = db.create_table(table_name, schema=schema, namespace=namespace_path)
|
||||
table = db.create_table(table_name, schema=schema, namespace_path=namespace_path)
|
||||
|
||||
# Should have called create_empty_table once
|
||||
assert namespace.get_create_call_count() == 1
|
||||
# Should have called declare_table once
|
||||
assert get_declare_call_count(inner_ns_client) == 1
|
||||
# Should NOT have called describe_table in create mode
|
||||
assert namespace.get_describe_call_count() == 0
|
||||
assert get_describe_call_count(inner_ns_client) == 0
|
||||
|
||||
# Verify empty table
|
||||
assert table.count_rows() == 0
|
||||
|
||||
@@ -30,6 +30,7 @@ from lancedb.query import (
|
||||
PhraseQuery,
|
||||
Query,
|
||||
FullTextSearchQuery,
|
||||
ensure_vector_query,
|
||||
)
|
||||
from lancedb.rerankers.cross_encoder import CrossEncoderReranker
|
||||
from lancedb.table import AsyncTable, LanceTable
|
||||
@@ -1501,6 +1502,18 @@ def test_search_empty_table(mem_db):
|
||||
assert results == []
|
||||
|
||||
|
||||
def test_ensure_vector_query_empty_list():
|
||||
"""Regression: ensure_vector_query used to return instead of raise ValueError."""
|
||||
with pytest.raises(ValueError, match="non-empty"):
|
||||
ensure_vector_query([])
|
||||
|
||||
|
||||
def test_ensure_vector_query_nested_empty_list():
|
||||
"""Regression: ensure_vector_query used to return instead of raise ValueError."""
|
||||
with pytest.raises(ValueError, match="non-empty"):
|
||||
ensure_vector_query([[]])
|
||||
|
||||
|
||||
def test_fast_search(tmp_path):
|
||||
db = lancedb.connect(tmp_path)
|
||||
|
||||
|
||||
@@ -1201,6 +1201,18 @@ async def test_header_provider_overrides_static_headers():
|
||||
await db.table_names()
|
||||
|
||||
|
||||
def test_close():
|
||||
"""Test that close() works without AttributeError."""
|
||||
import asyncio
|
||||
|
||||
def handler(req):
|
||||
req.send_response(200)
|
||||
req.end_headers()
|
||||
|
||||
with mock_lancedb_connection(handler) as db:
|
||||
asyncio.run(db.close())
|
||||
|
||||
|
||||
@pytest.mark.parametrize("exception", [KeyboardInterrupt, SystemExit, GeneratorExit])
|
||||
def test_background_loop_cancellation(exception):
|
||||
"""Test that BackgroundEventLoop.run() cancels the future on interrupt."""
|
||||
|
||||
@@ -527,6 +527,132 @@ async def test_add_async(mem_db_async: AsyncConnection):
|
||||
assert await table.count_rows() == 3
|
||||
|
||||
|
||||
def test_add_overwrite_infers_vector_schema(mem_db: DBConnection):
|
||||
"""Overwrite should infer vector columns the same way create_table does.
|
||||
|
||||
Regression test for https://github.com/lancedb/lancedb/issues/3183
|
||||
"""
|
||||
table = mem_db.create_table(
|
||||
"test_overwrite_vec",
|
||||
data=[
|
||||
{"vector": [1.0, 2.0, 3.0, 4.0], "item": "foo"},
|
||||
{"vector": [5.0, 6.0, 7.0, 8.0], "item": "bar"},
|
||||
],
|
||||
)
|
||||
# create_table infers vector as fixed_size_list<float32, 4>
|
||||
original_type = table.schema.field("vector").type
|
||||
assert pa.types.is_fixed_size_list(original_type)
|
||||
|
||||
# overwrite with plain Python lists (PyArrow infers list<double>)
|
||||
table.add(
|
||||
[
|
||||
{"vector": [10.0, 20.0, 30.0, 40.0], "item": "baz"},
|
||||
],
|
||||
mode="overwrite",
|
||||
)
|
||||
# overwrite should infer vector column the same way as create_table
|
||||
new_type = table.schema.field("vector").type
|
||||
assert pa.types.is_fixed_size_list(new_type), (
|
||||
f"Expected fixed_size_list after overwrite, got {new_type}"
|
||||
)
|
||||
|
||||
|
||||
def test_add_progress_callback(mem_db: DBConnection):
|
||||
table = mem_db.create_table(
|
||||
"test",
|
||||
data=[{"id": 1}, {"id": 2}],
|
||||
)
|
||||
|
||||
updates = []
|
||||
table.add([{"id": 3}, {"id": 4}], progress=lambda p: updates.append(dict(p)))
|
||||
|
||||
assert len(table) == 4
|
||||
# The done callback always fires, so we should always get at least one.
|
||||
assert len(updates) >= 1, "expected at least one progress callback"
|
||||
for p in updates:
|
||||
assert "output_rows" in p
|
||||
assert "output_bytes" in p
|
||||
assert "total_rows" in p
|
||||
assert "elapsed_seconds" in p
|
||||
assert "active_tasks" in p
|
||||
assert "total_tasks" in p
|
||||
assert "done" in p
|
||||
# The last callback should have done=True.
|
||||
assert updates[-1]["done"] is True
|
||||
|
||||
|
||||
def test_add_progress_tqdm_like(mem_db: DBConnection):
|
||||
"""Test that a tqdm-like object gets total set and update() called."""
|
||||
|
||||
class FakeBar:
|
||||
def __init__(self):
|
||||
self.total = None
|
||||
self.n = 0
|
||||
self.postfix = None
|
||||
|
||||
def update(self, n):
|
||||
self.n += n
|
||||
|
||||
def set_postfix_str(self, s):
|
||||
self.postfix = s
|
||||
|
||||
def refresh(self):
|
||||
pass
|
||||
|
||||
table = mem_db.create_table(
|
||||
"test",
|
||||
data=[{"id": 1}, {"id": 2}],
|
||||
)
|
||||
|
||||
bar = FakeBar()
|
||||
table.add([{"id": 3}, {"id": 4}], progress=bar)
|
||||
|
||||
assert len(table) == 4
|
||||
# Postfix should contain throughput and worker count
|
||||
if bar.postfix is not None:
|
||||
assert "MB/s" in bar.postfix
|
||||
assert "workers" in bar.postfix
|
||||
|
||||
|
||||
def test_add_progress_bool(mem_db: DBConnection):
|
||||
"""Test that progress=True creates and closes a tqdm bar automatically."""
|
||||
table = mem_db.create_table(
|
||||
"test",
|
||||
data=[{"id": 1}, {"id": 2}],
|
||||
)
|
||||
|
||||
table.add([{"id": 3}, {"id": 4}], progress=True)
|
||||
assert len(table) == 4
|
||||
|
||||
# progress=False should be the same as None
|
||||
table.add([{"id": 5}], progress=False)
|
||||
assert len(table) == 5
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_add_progress_callback_async(mem_db_async: AsyncConnection):
|
||||
"""Progress callbacks work through the async path too."""
|
||||
table = await mem_db_async.create_table("test", data=[{"id": 1}, {"id": 2}])
|
||||
|
||||
updates = []
|
||||
await table.add([{"id": 3}, {"id": 4}], progress=lambda p: updates.append(dict(p)))
|
||||
|
||||
assert await table.count_rows() == 4
|
||||
assert len(updates) >= 1
|
||||
assert updates[-1]["done"] is True
|
||||
|
||||
|
||||
def test_add_progress_callback_error(mem_db: DBConnection):
|
||||
"""A failing callback must not prevent the write from succeeding."""
|
||||
table = mem_db.create_table("test", data=[{"id": 1}, {"id": 2}])
|
||||
|
||||
def bad_callback(p):
|
||||
raise RuntimeError("boom")
|
||||
|
||||
table.add([{"id": 3}, {"id": 4}], progress=bad_callback)
|
||||
assert len(table) == 4
|
||||
|
||||
|
||||
def test_polars(mem_db: DBConnection):
|
||||
data = {
|
||||
"vector": [[3.1, 4.1], [5.9, 26.5]],
|
||||
@@ -1982,7 +2108,7 @@ def test_stats(mem_db: DBConnection):
|
||||
stats = table.stats()
|
||||
print(f"{stats=}")
|
||||
assert stats == {
|
||||
"total_bytes": 38,
|
||||
"total_bytes": 60,
|
||||
"num_rows": 2,
|
||||
"num_indices": 0,
|
||||
"fragment_stats": {
|
||||
@@ -2047,3 +2173,33 @@ def test_table_uri(tmp_path):
|
||||
db = lancedb.connect(tmp_path)
|
||||
table = db.create_table("my_table", data=[{"x": 0}])
|
||||
assert table.uri == str(tmp_path / "my_table.lance")
|
||||
|
||||
|
||||
def test_sanitize_data_metadata_not_stripped():
|
||||
"""Regression test: dict.update() returns None, so assigning its result
|
||||
would silently replace metadata with None, causing with_metadata(None)
|
||||
to strip all schema metadata from the target schema."""
|
||||
from lancedb.table import _sanitize_data
|
||||
|
||||
schema = pa.schema(
|
||||
[pa.field("x", pa.int64())],
|
||||
metadata={b"existing_key": b"existing_value"},
|
||||
)
|
||||
batch = pa.record_batch([pa.array([1, 2, 3])], schema=schema)
|
||||
|
||||
# Use a different field type so the reader and target schemas differ,
|
||||
# forcing _cast_to_target_schema to rebuild the schema with the
|
||||
# target's metadata (instead of taking the fast-path).
|
||||
target_schema = pa.schema(
|
||||
[pa.field("x", pa.int32())],
|
||||
metadata={b"existing_key": b"existing_value"},
|
||||
)
|
||||
|
||||
reader = pa.RecordBatchReader.from_batches(schema, [batch])
|
||||
metadata = {b"new_key": b"new_value"}
|
||||
result = _sanitize_data(reader, target_schema=target_schema, metadata=metadata)
|
||||
|
||||
result_schema = result.schema
|
||||
assert result_schema.metadata is not None
|
||||
assert result_schema.metadata[b"existing_key"] == b"existing_value"
|
||||
assert result_schema.metadata[b"new_key"] == b"new_value"
|
||||
|
||||
@@ -17,8 +17,9 @@ use pyo3::{
|
||||
use pyo3_async_runtimes::tokio::future_into_py;
|
||||
|
||||
use crate::{
|
||||
error::PythonErrorExt, namespace::extract_namespace_arc,
|
||||
storage_options::py_object_to_storage_options_provider, table::Table,
|
||||
error::PythonErrorExt,
|
||||
namespace::{create_namespace_storage_options_provider, extract_namespace_arc},
|
||||
table::Table,
|
||||
};
|
||||
|
||||
#[pyclass]
|
||||
@@ -87,16 +88,16 @@ impl Connection {
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (namespace=vec![], start_after=None, limit=None))]
|
||||
#[pyo3(signature = (namespace_path=None, start_after=None, limit=None))]
|
||||
pub fn table_names(
|
||||
self_: PyRef<'_, Self>,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Option<Vec<String>>,
|
||||
start_after: Option<String>,
|
||||
limit: Option<u32>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
let mut op = inner.table_names();
|
||||
op = op.namespace(namespace);
|
||||
op = op.namespace(namespace_path.unwrap_or_default());
|
||||
if let Some(start_after) = start_after {
|
||||
op = op.start_after(start_after);
|
||||
}
|
||||
@@ -107,34 +108,43 @@ impl Connection {
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[pyo3(signature = (name, mode, data, namespace=vec![], storage_options=None, storage_options_provider=None, location=None))]
|
||||
#[pyo3(signature = (name, mode, data, namespace_path=None, storage_options=None, location=None, namespace_client=None))]
|
||||
pub fn create_table<'a>(
|
||||
self_: PyRef<'a, Self>,
|
||||
name: String,
|
||||
mode: &str,
|
||||
data: Bound<'_, PyAny>,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Option<Vec<String>>,
|
||||
storage_options: Option<HashMap<String, String>>,
|
||||
storage_options_provider: Option<Py<PyAny>>,
|
||||
location: Option<String>,
|
||||
namespace_client: Option<Py<PyAny>>,
|
||||
) -> PyResult<Bound<'a, PyAny>> {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
let py = self_.py();
|
||||
|
||||
let mode = Self::parse_create_mode_str(mode)?;
|
||||
|
||||
let batches: Box<dyn arrow::array::RecordBatchReader + Send> =
|
||||
Box::new(ArrowArrayStreamReader::from_pyarrow_bound(&data)?);
|
||||
|
||||
let mut builder = inner.create_table(name, batches).mode(mode);
|
||||
let ns_path = namespace_path.clone().unwrap_or_default();
|
||||
let mut builder = inner.create_table(name.clone(), batches).mode(mode);
|
||||
|
||||
builder = builder.namespace(namespace);
|
||||
builder = builder.namespace(ns_path.clone());
|
||||
if let Some(storage_options) = storage_options {
|
||||
builder = builder.storage_options(storage_options);
|
||||
}
|
||||
if let Some(provider_obj) = storage_options_provider {
|
||||
let provider = py_object_to_storage_options_provider(provider_obj)?;
|
||||
|
||||
// Auto-create storage options provider from namespace_client
|
||||
if let Some(ns_obj) = namespace_client {
|
||||
let ns_client = extract_namespace_arc(py, ns_obj)?;
|
||||
// Create table_id by combining namespace_path with table name
|
||||
let mut table_id = ns_path;
|
||||
table_id.push(name);
|
||||
let provider = create_namespace_storage_options_provider(ns_client, table_id);
|
||||
builder = builder.storage_options_provider(provider);
|
||||
}
|
||||
|
||||
if let Some(location) = location {
|
||||
builder = builder.location(location);
|
||||
}
|
||||
@@ -146,33 +156,44 @@ impl Connection {
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[pyo3(signature = (name, mode, schema, namespace=vec![], storage_options=None, storage_options_provider=None, location=None))]
|
||||
#[pyo3(signature = (name, mode, schema, namespace_path=None, storage_options=None, location=None, namespace_client=None))]
|
||||
pub fn create_empty_table<'a>(
|
||||
self_: PyRef<'a, Self>,
|
||||
name: String,
|
||||
mode: &str,
|
||||
schema: Bound<'_, PyAny>,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Option<Vec<String>>,
|
||||
storage_options: Option<HashMap<String, String>>,
|
||||
storage_options_provider: Option<Py<PyAny>>,
|
||||
location: Option<String>,
|
||||
namespace_client: Option<Py<PyAny>>,
|
||||
) -> PyResult<Bound<'a, PyAny>> {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
let py = self_.py();
|
||||
|
||||
let mode = Self::parse_create_mode_str(mode)?;
|
||||
|
||||
let schema = Schema::from_pyarrow_bound(&schema)?;
|
||||
|
||||
let mut builder = inner.create_empty_table(name, Arc::new(schema)).mode(mode);
|
||||
let ns_path = namespace_path.clone().unwrap_or_default();
|
||||
let mut builder = inner
|
||||
.create_empty_table(name.clone(), Arc::new(schema))
|
||||
.mode(mode);
|
||||
|
||||
builder = builder.namespace(namespace);
|
||||
builder = builder.namespace(ns_path.clone());
|
||||
if let Some(storage_options) = storage_options {
|
||||
builder = builder.storage_options(storage_options);
|
||||
}
|
||||
if let Some(provider_obj) = storage_options_provider {
|
||||
let provider = py_object_to_storage_options_provider(provider_obj)?;
|
||||
|
||||
// Auto-create storage options provider from namespace_client
|
||||
if let Some(ns_obj) = namespace_client {
|
||||
let ns_client = extract_namespace_arc(py, ns_obj)?;
|
||||
// Create table_id by combining namespace_path with table name
|
||||
let mut table_id = ns_path;
|
||||
table_id.push(name);
|
||||
let provider = create_namespace_storage_options_provider(ns_client, table_id);
|
||||
builder = builder.storage_options_provider(provider);
|
||||
}
|
||||
|
||||
if let Some(location) = location {
|
||||
builder = builder.location(location);
|
||||
}
|
||||
@@ -184,45 +205,44 @@ impl Connection {
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[pyo3(signature = (name, namespace=vec![], storage_options = None, storage_options_provider=None, index_cache_size = None, location=None, namespace_client=None, managed_versioning=None))]
|
||||
#[pyo3(signature = (name, namespace_path=None, storage_options=None, index_cache_size=None, location=None, namespace_client=None, managed_versioning=None))]
|
||||
pub fn open_table(
|
||||
self_: PyRef<'_, Self>,
|
||||
name: String,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Option<Vec<String>>,
|
||||
storage_options: Option<HashMap<String, String>>,
|
||||
storage_options_provider: Option<Py<PyAny>>,
|
||||
index_cache_size: Option<u32>,
|
||||
location: Option<String>,
|
||||
namespace_client: Option<Py<PyAny>>,
|
||||
managed_versioning: Option<bool>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
let py = self_.py();
|
||||
|
||||
let mut builder = inner.open_table(name);
|
||||
builder = builder.namespace(namespace.clone());
|
||||
let ns_path = namespace_path.clone().unwrap_or_default();
|
||||
let mut builder = inner.open_table(name.clone());
|
||||
builder = builder.namespace(ns_path.clone());
|
||||
if let Some(storage_options) = storage_options {
|
||||
builder = builder.storage_options(storage_options);
|
||||
}
|
||||
if let Some(provider_obj) = storage_options_provider {
|
||||
let provider = py_object_to_storage_options_provider(provider_obj)?;
|
||||
|
||||
// Auto-create storage options provider from namespace_client
|
||||
if let Some(ns_obj) = namespace_client {
|
||||
let ns_client = extract_namespace_arc(py, ns_obj)?;
|
||||
// Create table_id by combining namespace_path with table name
|
||||
let mut table_id = ns_path;
|
||||
table_id.push(name);
|
||||
let provider = create_namespace_storage_options_provider(ns_client.clone(), table_id);
|
||||
builder = builder.storage_options_provider(provider);
|
||||
builder = builder.namespace_client(ns_client);
|
||||
}
|
||||
|
||||
if let Some(index_cache_size) = index_cache_size {
|
||||
builder = builder.index_cache_size(index_cache_size);
|
||||
}
|
||||
if let Some(location) = location {
|
||||
builder = builder.location(location);
|
||||
}
|
||||
// Extract namespace client from Python object if provided
|
||||
let ns_client = if let Some(ns_obj) = namespace_client {
|
||||
let py = self_.py();
|
||||
Some(extract_namespace_arc(py, ns_obj)?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
if let Some(ns_client) = ns_client {
|
||||
builder = builder.namespace_client(ns_client);
|
||||
}
|
||||
// Pass managed_versioning if provided to avoid redundant describe_table call
|
||||
if let Some(enabled) = managed_versioning {
|
||||
builder = builder.managed_versioning(enabled);
|
||||
@@ -234,12 +254,12 @@ impl Connection {
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (target_table_name, source_uri, target_namespace=vec![], source_version=None, source_tag=None, is_shallow=true))]
|
||||
#[pyo3(signature = (target_table_name, source_uri, target_namespace_path=None, source_version=None, source_tag=None, is_shallow=true))]
|
||||
pub fn clone_table(
|
||||
self_: PyRef<'_, Self>,
|
||||
target_table_name: String,
|
||||
source_uri: String,
|
||||
target_namespace: Vec<String>,
|
||||
target_namespace_path: Option<Vec<String>>,
|
||||
source_version: Option<u64>,
|
||||
source_tag: Option<String>,
|
||||
is_shallow: bool,
|
||||
@@ -247,7 +267,7 @@ impl Connection {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
|
||||
let mut builder = inner.clone_table(target_table_name, source_uri);
|
||||
builder = builder.target_namespace(target_namespace);
|
||||
builder = builder.target_namespace(target_namespace_path.unwrap_or_default());
|
||||
if let Some(version) = source_version {
|
||||
builder = builder.source_version(version);
|
||||
}
|
||||
@@ -262,52 +282,56 @@ impl Connection {
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (cur_name, new_name, cur_namespace=vec![], new_namespace=vec![]))]
|
||||
#[pyo3(signature = (cur_name, new_name, cur_namespace_path=None, new_namespace_path=None))]
|
||||
pub fn rename_table(
|
||||
self_: PyRef<'_, Self>,
|
||||
cur_name: String,
|
||||
new_name: String,
|
||||
cur_namespace: Vec<String>,
|
||||
new_namespace: Vec<String>,
|
||||
cur_namespace_path: Option<Vec<String>>,
|
||||
new_namespace_path: Option<Vec<String>>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
let cur_ns_path = cur_namespace_path.unwrap_or_default();
|
||||
let new_ns_path = new_namespace_path.unwrap_or_default();
|
||||
future_into_py(self_.py(), async move {
|
||||
inner
|
||||
.rename_table(cur_name, new_name, &cur_namespace, &new_namespace)
|
||||
.rename_table(cur_name, new_name, &cur_ns_path, &new_ns_path)
|
||||
.await
|
||||
.infer_error()
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (name, namespace=vec![]))]
|
||||
#[pyo3(signature = (name, namespace_path=None))]
|
||||
pub fn drop_table(
|
||||
self_: PyRef<'_, Self>,
|
||||
name: String,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Option<Vec<String>>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
let ns_path = namespace_path.unwrap_or_default();
|
||||
future_into_py(self_.py(), async move {
|
||||
inner.drop_table(name, &namespace).await.infer_error()
|
||||
inner.drop_table(name, &ns_path).await.infer_error()
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (namespace=vec![],))]
|
||||
#[pyo3(signature = (namespace_path=None,))]
|
||||
pub fn drop_all_tables(
|
||||
self_: PyRef<'_, Self>,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Option<Vec<String>>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
let ns_path = namespace_path.unwrap_or_default();
|
||||
future_into_py(self_.py(), async move {
|
||||
inner.drop_all_tables(&namespace).await.infer_error()
|
||||
inner.drop_all_tables(&ns_path).await.infer_error()
|
||||
})
|
||||
}
|
||||
|
||||
// Namespace management methods
|
||||
|
||||
#[pyo3(signature = (namespace=vec![], page_token=None, limit=None))]
|
||||
#[pyo3(signature = (namespace_path=None, page_token=None, limit=None))]
|
||||
pub fn list_namespaces(
|
||||
self_: PyRef<'_, Self>,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Option<Vec<String>>,
|
||||
page_token: Option<String>,
|
||||
limit: Option<u32>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
@@ -316,11 +340,7 @@ impl Connection {
|
||||
future_into_py(py, async move {
|
||||
use lance_namespace::models::ListNamespacesRequest;
|
||||
let request = ListNamespacesRequest {
|
||||
id: if namespace.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(namespace)
|
||||
},
|
||||
id: namespace_path,
|
||||
page_token,
|
||||
limit: limit.map(|l| l as i32),
|
||||
..Default::default()
|
||||
@@ -335,10 +355,10 @@ impl Connection {
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (namespace, mode=None, properties=None))]
|
||||
#[pyo3(signature = (namespace_path, mode=None, properties=None))]
|
||||
pub fn create_namespace(
|
||||
self_: PyRef<'_, Self>,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Vec<String>,
|
||||
mode: Option<String>,
|
||||
properties: Option<std::collections::HashMap<String, String>>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
@@ -354,11 +374,7 @@ impl Connection {
|
||||
_ => None,
|
||||
});
|
||||
let request = CreateNamespaceRequest {
|
||||
id: if namespace.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(namespace)
|
||||
},
|
||||
id: Some(namespace_path),
|
||||
mode: mode_str,
|
||||
properties,
|
||||
..Default::default()
|
||||
@@ -372,10 +388,10 @@ impl Connection {
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (namespace, mode=None, behavior=None))]
|
||||
#[pyo3(signature = (namespace_path, mode=None, behavior=None))]
|
||||
pub fn drop_namespace(
|
||||
self_: PyRef<'_, Self>,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Vec<String>,
|
||||
mode: Option<String>,
|
||||
behavior: Option<String>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
@@ -395,11 +411,7 @@ impl Connection {
|
||||
_ => None,
|
||||
});
|
||||
let request = DropNamespaceRequest {
|
||||
id: if namespace.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(namespace)
|
||||
},
|
||||
id: Some(namespace_path),
|
||||
mode: mode_str,
|
||||
behavior: behavior_str,
|
||||
..Default::default()
|
||||
@@ -414,21 +426,17 @@ impl Connection {
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (namespace,))]
|
||||
#[pyo3(signature = (namespace_path,))]
|
||||
pub fn describe_namespace(
|
||||
self_: PyRef<'_, Self>,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Vec<String>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
let inner = self_.get_inner()?.clone();
|
||||
let py = self_.py();
|
||||
future_into_py(py, async move {
|
||||
use lance_namespace::models::DescribeNamespaceRequest;
|
||||
let request = DescribeNamespaceRequest {
|
||||
id: if namespace.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(namespace)
|
||||
},
|
||||
id: Some(namespace_path),
|
||||
..Default::default()
|
||||
};
|
||||
let response = inner.describe_namespace(request).await.infer_error()?;
|
||||
@@ -440,10 +448,10 @@ impl Connection {
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (namespace=vec![], page_token=None, limit=None))]
|
||||
#[pyo3(signature = (namespace_path=None, page_token=None, limit=None))]
|
||||
pub fn list_tables(
|
||||
self_: PyRef<'_, Self>,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Option<Vec<String>>,
|
||||
page_token: Option<String>,
|
||||
limit: Option<u32>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
@@ -452,11 +460,7 @@ impl Connection {
|
||||
future_into_py(py, async move {
|
||||
use lance_namespace::models::ListTablesRequest;
|
||||
let request = ListTablesRequest {
|
||||
id: if namespace.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(namespace)
|
||||
},
|
||||
id: namespace_path,
|
||||
page_token,
|
||||
limit: limit.map(|l| l as i32),
|
||||
..Default::default()
|
||||
|
||||
175
python/src/expr.rs
Normal file
175
python/src/expr.rs
Normal file
@@ -0,0 +1,175 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
||||
|
||||
//! PyO3 bindings for the LanceDB expression builder API.
|
||||
//!
|
||||
//! This module exposes [`PyExpr`] and helper free functions so Python can
|
||||
//! build type-safe filter / projection expressions that map directly to
|
||||
//! DataFusion [`Expr`] nodes, bypassing SQL string parsing.
|
||||
|
||||
use arrow::{datatypes::DataType, pyarrow::PyArrowType};
|
||||
use lancedb::expr::{DfExpr, col as ldb_col, contains, expr_cast, lit as df_lit, lower, upper};
|
||||
use pyo3::{Bound, PyAny, PyResult, exceptions::PyValueError, prelude::*, pyfunction};
|
||||
|
||||
/// A type-safe DataFusion expression.
|
||||
///
|
||||
/// Instances are constructed via the free functions [`expr_col`] and
|
||||
/// [`expr_lit`] and combined with the methods on this struct. On the Python
|
||||
/// side a thin wrapper class (`lancedb.expr.Expr`) delegates to these methods
|
||||
/// and adds Python operator overloads.
|
||||
#[pyclass(name = "PyExpr")]
|
||||
#[derive(Clone)]
|
||||
pub struct PyExpr(pub DfExpr);
|
||||
|
||||
#[pymethods]
|
||||
impl PyExpr {
|
||||
// ── comparisons ──────────────────────────────────────────────────────────
|
||||
|
||||
fn eq(&self, other: &Self) -> Self {
|
||||
Self(self.0.clone().eq(other.0.clone()))
|
||||
}
|
||||
|
||||
fn ne(&self, other: &Self) -> Self {
|
||||
Self(self.0.clone().not_eq(other.0.clone()))
|
||||
}
|
||||
|
||||
fn lt(&self, other: &Self) -> Self {
|
||||
Self(self.0.clone().lt(other.0.clone()))
|
||||
}
|
||||
|
||||
fn lte(&self, other: &Self) -> Self {
|
||||
Self(self.0.clone().lt_eq(other.0.clone()))
|
||||
}
|
||||
|
||||
fn gt(&self, other: &Self) -> Self {
|
||||
Self(self.0.clone().gt(other.0.clone()))
|
||||
}
|
||||
|
||||
fn gte(&self, other: &Self) -> Self {
|
||||
Self(self.0.clone().gt_eq(other.0.clone()))
|
||||
}
|
||||
|
||||
// ── logical ──────────────────────────────────────────────────────────────
|
||||
|
||||
fn and_(&self, other: &Self) -> Self {
|
||||
Self(self.0.clone().and(other.0.clone()))
|
||||
}
|
||||
|
||||
fn or_(&self, other: &Self) -> Self {
|
||||
Self(self.0.clone().or(other.0.clone()))
|
||||
}
|
||||
|
||||
fn not_(&self) -> Self {
|
||||
use std::ops::Not;
|
||||
Self(self.0.clone().not())
|
||||
}
|
||||
|
||||
// ── arithmetic ───────────────────────────────────────────────────────────
|
||||
|
||||
fn add(&self, other: &Self) -> Self {
|
||||
use std::ops::Add;
|
||||
Self(self.0.clone().add(other.0.clone()))
|
||||
}
|
||||
|
||||
fn sub(&self, other: &Self) -> Self {
|
||||
use std::ops::Sub;
|
||||
Self(self.0.clone().sub(other.0.clone()))
|
||||
}
|
||||
|
||||
fn mul(&self, other: &Self) -> Self {
|
||||
use std::ops::Mul;
|
||||
Self(self.0.clone().mul(other.0.clone()))
|
||||
}
|
||||
|
||||
fn div(&self, other: &Self) -> Self {
|
||||
use std::ops::Div;
|
||||
Self(self.0.clone().div(other.0.clone()))
|
||||
}
|
||||
|
||||
// ── string functions ─────────────────────────────────────────────────────
|
||||
|
||||
/// Convert string column to lowercase.
|
||||
fn lower(&self) -> Self {
|
||||
Self(lower(self.0.clone()))
|
||||
}
|
||||
|
||||
/// Convert string column to uppercase.
|
||||
fn upper(&self) -> Self {
|
||||
Self(upper(self.0.clone()))
|
||||
}
|
||||
|
||||
/// Test whether the string contains `substr`.
|
||||
fn contains(&self, substr: &Self) -> Self {
|
||||
Self(contains(self.0.clone(), substr.0.clone()))
|
||||
}
|
||||
|
||||
// ── type cast ────────────────────────────────────────────────────────────
|
||||
|
||||
/// Cast the expression to `data_type`.
|
||||
///
|
||||
/// `data_type` must be a PyArrow `DataType` (e.g. `pa.int32()`).
|
||||
/// On the Python side, `lancedb.expr.Expr.cast` also accepts type name
|
||||
/// strings via `pa.lib.ensure_type` before forwarding here.
|
||||
fn cast(&self, data_type: PyArrowType<DataType>) -> Self {
|
||||
Self(expr_cast(self.0.clone(), data_type.0))
|
||||
}
|
||||
|
||||
// ── utilities ────────────────────────────────────────────────────────────
|
||||
|
||||
/// Render the expression as a SQL string (useful for debugging).
|
||||
fn to_sql(&self) -> PyResult<String> {
|
||||
lancedb::expr::expr_to_sql_string(&self.0).map_err(|e| PyValueError::new_err(e.to_string()))
|
||||
}
|
||||
|
||||
fn __repr__(&self) -> PyResult<String> {
|
||||
let sql =
|
||||
lancedb::expr::expr_to_sql_string(&self.0).unwrap_or_else(|_| "<expr>".to_string());
|
||||
Ok(format!("PyExpr({})", sql))
|
||||
}
|
||||
}
|
||||
|
||||
// ── free functions ────────────────────────────────────────────────────────────
|
||||
|
||||
/// Create a column reference expression.
|
||||
///
|
||||
/// The column name is preserved exactly as given (case-sensitive), so
|
||||
/// `col("firstName")` correctly references a field named `firstName`.
|
||||
#[pyfunction]
|
||||
pub fn expr_col(name: &str) -> PyExpr {
|
||||
PyExpr(ldb_col(name))
|
||||
}
|
||||
|
||||
/// Create a literal value expression.
|
||||
///
|
||||
/// Supported Python types: `bool`, `int`, `float`, `str`.
|
||||
#[pyfunction]
|
||||
pub fn expr_lit(value: Bound<'_, PyAny>) -> PyResult<PyExpr> {
|
||||
// bool must be checked before int because bool is a subclass of int in Python
|
||||
if let Ok(b) = value.extract::<bool>() {
|
||||
return Ok(PyExpr(df_lit(b)));
|
||||
}
|
||||
if let Ok(i) = value.extract::<i64>() {
|
||||
return Ok(PyExpr(df_lit(i)));
|
||||
}
|
||||
if let Ok(f) = value.extract::<f64>() {
|
||||
return Ok(PyExpr(df_lit(f)));
|
||||
}
|
||||
if let Ok(s) = value.extract::<String>() {
|
||||
return Ok(PyExpr(df_lit(s)));
|
||||
}
|
||||
Err(PyValueError::new_err(format!(
|
||||
"unsupported literal type: {}. Supported: bool, int, float, str",
|
||||
value.get_type().name()?
|
||||
)))
|
||||
}
|
||||
|
||||
/// Call an arbitrary registered SQL function by name.
|
||||
///
|
||||
/// See `lancedb::expr::func` for the list of supported function names.
|
||||
#[pyfunction]
|
||||
pub fn expr_func(name: &str, args: Vec<PyExpr>) -> PyResult<PyExpr> {
|
||||
let df_args: Vec<DfExpr> = args.into_iter().map(|e| e.0).collect();
|
||||
lancedb::expr::func(name, df_args)
|
||||
.map(PyExpr)
|
||||
.map_err(|e| PyValueError::new_err(e.to_string()))
|
||||
}
|
||||
@@ -4,6 +4,7 @@
|
||||
use arrow::RecordBatchStream;
|
||||
use connection::{Connection, connect};
|
||||
use env_logger::Env;
|
||||
use expr::{PyExpr, expr_col, expr_func, expr_lit};
|
||||
use index::IndexConfig;
|
||||
use permutation::{PyAsyncPermutationBuilder, PyPermutationReader};
|
||||
use pyo3::{
|
||||
@@ -21,13 +22,13 @@ use table::{
|
||||
pub mod arrow;
|
||||
pub mod connection;
|
||||
pub mod error;
|
||||
pub mod expr;
|
||||
pub mod header;
|
||||
pub mod index;
|
||||
pub mod namespace;
|
||||
pub mod permutation;
|
||||
pub mod query;
|
||||
pub mod session;
|
||||
pub mod storage_options;
|
||||
pub mod table;
|
||||
pub mod util;
|
||||
|
||||
@@ -55,10 +56,14 @@ pub fn _lancedb(_py: Python, m: &Bound<'_, PyModule>) -> PyResult<()> {
|
||||
m.add_class::<UpdateResult>()?;
|
||||
m.add_class::<PyAsyncPermutationBuilder>()?;
|
||||
m.add_class::<PyPermutationReader>()?;
|
||||
m.add_class::<PyExpr>()?;
|
||||
m.add_function(wrap_pyfunction!(connect, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(permutation::async_permutation_builder, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(util::validate_table_name, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(query::fts_query_to_json, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(expr_col, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(expr_lit, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(expr_func, m)?)?;
|
||||
m.add("__version__", env!("CARGO_PKG_VERSION"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use bytes::Bytes;
|
||||
use lance_io::object_store::{LanceNamespaceStorageOptionsProvider, StorageOptionsProvider};
|
||||
use lance_namespace::LanceNamespace as LanceNamespaceTrait;
|
||||
use lance_namespace::models::*;
|
||||
use pyo3::prelude::*;
|
||||
@@ -694,3 +695,21 @@ pub fn extract_namespace_arc(
|
||||
let ns_ref = ns.bind(py);
|
||||
PyLanceNamespace::create_arc(py, ns_ref)
|
||||
}
|
||||
|
||||
/// Create a LanceNamespaceStorageOptionsProvider from a namespace client and table ID.
|
||||
///
|
||||
/// This creates a Rust storage options provider that fetches credentials from the
|
||||
/// namespace's describe_table() method, enabling automatic credential refresh.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `namespace_client` - The namespace client (wrapped PyLanceNamespace)
|
||||
/// * `table_id` - Full table identifier (namespace_path + table_name)
|
||||
pub fn create_namespace_storage_options_provider(
|
||||
namespace_client: Arc<dyn LanceNamespaceTrait>,
|
||||
table_id: Vec<String>,
|
||||
) -> Arc<dyn StorageOptionsProvider> {
|
||||
Arc::new(LanceNamespaceStorageOptionsProvider::new(
|
||||
namespace_client,
|
||||
table_id,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -35,12 +35,10 @@ use pyo3::types::PyList;
|
||||
use pyo3::types::{PyDict, PyString};
|
||||
use pyo3::{FromPyObject, exceptions::PyRuntimeError};
|
||||
use pyo3::{PyErr, pyclass};
|
||||
use pyo3::{
|
||||
exceptions::{PyNotImplementedError, PyValueError},
|
||||
intern,
|
||||
};
|
||||
use pyo3::{exceptions::PyValueError, intern};
|
||||
use pyo3_async_runtimes::tokio::future_into_py;
|
||||
|
||||
use crate::expr::PyExpr;
|
||||
use crate::util::parse_distance_type;
|
||||
use crate::{arrow::RecordBatchStream, util::PyLanceDB};
|
||||
use crate::{error::PythonErrorExt, index::class_name};
|
||||
@@ -344,9 +342,13 @@ impl<'py> IntoPyObject<'py> for PyQueryFilter {
|
||||
|
||||
fn into_pyobject(self, py: pyo3::Python<'py>) -> PyResult<Self::Output> {
|
||||
match self.0 {
|
||||
QueryFilter::Datafusion(_) => Err(PyNotImplementedError::new_err(
|
||||
"Datafusion filter has no conversion to Python",
|
||||
)),
|
||||
QueryFilter::Datafusion(expr) => {
|
||||
// Serialize the DataFusion expression to a SQL string so that
|
||||
// callers (e.g. remote tables) see the same format as Sql.
|
||||
let sql = lancedb::expr::expr_to_sql_string(&expr)
|
||||
.map_err(|e| PyRuntimeError::new_err(e.to_string()))?;
|
||||
Ok(sql.into_pyobject(py)?.into_any())
|
||||
}
|
||||
QueryFilter::Sql(sql) => Ok(sql.into_pyobject(py)?.into_any()),
|
||||
QueryFilter::Substrait(substrait) => Ok(substrait.into_pyobject(py)?.into_any()),
|
||||
}
|
||||
@@ -370,10 +372,20 @@ impl Query {
|
||||
self.inner = self.inner.clone().only_if(predicate);
|
||||
}
|
||||
|
||||
pub fn where_expr(&mut self, expr: PyExpr) {
|
||||
self.inner = self.inner.clone().only_if_expr(expr.0);
|
||||
}
|
||||
|
||||
pub fn select(&mut self, columns: Vec<(String, String)>) {
|
||||
self.inner = self.inner.clone().select(Select::dynamic(&columns));
|
||||
}
|
||||
|
||||
pub fn select_expr(&mut self, columns: Vec<(String, PyExpr)>) {
|
||||
let pairs: Vec<(String, lancedb::expr::DfExpr)> =
|
||||
columns.into_iter().map(|(name, e)| (name, e.0)).collect();
|
||||
self.inner = self.inner.clone().select(Select::Expr(pairs));
|
||||
}
|
||||
|
||||
pub fn select_columns(&mut self, columns: Vec<String>) {
|
||||
self.inner = self.inner.clone().select(Select::columns(&columns));
|
||||
}
|
||||
@@ -607,10 +619,20 @@ impl FTSQuery {
|
||||
self.inner = self.inner.clone().only_if(predicate);
|
||||
}
|
||||
|
||||
pub fn where_expr(&mut self, expr: PyExpr) {
|
||||
self.inner = self.inner.clone().only_if_expr(expr.0);
|
||||
}
|
||||
|
||||
pub fn select(&mut self, columns: Vec<(String, String)>) {
|
||||
self.inner = self.inner.clone().select(Select::dynamic(&columns));
|
||||
}
|
||||
|
||||
pub fn select_expr(&mut self, columns: Vec<(String, PyExpr)>) {
|
||||
let pairs: Vec<(String, lancedb::expr::DfExpr)> =
|
||||
columns.into_iter().map(|(name, e)| (name, e.0)).collect();
|
||||
self.inner = self.inner.clone().select(Select::Expr(pairs));
|
||||
}
|
||||
|
||||
pub fn select_columns(&mut self, columns: Vec<String>) {
|
||||
self.inner = self.inner.clone().select(Select::columns(&columns));
|
||||
}
|
||||
@@ -725,6 +747,10 @@ impl VectorQuery {
|
||||
self.inner = self.inner.clone().only_if(predicate);
|
||||
}
|
||||
|
||||
pub fn where_expr(&mut self, expr: PyExpr) {
|
||||
self.inner = self.inner.clone().only_if_expr(expr.0);
|
||||
}
|
||||
|
||||
pub fn add_query_vector(&mut self, vector: Bound<'_, PyAny>) -> PyResult<()> {
|
||||
let data: ArrayData = ArrayData::from_pyarrow_bound(&vector)?;
|
||||
let array = make_array(data);
|
||||
@@ -736,6 +762,12 @@ impl VectorQuery {
|
||||
self.inner = self.inner.clone().select(Select::dynamic(&columns));
|
||||
}
|
||||
|
||||
pub fn select_expr(&mut self, columns: Vec<(String, PyExpr)>) {
|
||||
let pairs: Vec<(String, lancedb::expr::DfExpr)> =
|
||||
columns.into_iter().map(|(name, e)| (name, e.0)).collect();
|
||||
self.inner = self.inner.clone().select(Select::Expr(pairs));
|
||||
}
|
||||
|
||||
pub fn select_columns(&mut self, columns: Vec<String>) {
|
||||
self.inner = self.inner.clone().select(Select::columns(&columns));
|
||||
}
|
||||
@@ -890,11 +922,21 @@ impl HybridQuery {
|
||||
self.inner_fts.r#where(predicate);
|
||||
}
|
||||
|
||||
pub fn where_expr(&mut self, expr: PyExpr) {
|
||||
self.inner_vec.where_expr(expr.clone());
|
||||
self.inner_fts.where_expr(expr);
|
||||
}
|
||||
|
||||
pub fn select(&mut self, columns: Vec<(String, String)>) {
|
||||
self.inner_vec.select(columns.clone());
|
||||
self.inner_fts.select(columns);
|
||||
}
|
||||
|
||||
pub fn select_expr(&mut self, columns: Vec<(String, PyExpr)>) {
|
||||
self.inner_vec.select_expr(columns.clone());
|
||||
self.inner_fts.select_expr(columns);
|
||||
}
|
||||
|
||||
pub fn select_columns(&mut self, columns: Vec<String>) {
|
||||
self.inner_vec.select_columns(columns.clone());
|
||||
self.inner_fts.select_columns(columns);
|
||||
|
||||
@@ -1,137 +0,0 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
||||
|
||||
//! PyO3 bindings for StorageOptionsProvider
|
||||
//!
|
||||
//! This module provides the bridge between Python StorageOptionsProvider objects
|
||||
//! and Rust's StorageOptionsProvider trait, enabling automatic credential refresh.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use lance_io::object_store::StorageOptionsProvider;
|
||||
use pyo3::prelude::*;
|
||||
use pyo3::types::PyDict;
|
||||
|
||||
/// Internal wrapper around a Python object implementing StorageOptionsProvider
|
||||
pub struct PyStorageOptionsProvider {
|
||||
/// The Python object implementing fetch_storage_options()
|
||||
inner: Py<PyAny>,
|
||||
}
|
||||
|
||||
impl Clone for PyStorageOptionsProvider {
|
||||
fn clone(&self) -> Self {
|
||||
Python::attach(|py| Self {
|
||||
inner: self.inner.clone_ref(py),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl PyStorageOptionsProvider {
|
||||
pub fn new(obj: Py<PyAny>) -> PyResult<Self> {
|
||||
Python::attach(|py| {
|
||||
// Verify the object has a fetch_storage_options method
|
||||
if !obj.bind(py).hasattr("fetch_storage_options")? {
|
||||
return Err(pyo3::exceptions::PyTypeError::new_err(
|
||||
"StorageOptionsProvider must implement fetch_storage_options() method",
|
||||
));
|
||||
}
|
||||
Ok(Self { inner: obj })
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper that implements the Rust StorageOptionsProvider trait
|
||||
pub struct PyStorageOptionsProviderWrapper {
|
||||
py_provider: PyStorageOptionsProvider,
|
||||
}
|
||||
|
||||
impl PyStorageOptionsProviderWrapper {
|
||||
pub fn new(py_provider: PyStorageOptionsProvider) -> Self {
|
||||
Self { py_provider }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl StorageOptionsProvider for PyStorageOptionsProviderWrapper {
|
||||
async fn fetch_storage_options(&self) -> lance_core::Result<Option<HashMap<String, String>>> {
|
||||
// Call Python method from async context using spawn_blocking
|
||||
let py_provider = self.py_provider.clone();
|
||||
|
||||
tokio::task::spawn_blocking(move || {
|
||||
Python::attach(|py| {
|
||||
// Call the Python fetch_storage_options method
|
||||
let result = py_provider
|
||||
.inner
|
||||
.bind(py)
|
||||
.call_method0("fetch_storage_options")
|
||||
.map_err(|e| lance_core::Error::io_source(Box::new(std::io::Error::other(format!(
|
||||
"Failed to call fetch_storage_options: {}",
|
||||
e
|
||||
)))))?;
|
||||
|
||||
// If result is None, return None
|
||||
if result.is_none() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Extract the result dict - should be a flat Map<String, String>
|
||||
let result_dict = result.downcast::<PyDict>().map_err(|_| {
|
||||
lance_core::Error::invalid_input(
|
||||
"fetch_storage_options() must return a dict of string key-value pairs or None",
|
||||
)
|
||||
})?;
|
||||
|
||||
// Convert all entries to HashMap<String, String>
|
||||
let mut storage_options = HashMap::new();
|
||||
for (key, value) in result_dict.iter() {
|
||||
let key_str: String = key.extract().map_err(|e| {
|
||||
lance_core::Error::invalid_input(format!("Storage option key must be a string: {}", e))
|
||||
})?;
|
||||
let value_str: String = value.extract().map_err(|e| {
|
||||
lance_core::Error::invalid_input(format!("Storage option value must be a string: {}", e))
|
||||
})?;
|
||||
storage_options.insert(key_str, value_str);
|
||||
}
|
||||
|
||||
Ok(Some(storage_options))
|
||||
})
|
||||
})
|
||||
.await
|
||||
.map_err(|e| lance_core::Error::io_source(Box::new(std::io::Error::other(format!(
|
||||
"Task join error: {}",
|
||||
e
|
||||
)))))?
|
||||
}
|
||||
|
||||
fn provider_id(&self) -> String {
|
||||
Python::attach(|py| {
|
||||
// Call provider_id() method on the Python object
|
||||
let obj = self.py_provider.inner.bind(py);
|
||||
obj.call_method0("provider_id")
|
||||
.and_then(|result| result.extract::<String>())
|
||||
.unwrap_or_else(|e| {
|
||||
// If provider_id() fails, construct a fallback ID
|
||||
format!("PyStorageOptionsProvider(error: {})", e)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for PyStorageOptionsProviderWrapper {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "PyStorageOptionsProviderWrapper({})", self.provider_id())
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert a Python object to an Arc<dyn StorageOptionsProvider>
|
||||
///
|
||||
/// This is the main entry point for converting Python StorageOptionsProvider objects
|
||||
/// to Rust trait objects that can be used by the Lance ecosystem.
|
||||
pub fn py_object_to_storage_options_provider(
|
||||
py_obj: Py<PyAny>,
|
||||
) -> PyResult<Arc<dyn StorageOptionsProvider>> {
|
||||
let py_provider = PyStorageOptionsProvider::new(py_obj)?;
|
||||
Ok(Arc::new(PyStorageOptionsProviderWrapper::new(py_provider)))
|
||||
}
|
||||
@@ -19,7 +19,7 @@ use lancedb::table::{
|
||||
Table as LanceDbTable,
|
||||
};
|
||||
use pyo3::{
|
||||
Bound, FromPyObject, PyAny, PyRef, PyResult, Python,
|
||||
Bound, FromPyObject, Py, PyAny, PyRef, PyResult, Python,
|
||||
exceptions::{PyKeyError, PyRuntimeError, PyValueError},
|
||||
pyclass, pymethods,
|
||||
types::{IntoPyDict, PyAnyMethods, PyDict, PyDictMethods},
|
||||
@@ -299,10 +299,12 @@ impl Table {
|
||||
})
|
||||
}
|
||||
|
||||
#[pyo3(signature = (data, mode, progress=None))]
|
||||
pub fn add<'a>(
|
||||
self_: PyRef<'a, Self>,
|
||||
data: PyScannable,
|
||||
mode: String,
|
||||
progress: Option<Py<PyAny>>,
|
||||
) -> PyResult<Bound<'a, PyAny>> {
|
||||
let mut op = self_.inner_ref()?.add(data);
|
||||
if mode == "append" {
|
||||
@@ -312,6 +314,81 @@ impl Table {
|
||||
} else {
|
||||
return Err(PyValueError::new_err(format!("Invalid mode: {}", mode)));
|
||||
}
|
||||
if let Some(progress_obj) = progress {
|
||||
let is_callable = Python::attach(|py| progress_obj.bind(py).is_callable());
|
||||
if is_callable {
|
||||
// Callback: call with a dict of progress info.
|
||||
op = op.progress(move |p| {
|
||||
Python::attach(|py| {
|
||||
let dict = PyDict::new(py);
|
||||
if let Err(e) = dict
|
||||
.set_item("output_rows", p.output_rows())
|
||||
.and_then(|_| dict.set_item("output_bytes", p.output_bytes()))
|
||||
.and_then(|_| dict.set_item("total_rows", p.total_rows()))
|
||||
.and_then(|_| {
|
||||
dict.set_item("elapsed_seconds", p.elapsed().as_secs_f64())
|
||||
})
|
||||
.and_then(|_| dict.set_item("active_tasks", p.active_tasks()))
|
||||
.and_then(|_| dict.set_item("total_tasks", p.total_tasks()))
|
||||
.and_then(|_| dict.set_item("done", p.done()))
|
||||
{
|
||||
log::warn!("progress dict error: {e}");
|
||||
return;
|
||||
}
|
||||
if let Err(e) = progress_obj.call1(py, (dict,)) {
|
||||
log::warn!("progress callback error: {e}");
|
||||
}
|
||||
});
|
||||
});
|
||||
} else {
|
||||
// tqdm-like: has update() method.
|
||||
let mut last_rows: usize = 0;
|
||||
let mut total_set = false;
|
||||
op = op.progress(move |p| {
|
||||
let current = p.output_rows();
|
||||
let prev = last_rows;
|
||||
last_rows = current;
|
||||
Python::attach(|py| {
|
||||
if let Some(total) = p.total_rows()
|
||||
&& !total_set
|
||||
{
|
||||
if let Err(e) = progress_obj.setattr(py, "total", total) {
|
||||
log::warn!("progress setattr error: {e}");
|
||||
}
|
||||
total_set = true;
|
||||
}
|
||||
let delta = current.saturating_sub(prev);
|
||||
if delta > 0 {
|
||||
if let Err(e) = progress_obj.call_method1(py, "update", (delta,)) {
|
||||
log::warn!("progress update error: {e}");
|
||||
}
|
||||
// Show throughput and active workers in tqdm postfix.
|
||||
let elapsed = p.elapsed().as_secs_f64();
|
||||
if elapsed > 0.0 {
|
||||
let mb_per_sec = p.output_bytes() as f64 / elapsed / 1_000_000.0;
|
||||
let postfix = format!(
|
||||
"{:.1} MB/s | {}/{} workers",
|
||||
mb_per_sec,
|
||||
p.active_tasks(),
|
||||
p.total_tasks()
|
||||
);
|
||||
if let Err(e) =
|
||||
progress_obj.call_method1(py, "set_postfix_str", (postfix,))
|
||||
{
|
||||
log::warn!("progress set_postfix_str error: {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
if p.done() {
|
||||
// Force a final refresh so the bar shows completion.
|
||||
if let Err(e) = progress_obj.call_method0(py, "refresh") {
|
||||
log::warn!("progress refresh error: {e}");
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
future_into_py(self_.py(), async move {
|
||||
let result = op.execute().await.infer_error()?;
|
||||
|
||||
387
python/tests/test_expr.py
Normal file
387
python/tests/test_expr.py
Normal file
@@ -0,0 +1,387 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
||||
|
||||
"""Tests for the type-safe expression builder API."""
|
||||
|
||||
import pytest
|
||||
import pyarrow as pa
|
||||
import lancedb
|
||||
from lancedb.expr import Expr, col, lit, func
|
||||
|
||||
|
||||
# ── unit tests for Expr construction ─────────────────────────────────────────
|
||||
|
||||
|
||||
class TestExprConstruction:
|
||||
def test_col_returns_expr(self):
|
||||
e = col("age")
|
||||
assert isinstance(e, Expr)
|
||||
|
||||
def test_lit_int(self):
|
||||
e = lit(42)
|
||||
assert isinstance(e, Expr)
|
||||
|
||||
def test_lit_float(self):
|
||||
e = lit(3.14)
|
||||
assert isinstance(e, Expr)
|
||||
|
||||
def test_lit_str(self):
|
||||
e = lit("hello")
|
||||
assert isinstance(e, Expr)
|
||||
|
||||
def test_lit_bool(self):
|
||||
e = lit(True)
|
||||
assert isinstance(e, Expr)
|
||||
|
||||
def test_lit_unsupported_type_raises(self):
|
||||
with pytest.raises(Exception):
|
||||
lit([1, 2, 3])
|
||||
|
||||
def test_func(self):
|
||||
e = func("lower", col("name"))
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "lower(name)"
|
||||
|
||||
def test_func_unknown_raises(self):
|
||||
with pytest.raises(Exception):
|
||||
func("not_a_real_function", col("x"))
|
||||
|
||||
|
||||
class TestExprOperators:
|
||||
def test_eq_operator(self):
|
||||
e = col("x") == lit(1)
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(x = 1)"
|
||||
|
||||
def test_ne_operator(self):
|
||||
e = col("x") != lit(1)
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(x <> 1)"
|
||||
|
||||
def test_lt_operator(self):
|
||||
e = col("age") < lit(18)
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(age < 18)"
|
||||
|
||||
def test_le_operator(self):
|
||||
e = col("age") <= lit(18)
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(age <= 18)"
|
||||
|
||||
def test_gt_operator(self):
|
||||
e = col("age") > lit(18)
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(age > 18)"
|
||||
|
||||
def test_ge_operator(self):
|
||||
e = col("age") >= lit(18)
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(age >= 18)"
|
||||
|
||||
def test_and_operator(self):
|
||||
e = (col("age") > lit(18)) & (col("status") == lit("active"))
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "((age > 18) AND (status = 'active'))"
|
||||
|
||||
def test_or_operator(self):
|
||||
e = (col("a") == lit(1)) | (col("b") == lit(2))
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "((a = 1) OR (b = 2))"
|
||||
|
||||
def test_invert_operator(self):
|
||||
e = ~(col("active") == lit(True))
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "NOT (active = true)"
|
||||
|
||||
def test_add_operator(self):
|
||||
e = col("x") + lit(1)
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(x + 1)"
|
||||
|
||||
def test_sub_operator(self):
|
||||
e = col("x") - lit(1)
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(x - 1)"
|
||||
|
||||
def test_mul_operator(self):
|
||||
e = col("price") * lit(1.1)
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(price * 1.1)"
|
||||
|
||||
def test_div_operator(self):
|
||||
e = col("total") / lit(2)
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(total / 2)"
|
||||
|
||||
def test_radd(self):
|
||||
e = lit(1) + col("x")
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(1 + x)"
|
||||
|
||||
def test_rmul(self):
|
||||
e = lit(2) * col("x")
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(2 * x)"
|
||||
|
||||
def test_coerce_plain_int(self):
|
||||
# Operators should auto-wrap plain Python values via lit()
|
||||
e = col("age") > 18
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(age > 18)"
|
||||
|
||||
def test_coerce_plain_str(self):
|
||||
e = col("name") == "alice"
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(name = 'alice')"
|
||||
|
||||
|
||||
class TestExprStringMethods:
|
||||
def test_lower(self):
|
||||
e = col("name").lower()
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "lower(name)"
|
||||
|
||||
def test_upper(self):
|
||||
e = col("name").upper()
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "upper(name)"
|
||||
|
||||
def test_contains(self):
|
||||
e = col("text").contains(lit("hello"))
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "contains(text, 'hello')"
|
||||
|
||||
def test_contains_with_str_coerce(self):
|
||||
e = col("text").contains("hello")
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "contains(text, 'hello')"
|
||||
|
||||
def test_chained_lower_eq(self):
|
||||
e = col("name").lower() == lit("alice")
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(lower(name) = 'alice')"
|
||||
|
||||
|
||||
class TestExprCast:
|
||||
def test_cast_string(self):
|
||||
e = col("id").cast("string")
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "CAST(id AS VARCHAR)"
|
||||
|
||||
def test_cast_int32(self):
|
||||
e = col("score").cast("int32")
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "CAST(score AS INTEGER)"
|
||||
|
||||
def test_cast_float64(self):
|
||||
e = col("val").cast("float64")
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "CAST(val AS DOUBLE)"
|
||||
|
||||
def test_cast_pyarrow_type(self):
|
||||
e = col("score").cast(pa.int32())
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "CAST(score AS INTEGER)"
|
||||
|
||||
def test_cast_pyarrow_float64(self):
|
||||
e = col("val").cast(pa.float64())
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "CAST(val AS DOUBLE)"
|
||||
|
||||
def test_cast_pyarrow_string(self):
|
||||
e = col("id").cast(pa.string())
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "CAST(id AS VARCHAR)"
|
||||
|
||||
def test_cast_pyarrow_and_string_equivalent(self):
|
||||
# pa.int32() and "int32" should produce equivalent SQL
|
||||
sql_str = col("x").cast("int32").to_sql()
|
||||
sql_pa = col("x").cast(pa.int32()).to_sql()
|
||||
assert sql_str == sql_pa
|
||||
|
||||
|
||||
class TestExprNamedMethods:
|
||||
def test_eq_method(self):
|
||||
e = col("x").eq(lit(1))
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(x = 1)"
|
||||
|
||||
def test_gt_method(self):
|
||||
e = col("x").gt(lit(0))
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "(x > 0)"
|
||||
|
||||
def test_and_method(self):
|
||||
e = col("x").gt(lit(0)).and_(col("y").lt(lit(10)))
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "((x > 0) AND (y < 10))"
|
||||
|
||||
def test_or_method(self):
|
||||
e = col("x").eq(lit(1)).or_(col("x").eq(lit(2)))
|
||||
assert isinstance(e, Expr)
|
||||
assert e.to_sql() == "((x = 1) OR (x = 2))"
|
||||
|
||||
|
||||
class TestExprRepr:
|
||||
def test_repr(self):
|
||||
e = col("age") > lit(18)
|
||||
assert repr(e) == "Expr((age > 18))"
|
||||
|
||||
def test_to_sql(self):
|
||||
e = col("age") > 18
|
||||
assert e.to_sql() == "(age > 18)"
|
||||
|
||||
def test_unhashable(self):
|
||||
e = col("x")
|
||||
with pytest.raises(TypeError):
|
||||
{e: 1}
|
||||
|
||||
|
||||
# ── integration tests: end-to-end query against a real table ─────────────────
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def simple_table(tmp_path):
|
||||
db = lancedb.connect(str(tmp_path))
|
||||
data = pa.table(
|
||||
{
|
||||
"id": [1, 2, 3, 4, 5],
|
||||
"name": ["Alice", "Bob", "Charlie", "alice", "BOB"],
|
||||
"age": [25, 17, 30, 22, 15],
|
||||
"score": [1.5, 2.0, 3.5, 4.0, 0.5],
|
||||
}
|
||||
)
|
||||
return db.create_table("test", data)
|
||||
|
||||
|
||||
class TestExprFilter:
|
||||
def test_simple_gt_filter(self, simple_table):
|
||||
result = simple_table.search().where(col("age") > lit(20)).to_arrow()
|
||||
assert result.num_rows == 3 # ages 25, 30, 22
|
||||
|
||||
def test_compound_and_filter(self, simple_table):
|
||||
result = (
|
||||
simple_table.search()
|
||||
.where((col("age") > lit(18)) & (col("score") > lit(2.0)))
|
||||
.to_arrow()
|
||||
)
|
||||
assert result.num_rows == 2 # (30, 3.5) and (22, 4.0)
|
||||
|
||||
def test_string_equality_filter(self, simple_table):
|
||||
result = simple_table.search().where(col("name") == lit("Bob")).to_arrow()
|
||||
assert result.num_rows == 1
|
||||
|
||||
def test_or_filter(self, simple_table):
|
||||
result = (
|
||||
simple_table.search()
|
||||
.where((col("age") < lit(18)) | (col("age") > lit(28)))
|
||||
.to_arrow()
|
||||
)
|
||||
assert result.num_rows == 3 # ages 17, 30, 15
|
||||
|
||||
def test_coercion_no_lit(self, simple_table):
|
||||
# Python values should be auto-coerced
|
||||
result = simple_table.search().where(col("age") > 20).to_arrow()
|
||||
assert result.num_rows == 3
|
||||
|
||||
def test_string_sql_still_works(self, simple_table):
|
||||
# Backwards compatibility: plain strings still accepted
|
||||
result = simple_table.search().where("age > 20").to_arrow()
|
||||
assert result.num_rows == 3
|
||||
|
||||
|
||||
class TestExprProjection:
|
||||
def test_select_with_expr(self, simple_table):
|
||||
result = (
|
||||
simple_table.search()
|
||||
.select({"double_score": col("score") * lit(2)})
|
||||
.to_arrow()
|
||||
)
|
||||
assert "double_score" in result.schema.names
|
||||
|
||||
def test_select_mixed_str_and_expr(self, simple_table):
|
||||
result = (
|
||||
simple_table.search()
|
||||
.select({"id": "id", "double_score": col("score") * lit(2)})
|
||||
.to_arrow()
|
||||
)
|
||||
assert "id" in result.schema.names
|
||||
assert "double_score" in result.schema.names
|
||||
|
||||
def test_select_list_of_columns(self, simple_table):
|
||||
# Plain list of str still works
|
||||
result = simple_table.search().select(["id", "name"]).to_arrow()
|
||||
assert result.schema.names == ["id", "name"]
|
||||
|
||||
|
||||
# ── column name edge cases ────────────────────────────────────────────────────
|
||||
|
||||
|
||||
class TestColNaming:
|
||||
"""Unit tests verifying that col() preserves identifiers exactly.
|
||||
|
||||
Identifiers that need quoting (camelCase, spaces, leading digits, unicode)
|
||||
are wrapped in backticks to match the lance SQL parser's dialect.
|
||||
"""
|
||||
|
||||
def test_camel_case_preserved_in_sql(self):
|
||||
# camelCase is quoted with backticks so the case round-trips correctly.
|
||||
assert col("firstName").to_sql() == "`firstName`"
|
||||
|
||||
def test_camel_case_in_expression(self):
|
||||
assert (col("firstName") > lit(18)).to_sql() == "(`firstName` > 18)"
|
||||
|
||||
def test_space_in_name_quoted(self):
|
||||
assert col("first name").to_sql() == "`first name`"
|
||||
|
||||
def test_space_in_expression(self):
|
||||
assert (col("first name") == lit("A")).to_sql() == "(`first name` = 'A')"
|
||||
|
||||
def test_leading_digit_quoted(self):
|
||||
assert col("2fast").to_sql() == "`2fast`"
|
||||
|
||||
def test_unicode_quoted(self):
|
||||
assert col("名前").to_sql() == "`名前`"
|
||||
|
||||
def test_snake_case_unquoted(self):
|
||||
# Plain snake_case needs no quoting.
|
||||
assert col("first_name").to_sql() == "first_name"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def special_col_table(tmp_path):
|
||||
db = lancedb.connect(str(tmp_path))
|
||||
data = pa.table(
|
||||
{
|
||||
"firstName": ["Alice", "Bob", "Charlie"],
|
||||
"first name": ["A", "B", "C"],
|
||||
"score": [10, 20, 30],
|
||||
}
|
||||
)
|
||||
return db.create_table("special", data)
|
||||
|
||||
|
||||
class TestColNamingIntegration:
|
||||
def test_camel_case_filter(self, special_col_table):
|
||||
result = (
|
||||
special_col_table.search()
|
||||
.where(col("firstName") == lit("Alice"))
|
||||
.to_arrow()
|
||||
)
|
||||
assert result.num_rows == 1
|
||||
assert result["firstName"][0].as_py() == "Alice"
|
||||
|
||||
def test_space_in_col_filter(self, special_col_table):
|
||||
result = (
|
||||
special_col_table.search().where(col("first name") == lit("B")).to_arrow()
|
||||
)
|
||||
assert result.num_rows == 1
|
||||
|
||||
def test_camel_case_projection(self, special_col_table):
|
||||
result = (
|
||||
special_col_table.search()
|
||||
.select({"upper_name": col("firstName").upper()})
|
||||
.to_arrow()
|
||||
)
|
||||
assert "upper_name" in result.schema.names
|
||||
assert sorted(result["upper_name"].to_pylist()) == ["ALICE", "BOB", "CHARLIE"]
|
||||
301
python/uv.lock
generated
301
python/uv.lock
generated
@@ -247,7 +247,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "awscli"
|
||||
version = "1.44.35"
|
||||
version = "1.44.70"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "botocore" },
|
||||
@@ -257,9 +257,9 @@ dependencies = [
|
||||
{ name = "rsa" },
|
||||
{ name = "s3transfer" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f3/42/58705761bce0d24c4496aac146d724a8caf20a33d906ec954729c934088b/awscli-1.44.35.tar.gz", hash = "sha256:bc38774bfc71fd33112fd283522b010c2f5b606e57b28a85884d96e8051c58e7", size = 1888844, upload-time = "2026-02-09T21:50:10.697Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/76/b2/0f522e76ca173ac06949883f00994ec173f9336c8f8146f982458ebc6ce7/awscli-1.44.70.tar.gz", hash = "sha256:25eafa6237a2ff9ad98c8bb486c40f904996db5fb3e9facc8cba9108caa7859c", size = 1886989, upload-time = "2026-03-31T19:33:41.249Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/cd/94/df482d7f36ffc0f8b973258aa3fc2cd33deef0c06a1ec0f228e55d79ed9a/awscli-1.44.35-py3-none-any.whl", hash = "sha256:0823c1af8926a3bd10db652d8b64d61cfbf34268be845aca332ea7aea0c1ac15", size = 4641343, upload-time = "2026-02-09T21:50:06.323Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c1/eb/98028053ab43a723ddcfd0322b5d5929f413211ae6af834852e064493e68/awscli-1.44.70-py3-none-any.whl", hash = "sha256:eb742517feca3be3b6567c3f302de6b5a3a12b18b61e530509d6e098e243771f", size = 4624874, upload-time = "2026-03-31T19:33:37.912Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -408,16 +408,16 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "botocore"
|
||||
version = "1.42.45"
|
||||
version = "1.42.80"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "jmespath" },
|
||||
{ name = "python-dateutil" },
|
||||
{ name = "urllib3" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/7a/b1/c36ad705d67bb935eac3085052b5dc03ec22d5ac12e7aedf514f3d76cac8/botocore-1.42.45.tar.gz", hash = "sha256:40b577d07b91a0ed26879da9e4658d82d3a400382446af1014d6ad3957497545", size = 14941217, upload-time = "2026-02-09T21:50:01.966Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/2e/42/d0ce09fe5b494e2a9de513206dec90fbe72bcb101457a60f526a6b1c300b/botocore-1.42.80.tar.gz", hash = "sha256:fe32af53dc87f5f4d61879bc231e2ca2cc0719b19b8f6d268e82a34f713a8a09", size = 15110373, upload-time = "2026-03-31T19:33:33.82Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/7e/ec/6681b8e4884f8663d7650220e702c503e4ba6bd09a5b91d44803b0b1d0a8/botocore-1.42.45-py3-none-any.whl", hash = "sha256:a5ea5d1b7c46c2d5d113879e45b21eaf7d60dc865f4bcb46dfcf0703fe3429f4", size = 14615557, upload-time = "2026-02-09T21:49:57.066Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/17/b0/c03f2ed8e7817db1c22d70720636a1b22a2a4d3aa3c09da0257072b30bc5/botocore-1.42.80-py3-none-any.whl", hash = "sha256:7291632b2ede71b7c69e6e366480bb6e2a5d2fae8f7d2d2eb49215e32b7c7a12", size = 14787168, upload-time = "2026-03-31T19:33:29.396Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -750,19 +750,19 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "datafusion"
|
||||
version = "51.0.0"
|
||||
version = "52.3.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pyarrow" },
|
||||
{ name = "typing-extensions", marker = "python_full_version < '3.13'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/2c/6d/d0e2632c93bbcca0687eeda672af3f92042ecd349df7be55da86253594a9/datafusion-51.0.0.tar.gz", hash = "sha256:1887c7d5ed3ae5d9f389e62ba869864afad4006a3f7c99ef0ca4707782a7838f", size = 193751, upload-time = "2026-01-09T13:23:41.562Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/db/d4/a5ad7b665a80008901892fde61dc667318db0652a955d706ddca3a224b5a/datafusion-52.3.0.tar.gz", hash = "sha256:2e8b02ad142b1a0d673f035d96a0944a640ac78275003d7e453cee4afe4a20a4", size = 205026, upload-time = "2026-03-16T10:54:07.739Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/cf/a9/7717cec053a3309be3020fe3147e3f76e5bf21295fa8adf9b52dd44ea3ff/datafusion-51.0.0-cp310-abi3-macosx_10_12_x86_64.whl", hash = "sha256:0c0d265fe3ee0dcbfa7cc3c64c7cd94fc493f38418bd79debb7ec29f29b7176e", size = 30389413, upload-time = "2026-01-09T13:23:23.266Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/55/45/72c9874fd3740a4cb9d55049fdbae0df512dc5433e9f1176f3cfd970f1a1/datafusion-51.0.0-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:43e6011db86e950bf9a21ed73cc089c2346b340a41a4f1044268af6c3a357acc", size = 26982206, upload-time = "2026-01-09T13:23:27.437Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/21/ac/b32ba1f25d38fc16e7623cc4bfb7bd68db61be2ef27b2d9969ea5c865765/datafusion-51.0.0-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e76803907150159aa059d5cc9291645bbaac1b6a46d07e56035118d327b741ae", size = 33246117, upload-time = "2026-01-09T13:23:30.981Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0b/4e/437121422ef010690fc3cdd7f080203e986ba00e0e3c3b577e03f5b54ca2/datafusion-51.0.0-cp310-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:9d0cfabfe1853994adc2e6e9da5f36c1eb061102e34a2f1101fa935c6991c9e1", size = 31421867, upload-time = "2026-01-09T13:23:34.436Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/db/fc/58cf27fcb85b2fd2a698253ae46213b1cbda784407e205c148f4006c1429/datafusion-51.0.0-cp310-abi3-win_amd64.whl", hash = "sha256:fd5f9abfd6669062debf0658d13e4583234c89d4df95faf381927b11cea411f5", size = 32517679, upload-time = "2026-01-09T13:23:39.615Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/55/63/1bb0737988cefa77274b459d64fa4b57ba4cf755639a39733e9581b5d599/datafusion-52.3.0-cp310-abi3-macosx_10_12_x86_64.whl", hash = "sha256:a73f02406b2985b9145dd97f8221a929c9ef3289a8ba64c6b52043e240938528", size = 31503230, upload-time = "2026-03-16T10:53:50.312Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/e3/ea3b79239953c3044d19d8e9581015da025b6640796db03799e435b17910/datafusion-52.3.0-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:118a1f0add6a3f91fcbc90c71819fe08750e2981637d5e7b346e099e94a20b8b", size = 28159497, upload-time = "2026-03-16T10:53:54.032Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/24/c8/7d325feb4b7509ae03857fd7e164e95ec72e8c9f3dfd3178ec7f80d53977/datafusion-52.3.0-cp310-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:253ce7aee5fe84bd6ee290c20608114114bdb5115852617f97d3855d36ad9341", size = 30769154, upload-time = "2026-03-16T10:53:57.835Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/37/ee/478689c69b3cb1ccabb2d52feac0c181f6cdf20b51a81df35344b1dab9a6/datafusion-52.3.0-cp310-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2af3469d2f06959bec88579ab107a72f965de18b32e607069bbdd0b859ed8dbb", size = 33060335, upload-time = "2026-03-16T10:54:01.715Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/48/01906ab5c1a70373c6874ac5192d03646fa7b94d9ff06e3f676cb6b0f43f/datafusion-52.3.0-cp310-abi3-win_amd64.whl", hash = "sha256:9fb35738cf4dbff672dbcfffc7332813024cb0ad2ab8cda1fb90b9054277ab0c", size = 33765807, upload-time = "2026-03-16T10:54:05.728Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1251,6 +1251,7 @@ dependencies = [
|
||||
{ name = "griffecli" },
|
||||
{ name = "griffelib" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/04/56/28a0accac339c164b52a92c6cfc45a903acc0c174caa5c1713803467b533/griffe-2.0.0.tar.gz", hash = "sha256:c68979cd8395422083a51ea7cf02f9c119d889646d99b7b656ee43725de1b80f", size = 293906, upload-time = "2026-03-23T21:06:53.402Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/8b/94/ee21d41e7eb4f823b94603b9d40f86d3c7fde80eacc2c3c71845476dddaa/griffe-2.0.0-py3-none-any.whl", hash = "sha256:5418081135a391c3e6e757a7f3f156f1a1a746cc7b4023868ff7d5e2f9a980aa", size = 5214, upload-time = "2026-02-09T19:09:44.105Z" },
|
||||
]
|
||||
@@ -1263,6 +1264,7 @@ dependencies = [
|
||||
{ name = "colorama" },
|
||||
{ name = "griffelib" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a4/f8/2e129fd4a86e52e58eefe664de05e7d502decf766e7316cc9e70fdec3e18/griffecli-2.0.0.tar.gz", hash = "sha256:312fa5ebb4ce6afc786356e2d0ce85b06c1c20d45abc42d74f0cda65e159f6ef", size = 56213, upload-time = "2026-03-23T21:06:54.8Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e6/ed/d93f7a447bbf7a935d8868e9617cbe1cadf9ee9ee6bd275d3040fbf93d60/griffecli-2.0.0-py3-none-any.whl", hash = "sha256:9f7cd9ee9b21d55e91689358978d2385ae65c22f307a63fb3269acf3f21e643d", size = 9345, upload-time = "2026-02-09T19:09:42.554Z" },
|
||||
]
|
||||
@@ -1271,6 +1273,7 @@ wheels = [
|
||||
name = "griffelib"
|
||||
version = "2.0.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ad/06/eccbd311c9e2b3ca45dbc063b93134c57a1ccc7607c5e545264ad092c4a9/griffelib-2.0.0.tar.gz", hash = "sha256:e504d637a089f5cab9b5daf18f7645970509bf4f53eda8d79ed71cce8bd97934", size = 166312, upload-time = "2026-03-23T21:06:55.954Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/4d/51/c936033e16d12b627ea334aaaaf42229c37620d0f15593456ab69ab48161/griffelib-2.0.0-py3-none-any.whl", hash = "sha256:01284878c966508b6d6f1dbff9b6fa607bc062d8261c5c7253cb285b06422a7f", size = 142004, upload-time = "2026-02-09T19:09:40.561Z" },
|
||||
]
|
||||
@@ -1888,19 +1891,19 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "lance-namespace"
|
||||
version = "0.4.5"
|
||||
version = "0.6.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "lance-namespace-urllib3-client" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b4/b5/0c3c55cf336b1e90392c2e24ac833551659e8bb3c61644b2d94825eb31bd/lance_namespace-0.4.5.tar.gz", hash = "sha256:0aee0abed3a1fa762c2955c7d12bb3004cea5c82ba28f6fcb9fe79d0cc19e317", size = 9827, upload-time = "2026-01-07T19:20:23.005Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/28/9f/7906ba4117df8d965510285eaf07264a77de2fd283b9d44ec7fc63a4a57a/lance_namespace-0.6.1.tar.gz", hash = "sha256:f0deea442bd3f1056a8e2fed056ae2778e3356517ec2e680db049058b824d131", size = 10666, upload-time = "2026-03-17T17:55:44.977Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/34/88/173687dad72baf819223e3b506898e386bc88c26ff8da5e8013291e02daf/lance_namespace-0.4.5-py3-none-any.whl", hash = "sha256:cd1a4f789de03ba23a0c16f100b1464cca572a5d04e428917a54d09db912d548", size = 11703, upload-time = "2026-01-07T19:20:25.394Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d1/91/aee1c0a04d17f2810173bd304bd444eb78332045df1b0c1b07cebd01f530/lance_namespace-0.6.1-py3-none-any.whl", hash = "sha256:9699c9e3f12236e5e08ea979cc4e036a8e3c67ed2f37ae6f25c5353ab908e1be", size = 12498, upload-time = "2026-03-17T17:55:44.062Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lance-namespace-urllib3-client"
|
||||
version = "0.4.5"
|
||||
version = "0.6.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pydantic" },
|
||||
@@ -1908,9 +1911,9 @@ dependencies = [
|
||||
{ name = "typing-extensions" },
|
||||
{ name = "urllib3" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/97/a9/4e527c2f05704565618b239b0965f829d1a194837f01234af3f8e2f33d92/lance_namespace_urllib3_client-0.4.5.tar.gz", hash = "sha256:184deda8cf8700926d994618187053c644eb1f2866a4479e7b80843cacc92b1c", size = 159726, upload-time = "2026-01-07T19:20:24.025Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/63/a1/8706a2be25bd184acccc411e48f1a42a4cbf3b6556cba15b9fcf4c15cfcc/lance_namespace_urllib3_client-0.6.1.tar.gz", hash = "sha256:31fbd058ce1ea0bf49045cdeaa756360ece0bc61e9e10276f41af6d217debe87", size = 182567, upload-time = "2026-03-17T17:55:46.87Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ca/86/0adee7190408a28dcc5a0562c674537457e3de59ee51d1c724ecdc4a9930/lance_namespace_urllib3_client-0.4.5-py3-none-any.whl", hash = "sha256:2ee154d616ba4721f0bfdf043d33c4fef2e79d380653e2f263058ab00fb4adf4", size = 277969, upload-time = "2026-01-07T19:20:26.597Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cd/c7/cb9580602dec25f0fdd6005c1c9ba1d4c8c0c3dc8d543107e5a9f248bba8/lance_namespace_urllib3_client-0.6.1-py3-none-any.whl", hash = "sha256:b9c103e1377ad46d2bd70eec894bfec0b1e2133dae0964d7e4de543c6e16293b", size = 317111, upload-time = "2026-03-17T17:55:45.546Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1999,57 +2002,57 @@ tests = [
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "adlfs", marker = "extra == 'azure'", specifier = ">=2024.2.0" },
|
||||
{ name = "aiohttp", marker = "extra == 'tests'" },
|
||||
{ name = "awscli", marker = "extra == 'embeddings'", specifier = ">=1.29.57" },
|
||||
{ name = "aiohttp", marker = "extra == 'tests'", specifier = ">=3.9.0" },
|
||||
{ name = "awscli", marker = "extra == 'embeddings'", specifier = ">=1.44.38" },
|
||||
{ name = "boto3", marker = "extra == 'embeddings'", specifier = ">=1.28.57" },
|
||||
{ name = "boto3", marker = "extra == 'tests'" },
|
||||
{ name = "boto3", marker = "extra == 'tests'", specifier = ">=1.28.57" },
|
||||
{ name = "botocore", marker = "extra == 'embeddings'", specifier = ">=1.31.57" },
|
||||
{ name = "cohere", marker = "extra == 'embeddings'" },
|
||||
{ name = "cohere", marker = "extra == 'embeddings'", specifier = ">=4.0" },
|
||||
{ name = "colpali-engine", marker = "extra == 'embeddings'", specifier = ">=0.3.10" },
|
||||
{ name = "datafusion", marker = "extra == 'tests'", specifier = "<52" },
|
||||
{ name = "deprecation" },
|
||||
{ name = "duckdb", marker = "extra == 'tests'" },
|
||||
{ name = "google-generativeai", marker = "extra == 'embeddings'" },
|
||||
{ name = "huggingface-hub", marker = "extra == 'embeddings'" },
|
||||
{ name = "datafusion", marker = "extra == 'tests'", specifier = ">=52,<53" },
|
||||
{ name = "deprecation", specifier = ">=2.1.0" },
|
||||
{ name = "duckdb", marker = "extra == 'tests'", specifier = ">=0.9.0" },
|
||||
{ name = "google-generativeai", marker = "extra == 'embeddings'", specifier = ">=0.3.0" },
|
||||
{ name = "huggingface-hub", marker = "extra == 'embeddings'", specifier = ">=0.19.0" },
|
||||
{ name = "ibm-watsonx-ai", marker = "python_full_version >= '3.10' and extra == 'embeddings'", specifier = ">=1.1.2" },
|
||||
{ name = "instructorembedding", marker = "extra == 'embeddings'" },
|
||||
{ name = "instructorembedding", marker = "extra == 'embeddings'", specifier = ">=1.0.1" },
|
||||
{ name = "lance-namespace", specifier = ">=0.3.2" },
|
||||
{ name = "mkdocs", marker = "extra == 'docs'" },
|
||||
{ name = "mkdocs-jupyter", marker = "extra == 'docs'" },
|
||||
{ name = "mkdocs-material", marker = "extra == 'docs'" },
|
||||
{ name = "mkdocstrings-python", marker = "extra == 'docs'" },
|
||||
{ name = "numpy" },
|
||||
{ name = "numpy", specifier = ">=1.24.0" },
|
||||
{ name = "ollama", marker = "extra == 'embeddings'", specifier = ">=0.3.0" },
|
||||
{ name = "open-clip-torch", marker = "extra == 'clip'" },
|
||||
{ name = "open-clip-torch", marker = "extra == 'embeddings'" },
|
||||
{ name = "open-clip-torch", marker = "extra == 'embeddings'", specifier = ">=2.20.0" },
|
||||
{ name = "openai", marker = "extra == 'embeddings'", specifier = ">=1.6.1" },
|
||||
{ name = "overrides", marker = "python_full_version < '3.12'", specifier = ">=0.7" },
|
||||
{ name = "packaging" },
|
||||
{ name = "packaging", specifier = ">=23.0" },
|
||||
{ name = "pandas", marker = "extra == 'tests'", specifier = ">=1.4" },
|
||||
{ name = "pillow", marker = "extra == 'clip'" },
|
||||
{ name = "pillow", marker = "extra == 'embeddings'" },
|
||||
{ name = "pillow", marker = "extra == 'siglip'" },
|
||||
{ name = "pillow", marker = "extra == 'clip'", specifier = ">=12.1.1" },
|
||||
{ name = "pillow", marker = "extra == 'embeddings'", specifier = ">=12.1.1" },
|
||||
{ name = "pillow", marker = "extra == 'siglip'", specifier = ">=12.1.1" },
|
||||
{ name = "polars", marker = "extra == 'tests'", specifier = ">=0.19,<=1.3.0" },
|
||||
{ name = "pre-commit", marker = "extra == 'dev'" },
|
||||
{ name = "pre-commit", marker = "extra == 'dev'", specifier = ">=3.5.0" },
|
||||
{ name = "pyarrow", specifier = ">=16" },
|
||||
{ name = "pyarrow-stubs", marker = "extra == 'tests'" },
|
||||
{ name = "pyarrow-stubs", marker = "extra == 'tests'", specifier = ">=16.0" },
|
||||
{ name = "pydantic", specifier = ">=1.10" },
|
||||
{ name = "pylance", marker = "extra == 'pylance'", specifier = ">=1.0.0b14" },
|
||||
{ name = "pylance", marker = "extra == 'tests'", specifier = ">=1.0.0b14,<3.0.0" },
|
||||
{ name = "pyright", marker = "extra == 'dev'" },
|
||||
{ name = "pytest", marker = "extra == 'tests'" },
|
||||
{ name = "pytest-asyncio", marker = "extra == 'tests'" },
|
||||
{ name = "pytest-mock", marker = "extra == 'tests'" },
|
||||
{ name = "pytz", marker = "extra == 'tests'" },
|
||||
{ name = "pylance", marker = "extra == 'pylance'", specifier = ">=4.0.0b7" },
|
||||
{ name = "pylance", marker = "extra == 'tests'", specifier = ">=4.0.0b7" },
|
||||
{ name = "pyright", marker = "extra == 'dev'", specifier = ">=1.1.350" },
|
||||
{ name = "pytest", marker = "extra == 'tests'", specifier = ">=7.0" },
|
||||
{ name = "pytest-asyncio", marker = "extra == 'tests'", specifier = ">=0.21" },
|
||||
{ name = "pytest-mock", marker = "extra == 'tests'", specifier = ">=3.10" },
|
||||
{ name = "pytz", marker = "extra == 'tests'", specifier = ">=2023.3" },
|
||||
{ name = "requests", marker = "extra == 'embeddings'", specifier = ">=2.31.0" },
|
||||
{ name = "requests", marker = "extra == 'tests'" },
|
||||
{ name = "ruff", marker = "extra == 'dev'" },
|
||||
{ name = "sentence-transformers", marker = "extra == 'embeddings'" },
|
||||
{ name = "sentencepiece", marker = "extra == 'embeddings'" },
|
||||
{ name = "requests", marker = "extra == 'tests'", specifier = ">=2.31.0" },
|
||||
{ name = "ruff", marker = "extra == 'dev'", specifier = ">=0.3.0" },
|
||||
{ name = "sentence-transformers", marker = "extra == 'embeddings'", specifier = ">=2.2.0" },
|
||||
{ name = "sentencepiece", marker = "extra == 'embeddings'", specifier = ">=0.1.99" },
|
||||
{ name = "sentencepiece", marker = "extra == 'siglip'" },
|
||||
{ name = "tantivy", marker = "extra == 'tests'" },
|
||||
{ name = "tantivy", marker = "extra == 'tests'", specifier = ">=0.20.0" },
|
||||
{ name = "torch", marker = "extra == 'clip'" },
|
||||
{ name = "torch", marker = "extra == 'embeddings'" },
|
||||
{ name = "torch", marker = "extra == 'embeddings'", specifier = ">=2.0.0" },
|
||||
{ name = "torch", marker = "extra == 'siglip'" },
|
||||
{ name = "tqdm", specifier = ">=4.27.0" },
|
||||
{ name = "transformers", marker = "extra == 'siglip'", specifier = ">=4.41.0" },
|
||||
@@ -3169,100 +3172,100 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "pillow"
|
||||
version = "12.1.0"
|
||||
version = "12.1.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d0/02/d52c733a2452ef1ffcc123b68e6606d07276b0e358db70eabad7e40042b7/pillow-12.1.0.tar.gz", hash = "sha256:5c5ae0a06e9ea030ab786b0251b32c7e4ce10e58d983c0d5c56029455180b5b9", size = 46977283, upload-time = "2026-01-02T09:13:29.892Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/1f/42/5c74462b4fd957fcd7b13b04fb3205ff8349236ea74c7c375766d6c82288/pillow-12.1.1.tar.gz", hash = "sha256:9ad8fa5937ab05218e2b6a4cff30295ad35afd2f83ac592e68c0d871bb0fdbc4", size = 46980264, upload-time = "2026-02-11T04:23:07.146Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/fe/41/f73d92b6b883a579e79600d391f2e21cb0df767b2714ecbd2952315dfeef/pillow-12.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:fb125d860738a09d363a88daa0f59c4533529a90e564785e20fe875b200b6dbd", size = 5304089, upload-time = "2026-01-02T09:10:24.953Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/94/55/7aca2891560188656e4a91ed9adba305e914a4496800da6b5c0a15f09edf/pillow-12.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cad302dc10fac357d3467a74a9561c90609768a6f73a1923b0fd851b6486f8b0", size = 4657815, upload-time = "2026-01-02T09:10:27.063Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e9/d2/b28221abaa7b4c40b7dba948f0f6a708bd7342c4d47ce342f0ea39643974/pillow-12.1.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a40905599d8079e09f25027423aed94f2823adaf2868940de991e53a449e14a8", size = 6222593, upload-time = "2026-01-02T09:10:29.115Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/71/b8/7a61fb234df6a9b0b479f69e66901209d89ff72a435b49933f9122f94cac/pillow-12.1.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:92a7fe4225365c5e3a8e598982269c6d6698d3e783b3b1ae979e7819f9cd55c1", size = 8027579, upload-time = "2026-01-02T09:10:31.182Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ea/51/55c751a57cc524a15a0e3db20e5cde517582359508d62305a627e77fd295/pillow-12.1.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f10c98f49227ed8383d28174ee95155a675c4ed7f85e2e573b04414f7e371bda", size = 6335760, upload-time = "2026-01-02T09:10:33.02Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dc/7c/60e3e6f5e5891a1a06b4c910f742ac862377a6fe842f7184df4a274ce7bf/pillow-12.1.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8637e29d13f478bc4f153d8daa9ffb16455f0a6cb287da1b432fdad2bfbd66c7", size = 7027127, upload-time = "2026-01-02T09:10:35.009Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/06/37/49d47266ba50b00c27ba63a7c898f1bb41a29627ced8c09e25f19ebec0ff/pillow-12.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:21e686a21078b0f9cb8c8a961d99e6a4ddb88e0fc5ea6e130172ddddc2e5221a", size = 6449896, upload-time = "2026-01-02T09:10:36.793Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/e5/67fd87d2913902462cd9b79c6211c25bfe95fcf5783d06e1367d6d9a741f/pillow-12.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2415373395a831f53933c23ce051021e79c8cd7979822d8cc478547a3f4da8ef", size = 7151345, upload-time = "2026-01-02T09:10:39.064Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/15/f8c7abf82af68b29f50d77c227e7a1f87ce02fdc66ded9bf603bc3b41180/pillow-12.1.0-cp310-cp310-win32.whl", hash = "sha256:e75d3dba8fc1ddfec0cd752108f93b83b4f8d6ab40e524a95d35f016b9683b09", size = 6325568, upload-time = "2026-01-02T09:10:41.035Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d4/24/7d1c0e160b6b5ac2605ef7d8be537e28753c0db5363d035948073f5513d7/pillow-12.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:64efdf00c09e31efd754448a383ea241f55a994fd079866b92d2bbff598aad91", size = 7032367, upload-time = "2026-01-02T09:10:43.09Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f4/03/41c038f0d7a06099254c60f618d0ec7be11e79620fc23b8e85e5b31d9a44/pillow-12.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:f188028b5af6b8fb2e9a76ac0f841a575bd1bd396e46ef0840d9b88a48fdbcea", size = 2452345, upload-time = "2026-01-02T09:10:44.795Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/43/c4/bf8328039de6cc22182c3ef007a2abfbbdab153661c0a9aa78af8d706391/pillow-12.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:a83e0850cb8f5ac975291ebfc4170ba481f41a28065277f7f735c202cd8e0af3", size = 5304057, upload-time = "2026-01-02T09:10:46.627Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/43/06/7264c0597e676104cc22ca73ee48f752767cd4b1fe084662620b17e10120/pillow-12.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b6e53e82ec2db0717eabb276aa56cf4e500c9a7cec2c2e189b55c24f65a3e8c0", size = 4657811, upload-time = "2026-01-02T09:10:49.548Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/72/64/f9189e44474610daf83da31145fa56710b627b5c4c0b9c235e34058f6b31/pillow-12.1.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:40a8e3b9e8773876d6e30daed22f016509e3987bab61b3b7fe309d7019a87451", size = 6232243, upload-time = "2026-01-02T09:10:51.62Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ef/30/0df458009be6a4caca4ca2c52975e6275c387d4e5c95544e34138b41dc86/pillow-12.1.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:800429ac32c9b72909c671aaf17ecd13110f823ddb7db4dfef412a5587c2c24e", size = 8037872, upload-time = "2026-01-02T09:10:53.446Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e4/86/95845d4eda4f4f9557e25381d70876aa213560243ac1a6d619c46caaedd9/pillow-12.1.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b022eaaf709541b391ee069f0022ee5b36c709df71986e3f7be312e46f42c84", size = 6345398, upload-time = "2026-01-02T09:10:55.426Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5c/1f/8e66ab9be3aaf1435bc03edd1ebdf58ffcd17f7349c1d970cafe87af27d9/pillow-12.1.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f345e7bc9d7f368887c712aa5054558bad44d2a301ddf9248599f4161abc7c0", size = 7034667, upload-time = "2026-01-02T09:10:57.11Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/f6/683b83cb9b1db1fb52b87951b1c0b99bdcfceaa75febf11406c19f82cb5e/pillow-12.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d70347c8a5b7ccd803ec0c85c8709f036e6348f1e6a5bf048ecd9c64d3550b8b", size = 6458743, upload-time = "2026-01-02T09:10:59.331Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9a/7d/de833d63622538c1d58ce5395e7c6cb7e7dce80decdd8bde4a484e095d9f/pillow-12.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1fcc52d86ce7a34fd17cb04e87cfdb164648a3662a6f20565910a99653d66c18", size = 7159342, upload-time = "2026-01-02T09:11:01.82Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8c/40/50d86571c9e5868c42b81fe7da0c76ca26373f3b95a8dd675425f4a92ec1/pillow-12.1.0-cp311-cp311-win32.whl", hash = "sha256:3ffaa2f0659e2f740473bcf03c702c39a8d4b2b7ffc629052028764324842c64", size = 6328655, upload-time = "2026-01-02T09:11:04.556Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/af/b1d7e301c4cd26cd45d4af884d9ee9b6fab893b0ad2450d4746d74a6968c/pillow-12.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:806f3987ffe10e867bab0ddad45df1148a2b98221798457fa097ad85d6e8bc75", size = 7031469, upload-time = "2026-01-02T09:11:06.538Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/48/36/d5716586d887fb2a810a4a61518a327a1e21c8b7134c89283af272efe84b/pillow-12.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:9f5fefaca968e700ad1a4a9de98bf0869a94e397fe3524c4c9450c1445252304", size = 2452515, upload-time = "2026-01-02T09:11:08.226Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/20/31/dc53fe21a2f2996e1b7d92bf671cdb157079385183ef7c1ae08b485db510/pillow-12.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a332ac4ccb84b6dde65dbace8431f3af08874bf9770719d32a635c4ef411b18b", size = 5262642, upload-time = "2026-01-02T09:11:10.138Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ab/c1/10e45ac9cc79419cedf5121b42dcca5a50ad2b601fa080f58c22fb27626e/pillow-12.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:907bfa8a9cb790748a9aa4513e37c88c59660da3bcfffbd24a7d9e6abf224551", size = 4657464, upload-time = "2026-01-02T09:11:12.319Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ad/26/7b82c0ab7ef40ebede7a97c72d473bda5950f609f8e0c77b04af574a0ddb/pillow-12.1.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:efdc140e7b63b8f739d09a99033aa430accce485ff78e6d311973a67b6bf3208", size = 6234878, upload-time = "2026-01-02T09:11:14.096Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/76/25/27abc9792615b5e886ca9411ba6637b675f1b77af3104710ac7353fe5605/pillow-12.1.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bef9768cab184e7ae6e559c032e95ba8d07b3023c289f79a2bd36e8bf85605a5", size = 8044868, upload-time = "2026-01-02T09:11:15.903Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0a/ea/f200a4c36d836100e7bc738fc48cd963d3ba6372ebc8298a889e0cfc3359/pillow-12.1.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:742aea052cf5ab5034a53c3846165bc3ce88d7c38e954120db0ab867ca242661", size = 6349468, upload-time = "2026-01-02T09:11:17.631Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/11/8f/48d0b77ab2200374c66d344459b8958c86693be99526450e7aee714e03e4/pillow-12.1.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a6dfc2af5b082b635af6e08e0d1f9f1c4e04d17d4e2ca0ef96131e85eda6eb17", size = 7041518, upload-time = "2026-01-02T09:11:19.389Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1d/23/c281182eb986b5d31f0a76d2a2c8cd41722d6fb8ed07521e802f9bba52de/pillow-12.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:609e89d9f90b581c8d16358c9087df76024cf058fa693dd3e1e1620823f39670", size = 6462829, upload-time = "2026-01-02T09:11:21.28Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/25/ef/7018273e0faac099d7b00982abdcc39142ae6f3bd9ceb06de09779c4a9d6/pillow-12.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:43b4899cfd091a9693a1278c4982f3e50f7fb7cff5153b05174b4afc9593b616", size = 7166756, upload-time = "2026-01-02T09:11:23.559Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8f/c8/993d4b7ab2e341fe02ceef9576afcf5830cdec640be2ac5bee1820d693d4/pillow-12.1.0-cp312-cp312-win32.whl", hash = "sha256:aa0c9cc0b82b14766a99fbe6084409972266e82f459821cd26997a488a7261a7", size = 6328770, upload-time = "2026-01-02T09:11:25.661Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/87/90b358775a3f02765d87655237229ba64a997b87efa8ccaca7dd3e36e7a7/pillow-12.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:d70534cea9e7966169ad29a903b99fc507e932069a881d0965a1a84bb57f6c6d", size = 7033406, upload-time = "2026-01-02T09:11:27.474Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/cf/881b457eccacac9e5b2ddd97d5071fb6d668307c57cbf4e3b5278e06e536/pillow-12.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:65b80c1ee7e14a87d6a068dd3b0aea268ffcabfe0498d38661b00c5b4b22e74c", size = 2452612, upload-time = "2026-01-02T09:11:29.309Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dd/c7/2530a4aa28248623e9d7f27316b42e27c32ec410f695929696f2e0e4a778/pillow-12.1.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:7b5dd7cbae20285cdb597b10eb5a2c13aa9de6cde9bb64a3c1317427b1db1ae1", size = 4062543, upload-time = "2026-01-02T09:11:31.566Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8f/1f/40b8eae823dc1519b87d53c30ed9ef085506b05281d313031755c1705f73/pillow-12.1.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:29a4cef9cb672363926f0470afc516dbf7305a14d8c54f7abbb5c199cd8f8179", size = 4138373, upload-time = "2026-01-02T09:11:33.367Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d4/77/6fa60634cf06e52139fd0e89e5bbf055e8166c691c42fb162818b7fda31d/pillow-12.1.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:681088909d7e8fa9e31b9799aaa59ba5234c58e5e4f1951b4c4d1082a2e980e0", size = 3601241, upload-time = "2026-01-02T09:11:35.011Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4f/bf/28ab865de622e14b747f0cd7877510848252d950e43002e224fb1c9ababf/pillow-12.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:983976c2ab753166dc66d36af6e8ec15bb511e4a25856e2227e5f7e00a160587", size = 5262410, upload-time = "2026-01-02T09:11:36.682Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/34/583420a1b55e715937a85bd48c5c0991598247a1fd2eb5423188e765ea02/pillow-12.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:db44d5c160a90df2d24a24760bbd37607d53da0b34fb546c4c232af7192298ac", size = 4657312, upload-time = "2026-01-02T09:11:38.535Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1d/fd/f5a0896839762885b3376ff04878f86ab2b097c2f9a9cdccf4eda8ba8dc0/pillow-12.1.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6b7a9d1db5dad90e2991645874f708e87d9a3c370c243c2d7684d28f7e133e6b", size = 6232605, upload-time = "2026-01-02T09:11:40.602Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/aa/938a09d127ac1e70e6ed467bd03834350b33ef646b31edb7452d5de43792/pillow-12.1.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6258f3260986990ba2fa8a874f8b6e808cf5abb51a94015ca3dc3c68aa4f30ea", size = 8041617, upload-time = "2026-01-02T09:11:42.721Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/17/e8/538b24cb426ac0186e03f80f78bc8dc7246c667f58b540bdd57c71c9f79d/pillow-12.1.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e115c15e3bc727b1ca3e641a909f77f8ca72a64fff150f666fcc85e57701c26c", size = 6346509, upload-time = "2026-01-02T09:11:44.955Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/01/9a/632e58ec89a32738cabfd9ec418f0e9898a2b4719afc581f07c04a05e3c9/pillow-12.1.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6741e6f3074a35e47c77b23a4e4f2d90db3ed905cb1c5e6e0d49bff2045632bc", size = 7038117, upload-time = "2026-01-02T09:11:46.736Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c7/a2/d40308cf86eada842ca1f3ffa45d0ca0df7e4ab33c83f81e73f5eaed136d/pillow-12.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:935b9d1aed48fcfb3f838caac506f38e29621b44ccc4f8a64d575cb1b2a88644", size = 6460151, upload-time = "2026-01-02T09:11:48.625Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f1/88/f5b058ad6453a085c5266660a1417bdad590199da1b32fb4efcff9d33b05/pillow-12.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5fee4c04aad8932da9f8f710af2c1a15a83582cfb884152a9caa79d4efcdbf9c", size = 7164534, upload-time = "2026-01-02T09:11:50.445Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/ce/c17334caea1db789163b5d855a5735e47995b0b5dc8745e9a3605d5f24c0/pillow-12.1.0-cp313-cp313-win32.whl", hash = "sha256:a786bf667724d84aa29b5db1c61b7bfdde380202aaca12c3461afd6b71743171", size = 6332551, upload-time = "2026-01-02T09:11:52.234Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e5/07/74a9d941fa45c90a0d9465098fe1ec85de3e2afbdc15cc4766622d516056/pillow-12.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:461f9dfdafa394c59cd6d818bdfdbab4028b83b02caadaff0ffd433faf4c9a7a", size = 7040087, upload-time = "2026-01-02T09:11:54.822Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/88/09/c99950c075a0e9053d8e880595926302575bc742b1b47fe1bbcc8d388d50/pillow-12.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:9212d6b86917a2300669511ed094a9406888362e085f2431a7da985a6b124f45", size = 2452470, upload-time = "2026-01-02T09:11:56.522Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b5/ba/970b7d85ba01f348dee4d65412476321d40ee04dcb51cd3735b9dc94eb58/pillow-12.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:00162e9ca6d22b7c3ee8e61faa3c3253cd19b6a37f126cad04f2f88b306f557d", size = 5264816, upload-time = "2026-01-02T09:11:58.227Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/10/60/650f2fb55fdba7a510d836202aa52f0baac633e50ab1cf18415d332188fb/pillow-12.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7d6daa89a00b58c37cb1747ec9fb7ac3bc5ffd5949f5888657dfddde6d1312e0", size = 4660472, upload-time = "2026-01-02T09:12:00.798Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2b/c0/5273a99478956a099d533c4f46cbaa19fd69d606624f4334b85e50987a08/pillow-12.1.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e2479c7f02f9d505682dc47df8c0ea1fc5e264c4d1629a5d63fe3e2334b89554", size = 6268974, upload-time = "2026-01-02T09:12:02.572Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b4/26/0bf714bc2e73d5267887d47931d53c4ceeceea6978148ed2ab2a4e6463c4/pillow-12.1.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f188d580bd870cda1e15183790d1cc2fa78f666e76077d103edf048eed9c356e", size = 8073070, upload-time = "2026-01-02T09:12:04.75Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/43/cf/1ea826200de111a9d65724c54f927f3111dc5ae297f294b370a670c17786/pillow-12.1.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0fde7ec5538ab5095cc02df38ee99b0443ff0e1c847a045554cf5f9af1f4aa82", size = 6380176, upload-time = "2026-01-02T09:12:06.626Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/03/e0/7938dd2b2013373fd85d96e0f38d62b7a5a262af21ac274250c7ca7847c9/pillow-12.1.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0ed07dca4a8464bada6139ab38f5382f83e5f111698caf3191cb8dbf27d908b4", size = 7067061, upload-time = "2026-01-02T09:12:08.624Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/86/ad/a2aa97d37272a929a98437a8c0ac37b3cf012f4f8721e1bd5154699b2518/pillow-12.1.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f45bd71d1fa5e5749587613037b172e0b3b23159d1c00ef2fc920da6f470e6f0", size = 6491824, upload-time = "2026-01-02T09:12:10.488Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a4/44/80e46611b288d51b115826f136fb3465653c28f491068a72d3da49b54cd4/pillow-12.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:277518bf4fe74aa91489e1b20577473b19ee70fb97c374aa50830b279f25841b", size = 7190911, upload-time = "2026-01-02T09:12:12.772Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/86/77/eacc62356b4cf81abe99ff9dbc7402750044aed02cfd6a503f7c6fc11f3e/pillow-12.1.0-cp313-cp313t-win32.whl", hash = "sha256:7315f9137087c4e0ee73a761b163fc9aa3b19f5f606a7fc08d83fd3e4379af65", size = 6336445, upload-time = "2026-01-02T09:12:14.775Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/3c/57d81d0b74d218706dafccb87a87ea44262c43eef98eb3b164fd000e0491/pillow-12.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:0ddedfaa8b5f0b4ffbc2fa87b556dc59f6bb4ecb14a53b33f9189713ae8053c0", size = 7045354, upload-time = "2026-01-02T09:12:16.599Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ac/82/8b9b97bba2e3576a340f93b044a3a3a09841170ab4c1eb0d5c93469fd32f/pillow-12.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:80941e6d573197a0c28f394753de529bb436b1ca990ed6e765cf42426abc39f8", size = 2454547, upload-time = "2026-01-02T09:12:18.704Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8c/87/bdf971d8bbcf80a348cc3bacfcb239f5882100fe80534b0ce67a784181d8/pillow-12.1.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:5cb7bc1966d031aec37ddb9dcf15c2da5b2e9f7cc3ca7c54473a20a927e1eb91", size = 4062533, upload-time = "2026-01-02T09:12:20.791Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/4f/5eb37a681c68d605eb7034c004875c81f86ec9ef51f5be4a63eadd58859a/pillow-12.1.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:97e9993d5ed946aba26baf9c1e8cf18adbab584b99f452ee72f7ee8acb882796", size = 4138546, upload-time = "2026-01-02T09:12:23.664Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/11/6d/19a95acb2edbace40dcd582d077b991646b7083c41b98da4ed7555b59733/pillow-12.1.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:414b9a78e14ffeb98128863314e62c3f24b8a86081066625700b7985b3f529bd", size = 3601163, upload-time = "2026-01-02T09:12:26.338Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/36/2b8138e51cb42e4cc39c3297713455548be855a50558c3ac2beebdc251dd/pillow-12.1.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e6bdb408f7c9dd2a5ff2b14a3b0bb6d4deb29fb9961e6eb3ae2031ae9a5cec13", size = 5266086, upload-time = "2026-01-02T09:12:28.782Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/53/4b/649056e4d22e1caa90816bf99cef0884aed607ed38075bd75f091a607a38/pillow-12.1.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3413c2ae377550f5487991d444428f1a8ae92784aac79caa8b1e3b89b175f77e", size = 4657344, upload-time = "2026-01-02T09:12:31.117Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/6b/c5742cea0f1ade0cd61485dc3d81f05261fc2276f537fbdc00802de56779/pillow-12.1.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e5dcbe95016e88437ecf33544ba5db21ef1b8dd6e1b434a2cb2a3d605299e643", size = 6232114, upload-time = "2026-01-02T09:12:32.936Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bf/8f/9f521268ce22d63991601aafd3d48d5ff7280a246a1ef62d626d67b44064/pillow-12.1.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d0a7735df32ccbcc98b98a1ac785cc4b19b580be1bdf0aeb5c03223220ea09d5", size = 8042708, upload-time = "2026-01-02T09:12:34.78Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/eb/257f38542893f021502a1bbe0c2e883c90b5cff26cc33b1584a841a06d30/pillow-12.1.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c27407a2d1b96774cbc4a7594129cc027339fd800cd081e44497722ea1179de", size = 6347762, upload-time = "2026-01-02T09:12:36.748Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/5a/8ba375025701c09b309e8d5163c5a4ce0102fa86bbf8800eb0d7ac87bc51/pillow-12.1.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15c794d74303828eaa957ff8070846d0efe8c630901a1c753fdc63850e19ecd9", size = 7039265, upload-time = "2026-01-02T09:12:39.082Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cf/dc/cf5e4cdb3db533f539e88a7bbf9f190c64ab8a08a9bc7a4ccf55067872e4/pillow-12.1.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c990547452ee2800d8506c4150280757f88532f3de2a58e3022e9b179107862a", size = 6462341, upload-time = "2026-01-02T09:12:40.946Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d0/47/0291a25ac9550677e22eda48510cfc4fa4b2ef0396448b7fbdc0a6946309/pillow-12.1.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b63e13dd27da389ed9475b3d28510f0f954bca0041e8e551b2a4eb1eab56a39a", size = 7165395, upload-time = "2026-01-02T09:12:42.706Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4f/4c/e005a59393ec4d9416be06e6b45820403bb946a778e39ecec62f5b2b991e/pillow-12.1.0-cp314-cp314-win32.whl", hash = "sha256:1a949604f73eb07a8adab38c4fe50791f9919344398bdc8ac6b307f755fc7030", size = 6431413, upload-time = "2026-01-02T09:12:44.944Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/af/f23697f587ac5f9095d67e31b81c95c0249cd461a9798a061ed6709b09b5/pillow-12.1.0-cp314-cp314-win_amd64.whl", hash = "sha256:4f9f6a650743f0ddee5593ac9e954ba1bdbc5e150bc066586d4f26127853ab94", size = 7176779, upload-time = "2026-01-02T09:12:46.727Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b3/36/6a51abf8599232f3e9afbd16d52829376a68909fe14efe29084445db4b73/pillow-12.1.0-cp314-cp314-win_arm64.whl", hash = "sha256:808b99604f7873c800c4840f55ff389936ef1948e4e87645eaf3fccbc8477ac4", size = 2543105, upload-time = "2026-01-02T09:12:49.243Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/82/54/2e1dd20c8749ff225080d6ba465a0cab4387f5db0d1c5fb1439e2d99923f/pillow-12.1.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:bc11908616c8a283cf7d664f77411a5ed2a02009b0097ff8abbba5e79128ccf2", size = 5268571, upload-time = "2026-01-02T09:12:51.11Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/57/61/571163a5ef86ec0cf30d265ac2a70ae6fc9e28413d1dc94fa37fae6bda89/pillow-12.1.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:896866d2d436563fa2a43a9d72f417874f16b5545955c54a64941e87c1376c61", size = 4660426, upload-time = "2026-01-02T09:12:52.865Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5e/e1/53ee5163f794aef1bf84243f755ee6897a92c708505350dd1923f4afec48/pillow-12.1.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8e178e3e99d3c0ea8fc64b88447f7cac8ccf058af422a6cedc690d0eadd98c51", size = 6269908, upload-time = "2026-01-02T09:12:54.884Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bc/0b/b4b4106ff0ee1afa1dc599fde6ab230417f800279745124f6c50bcffed8e/pillow-12.1.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:079af2fb0c599c2ec144ba2c02766d1b55498e373b3ac64687e43849fbbef5bc", size = 8074733, upload-time = "2026-01-02T09:12:56.802Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/9f/80b411cbac4a732439e629a26ad3ef11907a8c7fc5377b7602f04f6fe4e7/pillow-12.1.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bdec5e43377761c5dbca620efb69a77f6855c5a379e32ac5b158f54c84212b14", size = 6381431, upload-time = "2026-01-02T09:12:58.823Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8f/b7/d65c45db463b66ecb6abc17c6ba6917a911202a07662247e1355ce1789e7/pillow-12.1.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:565c986f4b45c020f5421a4cea13ef294dde9509a8577f29b2fc5edc7587fff8", size = 7068529, upload-time = "2026-01-02T09:13:00.885Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/50/96/dfd4cd726b4a45ae6e3c669fc9e49deb2241312605d33aba50499e9d9bd1/pillow-12.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:43aca0a55ce1eefc0aefa6253661cb54571857b1a7b2964bd8a1e3ef4b729924", size = 6492981, upload-time = "2026-01-02T09:13:03.314Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4d/1c/b5dc52cf713ae46033359c5ca920444f18a6359ce1020dd3e9c553ea5bc6/pillow-12.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0deedf2ea233722476b3a81e8cdfbad786f7adbed5d848469fa59fe52396e4ef", size = 7191878, upload-time = "2026-01-02T09:13:05.276Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/53/26/c4188248bd5edaf543864fe4834aebe9c9cb4968b6f573ce014cc42d0720/pillow-12.1.0-cp314-cp314t-win32.whl", hash = "sha256:b17fbdbe01c196e7e159aacb889e091f28e61020a8abeac07b68079b6e626988", size = 6438703, upload-time = "2026-01-02T09:13:07.491Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b8/0e/69ed296de8ea05cb03ee139cee600f424ca166e632567b2d66727f08c7ed/pillow-12.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27b9baecb428899db6c0de572d6d305cfaf38ca1596b5c0542a5182e3e74e8c6", size = 7182927, upload-time = "2026-01-02T09:13:09.841Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/f5/68334c015eed9b5cff77814258717dec591ded209ab5b6fb70e2ae873d1d/pillow-12.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:f61333d817698bdcdd0f9d7793e365ac3d2a21c1f1eb02b32ad6aefb8d8ea831", size = 2545104, upload-time = "2026-01-02T09:13:12.068Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8b/bc/224b1d98cffd7164b14707c91aac83c07b047fbd8f58eba4066a3e53746a/pillow-12.1.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ca94b6aac0d7af2a10ba08c0f888b3d5114439b6b3ef39968378723622fed377", size = 5228605, upload-time = "2026-01-02T09:13:14.084Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0c/ca/49ca7769c4550107de049ed85208240ba0f330b3f2e316f24534795702ce/pillow-12.1.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:351889afef0f485b84078ea40fe33727a0492b9af3904661b0abbafee0355b72", size = 4622245, upload-time = "2026-01-02T09:13:15.964Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/73/48/fac807ce82e5955bcc2718642b94b1bd22a82a6d452aea31cbb678cddf12/pillow-12.1.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb0984b30e973f7e2884362b7d23d0a348c7143ee559f38ef3eaab640144204c", size = 5247593, upload-time = "2026-01-02T09:13:17.913Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d2/95/3e0742fe358c4664aed4fd05d5f5373dcdad0b27af52aa0972568541e3f4/pillow-12.1.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:84cabc7095dd535ca934d57e9ce2a72ffd216e435a84acb06b2277b1de2689bd", size = 6989008, upload-time = "2026-01-02T09:13:20.083Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/74/fe2ac378e4e202e56d50540d92e1ef4ff34ed687f3c60f6a121bcf99437e/pillow-12.1.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:53d8b764726d3af1a138dd353116f774e3862ec7e3794e0c8781e30db0f35dfc", size = 5313824, upload-time = "2026-01-02T09:13:22.405Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f3/77/2a60dee1adee4e2655ac328dd05c02a955c1cd683b9f1b82ec3feb44727c/pillow-12.1.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5da841d81b1a05ef940a8567da92decaa15bc4d7dedb540a8c219ad83d91808a", size = 5963278, upload-time = "2026-01-02T09:13:24.706Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2d/71/64e9b1c7f04ae0027f788a248e6297d7fcc29571371fe7d45495a78172c0/pillow-12.1.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:75af0b4c229ac519b155028fa1be632d812a519abba9b46b20e50c6caa184f19", size = 7029809, upload-time = "2026-01-02T09:13:26.541Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1d/30/5bd3d794762481f8c8ae9c80e7b76ecea73b916959eb587521358ef0b2f9/pillow-12.1.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f1625b72740fdda5d77b4def688eb8fd6490975d06b909fd19f13f391e077e0", size = 5304099, upload-time = "2026-02-11T04:20:06.13Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/c1/aab9e8f3eeb4490180e357955e15c2ef74b31f64790ff356c06fb6cf6d84/pillow-12.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:178aa072084bd88ec759052feca8e56cbb14a60b39322b99a049e58090479713", size = 4657880, upload-time = "2026-02-11T04:20:09.291Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f1/0a/9879e30d56815ad529d3985aeff5af4964202425c27261a6ada10f7cbf53/pillow-12.1.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b66e95d05ba806247aaa1561f080abc7975daf715c30780ff92a20e4ec546e1b", size = 6222587, upload-time = "2026-02-11T04:20:10.82Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/5f/a1b72ff7139e4f89014e8d451442c74a774d5c43cd938fb0a9f878576b37/pillow-12.1.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:89c7e895002bbe49cdc5426150377cbbc04767d7547ed145473f496dfa40408b", size = 8027678, upload-time = "2026-02-11T04:20:12.455Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e2/c2/c7cb187dac79a3d22c3ebeae727abee01e077c8c7d930791dc592f335153/pillow-12.1.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a5cbdcddad0af3da87cb16b60d23648bc3b51967eb07223e9fed77a82b457c4", size = 6335777, upload-time = "2026-02-11T04:20:14.441Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0c/7b/f9b09a7804ec7336effb96c26d37c29d27225783dc1501b7d62dcef6ae25/pillow-12.1.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9f51079765661884a486727f0729d29054242f74b46186026582b4e4769918e4", size = 7027140, upload-time = "2026-02-11T04:20:16.387Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/b2/2fa3c391550bd421b10849d1a2144c44abcd966daadd2f7c12e19ea988c4/pillow-12.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:99c1506ea77c11531d75e3a412832a13a71c7ebc8192ab9e4b2e355555920e3e", size = 6449855, upload-time = "2026-02-11T04:20:18.554Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/96/ff/9caf4b5b950c669263c39e96c78c0d74a342c71c4f43fd031bb5cb7ceac9/pillow-12.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:36341d06738a9f66c8287cf8b876d24b18db9bd8740fa0672c74e259ad408cff", size = 7151329, upload-time = "2026-02-11T04:20:20.646Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7b/f8/4b24841f582704da675ca535935bccb32b00a6da1226820845fac4a71136/pillow-12.1.1-cp310-cp310-win32.whl", hash = "sha256:6c52f062424c523d6c4db85518774cc3d50f5539dd6eed32b8f6229b26f24d40", size = 6325574, upload-time = "2026-02-11T04:20:22.43Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f8/f9/9f6b01c0881d7036063aa6612ef04c0e2cad96be21325a1e92d0203f8e91/pillow-12.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:c6008de247150668a705a6338156efb92334113421ceecf7438a12c9a12dab23", size = 7032347, upload-time = "2026-02-11T04:20:23.932Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/79/13/c7922edded3dcdaf10c59297540b72785620abc0538872c819915746757d/pillow-12.1.1-cp310-cp310-win_arm64.whl", hash = "sha256:1a9b0ee305220b392e1124a764ee4265bd063e54a751a6b62eff69992f457fa9", size = 2453457, upload-time = "2026-02-11T04:20:25.392Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2b/46/5da1ec4a5171ee7bf1a0efa064aba70ba3d6e0788ce3f5acd1375d23c8c0/pillow-12.1.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e879bb6cd5c73848ef3b2b48b8af9ff08c5b71ecda8048b7dd22d8a33f60be32", size = 5304084, upload-time = "2026-02-11T04:20:27.501Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/78/93/a29e9bc02d1cf557a834da780ceccd54e02421627200696fcf805ebdc3fb/pillow-12.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:365b10bb9417dd4498c0e3b128018c4a624dc11c7b97d8cc54effe3b096f4c38", size = 4657866, upload-time = "2026-02-11T04:20:29.827Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/13/84/583a4558d492a179d31e4aae32eadce94b9acf49c0337c4ce0b70e0a01f2/pillow-12.1.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d4ce8e329c93845720cd2014659ca67eac35f6433fd3050393d85f3ecef0dad5", size = 6232148, upload-time = "2026-02-11T04:20:31.329Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/e2/53c43334bbbb2d3b938978532fbda8e62bb6e0b23a26ce8592f36bcc4987/pillow-12.1.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc354a04072b765eccf2204f588a7a532c9511e8b9c7f900e1b64e3e33487090", size = 8038007, upload-time = "2026-02-11T04:20:34.225Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b8/a6/3d0e79c8a9d58150dd98e199d7c1c56861027f3829a3a60b3c2784190180/pillow-12.1.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e7976bf1910a8116b523b9f9f58bf410f3e8aa330cd9a2bb2953f9266ab49af", size = 6345418, upload-time = "2026-02-11T04:20:35.858Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a2/c8/46dfeac5825e600579157eea177be43e2f7ff4a99da9d0d0a49533509ac5/pillow-12.1.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:597bd9c8419bc7c6af5604e55847789b69123bbe25d65cc6ad3012b4f3c98d8b", size = 7034590, upload-time = "2026-02-11T04:20:37.91Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/af/bf/e6f65d3db8a8bbfeaf9e13cc0417813f6319863a73de934f14b2229ada18/pillow-12.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2c1fc0f2ca5f96a3c8407e41cca26a16e46b21060fe6d5b099d2cb01412222f5", size = 6458655, upload-time = "2026-02-11T04:20:39.496Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/c2/66091f3f34a25894ca129362e510b956ef26f8fb67a0e6417bc5744e56f1/pillow-12.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:578510d88c6229d735855e1f278aa305270438d36a05031dfaae5067cc8eb04d", size = 7159286, upload-time = "2026-02-11T04:20:41.139Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7b/5a/24bc8eb526a22f957d0cec6243146744966d40857e3d8deb68f7902ca6c1/pillow-12.1.1-cp311-cp311-win32.whl", hash = "sha256:7311c0a0dcadb89b36b7025dfd8326ecfa36964e29913074d47382706e516a7c", size = 6328663, upload-time = "2026-02-11T04:20:43.184Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/31/03/bef822e4f2d8f9d7448c133d0a18185d3cce3e70472774fffefe8b0ed562/pillow-12.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:fbfa2a7c10cc2623f412753cddf391c7f971c52ca40a3f65dc5039b2939e8563", size = 7031448, upload-time = "2026-02-11T04:20:44.696Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/49/70/f76296f53610bd17b2e7d31728b8b7825e3ac3b5b3688b51f52eab7c0818/pillow-12.1.1-cp311-cp311-win_arm64.whl", hash = "sha256:b81b5e3511211631b3f672a595e3221252c90af017e399056d0faabb9538aa80", size = 2453651, upload-time = "2026-02-11T04:20:46.243Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/07/d3/8df65da0d4df36b094351dce696f2989bec731d4f10e743b1c5f4da4d3bf/pillow-12.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab323b787d6e18b3d91a72fc99b1a2c28651e4358749842b8f8dfacd28ef2052", size = 5262803, upload-time = "2026-02-11T04:20:47.653Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/71/5026395b290ff404b836e636f51d7297e6c83beceaa87c592718747e670f/pillow-12.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adebb5bee0f0af4909c30db0d890c773d1a92ffe83da908e2e9e720f8edf3984", size = 4657601, upload-time = "2026-02-11T04:20:49.328Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b1/2e/1001613d941c67442f745aff0f7cc66dd8df9a9c084eb497e6a543ee6f7e/pillow-12.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb66b7cc26f50977108790e2456b7921e773f23db5630261102233eb355a3b79", size = 6234995, upload-time = "2026-02-11T04:20:51.032Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/07/26/246ab11455b2549b9233dbd44d358d033a2f780fa9007b61a913c5b2d24e/pillow-12.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aee2810642b2898bb187ced9b349e95d2a7272930796e022efaf12e99dccd293", size = 8045012, upload-time = "2026-02-11T04:20:52.882Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b2/8b/07587069c27be7535ac1fe33874e32de118fbd34e2a73b7f83436a88368c/pillow-12.1.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a0b1cd6232e2b618adcc54d9882e4e662a089d5768cd188f7c245b4c8c44a397", size = 6349638, upload-time = "2026-02-11T04:20:54.444Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/79/6df7b2ee763d619cda2fb4fea498e5f79d984dae304d45a8999b80d6cf5c/pillow-12.1.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7aac39bcf8d4770d089588a2e1dd111cbaa42df5a94be3114222057d68336bd0", size = 7041540, upload-time = "2026-02-11T04:20:55.97Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2c/5e/2ba19e7e7236d7529f4d873bdaf317a318896bac289abebd4bb00ef247f0/pillow-12.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ab174cd7d29a62dd139c44bf74b698039328f45cb03b4596c43473a46656b2f3", size = 6462613, upload-time = "2026-02-11T04:20:57.542Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/03/03/31216ec124bb5c3dacd74ce8efff4cc7f52643653bad4825f8f08c697743/pillow-12.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:339ffdcb7cbeaa08221cd401d517d4b1fe7a9ed5d400e4a8039719238620ca35", size = 7166745, upload-time = "2026-02-11T04:20:59.196Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1f/e7/7c4552d80052337eb28653b617eafdef39adfb137c49dd7e831b8dc13bc5/pillow-12.1.1-cp312-cp312-win32.whl", hash = "sha256:5d1f9575a12bed9e9eedd9a4972834b08c97a352bd17955ccdebfeca5913fa0a", size = 6328823, upload-time = "2026-02-11T04:21:01.385Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3d/17/688626d192d7261bbbf98846fc98995726bddc2c945344b65bec3a29d731/pillow-12.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:21329ec8c96c6e979cd0dfd29406c40c1d52521a90544463057d2aaa937d66a6", size = 7033367, upload-time = "2026-02-11T04:21:03.536Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ed/fe/a0ef1f73f939b0eca03ee2c108d0043a87468664770612602c63266a43c4/pillow-12.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:af9a332e572978f0218686636610555ae3defd1633597be015ed50289a03c523", size = 2453811, upload-time = "2026-02-11T04:21:05.116Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/11/6db24d4bd7685583caeae54b7009584e38da3c3d4488ed4cd25b439de486/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:d242e8ac078781f1de88bf823d70c1a9b3c7950a44cdf4b7c012e22ccbcd8e4e", size = 4062689, upload-time = "2026-02-11T04:21:06.804Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/33/c0/ce6d3b1fe190f0021203e0d9b5b99e57843e345f15f9ef22fcd43842fd21/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:02f84dfad02693676692746df05b89cf25597560db2857363a208e393429f5e9", size = 4138535, upload-time = "2026-02-11T04:21:08.452Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a0/c6/d5eb6a4fb32a3f9c21a8c7613ec706534ea1cf9f4b3663e99f0d83f6fca8/pillow-12.1.1-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:e65498daf4b583091ccbb2556c7000abf0f3349fcd57ef7adc9a84a394ed29f6", size = 3601364, upload-time = "2026-02-11T04:21:10.194Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/14/a1/16c4b823838ba4c9c52c0e6bbda903a3fe5a1bdbf1b8eb4fff7156f3e318/pillow-12.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c6db3b84c87d48d0088943bf33440e0c42370b99b1c2a7989216f7b42eede60", size = 5262561, upload-time = "2026-02-11T04:21:11.742Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bb/ad/ad9dc98ff24f485008aa5cdedaf1a219876f6f6c42a4626c08bc4e80b120/pillow-12.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b7e5304e34942bf62e15184219a7b5ad4ff7f3bb5cca4d984f37df1a0e1aee2", size = 4657460, upload-time = "2026-02-11T04:21:13.786Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9e/1b/f1a4ea9a895b5732152789326202a82464d5254759fbacae4deea3069334/pillow-12.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5bddd742a44b7e6b1e773ab5db102bd7a94c32555ba656e76d319d19c3850", size = 6232698, upload-time = "2026-02-11T04:21:15.949Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/95/f4/86f51b8745070daf21fd2e5b1fe0eb35d4db9ca26e6d58366562fb56a743/pillow-12.1.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc44ef1f3de4f45b50ccf9136999d71abb99dca7706bc75d222ed350b9fd2289", size = 8041706, upload-time = "2026-02-11T04:21:17.723Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/29/9b/d6ecd956bb1266dd1045e995cce9b8d77759e740953a1c9aad9502a0461e/pillow-12.1.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a8eb7ed8d4198bccbd07058416eeec51686b498e784eda166395a23eb99138e", size = 6346621, upload-time = "2026-02-11T04:21:19.547Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/71/24/538bff45bde96535d7d998c6fed1a751c75ac7c53c37c90dc2601b243893/pillow-12.1.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47b94983da0c642de92ced1702c5b6c292a84bd3a8e1d1702ff923f183594717", size = 7038069, upload-time = "2026-02-11T04:21:21.378Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/94/0e/58cb1a6bc48f746bc4cb3adb8cabff73e2742c92b3bf7a220b7cf69b9177/pillow-12.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:518a48c2aab7ce596d3bf79d0e275661b846e86e4d0e7dec34712c30fe07f02a", size = 6460040, upload-time = "2026-02-11T04:21:23.148Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/57/9045cb3ff11eeb6c1adce3b2d60d7d299d7b273a2e6c8381a524abfdc474/pillow-12.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a550ae29b95c6dc13cf69e2c9dc5747f814c54eeb2e32d683e5e93af56caa029", size = 7164523, upload-time = "2026-02-11T04:21:25.01Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/73/f2/9be9cb99f2175f0d4dbadd6616ce1bf068ee54a28277ea1bf1fbf729c250/pillow-12.1.1-cp313-cp313-win32.whl", hash = "sha256:a003d7422449f6d1e3a34e3dd4110c22148336918ddbfc6a32581cd54b2e0b2b", size = 6332552, upload-time = "2026-02-11T04:21:27.238Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3f/eb/b0834ad8b583d7d9d42b80becff092082a1c3c156bb582590fcc973f1c7c/pillow-12.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:344cf1e3dab3be4b1fa08e449323d98a2a3f819ad20f4b22e77a0ede31f0faa1", size = 7040108, upload-time = "2026-02-11T04:21:29.462Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/7d/fc09634e2aabdd0feabaff4a32f4a7d97789223e7c2042fd805ea4b4d2c2/pillow-12.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:5c0dd1636633e7e6a0afe7bf6a51a14992b7f8e60de5789018ebbdfae55b040a", size = 2453712, upload-time = "2026-02-11T04:21:31.072Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/2a/b9d62794fc8a0dd14c1943df68347badbd5511103e0d04c035ffe5cf2255/pillow-12.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0330d233c1a0ead844fc097a7d16c0abff4c12e856c0b325f231820fee1f39da", size = 5264880, upload-time = "2026-02-11T04:21:32.865Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/26/9d/e03d857d1347fa5ed9247e123fcd2a97b6220e15e9cb73ca0a8d91702c6e/pillow-12.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dae5f21afb91322f2ff791895ddd8889e5e947ff59f71b46041c8ce6db790bc", size = 4660616, upload-time = "2026-02-11T04:21:34.97Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f7/ec/8a6d22afd02570d30954e043f09c32772bfe143ba9285e2fdb11284952cd/pillow-12.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e0c664be47252947d870ac0d327fea7e63985a08794758aa8af5b6cb6ec0c9c", size = 6269008, upload-time = "2026-02-11T04:21:36.623Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3d/1d/6d875422c9f28a4a361f495a5f68d9de4a66941dc2c619103ca335fa6446/pillow-12.1.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:691ab2ac363b8217f7d31b3497108fb1f50faab2f75dfb03284ec2f217e87bf8", size = 8073226, upload-time = "2026-02-11T04:21:38.585Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a1/cd/134b0b6ee5eda6dc09e25e24b40fdafe11a520bc725c1d0bbaa5e00bf95b/pillow-12.1.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9e8064fb1cc019296958595f6db671fba95209e3ceb0c4734c9baf97de04b20", size = 6380136, upload-time = "2026-02-11T04:21:40.562Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7a/a9/7628f013f18f001c1b98d8fffe3452f306a70dc6aba7d931019e0492f45e/pillow-12.1.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:472a8d7ded663e6162dafdf20015c486a7009483ca671cece7a9279b512fcb13", size = 7067129, upload-time = "2026-02-11T04:21:42.521Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1e/f8/66ab30a2193b277785601e82ee2d49f68ea575d9637e5e234faaa98efa4c/pillow-12.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:89b54027a766529136a06cfebeecb3a04900397a3590fd252160b888479517bf", size = 6491807, upload-time = "2026-02-11T04:21:44.22Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/da/0b/a877a6627dc8318fdb84e357c5e1a758c0941ab1ddffdafd231983788579/pillow-12.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:86172b0831b82ce4f7877f280055892b31179e1576aa00d0df3bb1bbf8c3e524", size = 7190954, upload-time = "2026-02-11T04:21:46.114Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/83/43/6f732ff85743cf746b1361b91665d9f5155e1483817f693f8d57ea93147f/pillow-12.1.1-cp313-cp313t-win32.whl", hash = "sha256:44ce27545b6efcf0fdbdceb31c9a5bdea9333e664cda58a7e674bb74608b3986", size = 6336441, upload-time = "2026-02-11T04:21:48.22Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3b/44/e865ef3986611bb75bfabdf94a590016ea327833f434558801122979cd0e/pillow-12.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a285e3eb7a5a45a2ff504e31f4a8d1b12ef62e84e5411c6804a42197c1cf586c", size = 7045383, upload-time = "2026-02-11T04:21:50.015Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a8/c6/f4fb24268d0c6908b9f04143697ea18b0379490cb74ba9e8d41b898bd005/pillow-12.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cc7d296b5ea4d29e6570dabeaed58d31c3fea35a633a69679fb03d7664f43fb3", size = 2456104, upload-time = "2026-02-11T04:21:51.633Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/03/d0/bebb3ffbf31c5a8e97241476c4cf8b9828954693ce6744b4a2326af3e16b/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:417423db963cb4be8bac3fc1204fe61610f6abeed1580a7a2cbb2fbda20f12af", size = 4062652, upload-time = "2026-02-11T04:21:53.19Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2d/c0/0e16fb0addda4851445c28f8350d8c512f09de27bbb0d6d0bbf8b6709605/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:b957b71c6b2387610f556a7eb0828afbe40b4a98036fc0d2acfa5a44a0c2036f", size = 4138823, upload-time = "2026-02-11T04:22:03.088Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6b/fb/6170ec655d6f6bb6630a013dd7cf7bc218423d7b5fa9071bf63dc32175ae/pillow-12.1.1-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:097690ba1f2efdeb165a20469d59d8bb03c55fb6621eb2041a060ae8ea3e9642", size = 3601143, upload-time = "2026-02-11T04:22:04.909Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/59/04/dc5c3f297510ba9a6837cbb318b87dd2b8f73eb41a43cc63767f65cb599c/pillow-12.1.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2815a87ab27848db0321fb78c7f0b2c8649dee134b7f2b80c6a45c6831d75ccd", size = 5266254, upload-time = "2026-02-11T04:22:07.656Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/05/30/5db1236b0d6313f03ebf97f5e17cda9ca060f524b2fcc875149a8360b21c/pillow-12.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f7ed2c6543bad5a7d5530eb9e78c53132f93dfa44a28492db88b41cdab885202", size = 4657499, upload-time = "2026-02-11T04:22:09.613Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6f/18/008d2ca0eb612e81968e8be0bbae5051efba24d52debf930126d7eaacbba/pillow-12.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:652a2c9ccfb556235b2b501a3a7cf3742148cd22e04b5625c5fe057ea3e3191f", size = 6232137, upload-time = "2026-02-11T04:22:11.434Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/70/f1/f14d5b8eeb4b2cd62b9f9f847eb6605f103df89ef619ac68f92f748614ea/pillow-12.1.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d6e4571eedf43af33d0fc233a382a76e849badbccdf1ac438841308652a08e1f", size = 8042721, upload-time = "2026-02-11T04:22:13.321Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/d6/17824509146e4babbdabf04d8171491fa9d776f7061ff6e727522df9bd03/pillow-12.1.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b574c51cf7d5d62e9be37ba446224b59a2da26dc4c1bb2ecbe936a4fb1a7cb7f", size = 6347798, upload-time = "2026-02-11T04:22:15.449Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d1/ee/c85a38a9ab92037a75615aba572c85ea51e605265036e00c5b67dfafbfe2/pillow-12.1.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a37691702ed687799de29a518d63d4682d9016932db66d4e90c345831b02fb4e", size = 7039315, upload-time = "2026-02-11T04:22:17.24Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ec/f3/bc8ccc6e08a148290d7523bde4d9a0d6c981db34631390dc6e6ec34cacf6/pillow-12.1.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f95c00d5d6700b2b890479664a06e754974848afaae5e21beb4d83c106923fd0", size = 6462360, upload-time = "2026-02-11T04:22:19.111Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f6/ab/69a42656adb1d0665ab051eec58a41f169ad295cf81ad45406963105408f/pillow-12.1.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:559b38da23606e68681337ad74622c4dbba02254fc9cb4488a305dd5975c7eeb", size = 7165438, upload-time = "2026-02-11T04:22:21.041Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/02/46/81f7aa8941873f0f01d4b55cc543b0a3d03ec2ee30d617a0448bf6bd6dec/pillow-12.1.1-cp314-cp314-win32.whl", hash = "sha256:03edcc34d688572014ff223c125a3f77fb08091e4607e7745002fc214070b35f", size = 6431503, upload-time = "2026-02-11T04:22:22.833Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/40/72/4c245f7d1044b67affc7f134a09ea619d4895333d35322b775b928180044/pillow-12.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:50480dcd74fa63b8e78235957d302d98d98d82ccbfac4c7e12108ba9ecbdba15", size = 7176748, upload-time = "2026-02-11T04:22:24.64Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e4/ad/8a87bdbe038c5c698736e3348af5c2194ffb872ea52f11894c95f9305435/pillow-12.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:5cb1785d97b0c3d1d1a16bc1d710c4a0049daefc4935f3a8f31f827f4d3d2e7f", size = 2544314, upload-time = "2026-02-11T04:22:26.685Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/9d/efd18493f9de13b87ede7c47e69184b9e859e4427225ea962e32e56a49bc/pillow-12.1.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1f90cff8aa76835cba5769f0b3121a22bd4eb9e6884cfe338216e557a9a548b8", size = 5268612, upload-time = "2026-02-11T04:22:29.884Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f8/f1/4f42eb2b388eb2ffc660dcb7f7b556c1015c53ebd5f7f754965ef997585b/pillow-12.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1f1be78ce9466a7ee64bfda57bdba0f7cc499d9794d518b854816c41bf0aa4e9", size = 4660567, upload-time = "2026-02-11T04:22:31.799Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/01/54/df6ef130fa43e4b82e32624a7b821a2be1c5653a5fdad8469687a7db4e00/pillow-12.1.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:42fc1f4677106188ad9a55562bbade416f8b55456f522430fadab3cef7cd4e60", size = 6269951, upload-time = "2026-02-11T04:22:33.921Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a9/48/618752d06cc44bb4aae8ce0cd4e6426871929ed7b46215638088270d9b34/pillow-12.1.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:98edb152429ab62a1818039744d8fbb3ccab98a7c29fc3d5fcef158f3f1f68b7", size = 8074769, upload-time = "2026-02-11T04:22:35.877Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c3/bd/f1d71eb39a72fa088d938655afba3e00b38018d052752f435838961127d8/pillow-12.1.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d470ab1178551dd17fdba0fef463359c41aaa613cdcd7ff8373f54be629f9f8f", size = 6381358, upload-time = "2026-02-11T04:22:37.698Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/64/ef/c784e20b96674ed36a5af839305f55616f8b4f8aa8eeccf8531a6e312243/pillow-12.1.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6408a7b064595afcab0a49393a413732a35788f2a5092fdc6266952ed67de586", size = 7068558, upload-time = "2026-02-11T04:22:39.597Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/73/cb/8059688b74422ae61278202c4e1ad992e8a2e7375227be0a21c6b87ca8d5/pillow-12.1.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5d8c41325b382c07799a3682c1c258469ea2ff97103c53717b7893862d0c98ce", size = 6493028, upload-time = "2026-02-11T04:22:42.73Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c6/da/e3c008ed7d2dd1f905b15949325934510b9d1931e5df999bb15972756818/pillow-12.1.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c7697918b5be27424e9ce568193efd13d925c4481dd364e43f5dff72d33e10f8", size = 7191940, upload-time = "2026-02-11T04:22:44.543Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/01/4a/9202e8d11714c1fc5951f2e1ef362f2d7fbc595e1f6717971d5dd750e969/pillow-12.1.1-cp314-cp314t-win32.whl", hash = "sha256:d2912fd8114fc5545aa3a4b5576512f64c55a03f3ebcca4c10194d593d43ea36", size = 6438736, upload-time = "2026-02-11T04:22:46.347Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f3/ca/cbce2327eb9885476b3957b2e82eb12c866a8b16ad77392864ad601022ce/pillow-12.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:4ceb838d4bd9dab43e06c363cab2eebf63846d6a4aeaea283bbdfd8f1a8ed58b", size = 7182894, upload-time = "2026-02-11T04:22:48.114Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ec/d2/de599c95ba0a973b94410477f8bf0b6f0b5e67360eb89bcb1ad365258beb/pillow-12.1.1-cp314-cp314t-win_arm64.whl", hash = "sha256:7b03048319bfc6170e93bd60728a1af51d3dd7704935feb228c4d4faab35d334", size = 2546446, upload-time = "2026-02-11T04:22:50.342Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/56/11/5d43209aa4cb58e0cc80127956ff1796a68b928e6324bbf06ef4db34367b/pillow-12.1.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:600fd103672b925fe62ed08e0d874ea34d692474df6f4bf7ebe148b30f89f39f", size = 5228606, upload-time = "2026-02-11T04:22:52.106Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5f/d5/3b005b4e4fda6698b371fa6c21b097d4707585d7db99e98d9b0b87ac612a/pillow-12.1.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:665e1b916b043cef294bc54d47bf02d87e13f769bc4bc5fa225a24b3a6c5aca9", size = 4622321, upload-time = "2026-02-11T04:22:53.827Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/df/36/ed3ea2d594356fd8037e5a01f6156c74bc8d92dbb0fa60746cc96cabb6e8/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:495c302af3aad1ca67420ddd5c7bd480c8867ad173528767d906428057a11f0e", size = 5247579, upload-time = "2026-02-11T04:22:56.094Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/54/9a/9cc3e029683cf6d20ae5085da0dafc63148e3252c2f13328e553aaa13cfb/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8fd420ef0c52c88b5a035a0886f367748c72147b2b8f384c9d12656678dfdfa9", size = 6989094, upload-time = "2026-02-11T04:22:58.288Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/98/fc53ab36da80b88df0967896b6c4b4cd948a0dc5aa40a754266aa3ae48b3/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f975aa7ef9684ce7e2c18a3aa8f8e2106ce1e46b94ab713d156b2898811651d3", size = 5313850, upload-time = "2026-02-11T04:23:00.554Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/30/02/00fa585abfd9fe9d73e5f6e554dc36cc2b842898cbfc46d70353dae227f8/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8089c852a56c2966cf18835db62d9b34fef7ba74c726ad943928d494fa7f4735", size = 5963343, upload-time = "2026-02-11T04:23:02.934Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/26/c56ce33ca856e358d27fda9676c055395abddb82c35ac0f593877ed4562e/pillow-12.1.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:cb9bb857b2d057c6dfc72ac5f3b44836924ba15721882ef103cecb40d002d80e", size = 7029880, upload-time = "2026-02-11T04:23:04.783Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3767,7 +3770,7 @@ crypto = [
|
||||
|
||||
[[package]]
|
||||
name = "pylance"
|
||||
version = "2.0.0"
|
||||
version = "4.0.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "lance-namespace" },
|
||||
@@ -3776,12 +3779,12 @@ dependencies = [
|
||||
{ name = "pyarrow" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/89/1e/7cba63f641e25243521a73c85d9f198c970546904bd32d86a74d8a5503b4/pylance-2.0.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:ecfc291cace1aae2faeac9b329ee9b42674e6cad505fafcfe223b7fcbbc15a34", size = 51673048, upload-time = "2026-02-05T19:53:58.676Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3d/b7/0674bea6e33a3edf466afa6d28271c495996a6f287f4426dd20d3cc08fcc/pylance-2.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0397d7b9e7da2bbcc15c13edc52698a988f10e30ddb7577bebe82ec5deb82eb", size = 54124374, upload-time = "2026-02-05T20:01:43.278Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f5/16/43ddd4dab5ae785eb6b6fea10c747ef757edebd702d8cdd2f7c451c82810/pylance-2.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25be16d2797d7b684365f44e2ccdc85da210a1763cf7abb9382fbb1b132a605f", size = 57604350, upload-time = "2026-02-05T20:10:03.402Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ab/91/94bd6e88cc59e9a3642479a448c636307cbe3919cfbb03a2894fe40004d7/pylance-2.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:63fcedecb88ff0ab18d538b32ed5d285a814f2bab0776a75ef9f3bd42d5b6d7d", size = 54139864, upload-time = "2026-02-05T20:02:07.957Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b1/ac/4cf5c2529cf7f10d1ed1195745c75e0817a09862297ad705ab539abab830/pylance-2.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a3792af7bb4e77aa80436d7553b8567a3ac63be9199a0ece786a9ef2438f7930", size = 57575193, upload-time = "2026-02-05T20:10:27.163Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/45/a3/05fd03f25c417e55f5f141e08585da8a5e5d0b17c71882b446388f203584/pylance-2.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:f08d9f87c6d6ac2d2dea6898a4364faef57d3c6a802f8faf3b62fe756fb6834b", size = 61682039, upload-time = "2026-02-05T20:30:48.272Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/29/5152da1261a628c293876917b6185538bd68f4cf1420da6265b5be79d09b/pylance-4.0.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:7310892f3089eeddb1af1fe5c398b71cc483a3015646caceaa2f62fc92b227b2", size = 54420876, upload-time = "2026-03-30T18:18:37.525Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/99/ae/7edbbfc18c3be43eedb886e74a17826c09fdf35588b35912f2733779ea43/pylance-4.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57f6a521b1b4b77a62d791850213a854093719c7d76b9641e8abcd445eb73e56", size = 56752552, upload-time = "2026-03-30T18:24:21.331Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ef/88/6d8bda83224bac52806f09d3e211d8886b81500384948a753c4b24c11f35/pylance-4.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e433d6bddd66de99c58e472bc3e8ed1590c7ff4ff7948479254c1c2111a601a8", size = 60305704, upload-time = "2026-03-30T18:35:23.425Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/52/f3/8d8369c756c4173ea070f6964213f9b622ac278bd04a058c48d00a549177/pylance-4.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f36dce83c11cd5d598cb0f64bad7c51fc21ed43df868b9029184a385c6bf4d84", size = 56771233, upload-time = "2026-03-30T18:25:40.012Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/66/e6/53e0713440685b1c76e20d72755eca2e531cc182ea9a612b4cb6a15abe50/pylance-4.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9ca03f97f22e0b75f06378c4006d587aba26408122fd066f0e43e2b7a019c67e", size = 60260813, upload-time = "2026-03-30T18:36:07.976Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1e/04/5f22b88c8965d3982f68f67bfe24d756e7b788e10392d2bec6f97f5eb0e3/pylance-4.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:9261c32d3bd6aaab33025a45b20c2f2554804e1bc2a1ec2bfcb06f0c9d2e59b9", size = 65137830, upload-time = "2026-03-30T18:37:33.048Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lancedb"
|
||||
version = "0.27.1"
|
||||
version = "0.27.2"
|
||||
edition.workspace = true
|
||||
description = "LanceDB: A serverless, low-latency vector database for AI applications"
|
||||
license.workspace = true
|
||||
|
||||
@@ -101,9 +101,9 @@ impl TableNamesBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the namespace to list tables from
|
||||
pub fn namespace(mut self, namespace: Vec<String>) -> Self {
|
||||
self.request.namespace = namespace;
|
||||
/// Set the namespace path to list tables from
|
||||
pub fn namespace(mut self, namespace_path: Vec<String>) -> Self {
|
||||
self.request.namespace_path = namespace_path;
|
||||
self
|
||||
}
|
||||
|
||||
@@ -131,7 +131,7 @@ impl OpenTableBuilder {
|
||||
parent,
|
||||
request: OpenTableRequest {
|
||||
name,
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
index_cache_size: None,
|
||||
lance_read_params: None,
|
||||
location: None,
|
||||
@@ -206,9 +206,9 @@ impl OpenTableBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the namespace for the table
|
||||
pub fn namespace(mut self, namespace: Vec<String>) -> Self {
|
||||
self.request.namespace = namespace;
|
||||
/// Set the namespace path for the table
|
||||
pub fn namespace(mut self, namespace_path: Vec<String>) -> Self {
|
||||
self.request.namespace_path = namespace_path;
|
||||
self
|
||||
}
|
||||
|
||||
@@ -303,9 +303,9 @@ impl CloneTableBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the target namespace for the cloned table
|
||||
pub fn target_namespace(mut self, namespace: Vec<String>) -> Self {
|
||||
self.request.target_namespace = namespace;
|
||||
/// Set the target namespace path for the cloned table
|
||||
pub fn target_namespace(mut self, namespace_path: Vec<String>) -> Self {
|
||||
self.request.target_namespace_path = namespace_path;
|
||||
self
|
||||
}
|
||||
|
||||
@@ -456,15 +456,15 @@ impl Connection {
|
||||
&self,
|
||||
old_name: impl AsRef<str>,
|
||||
new_name: impl AsRef<str>,
|
||||
cur_namespace: &[String],
|
||||
new_namespace: &[String],
|
||||
cur_namespace_path: &[String],
|
||||
new_namespace_path: &[String],
|
||||
) -> Result<()> {
|
||||
self.internal
|
||||
.rename_table(
|
||||
old_name.as_ref(),
|
||||
new_name.as_ref(),
|
||||
cur_namespace,
|
||||
new_namespace,
|
||||
cur_namespace_path,
|
||||
new_namespace_path,
|
||||
)
|
||||
.await
|
||||
}
|
||||
@@ -478,9 +478,11 @@ impl Connection {
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `name` - The name of the table to drop
|
||||
/// * `namespace` - The namespace to drop the table from
|
||||
pub async fn drop_table(&self, name: impl AsRef<str>, namespace: &[String]) -> Result<()> {
|
||||
self.internal.drop_table(name.as_ref(), namespace).await
|
||||
/// * `namespace_path` - The namespace path to drop the table from
|
||||
pub async fn drop_table(&self, name: impl AsRef<str>, namespace_path: &[String]) -> Result<()> {
|
||||
self.internal
|
||||
.drop_table(name.as_ref(), namespace_path)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Drop the database
|
||||
@@ -494,9 +496,9 @@ impl Connection {
|
||||
/// Drops all tables in the database
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `namespace` - The namespace to drop all tables from. Empty slice represents root namespace.
|
||||
pub async fn drop_all_tables(&self, namespace: &[String]) -> Result<()> {
|
||||
self.internal.drop_all_tables(namespace).await
|
||||
/// * `namespace_path` - The namespace path to drop all tables from. Empty slice represents root namespace.
|
||||
pub async fn drop_all_tables(&self, namespace_path: &[String]) -> Result<()> {
|
||||
self.internal.drop_all_tables(namespace_path).await
|
||||
}
|
||||
|
||||
/// List immediate child namespace names in the given namespace
|
||||
@@ -596,11 +598,8 @@ pub struct ConnectBuilder {
|
||||
}
|
||||
|
||||
#[cfg(feature = "remote")]
|
||||
const ENV_VARS_TO_STORAGE_OPTS: [(&str, &str); 3] = [
|
||||
("AZURE_STORAGE_ACCOUNT_NAME", "azure_storage_account_name"),
|
||||
("AZURE_CLIENT_ID", "azure_client_id"),
|
||||
("AZURE_TENANT_ID", "azure_tenant_id"),
|
||||
];
|
||||
const ENV_VARS_TO_STORAGE_OPTS: [(&str, &str); 1] =
|
||||
[("AZURE_STORAGE_ACCOUNT_NAME", "azure_storage_account_name")];
|
||||
|
||||
impl ConnectBuilder {
|
||||
/// Create a new [`ConnectOptions`] with the given database URI.
|
||||
@@ -865,6 +864,21 @@ pub fn connect(uri: &str) -> ConnectBuilder {
|
||||
ConnectBuilder::new(uri)
|
||||
}
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
/// Operations that can be pushed down to the namespace server.
|
||||
///
|
||||
/// These operations will be executed on the namespace server instead of locally
|
||||
/// when enabled via [`ConnectNamespaceBuilder::pushdown_operations`].
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub enum PushdownOperation {
|
||||
/// Execute queries on the namespace server via `query_table()` instead of locally.
|
||||
QueryTable,
|
||||
/// Execute table creation on the namespace server via `create_table()`
|
||||
/// instead of using `declare_table` + local write.
|
||||
CreateTable,
|
||||
}
|
||||
|
||||
pub struct ConnectNamespaceBuilder {
|
||||
ns_impl: String,
|
||||
properties: HashMap<String, String>,
|
||||
@@ -872,7 +886,7 @@ pub struct ConnectNamespaceBuilder {
|
||||
read_consistency_interval: Option<std::time::Duration>,
|
||||
embedding_registry: Option<Arc<dyn EmbeddingRegistry>>,
|
||||
session: Option<Arc<lance::session::Session>>,
|
||||
server_side_query_enabled: bool,
|
||||
pushdown_operations: HashSet<PushdownOperation>,
|
||||
}
|
||||
|
||||
impl ConnectNamespaceBuilder {
|
||||
@@ -884,7 +898,7 @@ impl ConnectNamespaceBuilder {
|
||||
read_consistency_interval: None,
|
||||
embedding_registry: None,
|
||||
session: None,
|
||||
server_side_query_enabled: false,
|
||||
pushdown_operations: HashSet::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -939,15 +953,30 @@ impl ConnectNamespaceBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Enable server-side query execution.
|
||||
/// Add operations to push down to the namespace server.
|
||||
///
|
||||
/// When enabled, queries will be executed on the namespace server instead of
|
||||
/// locally. This can improve performance by reducing data transfer and
|
||||
/// leveraging server-side compute resources.
|
||||
/// When operations are added, they will be executed on the namespace server
|
||||
/// instead of locally. This can improve performance by reducing data transfer
|
||||
/// and leveraging server-side compute resources.
|
||||
///
|
||||
/// Default is `false` (queries executed locally).
|
||||
pub fn server_side_query(mut self, enabled: bool) -> Self {
|
||||
self.server_side_query_enabled = enabled;
|
||||
/// Available operations:
|
||||
/// - [`PushdownOperation::QueryTable`]: Execute queries via `namespace.query_table()`
|
||||
/// - [`PushdownOperation::CreateTable`]: Execute table creation via `namespace.create_table()`
|
||||
///
|
||||
/// By default, no operations are pushed down (all executed locally).
|
||||
pub fn pushdown_operation(mut self, operation: PushdownOperation) -> Self {
|
||||
self.pushdown_operations.insert(operation);
|
||||
self
|
||||
}
|
||||
|
||||
/// Add multiple operations to push down to the namespace server.
|
||||
///
|
||||
/// See [`Self::pushdown_operation`] for details.
|
||||
pub fn pushdown_operations(
|
||||
mut self,
|
||||
operations: impl IntoIterator<Item = PushdownOperation>,
|
||||
) -> Self {
|
||||
self.pushdown_operations.extend(operations);
|
||||
self
|
||||
}
|
||||
|
||||
@@ -962,7 +991,7 @@ impl ConnectNamespaceBuilder {
|
||||
self.storage_options,
|
||||
self.read_consistency_interval,
|
||||
self.session,
|
||||
self.server_side_query_enabled,
|
||||
self.pushdown_operations,
|
||||
)
|
||||
.await?,
|
||||
);
|
||||
|
||||
@@ -111,9 +111,9 @@ impl CreateTableBuilder {
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Set the namespace for the table
|
||||
pub fn namespace(mut self, namespace: Vec<String>) -> Self {
|
||||
self.request.namespace = namespace;
|
||||
/// Set the namespace path for the table
|
||||
pub fn namespace(mut self, namespace_path: Vec<String>) -> Self {
|
||||
self.request.namespace_path = namespace_path;
|
||||
self
|
||||
}
|
||||
|
||||
|
||||
@@ -40,8 +40,8 @@ pub trait DatabaseOptions {
|
||||
/// A request to list names of tables in the database (deprecated, use ListTablesRequest)
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct TableNamesRequest {
|
||||
/// The namespace to list tables in. Empty list represents root namespace.
|
||||
pub namespace: Vec<String>,
|
||||
/// The namespace path to list tables in. Empty list represents root namespace.
|
||||
pub namespace_path: Vec<String>,
|
||||
/// If present, only return names that come lexicographically after the supplied
|
||||
/// value.
|
||||
///
|
||||
@@ -56,8 +56,8 @@ pub struct TableNamesRequest {
|
||||
#[derive(Clone)]
|
||||
pub struct OpenTableRequest {
|
||||
pub name: String,
|
||||
/// The namespace to open the table from. Empty list represents root namespace.
|
||||
pub namespace: Vec<String>,
|
||||
/// The namespace path to open the table from. Empty list represents root namespace.
|
||||
pub namespace_path: Vec<String>,
|
||||
pub index_cache_size: Option<u32>,
|
||||
pub lance_read_params: Option<ReadParams>,
|
||||
/// Optional custom location for the table. If not provided, the database will
|
||||
@@ -76,7 +76,7 @@ impl std::fmt::Debug for OpenTableRequest {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("OpenTableRequest")
|
||||
.field("name", &self.name)
|
||||
.field("namespace", &self.namespace)
|
||||
.field("namespace_path", &self.namespace_path)
|
||||
.field("index_cache_size", &self.index_cache_size)
|
||||
.field("lance_read_params", &self.lance_read_params)
|
||||
.field("location", &self.location)
|
||||
@@ -115,8 +115,8 @@ impl CreateTableMode {
|
||||
pub struct CreateTableRequest {
|
||||
/// The name of the new table
|
||||
pub name: String,
|
||||
/// The namespace to create the table in. Empty list represents root namespace.
|
||||
pub namespace: Vec<String>,
|
||||
/// The namespace path to create the table in. Empty list represents root namespace.
|
||||
pub namespace_path: Vec<String>,
|
||||
/// Initial data to write to the table, can be empty.
|
||||
pub data: Box<dyn Scannable>,
|
||||
/// The mode to use when creating the table
|
||||
@@ -135,7 +135,7 @@ impl CreateTableRequest {
|
||||
pub fn new(name: String, data: Box<dyn Scannable>) -> Self {
|
||||
Self {
|
||||
name,
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data,
|
||||
mode: CreateTableMode::default(),
|
||||
write_options: WriteOptions::default(),
|
||||
@@ -155,8 +155,8 @@ impl CreateTableRequest {
|
||||
pub struct CloneTableRequest {
|
||||
/// The name of the target table to create
|
||||
pub target_table_name: String,
|
||||
/// The namespace for the target table. Empty list represents root namespace.
|
||||
pub target_namespace: Vec<String>,
|
||||
/// The namespace path for the target table. Empty list represents root namespace.
|
||||
pub target_namespace_path: Vec<String>,
|
||||
/// The URI of the source table to clone from.
|
||||
pub source_uri: String,
|
||||
/// Optional version of the source table to clone.
|
||||
@@ -175,7 +175,7 @@ impl CloneTableRequest {
|
||||
pub fn new(target_table_name: String, source_uri: String) -> Self {
|
||||
Self {
|
||||
target_table_name,
|
||||
target_namespace: vec![],
|
||||
target_namespace_path: vec![],
|
||||
source_uri,
|
||||
source_version: None,
|
||||
source_tag: None,
|
||||
@@ -251,13 +251,13 @@ pub trait Database:
|
||||
&self,
|
||||
cur_name: &str,
|
||||
new_name: &str,
|
||||
cur_namespace: &[String],
|
||||
new_namespace: &[String],
|
||||
cur_namespace_path: &[String],
|
||||
new_namespace_path: &[String],
|
||||
) -> Result<()>;
|
||||
/// Drop a table in the database
|
||||
async fn drop_table(&self, name: &str, namespace: &[String]) -> Result<()>;
|
||||
async fn drop_table(&self, name: &str, namespace_path: &[String]) -> Result<()>;
|
||||
/// Drop all tables in the database
|
||||
async fn drop_all_tables(&self, namespace: &[String]) -> Result<()>;
|
||||
async fn drop_all_tables(&self, namespace_path: &[String]) -> Result<()>;
|
||||
fn as_any(&self) -> &dyn std::any::Any;
|
||||
|
||||
/// Get the equivalent namespace client of this database
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
//! Provides the `ListingDatabase`, a simple database where tables are folders in a directory
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::fs::create_dir_all;
|
||||
use std::path::Path;
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
@@ -653,7 +654,7 @@ impl ListingDatabase {
|
||||
async fn handle_table_exists(
|
||||
&self,
|
||||
table_name: &str,
|
||||
namespace: Vec<String>,
|
||||
namespace_path: Vec<String>,
|
||||
mode: CreateTableMode,
|
||||
data_schema: &arrow_schema::Schema,
|
||||
) -> Result<Arc<dyn BaseTable>> {
|
||||
@@ -664,7 +665,7 @@ impl ListingDatabase {
|
||||
CreateTableMode::ExistOk(callback) => {
|
||||
let req = OpenTableRequest {
|
||||
name: table_name.to_string(),
|
||||
namespace: namespace.clone(),
|
||||
namespace_path: namespace_path.clone(),
|
||||
index_cache_size: None,
|
||||
lance_read_params: None,
|
||||
location: None,
|
||||
@@ -751,7 +752,7 @@ impl Database for ListingDatabase {
|
||||
}
|
||||
|
||||
async fn table_names(&self, request: TableNamesRequest) -> Result<Vec<String>> {
|
||||
if !request.namespace.is_empty() {
|
||||
if !request.namespace_path.is_empty() {
|
||||
return Err(Error::NotSupported {
|
||||
message: "Namespace parameter is not supported for listing database. Only root namespace is supported.".into(),
|
||||
});
|
||||
@@ -838,7 +839,7 @@ impl Database for ListingDatabase {
|
||||
|
||||
async fn create_table(&self, request: CreateTableRequest) -> Result<Arc<dyn BaseTable>> {
|
||||
// When namespace is not empty, location must be provided
|
||||
if !request.namespace.is_empty() && request.location.is_none() {
|
||||
if !request.namespace_path.is_empty() && request.location.is_none() {
|
||||
return Err(Error::InvalidInput {
|
||||
message: "Location must be provided when namespace is not empty".into(),
|
||||
});
|
||||
@@ -864,13 +865,13 @@ impl Database for ListingDatabase {
|
||||
match NativeTable::create(
|
||||
&table_uri,
|
||||
&request.name,
|
||||
request.namespace.clone(),
|
||||
request.namespace_path.clone(),
|
||||
request.data,
|
||||
self.store_wrapper.clone(),
|
||||
Some(write_params),
|
||||
self.read_consistency_interval,
|
||||
request.namespace_client,
|
||||
false, // server_side_query_enabled - listing database doesn't support server-side queries
|
||||
HashSet::new(), // listing database doesn't support server-side queries
|
||||
)
|
||||
.await
|
||||
{
|
||||
@@ -878,7 +879,7 @@ impl Database for ListingDatabase {
|
||||
Err(Error::TableAlreadyExists { .. }) => {
|
||||
self.handle_table_exists(
|
||||
&request.name,
|
||||
request.namespace.clone(),
|
||||
request.namespace_path.clone(),
|
||||
request.mode,
|
||||
&data_schema,
|
||||
)
|
||||
@@ -889,7 +890,7 @@ impl Database for ListingDatabase {
|
||||
}
|
||||
|
||||
async fn clone_table(&self, request: CloneTableRequest) -> Result<Arc<dyn BaseTable>> {
|
||||
if !request.target_namespace.is_empty() {
|
||||
if !request.target_namespace_path.is_empty() {
|
||||
return Err(Error::NotSupported {
|
||||
message: "Namespace parameter is not supported for listing database. Only root namespace is supported.".into(),
|
||||
});
|
||||
@@ -944,13 +945,13 @@ impl Database for ListingDatabase {
|
||||
let cloned_table = NativeTable::open_with_params(
|
||||
&target_uri,
|
||||
&request.target_table_name,
|
||||
request.target_namespace,
|
||||
request.target_namespace_path,
|
||||
self.store_wrapper.clone(),
|
||||
None,
|
||||
self.read_consistency_interval,
|
||||
request.namespace_client,
|
||||
false, // server_side_query_enabled - listing database doesn't support server-side queries
|
||||
None, // managed_versioning - will be queried if namespace_client is provided
|
||||
HashSet::new(), // listing database doesn't support server-side queries
|
||||
None, // managed_versioning - will be queried if namespace_client is provided
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -959,7 +960,7 @@ impl Database for ListingDatabase {
|
||||
|
||||
async fn open_table(&self, mut request: OpenTableRequest) -> Result<Arc<dyn BaseTable>> {
|
||||
// When namespace is not empty, location must be provided
|
||||
if !request.namespace.is_empty() && request.location.is_none() {
|
||||
if !request.namespace_path.is_empty() && request.location.is_none() {
|
||||
return Err(Error::InvalidInput {
|
||||
message: "Location must be provided when namespace is not empty".into(),
|
||||
});
|
||||
@@ -1021,12 +1022,12 @@ impl Database for ListingDatabase {
|
||||
NativeTable::open_with_params(
|
||||
&table_uri,
|
||||
&request.name,
|
||||
request.namespace,
|
||||
request.namespace_path,
|
||||
self.store_wrapper.clone(),
|
||||
Some(read_params),
|
||||
self.read_consistency_interval,
|
||||
request.namespace_client,
|
||||
false, // server_side_query_enabled - listing database doesn't support server-side queries
|
||||
HashSet::new(), // listing database doesn't support server-side queries
|
||||
request.managed_versioning, // Pass through managed_versioning from request
|
||||
)
|
||||
.await?,
|
||||
@@ -1038,15 +1039,15 @@ impl Database for ListingDatabase {
|
||||
&self,
|
||||
_cur_name: &str,
|
||||
_new_name: &str,
|
||||
cur_namespace: &[String],
|
||||
new_namespace: &[String],
|
||||
cur_namespace_path: &[String],
|
||||
new_namespace_path: &[String],
|
||||
) -> Result<()> {
|
||||
if !cur_namespace.is_empty() {
|
||||
if !cur_namespace_path.is_empty() {
|
||||
return Err(Error::NotSupported {
|
||||
message: "Namespace parameter is not supported for listing database.".into(),
|
||||
});
|
||||
}
|
||||
if !new_namespace.is_empty() {
|
||||
if !new_namespace_path.is_empty() {
|
||||
return Err(Error::NotSupported {
|
||||
message: "Namespace parameter is not supported for listing database.".into(),
|
||||
});
|
||||
@@ -1056,8 +1057,8 @@ impl Database for ListingDatabase {
|
||||
})
|
||||
}
|
||||
|
||||
async fn drop_table(&self, name: &str, namespace: &[String]) -> Result<()> {
|
||||
if !namespace.is_empty() {
|
||||
async fn drop_table(&self, name: &str, namespace_path: &[String]) -> Result<()> {
|
||||
if !namespace_path.is_empty() {
|
||||
return Err(Error::NotSupported {
|
||||
message: "Namespace parameter is not supported for listing database.".into(),
|
||||
});
|
||||
@@ -1066,9 +1067,9 @@ impl Database for ListingDatabase {
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
async fn drop_all_tables(&self, namespace: &[String]) -> Result<()> {
|
||||
async fn drop_all_tables(&self, namespace_path: &[String]) -> Result<()> {
|
||||
// Check if namespace parameter is provided
|
||||
if !namespace.is_empty() {
|
||||
if !namespace_path.is_empty() {
|
||||
return Err(Error::NotSupported {
|
||||
message: "Namespace parameter is not supported for listing database.".into(),
|
||||
});
|
||||
@@ -1146,7 +1147,7 @@ mod tests {
|
||||
let source_table = db
|
||||
.create_table(CreateTableRequest {
|
||||
name: "source_table".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(RecordBatch::new_empty(schema.clone())) as Box<dyn Scannable>,
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
@@ -1163,7 +1164,7 @@ mod tests {
|
||||
let cloned_table = db
|
||||
.clone_table(CloneTableRequest {
|
||||
target_table_name: "cloned_table".to_string(),
|
||||
target_namespace: vec![],
|
||||
target_namespace_path: vec![],
|
||||
source_uri: source_uri.clone(),
|
||||
source_version: None,
|
||||
source_tag: None,
|
||||
@@ -1208,7 +1209,7 @@ mod tests {
|
||||
let source_table = db
|
||||
.create_table(CreateTableRequest {
|
||||
name: "source_with_data".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(batch) as Box<dyn Scannable>,
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
@@ -1224,7 +1225,7 @@ mod tests {
|
||||
let cloned_table = db
|
||||
.clone_table(CloneTableRequest {
|
||||
target_table_name: "cloned_with_data".to_string(),
|
||||
target_namespace: vec![],
|
||||
target_namespace_path: vec![],
|
||||
source_uri,
|
||||
source_version: None,
|
||||
source_tag: None,
|
||||
@@ -1268,7 +1269,7 @@ mod tests {
|
||||
|
||||
db.create_table(CreateTableRequest {
|
||||
name: "source".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
@@ -1284,7 +1285,7 @@ mod tests {
|
||||
let cloned = db
|
||||
.clone_table(CloneTableRequest {
|
||||
target_table_name: "cloned".to_string(),
|
||||
target_namespace: vec![],
|
||||
target_namespace_path: vec![],
|
||||
source_uri,
|
||||
source_version: None,
|
||||
source_tag: None,
|
||||
@@ -1305,7 +1306,7 @@ mod tests {
|
||||
|
||||
db.create_table(CreateTableRequest {
|
||||
name: "source".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
@@ -1321,7 +1322,7 @@ mod tests {
|
||||
let result = db
|
||||
.clone_table(CloneTableRequest {
|
||||
target_table_name: "cloned".to_string(),
|
||||
target_namespace: vec![],
|
||||
target_namespace_path: vec![],
|
||||
source_uri,
|
||||
source_version: None,
|
||||
source_tag: None,
|
||||
@@ -1346,7 +1347,7 @@ mod tests {
|
||||
|
||||
db.create_table(CreateTableRequest {
|
||||
name: "source".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
@@ -1362,7 +1363,7 @@ mod tests {
|
||||
let result = db
|
||||
.clone_table(CloneTableRequest {
|
||||
target_table_name: "cloned".to_string(),
|
||||
target_namespace: vec!["namespace".to_string()], // Non-empty namespace
|
||||
target_namespace_path: vec!["namespace".to_string()], // Non-empty namespace
|
||||
source_uri,
|
||||
source_version: None,
|
||||
source_tag: None,
|
||||
@@ -1387,7 +1388,7 @@ mod tests {
|
||||
|
||||
db.create_table(CreateTableRequest {
|
||||
name: "source".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
@@ -1403,7 +1404,7 @@ mod tests {
|
||||
let result = db
|
||||
.clone_table(CloneTableRequest {
|
||||
target_table_name: "invalid/name".to_string(), // Invalid name with slash
|
||||
target_namespace: vec![],
|
||||
target_namespace_path: vec![],
|
||||
source_uri,
|
||||
source_version: None,
|
||||
source_tag: None,
|
||||
@@ -1423,7 +1424,7 @@ mod tests {
|
||||
let result = db
|
||||
.clone_table(CloneTableRequest {
|
||||
target_table_name: "cloned".to_string(),
|
||||
target_namespace: vec![],
|
||||
target_namespace_path: vec![],
|
||||
source_uri: "/nonexistent/table.lance".to_string(),
|
||||
source_version: None,
|
||||
source_tag: None,
|
||||
@@ -1444,7 +1445,7 @@ mod tests {
|
||||
|
||||
db.create_table(CreateTableRequest {
|
||||
name: "source".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
@@ -1460,7 +1461,7 @@ mod tests {
|
||||
let result = db
|
||||
.clone_table(CloneTableRequest {
|
||||
target_table_name: "cloned".to_string(),
|
||||
target_namespace: vec![],
|
||||
target_namespace_path: vec![],
|
||||
source_uri,
|
||||
source_version: Some(1),
|
||||
source_tag: Some("v1.0".to_string()),
|
||||
@@ -1498,7 +1499,7 @@ mod tests {
|
||||
let source_table = db
|
||||
.create_table(CreateTableRequest {
|
||||
name: "versioned_source".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(batch1) as Box<dyn Scannable>,
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
@@ -1534,7 +1535,7 @@ mod tests {
|
||||
let cloned_table = db
|
||||
.clone_table(CloneTableRequest {
|
||||
target_table_name: "cloned_from_version".to_string(),
|
||||
target_namespace: vec![],
|
||||
target_namespace_path: vec![],
|
||||
source_uri,
|
||||
source_version: Some(initial_version),
|
||||
source_tag: None,
|
||||
@@ -1573,7 +1574,7 @@ mod tests {
|
||||
let source_table = db
|
||||
.create_table(CreateTableRequest {
|
||||
name: "tagged_source".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(batch1),
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
@@ -1613,7 +1614,7 @@ mod tests {
|
||||
let cloned_table = db
|
||||
.clone_table(CloneTableRequest {
|
||||
target_table_name: "cloned_from_tag".to_string(),
|
||||
target_namespace: vec![],
|
||||
target_namespace_path: vec![],
|
||||
source_uri,
|
||||
source_version: None,
|
||||
source_tag: Some("v1.0".to_string()),
|
||||
@@ -1649,7 +1650,7 @@ mod tests {
|
||||
let source_table = db
|
||||
.create_table(CreateTableRequest {
|
||||
name: "independent_source".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(batch1),
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
@@ -1665,7 +1666,7 @@ mod tests {
|
||||
let cloned_table = db
|
||||
.clone_table(CloneTableRequest {
|
||||
target_table_name: "independent_clone".to_string(),
|
||||
target_namespace: vec![],
|
||||
target_namespace_path: vec![],
|
||||
source_uri,
|
||||
source_version: None,
|
||||
source_tag: None,
|
||||
@@ -1725,7 +1726,7 @@ mod tests {
|
||||
let source_table = db
|
||||
.create_table(CreateTableRequest {
|
||||
name: "latest_version_source".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(batch1),
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
@@ -1758,7 +1759,7 @@ mod tests {
|
||||
let cloned_table = db
|
||||
.clone_table(CloneTableRequest {
|
||||
target_table_name: "cloned_latest".to_string(),
|
||||
target_namespace: vec![],
|
||||
target_namespace_path: vec![],
|
||||
source_uri,
|
||||
source_version: None,
|
||||
source_tag: None,
|
||||
@@ -1812,7 +1813,7 @@ mod tests {
|
||||
let table = db
|
||||
.create_table(CreateTableRequest {
|
||||
name: "test_stable".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(batch),
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
@@ -1863,7 +1864,7 @@ mod tests {
|
||||
let table = db
|
||||
.create_table(CreateTableRequest {
|
||||
name: "test_stable_table_level".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(batch),
|
||||
mode: CreateTableMode::Create,
|
||||
write_options,
|
||||
@@ -1934,7 +1935,7 @@ mod tests {
|
||||
let table = db
|
||||
.create_table(CreateTableRequest {
|
||||
name: "test_override".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(batch),
|
||||
mode: CreateTableMode::Create,
|
||||
write_options,
|
||||
@@ -2052,7 +2053,7 @@ mod tests {
|
||||
|
||||
db.create_table(CreateTableRequest {
|
||||
name: "table1".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(RecordBatch::new_empty(schema.clone())) as Box<dyn Scannable>,
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
@@ -2064,7 +2065,7 @@ mod tests {
|
||||
|
||||
db.create_table(CreateTableRequest {
|
||||
name: "table2".to_string(),
|
||||
namespace: vec![],
|
||||
namespace_path: vec![],
|
||||
data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
|
||||
mode: CreateTableMode::Create,
|
||||
write_options: Default::default(),
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
|
||||
//! Namespace-based database implementation that delegates table management to lance-namespace
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
@@ -22,6 +22,7 @@ use lance_namespace_impls::ConnectBuilder;
|
||||
use lance_table::io::commit::CommitHandler;
|
||||
use lance_table::io::commit::external_manifest::ExternalManifestCommitHandler;
|
||||
|
||||
use crate::connection::PushdownOperation;
|
||||
use crate::database::ReadConsistency;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::table::NativeTable;
|
||||
@@ -42,8 +43,8 @@ pub struct LanceNamespaceDatabase {
|
||||
session: Option<Arc<lance::session::Session>>,
|
||||
// database URI
|
||||
uri: String,
|
||||
// Whether to enable server-side query execution
|
||||
server_side_query_enabled: bool,
|
||||
// Operations to push down to the namespace server
|
||||
pushdown_operations: HashSet<PushdownOperation>,
|
||||
}
|
||||
|
||||
impl LanceNamespaceDatabase {
|
||||
@@ -53,7 +54,7 @@ impl LanceNamespaceDatabase {
|
||||
storage_options: HashMap<String, String>,
|
||||
read_consistency_interval: Option<std::time::Duration>,
|
||||
session: Option<Arc<lance::session::Session>>,
|
||||
server_side_query_enabled: bool,
|
||||
pushdown_operations: HashSet<PushdownOperation>,
|
||||
) -> Result<Self> {
|
||||
let mut builder = ConnectBuilder::new(ns_impl);
|
||||
for (key, value) in ns_properties.clone() {
|
||||
@@ -72,7 +73,7 @@ impl LanceNamespaceDatabase {
|
||||
read_consistency_interval,
|
||||
session,
|
||||
uri: format!("namespace://{}", ns_impl),
|
||||
server_side_query_enabled,
|
||||
pushdown_operations,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -82,7 +83,7 @@ impl std::fmt::Debug for LanceNamespaceDatabase {
|
||||
f.debug_struct("LanceNamespaceDatabase")
|
||||
.field("storage_options", &self.storage_options)
|
||||
.field("read_consistency_interval", &self.read_consistency_interval)
|
||||
.field("server_side_query_enabled", &self.server_side_query_enabled)
|
||||
.field("pushdown_operations", &self.pushdown_operations)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
@@ -138,7 +139,7 @@ impl Database for LanceNamespaceDatabase {
|
||||
|
||||
async fn table_names(&self, request: TableNamesRequest) -> Result<Vec<String>> {
|
||||
let ns_request = ListTablesRequest {
|
||||
id: Some(request.namespace),
|
||||
id: Some(request.namespace_path),
|
||||
page_token: request.start_after,
|
||||
limit: request.limit.map(|l| l as i32),
|
||||
..Default::default()
|
||||
@@ -154,7 +155,7 @@ impl Database for LanceNamespaceDatabase {
|
||||
}
|
||||
|
||||
async fn create_table(&self, request: DbCreateTableRequest) -> Result<Arc<dyn BaseTable>> {
|
||||
let mut table_id = request.namespace.clone();
|
||||
let mut table_id = request.namespace_path.clone();
|
||||
table_id.push(request.name.clone());
|
||||
let describe_request = DescribeTableRequest {
|
||||
id: Some(table_id.clone()),
|
||||
@@ -191,11 +192,11 @@ impl Database for LanceNamespaceDatabase {
|
||||
let native_table = NativeTable::open_from_namespace(
|
||||
self.namespace.clone(),
|
||||
&request.name,
|
||||
request.namespace.clone(),
|
||||
request.namespace_path.clone(),
|
||||
None,
|
||||
None,
|
||||
self.read_consistency_interval,
|
||||
self.server_side_query_enabled,
|
||||
self.pushdown_operations.clone(),
|
||||
self.session.clone(),
|
||||
)
|
||||
.await?;
|
||||
@@ -205,7 +206,7 @@ impl Database for LanceNamespaceDatabase {
|
||||
}
|
||||
}
|
||||
|
||||
let mut table_id = request.namespace.clone();
|
||||
let mut table_id = request.namespace_path.clone();
|
||||
table_id.push(request.name.clone());
|
||||
|
||||
let declare_request = DeclareTableRequest {
|
||||
@@ -255,12 +256,12 @@ impl Database for LanceNamespaceDatabase {
|
||||
self.namespace.clone(),
|
||||
&location,
|
||||
&request.name,
|
||||
request.namespace.clone(),
|
||||
request.namespace_path.clone(),
|
||||
request.data,
|
||||
None, // write_store_wrapper not used for namespace connections
|
||||
write_params,
|
||||
self.read_consistency_interval,
|
||||
self.server_side_query_enabled,
|
||||
self.pushdown_operations.clone(),
|
||||
self.session.clone(),
|
||||
)
|
||||
.await?;
|
||||
@@ -272,11 +273,11 @@ impl Database for LanceNamespaceDatabase {
|
||||
let native_table = NativeTable::open_from_namespace(
|
||||
self.namespace.clone(),
|
||||
&request.name,
|
||||
request.namespace.clone(),
|
||||
request.namespace_path.clone(),
|
||||
None, // write_store_wrapper not used for namespace connections
|
||||
request.lance_read_params,
|
||||
self.read_consistency_interval,
|
||||
self.server_side_query_enabled,
|
||||
self.pushdown_operations.clone(),
|
||||
self.session.clone(),
|
||||
)
|
||||
.await?;
|
||||
@@ -294,16 +295,16 @@ impl Database for LanceNamespaceDatabase {
|
||||
&self,
|
||||
_cur_name: &str,
|
||||
_new_name: &str,
|
||||
_cur_namespace: &[String],
|
||||
_new_namespace: &[String],
|
||||
_cur_namespace_path: &[String],
|
||||
_new_namespace_path: &[String],
|
||||
) -> Result<()> {
|
||||
Err(Error::NotSupported {
|
||||
message: "rename_table is not supported for namespace connections".to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
async fn drop_table(&self, name: &str, namespace: &[String]) -> Result<()> {
|
||||
let mut table_id = namespace.to_vec();
|
||||
async fn drop_table(&self, name: &str, namespace_path: &[String]) -> Result<()> {
|
||||
let mut table_id = namespace_path.to_vec();
|
||||
table_id.push(name.to_string());
|
||||
|
||||
let drop_request = DropTableRequest {
|
||||
@@ -321,17 +322,17 @@ impl Database for LanceNamespaceDatabase {
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
async fn drop_all_tables(&self, namespace: &[String]) -> Result<()> {
|
||||
async fn drop_all_tables(&self, namespace_path: &[String]) -> Result<()> {
|
||||
let tables = self
|
||||
.table_names(TableNamesRequest {
|
||||
namespace: namespace.to_vec(),
|
||||
namespace_path: namespace_path.to_vec(),
|
||||
start_after: None,
|
||||
limit: None,
|
||||
})
|
||||
.await?;
|
||||
|
||||
for table in tables {
|
||||
self.drop_table(&table, namespace).await?;
|
||||
self.drop_table(&table, namespace_path).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -240,7 +240,7 @@ impl Shuffler {
|
||||
.await?;
|
||||
// Need to read the entire file in a single batch for in-memory shuffling
|
||||
let batch = reader.read_record_batch(0, reader.num_rows()).await?;
|
||||
let mut rng = rng.lock().unwrap();
|
||||
let mut rng = rng.lock().unwrap_or_else(|e| e.into_inner());
|
||||
Self::shuffle_batch(&batch, &mut rng, clump_size)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -27,7 +27,17 @@ use arrow_schema::DataType;
|
||||
use datafusion_expr::{Expr, ScalarUDF, expr_fn::cast};
|
||||
use datafusion_functions::string::expr_fn as string_expr_fn;
|
||||
|
||||
pub use datafusion_expr::{col, lit};
|
||||
pub use datafusion_expr::lit;
|
||||
|
||||
/// Create a column reference expression, preserving the name exactly as given.
|
||||
///
|
||||
/// Unlike DataFusion's built-in [`col`][datafusion_expr::col], this function
|
||||
/// does **not** normalise the identifier to lower-case, so
|
||||
/// `col("firstName")` correctly references a field named `firstName`.
|
||||
pub fn col(name: impl Into<String>) -> DfExpr {
|
||||
use datafusion_common::Column;
|
||||
DfExpr::Column(Column::new_unqualified(name))
|
||||
}
|
||||
|
||||
pub use datafusion_expr::Expr as DfExpr;
|
||||
|
||||
|
||||
@@ -2,11 +2,37 @@
|
||||
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
||||
|
||||
use datafusion_expr::Expr;
|
||||
use datafusion_sql::unparser;
|
||||
use datafusion_sql::unparser::{self, dialect::Dialect};
|
||||
|
||||
/// Unparser dialect that matches the quoting style expected by the Lance SQL
|
||||
/// parser. Lance uses backtick (`` ` ``) as the only delimited-identifier
|
||||
/// quote character, so we must produce `` `firstName` `` rather than
|
||||
/// `"firstName"` for identifiers that require quoting.
|
||||
///
|
||||
/// We quote an identifier when it:
|
||||
/// * is a SQL reserved word, OR
|
||||
/// * contains characters outside `[a-zA-Z0-9_]`, OR
|
||||
/// * starts with a digit, OR
|
||||
/// * contains upper-case letters (unquoted identifiers are normalised to
|
||||
/// lower-case by the SQL parser, which would break case-sensitive schemas).
|
||||
struct LanceSqlDialect;
|
||||
|
||||
impl Dialect for LanceSqlDialect {
|
||||
fn identifier_quote_style(&self, identifier: &str) -> Option<char> {
|
||||
let needs_quote = identifier.chars().any(|c| c.is_ascii_uppercase())
|
||||
|| !identifier
|
||||
.chars()
|
||||
.enumerate()
|
||||
.all(|(i, c)| c == '_' || c.is_ascii_alphabetic() || (i > 0 && c.is_ascii_digit()));
|
||||
if needs_quote { Some('`') } else { None }
|
||||
}
|
||||
}
|
||||
|
||||
pub fn expr_to_sql_string(expr: &Expr) -> crate::Result<String> {
|
||||
let ast = unparser::expr_to_sql(expr).map_err(|e| crate::Error::InvalidInput {
|
||||
message: format!("failed to serialize expression to SQL: {}", e),
|
||||
})?;
|
||||
let ast = unparser::Unparser::new(&LanceSqlDialect)
|
||||
.expr_to_sql(expr)
|
||||
.map_err(|e| crate::Error::InvalidInput {
|
||||
message: format!("failed to serialize expression to SQL: {}", e),
|
||||
})?;
|
||||
Ok(ast.to_string())
|
||||
}
|
||||
|
||||
@@ -66,13 +66,13 @@ impl IoTrackingStore {
|
||||
}
|
||||
|
||||
fn record_read(&self, num_bytes: u64) {
|
||||
let mut stats = self.stats.lock().unwrap();
|
||||
let mut stats = self.stats.lock().unwrap_or_else(|e| e.into_inner());
|
||||
stats.read_iops += 1;
|
||||
stats.read_bytes += num_bytes;
|
||||
}
|
||||
|
||||
fn record_write(&self, num_bytes: u64) {
|
||||
let mut stats = self.stats.lock().unwrap();
|
||||
let mut stats = self.stats.lock().unwrap_or_else(|e| e.into_inner());
|
||||
stats.write_iops += 1;
|
||||
stats.write_bytes += num_bytes;
|
||||
}
|
||||
@@ -229,10 +229,63 @@ impl MultipartUpload for IoTrackingMultipartUpload {
|
||||
|
||||
fn put_part(&mut self, payload: PutPayload) -> UploadPart {
|
||||
{
|
||||
let mut stats = self.stats.lock().unwrap();
|
||||
let mut stats = self.stats.lock().unwrap_or_else(|e| e.into_inner());
|
||||
stats.write_iops += 1;
|
||||
stats.write_bytes += payload.content_length() as u64;
|
||||
}
|
||||
self.target.put_part(payload)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
/// Helper: poison a Mutex<IoStats> by panicking while holding the lock.
|
||||
fn poison_stats(stats: &Arc<Mutex<IoStats>>) {
|
||||
let stats_clone = stats.clone();
|
||||
let handle = std::thread::spawn(move || {
|
||||
let _guard = stats_clone.lock().unwrap();
|
||||
panic!("intentional panic to poison stats mutex");
|
||||
});
|
||||
let _ = handle.join();
|
||||
assert!(stats.lock().is_err(), "mutex should be poisoned");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_record_read_recovers_from_poisoned_lock() {
|
||||
let stats = Arc::new(Mutex::new(IoStats::default()));
|
||||
let store = IoTrackingStore {
|
||||
target: Arc::new(object_store::memory::InMemory::new()),
|
||||
stats: stats.clone(),
|
||||
};
|
||||
|
||||
poison_stats(&stats);
|
||||
|
||||
// record_read should not panic
|
||||
store.record_read(1024);
|
||||
|
||||
// Verify the stats were updated despite poisoning
|
||||
let s = stats.lock().unwrap_or_else(|e| e.into_inner());
|
||||
assert_eq!(s.read_iops, 1);
|
||||
assert_eq!(s.read_bytes, 1024);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_record_write_recovers_from_poisoned_lock() {
|
||||
let stats = Arc::new(Mutex::new(IoStats::default()));
|
||||
let store = IoTrackingStore {
|
||||
target: Arc::new(object_store::memory::InMemory::new()),
|
||||
stats: stats.clone(),
|
||||
};
|
||||
|
||||
poison_stats(&stats);
|
||||
|
||||
// record_write should not panic
|
||||
store.record_write(2048);
|
||||
|
||||
let s = stats.lock().unwrap_or_else(|e| e.into_inner());
|
||||
assert_eq!(s.write_iops, 1);
|
||||
assert_eq!(s.write_bytes, 2048);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ use std::sync::Arc;
|
||||
use std::{future::Future, time::Duration};
|
||||
|
||||
use arrow::compute::concat_batches;
|
||||
use arrow_array::{Array, Float16Array, Float32Array, Float64Array, make_array};
|
||||
use arrow_array::{Array, Float16Array, Float32Array, Float64Array, RecordBatch, make_array};
|
||||
use arrow_schema::{DataType, SchemaRef};
|
||||
use datafusion_expr::Expr;
|
||||
use datafusion_physical_plan::ExecutionPlan;
|
||||
@@ -17,15 +17,17 @@ use lance_datafusion::exec::execute_plan;
|
||||
use lance_index::scalar::FullTextSearchQuery;
|
||||
use lance_index::scalar::inverted::SCORE_COL;
|
||||
use lance_index::vector::DIST_COL;
|
||||
use lance_io::stream::RecordBatchStreamAdapter;
|
||||
|
||||
use crate::DistanceType;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::rerankers::rrf::RRFReranker;
|
||||
use crate::rerankers::{NormalizeMethod, Reranker, check_reranker_result};
|
||||
use crate::table::BaseTable;
|
||||
use crate::utils::TimeoutStream;
|
||||
use crate::{arrow::SendableRecordBatchStream, table::AnyQuery};
|
||||
use crate::utils::{MaxBatchLengthStream, TimeoutStream};
|
||||
use crate::{
|
||||
arrow::{SendableRecordBatchStream, SimpleRecordBatchStream},
|
||||
table::AnyQuery,
|
||||
};
|
||||
|
||||
mod hybrid;
|
||||
|
||||
@@ -604,6 +606,14 @@ impl Default for QueryExecutionOptions {
|
||||
}
|
||||
}
|
||||
|
||||
impl QueryExecutionOptions {
|
||||
fn without_output_batch_length_limit(&self) -> Self {
|
||||
let mut options = self.clone();
|
||||
options.max_batch_length = 0;
|
||||
options
|
||||
}
|
||||
}
|
||||
|
||||
/// A trait for a query object that can be executed to get results
|
||||
///
|
||||
/// There are various kinds of queries but they all return results
|
||||
@@ -1180,6 +1190,8 @@ impl VectorQuery {
|
||||
&self,
|
||||
options: QueryExecutionOptions,
|
||||
) -> Result<SendableRecordBatchStream> {
|
||||
let max_batch_length = options.max_batch_length as usize;
|
||||
let internal_options = options.without_output_batch_length_limit();
|
||||
// clone query and specify we want to include row IDs, which can be needed for reranking
|
||||
let mut fts_query = Query::new(self.parent.clone());
|
||||
fts_query.request = self.request.base.clone();
|
||||
@@ -1189,8 +1201,8 @@ impl VectorQuery {
|
||||
|
||||
vector_query.request.base.full_text_search = None;
|
||||
let (fts_results, vec_results) = try_join!(
|
||||
fts_query.execute_with_options(options.clone()),
|
||||
vector_query.inner_execute_with_options(options)
|
||||
fts_query.execute_with_options(internal_options.clone()),
|
||||
vector_query.inner_execute_with_options(internal_options)
|
||||
)?;
|
||||
|
||||
let (fts_results, vec_results) = try_join!(
|
||||
@@ -1245,9 +1257,7 @@ impl VectorQuery {
|
||||
results = results.drop_column(ROW_ID)?;
|
||||
}
|
||||
|
||||
Ok(SendableRecordBatchStream::from(
|
||||
RecordBatchStreamAdapter::new(results.schema(), stream::iter([Ok(results)])),
|
||||
))
|
||||
Ok(single_batch_stream(results, max_batch_length))
|
||||
}
|
||||
|
||||
async fn inner_execute_with_options(
|
||||
@@ -1256,6 +1266,7 @@ impl VectorQuery {
|
||||
) -> Result<SendableRecordBatchStream> {
|
||||
let plan = self.create_plan(options.clone()).await?;
|
||||
let inner = execute_plan(plan, Default::default())?;
|
||||
let inner = MaxBatchLengthStream::new_boxed(inner, options.max_batch_length as usize);
|
||||
let inner = if let Some(timeout) = options.timeout {
|
||||
TimeoutStream::new_boxed(inner, timeout)
|
||||
} else {
|
||||
@@ -1265,6 +1276,25 @@ impl VectorQuery {
|
||||
}
|
||||
}
|
||||
|
||||
fn single_batch_stream(batch: RecordBatch, max_batch_length: usize) -> SendableRecordBatchStream {
|
||||
let schema = batch.schema();
|
||||
if max_batch_length == 0 || batch.num_rows() <= max_batch_length {
|
||||
return Box::pin(SimpleRecordBatchStream::new(
|
||||
stream::iter([Ok(batch)]),
|
||||
schema,
|
||||
));
|
||||
}
|
||||
|
||||
let mut batches = Vec::with_capacity(batch.num_rows().div_ceil(max_batch_length));
|
||||
let mut offset = 0;
|
||||
while offset < batch.num_rows() {
|
||||
let length = (batch.num_rows() - offset).min(max_batch_length);
|
||||
batches.push(Ok(batch.slice(offset, length)));
|
||||
offset += length;
|
||||
}
|
||||
Box::pin(SimpleRecordBatchStream::new(stream::iter(batches), schema))
|
||||
}
|
||||
|
||||
impl ExecutableQuery for VectorQuery {
|
||||
async fn create_plan(&self, options: QueryExecutionOptions) -> Result<Arc<dyn ExecutionPlan>> {
|
||||
let query = AnyQuery::VectorQuery(self.request.clone());
|
||||
@@ -1753,6 +1783,50 @@ mod tests {
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
async fn make_large_vector_table(tmp_dir: &tempfile::TempDir, rows: usize) -> Table {
|
||||
let dataset_path = tmp_dir.path().join("large_test.lance");
|
||||
let uri = dataset_path.to_str().unwrap();
|
||||
|
||||
let schema = Arc::new(ArrowSchema::new(vec![
|
||||
ArrowField::new("id", DataType::Utf8, false),
|
||||
ArrowField::new(
|
||||
"vector",
|
||||
DataType::FixedSizeList(
|
||||
Arc::new(ArrowField::new("item", DataType::Float32, true)),
|
||||
4,
|
||||
),
|
||||
false,
|
||||
),
|
||||
]));
|
||||
|
||||
let ids = StringArray::from_iter_values((0..rows).map(|i| format!("row-{i}")));
|
||||
let vectors = FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
|
||||
(0..rows).map(|i| Some(vec![Some(i as f32), Some(1.0), Some(2.0), Some(3.0)])),
|
||||
4,
|
||||
);
|
||||
let batch =
|
||||
RecordBatch::try_new(schema.clone(), vec![Arc::new(ids), Arc::new(vectors)]).unwrap();
|
||||
|
||||
let conn = connect(uri).execute().await.unwrap();
|
||||
conn.create_table("my_table", vec![batch])
|
||||
.execute()
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
async fn assert_stream_batches_at_most(
|
||||
mut results: SendableRecordBatchStream,
|
||||
max_batch_length: usize,
|
||||
) {
|
||||
let mut saw_batch = false;
|
||||
while let Some(batch) = results.next().await {
|
||||
let batch = batch.unwrap();
|
||||
saw_batch = true;
|
||||
assert!(batch.num_rows() <= max_batch_length);
|
||||
}
|
||||
assert!(saw_batch);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_execute_with_options() {
|
||||
let tmp_dir = tempdir().unwrap();
|
||||
@@ -1772,6 +1846,83 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_vector_query_execute_with_options_respects_max_batch_length() {
|
||||
let tmp_dir = tempdir().unwrap();
|
||||
let table = make_large_vector_table(&tmp_dir, 10_000).await;
|
||||
|
||||
let results = table
|
||||
.query()
|
||||
.nearest_to(vec![0.0, 1.0, 2.0, 3.0])
|
||||
.unwrap()
|
||||
.limit(10_000)
|
||||
.execute_with_options(QueryExecutionOptions {
|
||||
max_batch_length: 100,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
assert_stream_batches_at_most(results, 100).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_hybrid_query_execute_with_options_respects_max_batch_length() {
|
||||
let tmp_dir = tempdir().unwrap();
|
||||
let dataset_path = tmp_dir.path();
|
||||
let conn = connect(dataset_path.to_str().unwrap())
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let dims = 2;
|
||||
let rows = 512;
|
||||
let schema = Arc::new(ArrowSchema::new(vec![
|
||||
ArrowField::new("text", DataType::Utf8, false),
|
||||
ArrowField::new(
|
||||
"vector",
|
||||
DataType::FixedSizeList(
|
||||
Arc::new(ArrowField::new("item", DataType::Float32, true)),
|
||||
dims,
|
||||
),
|
||||
false,
|
||||
),
|
||||
]));
|
||||
|
||||
let text = StringArray::from_iter_values((0..rows).map(|_| "match"));
|
||||
let vectors = FixedSizeListArray::from_iter_primitive::<Float32Type, _, _>(
|
||||
(0..rows).map(|i| Some(vec![Some(i as f32), Some(0.0)])),
|
||||
dims,
|
||||
);
|
||||
let record_batch =
|
||||
RecordBatch::try_new(schema.clone(), vec![Arc::new(text), Arc::new(vectors)]).unwrap();
|
||||
let table = conn
|
||||
.create_table("my_table", record_batch)
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
table
|
||||
.create_index(&["text"], crate::index::Index::FTS(Default::default()))
|
||||
.replace(true)
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let results = table
|
||||
.query()
|
||||
.full_text_search(FullTextSearchQuery::new("match".to_string()))
|
||||
.limit(rows)
|
||||
.nearest_to(&[0.0, 0.0])
|
||||
.unwrap()
|
||||
.execute_with_options(QueryExecutionOptions {
|
||||
max_batch_length: 100,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
assert_stream_batches_at_most(results, 100).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_analyze_plan() {
|
||||
let tmp_dir = tempdir().unwrap();
|
||||
|
||||
@@ -443,23 +443,13 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
|
||||
})?,
|
||||
);
|
||||
}
|
||||
// Map azure storage options to x-azure-* headers.
|
||||
// The option key uses underscores (e.g. "azure_client_id") while the
|
||||
// header uses hyphens (e.g. "x-azure-client-id").
|
||||
let azure_opts: [(&str, &str); 3] = [
|
||||
("azure_storage_account_name", "x-azure-storage-account-name"),
|
||||
("azure_client_id", "x-azure-client-id"),
|
||||
("azure_tenant_id", "x-azure-tenant-id"),
|
||||
];
|
||||
for (opt_key, header_name) in azure_opts {
|
||||
if let Some(v) = options.0.get(opt_key) {
|
||||
headers.insert(
|
||||
HeaderName::from_static(header_name),
|
||||
HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
|
||||
message: format!("non-ascii value '{}' for option '{}'", v, opt_key),
|
||||
})?,
|
||||
);
|
||||
}
|
||||
if let Some(v) = options.0.get("azure_storage_account_name") {
|
||||
headers.insert(
|
||||
HeaderName::from_static("x-azure-storage-account-name"),
|
||||
HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
|
||||
message: format!("non-ascii storage account name '{}' provided", db_name),
|
||||
})?,
|
||||
);
|
||||
}
|
||||
|
||||
for (key, value) in &config.extra_headers {
|
||||
@@ -1082,34 +1072,4 @@ mod tests {
|
||||
_ => panic!("Expected Runtime error"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_headers_azure_opts() {
|
||||
let mut opts = HashMap::new();
|
||||
opts.insert(
|
||||
"azure_storage_account_name".to_string(),
|
||||
"myaccount".to_string(),
|
||||
);
|
||||
opts.insert("azure_client_id".to_string(), "my-client-id".to_string());
|
||||
opts.insert("azure_tenant_id".to_string(), "my-tenant-id".to_string());
|
||||
let remote_opts = RemoteOptions::new(opts);
|
||||
|
||||
let headers = RestfulLanceDbClient::<Sender>::default_headers(
|
||||
"test-key",
|
||||
"us-east-1",
|
||||
"testdb",
|
||||
false,
|
||||
&remote_opts,
|
||||
None,
|
||||
&ClientConfig::default(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
headers.get("x-azure-storage-account-name").unwrap(),
|
||||
"myaccount"
|
||||
);
|
||||
assert_eq!(headers.get("x-azure-client-id").unwrap(), "my-client-id");
|
||||
assert_eq!(headers.get("x-azure-tenant-id").unwrap(), "my-tenant-id");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -362,9 +362,9 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
|
||||
}
|
||||
|
||||
async fn table_names(&self, request: TableNamesRequest) -> Result<Vec<String>> {
|
||||
let mut req = if !request.namespace.is_empty() {
|
||||
let mut req = if !request.namespace_path.is_empty() {
|
||||
let namespace_id =
|
||||
build_namespace_identifier(&request.namespace, &self.client.id_delimiter);
|
||||
build_namespace_identifier(&request.namespace_path, &self.client.id_delimiter);
|
||||
self.client
|
||||
.get(&format!("/v1/namespace/{}/table/list", namespace_id))
|
||||
} else {
|
||||
@@ -387,12 +387,12 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
|
||||
.tables;
|
||||
for table in &tables {
|
||||
let table_identifier =
|
||||
build_table_identifier(table, &request.namespace, &self.client.id_delimiter);
|
||||
let cache_key = build_cache_key(table, &request.namespace);
|
||||
build_table_identifier(table, &request.namespace_path, &self.client.id_delimiter);
|
||||
let cache_key = build_cache_key(table, &request.namespace_path);
|
||||
let remote_table = Arc::new(RemoteTable::new(
|
||||
self.client.clone(),
|
||||
table.clone(),
|
||||
request.namespace.clone(),
|
||||
request.namespace_path.clone(),
|
||||
table_identifier.clone(),
|
||||
version.clone(),
|
||||
));
|
||||
@@ -442,8 +442,11 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
|
||||
async fn create_table(&self, mut request: CreateTableRequest) -> Result<Arc<dyn BaseTable>> {
|
||||
let body = stream_as_body(request.data.scan_as_stream())?;
|
||||
|
||||
let identifier =
|
||||
build_table_identifier(&request.name, &request.namespace, &self.client.id_delimiter);
|
||||
let identifier = build_table_identifier(
|
||||
&request.name,
|
||||
&request.namespace_path,
|
||||
&self.client.id_delimiter,
|
||||
);
|
||||
let req = self
|
||||
.client
|
||||
.post(&format!("/v1/table/{}/create/", identifier))
|
||||
@@ -463,7 +466,7 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
|
||||
CreateTableMode::ExistOk(callback) => {
|
||||
let req = OpenTableRequest {
|
||||
name: request.name.clone(),
|
||||
namespace: request.namespace.clone(),
|
||||
namespace_path: request.namespace_path.clone(),
|
||||
index_cache_size: None,
|
||||
lance_read_params: None,
|
||||
location: None,
|
||||
@@ -495,13 +498,16 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
|
||||
}
|
||||
let rsp = self.client.check_response(&request_id, rsp).await?;
|
||||
let version = parse_server_version(&request_id, &rsp)?;
|
||||
let table_identifier =
|
||||
build_table_identifier(&request.name, &request.namespace, &self.client.id_delimiter);
|
||||
let cache_key = build_cache_key(&request.name, &request.namespace);
|
||||
let table_identifier = build_table_identifier(
|
||||
&request.name,
|
||||
&request.namespace_path,
|
||||
&self.client.id_delimiter,
|
||||
);
|
||||
let cache_key = build_cache_key(&request.name, &request.namespace_path);
|
||||
let table = Arc::new(RemoteTable::new(
|
||||
self.client.clone(),
|
||||
request.name.clone(),
|
||||
request.namespace.clone(),
|
||||
request.namespace_path.clone(),
|
||||
table_identifier,
|
||||
version,
|
||||
));
|
||||
@@ -513,7 +519,7 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
|
||||
async fn clone_table(&self, request: CloneTableRequest) -> Result<Arc<dyn BaseTable>> {
|
||||
let table_identifier = build_table_identifier(
|
||||
&request.target_table_name,
|
||||
&request.target_namespace,
|
||||
&request.target_namespace_path,
|
||||
&self.client.id_delimiter,
|
||||
);
|
||||
|
||||
@@ -542,11 +548,11 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
|
||||
}
|
||||
|
||||
let version = parse_server_version(&request_id, &rsp)?;
|
||||
let cache_key = build_cache_key(&request.target_table_name, &request.target_namespace);
|
||||
let cache_key = build_cache_key(&request.target_table_name, &request.target_namespace_path);
|
||||
let table = Arc::new(RemoteTable::new(
|
||||
self.client.clone(),
|
||||
request.target_table_name.clone(),
|
||||
request.target_namespace.clone(),
|
||||
request.target_namespace_path.clone(),
|
||||
table_identifier,
|
||||
version,
|
||||
));
|
||||
@@ -556,9 +562,12 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
|
||||
}
|
||||
|
||||
async fn open_table(&self, request: OpenTableRequest) -> Result<Arc<dyn BaseTable>> {
|
||||
let identifier =
|
||||
build_table_identifier(&request.name, &request.namespace, &self.client.id_delimiter);
|
||||
let cache_key = build_cache_key(&request.name, &request.namespace);
|
||||
let identifier = build_table_identifier(
|
||||
&request.name,
|
||||
&request.namespace_path,
|
||||
&self.client.id_delimiter,
|
||||
);
|
||||
let cache_key = build_cache_key(&request.name, &request.namespace_path);
|
||||
|
||||
// We describe the table to confirm it exists before moving on.
|
||||
if let Some(table) = self.table_cache.get(&cache_key).await {
|
||||
@@ -574,17 +583,17 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
|
||||
let version = parse_server_version(&request_id, &rsp)?;
|
||||
let table_identifier = build_table_identifier(
|
||||
&request.name,
|
||||
&request.namespace,
|
||||
&request.namespace_path,
|
||||
&self.client.id_delimiter,
|
||||
);
|
||||
let table = Arc::new(RemoteTable::new(
|
||||
self.client.clone(),
|
||||
request.name.clone(),
|
||||
request.namespace.clone(),
|
||||
request.namespace_path.clone(),
|
||||
table_identifier,
|
||||
version,
|
||||
));
|
||||
let cache_key = build_cache_key(&request.name, &request.namespace);
|
||||
let cache_key = build_cache_key(&request.name, &request.namespace_path);
|
||||
self.table_cache.insert(cache_key, table.clone()).await;
|
||||
Ok(table)
|
||||
}
|
||||
@@ -594,18 +603,18 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
|
||||
&self,
|
||||
current_name: &str,
|
||||
new_name: &str,
|
||||
cur_namespace: &[String],
|
||||
new_namespace: &[String],
|
||||
cur_namespace_path: &[String],
|
||||
new_namespace_path: &[String],
|
||||
) -> Result<()> {
|
||||
let current_identifier =
|
||||
build_table_identifier(current_name, cur_namespace, &self.client.id_delimiter);
|
||||
let current_cache_key = build_cache_key(current_name, cur_namespace);
|
||||
let new_cache_key = build_cache_key(new_name, new_namespace);
|
||||
build_table_identifier(current_name, cur_namespace_path, &self.client.id_delimiter);
|
||||
let current_cache_key = build_cache_key(current_name, cur_namespace_path);
|
||||
let new_cache_key = build_cache_key(new_name, new_namespace_path);
|
||||
|
||||
let mut body = serde_json::json!({ "new_table_name": new_name });
|
||||
if !new_namespace.is_empty() {
|
||||
if !new_namespace_path.is_empty() {
|
||||
body["new_namespace"] = serde_json::Value::Array(
|
||||
new_namespace
|
||||
new_namespace_path
|
||||
.iter()
|
||||
.map(|s| serde_json::Value::String(s.clone()))
|
||||
.collect(),
|
||||
@@ -624,9 +633,9 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn drop_table(&self, name: &str, namespace: &[String]) -> Result<()> {
|
||||
let identifier = build_table_identifier(name, namespace, &self.client.id_delimiter);
|
||||
let cache_key = build_cache_key(name, namespace);
|
||||
async fn drop_table(&self, name: &str, namespace_path: &[String]) -> Result<()> {
|
||||
let identifier = build_table_identifier(name, namespace_path, &self.client.id_delimiter);
|
||||
let cache_key = build_cache_key(name, namespace_path);
|
||||
let req = self.client.post(&format!("/v1/table/{}/drop/", identifier));
|
||||
let (request_id, resp) = self.client.send(req).await?;
|
||||
self.client.check_response(&request_id, resp).await?;
|
||||
@@ -634,9 +643,9 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn drop_all_tables(&self, namespace: &[String]) -> Result<()> {
|
||||
async fn drop_all_tables(&self, namespace_path: &[String]) -> Result<()> {
|
||||
// TODO: Implement namespace-aware drop_all_tables
|
||||
let _namespace = namespace; // Suppress unused warning for now
|
||||
let _namespace_path = namespace_path; // Suppress unused warning for now
|
||||
Err(crate::Error::NotSupported {
|
||||
message: "Dropping all tables is not currently supported in the remote API".to_string(),
|
||||
})
|
||||
@@ -782,12 +791,7 @@ impl RemoteOptions {
|
||||
|
||||
impl From<StorageOptions> for RemoteOptions {
|
||||
fn from(options: StorageOptions) -> Self {
|
||||
let supported_opts = vec![
|
||||
"account_name",
|
||||
"azure_storage_account_name",
|
||||
"azure_client_id",
|
||||
"azure_tenant_id",
|
||||
];
|
||||
let supported_opts = vec!["account_name", "azure_storage_account_name"];
|
||||
let mut filtered = HashMap::new();
|
||||
for opt in supported_opts {
|
||||
if let Some(v) = options.0.get(opt) {
|
||||
|
||||
@@ -5,6 +5,7 @@ pub mod insert;
|
||||
|
||||
use self::insert::RemoteInsertExec;
|
||||
use crate::expr::expr_to_sql_string;
|
||||
use crate::table::write_progress::FinishOnDrop;
|
||||
|
||||
use super::ARROW_STREAM_CONTENT_TYPE;
|
||||
use super::client::RequestResultExt;
|
||||
@@ -939,12 +940,15 @@ impl<S: HttpSend + 'static> RemoteTable<S> {
|
||||
async fn add_single_partition(&self, output: PreprocessingOutput) -> Result<AddResult> {
|
||||
use crate::remote::retry::RetryCounter;
|
||||
|
||||
let _guard = output.tracker.as_ref().map(|t| t.track_task());
|
||||
|
||||
let mut insert: Arc<dyn ExecutionPlan> = Arc::new(RemoteInsertExec::new(
|
||||
self.name.clone(),
|
||||
self.identifier.clone(),
|
||||
self.client.clone(),
|
||||
output.plan,
|
||||
output.overwrite,
|
||||
output.tracker.clone(),
|
||||
));
|
||||
|
||||
let mut retry_counter =
|
||||
@@ -1045,6 +1049,11 @@ impl<S: HttpSend + 'static> RemoteTable<S> {
|
||||
output: &PreprocessingOutput,
|
||||
num_partitions: usize,
|
||||
) -> Result<()> {
|
||||
debug_assert!(
|
||||
output.rescannable,
|
||||
"multipart inserts require rescannable input for retry support"
|
||||
);
|
||||
|
||||
let plan = Arc::new(
|
||||
datafusion_physical_plan::repartition::RepartitionExec::try_new(
|
||||
output.plan.clone(),
|
||||
@@ -1059,14 +1068,18 @@ impl<S: HttpSend + 'static> RemoteTable<S> {
|
||||
plan,
|
||||
output.overwrite,
|
||||
upload_id.to_string(),
|
||||
output.tracker.clone(),
|
||||
));
|
||||
|
||||
let task_ctx = Arc::new(datafusion_execution::TaskContext::default());
|
||||
let tracker = output.tracker.clone();
|
||||
let mut join_set = tokio::task::JoinSet::new();
|
||||
for partition in 0..num_partitions {
|
||||
let exec = insert.clone();
|
||||
let ctx = task_ctx.clone();
|
||||
let tracker = tracker.clone();
|
||||
join_set.spawn(async move {
|
||||
let _guard = tracker.as_ref().map(|t| t.track_task());
|
||||
let mut stream = exec
|
||||
.execute(partition, ctx)
|
||||
.map_err(|e| -> Error { e.into() })?;
|
||||
@@ -1273,6 +1286,11 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
|
||||
let output = add.into_plan(&table_schema, &table_def)?;
|
||||
|
||||
if let Some(ref t) = output.tracker {
|
||||
t.set_total_tasks(num_partitions);
|
||||
}
|
||||
let _finish = FinishOnDrop(output.tracker.clone());
|
||||
|
||||
if num_partitions > 1 {
|
||||
self.add_multipart(output, num_partitions).await
|
||||
} else {
|
||||
@@ -1975,6 +1993,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
self.client.clone(),
|
||||
input,
|
||||
overwrite,
|
||||
None,
|
||||
)))
|
||||
}
|
||||
}
|
||||
@@ -5170,6 +5189,77 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_multipart_write_progress() {
|
||||
let callback_count = Arc::new(AtomicUsize::new(0));
|
||||
let max_active = Arc::new(AtomicUsize::new(0));
|
||||
let last_total_tasks = Arc::new(AtomicUsize::new(0));
|
||||
let seen_done = Arc::new(std::sync::Mutex::new(false));
|
||||
|
||||
let cb_count = callback_count.clone();
|
||||
let cb_active = max_active.clone();
|
||||
let cb_total = last_total_tasks.clone();
|
||||
let cb_done = seen_done.clone();
|
||||
|
||||
let table = Table::new_with_handler_version(
|
||||
"my_table",
|
||||
semver::Version::new(0, 4, 0),
|
||||
move |request| {
|
||||
let path = request.url().path();
|
||||
|
||||
if path == "/v1/table/my_table/describe/" {
|
||||
return simple_describe_response();
|
||||
}
|
||||
if path == "/v1/table/my_table/multipart_write/create" {
|
||||
return http::Response::builder()
|
||||
.status(200)
|
||||
.body(r#"{"upload_id": "prog-upload"}"#.to_string())
|
||||
.unwrap();
|
||||
}
|
||||
if path == "/v1/table/my_table/insert/" {
|
||||
return http::Response::builder()
|
||||
.status(200)
|
||||
.body(r#"{"version": 1}"#.to_string())
|
||||
.unwrap();
|
||||
}
|
||||
if path == "/v1/table/my_table/multipart_write/complete" {
|
||||
return http::Response::builder()
|
||||
.status(200)
|
||||
.body(r#"{"version": 3}"#.to_string())
|
||||
.unwrap();
|
||||
}
|
||||
panic!("Unexpected request path: {}", path);
|
||||
},
|
||||
);
|
||||
|
||||
let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
|
||||
table
|
||||
.add(vec![batch])
|
||||
.write_parallelism(2)
|
||||
.progress(move |p| {
|
||||
cb_count.fetch_add(1, Ordering::SeqCst);
|
||||
cb_active.fetch_max(p.active_tasks(), Ordering::SeqCst);
|
||||
cb_total.store(p.total_tasks(), Ordering::SeqCst);
|
||||
if p.done() {
|
||||
*cb_done.lock().unwrap() = true;
|
||||
}
|
||||
})
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert!(
|
||||
callback_count.load(Ordering::SeqCst) >= 1,
|
||||
"expected at least one progress callback"
|
||||
);
|
||||
assert!(*seen_done.lock().unwrap(), "must see done=true");
|
||||
assert_eq!(last_total_tasks.load(Ordering::SeqCst), 2);
|
||||
assert!(
|
||||
max_active.load(Ordering::SeqCst) >= 1,
|
||||
"expected at least one active task"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_multipart_write_fallback_old_server() {
|
||||
let insert_count = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
@@ -11,12 +11,14 @@ use arrow_ipc::CompressionType;
|
||||
use datafusion_common::{DataFusionError, Result as DataFusionResult};
|
||||
use datafusion_execution::{SendableRecordBatchStream, TaskContext};
|
||||
use datafusion_physical_expr::EquivalenceProperties;
|
||||
use datafusion_physical_plan::metrics::{ExecutionPlanMetricsSet, MetricsSet};
|
||||
use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
|
||||
use datafusion_physical_plan::{
|
||||
DisplayAs, DisplayFormatType, ExecutionPlan, ExecutionPlanProperties, PlanProperties,
|
||||
};
|
||||
use futures::StreamExt;
|
||||
use http::header::CONTENT_TYPE;
|
||||
use lance::io::exec::utils::InstrumentedRecordBatchStreamAdapter;
|
||||
|
||||
use crate::Error;
|
||||
use crate::remote::ARROW_STREAM_CONTENT_TYPE;
|
||||
@@ -24,6 +26,7 @@ use crate::remote::client::{HttpSend, RestfulLanceDbClient, Sender};
|
||||
use crate::remote::table::RemoteTable;
|
||||
use crate::table::AddResult;
|
||||
use crate::table::datafusion::insert::COUNT_SCHEMA;
|
||||
use crate::table::write_progress::WriteProgressTracker;
|
||||
|
||||
/// ExecutionPlan for inserting data into a remote LanceDB table.
|
||||
///
|
||||
@@ -42,7 +45,9 @@ pub struct RemoteInsertExec<S: HttpSend = Sender> {
|
||||
overwrite: bool,
|
||||
properties: PlanProperties,
|
||||
add_result: Arc<Mutex<Option<AddResult>>>,
|
||||
metrics: ExecutionPlanMetricsSet,
|
||||
upload_id: Option<String>,
|
||||
tracker: Option<Arc<WriteProgressTracker>>,
|
||||
}
|
||||
|
||||
impl<S: HttpSend + 'static> RemoteInsertExec<S> {
|
||||
@@ -53,8 +58,11 @@ impl<S: HttpSend + 'static> RemoteInsertExec<S> {
|
||||
client: RestfulLanceDbClient<S>,
|
||||
input: Arc<dyn ExecutionPlan>,
|
||||
overwrite: bool,
|
||||
tracker: Option<Arc<WriteProgressTracker>>,
|
||||
) -> Self {
|
||||
Self::new_inner(table_name, identifier, client, input, overwrite, None)
|
||||
Self::new_inner(
|
||||
table_name, identifier, client, input, overwrite, None, tracker,
|
||||
)
|
||||
}
|
||||
|
||||
/// Create a multi-partition RemoteInsertExec for use with multipart writes.
|
||||
@@ -69,6 +77,7 @@ impl<S: HttpSend + 'static> RemoteInsertExec<S> {
|
||||
input: Arc<dyn ExecutionPlan>,
|
||||
overwrite: bool,
|
||||
upload_id: String,
|
||||
tracker: Option<Arc<WriteProgressTracker>>,
|
||||
) -> Self {
|
||||
Self::new_inner(
|
||||
table_name,
|
||||
@@ -77,6 +86,7 @@ impl<S: HttpSend + 'static> RemoteInsertExec<S> {
|
||||
input,
|
||||
overwrite,
|
||||
Some(upload_id),
|
||||
tracker,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -87,6 +97,7 @@ impl<S: HttpSend + 'static> RemoteInsertExec<S> {
|
||||
input: Arc<dyn ExecutionPlan>,
|
||||
overwrite: bool,
|
||||
upload_id: Option<String>,
|
||||
tracker: Option<Arc<WriteProgressTracker>>,
|
||||
) -> Self {
|
||||
let num_partitions = if upload_id.is_some() {
|
||||
input.output_partitioning().partition_count()
|
||||
@@ -109,7 +120,9 @@ impl<S: HttpSend + 'static> RemoteInsertExec<S> {
|
||||
overwrite,
|
||||
properties,
|
||||
add_result: Arc::new(Mutex::new(None)),
|
||||
metrics: ExecutionPlanMetricsSet::new(),
|
||||
upload_id,
|
||||
tracker,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -117,7 +130,10 @@ impl<S: HttpSend + 'static> RemoteInsertExec<S> {
|
||||
// TODO: this will be used when we wire this up to Table::add().
|
||||
#[allow(dead_code)]
|
||||
pub fn add_result(&self) -> Option<AddResult> {
|
||||
self.add_result.lock().unwrap().clone()
|
||||
self.add_result
|
||||
.lock()
|
||||
.unwrap_or_else(|e| e.into_inner())
|
||||
.clone()
|
||||
}
|
||||
|
||||
/// Stream the input into an HTTP body as an Arrow IPC stream, capturing any
|
||||
@@ -128,6 +144,7 @@ impl<S: HttpSend + 'static> RemoteInsertExec<S> {
|
||||
fn stream_as_http_body(
|
||||
data: SendableRecordBatchStream,
|
||||
error_tx: tokio::sync::oneshot::Sender<DataFusionError>,
|
||||
tracker: Option<Arc<WriteProgressTracker>>,
|
||||
) -> DataFusionResult<reqwest::Body> {
|
||||
let options = arrow_ipc::writer::IpcWriteOptions::default()
|
||||
.try_with_compression(Some(CompressionType::LZ4_FRAME))?;
|
||||
@@ -139,37 +156,46 @@ impl<S: HttpSend + 'static> RemoteInsertExec<S> {
|
||||
|
||||
let stream = futures::stream::try_unfold(
|
||||
(data, writer, Some(error_tx), false),
|
||||
move |(mut data, mut writer, error_tx, finished)| async move {
|
||||
if finished {
|
||||
return Ok(None);
|
||||
}
|
||||
match data.next().await {
|
||||
Some(Ok(batch)) => {
|
||||
writer
|
||||
.write(&batch)
|
||||
.map_err(|e| std::io::Error::other(e.to_string()))?;
|
||||
let buffer = std::mem::take(writer.get_mut());
|
||||
Ok(Some((buffer, (data, writer, error_tx, false))))
|
||||
move |(mut data, mut writer, error_tx, finished)| {
|
||||
let tracker = tracker.clone();
|
||||
async move {
|
||||
if finished {
|
||||
return Ok(None);
|
||||
}
|
||||
Some(Err(e)) => {
|
||||
// Send the original error through the channel before
|
||||
// returning a generic error to reqwest.
|
||||
if let Some(tx) = error_tx {
|
||||
let _ = tx.send(e);
|
||||
match data.next().await {
|
||||
Some(Ok(batch)) => {
|
||||
writer
|
||||
.write(&batch)
|
||||
.map_err(|e| std::io::Error::other(e.to_string()))?;
|
||||
let buffer = std::mem::take(writer.get_mut());
|
||||
if let Some(ref t) = tracker {
|
||||
t.record_bytes(buffer.len());
|
||||
}
|
||||
Ok(Some((buffer, (data, writer, error_tx, false))))
|
||||
}
|
||||
Err(std::io::Error::other(
|
||||
"input stream error (see error channel)",
|
||||
))
|
||||
}
|
||||
None => {
|
||||
writer
|
||||
.finish()
|
||||
.map_err(|e| std::io::Error::other(e.to_string()))?;
|
||||
let buffer = std::mem::take(writer.get_mut());
|
||||
if buffer.is_empty() {
|
||||
Ok(None)
|
||||
} else {
|
||||
Ok(Some((buffer, (data, writer, None, true))))
|
||||
Some(Err(e)) => {
|
||||
// Send the original error through the channel before
|
||||
// returning a generic error to reqwest.
|
||||
if let Some(tx) = error_tx {
|
||||
let _ = tx.send(e);
|
||||
}
|
||||
Err(std::io::Error::other(
|
||||
"input stream error (see error channel)",
|
||||
))
|
||||
}
|
||||
None => {
|
||||
writer
|
||||
.finish()
|
||||
.map_err(|e| std::io::Error::other(e.to_string()))?;
|
||||
let buffer = std::mem::take(writer.get_mut());
|
||||
if buffer.is_empty() {
|
||||
Ok(None)
|
||||
} else {
|
||||
if let Some(ref t) = tracker {
|
||||
t.record_bytes(buffer.len());
|
||||
}
|
||||
Ok(Some((buffer, (data, writer, None, true))))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -246,6 +272,7 @@ impl<S: HttpSend + 'static> ExecutionPlan for RemoteInsertExec<S> {
|
||||
children[0].clone(),
|
||||
self.overwrite,
|
||||
self.upload_id.clone(),
|
||||
self.tracker.clone(),
|
||||
)))
|
||||
}
|
||||
|
||||
@@ -262,12 +289,21 @@ impl<S: HttpSend + 'static> ExecutionPlan for RemoteInsertExec<S> {
|
||||
}
|
||||
|
||||
let input_stream = self.input.execute(partition, context)?;
|
||||
let input_schema = input_stream.schema();
|
||||
let input_stream: SendableRecordBatchStream =
|
||||
Box::pin(InstrumentedRecordBatchStreamAdapter::new(
|
||||
input_schema,
|
||||
input_stream,
|
||||
partition,
|
||||
&self.metrics,
|
||||
));
|
||||
let client = self.client.clone();
|
||||
let identifier = self.identifier.clone();
|
||||
let overwrite = self.overwrite;
|
||||
let add_result = self.add_result.clone();
|
||||
let table_name = self.table_name.clone();
|
||||
let upload_id = self.upload_id.clone();
|
||||
let tracker = self.tracker.clone();
|
||||
|
||||
let stream = futures::stream::once(async move {
|
||||
let mut request = client
|
||||
@@ -282,7 +318,7 @@ impl<S: HttpSend + 'static> ExecutionPlan for RemoteInsertExec<S> {
|
||||
}
|
||||
|
||||
let (error_tx, mut error_rx) = tokio::sync::oneshot::channel();
|
||||
let body = Self::stream_as_http_body(input_stream, error_tx)?;
|
||||
let body = Self::stream_as_http_body(input_stream, error_tx, tracker)?;
|
||||
let request = request.body(body);
|
||||
|
||||
let result: DataFusionResult<(String, _)> = async {
|
||||
@@ -344,6 +380,15 @@ impl<S: HttpSend + 'static> ExecutionPlan for RemoteInsertExec<S> {
|
||||
DataFusionError::Execution("Failed to acquire lock for add_result".to_string())
|
||||
})?;
|
||||
*res_lock = Some(parsed_result);
|
||||
} else {
|
||||
// We don't use the body in this case, but we should still consume it.
|
||||
let _ = response.bytes().await.map_err(|e| {
|
||||
DataFusionError::External(Box::new(Error::Http {
|
||||
source: Box::new(e),
|
||||
request_id: request_id.clone(),
|
||||
status_code: None,
|
||||
}))
|
||||
})?;
|
||||
}
|
||||
|
||||
// Return a single batch with count 0 (actual count is tracked in add_result)
|
||||
@@ -357,6 +402,10 @@ impl<S: HttpSend + 'static> ExecutionPlan for RemoteInsertExec<S> {
|
||||
stream,
|
||||
)))
|
||||
}
|
||||
|
||||
fn metrics(&self) -> Option<MetricsSet> {
|
||||
Some(self.metrics.clone_inner())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -19,11 +19,11 @@ pub use lance::dataset::Version;
|
||||
use lance::dataset::WriteMode;
|
||||
use lance::dataset::builder::DatasetBuilder;
|
||||
use lance::dataset::{InsertBuilder, WriteParams};
|
||||
use lance::index::DatasetIndexExt;
|
||||
use lance::index::vector::VectorIndexParams;
|
||||
use lance::index::vector::utils::infer_vector_dim;
|
||||
use lance::io::{ObjectStoreParams, WrappingObjectStore};
|
||||
use lance_datafusion::utils::StreamingWriteSource;
|
||||
use lance_index::DatasetIndexExt;
|
||||
use lance_index::IndexType;
|
||||
use lance_index::scalar::{BuiltinIndexType, ScalarIndexParams};
|
||||
use lance_index::vector::bq::RQBuildParams;
|
||||
@@ -42,11 +42,13 @@ use lance_table::io::commit::CommitHandler;
|
||||
use lance_table::io::commit::ManifestNamingScheme;
|
||||
use lance_table::io::commit::external_manifest::ExternalManifestCommitHandler;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::format;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::connection::PushdownOperation;
|
||||
|
||||
use crate::data::scannable::{PeekedScannable, Scannable, estimate_write_partitions};
|
||||
use crate::database::Database;
|
||||
use crate::embeddings::{EmbeddingDefinition, EmbeddingRegistry, MemoryRegistry};
|
||||
@@ -74,7 +76,9 @@ pub mod optimize;
|
||||
pub mod query;
|
||||
pub mod schema_evolution;
|
||||
pub mod update;
|
||||
pub mod write_progress;
|
||||
use crate::index::waiter::wait_for_index;
|
||||
#[cfg(feature = "remote")]
|
||||
pub(crate) use add_data::PreprocessingOutput;
|
||||
pub use add_data::{AddDataBuilder, AddDataMode, AddResult, NaNVectorBehavior};
|
||||
pub use chrono::Duration;
|
||||
@@ -1266,10 +1270,9 @@ pub struct NativeTable {
|
||||
// Optional namespace client for namespace operations (e.g., managed versioning).
|
||||
// pub(crate) so query.rs can access the field for server-side query execution.
|
||||
pub(crate) namespace_client: Option<Arc<dyn LanceNamespace>>,
|
||||
// Whether to enable server-side query execution via the namespace client.
|
||||
// When true and namespace_client is set, queries will be executed on the
|
||||
// namespace server instead of locally.
|
||||
pub(crate) server_side_query_enabled: bool,
|
||||
// Operations to push down to the namespace server.
|
||||
// pub(crate) so query.rs can access the field for server-side query execution.
|
||||
pub(crate) pushdown_operations: HashSet<PushdownOperation>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for NativeTable {
|
||||
@@ -1281,7 +1284,7 @@ impl std::fmt::Debug for NativeTable {
|
||||
.field("uri", &self.uri)
|
||||
.field("read_consistency_interval", &self.read_consistency_interval)
|
||||
.field("namespace_client", &self.namespace_client)
|
||||
.field("server_side_query_enabled", &self.server_side_query_enabled)
|
||||
.field("pushdown_operations", &self.pushdown_operations)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
@@ -1318,7 +1321,18 @@ impl NativeTable {
|
||||
/// * A [NativeTable] object.
|
||||
pub async fn open(uri: &str) -> Result<Self> {
|
||||
let name = Self::get_table_name(uri)?;
|
||||
Self::open_with_params(uri, &name, vec![], None, None, None, None, false, None).await
|
||||
Self::open_with_params(
|
||||
uri,
|
||||
&name,
|
||||
vec![],
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
HashSet::new(),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Opens an existing Table
|
||||
@@ -1329,7 +1343,7 @@ impl NativeTable {
|
||||
/// * `name` The Table name
|
||||
/// * `params` The [ReadParams] to use when opening the table
|
||||
/// * `namespace_client` - Optional namespace client for namespace operations
|
||||
/// * `server_side_query_enabled` - Whether to enable server-side query execution
|
||||
/// * `pushdown_operations` - Operations to push down to the namespace server
|
||||
/// * `managed_versioning` - Whether managed versioning is enabled. If None and namespace_client
|
||||
/// is provided, the value will be fetched via describe_table.
|
||||
///
|
||||
@@ -1345,7 +1359,7 @@ impl NativeTable {
|
||||
params: Option<ReadParams>,
|
||||
read_consistency_interval: Option<std::time::Duration>,
|
||||
namespace_client: Option<Arc<dyn LanceNamespace>>,
|
||||
server_side_query_enabled: bool,
|
||||
pushdown_operations: HashSet<PushdownOperation>,
|
||||
managed_versioning: Option<bool>,
|
||||
) -> Result<Self> {
|
||||
let params = params.unwrap_or_default();
|
||||
@@ -1415,7 +1429,7 @@ impl NativeTable {
|
||||
dataset,
|
||||
read_consistency_interval,
|
||||
namespace_client,
|
||||
server_side_query_enabled,
|
||||
pushdown_operations,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1441,10 +1455,8 @@ impl NativeTable {
|
||||
/// * `write_store_wrapper` - Optional wrapper for the object store on write path
|
||||
/// * `params` - Optional read parameters
|
||||
/// * `read_consistency_interval` - Optional interval for read consistency
|
||||
/// * `server_side_query_enabled` - Whether to enable server-side query execution.
|
||||
/// When true, the namespace_client will be stored and queries will be executed
|
||||
/// on the namespace server. When false, the namespace is only used for opening
|
||||
/// the table, and queries are executed locally.
|
||||
/// * `pushdown_operations` - Operations to push down to the namespace server.
|
||||
/// When `QueryTable` is included, queries will be executed on the namespace server.
|
||||
/// * `session` - Optional session for object stores and caching
|
||||
///
|
||||
/// # Returns
|
||||
@@ -1458,7 +1470,7 @@ impl NativeTable {
|
||||
write_store_wrapper: Option<Arc<dyn WrappingObjectStore>>,
|
||||
params: Option<ReadParams>,
|
||||
read_consistency_interval: Option<std::time::Duration>,
|
||||
server_side_query_enabled: bool,
|
||||
pushdown_operations: HashSet<PushdownOperation>,
|
||||
session: Option<Arc<lance::session::Session>>,
|
||||
) -> Result<Self> {
|
||||
let mut params = params.unwrap_or_default();
|
||||
@@ -1505,11 +1517,12 @@ impl NativeTable {
|
||||
let dataset = DatasetConsistencyWrapper::new_latest(dataset, read_consistency_interval);
|
||||
let id = Self::build_id(&namespace, name);
|
||||
|
||||
let stored_namespace_client = if server_side_query_enabled {
|
||||
Some(namespace_client)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let stored_namespace_client =
|
||||
if pushdown_operations.contains(&PushdownOperation::QueryTable) {
|
||||
Some(namespace_client)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
name: name.to_string(),
|
||||
@@ -1519,7 +1532,7 @@ impl NativeTable {
|
||||
dataset,
|
||||
read_consistency_interval,
|
||||
namespace_client: stored_namespace_client,
|
||||
server_side_query_enabled,
|
||||
pushdown_operations,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1560,7 +1573,7 @@ impl NativeTable {
|
||||
/// * `batches` RecordBatch to be saved in the database.
|
||||
/// * `params` - Write parameters.
|
||||
/// * `namespace_client` - Optional namespace client for namespace operations
|
||||
/// * `server_side_query_enabled` - Whether to enable server-side query execution
|
||||
/// * `pushdown_operations` - Operations to push down to the namespace server
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
@@ -1575,7 +1588,7 @@ impl NativeTable {
|
||||
params: Option<WriteParams>,
|
||||
read_consistency_interval: Option<std::time::Duration>,
|
||||
namespace_client: Option<Arc<dyn LanceNamespace>>,
|
||||
server_side_query_enabled: bool,
|
||||
pushdown_operations: HashSet<PushdownOperation>,
|
||||
) -> Result<Self> {
|
||||
// Default params uses format v1.
|
||||
let params = params.unwrap_or(WriteParams {
|
||||
@@ -1608,7 +1621,7 @@ impl NativeTable {
|
||||
dataset: DatasetConsistencyWrapper::new_latest(dataset, read_consistency_interval),
|
||||
read_consistency_interval,
|
||||
namespace_client,
|
||||
server_side_query_enabled,
|
||||
pushdown_operations,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1622,7 +1635,7 @@ impl NativeTable {
|
||||
params: Option<WriteParams>,
|
||||
read_consistency_interval: Option<std::time::Duration>,
|
||||
namespace_client: Option<Arc<dyn LanceNamespace>>,
|
||||
server_side_query_enabled: bool,
|
||||
pushdown_operations: HashSet<PushdownOperation>,
|
||||
) -> Result<Self> {
|
||||
let data: Box<dyn Scannable> = Box::new(RecordBatch::new_empty(schema));
|
||||
Self::create(
|
||||
@@ -1634,7 +1647,7 @@ impl NativeTable {
|
||||
params,
|
||||
read_consistency_interval,
|
||||
namespace_client,
|
||||
server_side_query_enabled,
|
||||
pushdown_operations,
|
||||
)
|
||||
.await
|
||||
}
|
||||
@@ -1657,7 +1670,7 @@ impl NativeTable {
|
||||
/// * `write_store_wrapper` - Optional wrapper for the object store on write path
|
||||
/// * `params` - Optional write parameters
|
||||
/// * `read_consistency_interval` - Optional interval for read consistency
|
||||
/// * `server_side_query_enabled` - Whether to enable server-side query execution
|
||||
/// * `pushdown_operations` - Operations to push down to the namespace server
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
@@ -1672,7 +1685,7 @@ impl NativeTable {
|
||||
write_store_wrapper: Option<Arc<dyn WrappingObjectStore>>,
|
||||
params: Option<WriteParams>,
|
||||
read_consistency_interval: Option<std::time::Duration>,
|
||||
server_side_query_enabled: bool,
|
||||
pushdown_operations: HashSet<PushdownOperation>,
|
||||
session: Option<Arc<lance::session::Session>>,
|
||||
) -> Result<Self> {
|
||||
// Build table_id from namespace + name for the storage options provider
|
||||
@@ -1724,11 +1737,12 @@ impl NativeTable {
|
||||
|
||||
let id = Self::build_id(&namespace, name);
|
||||
|
||||
let stored_namespace_client = if server_side_query_enabled {
|
||||
Some(namespace_client)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let stored_namespace_client =
|
||||
if pushdown_operations.contains(&PushdownOperation::QueryTable) {
|
||||
Some(namespace_client)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
name: name.to_string(),
|
||||
@@ -1738,7 +1752,7 @@ impl NativeTable {
|
||||
dataset: DatasetConsistencyWrapper::new_latest(dataset, read_consistency_interval),
|
||||
read_consistency_interval,
|
||||
namespace_client: stored_namespace_client,
|
||||
server_side_query_enabled,
|
||||
pushdown_operations,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -2275,13 +2289,21 @@ impl BaseTable for NativeTable {
|
||||
|
||||
let insert_exec = Arc::new(InsertExec::new(ds_wrapper.clone(), ds, plan, lance_params));
|
||||
|
||||
let tracker_for_tasks = output.tracker.clone();
|
||||
if let Some(ref t) = tracker_for_tasks {
|
||||
t.set_total_tasks(num_partitions);
|
||||
}
|
||||
let _finish = write_progress::FinishOnDrop(output.tracker);
|
||||
|
||||
// Execute all partitions in parallel.
|
||||
let task_ctx = Arc::new(TaskContext::default());
|
||||
let handles = FuturesUnordered::new();
|
||||
for partition in 0..num_partitions {
|
||||
let exec = insert_exec.clone();
|
||||
let ctx = task_ctx.clone();
|
||||
let tracker = tracker_for_tasks.clone();
|
||||
handles.push(tokio::spawn(async move {
|
||||
let _guard = tracker.as_ref().map(|t| t.track_task());
|
||||
let mut stream = exec
|
||||
.execute(partition, ctx)
|
||||
.map_err(|e| -> Error { e.into() })?;
|
||||
@@ -2741,9 +2763,19 @@ mod tests {
|
||||
vec![Ok(batch.clone())],
|
||||
batch.schema(),
|
||||
));
|
||||
let table = NativeTable::create(uri, "test", vec![], reader, None, None, None, None, false)
|
||||
.await
|
||||
.unwrap();
|
||||
let table = NativeTable::create(
|
||||
uri,
|
||||
"test",
|
||||
vec![],
|
||||
reader,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
HashSet::new(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(table.count_rows(None).await.unwrap(), 10);
|
||||
assert_eq!(
|
||||
@@ -3770,7 +3802,7 @@ mod tests {
|
||||
TableStatistics {
|
||||
num_rows: 250,
|
||||
num_indices: 0,
|
||||
total_bytes: 2000,
|
||||
total_bytes: 2300,
|
||||
fragment_stats: FragmentStatistics {
|
||||
num_fragments: 11,
|
||||
num_small_fragments: 11,
|
||||
|
||||
@@ -13,6 +13,9 @@ use crate::embeddings::EmbeddingRegistry;
|
||||
use crate::table::datafusion::cast::cast_to_table_schema;
|
||||
use crate::table::datafusion::reject_nan::reject_nan_vectors;
|
||||
use crate::table::datafusion::scannable_exec::ScannableExec;
|
||||
use crate::table::write_progress::ProgressCallback;
|
||||
use crate::table::write_progress::WriteProgress;
|
||||
use crate::table::write_progress::WriteProgressTracker;
|
||||
use crate::{Error, Result};
|
||||
|
||||
use super::{BaseTable, TableDefinition, WriteOptions};
|
||||
@@ -52,6 +55,7 @@ pub struct AddDataBuilder {
|
||||
pub(crate) write_options: WriteOptions,
|
||||
pub(crate) on_nan_vectors: NaNVectorBehavior,
|
||||
pub(crate) embedding_registry: Option<Arc<dyn EmbeddingRegistry>>,
|
||||
pub(crate) progress_callback: Option<ProgressCallback>,
|
||||
pub(crate) write_parallelism: Option<usize>,
|
||||
}
|
||||
|
||||
@@ -78,6 +82,7 @@ impl AddDataBuilder {
|
||||
write_options: WriteOptions::default(),
|
||||
on_nan_vectors: NaNVectorBehavior::default(),
|
||||
embedding_registry,
|
||||
progress_callback: None,
|
||||
write_parallelism: None,
|
||||
}
|
||||
}
|
||||
@@ -103,6 +108,27 @@ impl AddDataBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Set a callback to receive progress updates during the add operation.
|
||||
///
|
||||
/// The callback is invoked once per batch written, and once more with
|
||||
/// [`WriteProgress::done`] set to `true` when the write completes.
|
||||
///
|
||||
/// ```
|
||||
/// # use lancedb::Table;
|
||||
/// # async fn example(table: &Table) -> Result<(), Box<dyn std::error::Error>> {
|
||||
/// let batch = arrow_array::record_batch!(("id", Int32, [1, 2, 3])).unwrap();
|
||||
/// table.add(batch)
|
||||
/// .progress(|p| println!("{}/{:?} rows", p.output_rows(), p.total_rows()))
|
||||
/// .execute()
|
||||
/// .await?;
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn progress(mut self, callback: impl FnMut(&WriteProgress) + Send + 'static) -> Self {
|
||||
self.progress_callback = Some(Arc::new(std::sync::Mutex::new(callback)));
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the number of parallel write streams.
|
||||
///
|
||||
/// By default, the number of streams is estimated from the data size.
|
||||
@@ -147,8 +173,11 @@ impl AddDataBuilder {
|
||||
scannable_with_embeddings(self.data, table_def, self.embedding_registry.as_ref())?;
|
||||
|
||||
let rescannable = self.data.rescannable();
|
||||
let tracker = self
|
||||
.progress_callback
|
||||
.map(|cb| Arc::new(WriteProgressTracker::new(cb, self.data.num_rows())));
|
||||
let plan: Arc<dyn datafusion_physical_plan::ExecutionPlan> =
|
||||
Arc::new(ScannableExec::new(self.data));
|
||||
Arc::new(ScannableExec::new(self.data, tracker.clone()));
|
||||
// Skip casting when overwriting — the input schema replaces the table schema.
|
||||
let plan = if overwrite {
|
||||
plan
|
||||
@@ -166,6 +195,7 @@ impl AddDataBuilder {
|
||||
rescannable,
|
||||
write_options: self.write_options,
|
||||
mode: self.mode,
|
||||
tracker,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -178,6 +208,7 @@ pub struct PreprocessingOutput {
|
||||
pub rescannable: bool,
|
||||
pub write_options: WriteOptions,
|
||||
pub mode: AddDataMode,
|
||||
pub tracker: Option<Arc<WriteProgressTracker>>,
|
||||
}
|
||||
|
||||
/// Check that the input schema is valid for insert.
|
||||
|
||||
@@ -12,13 +12,16 @@ use datafusion_common::{DataFusionError, Result as DataFusionResult};
|
||||
use datafusion_execution::{SendableRecordBatchStream, TaskContext};
|
||||
use datafusion_physical_expr::{EquivalenceProperties, Partitioning};
|
||||
use datafusion_physical_plan::execution_plan::{Boundedness, EmissionType};
|
||||
use datafusion_physical_plan::metrics::{ExecutionPlanMetricsSet, MetricBuilder, MetricsSet};
|
||||
use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
|
||||
use datafusion_physical_plan::{
|
||||
DisplayAs, DisplayFormatType, ExecutionPlan, ExecutionPlanProperties, PlanProperties,
|
||||
};
|
||||
use futures::TryStreamExt;
|
||||
use lance::Dataset;
|
||||
use lance::dataset::transaction::{Operation, Transaction};
|
||||
use lance::dataset::{CommitBuilder, InsertBuilder, WriteParams};
|
||||
use lance::io::exec::utils::InstrumentedRecordBatchStreamAdapter;
|
||||
use lance_table::format::Fragment;
|
||||
|
||||
use crate::table::dataset::DatasetConsistencyWrapper;
|
||||
@@ -80,6 +83,7 @@ pub struct InsertExec {
|
||||
write_params: WriteParams,
|
||||
properties: PlanProperties,
|
||||
partial_transactions: Arc<Mutex<Vec<Transaction>>>,
|
||||
metrics: ExecutionPlanMetricsSet,
|
||||
}
|
||||
|
||||
impl InsertExec {
|
||||
@@ -105,6 +109,7 @@ impl InsertExec {
|
||||
write_params,
|
||||
properties,
|
||||
partial_transactions: Arc::new(Mutex::new(Vec::with_capacity(num_partitions))),
|
||||
metrics: ExecutionPlanMetricsSet::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -176,6 +181,19 @@ impl ExecutionPlan for InsertExec {
|
||||
let total_partitions = self.input.output_partitioning().partition_count();
|
||||
let ds_wrapper = self.ds_wrapper.clone();
|
||||
|
||||
let output_bytes = MetricBuilder::new(&self.metrics).output_bytes(partition);
|
||||
let input_schema = input_stream.schema();
|
||||
let input_stream: SendableRecordBatchStream =
|
||||
Box::pin(InstrumentedRecordBatchStreamAdapter::new(
|
||||
input_schema,
|
||||
input_stream.map_ok(move |batch| {
|
||||
output_bytes.add(batch.get_array_memory_size());
|
||||
batch
|
||||
}),
|
||||
partition,
|
||||
&self.metrics,
|
||||
));
|
||||
|
||||
let stream = futures::stream::once(async move {
|
||||
let transaction = InsertBuilder::new(dataset.clone())
|
||||
.with_params(&write_params)
|
||||
@@ -186,7 +204,9 @@ impl ExecutionPlan for InsertExec {
|
||||
|
||||
let to_commit = {
|
||||
// Don't hold the lock over an await point.
|
||||
let mut txns = partial_transactions.lock().unwrap();
|
||||
let mut txns = partial_transactions
|
||||
.lock()
|
||||
.unwrap_or_else(|e| e.into_inner());
|
||||
txns.push(transaction);
|
||||
if txns.len() == total_partitions {
|
||||
Some(std::mem::take(&mut *txns))
|
||||
@@ -215,6 +235,10 @@ impl ExecutionPlan for InsertExec {
|
||||
stream,
|
||||
)))
|
||||
}
|
||||
|
||||
fn metrics(&self) -> Option<MetricsSet> {
|
||||
Some(self.metrics.clone_inner())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -7,17 +7,21 @@ use std::sync::{Arc, Mutex};
|
||||
use datafusion_common::{DataFusionError, Result as DFResult, Statistics, stats::Precision};
|
||||
use datafusion_execution::{SendableRecordBatchStream, TaskContext};
|
||||
use datafusion_physical_expr::{EquivalenceProperties, Partitioning};
|
||||
use datafusion_physical_plan::stream::RecordBatchStreamAdapter;
|
||||
use datafusion_physical_plan::{
|
||||
DisplayAs, DisplayFormatType, ExecutionPlan, PlanProperties, execution_plan::EmissionType,
|
||||
};
|
||||
use futures::TryStreamExt;
|
||||
|
||||
use crate::table::write_progress::WriteProgressTracker;
|
||||
use crate::{arrow::SendableRecordBatchStreamExt, data::scannable::Scannable};
|
||||
|
||||
pub struct ScannableExec {
|
||||
// We don't require Scannable to by Sync, so we wrap it in a Mutex to allow safe concurrent access.
|
||||
pub(crate) struct ScannableExec {
|
||||
// We don't require Scannable to be Sync, so we wrap it in a Mutex to allow safe concurrent access.
|
||||
source: Mutex<Box<dyn Scannable>>,
|
||||
num_rows: Option<usize>,
|
||||
properties: PlanProperties,
|
||||
tracker: Option<Arc<WriteProgressTracker>>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for ScannableExec {
|
||||
@@ -30,7 +34,7 @@ impl std::fmt::Debug for ScannableExec {
|
||||
}
|
||||
|
||||
impl ScannableExec {
|
||||
pub fn new(source: Box<dyn Scannable>) -> Self {
|
||||
pub fn new(source: Box<dyn Scannable>, tracker: Option<Arc<WriteProgressTracker>>) -> Self {
|
||||
let schema = source.schema();
|
||||
let eq_properties = EquivalenceProperties::new(schema);
|
||||
let properties = PlanProperties::new(
|
||||
@@ -46,6 +50,7 @@ impl ScannableExec {
|
||||
source,
|
||||
num_rows,
|
||||
properties,
|
||||
tracker,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -102,7 +107,18 @@ impl ExecutionPlan for ScannableExec {
|
||||
Err(poison) => poison.into_inner().scan_as_stream(),
|
||||
};
|
||||
|
||||
Ok(stream.into_df_stream())
|
||||
let tracker = self.tracker.clone();
|
||||
let stream = stream.into_df_stream().map_ok(move |batch| {
|
||||
if let Some(ref t) = tracker {
|
||||
t.record_batch(batch.num_rows(), batch.get_array_memory_size());
|
||||
}
|
||||
batch
|
||||
});
|
||||
|
||||
Ok(Box::pin(RecordBatchStreamAdapter::new(
|
||||
self.schema(),
|
||||
stream,
|
||||
)))
|
||||
}
|
||||
|
||||
fn partition_statistics(&self, _partition: Option<usize>) -> DFResult<Statistics> {
|
||||
|
||||
@@ -82,7 +82,7 @@ impl DatasetConsistencyWrapper {
|
||||
/// pinned dataset regardless of consistency mode.
|
||||
pub async fn get(&self) -> Result<Arc<Dataset>> {
|
||||
{
|
||||
let state = self.state.lock().unwrap();
|
||||
let state = self.state.lock()?;
|
||||
if state.pinned_version.is_some() {
|
||||
return Ok(state.dataset.clone());
|
||||
}
|
||||
@@ -101,7 +101,7 @@ impl DatasetConsistencyWrapper {
|
||||
}
|
||||
ConsistencyMode::Strong => refresh_latest(self.state.clone()).await,
|
||||
ConsistencyMode::Lazy => {
|
||||
let state = self.state.lock().unwrap();
|
||||
let state = self.state.lock()?;
|
||||
Ok(state.dataset.clone())
|
||||
}
|
||||
}
|
||||
@@ -116,7 +116,7 @@ impl DatasetConsistencyWrapper {
|
||||
/// concurrent [`as_time_travel`](Self::as_time_travel) call), the update
|
||||
/// is silently ignored — the write already committed to storage.
|
||||
pub fn update(&self, dataset: Dataset) {
|
||||
let mut state = self.state.lock().unwrap();
|
||||
let mut state = self.state.lock().unwrap_or_else(|e| e.into_inner());
|
||||
if state.pinned_version.is_some() {
|
||||
// A concurrent as_time_travel() beat us here. The write succeeded
|
||||
// in storage, but since we're now pinned we don't advance the
|
||||
@@ -139,7 +139,7 @@ impl DatasetConsistencyWrapper {
|
||||
|
||||
/// Check that the dataset is in a mutable mode (Latest).
|
||||
pub fn ensure_mutable(&self) -> Result<()> {
|
||||
let state = self.state.lock().unwrap();
|
||||
let state = self.state.lock()?;
|
||||
if state.pinned_version.is_some() {
|
||||
Err(crate::Error::InvalidInput {
|
||||
message: "table cannot be modified when a specific version is checked out"
|
||||
@@ -152,13 +152,16 @@ impl DatasetConsistencyWrapper {
|
||||
|
||||
/// Returns the version, if in time travel mode, or None otherwise.
|
||||
pub fn time_travel_version(&self) -> Option<u64> {
|
||||
self.state.lock().unwrap().pinned_version
|
||||
self.state
|
||||
.lock()
|
||||
.unwrap_or_else(|e| e.into_inner())
|
||||
.pinned_version
|
||||
}
|
||||
|
||||
/// Convert into a wrapper in latest version mode.
|
||||
pub async fn as_latest(&self) -> Result<()> {
|
||||
let dataset = {
|
||||
let state = self.state.lock().unwrap();
|
||||
let state = self.state.lock()?;
|
||||
if state.pinned_version.is_none() {
|
||||
return Ok(());
|
||||
}
|
||||
@@ -168,7 +171,7 @@ impl DatasetConsistencyWrapper {
|
||||
let latest_version = dataset.latest_version_id().await?;
|
||||
let new_dataset = dataset.checkout_version(latest_version).await?;
|
||||
|
||||
let mut state = self.state.lock().unwrap();
|
||||
let mut state = self.state.lock()?;
|
||||
if state.pinned_version.is_some() {
|
||||
state.dataset = Arc::new(new_dataset);
|
||||
state.pinned_version = None;
|
||||
@@ -184,7 +187,7 @@ impl DatasetConsistencyWrapper {
|
||||
let target_ref = target_version.into();
|
||||
|
||||
let (should_checkout, dataset) = {
|
||||
let state = self.state.lock().unwrap();
|
||||
let state = self.state.lock()?;
|
||||
let should = match state.pinned_version {
|
||||
None => true,
|
||||
Some(version) => match &target_ref {
|
||||
@@ -204,7 +207,7 @@ impl DatasetConsistencyWrapper {
|
||||
let new_dataset = dataset.checkout_version(target_ref).await?;
|
||||
let version_value = new_dataset.version().version;
|
||||
|
||||
let mut state = self.state.lock().unwrap();
|
||||
let mut state = self.state.lock()?;
|
||||
state.dataset = Arc::new(new_dataset);
|
||||
state.pinned_version = Some(version_value);
|
||||
Ok(())
|
||||
@@ -212,7 +215,7 @@ impl DatasetConsistencyWrapper {
|
||||
|
||||
pub async fn reload(&self) -> Result<()> {
|
||||
let (dataset, pinned_version) = {
|
||||
let state = self.state.lock().unwrap();
|
||||
let state = self.state.lock()?;
|
||||
(state.dataset.clone(), state.pinned_version)
|
||||
};
|
||||
|
||||
@@ -230,7 +233,7 @@ impl DatasetConsistencyWrapper {
|
||||
|
||||
let new_dataset = dataset.checkout_version(version).await?;
|
||||
|
||||
let mut state = self.state.lock().unwrap();
|
||||
let mut state = self.state.lock()?;
|
||||
if state.pinned_version == Some(version) {
|
||||
state.dataset = Arc::new(new_dataset);
|
||||
}
|
||||
@@ -242,14 +245,14 @@ impl DatasetConsistencyWrapper {
|
||||
}
|
||||
|
||||
async fn refresh_latest(state: Arc<Mutex<DatasetState>>) -> Result<Arc<Dataset>> {
|
||||
let dataset = { state.lock().unwrap().dataset.clone() };
|
||||
let dataset = { state.lock()?.dataset.clone() };
|
||||
|
||||
let mut ds = (*dataset).clone();
|
||||
ds.checkout_latest().await?;
|
||||
let new_arc = Arc::new(ds);
|
||||
|
||||
{
|
||||
let mut state = state.lock().unwrap();
|
||||
let mut state = state.lock()?;
|
||||
if state.pinned_version.is_none()
|
||||
&& new_arc.manifest().version >= state.dataset.manifest().version
|
||||
{
|
||||
@@ -612,4 +615,108 @@ mod tests {
|
||||
let s = io_stats.incremental_stats();
|
||||
assert_eq!(s.read_iops, 0, "step 5, elapsed={:?}", start.elapsed());
|
||||
}
|
||||
|
||||
/// Helper: poison the mutex inside a DatasetConsistencyWrapper.
|
||||
fn poison_state(wrapper: &DatasetConsistencyWrapper) {
|
||||
let state = wrapper.state.clone();
|
||||
let handle = std::thread::spawn(move || {
|
||||
let _guard = state.lock().unwrap();
|
||||
panic!("intentional panic to poison mutex");
|
||||
});
|
||||
let _ = handle.join(); // join collects the panic
|
||||
assert!(wrapper.state.lock().is_err(), "mutex should be poisoned");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_returns_error_on_poisoned_lock() {
|
||||
let dir = tempfile::tempdir().unwrap();
|
||||
let uri = dir.path().to_str().unwrap();
|
||||
let ds = create_test_dataset(uri).await;
|
||||
|
||||
let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);
|
||||
poison_state(&wrapper);
|
||||
|
||||
// get() should return Err, not panic
|
||||
let result = wrapper.get().await;
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ensure_mutable_returns_error_on_poisoned_lock() {
|
||||
let dir = tempfile::tempdir().unwrap();
|
||||
let uri = dir.path().to_str().unwrap();
|
||||
let ds = create_test_dataset(uri).await;
|
||||
|
||||
let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);
|
||||
poison_state(&wrapper);
|
||||
|
||||
let result = wrapper.ensure_mutable();
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_update_recovers_from_poisoned_lock() {
|
||||
let dir = tempfile::tempdir().unwrap();
|
||||
let uri = dir.path().to_str().unwrap();
|
||||
let ds = create_test_dataset(uri).await;
|
||||
let ds_v2 = append_to_dataset(uri).await;
|
||||
|
||||
let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);
|
||||
poison_state(&wrapper);
|
||||
|
||||
// update() returns (), should not panic
|
||||
wrapper.update(ds_v2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_time_travel_version_recovers_from_poisoned_lock() {
|
||||
let dir = tempfile::tempdir().unwrap();
|
||||
let uri = dir.path().to_str().unwrap();
|
||||
let ds = create_test_dataset(uri).await;
|
||||
|
||||
let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);
|
||||
poison_state(&wrapper);
|
||||
|
||||
// Should not panic, returns whatever was in the mutex
|
||||
let _version = wrapper.time_travel_version();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_as_latest_returns_error_on_poisoned_lock() {
|
||||
let dir = tempfile::tempdir().unwrap();
|
||||
let uri = dir.path().to_str().unwrap();
|
||||
let ds = create_test_dataset(uri).await;
|
||||
|
||||
let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);
|
||||
poison_state(&wrapper);
|
||||
|
||||
let result = wrapper.as_latest().await;
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_as_time_travel_returns_error_on_poisoned_lock() {
|
||||
let dir = tempfile::tempdir().unwrap();
|
||||
let uri = dir.path().to_str().unwrap();
|
||||
let ds = create_test_dataset(uri).await;
|
||||
|
||||
let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);
|
||||
poison_state(&wrapper);
|
||||
|
||||
let result = wrapper.as_time_travel(1u64).await;
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_reload_returns_error_on_poisoned_lock() {
|
||||
let dir = tempfile::tempdir().unwrap();
|
||||
let uri = dir.path().to_str().unwrap();
|
||||
let ds = create_test_dataset(uri).await;
|
||||
|
||||
let wrapper = DatasetConsistencyWrapper::new_latest(ds, None);
|
||||
poison_state(&wrapper);
|
||||
|
||||
let result = wrapper.reload().await;
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ use std::sync::Arc;
|
||||
|
||||
use lance::dataset::cleanup::RemovalStats;
|
||||
use lance::dataset::optimize::{CompactionMetrics, IndexRemapperOptions, compact_files};
|
||||
use lance_index::DatasetIndexExt;
|
||||
use lance::index::DatasetIndexExt;
|
||||
use lance_index::optimize::OptimizeOptions;
|
||||
use log::info;
|
||||
|
||||
|
||||
@@ -4,12 +4,13 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::NativeTable;
|
||||
use crate::connection::PushdownOperation;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::expr::expr_to_sql_string;
|
||||
use crate::query::{
|
||||
DEFAULT_TOP_K, QueryExecutionOptions, QueryFilter, QueryRequest, Select, VectorQueryRequest,
|
||||
};
|
||||
use crate::utils::{TimeoutStream, default_vector_column};
|
||||
use crate::utils::{MaxBatchLengthStream, TimeoutStream, default_vector_column};
|
||||
use arrow::array::{AsArray, FixedSizeListBuilder, Float32Builder};
|
||||
use arrow::datatypes::{Float32Type, UInt8Type};
|
||||
use arrow_array::Array;
|
||||
@@ -40,8 +41,10 @@ pub async fn execute_query(
|
||||
query: &AnyQuery,
|
||||
options: QueryExecutionOptions,
|
||||
) -> Result<DatasetRecordBatchStream> {
|
||||
// If server-side query is enabled and namespace client is configured, use server-side query execution
|
||||
if table.server_side_query_enabled
|
||||
// If QueryTable pushdown is enabled and namespace client is configured, use server-side query execution
|
||||
if table
|
||||
.pushdown_operations
|
||||
.contains(&PushdownOperation::QueryTable)
|
||||
&& let Some(ref namespace_client) = table.namespace_client
|
||||
{
|
||||
return execute_namespace_query(table, namespace_client.clone(), query, options).await;
|
||||
@@ -66,6 +69,7 @@ async fn execute_generic_query(
|
||||
) -> Result<DatasetRecordBatchStream> {
|
||||
let plan = create_plan(table, query, options.clone()).await?;
|
||||
let inner = execute_plan(plan, Default::default())?;
|
||||
let inner = MaxBatchLengthStream::new_boxed(inner, options.max_batch_length as usize);
|
||||
let inner = if let Some(timeout) = options.timeout {
|
||||
TimeoutStream::new_boxed(inner, timeout)
|
||||
} else {
|
||||
@@ -200,7 +204,9 @@ pub async fn create_plan(
|
||||
scanner.with_row_id();
|
||||
}
|
||||
|
||||
scanner.batch_size(options.max_batch_length as usize);
|
||||
if options.max_batch_length > 0 {
|
||||
scanner.batch_size(options.max_batch_length as usize);
|
||||
}
|
||||
|
||||
if query.base.fast_search {
|
||||
scanner.fast_search();
|
||||
|
||||
431
rust/lancedb/src/table/write_progress.rs
Normal file
431
rust/lancedb/src/table/write_progress.rs
Normal file
@@ -0,0 +1,431 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
||||
|
||||
//! Progress monitoring for write operations.
|
||||
//!
|
||||
//! You can add a callback to process progress in [`crate::table::AddDataBuilder::progress`].
|
||||
//! [`WriteProgress`] is the struct passed to the callback.
|
||||
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
/// Progress snapshot for a write operation.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct WriteProgress {
|
||||
// These are private and only accessible via getters, to make it easy to add
|
||||
// new fields without breaking existing callbacks.
|
||||
elapsed: Duration,
|
||||
output_rows: usize,
|
||||
output_bytes: usize,
|
||||
total_rows: Option<usize>,
|
||||
active_tasks: usize,
|
||||
total_tasks: usize,
|
||||
done: bool,
|
||||
}
|
||||
|
||||
impl WriteProgress {
|
||||
/// Wall-clock time since monitoring started.
|
||||
pub fn elapsed(&self) -> Duration {
|
||||
self.elapsed
|
||||
}
|
||||
|
||||
/// Number of rows written so far.
|
||||
pub fn output_rows(&self) -> usize {
|
||||
self.output_rows
|
||||
}
|
||||
|
||||
/// Number of bytes written so far.
|
||||
pub fn output_bytes(&self) -> usize {
|
||||
self.output_bytes
|
||||
}
|
||||
|
||||
/// Total rows expected.
|
||||
///
|
||||
/// Populated when the input source reports a row count (e.g. a
|
||||
/// [`arrow_array::RecordBatch`]). Always `Some` when [`WriteProgress::done`]
|
||||
/// is `true` — falling back to the actual number of rows written.
|
||||
pub fn total_rows(&self) -> Option<usize> {
|
||||
self.total_rows
|
||||
}
|
||||
|
||||
/// Number of parallel write tasks currently in flight.
|
||||
pub fn active_tasks(&self) -> usize {
|
||||
self.active_tasks
|
||||
}
|
||||
|
||||
/// Total number of parallel write tasks (i.e. the write parallelism).
|
||||
pub fn total_tasks(&self) -> usize {
|
||||
self.total_tasks
|
||||
}
|
||||
|
||||
/// Whether the write operation has completed.
|
||||
///
|
||||
/// The final callback always has `done = true`. Callers can use this to
|
||||
/// finalize progress bars or perform cleanup.
|
||||
pub fn done(&self) -> bool {
|
||||
self.done
|
||||
}
|
||||
}
|
||||
|
||||
/// Callback type for progress updates.
|
||||
///
|
||||
/// Callbacks are serialized by the tracker and are never invoked reentrantly,
|
||||
/// so `FnMut` is safe to use here.
|
||||
pub type ProgressCallback = Arc<Mutex<dyn FnMut(&WriteProgress) + Send>>;
|
||||
|
||||
/// Tracks progress of a write operation and invokes a [`ProgressCallback`].
|
||||
///
|
||||
/// Call [`WriteProgressTracker::record_batch`] for each batch written.
|
||||
/// Call [`WriteProgressTracker::finish`] once after all data is written.
|
||||
///
|
||||
/// The callback is never invoked reentrantly: all state updates and callback
|
||||
/// invocations are serialized behind a single lock.
|
||||
impl std::fmt::Debug for WriteProgressTracker {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("WriteProgressTracker")
|
||||
.field("total_rows", &self.total_rows)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct WriteProgressTracker {
|
||||
rows_and_bytes: std::sync::Mutex<(usize, usize)>,
|
||||
/// Wire bytes tracked separately by the insert layer. When set (> 0),
|
||||
/// this takes precedence over the in-memory bytes from `rows_and_bytes`.
|
||||
wire_bytes: AtomicUsize,
|
||||
active_tasks: Arc<AtomicUsize>,
|
||||
total_tasks: AtomicUsize,
|
||||
start: Instant,
|
||||
/// Known total rows from the input source, if available.
|
||||
total_rows: Option<usize>,
|
||||
callback: ProgressCallback,
|
||||
}
|
||||
|
||||
impl WriteProgressTracker {
|
||||
pub fn new(callback: ProgressCallback, total_rows: Option<usize>) -> Self {
|
||||
Self {
|
||||
rows_and_bytes: std::sync::Mutex::new((0, 0)),
|
||||
wire_bytes: AtomicUsize::new(0),
|
||||
active_tasks: Arc::new(AtomicUsize::new(0)),
|
||||
total_tasks: AtomicUsize::new(1),
|
||||
start: Instant::now(),
|
||||
total_rows,
|
||||
callback,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the total number of parallel write tasks (the write parallelism).
|
||||
pub fn set_total_tasks(&self, n: usize) {
|
||||
self.total_tasks.store(n, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment the active task count. Returns a guard that decrements on drop.
|
||||
pub fn track_task(&self) -> ActiveTaskGuard {
|
||||
self.active_tasks.fetch_add(1, Ordering::Relaxed);
|
||||
ActiveTaskGuard(self.active_tasks.clone())
|
||||
}
|
||||
|
||||
/// Record a batch of rows passing through the scan node.
|
||||
pub fn record_batch(&self, rows: usize, bytes: usize) {
|
||||
// Lock order: callback first, then rows_and_bytes. This is the only
|
||||
// order used anywhere, so deadlocks cannot occur.
|
||||
let mut cb = self.callback.lock().unwrap_or_else(|e| e.into_inner());
|
||||
let mut guard = self
|
||||
.rows_and_bytes
|
||||
.lock()
|
||||
.unwrap_or_else(|e| e.into_inner());
|
||||
guard.0 += rows;
|
||||
guard.1 += bytes;
|
||||
let progress = self.snapshot(guard.0, guard.1, false);
|
||||
drop(guard);
|
||||
cb(&progress);
|
||||
}
|
||||
|
||||
/// Record wire bytes from the insert layer (e.g. IPC-encoded bytes for
|
||||
/// remote writes). When wire bytes are recorded, they take precedence over
|
||||
/// the in-memory Arrow bytes tracked by [`record_batch`].
|
||||
pub fn record_bytes(&self, bytes: usize) {
|
||||
self.wire_bytes.fetch_add(bytes, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Emit the final progress callback indicating the write is complete.
|
||||
///
|
||||
/// `total_rows` is always `Some` on the final callback: it uses the known
|
||||
/// total if available, or falls back to the number of rows actually written.
|
||||
pub fn finish(&self) {
|
||||
let mut cb = self.callback.lock().unwrap_or_else(|e| e.into_inner());
|
||||
let guard = self
|
||||
.rows_and_bytes
|
||||
.lock()
|
||||
.unwrap_or_else(|e| e.into_inner());
|
||||
let mut snap = self.snapshot(guard.0, guard.1, true);
|
||||
snap.total_rows = Some(self.total_rows.unwrap_or(guard.0));
|
||||
drop(guard);
|
||||
cb(&snap);
|
||||
}
|
||||
|
||||
fn snapshot(&self, rows: usize, in_memory_bytes: usize, done: bool) -> WriteProgress {
|
||||
let wire = self.wire_bytes.load(Ordering::Relaxed);
|
||||
// Prefer wire bytes (actual I/O size) when the insert layer is
|
||||
// tracking them; fall back to in-memory Arrow size otherwise.
|
||||
// TODO: for local writes, track actual bytes written by Lance
|
||||
// instead of using in-memory Arrow size as a proxy.
|
||||
let output_bytes = if wire > 0 { wire } else { in_memory_bytes };
|
||||
WriteProgress {
|
||||
elapsed: self.start.elapsed(),
|
||||
output_rows: rows,
|
||||
output_bytes,
|
||||
total_rows: self.total_rows,
|
||||
active_tasks: self.active_tasks.load(Ordering::Relaxed),
|
||||
total_tasks: self.total_tasks.load(Ordering::Relaxed),
|
||||
done,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// RAII guard that decrements the active task count when dropped.
|
||||
pub(crate) struct ActiveTaskGuard(Arc<AtomicUsize>);
|
||||
|
||||
impl Drop for ActiveTaskGuard {
|
||||
fn drop(&mut self) {
|
||||
self.0.fetch_sub(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
/// RAII guard that calls [`WriteProgressTracker::finish`] on drop.
|
||||
///
|
||||
/// This ensures the final `done=true` callback fires even if the write
|
||||
/// errors or the future is cancelled.
|
||||
pub(crate) struct FinishOnDrop(pub Option<Arc<WriteProgressTracker>>);
|
||||
|
||||
impl Drop for FinishOnDrop {
|
||||
fn drop(&mut self) {
|
||||
if let Some(t) = self.0.take() {
|
||||
t.finish();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
use arrow_array::record_batch;
|
||||
|
||||
use crate::connect;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_progress_monitor_fires_callback() {
|
||||
let db = connect("memory://").execute().await.unwrap();
|
||||
|
||||
let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
|
||||
let table = db
|
||||
.create_table("progress_test", batch)
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let callback_count = Arc::new(AtomicUsize::new(0));
|
||||
let last_rows = Arc::new(AtomicUsize::new(0));
|
||||
let max_active = Arc::new(AtomicUsize::new(0));
|
||||
let last_total_tasks = Arc::new(AtomicUsize::new(0));
|
||||
let cb_count = callback_count.clone();
|
||||
let cb_rows = last_rows.clone();
|
||||
let cb_active = max_active.clone();
|
||||
let cb_total_tasks = last_total_tasks.clone();
|
||||
|
||||
let new_data = record_batch!(("id", Int32, [4, 5, 6])).unwrap();
|
||||
table
|
||||
.add(new_data)
|
||||
.progress(move |p| {
|
||||
cb_count.fetch_add(1, Ordering::SeqCst);
|
||||
cb_rows.store(p.output_rows(), Ordering::SeqCst);
|
||||
cb_active.fetch_max(p.active_tasks(), Ordering::SeqCst);
|
||||
cb_total_tasks.store(p.total_tasks(), Ordering::SeqCst);
|
||||
})
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(table.count_rows(None).await.unwrap(), 6);
|
||||
assert!(callback_count.load(Ordering::SeqCst) >= 1);
|
||||
// Progress tracks the newly inserted rows, not the total table size.
|
||||
assert_eq!(last_rows.load(Ordering::SeqCst), 3);
|
||||
// At least one callback should have seen an active task.
|
||||
assert!(max_active.load(Ordering::SeqCst) >= 1);
|
||||
// total_tasks should reflect the write parallelism.
|
||||
assert!(last_total_tasks.load(Ordering::SeqCst) >= 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_progress_done_fires_at_end() {
|
||||
let db = connect("memory://").execute().await.unwrap();
|
||||
let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
|
||||
let table = db
|
||||
.create_table("progress_done", batch)
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let seen_done = Arc::new(std::sync::Mutex::new(Vec::<bool>::new()));
|
||||
let seen = seen_done.clone();
|
||||
|
||||
let new_data = record_batch!(("id", Int32, [4, 5, 6])).unwrap();
|
||||
table
|
||||
.add(new_data)
|
||||
.progress(move |p| {
|
||||
seen.lock().unwrap().push(p.done());
|
||||
})
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let done_flags = seen_done.lock().unwrap();
|
||||
assert!(!done_flags.is_empty(), "at least one callback must fire");
|
||||
// Only the last callback should have done=true.
|
||||
let last = *done_flags.last().unwrap();
|
||||
assert!(last, "last callback must have done=true");
|
||||
// All earlier callbacks should have done=false.
|
||||
for &d in done_flags.iter().rev().skip(1) {
|
||||
assert!(!d, "non-final callbacks must have done=false");
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_progress_total_rows_known() {
|
||||
let db = connect("memory://").execute().await.unwrap();
|
||||
|
||||
let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
|
||||
let table = db
|
||||
.create_table("total_known", batch)
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let seen_total = Arc::new(std::sync::Mutex::new(Vec::new()));
|
||||
let seen = seen_total.clone();
|
||||
|
||||
// RecordBatch implements Scannable with num_rows() -> Some(3)
|
||||
let new_data = record_batch!(("id", Int32, [4, 5, 6])).unwrap();
|
||||
table
|
||||
.add(new_data)
|
||||
.progress(move |p| {
|
||||
seen.lock().unwrap().push(p.total_rows());
|
||||
})
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let totals = seen_total.lock().unwrap();
|
||||
// All callbacks (including done) should have total_rows = Some(3)
|
||||
assert!(
|
||||
totals.contains(&Some(3)),
|
||||
"expected total_rows=Some(3) in at least one callback, got: {:?}",
|
||||
*totals
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_progress_total_rows_unknown() {
|
||||
use arrow_array::RecordBatchIterator;
|
||||
|
||||
let db = connect("memory://").execute().await.unwrap();
|
||||
|
||||
let batch = record_batch!(("id", Int32, [1, 2, 3])).unwrap();
|
||||
let table = db
|
||||
.create_table("total_unknown", batch)
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let seen_total = Arc::new(std::sync::Mutex::new(Vec::new()));
|
||||
let seen = seen_total.clone();
|
||||
|
||||
// RecordBatchReader does not provide num_rows, so total_rows should be
|
||||
// None in intermediate callbacks but always Some on the done callback.
|
||||
let schema = arrow_schema::Schema::new(vec![arrow_schema::Field::new(
|
||||
"id",
|
||||
arrow_schema::DataType::Int32,
|
||||
false,
|
||||
)]);
|
||||
let new_data: Box<dyn arrow_array::RecordBatchReader + Send> =
|
||||
Box::new(RecordBatchIterator::new(
|
||||
vec![Ok(record_batch!(("id", Int32, [4, 5, 6])).unwrap())],
|
||||
Arc::new(schema),
|
||||
));
|
||||
table
|
||||
.add(new_data)
|
||||
.progress(move |p| {
|
||||
seen.lock().unwrap().push((p.total_rows(), p.done()));
|
||||
})
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let entries = seen_total.lock().unwrap();
|
||||
assert!(!entries.is_empty(), "at least one callback must fire");
|
||||
for (total, done) in entries.iter() {
|
||||
if *done {
|
||||
assert!(
|
||||
total.is_some(),
|
||||
"done callback must have total_rows set, got: {:?}",
|
||||
total
|
||||
);
|
||||
} else {
|
||||
assert_eq!(
|
||||
*total, None,
|
||||
"intermediate callback must have total_rows=None, got: {:?}",
|
||||
total
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_record_batch_recovers_from_poisoned_callback_lock() {
|
||||
use super::{ProgressCallback, WriteProgressTracker};
|
||||
use std::sync::Mutex;
|
||||
|
||||
let callback: ProgressCallback = Arc::new(Mutex::new(|_: &super::WriteProgress| {}));
|
||||
|
||||
// Poison the callback mutex
|
||||
let cb_clone = callback.clone();
|
||||
let handle = std::thread::spawn(move || {
|
||||
let _guard = cb_clone.lock().unwrap();
|
||||
panic!("intentional panic to poison callback mutex");
|
||||
});
|
||||
let _ = handle.join();
|
||||
assert!(
|
||||
callback.lock().is_err(),
|
||||
"callback mutex should be poisoned"
|
||||
);
|
||||
|
||||
let tracker = WriteProgressTracker::new(callback, Some(100));
|
||||
|
||||
// record_batch should not panic
|
||||
tracker.record_batch(10, 1024);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_finish_recovers_from_poisoned_callback_lock() {
|
||||
use super::{ProgressCallback, WriteProgressTracker};
|
||||
use std::sync::Mutex;
|
||||
|
||||
let callback: ProgressCallback = Arc::new(Mutex::new(|_: &super::WriteProgress| {}));
|
||||
|
||||
// Poison the callback mutex
|
||||
let cb_clone = callback.clone();
|
||||
let handle = std::thread::spawn(move || {
|
||||
let _guard = cb_clone.lock().unwrap();
|
||||
panic!("intentional panic to poison callback mutex");
|
||||
});
|
||||
let _ = handle.join();
|
||||
|
||||
let tracker = WriteProgressTracker::new(callback, Some(100));
|
||||
|
||||
// finish should not panic
|
||||
tracker.finish();
|
||||
}
|
||||
}
|
||||
@@ -122,7 +122,7 @@ where
|
||||
/// This is a cheap synchronous check useful as a fast path before
|
||||
/// constructing a fetch closure for [`get()`](Self::get).
|
||||
pub fn try_get(&self) -> Option<V> {
|
||||
let cache = self.inner.lock().unwrap();
|
||||
let cache = self.inner.lock().unwrap_or_else(|e| e.into_inner());
|
||||
cache.state.fresh_value(self.ttl, self.refresh_window)
|
||||
}
|
||||
|
||||
@@ -138,7 +138,7 @@ where
|
||||
{
|
||||
// Fast path: check if cache is fresh
|
||||
{
|
||||
let cache = self.inner.lock().unwrap();
|
||||
let cache = self.inner.lock().unwrap_or_else(|e| e.into_inner());
|
||||
if let Some(value) = cache.state.fresh_value(self.ttl, self.refresh_window) {
|
||||
return Ok(value);
|
||||
}
|
||||
@@ -147,7 +147,7 @@ where
|
||||
// Slow path
|
||||
let mut fetch = Some(fetch);
|
||||
let action = {
|
||||
let mut cache = self.inner.lock().unwrap();
|
||||
let mut cache = self.inner.lock().unwrap_or_else(|e| e.into_inner());
|
||||
self.determine_action(&mut cache, &mut fetch)
|
||||
};
|
||||
|
||||
@@ -161,7 +161,7 @@ where
|
||||
///
|
||||
/// This avoids a blocking fetch on the first [`get()`](Self::get) call.
|
||||
pub fn seed(&self, value: V) {
|
||||
let mut cache = self.inner.lock().unwrap();
|
||||
let mut cache = self.inner.lock().unwrap_or_else(|e| e.into_inner());
|
||||
cache.state = State::Current(value, clock::now());
|
||||
}
|
||||
|
||||
@@ -170,7 +170,7 @@ where
|
||||
/// Any in-flight background fetch from before this call will not update the
|
||||
/// cache (the generation counter prevents stale writes).
|
||||
pub fn invalidate(&self) {
|
||||
let mut cache = self.inner.lock().unwrap();
|
||||
let mut cache = self.inner.lock().unwrap_or_else(|e| e.into_inner());
|
||||
cache.state = State::Empty;
|
||||
cache.generation += 1;
|
||||
}
|
||||
@@ -267,7 +267,7 @@ where
|
||||
let fut_for_spawn = shared.clone();
|
||||
tokio::spawn(async move {
|
||||
let result = fut_for_spawn.await;
|
||||
let mut cache = inner.lock().unwrap();
|
||||
let mut cache = inner.lock().unwrap_or_else(|e| e.into_inner());
|
||||
// Only update if no invalidation has happened since we started
|
||||
if cache.generation != generation {
|
||||
return;
|
||||
@@ -590,4 +590,67 @@ mod tests {
|
||||
let v = cache.get(ok_fetcher(count.clone(), "fresh")).await.unwrap();
|
||||
assert_eq!(v, "fresh");
|
||||
}
|
||||
|
||||
/// Helper: poison the inner mutex of a BackgroundCache.
|
||||
fn poison_cache(cache: &BackgroundCache<String, TestError>) {
|
||||
let inner = cache.inner.clone();
|
||||
let handle = std::thread::spawn(move || {
|
||||
let _guard = inner.lock().unwrap();
|
||||
panic!("intentional panic to poison mutex");
|
||||
});
|
||||
let _ = handle.join();
|
||||
assert!(cache.inner.lock().is_err(), "mutex should be poisoned");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_try_get_recovers_from_poisoned_lock() {
|
||||
let cache = new_cache();
|
||||
let count = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
// Seed a value first
|
||||
cache.get(ok_fetcher(count.clone(), "hello")).await.unwrap();
|
||||
cache.get(ok_fetcher(count.clone(), "hello")).await.unwrap(); // peek
|
||||
|
||||
poison_cache(&cache);
|
||||
|
||||
// try_get() should not panic — it recovers via unwrap_or_else
|
||||
let result = cache.try_get();
|
||||
// The value may or may not be fresh depending on timing, but it must not panic
|
||||
let _ = result;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_recovers_from_poisoned_lock() {
|
||||
let cache = new_cache();
|
||||
let count = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
poison_cache(&cache);
|
||||
|
||||
// get() should not panic — it recovers and can still fetch
|
||||
let result = cache.get(ok_fetcher(count.clone(), "recovered")).await;
|
||||
assert!(result.is_ok());
|
||||
assert_eq!(result.unwrap(), "recovered");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_seed_recovers_from_poisoned_lock() {
|
||||
let cache = new_cache();
|
||||
poison_cache(&cache);
|
||||
|
||||
// seed() should not panic
|
||||
cache.seed("seeded".to_string());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_invalidate_recovers_from_poisoned_lock() {
|
||||
let cache = new_cache();
|
||||
let count = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
cache.get(ok_fetcher(count.clone(), "hello")).await.unwrap();
|
||||
|
||||
poison_cache(&cache);
|
||||
|
||||
// invalidate() should not panic
|
||||
cache.invalidate();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -335,6 +335,85 @@ impl Stream for TimeoutStream {
|
||||
}
|
||||
}
|
||||
|
||||
/// A `Stream` wrapper that slices oversized batches to enforce a maximum batch length.
|
||||
pub struct MaxBatchLengthStream {
|
||||
inner: SendableRecordBatchStream,
|
||||
max_batch_length: Option<usize>,
|
||||
buffered_batch: Option<RecordBatch>,
|
||||
buffered_offset: usize,
|
||||
}
|
||||
|
||||
impl MaxBatchLengthStream {
|
||||
pub fn new(inner: SendableRecordBatchStream, max_batch_length: usize) -> Self {
|
||||
Self {
|
||||
inner,
|
||||
max_batch_length: (max_batch_length > 0).then_some(max_batch_length),
|
||||
buffered_batch: None,
|
||||
buffered_offset: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_boxed(
|
||||
inner: SendableRecordBatchStream,
|
||||
max_batch_length: usize,
|
||||
) -> SendableRecordBatchStream {
|
||||
if max_batch_length == 0 {
|
||||
inner
|
||||
} else {
|
||||
Box::pin(Self::new(inner, max_batch_length))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RecordBatchStream for MaxBatchLengthStream {
|
||||
fn schema(&self) -> SchemaRef {
|
||||
self.inner.schema()
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for MaxBatchLengthStream {
|
||||
type Item = DataFusionResult<RecordBatch>;
|
||||
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<Option<Self::Item>> {
|
||||
loop {
|
||||
let Some(max_batch_length) = self.max_batch_length else {
|
||||
return Pin::new(&mut self.inner).poll_next(cx);
|
||||
};
|
||||
|
||||
if let Some(batch) = self.buffered_batch.clone() {
|
||||
if self.buffered_offset < batch.num_rows() {
|
||||
let remaining = batch.num_rows() - self.buffered_offset;
|
||||
let length = remaining.min(max_batch_length);
|
||||
let sliced = batch.slice(self.buffered_offset, length);
|
||||
self.buffered_offset += length;
|
||||
if self.buffered_offset >= batch.num_rows() {
|
||||
self.buffered_batch = None;
|
||||
self.buffered_offset = 0;
|
||||
}
|
||||
return std::task::Poll::Ready(Some(Ok(sliced)));
|
||||
}
|
||||
|
||||
self.buffered_batch = None;
|
||||
self.buffered_offset = 0;
|
||||
}
|
||||
|
||||
match Pin::new(&mut self.inner).poll_next(cx) {
|
||||
std::task::Poll::Ready(Some(Ok(batch))) => {
|
||||
if batch.num_rows() <= max_batch_length {
|
||||
return std::task::Poll::Ready(Some(Ok(batch)));
|
||||
}
|
||||
self.buffered_batch = Some(batch);
|
||||
self.buffered_offset = 0;
|
||||
}
|
||||
other => return other,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use arrow_array::Int32Array;
|
||||
@@ -470,7 +549,7 @@ mod tests {
|
||||
assert_eq!(string_to_datatype(string), Some(expected));
|
||||
}
|
||||
|
||||
fn sample_batch() -> RecordBatch {
|
||||
fn sample_batch(num_rows: i32) -> RecordBatch {
|
||||
let schema = Arc::new(Schema::new(vec![Field::new(
|
||||
"col1",
|
||||
DataType::Int32,
|
||||
@@ -478,14 +557,14 @@ mod tests {
|
||||
)]));
|
||||
RecordBatch::try_new(
|
||||
schema.clone(),
|
||||
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
|
||||
vec![Arc::new(Int32Array::from_iter_values(0..num_rows))],
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_timeout_stream() {
|
||||
let batch = sample_batch();
|
||||
let batch = sample_batch(3);
|
||||
let schema = batch.schema();
|
||||
let mock_stream = stream::iter(vec![Ok(batch.clone()), Ok(batch.clone())]);
|
||||
|
||||
@@ -515,7 +594,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_timeout_stream_zero_duration() {
|
||||
let batch = sample_batch();
|
||||
let batch = sample_batch(3);
|
||||
let schema = batch.schema();
|
||||
let mock_stream = stream::iter(vec![Ok(batch.clone()), Ok(batch.clone())]);
|
||||
|
||||
@@ -534,7 +613,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_timeout_stream_completes_normally() {
|
||||
let batch = sample_batch();
|
||||
let batch = sample_batch(3);
|
||||
let schema = batch.schema();
|
||||
let mock_stream = stream::iter(vec![Ok(batch.clone()), Ok(batch.clone())]);
|
||||
|
||||
@@ -552,4 +631,35 @@ mod tests {
|
||||
// Stream should be empty now
|
||||
assert!(timeout_stream.next().await.is_none());
|
||||
}
|
||||
|
||||
async fn collect_batch_sizes(
|
||||
stream: SendableRecordBatchStream,
|
||||
max_batch_length: usize,
|
||||
) -> Vec<usize> {
|
||||
let mut sliced_stream = MaxBatchLengthStream::new(stream, max_batch_length);
|
||||
sliced_stream
|
||||
.by_ref()
|
||||
.map(|batch| batch.unwrap().num_rows())
|
||||
.collect::<Vec<_>>()
|
||||
.await
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_max_batch_length_stream_behaviors() {
|
||||
let schema = sample_batch(7).schema();
|
||||
let mock_stream = stream::iter(vec![Ok(sample_batch(2)), Ok(sample_batch(7))]);
|
||||
|
||||
let sendable_stream: SendableRecordBatchStream =
|
||||
Box::pin(RecordBatchStreamAdapter::new(schema.clone(), mock_stream));
|
||||
assert_eq!(
|
||||
collect_batch_sizes(sendable_stream, 3).await,
|
||||
vec![2, 3, 3, 1]
|
||||
);
|
||||
|
||||
let sendable_stream: SendableRecordBatchStream = Box::pin(RecordBatchStreamAdapter::new(
|
||||
schema,
|
||||
stream::iter(vec![Ok(sample_batch(2)), Ok(sample_batch(7))]),
|
||||
));
|
||||
assert_eq!(collect_batch_sizes(sendable_stream, 0).await, vec![2, 7]);
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user