Mirror of https://github.com/lancedb/lancedb.git (synced 2025-12-23 13:29:57 +00:00)

Compare commits: 19 commits, `add-async-`... → `python-v0.`... (branch names truncated in this render)
| SHA1 |
| --- |
| 84a6693294 |
| 6c2d4c10a4 |
| d914722f79 |
| a6e4034dba |
| 2616a50502 |
| 7b5e9d824a |
| 3b173e7cb9 |
| d496ab13a0 |
| 69d9beebc7 |
| d32360b99d |
| 9fa08bfa93 |
| d6d9cb7415 |
| 990d93f553 |
| 0832cba3c6 |
| 38b0d91848 |
| 6826039575 |
| 3e9321fc40 |
| 2ded17452b |
| dfd9d2ac99 |
```diff
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.13.1-beta.0"
+current_version = "0.14.0-beta.0"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
```
.github/workflows/npm-publish.yml (vendored, 14 changed lines)
```diff
@@ -133,7 +133,7 @@ jobs:
           free -h
       - name: Build Linux Artifacts
         run: |
-          bash ci/build_linux_artifacts.sh ${{ matrix.config.arch }}
+          bash ci/build_linux_artifacts.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-unknown-linux-gnu
       - name: Upload Linux Artifacts
         uses: actions/upload-artifact@v4
         with:
@@ -143,7 +143,7 @@ jobs:
 
   node-linux-musl:
     name: vectordb (${{ matrix.config.arch}}-unknown-linux-musl)
-    runs-on: ubuntu-latest
+    runs-on: ${{ matrix.config.runner }}
     container: alpine:edge
     # Only runs on tags that matches the make-release action
    if: startsWith(github.ref, 'refs/tags/v')
@@ -152,7 +152,10 @@ jobs:
       matrix:
         config:
           - arch: x86_64
+            runner: ubuntu-latest
           - arch: aarch64
+            # For successful fat LTO builds, we need a large runner to avoid OOM errors.
+            runner: buildjet-16vcpu-ubuntu-2204-arm
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -185,7 +188,7 @@ jobs:
       - name: Build Linux Artifacts
         run: |
           source ./saved_env
-          bash ci/manylinux_node/build_vectordb.sh ${{ matrix.config.arch }}
+          bash ci/manylinux_node/build_vectordb.sh ${{ matrix.config.arch }} ${{ matrix.config.arch }}-unknown-linux-musl
       - name: Upload Linux Artifacts
         uses: actions/upload-artifact@v4
         with:
@@ -246,7 +249,7 @@ jobs:
 
   nodejs-linux-musl:
     name: lancedb (${{ matrix.config.arch}}-unknown-linux-musl
-    runs-on: ubuntu-latest
+    runs-on: ${{ matrix.config.runner }}
     container: alpine:edge
     # Only runs on tags that matches the make-release action
     if: startsWith(github.ref, 'refs/tags/v')
@@ -255,7 +258,10 @@ jobs:
       matrix:
         config:
           - arch: x86_64
+            runner: ubuntu-latest
           - arch: aarch64
+            # For successful fat LTO builds, we need a large runner to avoid OOM errors.
+            runner: buildjet-16vcpu-ubuntu-2204-arm
     steps:
       - name: Checkout
         uses: actions/checkout@v4
```
Cargo.toml (15 changed lines)
```diff
@@ -23,13 +23,14 @@ rust-version = "1.80.0" # TO
 [workspace.dependencies]
 lance = { "version" = "=0.20.0", "features" = [
     "dynamodb",
-], git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
-lance-index = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
-lance-linalg = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
-lance-table = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
-lance-testing = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
-lance-datafusion = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
-lance-encoding = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.2" }
+], git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.3" }
+lance-io = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.3" }
+lance-index = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.3" }
+lance-linalg = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.3" }
+lance-table = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.3" }
+lance-testing = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.3" }
+lance-datafusion = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.3" }
+lance-encoding = { version = "=0.20.0", git = "https://github.com/lancedb/lance.git", tag = "v0.20.0-beta.3" }
 # Note that this one does not include pyarrow
 arrow = { version = "52.2", optional = false }
 arrow-array = "52.2"
```
```diff
@@ -1,8 +1,9 @@
 #!/bin/bash
 set -e
 ARCH=${1:-x86_64}
+TARGET_TRIPLE=${2:-x86_64-unknown-linux-gnu}
 
 # We pass down the current user so that when we later mount the local files
 # into the container, the files are accessible by the current user.
 pushd ci/manylinux_node
 docker build \
@@ -18,4 +19,4 @@ docker run \
   -v $(pwd):/io -w /io \
   --memory-swap=-1 \
   lancedb-node-manylinux \
-  bash ci/manylinux_node/build_vectordb.sh $ARCH
+  bash ci/manylinux_node/build_vectordb.sh $ARCH $TARGET_TRIPLE
```

```diff
@@ -2,6 +2,7 @@
 # Builds the node module for manylinux. Invoked by ci/build_linux_artifacts.sh.
 set -e
 ARCH=${1:-x86_64}
+TARGET_TRIPLE=${2:-x86_64-unknown-linux-gnu}
 
 if [ "$ARCH" = "x86_64" ]; then
     export OPENSSL_LIB_DIR=/usr/local/lib64/
@@ -17,4 +18,4 @@ FILE=$HOME/.bashrc && test -f $FILE && source $FILE
 cd node
 npm ci
 npm run build-release
-npm run pack-build
+npm run pack-build -t $TARGET_TRIPLE
```
```diff
@@ -55,6 +55,9 @@ plugins:
           show_signature_annotations: true
           show_root_heading: true
           members_order: source
           docstring_section_style: list
+          signature_crossrefs: true
+          separate_signature: true
       import:
         # for cross references
+        - https://arrow.apache.org/docs/objects.inv
```
````diff
@@ -1,23 +1,35 @@
-# Building Scalar Index
+# Building a Scalar Index
 
+Scalar indices organize data by scalar attributes (e.g. numbers, categorical values), enabling fast filtering of vector data. In vector databases, scalar indices accelerate the retrieval of scalar data associated with vectors, thus enhancing the query performance when searching for vectors that meet certain scalar criteria.
+
-Similar to many SQL databases, LanceDB supports several types of Scalar indices to accelerate search
+Similar to many SQL databases, LanceDB supports several types of scalar indices to accelerate search
 over scalar columns.
 
-- `BTREE`: The most common type is BTREE. This index is inspired by the btree data structure
-  although only the first few layers of the btree are cached in memory.
-  It will perform well on columns with a large number of unique values and few rows per value.
-- `BITMAP`: this index stores a bitmap for each unique value in the column.
-  This index is useful for columns with a finite number of unique values and many rows per value.
-  For example, columns that represent "categories", "labels", or "tags"
-- `LABEL_LIST`: a special index that is used to index list columns whose values have a finite set of possibilities.
+- `BTREE`: The most common type is BTREE. The index stores a copy of the
+  column in sorted order. This sorted copy allows a binary search to be used to
+  satisfy queries.
+- `BITMAP`: this index stores a bitmap for each unique value in the column. It
+  uses a series of bits to indicate whether a value is present in a row of a table.
+- `LABEL_LIST`: a special index that can be used on `List<T>` columns to
+  support queries with `array_contains_all` and `array_contains_any`
+  using an underlying bitmap index.
+  For example, a column that contains lists of tags (e.g. `["tag1", "tag2", "tag3"]`) can be indexed with a `LABEL_LIST` index.
+
+!!! tips "How to choose the right scalar index type"
+
+    `BTREE`: This index is good for scalar columns with mostly distinct values and does best when the query is highly selective.
+
+    `BITMAP`: This index works best for low-cardinality numeric or string columns, where the number of unique values is small (i.e., less than a few thousands).
+
+    `LABEL_LIST`: This index should be used for columns containing list-type data.
+
+| Data Type | Filter | Index Type |
+| --- | --- | --- |
+| Numeric, String, Temporal | `<`, `=`, `>`, `in`, `between`, `is null` | `BTREE` |
+| Boolean, numbers or strings with fewer than 1,000 unique values | `<`, `=`, `>`, `in`, `between`, `is null` | `BITMAP` |
+| List of low cardinality of numbers or strings | `array_has_any`, `array_has_all` | `LABEL_LIST` |
 
 ### Create a scalar index
 
 === "Python"
 
     ```python
@@ -46,7 +58,7 @@ over scalar columns.
     await tlb.create_index("publisher", { config: lancedb.Index.bitmap() })
     ```
 
-For example, the following scan will be faster if the column `my_col` has a scalar index:
+The following scan will be faster if the column `book_id` has a scalar index:
 
 === "Python"
 
@@ -106,3 +118,30 @@ Scalar indices can also speed up scans containing a vector search or full text search:
     .limit(10)
     .toArray();
     ```
+
+### Update a scalar index
+
+Updating the table data (adding, deleting, or modifying records) requires that you also update the scalar index. This can be done by calling `optimize`, which will trigger an update to the existing scalar index.
+
+=== "Python"
+
+    ```python
+    table.add([{"vector": [7, 8], "book_id": 4}])
+    table.optimize()
+    ```
+
+=== "TypeScript"
+
+    ```typescript
+    await tbl.add([{ vector: [7, 8], book_id: 4 }]);
+    await tbl.optimize();
+    ```
+
+=== "Rust"
+
+    ```rust
+    let more_data: Box<dyn RecordBatchReader + Send> = create_some_records()?;
+    tbl.add(more_data).execute().await?;
+    tbl.optimize(OptimizeAction::All).execute().await?;
+    ```
+
+!!! note
+
+    New data added after creating the scalar index will still appear in search results if optimize is not used, but with increased latency due to a flat search on the unindexed portion. LanceDB Cloud automates the optimize process, minimizing the impact on search speed.
````
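The Python tab of the create-index snippet is elided in this render. As a hedged sketch only (table and column names are illustrative, and this assumes the synchronous Python client's `create_scalar_index` with the `index_type` parameter that appears in the `RemoteTable` diff further down), creating one index of each type might look like:

```python
import lancedb

db = lancedb.connect("./.lancedb")   # illustrative local path
tbl = db.open_table("books")         # illustrative table name

# BTREE: mostly-distinct numeric, string, or temporal columns
tbl.create_scalar_index("book_id", index_type="BTREE")

# BITMAP: low-cardinality columns such as categories, labels, or tags
tbl.create_scalar_index("publisher", index_type="BITMAP")

# LABEL_LIST: list-typed columns queried with array_has_any / array_has_all
tbl.create_scalar_index("tags", index_type="LABEL_LIST")
```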
```diff
@@ -1,6 +1,16 @@
 # Python API Reference
 
-This section contains the API reference for the OSS Python API.
+This section contains the API reference for the Python API. There is a
+synchronous and an asynchronous API client.
+
+The general flow of using the API is:
+
+1. Use [lancedb.connect][] or [lancedb.connect_async][] to connect to a database.
+2. Use the returned [lancedb.DBConnection][] or [lancedb.AsyncConnection][] to
+   create or open tables.
+3. Use the returned [lancedb.table.Table][] or [lancedb.AsyncTable][] to query
+   or modify tables.
 
 ## Installation
```
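A minimal sketch of that three-step flow for both clients (the path, table name, and data are illustrative, and `to_list` on the async query builder is assumed):

```python
import asyncio
import lancedb

# Synchronous client
db = lancedb.connect("./.lancedb")                                   # 1. connect
tbl = db.create_table("words", [{"vector": [1.0, 2.0], "text": "hi"}])  # 2. create a table
hits = tbl.search([1.0, 2.0]).limit(5).to_list()                     # 3. query it

# Asynchronous client
async def main():
    adb = await lancedb.connect_async("./.lancedb")                  # 1. connect
    atbl = await adb.open_table("words")                             # 2. open the table
    return await atbl.query().nearest_to([1.0, 2.0]).limit(5).to_list()  # 3. query it

asyncio.run(main())
```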
````diff
@@ -7,6 +7,10 @@
 performed on the top-k results returned by the vector search. However, pre-filtering is an
 option that performs the filter prior to vector search. This can be useful to narrow down on
 the search space on a very large dataset to reduce query latency.
 
+Note that both pre-filtering and post-filtering can yield false positives. For pre-filtering, if the filter is too selective, it might eliminate relevant items that the vector search would have otherwise identified as a good match. In this case, increasing `nprobes` parameter will help reduce such false positives. It is recommended to set `use_index=false` if you know that the filter is highly selective.
+
+Similarly, a highly selective post-filter can lead to false positives. Increasing both `nprobes` and `refine_factor` can mitigate this issue. When deciding between pre-filtering and post-filtering, pre-filtering is generally the safer choice if you're uncertain.
+
 <!-- Setup Code
 ```python
 import lancedb
@@ -57,6 +61,9 @@ const tbl = await db.createTable('myVectors', data)
 ```ts
 --8<-- "docs/src/sql_legacy.ts:search"
 ```
+!!! note
+
+    Creating a [scalar index](guides/scalar_index.md) accelerates filtering
 
 ## SQL filters
````
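To make the pre- versus post-filtering distinction concrete, a hedged sketch with the synchronous Python client (`tbl`, the column name, and the query vector are assumptions):

```python
# Post-filtering (default): the filter runs on the top-k vector results,
# so a selective filter can leave fewer than `limit` rows.
rows = tbl.search([0.5, 0.2]).where("category = 'fiction'").limit(10).to_list()

# Pre-filtering: the filter narrows the search space before the vector search runs.
rows = (
    tbl.search([0.5, 0.2])
    .where("category = 'fiction'", prefilter=True)
    .nprobes(40)  # raising nprobes helps when the filter is highly selective
    .limit(10)
    .to_list()
)
```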
```diff
@@ -8,7 +8,7 @@
   <parent>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-parent</artifactId>
-    <version>0.13.1-beta.0</version>
+    <version>0.14.0-beta.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
```

```diff
@@ -6,7 +6,7 @@
   <groupId>com.lancedb</groupId>
   <artifactId>lancedb-parent</artifactId>
-  <version>0.13.1-beta.0</version>
+  <version>0.14.0-beta.0</version>
   <packaging>pom</packaging>
 
   <name>LanceDB Parent</name>
```
node/package-lock.json (generated, 20 changed lines)
```diff
@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.13.1-beta.0",
+  "version": "0.14.0-beta.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.13.1-beta.0",
+      "version": "0.14.0-beta.0",
       "cpu": [
         "x64",
         "arm64"
@@ -52,14 +52,14 @@
       "uuid": "^9.0.0"
     },
     "optionalDependencies": {
-      "@lancedb/vectordb-darwin-arm64": "0.13.1-beta.0",
-      "@lancedb/vectordb-darwin-x64": "0.13.1-beta.0",
-      "@lancedb/vectordb-linux-arm64-gnu": "0.13.1-beta.0",
-      "@lancedb/vectordb-linux-arm64-musl": "0.13.1-beta.0",
-      "@lancedb/vectordb-linux-x64-gnu": "0.13.1-beta.0",
-      "@lancedb/vectordb-linux-x64-musl": "0.13.1-beta.0",
-      "@lancedb/vectordb-win32-arm64-msvc": "0.13.1-beta.0",
-      "@lancedb/vectordb-win32-x64-msvc": "0.13.1-beta.0"
+      "@lancedb/vectordb-darwin-arm64": "0.14.0-beta.0",
+      "@lancedb/vectordb-darwin-x64": "0.14.0-beta.0",
+      "@lancedb/vectordb-linux-arm64-gnu": "0.14.0-beta.0",
+      "@lancedb/vectordb-linux-arm64-musl": "0.14.0-beta.0",
+      "@lancedb/vectordb-linux-x64-gnu": "0.14.0-beta.0",
+      "@lancedb/vectordb-linux-x64-musl": "0.14.0-beta.0",
+      "@lancedb/vectordb-win32-arm64-msvc": "0.14.0-beta.0",
+      "@lancedb/vectordb-win32-x64-msvc": "0.14.0-beta.0"
     },
     "peerDependencies": {
       "@apache-arrow/ts": "^14.0.2",
```
```diff
@@ -1,6 +1,6 @@
 {
   "name": "vectordb",
-  "version": "0.13.1-beta.0",
+  "version": "0.14.0-beta.0",
   "description": " Serverless, low-latency vector database for AI applications",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -84,18 +84,20 @@
     "aarch64-apple-darwin": "@lancedb/vectordb-darwin-arm64",
     "x86_64-unknown-linux-gnu": "@lancedb/vectordb-linux-x64-gnu",
     "aarch64-unknown-linux-gnu": "@lancedb/vectordb-linux-arm64-gnu",
+    "x86_64-unknown-linux-musl": "@lancedb/vectordb-linux-x64-musl",
+    "aarch64-unknown-linux-musl": "@lancedb/vectordb-linux-arm64-musl",
     "x86_64-pc-windows-msvc": "@lancedb/vectordb-win32-x64-msvc",
     "aarch64-pc-windows-msvc": "@lancedb/vectordb-win32-arm64-msvc"
    }
  },
  "optionalDependencies": {
-    "@lancedb/vectordb-darwin-x64": "0.13.1-beta.0",
-    "@lancedb/vectordb-darwin-arm64": "0.13.1-beta.0",
-    "@lancedb/vectordb-linux-x64-gnu": "0.13.1-beta.0",
-    "@lancedb/vectordb-linux-arm64-gnu": "0.13.1-beta.0",
-    "@lancedb/vectordb-linux-x64-musl": "0.13.1-beta.0",
-    "@lancedb/vectordb-linux-arm64-musl": "0.13.1-beta.0",
-    "@lancedb/vectordb-win32-x64-msvc": "0.13.1-beta.0",
-    "@lancedb/vectordb-win32-arm64-msvc": "0.13.1-beta.0"
+    "@lancedb/vectordb-darwin-x64": "0.14.0-beta.0",
+    "@lancedb/vectordb-darwin-arm64": "0.14.0-beta.0",
+    "@lancedb/vectordb-linux-x64-gnu": "0.14.0-beta.0",
+    "@lancedb/vectordb-linux-arm64-gnu": "0.14.0-beta.0",
+    "@lancedb/vectordb-linux-x64-musl": "0.14.0-beta.0",
+    "@lancedb/vectordb-linux-arm64-musl": "0.14.0-beta.0",
+    "@lancedb/vectordb-win32-x64-msvc": "0.14.0-beta.0",
+    "@lancedb/vectordb-win32-arm64-msvc": "0.14.0-beta.0"
   }
 }
```
```diff
@@ -1,7 +1,7 @@
 [package]
 name = "lancedb-nodejs"
 edition.workspace = true
-version = "0.13.1-beta.0"
+version = "0.14.0-beta.0"
 license.workspace = true
 description.workspace = true
 repository.workspace = true
```
```diff
@@ -110,7 +110,10 @@ describe("given a connection", () => {
     let table = await db.createTable("test", data, { useLegacyFormat: true });
 
     const isV2 = async (table: Table) => {
-      const data = await table.query().toArrow({ maxBatchLength: 100000 });
+      const data = await table
+        .query()
+        .limit(10000)
+        .toArrow({ maxBatchLength: 100000 });
       console.log(data.batches.length);
       return data.batches.length < 5;
     };
@@ -585,11 +585,11 @@ describe("When creating an index", () => {
     expect(fs.readdirSync(indexDir)).toHaveLength(1);
 
     for await (const r of tbl.query().where("id > 1").select(["id"])) {
-      expect(r.numRows).toBe(298);
+      expect(r.numRows).toBe(10);
     }
     // should also work with 'filter' alias
     for await (const r of tbl.query().filter("id > 1").select(["id"])) {
-      expect(r.numRows).toBe(298);
+      expect(r.numRows).toBe(10);
     }
   });
```
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-arm64",
-  "version": "0.13.1-beta.0",
+  "version": "0.14.0-beta.0",
   "os": ["darwin"],
   "cpu": ["arm64"],
   "main": "lancedb.darwin-arm64.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-x64",
-  "version": "0.13.1-beta.0",
+  "version": "0.14.0-beta.0",
   "os": ["darwin"],
   "cpu": ["x64"],
   "main": "lancedb.darwin-x64.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-gnu",
-  "version": "0.13.1-beta.0",
+  "version": "0.14.0-beta.0",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-gnu.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-musl",
-  "version": "0.13.1-beta.0",
+  "version": "0.14.0-beta.0",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-musl.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-gnu",
-  "version": "0.13.1-beta.0",
+  "version": "0.14.0-beta.0",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-gnu.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-musl",
-  "version": "0.13.1-beta.0",
+  "version": "0.14.0-beta.0",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-musl.node",
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-arm64-msvc",
-  "version": "0.13.1-beta.0",
+  "version": "0.14.0-beta.0",
   "os": [
     "win32"
   ],
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-x64-msvc",
-  "version": "0.13.1-beta.0",
+  "version": "0.14.0-beta.0",
   "os": ["win32"],
   "cpu": ["x64"],
   "main": "lancedb.win32-x64-msvc.node",
@@ -10,7 +10,7 @@
     "vector database",
     "ann"
   ],
-  "version": "0.13.1-beta.0",
+  "version": "0.14.0-beta.0",
   "main": "dist/index.js",
   "exports": {
     ".": "./dist/index.js",
```
```diff
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.16.1-beta.0"
+current_version = "0.17.0-beta.1"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
```
```diff
@@ -1,6 +1,6 @@
 [package]
 name = "lancedb-python"
-version = "0.16.1-beta.0"
+version = "0.17.0-beta.1"
 edition.workspace = true
 description = "Python bindings for LanceDB"
 license.workspace = true
@@ -17,11 +17,17 @@ crate-type = ["cdylib"]
 arrow = { version = "52.1", features = ["pyarrow"] }
 lancedb = { path = "../rust/lancedb", default-features = false }
 env_logger.workspace = true
-pyo3 = { version = "0.21", features = ["extension-module", "abi3-py38", "gil-refs"] }
+pyo3 = { version = "0.21", features = [
+    "extension-module",
+    "abi3-py39",
+    "gil-refs"
+] }
 # Using this fork for now: https://github.com/awestlake87/pyo3-asyncio/issues/119
 # pyo3-asyncio = { version = "0.20", features = ["attributes", "tokio-runtime"] }
-pyo3-asyncio-0-21 = { version = "0.21.0", features = ["attributes", "tokio-runtime"] }
+pyo3-asyncio-0-21 = { version = "0.21.0", features = [
+    "attributes",
+    "tokio-runtime"
+] }
 pin-project = "1.1.5"
 futures.workspace = true
 tokio = { version = "1.36.0", features = ["sync"] }
@@ -29,14 +35,13 @@ tokio = { version = "1.36.0", features = ["sync"] }
 [build-dependencies]
 pyo3-build-config = { version = "0.20.3", features = [
     "extension-module",
-    "abi3-py38",
+    "abi3-py39",
 ] }
 
 [features]
 default = ["default-tls", "remote"]
 fp16kernels = ["lancedb/fp16kernels"]
 remote = ["lancedb/remote"]
 
 # TLS
 default-tls = ["lancedb/default-tls"]
 native-tls = ["lancedb/native-tls"]
```
```diff
@@ -3,8 +3,7 @@ name = "lancedb"
 # version in Cargo.toml
 dependencies = [
     "deprecation",
-    "nest-asyncio~=1.0",
-    "pylance==0.20.0b2",
+    "pylance==0.20.0b3",
     "tqdm>=4.27.0",
     "pydantic>=1.10",
     "packaging",
@@ -31,7 +30,6 @@ classifiers = [
     "Programming Language :: Python",
     "Programming Language :: Python :: 3",
     "Programming Language :: Python :: 3 :: Only",
-    "Programming Language :: Python :: 3.8",
     "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
```
```diff
@@ -83,25 +83,33 @@ class OpenAIEmbeddings(TextEmbeddingFunction):
         """
         openai = attempt_import_or_raise("openai")
 
+        valid_texts = []
+        valid_indices = []
+        for idx, text in enumerate(texts):
+            if text:
+                valid_texts.append(text)
+                valid_indices.append(idx)
+
         # TODO retry, rate limit, token limit
         try:
-            if self.name == "text-embedding-ada-002":
-                rs = self._openai_client.embeddings.create(input=texts, model=self.name)
-            else:
-                kwargs = {
-                    "input": texts,
-                    "model": self.name,
-                }
-                if self.dim:
-                    kwargs["dimensions"] = self.dim
-                rs = self._openai_client.embeddings.create(**kwargs)
+            kwargs = {
+                "input": valid_texts,
+                "model": self.name,
+            }
+            if self.name != "text-embedding-ada-002":
+                kwargs["dimensions"] = self.dim
+
+            rs = self._openai_client.embeddings.create(**kwargs)
+            valid_embeddings = {
+                idx: v.embedding for v, idx in zip(rs.data, valid_indices)
+            }
         except openai.BadRequestError:
             logging.exception("Bad request: %s", texts)
             return [None] * len(texts)
         except Exception:
             logging.exception("OpenAI embeddings error")
             raise
-        return [v.embedding for v in rs.data]
+        return [valid_embeddings.get(idx, None) for idx in range(len(texts))]
 
     @cached_property
     def _openai_client(self):
```
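The effect of the rewrite: empty strings are held out of the API call, and their slots come back as `None` at the original indices. A hedged sketch of the resulting behavior (the method name `generate_embeddings` and the `func` handle are assumptions; `test_openai_with_empty_strs` further down exercises the end-to-end version):

```python
# Assuming `func` is a configured OpenAIEmbeddings instance:
vecs = func.generate_embeddings(["hello world", "", "another doc"])
assert vecs[1] is None   # the empty string was never sent to the API
assert len(vecs) == 3    # valid embeddings keep their original positions
```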
```diff
@@ -1,15 +1,5 @@
-# Copyright 2023 LanceDB Developers
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
 """Pydantic (v1 / v2) adapter for LanceDB"""
 
@@ -30,6 +20,7 @@ from typing import (
     Type,
     Union,
     _GenericAlias,
+    GenericAlias,
 )
 
 import numpy as np
```
```diff
@@ -75,7 +66,7 @@ def vector(dim: int, value_type: pa.DataType = pa.float32()):
 
 
 def Vector(
-    dim: int, value_type: pa.DataType = pa.float32()
+    dim: int, value_type: pa.DataType = pa.float32(), nullable: bool = True
 ) -> Type[FixedSizeListMixin]:
     """Pydantic Vector Type.
 
@@ -88,6 +79,8 @@ def Vector(
         The dimension of the vector.
     value_type : pyarrow.DataType, optional
         The value type of the vector, by default pa.float32()
+    nullable : bool, optional
+        Whether the vector is nullable, by default it is True.
 
     Examples
     --------
@@ -103,7 +96,7 @@ def Vector(
     >>> assert schema == pa.schema([
     ...     pa.field("id", pa.int64(), False),
     ...     pa.field("url", pa.utf8(), False),
-    ...     pa.field("embeddings", pa.list_(pa.float32(), 768), False)
+    ...     pa.field("embeddings", pa.list_(pa.float32(), 768))
     ... ])
     """
```
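A short sketch of the new parameter, mirroring `test_nullable_vector` added later in this diff:

```python
import pyarrow as pa
import pydantic
from lancedb.pydantic import Vector, pydantic_to_schema

class Document(pydantic.BaseModel):
    vec: Vector(16, nullable=False)

# The Arrow field is now marked non-nullable; by default it stays nullable.
schema = pydantic_to_schema(Document)
assert schema == pa.schema([pa.field("vec", pa.list_(pa.float32(), 16), False)])
```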
```diff
@@ -112,6 +105,10 @@ def Vector(
         def __repr__(self):
             return f"FixedSizeList(dim={dim})"
 
+        @staticmethod
+        def nullable() -> bool:
+            return nullable
+
         @staticmethod
         def dim() -> int:
             return dim
@@ -205,9 +202,7 @@ else:
 def _pydantic_to_arrow_type(field: FieldInfo) -> pa.DataType:
     """Convert a Pydantic FieldInfo to Arrow DataType"""
 
-    if isinstance(field.annotation, _GenericAlias) or (
-        sys.version_info > (3, 9) and isinstance(field.annotation, types.GenericAlias)
-    ):
+    if isinstance(field.annotation, (_GenericAlias, GenericAlias)):
         origin = field.annotation.__origin__
         args = field.annotation.__args__
         if origin is list:
@@ -235,7 +230,7 @@ def _pydantic_to_arrow_type(field: FieldInfo) -> pa.DataType:
 
 def is_nullable(field: FieldInfo) -> bool:
     """Check if a Pydantic FieldInfo is nullable."""
-    if isinstance(field.annotation, _GenericAlias):
+    if isinstance(field.annotation, (_GenericAlias, GenericAlias)):
         origin = field.annotation.__origin__
         args = field.annotation.__args__
         if origin == Union:
@@ -246,6 +241,10 @@ def is_nullable(field: FieldInfo) -> bool:
         for typ in args:
             if typ is type(None):
                 return True
+    elif inspect.isclass(field.annotation) and issubclass(
+        field.annotation, FixedSizeListMixin
+    ):
+        return field.annotation.nullable()
     return False
```
```diff
@@ -1502,10 +1502,11 @@ class AsyncQueryBase(object):
         ...     print(plan)
         >>> asyncio.run(doctest_example())  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
         ProjectionExec: expr=[vector@0 as vector, _distance@2 as _distance]
-          FilterExec: _distance@2 IS NOT NULL
-            SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], preserve_partitioning=[false]
-              KNNVectorDistance: metric=l2
-                LanceScan: uri=..., projection=[vector], row_id=true, row_addr=false, ordered=false
+          GlobalLimitExec: skip=0, fetch=10
+            FilterExec: _distance@2 IS NOT NULL
+              SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], preserve_partitioning=[false]
+                KNNVectorDistance: metric=l2
+                  LanceScan: uri=..., projection=[vector], row_id=true, row_addr=false, ordered=false
 
         Parameters
         ----------
```
python/python/lancedb/remote/background_loop.py (new file, 25 lines)
```diff
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright The LanceDB Authors
+
+import asyncio
+import threading
+
+
+class BackgroundEventLoop:
+    """
+    A background event loop that can run futures.
+
+    Used to bridge sync and async code, without messing with users event loops.
+    """
+
+    def __init__(self):
+        self.loop = asyncio.new_event_loop()
+        self.thread = threading.Thread(
+            target=self.loop.run_forever,
+            name="LanceDBBackgroundEventLoop",
+            daemon=True,
+        )
+        self.thread.start()
+
+    def run(self, future):
+        return asyncio.run_coroutine_threadsafe(future, self.loop).result()
```
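A sketch of how the sync wrappers below use this class: a single module-level loop runs on a daemon thread, and `run` blocks the caller until the coroutine completes (the coroutine here is a stand-in for the async client calls):

```python
import asyncio

LOOP = BackgroundEventLoop()  # module-level singleton, as in remote/db.py below

async def fetch_table_names():
    await asyncio.sleep(0)    # stand-in for an awaited client call
    return ["tbl_a", "tbl_b"]

# Safe to call from any thread, even one that already runs its own event
# loop, because the coroutine executes on the dedicated background loop.
names = LOOP.run(fetch_table_names())
```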
```diff
@@ -11,7 +11,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import asyncio
 from datetime import timedelta
 import logging
 from concurrent.futures import ThreadPoolExecutor
@@ -21,6 +20,7 @@ import warnings
 
 from lancedb import connect_async
 from lancedb.remote import ClientConfig
+from lancedb.remote.background_loop import BackgroundEventLoop
 import pyarrow as pa
 from overrides import override
 
@@ -31,6 +31,8 @@ from ..pydantic import LanceModel
 from ..table import Table
 from ..util import validate_table_name
 
+LOOP = BackgroundEventLoop()
+
 
 class RemoteDBConnection(DBConnection):
     """A connection to a remote LanceDB database."""
@@ -86,18 +88,9 @@ class RemoteDBConnection(DBConnection):
             raise ValueError(f"Invalid scheme: {parsed.scheme}, only accepts db://")
         self.db_name = parsed.netloc
 
-        import nest_asyncio
-
-        nest_asyncio.apply()
-        try:
-            self._loop = asyncio.get_running_loop()
-        except RuntimeError:
-            self._loop = asyncio.new_event_loop()
-            asyncio.set_event_loop(self._loop)
-
         self.client_config = client_config
 
-        self._conn = self._loop.run_until_complete(
+        self._conn = LOOP.run(
             connect_async(
                 db_url,
                 api_key=api_key,
@@ -127,9 +120,7 @@ class RemoteDBConnection(DBConnection):
         -------
         An iterator of table names.
         """
-        return self._loop.run_until_complete(
-            self._conn.table_names(start_after=page_token, limit=limit)
-        )
+        return LOOP.run(self._conn.table_names(start_after=page_token, limit=limit))
 
     @override
     def open_table(self, name: str, *, index_cache_size: Optional[int] = None) -> Table:
@@ -152,8 +143,8 @@ class RemoteDBConnection(DBConnection):
                 " (there is no local cache to configure)"
             )
 
-        table = self._loop.run_until_complete(self._conn.open_table(name))
-        return RemoteTable(table, self.db_name, self._loop)
+        table = LOOP.run(self._conn.open_table(name))
+        return RemoteTable(table, self.db_name)
 
     @override
     def create_table(
@@ -268,7 +259,7 @@ class RemoteDBConnection(DBConnection):
 
         from .table import RemoteTable
 
-        table = self._loop.run_until_complete(
+        table = LOOP.run(
             self._conn.create_table(
                 name,
                 data,
@@ -278,7 +269,7 @@ class RemoteDBConnection(DBConnection):
                 fill_value=fill_value,
             )
         )
-        return RemoteTable(table, self.db_name, self._loop)
+        return RemoteTable(table, self.db_name)
 
     @override
     def drop_table(self, name: str):
@@ -289,7 +280,7 @@ class RemoteDBConnection(DBConnection):
         name: str
             The name of the table.
         """
-        self._loop.run_until_complete(self._conn.drop_table(name))
+        LOOP.run(self._conn.drop_table(name))
 
     @override
     def rename_table(self, cur_name: str, new_name: str):
@@ -302,7 +293,7 @@ class RemoteDBConnection(DBConnection):
         new_name: str
             The new name of the table.
         """
-        self._loop.run_until_complete(self._conn.rename_table(cur_name, new_name))
+        LOOP.run(self._conn.rename_table(cur_name, new_name))
 
     async def close(self):
         """Close the connection to the database."""
```
```diff
@@ -12,12 +12,12 @@
 # limitations under the License.
 
 from datetime import timedelta
-import asyncio
 import logging
 from functools import cached_property
 from typing import Dict, Iterable, List, Optional, Union, Literal
 
 from lancedb.index import FTS, BTree, Bitmap, HnswPq, HnswSq, IvfPq, LabelList
+from lancedb.remote.db import LOOP
 import pyarrow as pa
 
 from lancedb.common import DATA, VEC, VECTOR_COLUMN_NAME
@@ -33,9 +33,7 @@ class RemoteTable(Table):
         self,
         table: AsyncTable,
         db_name: str,
-        loop: Optional[asyncio.AbstractEventLoop] = None,
     ):
-        self._loop = loop
         self._table = table
         self.db_name = db_name
 
@@ -56,12 +54,12 @@ class RemoteTable(Table):
         of this Table
 
         """
-        return self._loop.run_until_complete(self._table.schema())
+        return LOOP.run(self._table.schema())
 
     @property
     def version(self) -> int:
         """Get the current version of the table"""
-        return self._loop.run_until_complete(self._table.version())
+        return LOOP.run(self._table.version())
 
     @cached_property
     def embedding_functions(self) -> dict:
@@ -98,11 +96,11 @@ class RemoteTable(Table):
 
     def list_indices(self):
         """List all the indices on the table"""
-        return self._loop.run_until_complete(self._table.list_indices())
+        return LOOP.run(self._table.list_indices())
 
     def index_stats(self, index_uuid: str):
         """List all the stats of a specified index"""
-        return self._loop.run_until_complete(self._table.index_stats(index_uuid))
+        return LOOP.run(self._table.index_stats(index_uuid))
 
     def create_scalar_index(
         self,
@@ -132,9 +130,7 @@ class RemoteTable(Table):
         else:
             raise ValueError(f"Unknown index type: {index_type}")
 
-        self._loop.run_until_complete(
-            self._table.create_index(column, config=config, replace=replace)
-        )
+        LOOP.run(self._table.create_index(column, config=config, replace=replace))
 
     def create_fts_index(
         self,
@@ -144,9 +140,7 @@ class RemoteTable(Table):
         with_position: bool = True,
     ):
         config = FTS(with_position=with_position)
-        self._loop.run_until_complete(
-            self._table.create_index(column, config=config, replace=replace)
-        )
+        LOOP.run(self._table.create_index(column, config=config, replace=replace))
 
     def create_index(
         self,
@@ -227,9 +221,7 @@ class RemoteTable(Table):
                 " 'IVF_PQ', 'IVF_HNSW_PQ', 'IVF_HNSW_SQ'"
             )
 
-        self._loop.run_until_complete(
-            self._table.create_index(vector_column_name, config=config)
-        )
+        LOOP.run(self._table.create_index(vector_column_name, config=config))
 
     def add(
         self,
@@ -261,7 +253,7 @@ class RemoteTable(Table):
             The value to use when filling vectors. Only used if on_bad_vectors="fill".
 
         """
-        self._loop.run_until_complete(
+        LOOP.run(
             self._table.add(
                 data, mode=mode, on_bad_vectors=on_bad_vectors, fill_value=fill_value
             )
@@ -349,9 +341,7 @@ class RemoteTable(Table):
     def _execute_query(
         self, query: Query, batch_size: Optional[int] = None
     ) -> pa.RecordBatchReader:
-        return self._loop.run_until_complete(
-            self._table._execute_query(query, batch_size=batch_size)
-        )
+        return LOOP.run(self._table._execute_query(query, batch_size=batch_size))
 
     def merge_insert(self, on: Union[str, Iterable[str]]) -> LanceMergeInsertBuilder:
         """Returns a [`LanceMergeInsertBuilder`][lancedb.merge.LanceMergeInsertBuilder]
@@ -368,9 +358,7 @@ class RemoteTable(Table):
         on_bad_vectors: str,
         fill_value: float,
     ):
-        self._loop.run_until_complete(
-            self._table._do_merge(merge, new_data, on_bad_vectors, fill_value)
-        )
+        LOOP.run(self._table._do_merge(merge, new_data, on_bad_vectors, fill_value))
 
     def delete(self, predicate: str):
         """Delete rows from the table.
@@ -419,7 +407,7 @@ class RemoteTable(Table):
            x vector _distance  # doctest: +SKIP
         0  2 [3.0, 4.0] 85.0  # doctest: +SKIP
         """
-        self._loop.run_until_complete(self._table.delete(predicate))
+        LOOP.run(self._table.delete(predicate))
 
     def update(
         self,
@@ -469,7 +457,7 @@ class RemoteTable(Table):
         2 2 [10.0, 10.0]  # doctest: +SKIP
 
         """
-        self._loop.run_until_complete(
+        LOOP.run(
             self._table.update(where=where, updates=values, updates_sql=values_sql)
         )
 
@@ -499,7 +487,7 @@ class RemoteTable(Table):
         )
 
     def count_rows(self, filter: Optional[str] = None) -> int:
-        return self._loop.run_until_complete(self._table.count_rows(filter))
+        return LOOP.run(self._table.count_rows(filter))
 
     def add_columns(self, transforms: Dict[str, str]):
         raise NotImplementedError(
```
```diff
@@ -599,7 +599,9 @@ async def test_create_in_v2_mode(tmp_path):
     )
 
     async def is_in_v2_mode(tbl):
-        batches = await tbl.query().to_batches(max_batch_length=1024 * 10)
+        batches = (
+            await tbl.query().limit(10 * 1024).to_batches(max_batch_length=1024 * 10)
+        )
         num_batches = 0
         async for batch in batches:
             num_batches += 1
```

```diff
@@ -90,10 +90,13 @@ def test_embedding_with_bad_results(tmp_path):
             self, texts: Union[List[str], np.ndarray]
         ) -> list[Union[np.array, None]]:
             # Return None, which is bad if field is non-nullable
-            return [
-                None if i % 2 == 0 else np.random.randn(self.ndims())
+            a = [
+                np.full(self.ndims(), np.nan)
+                if i % 2 == 0
+                else np.random.randn(self.ndims())
                 for i in range(len(texts))
             ]
+            return a
 
     db = lancedb.connect(tmp_path)
     registry = EmbeddingFunctionRegistry.get_instance()
```
```diff
@@ -1,15 +1,6 @@
-# Copyright (c) 2023. LanceDB Developers
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
 import importlib
 import io
 import os
@@ -17,6 +8,7 @@ import os
 import lancedb
 import numpy as np
 import pandas as pd
 import pyarrow as pa
 import pytest
 from lancedb.embeddings import get_registry
 from lancedb.pydantic import LanceModel, Vector
@@ -444,6 +436,30 @@ def test_watsonx_embedding(tmp_path):
     assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world"
 
 
+@pytest.mark.slow
+@pytest.mark.skipif(
+    os.environ.get("OPENAI_API_KEY") is None, reason="OPENAI_API_KEY not set"
+)
+def test_openai_with_empty_strs(tmp_path):
+    model = get_registry().get("openai").create(max_retries=0)
+
+    class TextModel(LanceModel):
+        text: str = model.SourceField()
+        vector: Vector(model.ndims()) = model.VectorField()
+
+    df = pd.DataFrame({"text": ["hello world", ""]})
+    db = lancedb.connect(tmp_path)
+    tbl = db.create_table("test", schema=TextModel, mode="overwrite")
+
+    tbl.add(df, on_bad_vectors="skip")
+    tb = tbl.to_arrow()
+    assert tb.schema.field_by_name("vector").type == pa.list_(
+        pa.float32(), model.ndims()
+    )
+    assert len(tb) == 2
+    assert tb["vector"].is_null().to_pylist() == [False, True]
+
+
 @pytest.mark.slow
 @pytest.mark.skipif(
     importlib.util.find_spec("ollama") is None, reason="Ollama not installed"
```
```diff
@@ -1,16 +1,5 @@
-# Copyright 2023 LanceDB Developers
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
 import json
 import sys
@@ -172,6 +161,26 @@ def test_pydantic_to_arrow_py38():
     assert schema == expect_schema
 
 
+def test_nullable_vector():
+    class NullableModel(pydantic.BaseModel):
+        vec: Vector(16, nullable=False)
+
+    schema = pydantic_to_schema(NullableModel)
+    assert schema == pa.schema([pa.field("vec", pa.list_(pa.float32(), 16), False)])
+
+    class DefaultModel(pydantic.BaseModel):
+        vec: Vector(16)
+
+    schema = pydantic_to_schema(DefaultModel)
+    assert schema == pa.schema([pa.field("vec", pa.list_(pa.float32(), 16), True)])
+
+    class NotNullableModel(pydantic.BaseModel):
+        vec: Vector(16)
+
+    schema = pydantic_to_schema(NotNullableModel)
+    assert schema == pa.schema([pa.field("vec", pa.list_(pa.float32(), 16), True)])
+
+
 def test_fixed_size_list_field():
     class TestModel(pydantic.BaseModel):
         vec: Vector(16)
@@ -192,7 +201,7 @@ def test_fixed_size_list_field():
     schema = pydantic_to_schema(TestModel)
     assert schema == pa.schema(
         [
-            pa.field("vec", pa.list_(pa.float32(), 16), False),
+            pa.field("vec", pa.list_(pa.float32(), 16)),
             pa.field("li", pa.list_(pa.int64()), False),
         ]
     )
```
```diff
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
+from concurrent.futures import ThreadPoolExecutor
 import contextlib
 from datetime import timedelta
 import http.server
@@ -187,6 +188,47 @@ async def test_retry_error():
     assert cause.status_code == 429
 
 
+def test_table_add_in_threadpool():
+    def handler(request):
+        if request.path == "/v1/table/test/insert/":
+            request.send_response(200)
+            request.end_headers()
+        elif request.path == "/v1/table/test/create/":
+            request.send_response(200)
+            request.send_header("Content-Type", "application/json")
+            request.end_headers()
+            request.wfile.write(b"{}")
+        elif request.path == "/v1/table/test/describe/":
+            request.send_response(200)
+            request.send_header("Content-Type", "application/json")
+            request.end_headers()
+            payload = json.dumps(
+                dict(
+                    version=1,
+                    schema=dict(
+                        fields=[
+                            dict(name="id", type={"type": "int64"}, nullable=False),
+                        ]
+                    ),
+                )
+            )
+            request.wfile.write(payload.encode())
+        else:
+            request.send_response(404)
+            request.end_headers()
+
+    with mock_lancedb_connection(handler) as db:
+        table = db.create_table("test", [{"id": 1}])
+        with ThreadPoolExecutor(3) as executor:
+            futures = []
+            for _ in range(10):
+                future = executor.submit(table.add, [{"id": 1}])
+                futures.append(future)
+
+            for future in futures:
+                future.result()
+
+
 @contextlib.contextmanager
 def query_test_table(query_handler):
     def handler(request):
```
```diff
@@ -1,6 +1,6 @@
 [package]
 name = "lancedb-node"
-version = "0.13.1-beta.0"
+version = "0.14.0-beta.0"
 description = "Serverless, low-latency vector database for AI applications"
 license.workspace = true
 edition.workspace = true
```

```diff
@@ -1,6 +1,6 @@
 [package]
 name = "lancedb"
-version = "0.13.1-beta.0"
+version = "0.14.0-beta.0"
 edition.workspace = true
 description = "LanceDB: A serverless, low-latency vector database for AI applications"
 license.workspace = true
@@ -27,6 +27,7 @@ half = { workspace = true }
 lazy_static.workspace = true
 lance = { workspace = true }
 lance-datafusion.workspace = true
+lance-io = { workspace = true }
 lance-index = { workspace = true }
 lance-table = { workspace = true }
 lance-linalg = { workspace = true }
```
```diff
@@ -38,6 +38,9 @@ use crate::table::{NativeTable, TableDefinition, WriteOptions};
 use crate::utils::validate_table_name;
 use crate::Table;
 pub use lance_encoding::version::LanceFileVersion;
+#[cfg(feature = "remote")]
+use lance_io::object_store::StorageOptions;
+use lance_table::io::commit::commit_handler_from_url;
 
 pub const LANCE_FILE_EXTENSION: &str = "lance";
 
@@ -133,7 +136,7 @@ impl IntoArrow for NoData {
 
 /// A builder for configuring a [`Connection::create_table`] operation
 pub struct CreateTableBuilder<const HAS_DATA: bool, T: IntoArrow> {
-    parent: Arc<dyn ConnectionInternal>,
+    pub(crate) parent: Arc<dyn ConnectionInternal>,
     pub(crate) name: String,
     pub(crate) data: Option<T>,
     pub(crate) mode: CreateTableMode,
@@ -341,7 +344,7 @@ pub struct OpenTableBuilder {
 }
 
 impl OpenTableBuilder {
-    fn new(parent: Arc<dyn ConnectionInternal>, name: String) -> Self {
+    pub(crate) fn new(parent: Arc<dyn ConnectionInternal>, name: String) -> Self {
         Self {
             parent,
             name,
```
```diff
@@ -717,12 +720,14 @@ impl ConnectBuilder {
                 message: "An api_key is required when connecting to LanceDb Cloud".to_string(),
             })?;
 
+            let storage_options = StorageOptions(self.storage_options.clone());
             let internal = Arc::new(crate::remote::db::RemoteDatabase::try_new(
                 &self.uri,
                 &api_key,
                 &region,
                 self.host_override,
                 self.client_config,
+                storage_options.into(),
             )?);
             Ok(Connection {
                 internal,
@@ -855,7 +860,7 @@ impl Database {
         let table_base_uri = if let Some(store) = engine {
             static WARN_ONCE: std::sync::Once = std::sync::Once::new();
             WARN_ONCE.call_once(|| {
-                log::warn!("Specifing engine is not a publicly supported feature in lancedb yet. THE API WILL CHANGE");
+                log::warn!("Specifying engine is not a publicly supported feature in lancedb yet. THE API WILL CHANGE");
             });
             let old_scheme = url.scheme().to_string();
             let new_scheme = format!("{}+{}", old_scheme, store);
```
```diff
@@ -1036,6 +1041,7 @@ impl ConnectionInternal for Database {
         };
 
         let mut write_params = options.write_options.lance_write_params.unwrap_or_default();
+
         if matches!(&options.mode, CreateTableMode::Overwrite) {
             write_params.mode = WriteMode::Overwrite;
         }
@@ -1122,7 +1128,7 @@ impl ConnectionInternal for Database {
         let dir_name = format!("{}.{}", name, LANCE_EXTENSION);
         let full_path = self.base_path.child(dir_name.clone());
         self.object_store
-            .remove_dir_all(full_path)
+            .remove_dir_all(full_path.clone())
             .await
             .map_err(|err| match err {
                 // this error is not lance::Error::DatasetNotFound,
@@ -1132,6 +1138,19 @@ impl ConnectionInternal for Database {
                 },
                 _ => Error::from(err),
             })?;
+
+        let object_store_params = ObjectStoreParams {
+            storage_options: Some(self.storage_options.clone()),
+            ..Default::default()
+        };
+        let mut uri = self.uri.clone();
+        if let Some(query_string) = &self.query_string {
+            uri.push_str(&format!("?{}", query_string));
+        }
+        let commit_handler = commit_handler_from_url(&uri, &Some(object_store_params))
+            .await
+            .unwrap();
+        commit_handler.delete(&full_path).await.unwrap();
         Ok(())
     }
```
```diff
@@ -1169,6 +1188,7 @@ mod tests {
     use lance_testing::datagen::{BatchGenerator, IncrementingInt32};
     use tempfile::tempdir;
 
+    use crate::query::QueryBase;
     use crate::query::{ExecutableQuery, QueryExecutionOptions};
 
     use super::*;
@@ -1296,6 +1316,7 @@ mod tests {
         // In v1 the row group size will trump max_batch_length
         let batches = tbl
             .query()
+            .limit(20000)
             .execute_with_options(QueryExecutionOptions {
                 max_batch_length: 50000,
                 ..Default::default()
```
```diff
@@ -596,7 +596,7 @@ impl Query {
     pub(crate) fn new(parent: Arc<dyn TableInternal>) -> Self {
         Self {
             parent,
-            limit: None,
+            limit: Some(DEFAULT_TOP_K),
             offset: None,
             filter: None,
             full_text_search: None,
```
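With `limit` defaulting to `DEFAULT_TOP_K` instead of `None`, plain scans now return at most the default top-k rows (10, per the query plan and test expectations elsewhere in this diff) unless a limit is set explicitly. That is why the Node and Python test updates above add explicit limits; for example, the async Python test does:

```python
# Without an explicit limit, this scan would now stop at the default top-k.
batches = await tbl.query().limit(10 * 1024).to_batches(max_batch_length=1024 * 10)
```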
```diff
@@ -21,6 +21,7 @@ use reqwest::{
 };
 
 use crate::error::{Error, Result};
+use crate::remote::db::RemoteOptions;
 
 const REQUEST_ID_HEADER: &str = "x-request-id";
 
@@ -215,6 +216,7 @@ impl RestfulLanceDbClient<Sender> {
         region: &str,
         host_override: Option<String>,
         client_config: ClientConfig,
+        options: &RemoteOptions,
     ) -> Result<Self> {
         let parsed_url = url::Url::parse(db_url).map_err(|err| Error::InvalidInput {
             message: format!("db_url is not a valid URL. '{db_url}'. Error: {err}"),
@@ -255,6 +257,7 @@ impl RestfulLanceDbClient<Sender> {
                 region,
                 db_name,
                 host_override.is_some(),
+                options,
             )?)
             .user_agent(client_config.user_agent)
             .build()
@@ -262,6 +265,7 @@ impl RestfulLanceDbClient<Sender> {
                 message: "Failed to build HTTP client".into(),
                 source: Some(Box::new(err)),
             })?;
+
         let host = match host_override {
             Some(host_override) => host_override,
             None => format!("https://{}.{}.api.lancedb.com", db_name, region),
@@ -287,6 +291,7 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
         region: &str,
         db_name: &str,
         has_host_override: bool,
+        options: &RemoteOptions,
     ) -> Result<HeaderMap> {
         let mut headers = HeaderMap::new();
         headers.insert(
@@ -313,6 +318,23 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
             );
         }
 
+        if let Some(v) = options.0.get("account_name") {
+            headers.insert(
+                "x-azure-storage-account-name",
+                HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
+                    message: format!("non-ascii storage account name '{}' provided", db_name),
+                })?,
+            );
+        }
+        if let Some(v) = options.0.get("azure_storage_account_name") {
+            headers.insert(
+                "x-azure-storage-account-name",
+                HeaderValue::from_str(v).map_err(|_| Error::InvalidInput {
+                    message: format!("non-ascii storage account name '{}' provided", db_name),
+                })?,
+            );
+        }
+
         Ok(headers)
     }
```
```diff
@@ -12,18 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::collections::HashMap;
 use std::sync::Arc;
 
 use arrow_array::RecordBatchReader;
 use async_trait::async_trait;
 use http::StatusCode;
+use lance_io::object_store::StorageOptions;
 use moka::future::Cache;
 use reqwest::header::CONTENT_TYPE;
 use serde::Deserialize;
 use tokio::task::spawn_blocking;
 
 use crate::connection::{
-    ConnectionInternal, CreateTableBuilder, NoData, OpenTableBuilder, TableNamesBuilder,
+    ConnectionInternal, CreateTableBuilder, CreateTableMode, NoData, OpenTableBuilder,
+    TableNamesBuilder,
 };
 use crate::embeddings::EmbeddingRegistry;
 use crate::error::Result;
```
```diff
@@ -52,9 +55,16 @@ impl RemoteDatabase {
         region: &str,
         host_override: Option<String>,
         client_config: ClientConfig,
+        options: RemoteOptions,
     ) -> Result<Self> {
-        let client =
-            RestfulLanceDbClient::try_new(uri, api_key, region, host_override, client_config)?;
+        let client = RestfulLanceDbClient::try_new(
+            uri,
+            api_key,
+            region,
+            host_override,
+            client_config,
+            &options,
+        )?;
 
         let table_cache = Cache::builder()
             .time_to_live(std::time::Duration::from_secs(300))
@@ -95,6 +105,16 @@ impl<S: HttpSend> std::fmt::Display for RemoteDatabase<S> {
     }
 }
 
+impl From<&CreateTableMode> for &'static str {
+    fn from(val: &CreateTableMode) -> Self {
+        match val {
+            CreateTableMode::Create => "create",
+            CreateTableMode::Overwrite => "overwrite",
+            CreateTableMode::ExistOk(_) => "exist_ok",
+        }
+    }
+}
```
```diff
 #[async_trait]
 impl<S: HttpSend> ConnectionInternal for RemoteDatabase<S> {
     async fn table_names(&self, options: TableNamesBuilder) -> Result<Vec<String>> {
@@ -133,14 +153,40 @@ impl<S: HttpSend> ConnectionInternal for RemoteDatabase<S> {
         let req = self
             .client
             .post(&format!("/v1/table/{}/create/", options.name))
+            .query(&[("mode", Into::<&str>::into(&options.mode))])
             .body(data_buffer)
             .header(CONTENT_TYPE, ARROW_STREAM_CONTENT_TYPE);
 
         let (request_id, rsp) = self.client.send(req, false).await?;
 
         if rsp.status() == StatusCode::BAD_REQUEST {
             let body = rsp.text().await.err_to_http(request_id.clone())?;
             if body.contains("already exists") {
-                return Err(crate::Error::TableAlreadyExists { name: options.name });
+                return match options.mode {
+                    CreateTableMode::Create => {
+                        Err(crate::Error::TableAlreadyExists { name: options.name })
+                    }
+                    CreateTableMode::ExistOk(callback) => {
+                        let builder = OpenTableBuilder::new(options.parent, options.name);
+                        let builder = (callback)(builder);
+                        builder.execute().await
+                    }
+
+                    // This should not happen, as we explicitly set the mode to overwrite and the server
+                    // shouldn't return an error if the table already exists.
+                    //
+                    // However if the server is an older version that doesn't support the mode parameter,
+                    // then we'll get the 400 response.
+                    CreateTableMode::Overwrite => Err(crate::Error::Http {
+                        source: format!(
+                            "unexpected response from server for create mode overwrite: {}",
+                            body
+                        )
+                        .into(),
+                        request_id,
+                        status_code: Some(StatusCode::BAD_REQUEST),
+                    }),
+                };
             } else {
                 return Err(crate::Error::InvalidInput { message: body });
             }
```
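For reference, the Python client exposes the same three modes on `create_table`; a hedged sketch (the table name and `data` are illustrative, and the sync API's `mode` and `exist_ok` parameters are assumed to map onto the query-string modes above):

```python
db.create_table("books", data)                    # "create": error if the table exists
db.create_table("books", data, mode="overwrite")  # replace an existing table
db.create_table("books", data, exist_ok=True)     # open the existing table instead of failing
```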
```diff
@@ -206,6 +252,29 @@ impl<S: HttpSend> ConnectionInternal for RemoteDatabase<S> {
         }
     }
 
+/// RemoteOptions contains a subset of StorageOptions that are compatible with Remote LanceDB connections
+#[derive(Clone, Debug, Default)]
+pub struct RemoteOptions(pub HashMap<String, String>);
+
+impl RemoteOptions {
+    pub fn new(options: HashMap<String, String>) -> Self {
+        Self(options)
+    }
+}
+
+impl From<StorageOptions> for RemoteOptions {
+    fn from(options: StorageOptions) -> Self {
+        let supported_opts = vec!["account_name", "azure_storage_account_name"];
+        let mut filtered = HashMap::new();
+        for opt in supported_opts {
+            if let Some(v) = options.0.get(opt) {
+                filtered.insert(opt.to_string(), v.to_string());
+            }
+        }
+        RemoteOptions::new(filtered)
+    }
+}
```
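A hedged Python counterpart to `test_connect_remote_options` below, assuming the Python client forwards `storage_options` the same way:

```python
import lancedb

db = lancedb.connect(
    "db://my-container/my-prefix",   # illustrative remote URI
    api_key="my-api-key",
    region="us-east-1",
    storage_options={"azure_storage_account_name": "my-storage-account"},
)
```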
```diff
 #[cfg(test)]
 mod tests {
     use std::sync::{Arc, OnceLock};
@@ -213,7 +282,9 @@ mod tests {
     use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator};
     use arrow_schema::{DataType, Field, Schema};
 
+    use crate::connection::ConnectBuilder;
     use crate::{
+        connection::CreateTableMode,
         remote::{ARROW_STREAM_CONTENT_TYPE, JSON_CONTENT_TYPE},
         Connection, Error,
     };
```
```diff
@@ -382,6 +453,73 @@ mod tests {
         );
     }
 
+    #[tokio::test]
+    async fn test_create_table_modes() {
+        let test_cases = [
+            (None, "mode=create"),
+            (Some(CreateTableMode::Create), "mode=create"),
+            (Some(CreateTableMode::Overwrite), "mode=overwrite"),
+            (
+                Some(CreateTableMode::ExistOk(Box::new(|b| b))),
+                "mode=exist_ok",
+            ),
+        ];
+
+        for (mode, expected_query_string) in test_cases {
+            let conn = Connection::new_with_handler(move |request| {
+                assert_eq!(request.method(), &reqwest::Method::POST);
+                assert_eq!(request.url().path(), "/v1/table/table1/create/");
+                assert_eq!(request.url().query(), Some(expected_query_string));
+
+                http::Response::builder().status(200).body("").unwrap()
+            });
+
+            let data = RecordBatch::try_new(
+                Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
+                vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
+            )
+            .unwrap();
+            let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
+            let mut builder = conn.create_table("table1", reader);
+            if let Some(mode) = mode {
+                builder = builder.mode(mode);
+            }
+            builder.execute().await.unwrap();
+        }
+
+        // check that the open table callback is called with exist_ok
+        let conn = Connection::new_with_handler(|request| match request.url().path() {
+            "/v1/table/table1/create/" => http::Response::builder()
+                .status(400)
+                .body("Table table1 already exists")
+                .unwrap(),
+            "/v1/table/table1/describe/" => http::Response::builder().status(200).body("").unwrap(),
+            _ => {
+                panic!("unexpected path: {:?}", request.url().path());
+            }
+        });
+        let data = RecordBatch::try_new(
+            Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
+            vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
+        )
+        .unwrap();
+
+        let called: Arc<OnceLock<bool>> = Arc::new(OnceLock::new());
+        let reader = RecordBatchIterator::new([Ok(data.clone())], data.schema());
+        let called_in_cb = called.clone();
+        conn.create_table("table1", reader)
+            .mode(CreateTableMode::ExistOk(Box::new(move |b| {
+                called_in_cb.clone().set(true).unwrap();
+                b
+            })))
+            .execute()
+            .await
+            .unwrap();
+
+        let called = *called.get().unwrap_or(&false);
+        assert!(called);
+    }
+
     #[tokio::test]
     async fn test_create_table_empty() {
         let conn = Connection::new_with_handler(|request| {
@@ -436,4 +574,16 @@ mod tests {
         });
         conn.rename_table("table1", "table2").await.unwrap();
     }
+
+    #[tokio::test]
+    async fn test_connect_remote_options() {
+        let db_uri = "db://my-container/my-prefix";
+        let _ = ConnectBuilder::new(db_uri)
+            .region("us-east-1")
+            .api_key("my-api-key")
+            .storage_options(vec![("azure_storage_account_name", "my-storage-account")])
+            .execute()
+            .await
+            .unwrap();
+    }
 }
```
```diff
@@ -1227,6 +1227,7 @@ mod tests {
             "prefilter": true,
             "distance_type": "l2",
             "nprobes": 20,
+            "k": 10,
             "ef": Option::<usize>::None,
             "refine_factor": null,
             "version": null,
```