Mirror of https://github.com/lancedb/lancedb.git (synced 2026-03-28 11:30:39 +00:00)

Compare commits: python-v0.… → codex/upda… — 1 commit (d1d65bd6d2)
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.24.0"
+current_version = "0.23.1"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
    (?P<minor>0|[1-9]\\d*)\\.
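For reference, the bump-my-version `parse` field above is an ordinary verbose-mode regex; a minimal Rust sketch using the `regex` crate (illustrative only, not code from this diff) shows how the named groups split a version apart:

```rust
use regex::Regex;

fn main() {
    // Same verbose-mode pattern as the [tool.bumpversion] parse field above,
    // truncated to the major/minor groups visible in the hunk.
    let re = Regex::new(r"(?x)(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.").unwrap();
    let caps = re.captures("0.23.1").expect("version should match");
    assert_eq!(&caps["major"], "0");
    assert_eq!(&caps["minor"], "23");
}
```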
@@ -75,13 +75,6 @@ jobs:
           VERSION="${VERSION#v}"
           BRANCH_NAME="codex/update-lance-${VERSION//[^a-zA-Z0-9]/-}"
-
-          # Use "chore" for beta/rc versions, "feat" for stable releases
-          if [[ "${VERSION}" == *beta* ]] || [[ "${VERSION}" == *rc* ]]; then
-            COMMIT_TYPE="chore"
-          else
-            COMMIT_TYPE="feat"
-          fi
 
           cat <<EOF >/tmp/codex-prompt.txt
           You are running inside the lancedb repository on a GitHub Actions runner. Update the Lance dependency to version ${VERSION} and prepare a pull request for maintainers to review.
 

@@ -91,10 +84,10 @@
 3. After clippy succeeds, run "cargo fmt --all" to format the workspace.
 4. Ensure the repository is clean except for intentional changes. Inspect "git status --short" and "git diff" to confirm the dependency update and any required fixes.
 5. Create and switch to a new branch named "${BRANCH_NAME}" (replace any duplicated hyphens if necessary).
-6. Stage all relevant files with "git add -A". Commit using the message "${COMMIT_TYPE}: update lance dependency to v${VERSION}".
+6. Stage all relevant files with "git add -A". Commit using the message "chore: update lance dependency to v${VERSION}".
 7. Push the branch to origin. If the branch already exists, force-push your changes.
 8. The "GH_TOKEN" env var is available; use the "gh" CLI for GitHub-related operations such as creating the pull request.
-9. Create a pull request targeting "main" with title "${COMMIT_TYPE}: update lance dependency to v${VERSION}". First, write the PR body to /tmp/pr-body.md using a heredoc (cat <<'EOF' > /tmp/pr-body.md). The body should summarize the dependency bump, clippy/fmt verification, and link the triggering tag (${TAG}). Then run "gh pr create --body-file /tmp/pr-body.md".
+9. Create a pull request targeting "main" with title "chore: update lance dependency to v${VERSION}". In the body, summarize the dependency bump, clippy/fmt verification, and link the triggering tag (${TAG}).
10. After creating the PR, display the PR URL, "git status --short", and a concise summary of the commands run and their results.

Constraints:
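The branch-name sanitization and commit-type selection above are plain string transforms; the same logic sketched in Rust (illustrative only — the workflow does this in bash):

```rust
/// Mirrors the workflow's bash logic: non-alphanumeric characters in the
/// version become '-', and pre-release versions get a "chore" commit type.
fn branch_and_commit_type(version: &str) -> (String, &'static str) {
    let sanitized: String = version
        .chars()
        .map(|c| if c.is_ascii_alphanumeric() { c } else { '-' })
        .collect();
    let commit_type = if version.contains("beta") || version.contains("rc") {
        "chore"
    } else {
        "feat"
    };
    (format!("codex/update-lance-{sanitized}"), commit_type)
}

fn main() {
    assert_eq!(
        branch_and_commit_type("2.0.0-beta.7"),
        ("codex/update-lance-2-0-0-beta-7".to_string(), "chore")
    );
}
```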
.github/workflows/rust.yml (vendored) — 10 lines changed
@@ -48,8 +48,6 @@ jobs:
         run: cargo fmt --all -- --check
-      - name: Run clippy
-        run: cargo clippy --profile ci --workspace --tests --all-features -- -D warnings
       - name: Run clippy (without remote feature)
         run: cargo clippy --profile ci --workspace --tests -- -D warnings
 
   build-no-lock:
     runs-on: ubuntu-24.04

@@ -169,13 +167,13 @@
       - name: Build
         run: |
           $env:VCPKG_ROOT = $env:VCPKG_INSTALLATION_ROOT
-          cargo build --profile ci --features aws,remote --tests --locked --target ${{ matrix.target }}
+          cargo build --profile ci --features remote --tests --locked --target ${{ matrix.target }}
       - name: Run tests
         # Can only run tests when target matches host
         if: ${{ matrix.target == 'x86_64-pc-windows-msvc' }}
         run: |
           $env:VCPKG_ROOT = $env:VCPKG_INSTALLATION_ROOT
-          cargo test --profile ci --features aws,remote --locked
+          cargo test --profile ci --features remote --locked
 
   msrv:
     # Check the minimum supported Rust version

@@ -183,7 +181,7 @@
     runs-on: ubuntu-24.04
     strategy:
       matrix:
-        msrv: ["1.88.0"] # This should match up with rust-version in Cargo.toml
+        msrv: ["1.78.0"] # This should match up with rust-version in Cargo.toml
     env:
       # Need up-to-date compilers for kernels
       CC: clang-18

@@ -214,6 +212,4 @@
-          cargo update -p aws-sdk-sts --precise 1.51.0
-          cargo update -p home --precise 0.5.9
       - name: cargo +${{ matrix.msrv }} check
         env:
           RUSTUP_TOOLCHAIN: ${{ matrix.msrv }}
         run: cargo check --profile ci --workspace --tests --benches --all-features
Cargo.lock (generated) — 1986 lines changed (diff suppressed because it is too large)
Cargo.toml — 62 lines changed
@@ -12,42 +12,42 @@ repository = "https://github.com/lancedb/lancedb"
 description = "Serverless, low-latency vector database for AI applications"
 keywords = ["lancedb", "lance", "database", "vector", "search"]
 categories = ["database-implementations"]
-rust-version = "1.88.0"
+rust-version = "1.78.0"
 
 [workspace.dependencies]
-lance = { "version" = "=1.0.4", default-features = false, "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-core = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-datagen = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-file = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-io = { "version" = "=1.0.4", default-features = false, "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-index = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-linalg = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-namespace = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-namespace-impls = { "version" = "=1.0.4", default-features = false, "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-table = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-testing = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-datafusion = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-encoding = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
-lance-arrow = { "version" = "=1.0.4", "tag" = "v1.0.4", "git" = "https://github.com/lance-format/lance.git" }
+lance = { "version" = "=2.0.0-beta.7", default-features = false, "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
+lance-core = { "version" = "=2.0.0-beta.7", "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
+lance-datagen = { "version" = "=2.0.0-beta.7", "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
+lance-file = { "version" = "=2.0.0-beta.7", "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
+lance-io = { "version" = "=2.0.0-beta.7", default-features = false, "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
+lance-index = { "version" = "=2.0.0-beta.7", "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
+lance-linalg = { "version" = "=2.0.0-beta.7", "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
+lance-namespace = { "version" = "=2.0.0-beta.7", "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
+lance-namespace-impls = { "version" = "=2.0.0-beta.7", default-features = false, "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
+lance-table = { "version" = "=2.0.0-beta.7", "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
+lance-testing = { "version" = "=2.0.0-beta.7", "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
+lance-datafusion = { "version" = "=2.0.0-beta.7", "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
+lance-encoding = { "version" = "=2.0.0-beta.7", "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
+lance-arrow = { "version" = "=2.0.0-beta.7", "tag" = "v2.0.0-beta.7", "git" = "https://github.com/lance-format/lance.git" }
 ahash = "0.8"
 # Note that this one does not include pyarrow
-arrow = { version = "56.2", optional = false }
-arrow-array = "56.2"
-arrow-data = "56.2"
-arrow-ipc = "56.2"
-arrow-ord = "56.2"
-arrow-schema = "56.2"
-arrow-select = "56.2"
-arrow-cast = "56.2"
+arrow = { version = "57.2.0", optional = false }
+arrow-array = "57.2.0"
+arrow-data = "57.2.0"
+arrow-ipc = "57.2.0"
+arrow-ord = "57.2.0"
+arrow-schema = "57.2.0"
+arrow-select = "57.2.0"
+arrow-cast = "57.2.0"
 async-trait = "0"
-datafusion = { version = "50.1", default-features = false }
-datafusion-catalog = "50.1"
-datafusion-common = { version = "50.1", default-features = false }
-datafusion-execution = "50.1"
-datafusion-expr = "50.1"
-datafusion-physical-plan = "50.1"
+datafusion = { version = "51.0.0", default-features = false }
+datafusion-catalog = "51.0.0"
+datafusion-common = { version = "51.0.0", default-features = false }
+datafusion-execution = "51.0.0"
+datafusion-expr = "51.0.0"
+datafusion-physical-plan = "51.0.0"
 env_logger = "0.11"
-half = { "version" = "2.6.0", default-features = false, features = [
+half = { "version" = "2.7.1", default-features = false, features = [
     "num-traits",
 ] }
 futures = "0"

@@ -59,7 +59,7 @@ rand = "0.9"
 snafu = "0.8"
 url = "2"
 num-traits = "0.2"
-regex = "1.10"
+regex = "1.12"
 lazy_static = "1"
 semver = "1.0.25"
 chrono = "0.4"
@@ -14,7 +14,7 @@ Add the following dependency to your `pom.xml`:
 <dependency>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-core</artifactId>
-    <version>0.24.0</version>
+    <version>0.23.1</version>
 </dependency>
 ```
@@ -8,7 +8,7 @@
   <parent>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-parent</artifactId>
-    <version>0.24.0-final.0</version>
+    <version>0.23.1-final.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
@@ -6,7 +6,7 @@
   <groupId>com.lancedb</groupId>
   <artifactId>lancedb-parent</artifactId>
-  <version>0.24.0-final.0</version>
+  <version>0.23.1-final.0</version>
   <packaging>pom</packaging>
   <name>${project.artifactId}</name>
   <description>LanceDB Java SDK Parent POM</description>
@@ -1,7 +1,7 @@
 [package]
 name = "lancedb-nodejs"
 edition.workspace = true
-version = "0.24.0"
+version = "0.23.1"
 license.workspace = true
 description.workspace = true
 repository.workspace = true
@@ -36,6 +36,6 @@ aws-lc-rs = "=1.13.0"
 napi-build = "2.1"
 
 [features]
-default = ["remote", "lancedb/aws", "lancedb/gcs", "lancedb/azure", "lancedb/dynamodb", "lancedb/oss", "lancedb/huggingface"]
+default = ["remote", "lancedb/default"]
 fp16kernels = ["lancedb/fp16kernels"]
 remote = ["lancedb/remote"]
@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-arm64",
-  "version": "0.24.0",
+  "version": "0.23.1",
   "os": ["darwin"],
   "cpu": ["arm64"],
   "main": "lancedb.darwin-arm64.node",

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-x64",
-  "version": "0.24.0",
+  "version": "0.23.1",
   "os": ["darwin"],
   "cpu": ["x64"],
   "main": "lancedb.darwin-x64.node",

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-gnu",
-  "version": "0.24.0",
+  "version": "0.23.1",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-gnu.node",

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-musl",
-  "version": "0.24.0",
+  "version": "0.23.1",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-musl.node",

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-gnu",
-  "version": "0.24.0",
+  "version": "0.23.1",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-gnu.node",

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-musl",
-  "version": "0.24.0",
+  "version": "0.23.1",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-musl.node",

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-arm64-msvc",
-  "version": "0.24.0",
+  "version": "0.23.1",
   "os": [
     "win32"
   ],

@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-x64-msvc",
-  "version": "0.24.0",
+  "version": "0.23.1",
   "os": ["win32"],
   "cpu": ["x64"],
   "main": "lancedb.win32-x64-msvc.node",
nodejs/package-lock.json (generated) — 4 lines changed
@@ -1,12 +1,12 @@
 {
   "name": "@lancedb/lancedb",
-  "version": "0.24.0",
+  "version": "0.23.1",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@lancedb/lancedb",
-      "version": "0.24.0",
+      "version": "0.23.1",
       "cpu": [
         "x64",
         "arm64"
@@ -11,7 +11,7 @@
     "ann"
   ],
   "private": false,
-  "version": "0.24.0",
+  "version": "0.23.1",
   "main": "dist/index.js",
   "exports": {
     ".": "./dist/index.js",
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.27.1"
+current_version = "0.26.1"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
@@ -1,28 +1,28 @@
 [package]
 name = "lancedb-python"
-version = "0.27.1"
+version = "0.26.1"
 edition.workspace = true
 description = "Python bindings for LanceDB"
 license.workspace = true
 repository.workspace = true
 keywords.workspace = true
 categories.workspace = true
-rust-version = "1.88.0"
+rust-version = "1.75.0"
 
 [lib]
 name = "_lancedb"
 crate-type = ["cdylib"]
 
 [dependencies]
-arrow = { version = "56.2", features = ["pyarrow"] }
+arrow = { version = "57.2.0", features = ["pyarrow"] }
 async-trait = "0.1"
 lancedb = { path = "../rust/lancedb", default-features = false }
 lance-core.workspace = true
 lance-namespace.workspace = true
 lance-io.workspace = true
 env_logger.workspace = true
-pyo3 = { version = "0.25", features = ["extension-module", "abi3-py39"] }
-pyo3-async-runtimes = { version = "0.25", features = [
+pyo3 = { version = "0.26", features = ["extension-module", "abi3-py39"] }
+pyo3-async-runtimes = { version = "0.26", features = [
     "attributes",
     "tokio-runtime",
 ] }

@@ -32,12 +32,12 @@ snafu.workspace = true
 tokio = { version = "1.40", features = ["sync"] }
 
 [build-dependencies]
-pyo3-build-config = { version = "0.25", features = [
+pyo3-build-config = { version = "0.26", features = [
     "extension-module",
     "abi3-py39",
 ] }
 
 [features]
-default = ["remote", "lancedb/aws", "lancedb/gcs", "lancedb/azure", "lancedb/dynamodb", "lancedb/oss", "lancedb/huggingface"]
+default = ["remote", "lancedb/default"]
 fp16kernels = ["lancedb/fp16kernels"]
 remote = ["lancedb/remote"]
@@ -179,7 +179,6 @@ class Table:
         cleanup_since_ms: Optional[int] = None,
         delete_unverified: Optional[bool] = None,
     ) -> OptimizeStats: ...
-    async def uri(self) -> str: ...
     @property
     def tags(self) -> Tags: ...
     def query(self) -> Query: ...
@@ -655,14 +655,6 @@ class RemoteTable(Table):
     def stats(self):
         return LOOP.run(self._table.stats())
 
-    @property
-    def uri(self) -> str:
-        """The table URI (storage location).
-
-        For remote tables, this fetches the location from the server via describe.
-        """
-        return LOOP.run(self._table.uri())
-
     def take_offsets(self, offsets: list[int]) -> LanceTakeQueryBuilder:
         return LanceTakeQueryBuilder(self._table.take_offsets(offsets))
@@ -2218,10 +2218,6 @@ class LanceTable(Table):
     def stats(self) -> TableStatistics:
         return LOOP.run(self._table.stats())
 
-    @property
-    def uri(self) -> str:
-        return LOOP.run(self._table.uri())
-
     def create_scalar_index(
         self,
         column: str,
@@ -3610,20 +3606,6 @@ class AsyncTable:
         """
         return await self._inner.stats()
 
-    async def uri(self) -> str:
-        """
-        Get the table URI (storage location).
-
-        For remote tables, this fetches the location from the server via describe.
-        For local tables, this returns the dataset URI.
-
-        Returns
-        -------
-        str
-            The full storage location of the table (e.g., S3/GCS path).
-        """
-        return await self._inner.uri()
-
     async def add(
         self,
         data: DATA,
@@ -2,27 +2,12 @@
 # SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
 from datetime import timedelta
 
 from lancedb.db import AsyncConnection, DBConnection
 import lancedb
 import pytest
 import pytest_asyncio
 
 
-def pandas_string_type():
-    """Return the PyArrow string type that pandas uses for string columns.
-
-    pandas 3.0+ uses large_string for string columns, pandas 2.x uses string.
-    """
-    import pandas as pd
-    import pyarrow as pa
-
-    version = tuple(int(x) for x in pd.__version__.split(".")[:2])
-    if version >= (3, 0):
-        return pa.large_utf8()
-    return pa.utf8()
-
-
 # Use an in-memory database for most tests.
 @pytest.fixture
 def mem_db() -> DBConnection:
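The removed helper gates on the pandas major version; the same check, sketched in Rust purely for illustration (the real helper is the Python above):

```rust
/// pandas >= 3.0 maps string columns to Arrow large_string (large_utf8);
/// earlier versions map them to string (utf8).
fn pandas_string_type(pandas_version: &str) -> &'static str {
    let major: u32 = pandas_version
        .split('.')
        .next()
        .and_then(|part| part.parse().ok())
        .unwrap_or(0);
    if major >= 3 { "large_utf8" } else { "utf8" }
}

fn main() {
    assert_eq!(pandas_string_type("3.0.1"), "large_utf8");
    assert_eq!(pandas_string_type("2.2.3"), "utf8");
}
```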
@@ -268,8 +268,6 @@ async def test_create_table_from_iterator_async(mem_db_async: lancedb.AsyncConnection):
 
 
 def test_create_exist_ok(tmp_db: lancedb.DBConnection):
-    from conftest import pandas_string_type
-
     data = pd.DataFrame(
         {
             "vector": [[3.1, 4.1], [5.9, 26.5]],

@@ -288,11 +286,10 @@ def test_create_exist_ok(tmp_db: lancedb.DBConnection):
     assert tbl.schema == tbl2.schema
     assert len(tbl) == len(tbl2)
 
-    # pandas 3.0+ uses large_string, pandas 2.x uses string
     schema = pa.schema(
         [
             pa.field("vector", pa.list_(pa.float32(), list_size=2)),
-            pa.field("item", pandas_string_type()),
+            pa.field("item", pa.utf8()),
             pa.field("price", pa.float64()),
         ]
     )

@@ -302,7 +299,7 @@ def test_create_exist_ok(tmp_db: lancedb.DBConnection):
     bad_schema = pa.schema(
         [
             pa.field("vector", pa.list_(pa.float32(), list_size=2)),
-            pa.field("item", pandas_string_type()),
+            pa.field("item", pa.utf8()),
             pa.field("price", pa.float64()),
             pa.field("extra", pa.float32()),
         ]

@@ -368,8 +365,6 @@ async def test_create_mode_async(tmp_db_async: lancedb.AsyncConnection):
 
 @pytest.mark.asyncio
 async def test_create_exist_ok_async(tmp_db_async: lancedb.AsyncConnection):
-    from conftest import pandas_string_type
-
     data = pd.DataFrame(
         {
             "vector": [[3.1, 4.1], [5.9, 26.5]],

@@ -387,11 +382,10 @@ async def test_create_exist_ok_async(tmp_db_async: lancedb.AsyncConnection):
     assert tbl.name == tbl2.name
     assert await tbl.schema() == await tbl2.schema()
 
-    # pandas 3.0+ uses large_string, pandas 2.x uses string
     schema = pa.schema(
         [
             pa.field("vector", pa.list_(pa.float32(), list_size=2)),
-            pa.field("item", pandas_string_type()),
+            pa.field("item", pa.utf8()),
             pa.field("price", pa.float64()),
         ]
     )

@@ -601,8 +595,6 @@ def test_open_table_sync(tmp_db: lancedb.DBConnection):
 
 @pytest.mark.asyncio
 async def test_open_table(tmp_path):
-    from conftest import pandas_string_type
-
     db = await lancedb.connect_async(tmp_path)
     data = pd.DataFrame(
         {

@@ -622,11 +614,10 @@ async def test_open_table(tmp_path):
     )
         is not None
     )
-    # pandas 3.0+ uses large_string, pandas 2.x uses string
     assert await tbl.schema() == pa.schema(
         {
             "vector": pa.list_(pa.float32(), list_size=2),
-            "item": pandas_string_type(),
+            "item": pa.utf8(),
             "price": pa.float64(),
         }
     )
@@ -26,8 +26,6 @@ import pytest
 from lance_namespace import (
     CreateEmptyTableRequest,
     CreateEmptyTableResponse,
-    DeclareTableRequest,
-    DeclareTableResponse,
     DescribeTableRequest,
     DescribeTableResponse,
     LanceNamespace,

@@ -162,19 +160,6 @@ class TrackingNamespace(LanceNamespace):
 
         return modified
 
-    def declare_table(self, request: DeclareTableRequest) -> DeclareTableResponse:
-        """Track declare_table calls and inject rotating credentials."""
-        with self.lock:
-            self.create_call_count += 1
-            count = self.create_call_count
-
-        response = self.inner.declare_table(request)
-        response.storage_options = self._modify_storage_options(
-            response.storage_options, count
-        )
-
-        return response
-
     def create_empty_table(
         self, request: CreateEmptyTableRequest
     ) -> CreateEmptyTableResponse:
@@ -1967,9 +1967,3 @@ def test_add_table_with_empty_embeddings(tmp_path):
         on_bad_vectors="drop",
     )
     assert table.count_rows() == 1
-
-
-def test_table_uri(tmp_path):
-    db = lancedb.connect(tmp_path)
-    table = db.create_table("my_table", data=[{"x": 0}])
-    assert table.uri == str(tmp_path / "my_table.lance")
@@ -528,19 +528,12 @@ def test_sanitize_data(
         else:
             expected_schema = schema
     else:
-        from conftest import pandas_string_type
-
-        # polars uses large_string, pandas 3.0+ uses large_string, others use string
-        if isinstance(data, pl.DataFrame):
-            text_type = pa.large_utf8()
-        elif isinstance(data, pd.DataFrame):
-            text_type = pandas_string_type()
-        else:
-            text_type = pa.string()
         expected_schema = pa.schema(
             {
                 "id": pa.int64(),
-                "text": text_type,
+                "text": pa.large_utf8()
+                if isinstance(data, pl.DataFrame)
+                else pa.string(),
                 "vector": pa.list_(pa.float32(), 10),
             }
         )
@@ -1,3 +1,4 @@
+#![allow(deprecated)]
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 

@@ -10,8 +11,7 @@ use arrow::{
 use futures::stream::StreamExt;
 use lancedb::arrow::SendableRecordBatchStream;
 use pyo3::{
-    exceptions::PyStopAsyncIteration, pyclass, pymethods, Bound, PyAny, PyObject, PyRef, PyResult,
-    Python,
+    exceptions::PyStopAsyncIteration, pyclass, pymethods, Bound, Py, PyAny, PyRef, PyResult, Python,
 };
 use pyo3_async_runtimes::tokio::future_into_py;

@@ -36,8 +36,11 @@ impl RecordBatchStream {
 #[pymethods]
 impl RecordBatchStream {
     #[getter]
-    pub fn schema(&self, py: Python) -> PyResult<PyObject> {
-        (*self.schema).clone().into_pyarrow(py)
+    pub fn schema(&self, py: Python) -> PyResult<Py<PyAny>> {
+        (*self.schema)
+            .clone()
+            .into_pyarrow(py)
+            .map(|bound| bound.unbind())
     }
 
     pub fn __aiter__(self_: PyRef<'_, Self>) -> PyRef<'_, Self> {

@@ -53,7 +56,12 @@ impl RecordBatchStream {
                 .next()
                 .await
                 .ok_or_else(|| PyStopAsyncIteration::new_err(""))?;
-            Python::with_gil(|py| inner_next.infer_error()?.to_pyarrow(py))
+            Python::with_gil(|py| {
+                inner_next
+                    .infer_error()?
+                    .to_pyarrow(py)
+                    .map(|bound| bound.unbind())
+            })
         })
     }
 }
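The recurring `.map(|bound| bound.unbind())` in these hunks is the pyo3 `Bound` → `Py` conversion: newer pyo3/arrow conversions hand back a GIL-bound `Bound<'py, PyAny>`, and `unbind()` detaches it into a `Py<PyAny>` that can outlive the `Python<'py>` token. A minimal sketch of the pattern (assuming pyo3 0.23+ APIs, not code taken from the repo):

```rust
use pyo3::prelude::*;
use pyo3::types::PyString;

/// Returns a GIL-independent handle: `Bound<'py, _>` borrows the GIL
/// lifetime, while `unbind()` converts it into a `Py<PyAny>` that can be
/// stored or returned from `PyResult<Py<PyAny>>` getters like `schema()`.
fn keep_past_gil(py: Python<'_>) -> Py<PyAny> {
    let bound: Bound<'_, PyString> = PyString::new(py, "hello");
    bound.into_any().unbind()
}
```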
@@ -1,3 +1,4 @@
+#![allow(deprecated)]
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 

@@ -327,20 +328,13 @@ impl Connection {
         let py = self_.py();
         future_into_py(py, async move {
             use lance_namespace::models::CreateNamespaceRequest;
-            // Mode is now a string field
-            let mode_str = mode.and_then(|m| match m.to_lowercase().as_str() {
-                "create" => Some("Create".to_string()),
-                "exist_ok" => Some("ExistOk".to_string()),
-                "overwrite" => Some("Overwrite".to_string()),
-                _ => None,
-            });
             let request = CreateNamespaceRequest {
                 id: if namespace.is_empty() {
                     None
                 } else {
                     Some(namespace)
                 },
-                mode: mode_str,
+                mode,
                 properties,
                 ..Default::default()
             };

@@ -364,25 +358,14 @@ impl Connection {
         let py = self_.py();
         future_into_py(py, async move {
             use lance_namespace::models::DropNamespaceRequest;
-            // Mode and Behavior are now string fields
-            let mode_str = mode.and_then(|m| match m.to_uppercase().as_str() {
-                "SKIP" => Some("Skip".to_string()),
-                "FAIL" => Some("Fail".to_string()),
-                _ => None,
-            });
-            let behavior_str = behavior.and_then(|b| match b.to_uppercase().as_str() {
-                "RESTRICT" => Some("Restrict".to_string()),
-                "CASCADE" => Some("Cascade".to_string()),
-                _ => None,
-            });
             let request = DropNamespaceRequest {
                 id: if namespace.is_empty() {
                     None
                 } else {
                     Some(namespace)
                 },
-                mode: mode_str,
-                behavior: behavior_str,
+                mode,
+                behavior,
                 ..Default::default()
             };
             let response = inner.drop_namespace(request).await.infer_error()?;
@@ -1,3 +1,4 @@
+#![allow(deprecated)]
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
@@ -1,3 +1,4 @@
+#![allow(deprecated)]
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
@@ -1,3 +1,4 @@
+#![allow(deprecated)]
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 

@@ -281,7 +282,7 @@ impl PyPermutationReader {
         let reader = slf.reader.clone();
         future_into_py(slf.py(), async move {
             let schema = reader.output_schema(selection).await.infer_error()?;
-            Python::with_gil(|py| schema.to_pyarrow(py))
+            Python::with_gil(|py| schema.to_pyarrow(py).map(|b| b.unbind()))
         })
     }
@@ -1,3 +1,4 @@
+#![allow(deprecated)]
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 

@@ -216,7 +217,7 @@ impl<'py> IntoPyObject<'py> for PyQueryVectors {
         let py_objs = self
             .0
             .into_iter()
-            .map(|v| v.to_data().into_pyarrow(py))
+            .map(|v| v.to_data().into_pyarrow(py).map(|b| b.unbind()))
             .collect::<Result<Vec<_>, _>>()?;
         PyList::new(py, py_objs)
     }

@@ -453,7 +454,7 @@ impl Query {
         let inner = self_.inner.clone();
         future_into_py(self_.py(), async move {
             let schema = inner.output_schema().await.infer_error()?;
-            Python::with_gil(|py| schema.to_pyarrow(py))
+            Python::with_gil(|py| schema.to_pyarrow(py).map(|b| b.unbind()))
         })
     }

@@ -532,7 +533,7 @@ impl TakeQuery {
         let inner = self_.inner.clone();
         future_into_py(self_.py(), async move {
             let schema = inner.output_schema().await.infer_error()?;
-            Python::with_gil(|py| schema.to_pyarrow(py))
+            Python::with_gil(|py| schema.to_pyarrow(py).map(|b| b.unbind()))
         })
     }

@@ -627,7 +628,7 @@ impl FTSQuery {
         let inner = self_.inner.clone();
         future_into_py(self_.py(), async move {
             let schema = inner.output_schema().await.infer_error()?;
-            Python::with_gil(|py| schema.to_pyarrow(py))
+            Python::with_gil(|py| schema.to_pyarrow(py).map(|b| b.unbind()))
         })
     }

@@ -806,7 +807,7 @@ impl VectorQuery {
         let inner = self_.inner.clone();
         future_into_py(self_.py(), async move {
             let schema = inner.output_schema().await.infer_error()?;
-            Python::with_gil(|py| schema.to_pyarrow(py))
+            Python::with_gil(|py| schema.to_pyarrow(py).map(|b| b.unbind()))
         })
     }
@@ -1,3 +1,4 @@
+#![allow(deprecated)]
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 
@@ -1,3 +1,4 @@
+#![allow(deprecated)]
 // SPDX-License-Identifier: Apache-2.0
 // SPDX-FileCopyrightText: Copyright The LanceDB Authors
 use std::{collections::HashMap, sync::Arc};

@@ -287,7 +288,7 @@ impl Table {
         let inner = self_.inner_ref()?.clone();
         future_into_py(self_.py(), async move {
             let schema = inner.schema().await.infer_error()?;
-            Python::with_gil(|py| schema.to_pyarrow(py))
+            Python::with_gil(|py| schema.to_pyarrow(py).map(|b| b.unbind()))
         })
     }

@@ -497,11 +498,6 @@ impl Table {
         })
     }
 
-    pub fn uri(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
-        let inner = self_.inner_ref()?.clone();
-        future_into_py(self_.py(), async move { inner.uri().await.infer_error() })
-    }
-
     pub fn __repr__(&self) -> String {
         match &self.inner {
             None => format!("ClosedTable({})", self.name),
@@ -1,6 +1,6 @@
 [package]
 name = "lancedb"
-version = "0.24.0"
+version = "0.23.1"
 edition.workspace = true
 description = "LanceDB: A serverless, low-latency vector database for AI applications"
 license.workspace = true

@@ -104,16 +104,11 @@ test-log = "0.2"
 
 [features]
-default = []
+default = ["aws", "gcs", "azure", "dynamodb", "oss"]
 aws = ["lance/aws", "lance-io/aws", "lance-namespace-impls/dir-aws"]
 oss = ["lance/oss", "lance-io/oss", "lance-namespace-impls/dir-oss"]
 gcs = ["lance/gcp", "lance-io/gcp", "lance-namespace-impls/dir-gcp"]
 azure = ["lance/azure", "lance-io/azure", "lance-namespace-impls/dir-azure"]
-huggingface = [
-    "lance/huggingface",
-    "lance-io/huggingface",
-    "lance-namespace-impls/dir-huggingface",
-]
 dynamodb = ["lance/dynamodb", "aws"]
 remote = ["dep:reqwest", "dep:http", "lance-namespace-impls/rest", "lance-namespace-impls/rest-adapter"]
 fp16kernels = ["lance-linalg/fp16kernels"]

@@ -153,6 +148,3 @@ name = "ivf_pq"
 [[example]]
 name = "hybrid_search"
 required-features = ["sentence-transformers"]
-
-[package.metadata.docs.rs]
-all-features = true
@@ -36,42 +36,10 @@ use crate::remote::{
 };
 use crate::table::{TableDefinition, WriteOptions};
 use crate::Table;
 use lance::io::ObjectStoreParams;
 pub use lance_encoding::version::LanceFileVersion;
 #[cfg(feature = "remote")]
 use lance_io::object_store::StorageOptions;
-use lance_io::object_store::{StorageOptionsAccessor, StorageOptionsProvider};
-
-fn merge_storage_options(
-    store_params: &mut ObjectStoreParams,
-    pairs: impl IntoIterator<Item = (String, String)>,
-) {
-    let mut options = store_params.storage_options().cloned().unwrap_or_default();
-    for (key, value) in pairs {
-        options.insert(key, value);
-    }
-    let provider = store_params
-        .storage_options_accessor
-        .as_ref()
-        .and_then(|accessor| accessor.provider().cloned());
-    let accessor = if let Some(provider) = provider {
-        StorageOptionsAccessor::with_initial_and_provider(options, provider)
-    } else {
-        StorageOptionsAccessor::with_static_options(options)
-    };
-    store_params.storage_options_accessor = Some(Arc::new(accessor));
-}
-
-fn set_storage_options_provider(
-    store_params: &mut ObjectStoreParams,
-    provider: Arc<dyn StorageOptionsProvider>,
-) {
-    let accessor = match store_params.storage_options().cloned() {
-        Some(options) => StorageOptionsAccessor::with_initial_and_provider(options, provider),
-        None => StorageOptionsAccessor::with_provider(provider),
-    };
-    store_params.storage_options_accessor = Some(Arc::new(accessor));
-}
+use lance_io::object_store::StorageOptionsProvider;
 
 /// A builder for configuring a [`Connection::table_names`] operation
 pub struct TableNamesBuilder {
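The removed helpers fold new key/value pairs into whatever static options already exist, preserving any attached provider; the head side goes back to a plain map. A simplified sketch of the merge semantics both variants share (hypothetical types standing in for the lance_io ones):

```rust
use std::collections::HashMap;

/// Stand-in for ObjectStoreParams: only the static-options map is modeled.
#[derive(Default)]
struct Params {
    storage_options: Option<HashMap<String, String>>,
}

/// Later writes win, matching the builder semantics on both sides of the diff.
fn merge_options(params: &mut Params, pairs: impl IntoIterator<Item = (String, String)>) {
    let options = params.storage_options.get_or_insert_with(HashMap::new);
    for (key, value) in pairs {
        options.insert(key, value);
    }
}
```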
@@ -278,14 +246,16 @@ impl<const HAS_DATA: bool> CreateTableBuilder<HAS_DATA> {
     ///
     /// See available options at <https://lancedb.com/docs/storage/>
     pub fn storage_option(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
-        let store_params = self
+        let store_options = self
             .request
             .write_options
             .lance_write_params
             .get_or_insert(Default::default())
             .store_params
-            .get_or_insert(Default::default());
-        merge_storage_options(store_params, [(key.into(), value.into())]);
+            .get_or_insert(Default::default())
+            .storage_options
+            .get_or_insert(Default::default());
+        store_options.insert(key.into(), value.into());
         self
     }

@@ -299,17 +269,19 @@ impl<const HAS_DATA: bool> CreateTableBuilder<HAS_DATA> {
         mut self,
         pairs: impl IntoIterator<Item = (impl Into<String>, impl Into<String>)>,
     ) -> Self {
-        let store_params = self
+        let store_options = self
             .request
             .write_options
             .lance_write_params
             .get_or_insert(Default::default())
             .store_params
-            .get_or_insert(Default::default());
-        let updates = pairs
-            .into_iter()
-            .map(|(key, value)| (key.into(), value.into()));
-        merge_storage_options(store_params, updates);
+            .get_or_insert(Default::default())
+            .storage_options
+            .get_or_insert(Default::default());
+        for (key, value) in pairs {
+            store_options.insert(key.into(), value.into());
+        }
         self
     }
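Both sides of these builder hunks lean on the same idiom: each optional layer of write configuration is materialized on first write via chained `get_or_insert`. A self-contained sketch of the pattern (simplified, hypothetical types):

```rust
use std::collections::HashMap;

#[derive(Default)]
struct StoreParams {
    storage_options: Option<HashMap<String, String>>,
}

#[derive(Default)]
struct WriteParams {
    store_params: Option<StoreParams>,
}

/// Each `get_or_insert_with(Default::default)` creates the layer only when
/// it is first needed, so an untouched builder carries no empty config.
fn set_option(params: &mut Option<WriteParams>, key: &str, value: &str) {
    params
        .get_or_insert_with(Default::default)
        .store_params
        .get_or_insert_with(Default::default)
        .storage_options
        .get_or_insert_with(Default::default)
        .insert(key.to_string(), value.to_string());
}

fn main() {
    let mut params = None;
    set_option(&mut params, "region", "us-east-1");
    assert!(params.is_some());
}
```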
@@ -346,21 +318,23 @@ impl<const HAS_DATA: bool> CreateTableBuilder<HAS_DATA> {
     /// This has no effect in LanceDB Cloud.
     #[deprecated(since = "0.15.1", note = "Use `database_options` instead")]
     pub fn enable_v2_manifest_paths(mut self, use_v2_manifest_paths: bool) -> Self {
-        let store_params = self
+        let storage_options = self
             .request
             .write_options
             .lance_write_params
             .get_or_insert_with(Default::default)
             .store_params
-            .get_or_insert_with(Default::default);
-        let value = if use_v2_manifest_paths {
-            "true".to_string()
-        } else {
-            "false".to_string()
-        };
-        merge_storage_options(
-            store_params,
-            [(OPT_NEW_TABLE_V2_MANIFEST_PATHS.to_string(), value)],
+            .get_or_insert_with(Default::default)
+            .storage_options
+            .get_or_insert_with(Default::default);
+        storage_options.insert(
+            OPT_NEW_TABLE_V2_MANIFEST_PATHS.to_string(),
+            if use_v2_manifest_paths {
+                "true".to_string()
+            } else {
+                "false".to_string()
+            },
         );
         self
     }

@@ -370,19 +344,19 @@ impl<const HAS_DATA: bool> CreateTableBuilder<HAS_DATA> {
     /// The default is `LanceFileVersion::Stable`.
     #[deprecated(since = "0.15.1", note = "Use `database_options` instead")]
     pub fn data_storage_version(mut self, data_storage_version: LanceFileVersion) -> Self {
-        let store_params = self
+        let storage_options = self
             .request
             .write_options
             .lance_write_params
             .get_or_insert_with(Default::default)
             .store_params
-            .get_or_insert_with(Default::default);
-        merge_storage_options(
-            store_params,
-            [(
-                OPT_NEW_TABLE_STORAGE_VERSION.to_string(),
-                data_storage_version.to_string(),
-            )],
+            .get_or_insert_with(Default::default)
+            .storage_options
+            .get_or_insert_with(Default::default);
+        storage_options.insert(
+            OPT_NEW_TABLE_STORAGE_VERSION.to_string(),
+            data_storage_version.to_string(),
         );
         self
     }

@@ -407,14 +381,13 @@ impl<const HAS_DATA: bool> CreateTableBuilder<HAS_DATA> {
     /// This allows tables to automatically refresh cloud storage credentials
     /// when they expire, enabling long-running operations on remote storage.
     pub fn storage_options_provider(mut self, provider: Arc<dyn StorageOptionsProvider>) -> Self {
-        let store_params = self
-            .request
+        self.request
             .write_options
             .lance_write_params
             .get_or_insert(Default::default())
             .store_params
-            .get_or_insert(Default::default());
-        set_storage_options_provider(store_params, provider);
+            .get_or_insert(Default::default())
+            .storage_options_provider = Some(provider);
         self
     }
 }

@@ -477,13 +450,15 @@ impl OpenTableBuilder {
     ///
     /// See available options at <https://lancedb.com/docs/storage/>
     pub fn storage_option(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
-        let store_params = self
+        let storage_options = self
            .request
             .lance_read_params
             .get_or_insert(Default::default())
             .store_options
-            .get_or_insert(Default::default());
-        merge_storage_options(store_params, [(key.into(), value.into())]);
+            .get_or_insert(Default::default())
+            .storage_options
+            .get_or_insert(Default::default());
+        storage_options.insert(key.into(), value.into());
         self
     }

@@ -497,16 +472,18 @@ impl OpenTableBuilder {
         mut self,
         pairs: impl IntoIterator<Item = (impl Into<String>, impl Into<String>)>,
     ) -> Self {
-        let store_params = self
+        let storage_options = self
             .request
             .lance_read_params
             .get_or_insert(Default::default())
             .store_options
-            .get_or_insert(Default::default());
-        let updates = pairs
-            .into_iter()
-            .map(|(key, value)| (key.into(), value.into()));
-        merge_storage_options(store_params, updates);
+            .get_or_insert(Default::default())
+            .storage_options
+            .get_or_insert(Default::default());
+        for (key, value) in pairs {
+            storage_options.insert(key.into(), value.into());
+        }
         self
     }

@@ -530,13 +507,12 @@ impl OpenTableBuilder {
     /// This allows tables to automatically refresh cloud storage credentials
     /// when they expire, enabling long-running operations on remote storage.
     pub fn storage_options_provider(mut self, provider: Arc<dyn StorageOptionsProvider>) -> Self {
-        let store_params = self
-            .request
+        self.request
             .lance_read_params
             .get_or_insert(Default::default())
             .store_options
-            .get_or_insert(Default::default());
-        set_storage_options_provider(store_params, provider);
+            .get_or_insert(Default::default())
+            .storage_options_provider = Some(provider);
         self
     }
@@ -892,10 +868,6 @@ pub struct ConnectBuilder {
     embedding_registry: Option<Arc<dyn EmbeddingRegistry>>,
 }
 
-#[cfg(feature = "remote")]
-const ENV_VARS_TO_STORAGE_OPTS: [(&str, &str); 1] =
-    [("AZURE_STORAGE_ACCOUNT_NAME", "azure_storage_account_name")];
-
 impl ConnectBuilder {
     /// Create a new [`ConnectOptions`] with the given database URI.
     pub fn new(uri: &str) -> Self {

@@ -1079,27 +1051,11 @@ impl ConnectBuilder {
         self
     }
 
-    #[cfg(feature = "remote")]
-    fn apply_env_defaults(
-        env_var_to_remote_storage_option: &[(&str, &str)],
-        options: &mut HashMap<String, String>,
-    ) {
-        for (env_key, opt_key) in env_var_to_remote_storage_option {
-            if let Ok(env_value) = std::env::var(env_key) {
-                if !options.contains_key(*opt_key) {
-                    options.insert((*opt_key).to_string(), env_value);
-                }
-            }
-        }
-    }
-
     #[cfg(feature = "remote")]
     fn execute_remote(self) -> Result<Connection> {
         use crate::remote::db::RemoteDatabaseOptions;
 
-        let mut merged_options = self.request.options.clone();
-        Self::apply_env_defaults(&ENV_VARS_TO_STORAGE_OPTS, &mut merged_options);
-        let options = RemoteDatabaseOptions::parse_from_map(&merged_options)?;
+        let options = RemoteDatabaseOptions::parse_from_map(&self.request.options)?;
 
         let region = options.region.ok_or_else(|| Error::InvalidInput {
             message: "A region is required when connecting to LanceDb Cloud".to_string(),

@@ -1321,6 +1277,8 @@ mod test_utils {
 
 #[cfg(test)]
 mod tests {
+    use std::fs::create_dir_all;
+
     use crate::database::listing::{ListingDatabaseOptions, NewTableConfig};
     use crate::query::QueryBase;
     use crate::query::{ExecutableQuery, QueryExecutionOptions};

@@ -1344,23 +1302,6 @@ mod tests {
         assert_eq!(tc.connection.uri(), tc.uri);
     }
 
-    #[cfg(feature = "remote")]
-    #[test]
-    fn test_apply_env_defaults() {
-        let env_key = "TEST_APPLY_ENV_DEFAULTS_ENVIRONMENT_VARIABLE_ENV_KEY";
-        let env_val = "TEST_APPLY_ENV_DEFAULTS_ENVIRONMENT_VARIABLE_ENV_VAL";
-        let opts_key = "test_apply_env_defaults_environment_variable_opts_key";
-        std::env::set_var(env_key, env_val);
-
-        let mut options = HashMap::new();
-        ConnectBuilder::apply_env_defaults(&[(env_key, opts_key)], &mut options);
-        assert_eq!(Some(&env_val.to_string()), options.get(opts_key));
-
-        options.insert(opts_key.to_string(), "EXPLICIT-VALUE".to_string());
-        ConnectBuilder::apply_env_defaults(&[(env_key, opts_key)], &mut options);
-        assert_eq!(Some(&"EXPLICIT-VALUE".to_string()), options.get(opts_key));
-    }
-
     #[cfg(not(windows))]
     #[tokio::test]
     async fn test_connect_relative() {
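The removed `apply_env_defaults` fills a storage option from an environment variable only when the caller has not set it explicitly, as the deleted test verifies. The core of that behavior, sketched standalone:

```rust
use std::collections::HashMap;

/// An env var supplies a default; an explicit option always wins.
fn apply_env_defaults(mapping: &[(&str, &str)], options: &mut HashMap<String, String>) {
    for (env_key, opt_key) in mapping {
        if let Ok(env_value) = std::env::var(env_key) {
            options
                .entry((*opt_key).to_string())
                .or_insert(env_value);
        }
    }
}
```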
@@ -1585,27 +1526,18 @@ mod tests {
 
     #[tokio::test]
     async fn drop_table() {
-        let tc = new_test_connection().await.unwrap();
-        let db = tc.connection;
+        let tmp_dir = tempdir().unwrap();
 
-        if tc.is_remote {
-            // All the typical endpoints such as s3:///, file-object-store:///, etc. treat drop_table
-            // as idempotent.
-            assert!(db.drop_table("invalid_table", &[]).await.is_ok());
-        } else {
-            // The behavior of drop_table when using a file:/// endpoint differs from all other
-            // object providers, in that it returns an error when deleting a non-existent table.
-            assert!(matches!(
-                db.drop_table("invalid_table", &[]).await,
-                Err(crate::Error::TableNotFound { .. }),
-            ));
-        }
+        let uri = tmp_dir.path().to_str().unwrap();
+        let db = connect(uri).execute().await.unwrap();
 
         let schema = Arc::new(Schema::new(vec![Field::new("x", DataType::Int32, false)]));
         db.create_empty_table("table1", schema.clone())
             .execute()
             .await
             .unwrap();
+        // drop non-exist table
+        assert!(matches!(
+            db.drop_table("invalid_table", &[]).await,
+            Err(crate::Error::TableNotFound { .. }),
+        ));
 
+        create_dir_all(tmp_dir.path().join("table1.lance")).unwrap();
         db.drop_table("table1", &[]).await.unwrap();
 
         let tables = db.table_names().execute().await.unwrap();
@@ -12,7 +12,7 @@ use lance::dataset::{builder::DatasetBuilder, ReadParams, WriteMode};
 use lance::io::{ObjectStore, ObjectStoreParams, WrappingObjectStore};
 use lance_datafusion::utils::StreamingWriteSource;
 use lance_encoding::version::LanceFileVersion;
-use lance_io::object_store::{StorageOptionsAccessor, StorageOptionsProvider};
+use lance_io::object_store::StorageOptionsProvider;
 use lance_table::io::commit::commit_handler_from_url;
 use object_store::local::LocalFileSystem;
 use snafu::ResultExt;

@@ -356,13 +356,7 @@ impl ListingDatabase {
             .clone()
             .unwrap_or_else(|| Arc::new(lance::session::Session::default()));
         let os_params = ObjectStoreParams {
-            storage_options_accessor: if options.storage_options.is_empty() {
-                None
-            } else {
-                Some(Arc::new(StorageOptionsAccessor::with_static_options(
-                    options.storage_options.clone(),
-                )))
-            },
+            storage_options: Some(options.storage_options.clone()),
             ..Default::default()
         };
         let (object_store, base_path) = ObjectStore::from_uri_and_params(

@@ -469,20 +463,9 @@ impl ListingDatabase {
         validate_table_name(name)?;
 
         let mut uri = self.uri.clone();
-        // If the URI does not end with a path separator, add one
-        // Use forward slash for URIs (http://, s3://, gs://, file://, etc.)
-        // Use platform-specific separator for local paths without scheme
-        let has_scheme = uri.contains("://");
-        let ends_with_separator = uri.ends_with('/') || uri.ends_with('\\');
-
-        if !ends_with_separator {
-            if has_scheme {
-                // URIs always use forward slash
-                uri.push('/');
-            } else {
-                // Local path without scheme - use platform separator
-                uri.push(std::path::MAIN_SEPARATOR);
-            }
+        // If the URI does not end with a slash, add one
+        if !uri.ends_with('/') {
+            uri.push('/');
         }
         // Append the table name with the lance file extension
         uri.push_str(&format!("{}.{}", name, LANCE_FILE_EXTENSION));
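The head side collapses the separator handling to the simple slash-join form; as the deleted `test_table_uri` tests elsewhere in this diff show, a table URI is just the database URI plus `<name>.lance`. A sketch of the simplified join:

```rust
/// Head-side behavior: always join with '/' and append the Lance extension.
/// (The base side additionally used the platform separator for scheme-less
/// local paths.)
fn table_uri(mut db_uri: String, name: &str) -> String {
    if !db_uri.ends_with('/') {
        db_uri.push('/');
    }
    db_uri.push_str(&format!("{}.lance", name));
    db_uri
}

fn main() {
    assert_eq!(
        table_uri("s3://bucket/db".into(), "my_table"),
        "s3://bucket/db/my_table.lance"
    );
}
```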
@@ -498,13 +481,7 @@ impl ListingDatabase {
 
     async fn drop_tables(&self, names: Vec<String>) -> Result<()> {
         let object_store_params = ObjectStoreParams {
-            storage_options_accessor: if self.storage_options.is_empty() {
-                None
-            } else {
-                Some(Arc::new(StorageOptionsAccessor::with_static_options(
-                    self.storage_options.clone(),
-                )))
-            },
+            storage_options: Some(self.storage_options.clone()),
             ..Default::default()
         };
         let mut uri = self.uri.clone();

@@ -553,7 +530,7 @@ impl ListingDatabase {
             .lance_write_params
             .as_ref()
             .and_then(|p| p.store_params.as_ref())
-            .and_then(|sp| sp.storage_options());
+            .and_then(|sp| sp.storage_options.as_ref());
 
         let storage_version_override = storage_options
             .and_then(|opts| opts.get(OPT_NEW_TABLE_STORAGE_VERSION))

@@ -604,20 +581,21 @@ impl ListingDatabase {
         // will cause a new connection to be created, and that connection will
         // be dropped from the cache when python GCs the table object, which
         // confounds reuse across tables.
-        if !self.storage_options.is_empty() || self.storage_options_provider.is_some() {
-            let store_params = write_params
-                .store_params
-                .get_or_insert_with(Default::default);
-            let mut storage_options = store_params.storage_options().cloned().unwrap_or_default();
-            if !self.storage_options.is_empty() {
-                self.inherit_storage_options(&mut storage_options);
-            }
-            let accessor = if let Some(ref provider) = self.storage_options_provider {
-                StorageOptionsAccessor::with_initial_and_provider(storage_options, provider.clone())
-            } else {
-                StorageOptionsAccessor::with_static_options(storage_options)
-            };
-            store_params.storage_options_accessor = Some(Arc::new(accessor));
+        if !self.storage_options.is_empty() {
+            let storage_options = write_params
+                .store_params
+                .get_or_insert_with(Default::default)
+                .storage_options
+                .get_or_insert_with(Default::default);
+            self.inherit_storage_options(storage_options);
         }
 
+        // Set storage options provider if available
+        if self.storage_options_provider.is_some() {
+            write_params
+                .store_params
+                .get_or_insert_with(Default::default)
+                .storage_options_provider = self.storage_options_provider.clone();
+        }
+
         write_params.data_storage_version = self

@@ -903,13 +881,7 @@ impl Database for ListingDatabase {
         validate_table_name(&request.target_table_name)?;
 
         let storage_params = ObjectStoreParams {
-            storage_options_accessor: if self.storage_options.is_empty() {
-                None
-            } else {
-                Some(Arc::new(StorageOptionsAccessor::with_static_options(
-                    self.storage_options.clone(),
-                )))
-            },
+            storage_options: Some(self.storage_options.clone()),
             ..Default::default()
         };
         let read_params = ReadParams {

@@ -973,28 +945,25 @@ impl Database for ListingDatabase {
         // will cause a new connection to be created, and that connection will
         // be dropped from the cache when python GCs the table object, which
         // confounds reuse across tables.
-        if !self.storage_options.is_empty() || self.storage_options_provider.is_some() {
-            let store_params = request
-                .lance_read_params
-                .get_or_insert_with(Default::default)
-                .store_options
-                .get_or_insert_with(Default::default);
-            let mut storage_options = store_params.storage_options().cloned().unwrap_or_default();
-            if !self.storage_options.is_empty() {
-                self.inherit_storage_options(&mut storage_options);
-            }
-            // Preserve request-level provider if no connection-level provider exists
-            let request_provider = store_params
-                .storage_options_accessor
-                .as_ref()
-                .and_then(|a| a.provider().cloned());
-            let provider = self.storage_options_provider.clone().or(request_provider);
-            let accessor = if let Some(provider) = provider {
-                StorageOptionsAccessor::with_initial_and_provider(storage_options, provider)
-            } else {
-                StorageOptionsAccessor::with_static_options(storage_options)
-            };
-            store_params.storage_options_accessor = Some(Arc::new(accessor));
+        if !self.storage_options.is_empty() {
+            let storage_options = request
+                .lance_read_params
+                .get_or_insert_with(Default::default)
+                .store_options
+                .get_or_insert_with(Default::default)
+                .storage_options
+                .get_or_insert_with(Default::default);
+            self.inherit_storage_options(storage_options);
         }
 
+        // Set storage options provider if available
+        if self.storage_options_provider.is_some() {
+            request
+                .lance_read_params
+                .get_or_insert_with(Default::default)
+                .store_options
+                .get_or_insert_with(Default::default)
+                .storage_options_provider = self.storage_options_provider.clone();
+        }
+
         // Some ReadParams are exposed in the OpenTableBuilder, but we also

@@ -1102,7 +1071,6 @@ mod tests {
     use crate::table::{Table, TableDefinition};
     use arrow_array::{Int32Array, RecordBatch, StringArray};
     use arrow_schema::{DataType, Field, Schema};
-    use std::path::PathBuf;
     use tempfile::tempdir;
 
     async fn setup_database() -> (tempfile::TempDir, ListingDatabase) {

@@ -1901,9 +1869,7 @@ mod tests {
         let write_options = WriteOptions {
             lance_write_params: Some(lance::dataset::WriteParams {
                 store_params: Some(lance::io::ObjectStoreParams {
-                    storage_options_accessor: Some(Arc::new(
-                        StorageOptionsAccessor::with_static_options(storage_options),
-                    )),
+                    storage_options: Some(storage_options),
                     ..Default::default()
                 }),
                 ..Default::default()

@@ -1977,9 +1943,7 @@ mod tests {
         let write_options = WriteOptions {
             lance_write_params: Some(lance::dataset::WriteParams {
                 store_params: Some(lance::io::ObjectStoreParams {
-                    storage_options_accessor: Some(Arc::new(
-                        StorageOptionsAccessor::with_static_options(storage_options),
-                    )),
+                    storage_options: Some(storage_options),
                     ..Default::default()
                 }),
                 ..Default::default()

@@ -2082,19 +2046,6 @@ mod tests {
         assert_eq!(db_options.new_table_config.enable_stable_row_ids, None);
     }
 
-    #[tokio::test]
-    async fn test_table_uri() {
-        let (_tempdir, db) = setup_database().await;
-
-        let mut pb = PathBuf::new();
-        pb.push(db.uri.clone());
-        pb.push("test.lance");
-
-        let expected = pb.to_str().unwrap();
-        let uri = db.table_uri("test").ok().unwrap();
-        assert_eq!(uri, expected);
-    }
-
     #[tokio::test]
     async fn test_namespace_client() {
         let (_tempdir, db) = setup_database().await;
@@ -9,15 +9,14 @@ use std::sync::Arc;
 use async_trait::async_trait;
 use lance_namespace::{
     models::{
-        CreateEmptyTableRequest, CreateNamespaceRequest, CreateNamespaceResponse,
-        DeclareTableRequest, DescribeNamespaceRequest, DescribeNamespaceResponse,
-        DescribeTableRequest, DropNamespaceRequest, DropNamespaceResponse, DropTableRequest,
-        ListNamespacesRequest, ListNamespacesResponse, ListTablesRequest, ListTablesResponse,
+        CreateNamespaceRequest, CreateNamespaceResponse, DeclareTableRequest,
+        DescribeNamespaceRequest, DescribeNamespaceResponse, DescribeTableRequest,
+        DropNamespaceRequest, DropNamespaceResponse, DropTableRequest, ListNamespacesRequest,
+        ListNamespacesResponse, ListTablesRequest, ListTablesResponse,
     },
     LanceNamespace,
 };
 use lance_namespace_impls::ConnectBuilder;
-use log::warn;
 
 use crate::database::ReadConsistency;
 use crate::error::{Error, Result};

@@ -155,6 +154,7 @@ impl Database for LanceNamespaceDatabase {
         table_id.push(request.name.clone());
         let describe_request = DescribeTableRequest {
             id: Some(table_id.clone()),
+            version: None,
             ..Default::default()
         };

@@ -205,53 +205,24 @@ impl Database for LanceNamespaceDatabase {
         let mut table_id = request.namespace.clone();
         table_id.push(request.name.clone());
 
-        // Try declare_table first, falling back to create_empty_table for backwards
-        // compatibility with older namespace clients that don't support declare_table
         let declare_request = DeclareTableRequest {
             id: Some(table_id.clone()),
             location: None,
             vend_credentials: None,
             ..Default::default()
         };
 
-        let location = match self.namespace.declare_table(declare_request).await {
-            Ok(response) => response.location.ok_or_else(|| Error::Runtime {
-                message: "Table location is missing from declare_table response".to_string(),
-            })?,
-            Err(e) => {
-                // Check if the error is "not supported" and try create_empty_table as fallback
-                let err_str = e.to_string().to_lowercase();
-                if err_str.contains("not supported") || err_str.contains("not implemented") {
-                    warn!(
-                        "declare_table is not supported by the namespace client, \
-                        falling back to deprecated create_empty_table. \
-                        create_empty_table is deprecated and will be removed in Lance 3.0.0. \
-                        Please upgrade your namespace client to support declare_table."
-                    );
-                    #[allow(deprecated)]
-                    let create_empty_request = CreateEmptyTableRequest {
-                        id: Some(table_id.clone()),
-                        ..Default::default()
-                    };
-
-                    #[allow(deprecated)]
-                    let create_response = self
-                        .namespace
-                        .create_empty_table(create_empty_request)
-                        .await
-                        .map_err(|e| Error::Runtime {
-                            message: format!("Failed to create empty table: {}", e),
-                        })?;
-
-                    create_response.location.ok_or_else(|| Error::Runtime {
-                        message: "Table location is missing from create_empty_table response"
-                            .to_string(),
-                    })?
-                } else {
-                    return Err(Error::Runtime {
-                        message: format!("Failed to declare table: {}", e),
-                    });
-                }
-            }
-        };
+        let declare_response = self
+            .namespace
+            .declare_table(declare_request)
+            .await
+            .map_err(|e| Error::Runtime {
+                message: format!("Failed to declare table: {}", e),
+            })?;
+
+        let location = declare_response.location.ok_or_else(|| Error::Runtime {
+            message: "Table location is missing from declare_table response".to_string(),
+        })?;
 
         let native_table = NativeTable::create_from_namespace(
             self.namespace.clone(),
@@ -466,6 +437,8 @@ mod tests {
|
||||
// Create a child namespace first
|
||||
conn.create_namespace(CreateNamespaceRequest {
|
||||
id: Some(vec!["test_ns".into()]),
|
||||
mode: None,
|
||||
properties: None,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
@@ -526,6 +499,8 @@ mod tests {
|
||||
// Create a child namespace first
|
||||
conn.create_namespace(CreateNamespaceRequest {
|
||||
id: Some(vec!["test_ns".into()]),
|
||||
mode: None,
|
||||
properties: None,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
@@ -589,6 +564,8 @@ mod tests {
|
||||
// Create a child namespace first
|
||||
conn.create_namespace(CreateNamespaceRequest {
|
||||
id: Some(vec!["test_ns".into()]),
|
||||
mode: None,
|
||||
properties: None,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
@@ -672,6 +649,8 @@ mod tests {
|
||||
// Create a child namespace first
|
||||
conn.create_namespace(CreateNamespaceRequest {
|
||||
id: Some(vec!["test_ns".into()]),
|
||||
mode: None,
|
||||
properties: None,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
@@ -727,6 +706,8 @@ mod tests {
|
||||
// Create a child namespace first
|
||||
conn.create_namespace(CreateNamespaceRequest {
|
||||
id: Some(vec!["test_ns".into()]),
|
||||
mode: None,
|
||||
properties: None,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
@@ -807,6 +788,8 @@ mod tests {
|
||||
// Create a child namespace first
|
||||
conn.create_namespace(CreateNamespaceRequest {
|
||||
id: Some(vec!["test_ns".into()]),
|
||||
mode: None,
|
||||
properties: None,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
@@ -840,6 +823,8 @@ mod tests {
|
||||
// Create a child namespace first
|
||||
conn.create_namespace(CreateNamespaceRequest {
|
||||
id: Some(vec!["test_ns".into()]),
|
||||
mode: None,
|
||||
properties: None,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
|
||||
@@ -8,10 +8,9 @@ use datafusion_execution::{disk_manager::DiskManagerBuilder, runtime_env::Runtim
|
||||
use datafusion_expr::col;
|
||||
use futures::TryStreamExt;
|
||||
use lance_core::ROW_ID;
|
||||
use lance_datafusion::exec::SessionContextExt;
|
||||
|
||||
use crate::{
|
||||
arrow::{SendableRecordBatchStream, SendableRecordBatchStreamExt, SimpleRecordBatchStream},
|
||||
arrow::{SendableRecordBatchStream, SimpleRecordBatchStream},
|
||||
connect,
|
||||
database::{CreateTableData, CreateTableRequest, Database},
|
||||
dataloader::permutation::{
|
||||
@@ -178,12 +177,17 @@ impl PermutationBuilder {
|
||||
.build_arc()
|
||||
.unwrap(),
|
||||
);
|
||||
let df = ctx
|
||||
.read_one_shot(data.into_df_stream())
|
||||
let batches = data
|
||||
.map_err(|e| Error::Other {
|
||||
message: format!("Failed to setup sort by split id: {}", e),
|
||||
source: Some(e.into()),
|
||||
})?;
|
||||
})
|
||||
.try_collect::<Vec<_>>()
|
||||
.await?;
|
||||
let df = ctx.read_batches(batches).map_err(|e| Error::Other {
|
||||
message: format!("Failed to setup sort by split id: {}", e),
|
||||
source: Some(e.into()),
|
||||
})?;
|
||||
let df_stream = df
|
||||
.sort_by(vec![col(SPLIT_ID_COLUMN)])
|
||||
.map_err(|e| Error::Other {
|
||||
|
||||
@@ -171,7 +171,7 @@ impl Shuffler {
|
||||
// This is kind of an annoying limitation but if we allow runt clumps from batches then
|
||||
// clumps will get unaligned and we will mess up the clumps when we do the in-memory
|
||||
// shuffle step. If this is a problem we can probably figure out a better way to do this.
|
||||
if !is_last && !(batch.num_rows() as u64).is_multiple_of(clump_size) {
|
||||
if !is_last && batch.num_rows() as u64 % clump_size != 0 {
|
||||
return Err(Error::Runtime {
|
||||
message: format!(
|
||||
"Expected batch size ({}) to be divisible by clump size ({})",
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
|
||||
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
|
||||
Arc,
|
||||
use std::{
|
||||
iter,
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
|
||||
use arrow_array::{Array, BooleanArray, RecordBatch, UInt64Array};
|
||||
@@ -155,7 +158,7 @@ impl Splitter {
|
||||
remaining_in_split
|
||||
};
|
||||
|
||||
split_ids.extend(std::iter::repeat_n(split_id as u64, rows_to_add as usize));
|
||||
split_ids.extend(iter::repeat(split_id as u64).take(rows_to_add as usize));
|
||||
if done {
|
||||
// Quit early if we've run out of splits
|
||||
break;
|
||||
@@ -659,7 +662,7 @@ mod tests {
|
||||
assert_eq!(split_batch.num_rows(), total_split_sizes as usize);
|
||||
let mut expected = Vec::with_capacity(total_split_sizes as usize);
|
||||
for (i, size) in expected_split_sizes.iter().enumerate() {
|
||||
expected.extend(std::iter::repeat_n(i as u64, *size as usize));
|
||||
expected.extend(iter::repeat(i as u64).take(*size as usize));
|
||||
}
|
||||
let expected = Arc::new(UInt64Array::from(expected)) as Arc<dyn Array>;
|
||||
|
||||
|
||||
@@ -297,10 +297,10 @@ impl IvfPqIndexBuilder {
|
||||
}
|
||||
|
||||
pub(crate) fn suggested_num_sub_vectors(dim: u32) -> u32 {
|
||||
if dim.is_multiple_of(16) {
|
||||
if dim % 16 == 0 {
|
||||
// Should be more aggressive than this default.
|
||||
dim / 16
|
||||
} else if dim.is_multiple_of(8) {
|
||||
} else if dim % 8 == 0 {
|
||||
dim / 8
|
||||
} else {
|
||||
log::warn!(
|
||||
|
||||
@@ -25,14 +25,13 @@
|
||||
//!
|
||||
//! ## Crate Features
|
||||
//!
|
||||
//! - `aws` - Enable AWS S3 object store support.
|
||||
//! - `dynamodb` - Enable DynamoDB manifest store support.
|
||||
//! - `azure` - Enable Azure Blob Storage object store support.
|
||||
//! - `gcs` - Enable Google Cloud Storage object store support.
|
||||
//! - `oss` - Enable Alibaba Cloud OSS object store support.
|
||||
//! - `remote` - Enable remote client to connect to LanceDB cloud.
|
||||
//! - `huggingface` - Enable HuggingFace Hub integration for loading datasets from the Hub.
|
||||
//! - `fp16kernels` - Enable FP16 kernels for faster vector search on CPU.
|
||||
//! ### Experimental Features
|
||||
//!
|
||||
//! These features are not enabled by default. They are experimental or in-development features that
|
||||
//! are not yet ready to be released.
|
||||
//!
|
||||
//! - `remote` - Enable remote client to connect to LanceDB cloud. This is not yet fully implemented
|
||||
//! and should not be enabled.
|
||||
//!
|
||||
//! ### Quick Start
|
||||
//!
|
||||
@@ -51,15 +50,17 @@
|
||||
//! - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud object store
|
||||
//! - `db://dbname` - Lance Cloud
|
||||
//!
|
||||
//! You can also use [`ConnectBuilder`] to configure the connection to the database.
|
||||
//! You can also use [`ConnectOptions`] to configure the connection to the database.
|
||||
//!
|
||||
//! ```rust
|
||||
//! use object_store::aws::AwsCredential;
|
||||
//! # tokio::runtime::Runtime::new().unwrap().block_on(async {
|
||||
//! let db = lancedb::connect("data/sample-lancedb")
|
||||
//! .storage_options([
|
||||
//! ("aws_access_key_id", "some_key"),
|
||||
//! ("aws_secret_access_key", "some_secret"),
|
||||
//! ])
|
||||
//! .aws_creds(AwsCredential {
|
||||
//! key_id: "some_key".to_string(),
|
||||
//! secret_key: "some_secret".to_string(),
|
||||
//! token: None,
|
||||
//! })
|
||||
//! .execute()
|
||||
//! .await
|
||||
//! .unwrap();
|
||||
|
||||
@@ -1718,6 +1718,8 @@ mod tests {
|
||||
let namespace = vec!["test_ns".to_string()];
|
||||
conn.create_namespace(CreateNamespaceRequest {
|
||||
id: Some(namespace.clone()),
|
||||
mode: None,
|
||||
properties: None,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
@@ -1743,6 +1745,8 @@ mod tests {
|
||||
let list_response = conn
|
||||
.list_tables(ListTablesRequest {
|
||||
id: Some(namespace.clone()),
|
||||
page_token: None,
|
||||
limit: None,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
@@ -1754,6 +1758,8 @@ mod tests {
|
||||
let list_response = namespace_client
|
||||
.list_tables(ListTablesRequest {
|
||||
id: Some(namespace.clone()),
|
||||
page_token: None,
|
||||
limit: None,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
@@ -1794,6 +1800,8 @@ mod tests {
|
||||
let namespace = vec!["multi_table_ns".to_string()];
|
||||
conn.create_namespace(CreateNamespaceRequest {
|
||||
id: Some(namespace.clone()),
|
||||
mode: None,
|
||||
properties: None,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
@@ -1819,6 +1827,8 @@ mod tests {
|
||||
let list_response = conn
|
||||
.list_tables(ListTablesRequest {
|
||||
id: Some(namespace.clone()),
|
||||
page_token: None,
|
||||
limit: None,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
|
||||
@@ -204,7 +204,6 @@ pub struct RemoteTable<S: HttpSend = Sender> {
|
||||
server_version: ServerVersion,
|
||||
|
||||
version: RwLock<Option<u64>>,
|
||||
location: RwLock<Option<String>>,
|
||||
}
|
||||
|
||||
impl<S: HttpSend> RemoteTable<S> {
|
||||
@@ -222,7 +221,6 @@ impl<S: HttpSend> RemoteTable<S> {
|
||||
identifier,
|
||||
server_version,
|
||||
version: RwLock::new(None),
|
||||
location: RwLock::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -641,7 +639,6 @@ impl<S: HttpSend> RemoteTable<S> {
|
||||
struct TableDescription {
|
||||
version: u64,
|
||||
schema: JsonSchema,
|
||||
location: Option<String>,
|
||||
}
|
||||
|
||||
impl<S: HttpSend> std::fmt::Display for RemoteTable<S> {
|
||||
@@ -670,7 +667,6 @@ mod test_utils {
|
||||
identifier: name,
|
||||
server_version: version.map(ServerVersion).unwrap_or_default(),
|
||||
version: RwLock::new(None),
|
||||
location: RwLock::new(None),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1465,28 +1461,8 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
message: "table_definition is not supported on LanceDB cloud.".into(),
|
||||
})
|
||||
}
|
||||
async fn uri(&self) -> Result<String> {
|
||||
// Check if we already have the location cached
|
||||
{
|
||||
let location = self.location.read().await;
|
||||
if let Some(ref loc) = *location {
|
||||
return Ok(loc.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch from server via describe
|
||||
let description = self.describe().await?;
|
||||
let location = description.location.ok_or_else(|| Error::NotSupported {
|
||||
message: "Table URI not supported by the server".into(),
|
||||
})?;
|
||||
|
||||
// Cache the location for future use
|
||||
{
|
||||
let mut cached_location = self.location.write().await;
|
||||
*cached_location = Some(location.clone());
|
||||
}
|
||||
|
||||
Ok(location)
|
||||
fn dataset_uri(&self) -> &str {
|
||||
"NOT_SUPPORTED"
|
||||
}
|
||||
|
||||
async fn storage_options(&self) -> Option<HashMap<String, String>> {
|
||||
@@ -3356,69 +3332,4 @@ mod tests {
|
||||
let result = table.drop_columns(&["old_col1", "old_col2"]).await.unwrap();
|
||||
assert_eq!(result.version, 5);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_uri() {
|
||||
let table = Table::new_with_handler("my_table", |request| {
|
||||
assert_eq!(request.method(), "POST");
|
||||
assert_eq!(request.url().path(), "/v1/table/my_table/describe/");
|
||||
|
||||
http::Response::builder()
|
||||
.status(200)
|
||||
.body(r#"{"version": 1, "schema": {"fields": []}, "location": "s3://bucket/path/to/table"}"#)
|
||||
.unwrap()
|
||||
});
|
||||
|
||||
let uri = table.uri().await.unwrap();
|
||||
assert_eq!(uri, "s3://bucket/path/to/table");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_uri_missing_location() {
|
||||
let table = Table::new_with_handler("my_table", |request| {
|
||||
assert_eq!(request.method(), "POST");
|
||||
assert_eq!(request.url().path(), "/v1/table/my_table/describe/");
|
||||
|
||||
// Server returns response without location field
|
||||
http::Response::builder()
|
||||
.status(200)
|
||||
.body(r#"{"version": 1, "schema": {"fields": []}}"#)
|
||||
.unwrap()
|
||||
});
|
||||
|
||||
let result = table.uri().await;
|
||||
assert!(result.is_err());
|
||||
assert!(matches!(&result, Err(Error::NotSupported { .. })));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_uri_caching() {
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
let call_count = Arc::new(AtomicUsize::new(0));
|
||||
let call_count_clone = call_count.clone();
|
||||
|
||||
let table = Table::new_with_handler("my_table", move |request| {
|
||||
assert_eq!(request.url().path(), "/v1/table/my_table/describe/");
|
||||
call_count_clone.fetch_add(1, Ordering::SeqCst);
|
||||
|
||||
http::Response::builder()
|
||||
.status(200)
|
||||
.body(
|
||||
r#"{"version": 1, "schema": {"fields": []}, "location": "gs://bucket/table"}"#,
|
||||
)
|
||||
.unwrap()
|
||||
});
|
||||
|
||||
// First call should fetch from server
|
||||
let uri1 = table.uri().await.unwrap();
|
||||
assert_eq!(uri1, "gs://bucket/table");
|
||||
assert_eq!(call_count.load(Ordering::SeqCst), 1);
|
||||
|
||||
// Second call should use cached value
|
||||
let uri2 = table.uri().await.unwrap();
|
||||
assert_eq!(uri2, "gs://bucket/table");
|
||||
assert_eq!(call_count.load(Ordering::SeqCst), 1); // Still 1, no new call
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,7 +40,7 @@ use lance_index::vector::pq::PQBuildParams;
|
||||
use lance_index::vector::sq::builder::SQBuildParams;
|
||||
use lance_index::DatasetIndexExt;
|
||||
use lance_index::IndexType;
|
||||
use lance_io::object_store::{LanceNamespaceStorageOptionsProvider, StorageOptionsAccessor};
|
||||
use lance_io::object_store::LanceNamespaceStorageOptionsProvider;
|
||||
use lance_namespace::models::{
|
||||
QueryTableRequest as NsQueryTableRequest, QueryTableRequestColumns,
|
||||
QueryTableRequestFullTextQuery, QueryTableRequestVector, StringFtsQuery,
|
||||
@@ -608,8 +608,8 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {
|
||||
async fn list_versions(&self) -> Result<Vec<Version>>;
|
||||
/// Get the table definition.
|
||||
async fn table_definition(&self) -> Result<TableDefinition>;
|
||||
/// Get the table URI (storage location)
|
||||
async fn uri(&self) -> Result<String>;
|
||||
/// Get the table URI
|
||||
fn dataset_uri(&self) -> &str;
|
||||
/// Get the storage options used when opening this table, if any.
|
||||
async fn storage_options(&self) -> Option<HashMap<String, String>>;
|
||||
/// Poll until the columns are fully indexed. Will return Error::Timeout if the columns
|
||||
@@ -1317,12 +1317,11 @@ impl Table {
|
||||
self.inner.list_indices().await
|
||||
}
|
||||
|
||||
/// Get the table URI (storage location)
|
||||
/// Get the underlying dataset URI
|
||||
///
|
||||
/// Returns the full storage location of the table (e.g., S3/GCS path).
|
||||
/// For remote tables, this fetches the location from the server via describe.
|
||||
pub async fn uri(&self) -> Result<String> {
|
||||
self.inner.uri().await
|
||||
/// Warning: This is an internal API and the return value is subject to change.
|
||||
pub fn dataset_uri(&self) -> &str {
|
||||
self.inner.dataset_uri()
|
||||
}
|
||||
|
||||
/// Get the storage options used when opening this table, if any.
|
||||
@@ -1412,26 +1411,35 @@ impl Table {
|
||||
let projected_plans = plans
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(plan_i, plan)| {
|
||||
let query_index = datafusion_common::ScalarValue::Int32(Some(plan_i as i32));
|
||||
let query_index_expr =
|
||||
datafusion_physical_plan::expressions::Literal::new(query_index);
|
||||
let query_index_expr =
|
||||
Arc::new(query_index_expr) as Arc<dyn datafusion_physical_plan::PhysicalExpr>;
|
||||
let mut projections = vec![(query_index_expr, "query_index".to_string())];
|
||||
projections.extend_from_slice(&project_all_columns);
|
||||
let projection = ProjectionExec::try_new(projections, plan).unwrap();
|
||||
Arc::new(projection) as Arc<dyn datafusion_physical_plan::ExecutionPlan>
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
.map(
|
||||
|(plan_i, plan)| -> Result<Arc<dyn datafusion_physical_plan::ExecutionPlan>> {
|
||||
let query_index = datafusion_common::ScalarValue::Int32(Some(plan_i as i32));
|
||||
let query_index_expr =
|
||||
datafusion_physical_plan::expressions::Literal::new(query_index);
|
||||
let query_index_expr = Arc::new(query_index_expr)
|
||||
as Arc<dyn datafusion_physical_plan::PhysicalExpr>;
|
||||
let mut projections = vec![(query_index_expr, "query_index".to_string())];
|
||||
projections.extend_from_slice(&project_all_columns);
|
||||
let projection =
|
||||
ProjectionExec::try_new(projections, plan).map_err(|e| Error::Runtime {
|
||||
message: format!("Failed to build projection plan: {e}"),
|
||||
})?;
|
||||
Ok(Arc::new(projection) as Arc<dyn datafusion_physical_plan::ExecutionPlan>)
|
||||
},
|
||||
)
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
|
||||
let unioned = Arc::new(UnionExec::new(projected_plans));
|
||||
let unioned = UnionExec::try_new(projected_plans).map_err(|e| Error::Runtime {
|
||||
message: format!("Failed to union query plans: {e}"),
|
||||
})?;
|
||||
// We require 1 partition in the final output
|
||||
let repartitioned = RepartitionExec::try_new(
|
||||
unioned,
|
||||
datafusion_physical_plan::Partitioning::RoundRobinBatch(1),
|
||||
)
|
||||
.unwrap();
|
||||
.map_err(|e| Error::Runtime {
|
||||
message: format!("Failed to repartition query plans: {e}"),
|
||||
})?;
|
||||
Ok(Arc::new(repartitioned))
|
||||
}
|
||||
|
||||
@@ -1666,14 +1674,18 @@ impl NativeTable {
|
||||
|
||||
// Use DatasetBuilder::from_namespace which automatically fetches location
|
||||
// and storage options from the namespace
|
||||
let builder = DatasetBuilder::from_namespace(namespace_client.clone(), table_id)
|
||||
.await
|
||||
.map_err(|e| match e {
|
||||
lance::Error::Namespace { source, .. } => Error::Runtime {
|
||||
message: format!("Failed to get table info from namespace: {:?}", source),
|
||||
},
|
||||
source => Error::Lance { source },
|
||||
})?;
|
||||
let builder = DatasetBuilder::from_namespace(
|
||||
namespace_client.clone(),
|
||||
table_id,
|
||||
false, // Don't ignore namespace storage options
|
||||
)
|
||||
.await
|
||||
.map_err(|e| match e {
|
||||
lance::Error::Namespace { source, .. } => Error::Runtime {
|
||||
message: format!("Failed to get table info from namespace: {:?}", source),
|
||||
},
|
||||
source => Error::Lance { source },
|
||||
})?;
|
||||
|
||||
let dataset = builder
|
||||
.with_read_params(params)
|
||||
@@ -1877,13 +1889,7 @@ impl NativeTable {
|
||||
let store_params = params
|
||||
.store_params
|
||||
.get_or_insert_with(ObjectStoreParams::default);
|
||||
let accessor = match store_params.storage_options().cloned() {
|
||||
Some(options) => {
|
||||
StorageOptionsAccessor::with_initial_and_provider(options, storage_options_provider)
|
||||
}
|
||||
None => StorageOptionsAccessor::with_provider(storage_options_provider),
|
||||
};
|
||||
store_params.storage_options_accessor = Some(Arc::new(accessor));
|
||||
store_params.storage_options_provider = Some(storage_options_provider);
|
||||
|
||||
// Patch the params if we have a write store wrapper
|
||||
let params = match write_store_wrapper.clone() {
|
||||
@@ -2059,7 +2065,7 @@ impl NativeTable {
|
||||
return provided;
|
||||
}
|
||||
let suggested = suggested_num_sub_vectors(dim);
|
||||
if num_bits.is_some_and(|num_bits| num_bits == 4) && !suggested.is_multiple_of(2) {
|
||||
if num_bits.is_some_and(|num_bits| num_bits == 4) && suggested % 2 != 0 {
|
||||
// num_sub_vectors must be even when 4 bits are used
|
||||
suggested + 1
|
||||
} else {
|
||||
@@ -2337,6 +2343,23 @@ impl NativeTable {
|
||||
|
||||
/// Convert an AnyQuery to the namespace QueryTableRequest format.
|
||||
fn convert_to_namespace_query(&self, query: &AnyQuery) -> Result<NsQueryTableRequest> {
|
||||
let to_namespace_columns =
|
||||
|select: &Select| -> Result<Option<Box<QueryTableRequestColumns>>> {
|
||||
match select {
|
||||
Select::All => Ok(None),
|
||||
Select::Columns(cols) => {
|
||||
let mut columns = QueryTableRequestColumns::new();
|
||||
columns.column_names = Some(cols.clone());
|
||||
Ok(Some(Box::new(columns)))
|
||||
}
|
||||
Select::Dynamic(_) => Err(Error::NotSupported {
|
||||
message:
|
||||
"Dynamic column selection is not supported for server-side queries"
|
||||
.to_string(),
|
||||
}),
|
||||
}
|
||||
};
|
||||
|
||||
match query {
|
||||
AnyQuery::VectorQuery(vq) => {
|
||||
// Extract the query vector(s)
|
||||
@@ -2348,22 +2371,6 @@ impl NativeTable {
|
||||
None => None,
|
||||
};
|
||||
|
||||
// Convert select to columns list
|
||||
let columns = match &vq.base.select {
|
||||
Select::All => None,
|
||||
Select::Columns(cols) => Some(Box::new(QueryTableRequestColumns {
|
||||
column_names: Some(cols.clone()),
|
||||
column_aliases: None,
|
||||
})),
|
||||
Select::Dynamic(_) => {
|
||||
return Err(Error::NotSupported {
|
||||
message:
|
||||
"Dynamic column selection is not supported for server-side queries"
|
||||
.to_string(),
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
// Check for unsupported features
|
||||
if vq.base.reranker.is_some() {
|
||||
return Err(Error::NotSupported {
|
||||
@@ -2371,6 +2378,8 @@ impl NativeTable {
|
||||
});
|
||||
}
|
||||
|
||||
let columns = to_namespace_columns(&vq.base.select)?;
|
||||
|
||||
// Convert FTS query if present
|
||||
let full_text_query = vq.base.full_text_search.as_ref().map(|fts| {
|
||||
let columns = fts.columns();
|
||||
@@ -2407,6 +2416,7 @@ impl NativeTable {
|
||||
with_row_id: Some(vq.base.with_row_id),
|
||||
bypass_vector_index: Some(!vq.use_index),
|
||||
full_text_query,
|
||||
version: None,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
@@ -2425,19 +2435,7 @@ impl NativeTable {
|
||||
.map(|f| self.filter_to_sql(f))
|
||||
.transpose()?;
|
||||
|
||||
let columns = match &q.select {
|
||||
Select::All => None,
|
||||
Select::Columns(cols) => Some(Box::new(QueryTableRequestColumns {
|
||||
column_names: Some(cols.clone()),
|
||||
column_aliases: None,
|
||||
})),
|
||||
Select::Dynamic(_) => {
|
||||
return Err(Error::NotSupported {
|
||||
message: "Dynamic columns are not supported for server-side query"
|
||||
.to_string(),
|
||||
});
|
||||
}
|
||||
};
|
||||
let columns = to_namespace_columns(&q.select)?;
|
||||
|
||||
// Handle full text search if present
|
||||
let full_text_query = q.full_text_search.as_ref().map(|fts| {
|
||||
@@ -2469,10 +2467,18 @@ impl NativeTable {
|
||||
columns,
|
||||
prefilter: Some(q.prefilter),
|
||||
offset: q.offset.map(|o| o as i32),
|
||||
ef: None,
|
||||
refine_factor: None,
|
||||
distance_type: None,
|
||||
nprobes: None,
|
||||
vector_column: None, // No vector column for plain queries
|
||||
with_row_id: Some(q.with_row_id),
|
||||
bypass_vector_index: Some(true), // No vector index for plain queries
|
||||
full_text_query,
|
||||
version: None,
|
||||
fast_search: None,
|
||||
lower_bound: None,
|
||||
upper_bound: None,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
@@ -3226,8 +3232,8 @@ impl BaseTable for NativeTable {
|
||||
Ok(results.into_iter().flatten().collect())
|
||||
}
|
||||
|
||||
async fn uri(&self) -> Result<String> {
|
||||
Ok(self.uri.clone())
|
||||
fn dataset_uri(&self) -> &str {
|
||||
self.uri.as_str()
|
||||
}
|
||||
|
||||
async fn storage_options(&self) -> Option<HashMap<String, String>> {
|
||||
@@ -3235,7 +3241,7 @@ impl BaseTable for NativeTable {
|
||||
.get()
|
||||
.await
|
||||
.ok()
|
||||
.and_then(|dataset| dataset.initial_storage_options().cloned())
|
||||
.and_then(|dataset| dataset.storage_options().cloned())
|
||||
}
|
||||
|
||||
async fn index_stats(&self, index_name: &str) -> Result<Option<IndexStatistics>> {
|
||||
@@ -3400,6 +3406,7 @@ pub struct FragmentSummaryStats {
|
||||
#[cfg(test)]
|
||||
#[allow(deprecated)]
|
||||
mod tests {
|
||||
use std::iter;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
@@ -4016,7 +4023,7 @@ mod tests {
|
||||
schema.clone(),
|
||||
vec![
|
||||
Arc::new(Int32Array::from_iter_values(offset..(offset + 10))),
|
||||
Arc::new(Int32Array::from_iter_values(std::iter::repeat_n(age, 10))),
|
||||
Arc::new(Int32Array::from_iter_values(iter::repeat(age).take(10))),
|
||||
],
|
||||
)],
|
||||
schema,
|
||||
@@ -5151,8 +5158,8 @@ mod tests {
|
||||
ns_request
|
||||
.columns
|
||||
.as_ref()
|
||||
.and_then(|c| c.column_names.as_ref()),
|
||||
Some(&vec!["id".to_string()])
|
||||
.and_then(|cols| cols.column_names.clone()),
|
||||
Some(vec!["id".to_string()])
|
||||
);
|
||||
assert_eq!(ns_request.vector_column, Some("vector".to_string()));
|
||||
assert_eq!(ns_request.distance_type, Some("l2".to_string()));
|
||||
@@ -5198,8 +5205,8 @@ mod tests {
|
||||
ns_request
|
||||
.columns
|
||||
.as_ref()
|
||||
.and_then(|c| c.column_names.as_ref()),
|
||||
Some(&vec!["id".to_string()])
|
||||
.and_then(|cols| cols.column_names.clone()),
|
||||
Some(vec!["id".to_string()])
|
||||
);
|
||||
assert_eq!(ns_request.with_row_id, Some(true));
|
||||
assert_eq!(ns_request.bypass_vector_index, Some(true));
|
||||
|
||||
@@ -101,6 +101,7 @@ impl DatasetRef {
|
||||
refs::Ref::Version(_, Some(target_ver)) => version != target_ver,
|
||||
refs::Ref::Version(_, None) => true, // No specific version, always checkout
|
||||
refs::Ref::Tag(_) => true, // Always checkout for tags
|
||||
refs::Ref::VersionNumber(target_ver) => version != target_ver,
|
||||
};
|
||||
|
||||
if should_checkout {
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
collections::{HashMap, HashSet},
|
||||
iter::repeat,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
@@ -267,10 +268,9 @@ fn create_some_records() -> Result<impl IntoArrow> {
|
||||
schema.clone(),
|
||||
vec![
|
||||
Arc::new(Int32Array::from_iter_values(0..TOTAL as i32)),
|
||||
Arc::new(StringArray::from_iter(std::iter::repeat_n(
|
||||
Some("hello world".to_string()),
|
||||
TOTAL,
|
||||
))),
|
||||
Arc::new(StringArray::from_iter(
|
||||
repeat(Some("hello world".to_string())).take(TOTAL),
|
||||
)),
|
||||
],
|
||||
)
|
||||
.unwrap()]
|
||||
|
||||
Reference in New Issue
Block a user