mirror of
https://github.com/lancedb/lancedb.git
synced 2025-12-27 15:12:53 +00:00
Compare commits: python-v0. ... remote-tab (1 commit)

Commit: 3965d1584c

Cargo.toml (24 changed lines)
@@ -14,19 +14,19 @@ keywords = ["lancedb", "lance", "database", "vector", "search"]
 categories = ["database-implementations"]
 
 [workspace.dependencies]
-lance = { "version" = "=0.10.16", "features" = ["dynamodb"] }
-lance-index = { "version" = "=0.10.16" }
-lance-linalg = { "version" = "=0.10.16" }
-lance-testing = { "version" = "=0.10.16" }
+lance = { "version" = "=0.10.15", "features" = ["dynamodb"] }
+lance-index = { "version" = "=0.10.15" }
+lance-linalg = { "version" = "=0.10.15" }
+lance-testing = { "version" = "=0.10.15" }
 # Note that this one does not include pyarrow
-arrow = { version = "51.0", optional = false }
-arrow-array = "51.0"
-arrow-data = "51.0"
-arrow-ipc = "51.0"
-arrow-ord = "51.0"
-arrow-schema = "51.0"
-arrow-arith = "51.0"
-arrow-cast = "51.0"
+arrow = { version = "50.0", optional = false }
+arrow-array = "50.0"
+arrow-data = "50.0"
+arrow-ipc = "50.0"
+arrow-ord = "50.0"
+arrow-schema = "50.0"
+arrow-arith = "50.0"
+arrow-cast = "50.0"
 async-trait = "0"
 chrono = "0.4.35"
 half = { "version" = "=2.3.1", default-features = false, features = [

@@ -159,7 +159,7 @@ Allows you to set parameters when registering a `sentence-transformers` object.
 from lancedb.embeddings import get_registry
 
 db = lancedb.connect("/tmp/db")
-model = get_registry().get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
+model = get_registry.get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
 
 class Words(LanceModel):
     text: str = model.SourceField()

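For readers comparing the two sides of this docs hunk: the snippet is part of the sentence-transformers registry example. Below is a minimal runnable sketch of the full pattern following the documented registry API; the table name, sample data, and query string are illustrative and not part of the diff.

```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector

db = lancedb.connect("/tmp/db")
# Note the parentheses on get_registry(): the old side of the hunk has them,
# the new side drops them, which would fail at runtime.
model = get_registry().get("sentence-transformers").create(
    name="BAAI/bge-small-en-v1.5", device="cpu"
)

class Words(LanceModel):
    text: str = model.SourceField()                      # embedded at ingest time
    vector: Vector(model.ndims()) = model.VectorField()  # filled in automatically

table = db.create_table("words", schema=Words)
table.add([{"text": "hello world"}, {"text": "goodbye world"}])
print(table.search("greeting").limit(1).to_pydantic(Words)[0].text)
```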
@@ -140,9 +140,6 @@ export class RemoteConnection implements Connection {
       schema = nameOrOpts.schema
       embeddings = nameOrOpts.embeddingFunction
       tableName = nameOrOpts.name
-      if (data === undefined) {
-        data = nameOrOpts.data
-      }
     }
 
     let buffer: Buffer

@@ -77,18 +77,6 @@ export interface OpenTableOptions {
    * The available options are described at https://lancedb.github.io/lancedb/guides/storage/
    */
   storageOptions?: Record<string, string>;
-  /**
-   * Set the size of the index cache, specified as a number of entries
-   *
-   * The exact meaning of an "entry" will depend on the type of index:
-   * - IVF: there is one entry for each IVF partition
-   * - BTREE: there is one entry for the entire index
-   *
-   * This cache applies to the entire opened table, across all indices.
-   * Setting this value higher will increase performance on larger datasets
-   * at the expense of more RAM
-   */
-  indexCacheSize?: number;
 }
 
 export interface TableNamesOptions {

@@ -172,7 +160,6 @@ export class Connection {
     const innerTable = await this.inner.openTable(
       name,
       cleanseStorageOptions(options?.storageOptions),
-      options?.indexCacheSize,
     );
     return new Table(innerTable);
   }

@@ -176,7 +176,6 @@ impl Connection {
         &self,
         name: String,
         storage_options: Option<HashMap<String, String>>,
-        index_cache_size: Option<u32>,
     ) -> napi::Result<Table> {
         let mut builder = self.get_inner()?.open_table(&name);
         if let Some(storage_options) = storage_options {

@@ -184,9 +183,6 @@ impl Connection {
                 builder = builder.storage_option(key, value);
             }
         }
-        if let Some(index_cache_size) = index_cache_size {
-            builder = builder.index_cache_size(index_cache_size);
-        }
         let tbl = builder
             .execute()
             .await

@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.6.11
+current_version = 0.6.9
 commit = True
 message = [python] Bump version: {current_version} → {new_version}
 tag = True

@@ -14,7 +14,7 @@ name = "_lancedb"
 crate-type = ["cdylib"]
 
 [dependencies]
-arrow = { version = "51.0.0", features = ["pyarrow"] }
+arrow = { version = "50.0.0", features = ["pyarrow"] }
 lancedb = { path = "../rust/lancedb" }
 env_logger = "0.10"
 pyo3 = { version = "0.20", features = ["extension-module", "abi3-py38"] }

@@ -1,6 +1,6 @@
 [project]
 name = "lancedb"
-version = "0.6.11"
+version = "0.6.9"
 dependencies = [
     "deprecation",
     "pylance==0.10.12",

@@ -224,23 +224,13 @@ class DBConnection(EnforceOverrides):
     def __getitem__(self, name: str) -> LanceTable:
         return self.open_table(name)
 
-    def open_table(self, name: str, *, index_cache_size: Optional[int] = None) -> Table:
+    def open_table(self, name: str) -> Table:
         """Open a Lance Table in the database.
 
         Parameters
         ----------
         name: str
             The name of the table.
-        index_cache_size: int, default 256
-            Set the size of the index cache, specified as a number of entries
-
-            The exact meaning of an "entry" will depend on the type of index:
-            * IVF - there is one entry for each IVF partition
-            * BTREE - there is one entry for the entire index
-
-            This cache applies to the entire opened table, across all indices.
-            Setting this value higher will increase performance on larger datasets
-            at the expense of more RAM
 
         Returns
         -------

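For reference, the keyword being dropped from this docstring was exercised as shown in the sketch below; a minimal example against the synchronous API (the path and table name are placeholders), mirroring the removed test further down in this compare.

```python
import lancedb

db = lancedb.connect("/tmp/db")
db.create_table("docs", data=[{"id": 0}])

# Old-side signature: cap the per-table index cache (IVF partitions, BTREE
# entries, etc.) at 256 entries. On the new side of this diff the keyword is
# gone and open_table() takes only the table name.
table = db.open_table("docs", index_cache_size=256)
print(table.count_rows())
```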
@@ -258,18 +248,6 @@ class DBConnection(EnforceOverrides):
         """
         raise NotImplementedError
 
-    def rename_table(self, cur_name: str, new_name: str):
-        """Rename a table in the database.
-
-        Parameters
-        ----------
-        cur_name: str
-            The current name of the table.
-        new_name: str
-            The new name of the table.
-        """
-        raise NotImplementedError
-
     def drop_database(self):
         """
         Drop database

@@ -429,9 +407,7 @@ class LanceDBConnection(DBConnection):
         return tbl
 
     @override
-    def open_table(
-        self, name: str, *, index_cache_size: Optional[int] = None
-    ) -> LanceTable:
+    def open_table(self, name: str) -> LanceTable:
         """Open a table in the database.
 
         Parameters

@@ -443,7 +419,7 @@ class LanceDBConnection(DBConnection):
         -------
         A LanceTable object representing the table.
         """
-        return LanceTable.open(self, name, index_cache_size=index_cache_size)
+        return LanceTable.open(self, name)
 
     @override
     def drop_table(self, name: str, ignore_missing: bool = False):

@@ -775,10 +751,7 @@ class AsyncConnection(object):
         return AsyncTable(new_table)
 
     async def open_table(
-        self,
-        name: str,
-        storage_options: Optional[Dict[str, str]] = None,
-        index_cache_size: Optional[int] = None,
+        self, name: str, storage_options: Optional[Dict[str, str]] = None
     ) -> Table:
         """Open a Lance Table in the database.
 

@@ -791,22 +764,12 @@ class AsyncConnection(object):
             connection will be inherited by the table, but can be overridden here.
             See available options at
             https://lancedb.github.io/lancedb/guides/storage/
-        index_cache_size: int, default 256
-            Set the size of the index cache, specified as a number of entries
-
-            The exact meaning of an "entry" will depend on the type of index:
-            * IVF - there is one entry for each IVF partition
-            * BTREE - there is one entry for the entire index
-
-            This cache applies to the entire opened table, across all indices.
-            Setting this value higher will increase performance on larger datasets
-            at the expense of more RAM
 
         Returns
         -------
         A LanceTable object representing the table.
         """
-        table = await self._inner.open_table(name, storage_options, index_cache_size)
+        table = await self._inner.open_table(name, storage_options)
         return AsyncTable(table)
 
     async def drop_table(self, name: str):

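The async connection loses the same keyword; a minimal sketch of the new-side call is shown below (the path and table name are placeholders).

```python
import asyncio
import lancedb

async def main():
    db = await lancedb.connect_async("/tmp/db")
    await db.create_table("docs", data=[{"id": 0}])
    # New-side signature: just the name, plus optional storage_options.
    table = await db.open_table("docs")
    print(await table.count_rows())

asyncio.run(main())
```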
@@ -94,7 +94,7 @@ class RemoteDBConnection(DBConnection):
             yield item
 
     @override
-    def open_table(self, name: str, *, index_cache_size: Optional[int] = None) -> Table:
+    def open_table(self, name: str) -> Table:
         """Open a Lance Table in the database.
 
         Parameters

@@ -110,12 +110,6 @@ class RemoteDBConnection(DBConnection):
 
         self._client.mount_retry_adapter_for_table(name)
 
-        if index_cache_size is not None:
-            logging.info(
-                "index_cache_size is ignored in LanceDb Cloud"
-                " (there is no local cache to configure)"
-            )
-
         # check if table exists
         if self._table_cache.get(name) is None:
             self._client.post(f"/v1/table/{name}/describe/")

@@ -287,24 +281,6 @@ class RemoteDBConnection(DBConnection):
         )
         self._table_cache.pop(name)
 
-    @override
-    def rename_table(self, cur_name: str, new_name: str):
-        """Rename a table in the database.
-
-        Parameters
-        ----------
-        cur_name: str
-            The current name of the table.
-        new_name: str
-            The new name of the table.
-        """
-        self._client.post(
-            f"/v1/table/{cur_name}/rename/",
-            json={"new_table_name": new_name},
-        )
-        self._table_cache.pop(cur_name)
-        self._table_cache[new_name] = True
-
     async def close(self):
         """Close the connection to the database."""
         self._client.close()

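The method removed here was the Cloud-only implementation of DBConnection.rename_table (the local connection raises NotImplementedError, as the earlier hunk shows). A sketch of how it was called is below; the connection details and table names are placeholders.

```python
import lancedb

# LanceDB Cloud connection; credentials and region are placeholders.
db = lancedb.connect("db://my-project", api_key="sk-...", region="us-east-1")

# Old-side behaviour: POSTs /v1/table/{cur_name}/rename/ and updates the
# client-side table cache. Removed on the new side of this diff.
db.rename_table("old_table_name", "new_table_name")
```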
@@ -72,7 +72,7 @@ class RemoteTable(Table):
         return resp
 
     def index_stats(self, index_uuid: str):
-        """List all the stats of a specified index"""
+        """List all the indices on the table"""
         resp = self._conn._client.post(
             f"/v1/table/{self._name}/index/{index_uuid}/stats/"
         )

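A sketch of how this endpoint's response is typically consumed from the Python client; the index UUID is a placeholder, and the field names follow the VectorIndexStatistics struct changed later in this compare.

```python
# Assumes `table` is a RemoteTable obtained from a LanceDB Cloud connection.
stats = table.index_stats("00000000-0000-0000-0000-000000000000")
print(stats["num_indexed_rows"], stats["num_unindexed_rows"])
```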
@@ -485,6 +485,64 @@ class RemoteTable(Table):
 
         payload = {"predicate": where, "updates": updates}
         self._conn._client.post(f"/v1/table/{self._name}/update/", data=payload)
 
+    def checkout(self, version: int):
+        """Checkout a version of the table. This is an in-place operation.
+
+        This allows viewing previous versions of the table. If you wish to
+        keep writing to the dataset starting from an old version, then use
+        the `restore` function.
+
+        Calling this method will set the table into time-travel mode. If you
+        wish to return to standard mode, call `checkout_latest`.
+
+        Parameters
+        ----------
+        version : int
+            The version to checkout.
+
+        Examples ??? to be changed
+        --------
+        >>> import lancedb
+        >>> data = [{"vector": [1.1, 0.9], "type": "vector"}]
+        >>> db = lancedb.connect("db://...", api_key="...", # doctest: +SKIP
+        ...                      region="...") # doctest: +SKIP
+        >>> table = db.create_table("my_table", data) # doctest: +SKIP
+        >>> table.version
+        2
+        >>> table.to_pandas()
+               vector    type
+        0  [1.1, 0.9]  vector
+        >>> table.add([{"vector": [0.5, 0.2], "type": "vector"}])
+        >>> table.version
+        3
+        >>> table.checkout(2)
+        >>> table.to_pandas()
+               vector    type
+        0  [1.1, 0.9]  vector
+        """
+
+    def checkout_latest(self):
+        """checkout_latest() is not yet supported on LanceDB cloud"""
+        raise NotImplementedError("checkout_latest() is not yet supported on LanceDB cloud")
+
+    def restore(self, version: int = None):
+        """Restore a version of the table. This is an in-place operation.
+
+        This creates a new version where the data is equivalent to the
+        specified previous version. Data is not copied (as of python-v0.2.1).
+
+        Parameters
+        ----------
+        version : int, default None
+            The version to restore. If unspecified then restores the currently
+            checked out version. If the currently checked out version is the
+            latest version then this is a no-op.
+        Examples
+        --------
+        >>> import lancedb
+        """
+        max_version =
+
     def cleanup_old_versions(self, *_):
         """cleanup_old_versions() is not supported on the LanceDB cloud"""

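These remote stubs mirror the time-travel flow that the local LanceTable already exposes; a minimal sketch against a local database follows (the path, table name, and data are illustrative).

```python
import lancedb

db = lancedb.connect("/tmp/db")
table = db.create_table("events", data=[{"vector": [1.1, 0.9], "type": "vector"}])

table.add([{"vector": [0.5, 0.2], "type": "vector"}])  # each write creates a new version
table.checkout(table.version - 1)  # read-only time-travel to the prior version
table.restore()                    # write a new version equal to the checked-out one
print(table.count_rows())          # 1 row again after the restore
```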
@@ -806,7 +806,6 @@ class _LanceLatestDatasetRef(_LanceDatasetRef):
     """Reference to the latest version of a LanceDataset."""
 
     uri: str
-    index_cache_size: Optional[int] = None
     read_consistency_interval: Optional[timedelta] = None
     last_consistency_check: Optional[float] = None
     _dataset: Optional[LanceDataset] = None

@@ -814,9 +813,7 @@ class _LanceLatestDatasetRef(_LanceDatasetRef):
     @property
     def dataset(self) -> LanceDataset:
         if not self._dataset:
-            self._dataset = lance.dataset(
-                self.uri, index_cache_size=self.index_cache_size
-            )
+            self._dataset = lance.dataset(self.uri)
             self.last_consistency_check = time.monotonic()
         elif self.read_consistency_interval is not None:
             now = time.monotonic()

@@ -845,15 +842,12 @@ class _LanceLatestDatasetRef(_LanceDatasetRef):
 class _LanceTimeTravelRef(_LanceDatasetRef):
     uri: str
     version: int
-    index_cache_size: Optional[int] = None
     _dataset: Optional[LanceDataset] = None
 
     @property
     def dataset(self) -> LanceDataset:
         if not self._dataset:
-            self._dataset = lance.dataset(
-                self.uri, version=self.version, index_cache_size=self.index_cache_size
-            )
+            self._dataset = lance.dataset(self.uri, version=self.version)
         return self._dataset
 
     @dataset.setter

@@ -890,8 +884,6 @@ class LanceTable(Table):
         connection: "LanceDBConnection",
         name: str,
         version: Optional[int] = None,
-        *,
-        index_cache_size: Optional[int] = None,
     ):
         self._conn = connection
         self.name = name

@@ -900,13 +892,11 @@ class LanceTable(Table):
             self._ref = _LanceTimeTravelRef(
                 uri=self._dataset_uri,
                 version=version,
-                index_cache_size=index_cache_size,
             )
         else:
             self._ref = _LanceLatestDatasetRef(
                 uri=self._dataset_uri,
                 read_consistency_interval=connection.read_consistency_interval,
-                index_cache_size=index_cache_size,
             )
 
     @classmethod

@@ -368,15 +368,6 @@ async def test_create_exist_ok_async(tmp_path):
     # await db.create_table("test", schema=bad_schema, exist_ok=True)
 
 
-def test_open_table_sync(tmp_path):
-    db = lancedb.connect(tmp_path)
-    db.create_table("test", data=[{"id": 0}])
-    assert db.open_table("test").count_rows() == 1
-    assert db.open_table("test", index_cache_size=0).count_rows() == 1
-    with pytest.raises(FileNotFoundError, match="does not exist"):
-        db.open_table("does_not_exist")
-
-
 @pytest.mark.asyncio
 async def test_open_table(tmp_path):
     db = await lancedb.connect_async(tmp_path)

@@ -406,10 +397,6 @@ async def test_open_table(tmp_path):
         }
     )
 
-    # No way to verify this yet, but at least make sure we
-    # can pass the parameter
-    await db.open_table("test", index_cache_size=0)
-
     with pytest.raises(ValueError, match="was not found"):
         await db.open_table("does_not_exist")
 

@@ -134,21 +134,17 @@ impl Connection {
         })
     }
 
-    #[pyo3(signature = (name, storage_options = None, index_cache_size = None))]
+    #[pyo3(signature = (name, storage_options = None))]
     pub fn open_table(
         self_: PyRef<'_, Self>,
         name: String,
         storage_options: Option<HashMap<String, String>>,
-        index_cache_size: Option<u32>,
     ) -> PyResult<&PyAny> {
         let inner = self_.get_inner()?.clone();
         let mut builder = inner.open_table(name);
         if let Some(storage_options) = storage_options {
             builder = builder.storage_options(storage_options);
         }
-        if let Some(index_cache_size) = index_cache_size {
-            builder = builder.index_cache_size(index_cache_size);
-        }
         future_into_py(self_.py(), async move {
             let table = builder.execute().await.infer_error()?;
             Ok(Table::new(table))

@@ -52,7 +52,7 @@ aws-sdk-kms = { version = "1.0" }
 aws-config = { version = "1.0" }
 
 [features]
-default = []
+default = ["remote"]
 remote = ["dep:reqwest"]
 fp16kernels = ["lance-linalg/fp16kernels"]
 s3-test = []

@@ -33,9 +33,6 @@ use crate::table::{NativeTable, WriteOptions};
 use crate::utils::validate_table_name;
 use crate::Table;
 
-#[cfg(feature = "remote")]
-use log::warn;
-
 pub const LANCE_FILE_EXTENSION: &str = "lance";
 
 pub type TableBuilderCallback = Box<dyn FnOnce(OpenTableBuilder) -> OpenTableBuilder + Send>;

@@ -582,7 +579,6 @@ impl ConnectBuilder {
         let api_key = self.api_key.ok_or_else(|| Error::InvalidInput {
             message: "An api_key is required when connecting to LanceDb Cloud".to_string(),
         })?;
-        warn!("The rust implementation of the remote client is not yet ready for use.");
         let internal = Arc::new(crate::remote::db::RemoteDatabase::try_new(
             &self.uri,
             &api_key,

@@ -913,23 +909,12 @@ impl ConnectionInternal for Database {
             }
         }
 
-        // Some ReadParams are exposed in the OpenTableBuilder, but we also
-        // let the user provide their own ReadParams.
-        //
-        // If we have a user provided ReadParams use that
-        // If we don't then start with the default ReadParams and customize it with
-        // the options from the OpenTableBuilder
-        let read_params = options.lance_read_params.unwrap_or_else(|| ReadParams {
-            index_cache_size: options.index_cache_size as usize,
-            ..Default::default()
-        });
-
         let native_table = Arc::new(
             NativeTable::open_with_params(
                 &table_uri,
                 &options.name,
                 self.store_wrapper.clone(),
-                Some(read_params),
+                options.lance_read_params,
                 self.read_consistency_interval,
             )
             .await?,

@@ -1047,6 +1032,7 @@ mod tests {
     }
 
     #[tokio::test]
+    #[ignore = "this can't pass due to https://github.com/lancedb/lancedb/issues/1019, enable it after the bug fixed"]
     async fn test_open_table() {
         let tmp_dir = tempdir().unwrap();
         let uri = tmp_dir.path().to_str().unwrap();

@@ -46,18 +46,10 @@ impl VectorIndex {
     }
 }
 
-#[derive(Debug, Deserialize)]
-pub struct VectorIndexMetadata {
-    pub metric_type: String,
-    pub index_type: String,
-}
-
 #[derive(Debug, Deserialize)]
 pub struct VectorIndexStatistics {
     pub num_indexed_rows: usize,
     pub num_unindexed_rows: usize,
-    pub index_type: String,
-    pub indices: Vec<VectorIndexMetadata>,
 }
 
 /// Builder for an IVF PQ index.

@@ -350,16 +350,8 @@ mod test {
 
     #[tokio::test]
     async fn test_e2e() {
-        let dir1 = tempfile::tempdir()
-            .unwrap()
-            .into_path()
-            .canonicalize()
-            .unwrap();
-        let dir2 = tempfile::tempdir()
-            .unwrap()
-            .into_path()
-            .canonicalize()
-            .unwrap();
+        let dir1 = tempfile::tempdir().unwrap().into_path();
+        let dir2 = tempfile::tempdir().unwrap().into_path();
 
         let secondary_store = LocalFileSystem::new_with_prefix(dir2.to_str().unwrap()).unwrap();
         let object_store_wrapper = Arc::new(MirroringObjectStoreWrapper {

@@ -34,16 +34,6 @@
 //! cargo install lancedb
 //! ```
 //!
-//! ## Crate Features
-//!
-//! ### Experimental Features
-//!
-//! These features are not enabled by default. They are experimental or in-development features that
-//! are not yet ready to be released.
-//!
-//! - `remote` - Enable remote client to connect to LanceDB cloud. This is not yet fully implemented
-//!   and should not be enabled.
-//!
 //! ### Quick Start
 //!
 //! #### Connect to a database.

@@ -1061,26 +1061,6 @@ impl NativeTable {
         }
     }
 
-    pub async fn get_index_type(&self, index_uuid: &str) -> Result<Option<String>> {
-        match self.load_index_stats(index_uuid).await? {
-            Some(stats) => Ok(Some(stats.index_type)),
-            None => Ok(None),
-        }
-    }
-
-    pub async fn get_distance_type(&self, index_uuid: &str) -> Result<Option<String>> {
-        match self.load_index_stats(index_uuid).await? {
-            Some(stats) => Ok(Some(
-                stats
-                    .indices
-                    .iter()
-                    .map(|i| i.metric_type.clone())
-                    .collect(),
-            )),
-            None => Ok(None),
-        }
-    }
-
     pub async fn load_indices(&self) -> Result<Vec<VectorIndex>> {
         let dataset = self.dataset.get().await?;
         let (indices, mf) = futures::try_join!(dataset.load_indices(), dataset.latest_manifest())?;