feat: add the optimize function to nodejs and async python (#1257)
The optimize function is crucial for getting good performance when building a large-scale dataset, but until now it was only exposed in Rust (many sync Python users are probably reaching it via to_lance today). This PR adds the optimize function to nodejs and to async Python. I left the function marked experimental because optimization is likely to change (e.g. if we add features like "optimize on write"). I also exposed only the `cleanup_older_than` configuration parameter, since it is very commonly used; the rest have sensible defaults, and we don't have a good reason to recommend different values for them anyway.
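For reference, here is a minimal sketch of the new async Python surface. The database path and table name are made up, and connect_async is assumed from the existing async API; only the optimize() call itself is what this PR adds:

import asyncio
from datetime import timedelta

import lancedb


async def main():
    # Hypothetical local database path and table name.
    db = await lancedb.connect_async("/tmp/lancedb-demo")
    table = await db.create_table("items", data=[{"x": [1]}, {"x": [2]}])

    # One call compacts small files, prunes versions older than a day,
    # and brings indices up to date.
    stats = await table.optimize(cleanup_older_than=timedelta(days=1))
    print(stats.compaction.files_removed, stats.prune.old_versions_removed)


asyncio.run(main())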
@@ -86,3 +86,17 @@ class VectorQuery:
     def refine_factor(self, refine_factor: int): ...
     def nprobes(self, nprobes: int): ...
     def bypass_vector_index(self): ...
+
+class CompactionStats:
+    fragments_removed: int
+    fragments_added: int
+    files_removed: int
+    files_added: int
+
+class RemovalStats:
+    bytes_removed: int
+    old_versions_removed: int
+
+class OptimizeStats:
+    compaction: CompactionStats
+    prune: RemovalStats
@@ -58,7 +58,7 @@ if TYPE_CHECKING:
     import PIL
     from lance.dataset import CleanupStats, ReaderLike
 
-    from ._lancedb import Table as LanceDBTable
+    from ._lancedb import Table as LanceDBTable, OptimizeStats
     from .db import LanceDBConnection
     from .index import BTree, IndexConfig, IvfPq
 
@@ -2377,6 +2377,49 @@ class AsyncTable:
         """
         await self._inner.restore()
 
+    async def optimize(
+        self, *, cleanup_older_than: Optional[timedelta] = None
+    ) -> OptimizeStats:
+        """
+        Optimize the on-disk data and indices for better performance.
+
+        Modeled after ``VACUUM`` in PostgreSQL.
+
+        Optimization covers three operations:
+
+        * Compaction: Merges small files into larger ones
+        * Prune: Removes old versions of the dataset
+        * Index: Optimizes the indices, adding new data to existing indices
+
+        Parameters
+        ----------
+        cleanup_older_than: timedelta, optional, default 7 days
+            All files belonging to versions older than this will be removed. Set
+            to 0 days to remove all versions except the latest. The latest version
+            is never removed.
+
+        Experimental API
+        ----------------
+
+        The optimization process is undergoing active development and may change.
+        Our goal with these changes is to improve the performance of optimization
+        and reduce its complexity.
+
+        That being said, it is essential today to run optimize if you want the best
+        performance. It should be stable and safe to use in production, but it is
+        our hope that the API may be simplified (or not even need to be called) in
+        the future.
+
+        How frequently an application should call optimize depends on how often the
+        data is modified. If data is frequently added, deleted, or updated, then
+        optimize should be run frequently. A good rule of thumb is to run optimize
+        after adding or modifying 100,000 or more records, or after more than 20
+        data modification operations.
+        """
+        if cleanup_older_than is not None:
+            cleanup_older_than = round(cleanup_older_than.total_seconds() * 1000)
+        return await self._inner.optimize(cleanup_older_than)
+
     async def list_indices(self) -> IndexConfig:
         """
         List all indices that have been created with Self::create_index
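As a rough illustration of the rule of thumb in the docstring above, an application could track its own write volume and trigger optimize once either threshold is crossed. This OptimizeScheduler helper is hypothetical and not part of lancedb; the thresholds come straight from the guidance:

from datetime import timedelta

# Thresholds from the docstring's rule of thumb: optimize after ~100,000
# modified records or more than 20 data modification operations.
RECORD_THRESHOLD = 100_000
OPERATION_THRESHOLD = 20


class OptimizeScheduler:
    """Counts writes and decides when a table is due for optimize()."""

    def __init__(self) -> None:
        self.records_modified = 0
        self.operations = 0

    def record_write(self, num_records: int) -> None:
        self.records_modified += num_records
        self.operations += 1

    async def maybe_optimize(self, table) -> None:
        if (
            self.records_modified >= RECORD_THRESHOLD
            or self.operations > OPERATION_THRESHOLD
        ):
            # Keep the default 7-day retention window for old versions.
            await table.optimize(cleanup_older_than=timedelta(days=7))
            self.records_modified = 0
            self.operations = 0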
@@ -1025,3 +1025,29 @@ async def test_time_travel(db_async: AsyncConnection):
     # Can't use restore if not checked out
     with pytest.raises(ValueError, match="checkout before running restore"):
         await table.restore()
+
+
+@pytest.mark.asyncio
+async def test_optimize(db_async: AsyncConnection):
+    table = await db_async.create_table(
+        "test",
+        data=[{"x": [1]}],
+    )
+    await table.add(
+        data=[
+            {"x": [2]},
+        ],
+    )
+    stats = await table.optimize()
+    assert stats.compaction.files_removed == 2
+    assert stats.compaction.files_added == 1
+    assert stats.compaction.fragments_added == 1
+    assert stats.compaction.fragments_removed == 2
+    assert stats.prune.bytes_removed == 0
+    assert stats.prune.old_versions_removed == 0
+
+    stats = await table.optimize(cleanup_older_than=timedelta(seconds=0))
+    assert stats.prune.bytes_removed > 0
+    assert stats.prune.old_versions_removed == 3
+
+    assert await table.query().to_arrow() == pa.table({"x": [[1], [2]]})