Mirror of https://github.com/lancedb/lancedb.git, synced 2025-12-26 06:39:57 +00:00.
In addition, there are also a number of changes in nodejs to the docstrings of existing methods, because this PR adds a jsdoc linter.
81 lines · 2.6 KiB · Python
from typing import Dict, List, Optional, Tuple
|
|
|
|
import pyarrow as pa
|
|
|
|
class Index:
    """Stub declaration of an index configuration object.

    Instances are produced by the static factory methods below and consumed by
    ``Table.create_index``; the actual implementation lives outside this stub.
    """

    @staticmethod
    def ivf_pq(
        distance_type: Optional[str],
        num_partitions: Optional[int],
        num_sub_vectors: Optional[int],
        max_iterations: Optional[int],
        sample_rate: Optional[int],
        # All parameters are Optional: None presumably means "use the
        # implementation's default" — confirm against the implementation.
    ) -> "Index": ...
    # Return annotation is quoted because ``Index`` is referenced inside its
    # own class body; an unquoted name would raise NameError if this file were
    # ever executed as a plain .py module.

    @staticmethod
    def btree() -> "Index": ...
|
|
|
|
class Connection(object):
    """Stub declaration of a database connection.

    Exposes async methods to enumerate tables and to create new tables from
    Arrow data or from a bare schema.
    """

    async def table_names(
        self, start_after: Optional[str], limit: Optional[int]
    ) -> List[str]: ...
    # ``List[str]`` (was ``list[str]``) for consistency with the ``typing``
    # aliases used everywhere else in this file.
    # ``start_after``/``limit`` suggest keyset pagination — TODO confirm.

    async def create_table(
        self, name: str, mode: str, data: pa.RecordBatchReader
    ) -> "Table": ...
    # ``Table`` is quoted: the class is defined later in this file.

    async def create_empty_table(
        self, name: str, mode: str, schema: pa.Schema
    ) -> "Table": ...
|
|
|
|
class Table:
    """Stub declaration of a single table.

    Groups schema access, data mutation (``add``/``update``), row counting,
    version control (``checkout``/``restore``), index management, and query
    construction (``query``/``vector_search``).
    """

    def name(self) -> str: ...

    def __repr__(self) -> str: ...

    async def schema(self) -> pa.Schema: ...

    async def add(self, data: pa.RecordBatchReader, mode: str) -> None: ...

    async def update(self, updates: Dict[str, str], where: Optional[str]) -> None: ...
    # ``updates`` maps column name -> new value; values are declared ``str``,
    # presumably SQL-like expressions — confirm against callers.

    async def count_rows(self, filter: Optional[str]) -> int: ...

    async def create_index(
        self, column: str, config: Optional[Index], replace: Optional[bool]
    ): ...

    async def version(self) -> int: ...

    # NOTE(review): ``version`` is unannotated here; presumably an int matching
    # ``version()``'s return type — confirm against the implementation.
    async def checkout(self, version): ...

    async def checkout_latest(self): ...

    async def restore(self): ...

    async def list_indices(self) -> List["IndexConfig"]: ...
    # Forward references below are quoted: ``IndexConfig``, ``Query`` and
    # ``VectorQuery`` are defined later in this file.

    def query(self) -> "Query": ...

    def vector_search(self) -> "VectorQuery": ...
|
|
|
|
class IndexConfig:
    """Stub describing an index that exists on a table.

    Returned by ``Table.list_indices``.
    """

    # String identifier of the index kind — exact values not visible here.
    index_type: str
    # Names of the columns the index covers.
    columns: List[str]
|
|
|
|
async def connect(
    uri: str,
    api_key: Optional[str],
    region: Optional[str],
    host_override: Optional[str],
    read_consistency_interval: Optional[float],
) -> Connection:
    """Open a :class:`Connection` to the database at ``uri``.

    ``api_key``, ``region`` and ``host_override`` are Optional and presumably
    only apply to remote/cloud connections — confirm against the
    implementation.  ``read_consistency_interval`` is a float; looks like a
    duration in seconds — TODO confirm units.
    """
    ...
|
|
|
|
class RecordBatchStream:
    """Stub declaration of an async stream of Arrow record batches."""

    def schema(self) -> pa.Schema: ...

    async def next(self) -> Optional[pa.RecordBatch]: ...
    # A None result presumably signals end of stream — confirm against the
    # implementation.
|
|
|
|
class Query:
    """Stub declaration of a plain (non-vector) query builder.

    ``nearest_to`` upgrades the query to a :class:`VectorQuery`; ``execute``
    yields results as a :class:`RecordBatchStream`.
    """

    def where(self, filter: str): ...

    def select(self, columns: Tuple[str, str]): ...
    # NOTE(review): ``Tuple[str, str]`` types ``columns`` as exactly one pair;
    # this looks like it may be intended as a *list* of (name, expression)
    # pairs — confirm against callers.

    def limit(self, limit: int): ...

    # Quoted forward reference: ``VectorQuery`` is defined later in this file.
    def nearest_to(self, query_vec: pa.Array) -> "VectorQuery": ...

    async def execute(self) -> RecordBatchStream: ...
|
|
|
|
class VectorQuery:
    """Stub declaration of a vector-search query builder.

    The unannotated builder methods (``where``, ``select``, ``limit``, …)
    presumably return the query for chaining — confirm; return types are not
    declared in this stub.
    """

    async def execute(self) -> RecordBatchStream: ...

    def where(self, filter: str): ...

    def select(self, columns: List[str]): ...

    def select_with_projection(self, columns: Tuple[str, str]): ...
    # NOTE(review): ``Tuple[str, str]`` types ``columns`` as exactly one pair,
    # unlike ``select`` above which takes ``List[str]`` — confirm intent.

    def limit(self, limit: int): ...

    def column(self, column: str): ...

    def distance_type(self, distance_type: str): ...

    # Presumably applies ``where`` filters after the vector search instead of
    # before — confirm against the implementation.
    def postfilter(self): ...

    def refine_factor(self, refine_factor: int): ...

    def nprobes(self, nprobes: int): ...

    def bypass_vector_index(self): ...