Mirror of https://github.com/lancedb/lancedb.git, synced 2025-12-23 05:19:58 +00:00

Compare commits: change_to_ ... python-v0. (45 commits)
| SHA1 |
|---|
| 1884fe8a3e |
| d8111b259c |
| 3c74bf5c7a |
| b64bb75a82 |
| 93e03ec702 |
| 7a94a7e171 |
| acae6522fb |
| 005d5b64ac |
| 1e89d07fe2 |
| 1da55719e7 |
| 9d0ca5a823 |
| 1e0cc69401 |
| f31e0c749d |
| 7a3ef68306 |
| 43952e01d7 |
| 495c335831 |
| 77707db543 |
| d6d7ad3b06 |
| e58d64c286 |
| 76cbd18c46 |
| 4abb38ac70 |
| cc7bc5011d |
| 8193183304 |
| cf28b58b7d |
| e3b7ee47b9 |
| 97c9c906e4 |
| 358f86b9c6 |
| 5489e215a3 |
| bc0814767b |
| 8960a8e535 |
| a8568ddc72 |
| 55f88346d0 |
| dfb9a28795 |
| a797f5fe59 |
| 3cd84c9375 |
| 5ca83fdc99 |
| 33cc9b682f |
| b3e5ac6d2a |
| 0fe844034d |
| f41eb899dc |
| e7022b990e |
| ea86dad4b7 |
| a45656b8b6 |
| bc19a75f65 |
| 8e348ab4bd |
```diff
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.5.2"
+current_version = "0.5.2-final.1"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
```
```diff
@@ -46,6 +46,7 @@ runs:
     with:
       command: build
       working-directory: python
+      docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
       target: aarch64-unknown-linux-gnu
       manylinux: ${{ inputs.manylinux }}
       args: ${{ inputs.args }}
```
.github/workflows/build_mac_wheel/action.yml (vendored, 1 change)

```diff
@@ -21,5 +21,6 @@ runs:
     with:
       command: build
       args: ${{ inputs.args }}
+      docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
      working-directory: python
      interpreter: 3.${{ inputs.python-minor-version }}
```
```diff
@@ -26,8 +26,9 @@ runs:
     with:
       command: build
       args: ${{ inputs.args }}
+      docker-options: "-e PIP_EXTRA_INDEX_URL=https://pypi.fury.io/lancedb/"
       working-directory: python
-  - uses: actions/upload-artifact@v3
+  - uses: actions/upload-artifact@v4
     with:
       name: windows-wheels
       path: python\target\wheels
```
.github/workflows/python.yml (vendored, 4 changes)

```diff
@@ -65,7 +65,7 @@ jobs:
           workspaces: python
       - name: Install
         run: |
-          pip install -e .[tests,dev,embeddings]
+          pip install --extra-index-url https://pypi.fury.io/lancedb/ -e .[tests,dev,embeddings]
           pip install tantivy
           pip install mlx
       - name: Doctest
@@ -189,7 +189,7 @@ jobs:
      - name: Install lancedb
        run: |
          pip install "pydantic<2"
-         pip install -e .[tests]
+         pip install --extra-index-url https://pypi.fury.io/lancedb/ -e .[tests]
          pip install tantivy
      - name: Run tests
        run: pytest -m "not slow and not s3_test" -x -v --durations=30 python/tests
```
.github/workflows/run_tests/action.yml (vendored, 2 changes)

```diff
@@ -15,7 +15,7 @@ runs:
   - name: Install lancedb
     shell: bash
     run: |
-      pip3 install $(ls target/wheels/lancedb-*.whl)[tests,dev]
+      pip3 install --extra-index-url https://pypi.fury.io/lancedb/ $(ls target/wheels/lancedb-*.whl)[tests,dev]
   - name: Setup localstack for integration tests
     if: ${{ inputs.integration == 'true' }}
     shell: bash
```
```diff
@@ -14,7 +14,7 @@ repos:
     hooks:
       - id: local-biome-check
         name: biome check
-        entry: npx @biomejs/biome check --config-path nodejs/biome.json nodejs/
+        entry: npx @biomejs/biome@1.7.3 check --config-path nodejs/biome.json nodejs/
         language: system
         types: [text]
         files: "nodejs/.*"
```
Cargo.toml (12 changes)

```diff
@@ -20,11 +20,11 @@ keywords = ["lancedb", "lance", "database", "vector", "search"]
 categories = ["database-implementations"]
 
 [workspace.dependencies]
-lance = { "version" = "=0.12.1", "features" = ["dynamodb"] }
-lance-index = { "version" = "=0.12.1" }
-lance-linalg = { "version" = "=0.12.1" }
-lance-testing = { "version" = "=0.12.1" }
-lance-datafusion = { "version" = "=0.12.1" }
+lance = { "version" = "=0.13.0", "features" = ["dynamodb"] }
+lance-index = { "version" = "=0.13.0" }
+lance-linalg = { "version" = "=0.13.0" }
+lance-testing = { "version" = "=0.13.0" }
+lance-datafusion = { "version" = "=0.13.0" }
 # Note that this one does not include pyarrow
 arrow = { version = "51.0", optional = false }
 arrow-array = "51.0"
@@ -35,7 +35,7 @@ arrow-schema = "51.0"
 arrow-arith = "51.0"
 arrow-cast = "51.0"
 async-trait = "0"
-chrono = "0.4.35"
+chrono = "=0.4.39"
 datafusion-physical-plan = "37.1"
 half = { "version" = "=2.4.1", default-features = false, features = [
   "num-traits",
```
@@ -54,6 +54,16 @@ This returns the result as a list of dictionaries as follows.

!!! note
    LanceDB automatically searches on the existing FTS index if the input to the search is of type `str`. If you provide a vector as input, LanceDB will search the ANN index instead.

## Tokenization

By default, the text is tokenized by splitting on punctuation and whitespace, and tokens longer than 40 characters are removed. For language-specific tokenization, provide the `tokenizer_name` argument with the two-letter language code followed by `"_stem"`; for English this is `"en_stem"`:

```python
table.create_fts_index("text", tokenizer_name="en_stem")
```

The following [languages](https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html) are currently supported.

## Index multiple columns

If you have multiple string columns to index, there's no need to combine them manually -- simply pass them all as a list to `create_fts_index`:

@@ -139,6 +149,7 @@ is treated as a phrase query.

In general, a query that's declared as a phrase query will be wrapped in double quotes during parsing, with nested double quotes replaced by single quotes.

## Configurations

By default, LanceDB configures a 1GB heap size limit for creating the index. You can
```diff
@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.5.2",
+  "version": "0.5.2-final.1",
   "description": " Serverless, low-latency vector database for AI applications",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "scripts": {
     "tsc": "tsc -b",
-    "build": "npm run tsc && cargo-cp-artifact --artifact cdylib lancedb-node index.node -- cargo build --message-format=json",
+    "build": "npm run tsc && cargo-cp-artifact --artifact cdylib lancedb_node index.node -- cargo build --message-format=json",
     "build-release": "npm run build -- --release",
     "test": "npm run tsc && mocha -recursive dist/test",
     "integration-test": "npm run tsc && mocha -recursive dist/integration_test",
```
```diff
@@ -712,9 +712,9 @@ export interface VectorIndex {
 export interface IndexStats {
   numIndexedRows: number | null
   numUnindexedRows: number | null
-  index_type: string | null
-  distance_type: string | null
-  completed_at: string | null
+  indexType: string | null
+  distanceType: string | null
+  completedAt: string | null
 }
 
 /**
```
```diff
@@ -522,9 +522,9 @@ export class RemoteTable<T = number[]> implements Table<T> {
     return {
       numIndexedRows: body?.num_indexed_rows,
       numUnindexedRows: body?.num_unindexed_rows,
-      index_type: body?.index_type,
-      distance_type: body?.distance_type,
-      completed_at: body?.completed_at
+      indexType: body?.index_type,
+      distanceType: body?.distance_type,
+      completedAt: body?.completed_at
    }
  }
```
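For consumers, the rename means index statistics are now read entirely in camelCase on the TypeScript side; a small sketch (the index name `vec_idx` follows the index tests later in this diff):

```ts
const stats = await tbl.indexStats("vec_idx");
if (stats) {
  // numIndexedRows / numUnindexedRows were already camelCase;
  // indexType, distanceType and completedAt now match them.
  console.log(stats.numIndexedRows, stats.numUnindexedRows);
  console.log(stats.indexType, stats.distanceType, stats.completedAt);
}
```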
```diff
@@ -57,6 +57,18 @@ describe("given a connection", () => {
     expect(db.isOpen()).toBe(false);
     await expect(db.tableNames()).rejects.toThrow("Connection is closed");
   });
+  it("should be able to create a table from an object arg `createTable(options)`, or args `createTable(name, data, options)`", async () => {
+    let tbl = await db.createTable("test", [{ id: 1 }, { id: 2 }]);
+    await expect(tbl.countRows()).resolves.toBe(2);
+
+    tbl = await db.createTable({
+      name: "test",
+      data: [{ id: 3 }],
+      mode: "overwrite",
+    });
+
+    await expect(tbl.countRows()).resolves.toBe(1);
+  });
 
   it("should fail if creating table twice, unless overwrite is true", async () => {
     let tbl = await db.createTable("test", [{ id: 1 }, { id: 2 }]);
```
```diff
@@ -230,7 +230,7 @@ describe("embedding functions", () => {
     },
   );
 
-  test.only.each([new Float16(), new Float32(), new Float64()])(
+  test.each([new Float16(), new Float32(), new Float64()])(
     "should be able to provide auto embeddings with multiple float datatypes",
     async (floatType) => {
       @register("test1")
```
```diff
@@ -132,6 +132,140 @@ describe.each([arrow, arrowOld])("Given a table", (arrow: any) => {
     });
   });
 
+  describe("merge insert", () => {
+    let tmpDir: tmp.DirResult;
+    let table: Table;
+
+    beforeEach(async () => {
+      tmpDir = tmp.dirSync({ unsafeCleanup: true });
+      const conn = await connect(tmpDir.name);
+
+      table = await conn.createTable("some_table", [
+        { a: 1, b: "a" },
+        { a: 2, b: "b" },
+        { a: 3, b: "c" },
+      ]);
+    });
+    afterEach(() => tmpDir.removeCallback());
+
+    test("upsert", async () => {
+      const newData = [
+        { a: 2, b: "x" },
+        { a: 3, b: "y" },
+        { a: 4, b: "z" },
+      ];
+      await table
+        .mergeInsert("a")
+        .whenMatchedUpdateAll()
+        .whenNotMatchedInsertAll()
+        .execute(newData);
+      const expected = [
+        { a: 1, b: "a" },
+        { a: 2, b: "x" },
+        { a: 3, b: "y" },
+        { a: 4, b: "z" },
+      ];
+
+      expect(
+        JSON.parse(JSON.stringify((await table.toArrow()).toArray())),
+      ).toEqual(expected);
+    });
+    test("conditional update", async () => {
+      const newData = [
+        { a: 2, b: "x" },
+        { a: 3, b: "y" },
+        { a: 4, b: "z" },
+      ];
+      await table
+        .mergeInsert("a")
+        .whenMatchedUpdateAll({ where: "target.b = 'b'" })
+        .execute(newData);
+
+      const expected = [
+        { a: 1, b: "a" },
+        { a: 2, b: "x" },
+        { a: 3, b: "c" },
+      ];
+      // round trip to arrow and back to json to avoid comparing arrow objects to js object
+      // biome-ignore lint/suspicious/noExplicitAny: test
+      let res: any[] = JSON.parse(
+        JSON.stringify((await table.toArrow()).toArray()),
+      );
+      res = res.sort((a, b) => a.a - b.a);
+
+      expect(res).toEqual(expected);
+    });
+
+    test("insert if not exists", async () => {
+      const newData = [
+        { a: 2, b: "x" },
+        { a: 3, b: "y" },
+        { a: 4, b: "z" },
+      ];
+      await table.mergeInsert("a").whenNotMatchedInsertAll().execute(newData);
+      const expected = [
+        { a: 1, b: "a" },
+        { a: 2, b: "b" },
+        { a: 3, b: "c" },
+        { a: 4, b: "z" },
+      ];
+      // biome-ignore lint/suspicious/noExplicitAny: test
+      let res: any[] = JSON.parse(
+        JSON.stringify((await table.toArrow()).toArray()),
+      );
+      res = res.sort((a, b) => a.a - b.a);
+      expect(res).toEqual(expected);
+    });
+    test("replace range", async () => {
+      const newData = [
+        { a: 2, b: "x" },
+        { a: 4, b: "z" },
+      ];
+      await table
+        .mergeInsert("a")
+        .whenMatchedUpdateAll()
+        .whenNotMatchedInsertAll()
+        .whenNotMatchedBySourceDelete({ where: "a > 2" })
+        .execute(newData);
+
+      const expected = [
+        { a: 1, b: "a" },
+        { a: 2, b: "x" },
+        { a: 4, b: "z" },
+      ];
+      // biome-ignore lint/suspicious/noExplicitAny: test
+      let res: any[] = JSON.parse(
+        JSON.stringify((await table.toArrow()).toArray()),
+      );
+      res = res.sort((a, b) => a.a - b.a);
+      expect(res).toEqual(expected);
+    });
+    test("replace range no condition", async () => {
+      const newData = [
+        { a: 2, b: "x" },
+        { a: 4, b: "z" },
+      ];
+      await table
+        .mergeInsert("a")
+        .whenMatchedUpdateAll()
+        .whenNotMatchedInsertAll()
+        .whenNotMatchedBySourceDelete()
+        .execute(newData);
+
+      const expected = [
+        { a: 2, b: "x" },
+        { a: 4, b: "z" },
+      ];
+
+      // biome-ignore lint/suspicious/noExplicitAny: test
+      let res: any[] = JSON.parse(
+        JSON.stringify((await table.toArrow()).toArray()),
+      );
+      res = res.sort((a, b) => a.a - b.a);
+      expect(res).toEqual(expected);
+    });
+  });
 
 describe("When creating an index", () => {
   let tmpDir: tmp.DirResult;
   const schema = new Schema([
@@ -171,6 +305,7 @@ describe("When creating an index", () => {
     const indices = await tbl.listIndices();
     expect(indices.length).toBe(1);
     expect(indices[0]).toEqual({
       name: "vec_idx",
+      indexType: "IvfPq",
       columns: ["vec"],
     });
@@ -227,6 +362,24 @@ describe("When creating an index", () => {
     for await (const r of tbl.query().where("id > 1").select(["id"])) {
       expect(r.numRows).toBe(298);
     }
+    // should also work with 'filter' alias
+    for await (const r of tbl.query().filter("id > 1").select(["id"])) {
+      expect(r.numRows).toBe(298);
+    }
   });
 
+  test("should be able to get index stats", async () => {
+    await tbl.createIndex("id");
+
+    const stats = await tbl.indexStats("id_idx");
+    expect(stats).toBeDefined();
+    expect(stats?.numIndexedRows).toEqual(300);
+    expect(stats?.numUnindexedRows).toEqual(0);
+  });
+
+  test("when getting stats on non-existent index", async () => {
+    const stats = await tbl.indexStats("some non-existent index");
+    expect(stats).toBeUndefined();
+  });
+
   // TODO: Move this test to the query API test (making sure we can reject queries
```
```diff
@@ -77,7 +77,7 @@
       "noDuplicateObjectKeys": "error",
       "noDuplicateParameters": "error",
       "noEmptyBlockStatements": "error",
-      "noExplicitAny": "error",
+      "noExplicitAny": "warn",
       "noExtraNonNullAssertion": "error",
       "noFallthroughSwitchClause": "error",
       "noFunctionAssign": "error",
```
```diff
@@ -12,38 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-import { Table as ArrowTable, Schema } from "./arrow";
-import {
-  fromTableToBuffer,
-  isArrowTable,
-  makeArrowTable,
-  makeEmptyTable,
-} from "./arrow";
+import { Table as ArrowTable, Data, Schema } from "./arrow";
+import { fromTableToBuffer, makeEmptyTable } from "./arrow";
 import { EmbeddingFunctionConfig, getRegistry } from "./embedding/registry";
-import { ConnectionOptions, Connection as LanceDbConnection } from "./native";
-import { Table } from "./table";
-
-/**
- * Connect to a LanceDB instance at the given URI.
- *
- * Accepted formats:
- *
- * - `/path/to/database` - local database
- * - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage
- * - `db://host:port` - remote database (LanceDB cloud)
- * @param {string} uri - The uri of the database. If the database uri starts
- * with `db://` then it connects to a remote database.
- * @see {@link ConnectionOptions} for more details on the URI format.
- */
-export async function connect(
-  uri: string,
-  opts?: Partial<ConnectionOptions>,
-): Promise<Connection> {
-  opts = opts ?? {};
-  opts.storageOptions = cleanseStorageOptions(opts.storageOptions);
-  const nativeConn = await LanceDbConnection.new(uri, opts);
-  return new Connection(nativeConn);
-}
+import { Connection as LanceDbConnection } from "./native";
+import { LocalTable, Table } from "./table";
 
 export interface CreateTableOptions {
   /**
@@ -117,7 +90,6 @@ export interface TableNamesOptions {
   /** An optional limit to the number of results to return. */
   limit?: number;
 }
-
 /**
  * A LanceDB Connection that allows you to open tables and create new ones.
  *
@@ -136,17 +108,15 @@ export interface TableNamesOptions
  * Any created tables are independent and will continue to work even if
  * the underlying connection has been closed.
  */
-export class Connection {
-  readonly inner: LanceDbConnection;
-
-  constructor(inner: LanceDbConnection) {
-    this.inner = inner;
+export abstract class Connection {
+  [Symbol.for("nodejs.util.inspect.custom")](): string {
+    return this.display();
   }
 
-  /** Return true if the connection has not been closed */
-  isOpen(): boolean {
-    return this.inner.isOpen();
-  }
+  /**
+   * Return true if the connection has not been closed
+   */
+  abstract isOpen(): boolean;
 
   /**
    * Close the connection, releasing any underlying resources.
@@ -155,14 +125,12 @@ export class Connection {
   *
   * Any attempt to use the connection after it is closed will result in an error.
   */
-  close(): void {
-    this.inner.close();
-  }
+  abstract close(): void;
 
-  /** Return a brief description of the connection */
-  display(): string {
-    return this.inner.display();
-  }
+  /**
+   * Return a brief description of the connection
+   */
+  abstract display(): string;
 
   /**
    * List all the table names in this database.
@@ -170,15 +138,86 @@ export interface TableNamesOptions
   * Tables will be returned in lexicographical order.
   * @param {Partial<TableNamesOptions>} options - options to control the
   * paging / start point
-  *
   */
-  async tableNames(options?: Partial<TableNamesOptions>): Promise<string[]> {
-    return this.inner.tableNames(options?.startAfter, options?.limit);
-  }
+  abstract tableNames(options?: Partial<TableNamesOptions>): Promise<string[]>;
 
   /**
    * Open a table in the database.
    * @param {string} name - The name of the table
    */
+  abstract openTable(
+    name: string,
+    options?: Partial<OpenTableOptions>,
+  ): Promise<Table>;
+
+  /**
+   * Creates a new Table and initialize it with new data.
+   * @param {object} options - The options object.
+   * @param {string} options.name - The name of the table.
+   * @param {Data} options.data - Non-empty Array of Records to be inserted into the table
+   */
+  abstract createTable(
+    options: {
+      name: string;
+      data: Data;
+    } & Partial<CreateTableOptions>,
+  ): Promise<Table>;
+  /**
+   * Creates a new Table and initialize it with new data.
+   * @param {string} name - The name of the table.
+   * @param {Record<string, unknown>[] | ArrowTable} data - Non-empty Array of Records
+   * to be inserted into the table
+   */
+  abstract createTable(
+    name: string,
+    data: Record<string, unknown>[] | ArrowTable,
+    options?: Partial<CreateTableOptions>,
+  ): Promise<Table>;
+
+  /**
+   * Creates a new empty Table
+   * @param {string} name - The name of the table.
+   * @param {Schema} schema - The schema of the table
+   */
+  abstract createEmptyTable(
+    name: string,
+    schema: Schema,
+    options?: Partial<CreateTableOptions>,
+  ): Promise<Table>;
+
+  /**
+   * Drop an existing table.
+   * @param {string} name The name of the table to drop.
+   */
+  abstract dropTable(name: string): Promise<void>;
+}
+
+export class LocalConnection extends Connection {
+  readonly inner: LanceDbConnection;
+
+  constructor(inner: LanceDbConnection) {
+    super();
+    this.inner = inner;
+  }
+
+  isOpen(): boolean {
+    return this.inner.isOpen();
+  }
+
+  close(): void {
+    this.inner.close();
+  }
+
+  display(): string {
+    return this.inner.display();
+  }
+
+  async tableNames(options?: Partial<TableNamesOptions>): Promise<string[]> {
+    return this.inner.tableNames(options?.startAfter, options?.limit);
+  }
+
   async openTable(
     name: string,
     options?: Partial<OpenTableOptions>,
@@ -189,55 +228,35 @@ export class Connection {
       options?.indexCacheSize,
     );
 
-    return new Table(innerTable);
+    return new LocalTable(innerTable);
   }
 
-  /**
-   * Creates a new Table and initialize it with new data.
-   * @param {string} name - The name of the table.
-   * @param {Record<string, unknown>[] | ArrowTable} data - Non-empty Array of Records
-   * to be inserted into the table
-   */
   async createTable(
-    name: string,
-    data: Record<string, unknown>[] | ArrowTable,
+    nameOrOptions:
+      | string
+      | ({ name: string; data: Data } & Partial<CreateTableOptions>),
+    data?: Record<string, unknown>[] | ArrowTable,
     options?: Partial<CreateTableOptions>,
   ): Promise<Table> {
-    let mode: string = options?.mode ?? "create";
-    const existOk = options?.existOk ?? false;
-
-    if (mode === "create" && existOk) {
-      mode = "exist_ok";
+    if (typeof nameOrOptions !== "string" && "name" in nameOrOptions) {
+      const { name, data, ...options } = nameOrOptions;
+      return this.createTable(name, data, options);
     }
 
-    let table: ArrowTable;
-    if (isArrowTable(data)) {
-      table = data;
-    } else {
-      table = makeArrowTable(data, options);
+    if (data === undefined) {
+      throw new Error("data is required");
     }
 
-    const buf = await fromTableToBuffer(
-      table,
-      options?.embeddingFunction,
-      options?.schema,
-    );
+    const { buf, mode } = await Table.parseTableData(data, options);
     const innerTable = await this.inner.createTable(
-      name,
+      nameOrOptions,
       buf,
       mode,
       cleanseStorageOptions(options?.storageOptions),
       options?.useLegacyFormat,
     );
 
-    return new Table(innerTable);
+    return new LocalTable(innerTable);
   }
 
   /**
    * Creates a new empty Table
    * @param {string} name - The name of the table.
    * @param {Schema} schema - The schema of the table
    */
   async createEmptyTable(
     name: string,
     schema: Schema,
@@ -265,13 +284,9 @@ export class Connection {
       cleanseStorageOptions(options?.storageOptions),
       options?.useLegacyFormat,
     );
-    return new Table(innerTable);
+    return new LocalTable(innerTable);
   }
 
-  /**
-   * Drop an existing table.
-   * @param {string} name The name of the table to drop.
-   */
   async dropTable(name: string): Promise<void> {
     return this.inner.dropTable(name);
   }
@@ -280,7 +295,7 @@ export class Connection {
 /**
  * Takes storage options and makes all the keys snake case.
  */
-function cleanseStorageOptions(
+export function cleanseStorageOptions(
   options?: Record<string, string>,
 ): Record<string, string> | undefined {
   if (options === undefined) {
```
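The body of `cleanseStorageOptions` is cut off above; a minimal sketch of the documented behavior (camelCase keys converted to snake_case before being handed to native code — the helper name `camelToSnakeCase` and the regex approach below are assumptions, not the repo's actual implementation):

```ts
// Hypothetical helper illustrating the documented key-cleansing behavior.
function camelToSnakeCase(key: string): string {
  return key.replace(/[A-Z]/g, (c) => `_${c.toLowerCase()}`);
}

function cleanseStorageOptionsSketch(
  options?: Record<string, string>,
): Record<string, string> | undefined {
  if (options === undefined) {
    return undefined;
  }
  const result: Record<string, string> = {};
  for (const [key, value] of Object.entries(options)) {
    result[camelToSnakeCase(key)] = value; // e.g. awsRegion -> aws_region
  }
  return result;
}
```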
````diff
@@ -12,25 +12,43 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+import {
+  Connection,
+  LocalConnection,
+  cleanseStorageOptions,
+} from "./connection";
+
+import {
+  ConnectionOptions,
+  Connection as LanceDbConnection,
+} from "./native.js";
+
+import { RemoteConnection, RemoteConnectionOptions } from "./remote";
+
 export {
   WriteOptions,
   WriteMode,
   AddColumnsSql,
   ColumnAlteration,
   ConnectionOptions,
   IndexStatistics,
   IndexMetadata,
   IndexConfig,
 } from "./native.js";
 
 export {
   makeArrowTable,
   MakeArrowTableOptions,
   Data,
   VectorColumnOptions,
 } from "./arrow";
 
 export {
-  connect,
   Connection,
   CreateTableOptions,
   TableNamesOptions,
 } from "./connection";
@@ -38,6 +56,87 @@ export {
   VectorQuery,
   RecordBatchIterator,
 } from "./query";
 
 export { Index, IndexOptions, IvfPqOptions } from "./indices";
-export { Table, AddDataOptions, IndexConfig, UpdateOptions } from "./table";
+export { Table, AddDataOptions, UpdateOptions } from "./table";
 
 export * as embedding from "./embedding";
+
+/**
+ * Connect to a LanceDB instance at the given URI.
+ *
+ * Accepted formats:
+ *
+ * - `/path/to/database` - local database
+ * - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage
+ * - `db://host:port` - remote database (LanceDB cloud)
+ * @param {string} uri - The uri of the database. If the database uri starts
+ * with `db://` then it connects to a remote database.
+ * @see {@link ConnectionOptions} for more details on the URI format.
+ * @example
+ * ```ts
+ * const conn = await connect("/path/to/database");
+ * ```
+ * @example
+ * ```ts
+ * const conn = await connect(
+ *   "s3://bucket/path/to/database",
+ *   {storageOptions: {timeout: "60s"}
+ * });
+ * ```
+ */
+export async function connect(
+  uri: string,
+  opts?: Partial<ConnectionOptions | RemoteConnectionOptions>,
+): Promise<Connection>;
+/**
+ * Connect to a LanceDB instance at the given URI.
+ *
+ * Accepted formats:
+ *
+ * - `/path/to/database` - local database
+ * - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage
+ * - `db://host:port` - remote database (LanceDB cloud)
+ * @param options - The options to use when connecting to the database
+ * @see {@link ConnectionOptions} for more details on the URI format.
+ * @example
+ * ```ts
+ * const conn = await connect({
+ *   uri: "/path/to/database",
+ *   storageOptions: {timeout: "60s"}
+ * });
+ * ```
+ */
+export async function connect(
+  opts: Partial<RemoteConnectionOptions | ConnectionOptions> & { uri: string },
+): Promise<Connection>;
+export async function connect(
+  uriOrOptions:
+    | string
+    | (Partial<RemoteConnectionOptions | ConnectionOptions> & { uri: string }),
+  opts: Partial<ConnectionOptions | RemoteConnectionOptions> = {},
+): Promise<Connection> {
+  let uri: string | undefined;
+  if (typeof uriOrOptions !== "string") {
+    const { uri: uri_, ...options } = uriOrOptions;
+    uri = uri_;
+    opts = options;
+  } else {
+    uri = uriOrOptions;
+  }
+
+  if (!uri) {
+    throw new Error("uri is required");
+  }
+
+  if (uri?.startsWith("db://")) {
+    return new RemoteConnection(uri, opts as RemoteConnectionOptions);
+  }
+  opts = (opts as ConnectionOptions) ?? {};
+  (<ConnectionOptions>opts).storageOptions = cleanseStorageOptions(
+    (<ConnectionOptions>opts).storageOptions,
+  );
+  const nativeConn = await LanceDbConnection.new(uri, opts);
+  return new LocalConnection(nativeConn);
+}
````
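Taken together, the new `connect` routes `db://` URIs to `RemoteConnection` (which falls back to the `LANCEDB_API_KEY` / `LANCEDB_REGION` environment variables for credentials) and everything else to `LocalConnection`. A sketch, with a placeholder database name and region:

```ts
// Local: storage options are cleansed to snake_case before reaching native code.
const local = await connect({ uri: "/tmp/mydb", storageOptions: { timeout: "60s" } });

// Remote: requires apiKey and region, passed explicitly or via the environment.
const remote = await connect("db://my-db", {
  apiKey: process.env.LANCEDB_API_KEY,
  region: "us-east-1",
});
console.log(remote.display()); // RemoteConnection(my-db)
```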
nodejs/lancedb/merge.ts (new file, 70 lines)

```ts
import { Data, fromDataToBuffer } from "./arrow";
import { NativeMergeInsertBuilder } from "./native";

/** A builder used to create and run a merge insert operation */
export class MergeInsertBuilder {
  #native: NativeMergeInsertBuilder;

  /** Construct a MergeInsertBuilder. __Internal use only.__ */
  constructor(native: NativeMergeInsertBuilder) {
    this.#native = native;
  }

  /**
   * Rows that exist in both the source table (new data) and
   * the target table (old data) will be updated, replacing
   * the old row with the corresponding matching row.
   *
   * If there are multiple matches then the behavior is undefined.
   * Currently this causes multiple copies of the row to be created
   * but that behavior is subject to change.
   *
   * An optional condition may be specified. If it is, then only
   * matched rows that satisfy the condition will be updated. Any
   * rows that do not satisfy the condition will be left as they
   * are. Failing to satisfy the condition does not cause a
   * "matched row" to become a "not matched" row.
   *
   * The condition should be an SQL string. Use the prefix
   * `target.` to refer to rows in the target table (old data)
   * and the prefix `source.` to refer to rows in the source
   * table (new data).
   *
   * For example, "target.last_update < source.last_update"
   */
  whenMatchedUpdateAll(options?: { where: string }): MergeInsertBuilder {
    return new MergeInsertBuilder(
      this.#native.whenMatchedUpdateAll(options?.where),
    );
  }

  /**
   * Rows that exist only in the source table (new data) should
   * be inserted into the target table.
   */
  whenNotMatchedInsertAll(): MergeInsertBuilder {
    return new MergeInsertBuilder(this.#native.whenNotMatchedInsertAll());
  }

  /**
   * Rows that exist only in the target table (old data) will be
   * deleted. An optional condition can be provided to limit what
   * data is deleted.
   *
   * @param options.where - An optional condition to limit what data is deleted
   */
  whenNotMatchedBySourceDelete(options?: {
    where: string;
  }): MergeInsertBuilder {
    return new MergeInsertBuilder(
      this.#native.whenNotMatchedBySourceDelete(options?.where),
    );
  }

  /**
   * Executes the merge insert operation
   *
   * Nothing is returned but the `Table` is updated
   */
  async execute(data: Data): Promise<void> {
    const buffer = await fromDataToBuffer(data);
    await this.#native.execute(buffer);
  }
}
```
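The builder mirrors the merge-insert tests earlier in this diff; a typical upsert keyed on column `a` looks like:

```ts
await table
  .mergeInsert("a")                 // join source and target on column "a"
  .whenMatchedUpdateAll()           // overwrite rows whose "a" already exists
  .whenNotMatchedInsertAll()        // insert rows whose "a" is new
  .execute([
    { a: 2, b: "x" },
    { a: 4, b: "z" },
  ]);
```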
```diff
@@ -114,6 +114,14 @@ export class QueryBase<
     this.inner.onlyIf(predicate);
     return this as unknown as QueryType;
   }
+
+  /**
+   * A filter statement to be applied to this query.
+   * @alias where
+   * @deprecated Use `where` instead
+   */
+  filter(predicate: string): QueryType {
+    return this.where(predicate);
+  }
 
   /**
    * Return only the specified columns.
```
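The two spellings below are therefore equivalent; `filter` simply forwards to `where` (taken from the query test earlier in this diff):

```ts
for await (const r of tbl.query().where("id > 1").select(["id"])) { /* ... */ }
for await (const r of tbl.query().filter("id > 1").select(["id"])) { /* ... */ } // deprecated
```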
nodejs/lancedb/remote/client.ts (new file, 221 lines)

```ts
// Copyright 2023 LanceDB Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import axios, {
  AxiosError,
  type AxiosResponse,
  type ResponseType,
} from "axios";
import { Table as ArrowTable } from "../arrow";
import { tableFromIPC } from "../arrow";
import { VectorQuery } from "../query";

export class RestfulLanceDBClient {
  #dbName: string;
  #region: string;
  #apiKey: string;
  #hostOverride?: string;
  #closed: boolean = false;
  #connectionTimeout: number = 12 * 1000; // 12 seconds
  #readTimeout: number = 30 * 1000; // 30 seconds
  #session?: import("axios").AxiosInstance;

  constructor(
    dbName: string,
    apiKey: string,
    region: string,
    hostOverride?: string,
    connectionTimeout?: number,
    readTimeout?: number,
  ) {
    this.#dbName = dbName;
    this.#apiKey = apiKey;
    this.#region = region;
    this.#hostOverride = hostOverride ?? this.#hostOverride;
    this.#connectionTimeout = connectionTimeout ?? this.#connectionTimeout;
    this.#readTimeout = readTimeout ?? this.#readTimeout;
  }

  // todo: cache the session.
  get session(): import("axios").AxiosInstance {
    if (this.#session !== undefined) {
      return this.#session;
    } else {
      return axios.create({
        baseURL: this.url,
        headers: {
          // biome-ignore lint/style/useNamingConvention: external api
          Authorization: `Bearer ${this.#apiKey}`,
        },
        transformResponse: decodeErrorData,
        timeout: this.#connectionTimeout,
      });
    }
  }

  get url(): string {
    return (
      this.#hostOverride ??
      `https://${this.#dbName}.${this.#region}.api.lancedb.com`
    );
  }

  get headers(): { [key: string]: string } {
    const headers: { [key: string]: string } = {
      "x-api-key": this.#apiKey,
      "x-request-id": "na",
    };
    if (this.#region == "local") {
      headers["Host"] = `${this.#dbName}.${this.#region}.api.lancedb.com`;
    }
    if (this.#hostOverride) {
      headers["x-lancedb-database"] = this.#dbName;
    }
    return headers;
  }

  isOpen(): boolean {
    return !this.#closed;
  }

  private checkNotClosed(): void {
    if (this.#closed) {
      throw new Error("Connection is closed");
    }
  }

  close(): void {
    this.#session = undefined;
    this.#closed = true;
  }

  // biome-ignore lint/suspicious/noExplicitAny: <explanation>
  async get(uri: string, params?: Record<string, any>): Promise<any> {
    this.checkNotClosed();
    uri = new URL(uri, this.url).toString();
    let response;
    try {
      response = await this.session.get(uri, {
        headers: this.headers,
        params,
      });
    } catch (e) {
      if (e instanceof AxiosError) {
        response = e.response;
      } else {
        throw e;
      }
    }

    RestfulLanceDBClient.checkStatus(response!);
    return response!.data;
  }

  // biome-ignore lint/suspicious/noExplicitAny: api response
  async post(uri: string, body?: any): Promise<any>;
  async post(
    uri: string,
    // biome-ignore lint/suspicious/noExplicitAny: api request
    body: any,
    additional: {
      config?: { responseType: "arraybuffer" };
      headers?: Record<string, string>;
      params?: Record<string, string>;
    },
  ): Promise<Buffer>;
  async post(
    uri: string,
    // biome-ignore lint/suspicious/noExplicitAny: api request
    body?: any,
    additional?: {
      config?: { responseType: ResponseType };
      headers?: Record<string, string>;
      params?: Record<string, string>;
    },
    // biome-ignore lint/suspicious/noExplicitAny: api response
  ): Promise<any> {
    this.checkNotClosed();
    uri = new URL(uri, this.url).toString();
    additional = Object.assign(
      { config: { responseType: "json" } },
      additional,
    );

    const headers = { ...this.headers, ...additional.headers };

    if (!headers["Content-Type"]) {
      headers["Content-Type"] = "application/json";
    }
    let response;
    try {
      response = await this.session.post(uri, body, {
        headers,
        responseType: additional!.config!.responseType,
        params: new Map(Object.entries(additional.params ?? {})),
      });
    } catch (e) {
      if (e instanceof AxiosError) {
        response = e.response;
      } else {
        throw e;
      }
    }
    RestfulLanceDBClient.checkStatus(response!);
    if (additional!.config!.responseType === "arraybuffer") {
      return response!.data;
    } else {
      return JSON.parse(response!.data);
    }
  }

  async listTables(limit = 10, pageToken = ""): Promise<string[]> {
    const json = await this.get("/v1/table", { limit, pageToken });
    return json.tables;
  }

  async query(tableName: string, query: VectorQuery): Promise<ArrowTable> {
    const tbl = await this.post(`/v1/table/${tableName}/query`, query, {
      config: {
        responseType: "arraybuffer",
      },
    });
    return tableFromIPC(tbl);
  }

  static checkStatus(response: AxiosResponse): void {
    if (response.status === 404) {
      throw new Error(`Not found: ${response.data}`);
    } else if (response.status >= 400 && response.status < 500) {
      throw new Error(
        `Bad Request: ${response.status}, error: ${response.data}`,
      );
    } else if (response.status >= 500 && response.status < 600) {
      throw new Error(
        `Internal Server Error: ${response.status}, error: ${response.data}`,
      );
    } else if (response.status !== 200) {
      throw new Error(
        `Unknown Error: ${response.status}, error: ${response.data}`,
      );
    }
  }
}

function decodeErrorData(data: unknown) {
  if (Buffer.isBuffer(data)) {
    const decoded = data.toString("utf-8");
    return decoded;
  }
  return data;
}
```
nodejs/lancedb/remote/connection.ts (new file, 196 lines)

```ts
import { Schema } from "apache-arrow";
import { Data, fromTableToStreamBuffer, makeEmptyTable } from "../arrow";
import {
  Connection,
  CreateTableOptions,
  OpenTableOptions,
  TableNamesOptions,
} from "../connection";
import { Table } from "../table";
import { TTLCache } from "../util";
import { RestfulLanceDBClient } from "./client";
import { RemoteTable } from "./table";

export interface RemoteConnectionOptions {
  apiKey?: string;
  region?: string;
  hostOverride?: string;
  connectionTimeout?: number;
  readTimeout?: number;
}

export class RemoteConnection extends Connection {
  #dbName: string;
  #apiKey: string;
  #region: string;
  #client: RestfulLanceDBClient;
  #tableCache = new TTLCache(300_000);

  constructor(
    url: string,
    {
      apiKey,
      region,
      hostOverride,
      connectionTimeout,
      readTimeout,
    }: RemoteConnectionOptions,
  ) {
    super();
    apiKey = apiKey ?? process.env.LANCEDB_API_KEY;
    region = region ?? process.env.LANCEDB_REGION;

    if (!apiKey) {
      throw new Error("apiKey is required when connecting to LanceDB Cloud");
    }

    if (!region) {
      throw new Error("region is required when connecting to LanceDB Cloud");
    }

    const parsed = new URL(url);
    if (parsed.protocol !== "db:") {
      throw new Error(
        `invalid protocol: ${parsed.protocol}, only accepts db://`,
      );
    }

    this.#dbName = parsed.hostname;
    this.#apiKey = apiKey;
    this.#region = region;
    this.#client = new RestfulLanceDBClient(
      this.#dbName,
      this.#apiKey,
      this.#region,
      hostOverride,
      connectionTimeout,
      readTimeout,
    );
  }

  isOpen(): boolean {
    return this.#client.isOpen();
  }

  close(): void {
    return this.#client.close();
  }

  display(): string {
    return `RemoteConnection(${this.#dbName})`;
  }

  async tableNames(options?: Partial<TableNamesOptions>): Promise<string[]> {
    const response = await this.#client.get("/v1/table/", {
      limit: options?.limit ?? 10,
      // biome-ignore lint/style/useNamingConvention: <explanation>
      page_token: options?.startAfter ?? "",
    });
    const body = await response.body();
    for (const table of body.tables) {
      this.#tableCache.set(table, true);
    }
    return body.tables;
  }

  async openTable(
    name: string,
    _options?: Partial<OpenTableOptions> | undefined,
  ): Promise<Table> {
    if (this.#tableCache.get(name) === undefined) {
      await this.#client.post(
        `/v1/table/${encodeURIComponent(name)}/describe/`,
      );
      this.#tableCache.set(name, true);
    }
    return new RemoteTable(this.#client, name, this.#dbName);
  }

  async createTable(
    nameOrOptions:
      | string
      | ({ name: string; data: Data } & Partial<CreateTableOptions>),
    data?: Data,
    options?: Partial<CreateTableOptions> | undefined,
  ): Promise<Table> {
    if (typeof nameOrOptions !== "string" && "name" in nameOrOptions) {
      const { name, data, ...options } = nameOrOptions;
      return this.createTable(name, data, options);
    }
    if (data === undefined) {
      throw new Error("data is required");
    }
    if (options?.mode) {
      console.warn(
        "option 'mode' is not supported in LanceDB Cloud",
        "LanceDB Cloud only supports the default 'create' mode.",
        "If the table already exists, an error will be thrown.",
      );
    }
    if (options?.embeddingFunction) {
      console.warn(
        "embedding_functions is not yet supported on LanceDB Cloud.",
        "Please vote https://github.com/lancedb/lancedb/issues/626 ",
        "for this feature.",
      );
    }

    const { buf } = await Table.parseTableData(
      data,
      options,
      true /** streaming */,
    );

    await this.#client.post(
      `/v1/table/${encodeURIComponent(nameOrOptions)}/create/`,
      buf,
      {
        config: {
          responseType: "arraybuffer",
        },
        headers: { "Content-Type": "application/vnd.apache.arrow.stream" },
      },
    );
    this.#tableCache.set(nameOrOptions, true);
    return new RemoteTable(this.#client, nameOrOptions, this.#dbName);
  }

  async createEmptyTable(
    name: string,
    schema: Schema,
    options?: Partial<CreateTableOptions> | undefined,
  ): Promise<Table> {
    if (options?.mode) {
      console.warn(`mode is not supported on LanceDB Cloud`);
    }

    if (options?.embeddingFunction) {
      console.warn(
        "embeddingFunction is not yet supported on LanceDB Cloud.",
        "Please vote https://github.com/lancedb/lancedb/issues/626 ",
        "for this feature.",
      );
    }
    const emptyTable = makeEmptyTable(schema);
    const buf = await fromTableToStreamBuffer(emptyTable);

    await this.#client.post(
      `/v1/table/${encodeURIComponent(name)}/create/`,
      buf,
      {
        config: {
          responseType: "arraybuffer",
        },
        headers: { "Content-Type": "application/vnd.apache.arrow.stream" },
      },
    );

    this.#tableCache.set(name, true);
    return new RemoteTable(this.#client, name, this.#dbName);
  }

  async dropTable(name: string): Promise<void> {
    await this.#client.post(`/v1/table/${encodeURIComponent(name)}/drop/`);

    this.#tableCache.delete(name);
  }
}
```
nodejs/lancedb/remote/index.ts (new file, 3 lines)

```ts
export { RestfulLanceDBClient } from "./client";
export { type RemoteConnectionOptions, RemoteConnection } from "./connection";
export { RemoteTable } from "./table";
```
nodejs/lancedb/remote/table.ts (new file, 172 lines)

```ts
// Copyright 2023 LanceDB Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import { Table as ArrowTable } from "apache-arrow";

import { Data, IntoVector } from "../arrow";

import { IndexStatistics } from "..";
import { CreateTableOptions } from "../connection";
import { IndexOptions } from "../indices";
import { MergeInsertBuilder } from "../merge";
import { VectorQuery } from "../query";
import { AddDataOptions, Table, UpdateOptions } from "../table";
import { RestfulLanceDBClient } from "./client";

export class RemoteTable extends Table {
  #client: RestfulLanceDBClient;
  #name: string;

  // Used in the display() method
  #dbName: string;

  get #tablePrefix() {
    return `/v1/table/${encodeURIComponent(this.#name)}/`;
  }

  get name(): string {
    return this.#name;
  }

  public constructor(
    client: RestfulLanceDBClient,
    tableName: string,
    dbName: string,
  ) {
    super();
    this.#client = client;
    this.#name = tableName;
    this.#dbName = dbName;
  }

  isOpen(): boolean {
    // mirror the client's open state
    return this.#client.isOpen();
  }

  close(): void {
    this.#client.close();
  }

  display(): string {
    return `RemoteTable(${this.#dbName}; ${this.#name})`;
  }

  async schema(): Promise<import("apache-arrow").Schema> {
    const resp = await this.#client.post(`${this.#tablePrefix}/describe/`);
    // TODO: parse this into a valid arrow schema
    return resp.schema;
  }

  async add(data: Data, options?: Partial<AddDataOptions>): Promise<void> {
    const { buf, mode } = await Table.parseTableData(
      data,
      options as CreateTableOptions,
      true,
    );
    await this.#client.post(`${this.#tablePrefix}/insert/`, buf, {
      params: {
        mode,
      },
      headers: {
        "Content-Type": "application/vnd.apache.arrow.stream",
      },
    });
  }

  async update(
    updates: Map<string, string> | Record<string, string>,
    options?: Partial<UpdateOptions>,
  ): Promise<void> {
    await this.#client.post(`${this.#tablePrefix}/update/`, {
      predicate: options?.where ?? null,
      updates: Object.entries(updates).map(([key, value]) => [key, value]),
    });
  }

  async countRows(filter?: unknown): Promise<number> {
    const payload = { predicate: filter };
    return await this.#client.post(`${this.#tablePrefix}/count_rows/`, payload);
  }

  async delete(predicate: unknown): Promise<void> {
    const payload = { predicate };
    await this.#client.post(`${this.#tablePrefix}/delete/`, payload);
  }

  async createIndex(
    column: string,
    options?: Partial<IndexOptions>,
  ): Promise<void> {
    if (options !== undefined) {
      console.warn("options are not yet supported on the LanceDB cloud");
    }
    const indexType = "vector";
    const metric = "L2";
    const data = {
      column,
      // biome-ignore lint/style/useNamingConvention: external API
      index_type: indexType,
      // biome-ignore lint/style/useNamingConvention: external API
      metric_type: metric,
    };
    await this.#client.post(`${this.#tablePrefix}/create_index`, data);
  }

  query(): import("..").Query {
    throw new Error("query() is not yet supported on the LanceDB cloud");
  }

  search(query: IntoVector): VectorQuery;
  search(query: string): Promise<VectorQuery>;
  search(_query: string | IntoVector): VectorQuery | Promise<VectorQuery> {
    throw new Error("search() is not yet supported on the LanceDB cloud");
  }

  vectorSearch(_vector: unknown): import("..").VectorQuery {
    throw new Error("vectorSearch() is not yet supported on the LanceDB cloud");
  }

  addColumns(_newColumnTransforms: unknown): Promise<void> {
    throw new Error("addColumns() is not yet supported on the LanceDB cloud");
  }

  alterColumns(_columnAlterations: unknown): Promise<void> {
    throw new Error("alterColumns() is not yet supported on the LanceDB cloud");
  }

  dropColumns(_columnNames: unknown): Promise<void> {
    throw new Error("dropColumns() is not yet supported on the LanceDB cloud");
  }

  async version(): Promise<number> {
    const resp = await this.#client.post(`${this.#tablePrefix}/describe/`);
    return resp.version;
  }

  checkout(_version: unknown): Promise<void> {
    throw new Error("checkout() is not yet supported on the LanceDB cloud");
  }

  checkoutLatest(): Promise<void> {
    throw new Error(
      "checkoutLatest() is not yet supported on the LanceDB cloud",
    );
  }

  restore(): Promise<void> {
    throw new Error("restore() is not yet supported on the LanceDB cloud");
  }

  optimize(_options?: unknown): Promise<import("../native").OptimizeStats> {
    throw new Error("optimize() is not yet supported on the LanceDB cloud");
  }

  async listIndices(): Promise<import("../native").IndexConfig[]> {
    return await this.#client.post(`${this.#tablePrefix}/index/list/`);
  }

  toArrow(): Promise<ArrowTable> {
    throw new Error("toArrow() is not yet supported on the LanceDB cloud");
  }

  mergeInsert(_on: string | string[]): MergeInsertBuilder {
    throw new Error("mergeInsert() is not yet supported on the LanceDB cloud");
  }

  async indexStats(_name: string): Promise<IndexStatistics | undefined> {
    throw new Error("indexStats() is not yet supported on the LanceDB cloud");
  }
}
```
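Putting the remote pieces together, a sketch of the supported path end to end (database name, table name, and row shape below are placeholders; operations such as `mergeInsert` or `toArrow` still throw on the cloud):

```ts
const db = await connect("db://my-db", { apiKey: "...", region: "us-east-1" });
const tbl = await db.openTable("my_table"); // described lazily on first open
await tbl.add([{ id: 1, vector: [0.1, 0.2] }]); // inserted as an Arrow IPC stream
console.log(await tbl.countRows()); // counted via the REST client
```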
```diff
@@ -18,20 +18,26 @@ import {
   IntoVector,
   Schema,
   fromDataToBuffer,
   fromTableToBuffer,
+  fromTableToStreamBuffer,
   isArrowTable,
   makeArrowTable,
   tableFromIPC,
 } from "./arrow";
+import { CreateTableOptions } from "./connection";
+
 import { EmbeddingFunctionConfig, getRegistry } from "./embedding/registry";
 import { IndexOptions } from "./indices";
+import { MergeInsertBuilder } from "./merge";
 import {
   AddColumnsSql,
   ColumnAlteration,
+  IndexConfig,
   IndexStatistics,
+  OptimizeStats,
   Table as _NativeTable,
 } from "./native";
 import { Query, VectorQuery } from "./query";
+export { IndexConfig } from "./native";
@@ -88,19 +94,15 @@ export interface OptimizeOptions {
  * Closing a table is optional. If not closed, it will be closed when it is garbage
  * collected.
  */
-export class Table {
-  private readonly inner: _NativeTable;
-
-  /** Construct a Table. Internal use only. */
-  constructor(inner: _NativeTable) {
-    this.inner = inner;
+export abstract class Table {
+  [Symbol.for("nodejs.util.inspect.custom")](): string {
+    return this.display();
   }
+  /** Returns the name of the table */
+  abstract get name(): string;
 
   /** Return true if the table has not been closed */
-  isOpen(): boolean {
-    return this.inner.isOpen();
-  }
-
+  abstract isOpen(): boolean;
   /**
    * Close the table, releasing any underlying resources.
    *
@@ -108,48 +110,16 @@ export class Table {
   *
   * Any attempt to use the table after it is closed will result in an error.
   */
-  close(): void {
-    this.inner.close();
-  }
-
+  abstract close(): void;
   /** Return a brief description of the table */
-  display(): string {
-    return this.inner.display();
-  }
-
-  async #getEmbeddingFunctions(): Promise<
-    Map<string, EmbeddingFunctionConfig>
-  > {
-    const schema = await this.schema();
-    const registry = getRegistry();
-    return registry.parseFunctions(schema.metadata);
-  }
-
+  abstract display(): string;
   /** Get the schema of the table. */
-  async schema(): Promise<Schema> {
-    const schemaBuf = await this.inner.schema();
-    const tbl = tableFromIPC(schemaBuf);
-    return tbl.schema;
-  }
-
+  abstract schema(): Promise<Schema>;
   /**
    * Insert records into this Table.
    * @param {Data} data Records to be inserted into the Table
    */
-  async add(data: Data, options?: Partial<AddDataOptions>): Promise<void> {
-    const mode = options?.mode ?? "append";
-    const schema = await this.schema();
-    const registry = getRegistry();
-    const functions = registry.parseFunctions(schema.metadata);
-
-    const buffer = await fromDataToBuffer(
-      data,
-      functions.values().next().value,
-      schema,
-    );
-    await this.inner.add(buffer, mode);
-  }
-
+  abstract add(data: Data, options?: Partial<AddDataOptions>): Promise<void>;
   /**
    * Update existing records in the Table
    *
@@ -175,30 +145,14 @@ export class Table {
   * @param {Partial<UpdateOptions>} options - additional options to control
   * the update behavior
   */
-  async update(
+  abstract update(
     updates: Map<string, string> | Record<string, string>,
     options?: Partial<UpdateOptions>,
-  ) {
-    const onlyIf = options?.where;
-    let columns: [string, string][];
-    if (updates instanceof Map) {
-      columns = Array.from(updates.entries());
-    } else {
-      columns = Object.entries(updates);
-    }
-    await this.inner.update(onlyIf, columns);
-  }
-
+  ): Promise<void>;
   /** Count the total number of rows in the dataset. */
-  async countRows(filter?: string): Promise<number> {
-    return await this.inner.countRows(filter);
-  }
-
+  abstract countRows(filter?: string): Promise<number>;
   /** Delete the rows that satisfy the predicate. */
-  async delete(predicate: string): Promise<void> {
-    await this.inner.delete(predicate);
-  }
-
+  abstract delete(predicate: string): Promise<void>;
   /**
    * Create an index to speed up queries.
    *
@@ -206,6 +160,9 @@ export class Table {
   * Indices on vector columns will speed up vector searches.
   * Indices on scalar columns will speed up filtering (in both
   * vector and non-vector searches)
+  *
+  * @note We currently don't support custom named indexes,
+  * The index name will always be `${column}_idx`
   * @example
   * // If the column has a vector (fixed size list) data type then
   * // an IvfPq vector index will be created.
@@ -225,13 +182,10 @@ export class Table {
   * // Or create a Scalar index
   * await table.createIndex("my_float_col");
   */
-  async createIndex(column: string, options?: Partial<IndexOptions>) {
-    // Bit of a hack to get around the fact that TS has no package-scope.
-    // biome-ignore lint/suspicious/noExplicitAny: skip
-    const nativeIndex = (options?.config as any)?.inner;
-    await this.inner.createIndex(nativeIndex, column, options?.replace);
-  }
-
+  abstract createIndex(
+    column: string,
+    options?: Partial<IndexOptions>,
+  ): Promise<void>;
   /**
    * Create a {@link Query} Builder.
    *
@@ -282,44 +236,20 @@ export class Table {
   * }
   * @returns {Query} A builder that can be used to parameterize the query
   */
-  query(): Query {
-    return new Query(this.inner);
-  }
-
+  abstract query(): Query;
   /**
    * Create a search query to find the nearest neighbors
    * of the given query vector
   * @param {string} query - the query. This will be converted to a vector using the table's provided embedding function
   * @rejects {Error} If no embedding functions are defined in the table
   */
-  search(query: string): Promise<VectorQuery>;
+  abstract search(query: string): Promise<VectorQuery>;
   /**
    * Create a search query to find the nearest neighbors
    * of the given query vector
    * @param {IntoVector} query - the query vector
   */
-  search(query: IntoVector): VectorQuery;
-  search(query: string | IntoVector): Promise<VectorQuery> | VectorQuery {
-    if (typeof query !== "string") {
-      return this.vectorSearch(query);
-    } else {
-      return this.#getEmbeddingFunctions().then(async (functions) => {
-        // TODO: Support multiple embedding functions
-        const embeddingFunc: EmbeddingFunctionConfig | undefined = functions
-          .values()
-          .next().value;
-        if (!embeddingFunc) {
-          return Promise.reject(
-            new Error("No embedding functions are defined in the table"),
-          );
-        }
-        const embeddings =
-          await embeddingFunc.function.computeQueryEmbeddings(query);
-        return this.query().nearestTo(embeddings);
-      });
-    }
-  }
-
+  abstract search(query: IntoVector): VectorQuery;
   /**
    * Search the table with a given query vector.
    *
@@ -327,11 +257,7 @@ export class Table {
   * is the same thing as calling `nearestTo` on the builder returned
   * by `query`. @see {@link Query#nearestTo} for more details.
   */
-  vectorSearch(vector: IntoVector): VectorQuery {
-    return this.query().nearestTo(vector);
-  }
-
-  // TODO: Support BatchUDF
+  abstract vectorSearch(vector: IntoVector): VectorQuery;
   /**
    * Add new columns with defined values.
   * @param {AddColumnsSql[]} newColumnTransforms pairs of column names and
@@ -339,19 +265,14 @@ export class Table {
   * expressions will be evaluated for each row in the table, and can
   * reference existing columns in the table.
   */
-  async addColumns(newColumnTransforms: AddColumnsSql[]): Promise<void> {
-    await this.inner.addColumns(newColumnTransforms);
-  }
+  abstract addColumns(newColumnTransforms: AddColumnsSql[]): Promise<void>;
```
|
||||
|
||||
/**
|
||||
* Alter the name or nullability of columns.
|
||||
* @param {ColumnAlteration[]} columnAlterations One or more alterations to
|
||||
* apply to columns.
|
||||
*/
|
||||
async alterColumns(columnAlterations: ColumnAlteration[]): Promise<void> {
|
||||
await this.inner.alterColumns(columnAlterations);
|
||||
}
|
||||
|
||||
abstract alterColumns(columnAlterations: ColumnAlteration[]): Promise<void>;
|
||||
/**
|
||||
* Drop one or more columns from the dataset
|
||||
*
|
||||
@@ -363,15 +284,10 @@ export class Table {
|
||||
* be nested column references (e.g. "a.b.c") or top-level column names
|
||||
* (e.g. "a").
|
||||
*/
|
||||
async dropColumns(columnNames: string[]): Promise<void> {
|
||||
await this.inner.dropColumns(columnNames);
|
||||
}
|
||||
|
||||
abstract dropColumns(columnNames: string[]): Promise<void>;
|
||||
/** Retrieve the version of the table */
|
||||
async version(): Promise<number> {
|
||||
return await this.inner.version();
|
||||
}
|
||||
|
||||
abstract version(): Promise<number>;
|
||||
/**
|
||||
* Checks out a specific version of the table _This is an in-place operation._
|
||||
*
|
||||
@@ -397,19 +313,14 @@ export class Table {
|
||||
* console.log(await table.version()); // 2
|
||||
* ```
|
||||
*/
|
||||
async checkout(version: number): Promise<void> {
|
||||
await this.inner.checkout(version);
|
||||
}
|
||||
|
||||
abstract checkout(version: number): Promise<void>;
|
||||
/**
|
||||
* Checkout the latest version of the table. _This is an in-place operation._
|
||||
*
|
||||
* The table will be set back into standard mode, and will track the latest
|
||||
* version of the table.
|
||||
*/
|
||||
async checkoutLatest(): Promise<void> {
|
||||
await this.inner.checkoutLatest();
|
||||
}
|
||||
abstract checkoutLatest(): Promise<void>;
|
||||
|
||||
/**
|
||||
* Restore the table to the currently checked out version
|
||||
@@ -423,10 +334,7 @@ export class Table {
|
||||
* Once the operation concludes the table will no longer be in a checked
|
||||
* out state and the read_consistency_interval, if any, will apply.
|
||||
*/
|
||||
async restore(): Promise<void> {
|
||||
await this.inner.restore();
|
||||
}
|
||||
|
||||
abstract restore(): Promise<void>;
|
||||
/**
|
||||
* Optimize the on-disk data and indices for better performance.
|
||||
*
|
||||
@@ -457,6 +365,200 @@ export class Table {
|
||||
* you have added or modified 100,000 or more records or run more than 20 data
|
||||
* modification operations.
|
||||
*/
|
||||
abstract optimize(options?: Partial<OptimizeOptions>): Promise<OptimizeStats>;
|
||||
/** List all indices that have been created with {@link Table.createIndex} */
|
||||
abstract listIndices(): Promise<IndexConfig[]>;
|
||||
/** Return the table as an arrow table */
|
||||
abstract toArrow(): Promise<ArrowTable>;
|
||||
|
||||
abstract mergeInsert(on: string | string[]): MergeInsertBuilder;
|
||||
|
||||
/** List all the stats of a specified index
|
||||
*
|
||||
* @param {string} name The name of the index.
|
||||
* @returns {IndexStatistics | undefined} The stats of the index. If the index does not exist, it will return undefined
|
||||
*/
|
||||
abstract indexStats(name: string): Promise<IndexStatistics | undefined>;
|
||||
|
||||
static async parseTableData(
|
||||
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
|
||||
data: Record<string, unknown>[] | ArrowTable<any>,
|
||||
options?: Partial<CreateTableOptions>,
|
||||
streaming = false,
|
||||
) {
|
||||
let mode: string = options?.mode ?? "create";
|
||||
const existOk = options?.existOk ?? false;
|
||||
|
||||
if (mode === "create" && existOk) {
|
||||
mode = "exist_ok";
|
||||
}
|
||||
|
||||
let table: ArrowTable;
|
||||
if (isArrowTable(data)) {
|
||||
table = data;
|
||||
} else {
|
||||
table = makeArrowTable(data, options);
|
||||
}
|
||||
if (streaming) {
|
||||
const buf = await fromTableToStreamBuffer(
|
||||
table,
|
||||
options?.embeddingFunction,
|
||||
options?.schema,
|
||||
);
|
||||
return { buf, mode };
|
||||
} else {
|
||||
const buf = await fromTableToBuffer(
|
||||
table,
|
||||
options?.embeddingFunction,
|
||||
options?.schema,
|
||||
);
|
||||
return { buf, mode };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export class LocalTable extends Table {
  private readonly inner: _NativeTable;

  constructor(inner: _NativeTable) {
    super();
    this.inner = inner;
  }
  get name(): string {
    return this.inner.name;
  }
  isOpen(): boolean {
    return this.inner.isOpen();
  }

  close(): void {
    this.inner.close();
  }

  display(): string {
    return this.inner.display();
  }

  private async getEmbeddingFunctions(): Promise<
    Map<string, EmbeddingFunctionConfig>
  > {
    const schema = await this.schema();
    const registry = getRegistry();
    return registry.parseFunctions(schema.metadata);
  }

  /** Get the schema of the table. */
  async schema(): Promise<Schema> {
    const schemaBuf = await this.inner.schema();
    const tbl = tableFromIPC(schemaBuf);
    return tbl.schema;
  }

  async add(data: Data, options?: Partial<AddDataOptions>): Promise<void> {
    const mode = options?.mode ?? "append";
    const schema = await this.schema();
    const registry = getRegistry();
    const functions = registry.parseFunctions(schema.metadata);

    const buffer = await fromDataToBuffer(
      data,
      functions.values().next().value,
      schema,
    );
    await this.inner.add(buffer, mode);
  }

  async update(
    updates: Map<string, string> | Record<string, string>,
    options?: Partial<UpdateOptions>,
  ) {
    const onlyIf = options?.where;
    let columns: [string, string][];
    if (updates instanceof Map) {
      columns = Array.from(updates.entries());
    } else {
      columns = Object.entries(updates);
    }
    await this.inner.update(onlyIf, columns);
  }

  async countRows(filter?: string): Promise<number> {
    return await this.inner.countRows(filter);
  }

  async delete(predicate: string): Promise<void> {
    await this.inner.delete(predicate);
  }

  async createIndex(column: string, options?: Partial<IndexOptions>) {
    // Bit of a hack to get around the fact that TS has no package-scope.
    // biome-ignore lint/suspicious/noExplicitAny: skip
    const nativeIndex = (options?.config as any)?.inner;
    await this.inner.createIndex(nativeIndex, column, options?.replace);
  }

  query(): Query {
    return new Query(this.inner);
  }

  search(query: string): Promise<VectorQuery>;

  search(query: IntoVector): VectorQuery;
  search(query: string | IntoVector): Promise<VectorQuery> | VectorQuery {
    if (typeof query !== "string") {
      return this.vectorSearch(query);
    } else {
      return this.getEmbeddingFunctions().then(async (functions) => {
        // TODO: Support multiple embedding functions
        const embeddingFunc: EmbeddingFunctionConfig | undefined = functions
          .values()
          .next().value;
        if (!embeddingFunc) {
          return Promise.reject(
            new Error("No embedding functions are defined in the table"),
          );
        }
        const embeddings =
          await embeddingFunc.function.computeQueryEmbeddings(query);
        return this.query().nearestTo(embeddings);
      });
    }
  }

  vectorSearch(vector: IntoVector): VectorQuery {
    return this.query().nearestTo(vector);
  }

  // TODO: Support BatchUDF

  async addColumns(newColumnTransforms: AddColumnsSql[]): Promise<void> {
    await this.inner.addColumns(newColumnTransforms);
  }

  async alterColumns(columnAlterations: ColumnAlteration[]): Promise<void> {
    await this.inner.alterColumns(columnAlterations);
  }

  async dropColumns(columnNames: string[]): Promise<void> {
    await this.inner.dropColumns(columnNames);
  }

  async version(): Promise<number> {
    return await this.inner.version();
  }

  async checkout(version: number): Promise<void> {
    await this.inner.checkout(version);
  }

  async checkoutLatest(): Promise<void> {
    await this.inner.checkoutLatest();
  }

  async restore(): Promise<void> {
    await this.inner.restore();
  }

  async optimize(options?: Partial<OptimizeOptions>): Promise<OptimizeStats> {
    let cleanupOlderThanMs;
    if (
@@ -469,13 +571,23 @@ export class Table {
    return await this.inner.optimize(cleanupOlderThanMs);
  }

  /** List all indices that have been created with {@link Table.createIndex} */
  async listIndices(): Promise<IndexConfig[]> {
    return await this.inner.listIndices();
  }

  /** Return the table as an arrow table */
  async toArrow(): Promise<ArrowTable> {
    return await this.query().toArrow();
  }

  async indexStats(name: string): Promise<IndexStatistics | undefined> {
    const stats = await this.inner.indexStats(name);
    if (stats === null) {
      return undefined;
    }
    return stats;
  }
  mergeInsert(on: string | string[]): MergeInsertBuilder {
    on = Array.isArray(on) ? on : [on];
    return new MergeInsertBuilder(this.inner.mergeInsert(on));
  }
}

nodejs/lancedb/util.ts (new file, 35 lines)
@@ -0,0 +1,35 @@
export class TTLCache {
  // biome-ignore lint/suspicious/noExplicitAny: <explanation>
  private readonly cache: Map<string, { value: any; expires: number }>;

  /**
   * @param ttl Time to live in milliseconds
   */
  constructor(private readonly ttl: number) {
    this.cache = new Map();
  }

  // biome-ignore lint/suspicious/noExplicitAny: <explanation>
  get(key: string): any | undefined {
    const entry = this.cache.get(key);
    if (entry === undefined) {
      return undefined;
    }

    if (entry.expires < Date.now()) {
      this.cache.delete(key);
      return undefined;
    }

    return entry.value;
  }

  // biome-ignore lint/suspicious/noExplicitAny: <explanation>
  set(key: string, value: any): void {
    this.cache.set(key, { value, expires: Date.now() + this.ttl });
  }

  delete(key: string): void {
    this.cache.delete(key);
  }
}
@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-arm64",
  "version": "0.5.2",
  "version": "0.5.2-final.1",
  "os": ["darwin"],
  "cpu": ["arm64"],
  "main": "lancedb.darwin-arm64.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-x64",
  "version": "0.5.2",
  "version": "0.5.2-final.1",
  "os": ["darwin"],
  "cpu": ["x64"],
  "main": "lancedb.darwin-x64.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-arm64-gnu",
  "version": "0.5.2",
  "version": "0.5.2-final.1",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "lancedb.linux-arm64-gnu.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-x64-gnu",
  "version": "0.5.2",
  "version": "0.5.2-final.1",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "lancedb.linux-x64-gnu.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-win32-x64-msvc",
  "version": "0.5.2",
  "version": "0.5.2-final.1",
  "os": ["win32"],
  "cpu": ["x64"],
  "main": "lancedb.win32-x64-msvc.node",

nodejs/package-lock.json (generated, 50 lines changed)
@@ -1,12 +1,12 @@
{
  "name": "@lancedb/lancedb",
  "version": "0.5.0",
  "version": "0.5.2",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "@lancedb/lancedb",
      "version": "0.5.0",
      "version": "0.5.2",
      "cpu": [
        "x64",
        "arm64"
@@ -19,6 +19,7 @@
      ],
      "dependencies": {
        "apache-arrow": "^15.0.0",
        "axios": "^1.7.2",
        "openai": "^4.29.2",
        "reflect-metadata": "^0.2.2"
      },
@@ -28,6 +29,7 @@
        "@biomejs/biome": "^1.7.3",
        "@jest/globals": "^29.7.0",
        "@napi-rs/cli": "^2.18.0",
        "@types/axios": "^0.14.0",
        "@types/jest": "^29.1.2",
        "@types/tmp": "^0.2.6",
        "apache-arrow-old": "npm:apache-arrow@13.0.0",
@@ -3123,6 +3125,16 @@
        "tslib": "^2.4.0"
      }
    },
    "node_modules/@types/axios": {
      "version": "0.14.0",
      "resolved": "https://registry.npmjs.org/@types/axios/-/axios-0.14.0.tgz",
      "integrity": "sha512-KqQnQbdYE54D7oa/UmYVMZKq7CO4l8DEENzOKc4aBRwxCXSlJXGz83flFx5L7AWrOQnmuN3kVsRdt+GZPPjiVQ==",
      "deprecated": "This is a stub types definition for axios (https://github.com/mzabriskie/axios). axios provides its own type definitions, so you don't need @types/axios installed!",
      "dev": true,
      "dependencies": {
        "axios": "*"
      }
    },
    "node_modules/@types/babel__core": {
      "version": "7.20.5",
      "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
@@ -3497,6 +3509,16 @@
      "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
      "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
    },
    "node_modules/axios": {
      "version": "1.7.2",
      "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.2.tgz",
      "integrity": "sha512-2A8QhOMrbomlDuiLeK9XibIBzuHeRcqqNOHp0Cyp5EoJ1IFDh+XZH3A6BkXtv0K4gFGCI0Y4BM7B1wOEi0Rmgw==",
      "dependencies": {
        "follow-redirects": "^1.15.6",
        "form-data": "^4.0.0",
        "proxy-from-env": "^1.1.0"
      }
    },
    "node_modules/babel-jest": {
      "version": "29.7.0",
      "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz",
@@ -4478,6 +4500,25 @@
      "integrity": "sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==",
      "dev": true
    },
    "node_modules/follow-redirects": {
      "version": "1.15.6",
      "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz",
      "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==",
      "funding": [
        {
          "type": "individual",
          "url": "https://github.com/sponsors/RubenVerborgh"
        }
      ],
      "engines": {
        "node": ">=4.0"
      },
      "peerDependenciesMeta": {
        "debug": {
          "optional": true
        }
      }
    },
    "node_modules/form-data": {
      "version": "4.0.0",
      "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
@@ -6359,6 +6400,11 @@
        "node": ">= 6"
      }
    },
    "node_modules/proxy-from-env": {
      "version": "1.1.0",
      "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
      "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="
    },
    "node_modules/punycode": {
      "version": "2.3.1",
      "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",

@@ -1,6 +1,16 @@
{
  "name": "@lancedb/lancedb",
  "version": "0.5.2",
  "description": "LanceDB: A serverless, low-latency vector database for AI applications",
  "keywords": [
    "database",
    "lance",
    "lancedb",
    "search",
    "vector",
    "vector database",
    "ann"
  ],
  "version": "0.5.2-final.1",
  "main": "dist/index.js",
  "exports": {
    ".": "./dist/index.js",
@@ -38,7 +48,8 @@
    "typedoc": "^0.25.7",
    "typedoc-plugin-markdown": "^3.17.1",
    "typescript": "^5.3.3",
    "typescript-eslint": "^7.1.0"
    "typescript-eslint": "^7.1.0",
    "@types/axios": "^0.14.0"
  },
  "ava": {
    "timeout": "3m"
@@ -66,6 +77,7 @@
  },
  "dependencies": {
    "apache-arrow": "^15.0.0",
    "axios": "^1.7.2",
    "openai": "^4.29.2",
    "reflect-metadata": "^0.2.2"
  }

|
||||
#[napi(factory)]
|
||||
pub async fn new(uri: String, options: ConnectionOptions) -> napi::Result<Self> {
|
||||
let mut builder = ConnectBuilder::new(&uri);
|
||||
if let Some(api_key) = options.api_key {
|
||||
builder = builder.api_key(&api_key);
|
||||
}
|
||||
if let Some(host_override) = options.host_override {
|
||||
builder = builder.host_override(&host_override);
|
||||
}
|
||||
if let Some(interval) = options.read_consistency_interval {
|
||||
builder =
|
||||
builder.read_consistency_interval(std::time::Duration::from_secs_f64(interval));
|
||||
|
||||
@@ -20,6 +20,7 @@ mod connection;
mod error;
mod index;
mod iterator;
pub mod merge;
mod query;
mod table;
mod util;
@@ -27,8 +28,6 @@ mod util;
#[napi(object)]
#[derive(Debug)]
pub struct ConnectionOptions {
    pub api_key: Option<String>,
    pub host_override: Option<String>,
    /// (For LanceDB OSS only): The interval, in seconds, at which to check for
    /// updates to the table from other processes. If None, then consistency is not
    /// checked. For performance reasons, this is the default. For strong

nodejs/src/merge.rs (new file, 53 lines)
@@ -0,0 +1,53 @@
use lancedb::{arrow::IntoArrow, ipc::ipc_file_to_batches, table::merge::MergeInsertBuilder};
use napi::bindgen_prelude::*;
use napi_derive::napi;

#[napi]
#[derive(Clone)]
/// A builder used to create and run a merge insert operation
pub struct NativeMergeInsertBuilder {
    pub(crate) inner: MergeInsertBuilder,
}

#[napi]
impl NativeMergeInsertBuilder {
    #[napi]
    pub fn when_matched_update_all(&self, condition: Option<String>) -> Self {
        let mut this = self.clone();
        this.inner.when_matched_update_all(condition);
        this
    }

    #[napi]
    pub fn when_not_matched_insert_all(&self) -> Self {
        let mut this = self.clone();
        this.inner.when_not_matched_insert_all();
        this
    }
    #[napi]
    pub fn when_not_matched_by_source_delete(&self, filter: Option<String>) -> Self {
        let mut this = self.clone();
        this.inner.when_not_matched_by_source_delete(filter);
        this
    }

    #[napi]
    pub async fn execute(&self, buf: Buffer) -> napi::Result<()> {
        let data = ipc_file_to_batches(buf.to_vec())
            .and_then(IntoArrow::into_arrow)
            .map_err(|e| napi::Error::from_reason(format!("Failed to read IPC file: {}", e)))?;

        let this = self.clone();

        this.inner
            .execute(data)
            .await
            .map_err(|e| napi::Error::from_reason(format!("Failed to execute merge insert: {}", e)))
    }
}

impl From<MergeInsertBuilder> for NativeMergeInsertBuilder {
    fn from(inner: MergeInsertBuilder) -> Self {
        Self { inner }
    }
}
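The Python package exposes the same merge-insert operation through LanceMergeInsertBuilder (imported in the remote table changes later in this diff). A minimal sketch of the upsert pattern this builder enables; the table and column names are illustrative, not taken from the diff:

    import lancedb

    db = lancedb.connect("./.lancedb")
    table = db.create_table("users", [{"id": 1, "name": "old"}])

    # Upsert keyed on "id": update rows that match, insert the rest.
    new_data = [{"id": 1, "name": "updated"}, {"id": 2, "name": "inserted"}]
    table.merge_insert("id") \
        .when_matched_update_all() \
        .when_not_matched_insert_all() \
        .execute(new_data)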
@@ -23,13 +23,14 @@ use napi_derive::napi;

use crate::error::NapiErrorExt;
use crate::index::Index;
use crate::merge::NativeMergeInsertBuilder;
use crate::query::{Query, VectorQuery};

#[napi]
pub struct Table {
    // We keep a duplicate of the table name so we can use it for error
    // messages even if the table has been closed
    name: String,
    pub name: String,
    pub(crate) inner: Option<LanceDbTable>,
}

@@ -328,16 +329,31 @@ impl Table {
            .map(IndexConfig::from)
            .collect::<Vec<_>>())
    }

    #[napi]
    pub async fn index_stats(&self, index_name: String) -> napi::Result<Option<IndexStatistics>> {
        let tbl = self.inner_ref()?.as_native().unwrap();
        let stats = tbl.index_stats(&index_name).await.default_error()?;
        Ok(stats.map(IndexStatistics::from))
    }

    #[napi]
    pub fn merge_insert(&self, on: Vec<String>) -> napi::Result<NativeMergeInsertBuilder> {
        let on: Vec<_> = on.iter().map(String::as_str).collect();
        Ok(self.inner_ref()?.merge_insert(on.as_slice()).into())
    }
}

#[napi(object)]
/// A description of an index currently configured on a column
pub struct IndexConfig {
    /// The name of the index
    pub name: String,
    /// The type of the index
    pub index_type: String,
    /// The columns in the index
    ///
    /// Currently this is always an array of size 1. In the future there may
    /// Currently this is always an array of size 1. In the future there may
    /// be more columns to represent composite indices.
    pub columns: Vec<String>,
}
@@ -348,6 +364,7 @@ impl From<lancedb::index::IndexConfig> for IndexConfig {
        Self {
            index_type,
            columns: value.columns,
            name: value.name,
        }
    }
}
@@ -430,3 +447,40 @@ pub struct AddColumnsSql {
    /// The expression can reference other columns in the table.
    pub value_sql: String,
}

#[napi(object)]
pub struct IndexStatistics {
    /// The number of rows indexed by the index
    pub num_indexed_rows: f64,
    /// The number of rows not indexed
    pub num_unindexed_rows: f64,
    /// The type of the index
    pub index_type: Option<String>,
    /// The metadata for each index
    pub indices: Vec<IndexMetadata>,
}
impl From<lancedb::index::IndexStatistics> for IndexStatistics {
    fn from(value: lancedb::index::IndexStatistics) -> Self {
        Self {
            num_indexed_rows: value.num_indexed_rows as f64,
            num_unindexed_rows: value.num_unindexed_rows as f64,
            index_type: value.index_type.map(|t| format!("{:?}", t)),
            indices: value.indices.into_iter().map(Into::into).collect(),
        }
    }
}

#[napi(object)]
pub struct IndexMetadata {
    pub metric_type: Option<String>,
    pub index_type: Option<String>,
}

impl From<lancedb::index::IndexMetadata> for IndexMetadata {
    fn from(value: lancedb::index::IndexMetadata) -> Self {
        Self {
            metric_type: value.metric_type,
            index_type: value.index_type,
        }
    }
}

@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.8.2"
current_version = "0.9.0-beta.8"
parse = """(?x)
    (?P<major>0|[1-9]\\d*)\\.
    (?P<minor>0|[1-9]\\d*)\\.

@@ -1,6 +1,6 @@
[package]
name = "lancedb-python"
version = "0.8.2"
version = "0.9.0-beta.8"
edition.workspace = true
description = "Python bindings for LanceDB"
license.workspace = true
@@ -19,6 +19,8 @@ lancedb = { path = "../rust/lancedb" }
env_logger = "0.10"
pyo3 = { version = "0.20", features = ["extension-module", "abi3-py38"] }
pyo3-asyncio = { version = "0.20", features = ["attributes", "tokio-runtime"] }
base64ct = "=1.6.0" # workaround for https://github.com/RustCrypto/formats/issues/1684
chrono = "=0.4.39"

# Prevent dynamic linking of lzma, which comes from datafusion
lzma-sys = { version = "*", features = ["static"] }

@@ -3,7 +3,7 @@ name = "lancedb"
|
||||
# version in Cargo.toml
|
||||
dependencies = [
|
||||
"deprecation",
|
||||
"pylance==0.12.1",
|
||||
"pylance==0.13.0",
|
||||
"ratelimiter~=1.0",
|
||||
"requests>=2.31.0",
|
||||
"retry>=0.9.2",
|
||||
@@ -13,6 +13,7 @@ dependencies = [
|
||||
"packaging",
|
||||
"cachetools",
|
||||
"overrides>=0.7",
|
||||
"urllib3==1.26.19"
|
||||
]
|
||||
description = "lancedb"
|
||||
authors = [{ name = "LanceDB Devs", email = "dev@lancedb.com" }]
|
||||
@@ -57,15 +58,10 @@ tests = [
|
||||
"duckdb",
|
||||
"pytz",
|
||||
"polars>=0.19",
|
||||
"tantivy"
|
||||
"tantivy",
|
||||
]
|
||||
dev = ["ruff", "pre-commit"]
|
||||
docs = [
|
||||
"mkdocs",
|
||||
"mkdocs-jupyter",
|
||||
"mkdocs-material",
|
||||
"mkdocstrings[python]",
|
||||
]
|
||||
docs = ["mkdocs", "mkdocs-jupyter", "mkdocs-material", "mkdocstrings[python]"]
|
||||
clip = ["torch", "pillow", "open-clip"]
|
||||
embeddings = [
|
||||
"openai>=1.6.1",
|
||||
@@ -100,5 +96,5 @@ addopts = "--strict-markers --ignore-glob=lancedb/embeddings/*.py"
|
||||
markers = [
|
||||
"slow: marks tests as slow (deselect with '-m \"not slow\"')",
|
||||
"asyncio",
|
||||
"s3_test"
|
||||
"s3_test",
|
||||
]
|
||||
|
||||
@@ -35,6 +35,7 @@ def connect(
    host_override: Optional[str] = None,
    read_consistency_interval: Optional[timedelta] = None,
    request_thread_pool: Optional[Union[int, ThreadPoolExecutor]] = None,
    storage_options: Optional[Dict[str, str]] = None,
    **kwargs,
) -> DBConnection:
    """Connect to a LanceDB database.
@@ -70,6 +71,9 @@ def connect(
        executor will be used for making requests. This is for LanceDB Cloud
        only and is only used when making batch requests (i.e., passing in
        multiple queries to the search method at once).
    storage_options: dict, optional
        Additional options for the storage backend. See available options at
        https://lancedb.github.io/lancedb/guides/storage/

    Examples
    --------
@@ -105,12 +109,16 @@ def connect(
            region,
            host_override,
            request_thread_pool=request_thread_pool,
            storage_options=storage_options,
            **kwargs,
        )

    if kwargs:
        raise ValueError(f"Unknown keyword arguments: {kwargs}")
    return LanceDBConnection(uri, read_consistency_interval=read_consistency_interval)
    return LanceDBConnection(
        uri,
        read_consistency_interval=read_consistency_interval,
    )

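A hedged sketch of the new storage_options parameter in use; the bucket URI and credential values below are placeholders (see the linked storage guide for the full set of supported keys):

    import lancedb

    db = lancedb.connect(
        "s3://my-bucket/lancedb",  # placeholder bucket
        storage_options={
            "aws_access_key_id": "<key-id>",
            "aws_secret_access_key": "<secret>",
        },
    )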
async def connect_async(

@@ -29,7 +29,10 @@ from .table import LanceTable


def create_index(
    index_path: str, text_fields: List[str], ordering_fields: List[str] = None
    index_path: str,
    text_fields: List[str],
    ordering_fields: List[str] = None,
    tokenizer_name: str = "default",
) -> tantivy.Index:
    """
    Create a new Index (not populated)
@@ -42,6 +45,8 @@ def create_index(
        List of text fields to index
    ordering_fields: List[str]
        List of unsigned type fields to order by at search time
    tokenizer_name : str, default "default"
        The tokenizer to use

    Returns
    -------
@@ -56,7 +61,7 @@ def create_index(
    schema_builder.add_integer_field("doc_id", stored=True)
    # data fields
    for name in text_fields:
        schema_builder.add_text_field(name, stored=True)
        schema_builder.add_text_field(name, stored=True, tokenizer_name=tokenizer_name)
    if ordering_fields:
        for name in ordering_fields:
            schema_builder.add_unsigned_field(name, fast=True)
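The new tokenizer_name parameter flows from create_index down to tantivy's schema builder, and (per the table-level changes later in this diff) is also exposed through create_fts_index. A short usage sketch; the table name and row are illustrative. With the "en_stem" tokenizer, a search for "puppies" can also match "puppy":

    import lancedb

    db = lancedb.connect("./.lancedb")
    table = db.create_table("docs", [{"text": "Frodo was a happy puppy"}])

    # Stemming tokenizer: two-letter language code followed by "_stem".
    table.create_fts_index("text", tokenizer_name="en_stem")
    hits = table.search("puppies").limit(5).to_list()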
@@ -117,6 +117,8 @@ class Query(pydantic.BaseModel):

    with_row_id: bool = False

    fast_search: bool = False


class LanceQueryBuilder(ABC):
    """An abstract query builder. Subclasses are defined for vector search,
@@ -125,12 +127,14 @@ class LanceQueryBuilder(ABC):

    @classmethod
    def create(
        cls,
        table: "Table",
        query: Optional[Union[np.ndarray, str, "PIL.Image.Image", Tuple]],
        query_type: str,
        vector_column_name: str,
        ordering_field_name: str = None,
        cls,
        table: "Table",
        query: Optional[Union[np.ndarray, str, "PIL.Image.Image", Tuple]],
        query_type: str,
        vector_column_name: str,
        ordering_field_name: Optional[str] = None,
        fts_columns: Union[str, List[str]] = [],
        fast_search: bool = False,
    ) -> LanceQueryBuilder:
        """
        Create a query builder based on the given query and query type.
@@ -147,13 +151,18 @@ class LanceQueryBuilder(ABC):
            If "auto", the query type is inferred based on the query.
        vector_column_name: str
            The name of the vector column to use for vector search.
        fast_search: bool
            Skip flat search of unindexed data.
        """
        if query is None:
            return LanceEmptyQueryBuilder(table)

        # Check hybrid search first as it supports empty query pattern
        if query_type == "hybrid":
            # hybrid fts and vector query
            return LanceHybridQueryBuilder(table, query, vector_column_name)
            return LanceHybridQueryBuilder(
                table, query, vector_column_name, fts_columns=fts_columns
            )

        if query is None:
            return LanceEmptyQueryBuilder(table)

        # remember the string query for reranking purpose
        str_query = query if isinstance(query, str) else None
@@ -165,12 +174,17 @@ class LanceQueryBuilder(ABC):
        )

        if query_type == "hybrid":
            return LanceHybridQueryBuilder(table, query, vector_column_name)
            return LanceHybridQueryBuilder(
                table, query, vector_column_name, fts_columns=fts_columns
            )

        if isinstance(query, str):
            # fts
            return LanceFtsQueryBuilder(
                table, query, ordering_field_name=ordering_field_name
                table,
                query,
                ordering_field_name=ordering_field_name,
                fts_columns=fts_columns,
            )

        if isinstance(query, list):
@@ -180,7 +194,9 @@ class LanceQueryBuilder(ABC):
        else:
            raise TypeError(f"Unsupported query type: {type(query)}")

        return LanceVectorQueryBuilder(table, query, vector_column_name, str_query)
        return LanceVectorQueryBuilder(
            table, query, vector_column_name, str_query, fast_search
        )

    @classmethod
    def _resolve_query(cls, table, query, query_type, vector_column_name):
@@ -196,8 +212,6 @@ class LanceQueryBuilder(ABC):
        elif query_type == "auto":
            if isinstance(query, (list, np.ndarray)):
                return query, "vector"
            if isinstance(query, tuple):
                return query, "hybrid"
            else:
                conf = table.embedding_functions.get(vector_column_name)
                if conf is not None:
@@ -224,9 +238,14 @@ class LanceQueryBuilder(ABC):
    def __init__(self, table: "Table"):
        self._table = table
        self._limit = 10
        self._offset = 0
        self._columns = None
        self._where = None
        self._prefilter = False
        self._with_row_id = False
        self._vector = None
        self._text = None
        self._ef = None

    @deprecation.deprecated(
        deprecated_in="0.3.1",
@@ -337,11 +356,13 @@ class LanceQueryBuilder(ABC):
        ----------
        limit: int
            The maximum number of results to return.
            By default the query is limited to the first 10.
            Call this method and pass 0, a negative value,
            or None to remove the limit.
            *WARNING* if you have a large dataset, removing
            the limit can potentially result in reading a
            The default query limit is 10 results.
            For ANN/KNN queries, you must specify a limit.
            Entering 0, a negative number, or None will reset
            the limit to the default value of 10.
            *WARNING* if you have a large dataset, setting
            the limit to a large number, e.g. the table size,
            can potentially result in reading a
            large amount of data into memory and cause
            out of memory issues.

@@ -351,11 +372,33 @@ class LanceQueryBuilder(ABC):
            The LanceQueryBuilder object.
        """
        if limit is None or limit <= 0:
            self._limit = None
            if isinstance(self, LanceVectorQueryBuilder):
                raise ValueError("Limit is required for ANN/KNN queries")
            else:
                self._limit = None
        else:
            self._limit = limit
        return self

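A sketch of the new limit semantics; the table contents are placeholders. For vector queries, clearing the limit now raises instead of silently scanning everything:

    import lancedb

    db = lancedb.connect("./.lancedb")
    table = db.create_table("vecs", [{"vector": [1.0, 2.0]}, {"vector": [3.0, 4.0]}])

    query = table.search([1.0, 2.0])  # LanceVectorQueryBuilder
    try:
        query.limit(None)  # ANN/KNN queries must keep a limit
    except ValueError as err:
        print(err)  # "Limit is required for ANN/KNN queries"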
def offset(self, offset: int) -> LanceQueryBuilder:
|
||||
"""Set the offset for the results.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
offset: int
|
||||
The offset to start fetching results from.
|
||||
|
||||
Returns
|
||||
-------
|
||||
LanceQueryBuilder
|
||||
The LanceQueryBuilder object.
|
||||
"""
|
||||
if offset is None or offset <= 0:
|
||||
self._offset = 0
|
||||
else:
|
||||
self._offset = offset
|
||||
return self
|
||||
|
||||
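Offset pairs with limit for simple pagination; a sketch reusing the table from the previous example:

    # Second page of ten results for the same query vector.
    page_two = (
        table.search([1.0, 2.0])
        .limit(10)
        .offset(10)
        .to_list()
    )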
def select(self, columns: Union[list[str], dict[str, str]]) -> LanceQueryBuilder:
|
||||
"""Set the columns to return.
|
||||
|
||||
@@ -417,6 +460,80 @@ class LanceQueryBuilder(ABC):
|
||||
self._with_row_id = with_row_id
|
||||
return self
|
||||
|
||||
def explain_plan(self, verbose: Optional[bool] = False) -> str:
|
||||
"""Return the execution plan for this query.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import lancedb
|
||||
>>> db = lancedb.connect("./.lancedb")
|
||||
>>> table = db.create_table("my_table", [{"vector": [99, 99]}])
|
||||
>>> query = [100, 100]
|
||||
>>> plan = table.search(query).explain_plan(True)
|
||||
>>> print(plan) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
|
||||
ProjectionExec: expr=[vector@0 as vector, _distance@2 as _distance]
|
||||
GlobalLimitExec: skip=0, fetch=10
|
||||
FilterExec: _distance@2 IS NOT NULL
|
||||
SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], preserve_partitioning=[false]
|
||||
KNNVectorDistance: metric=l2
|
||||
LanceScan: uri=..., projection=[vector], row_id=true, row_addr=false, ordered=false
|
||||
|
||||
Parameters
|
||||
----------
|
||||
verbose : bool, default False
|
||||
Use a verbose output format.
|
||||
|
||||
Returns
|
||||
-------
|
||||
plan : str
|
||||
""" # noqa: E501
|
||||
ds = self._table.to_lance()
|
||||
return ds.scanner(
|
||||
nearest={
|
||||
"column": self._vector_column,
|
||||
"q": self._query,
|
||||
"k": self._limit,
|
||||
"metric": self._metric,
|
||||
"nprobes": self._nprobes,
|
||||
"refine_factor": self._refine_factor,
|
||||
},
|
||||
prefilter=self._prefilter,
|
||||
filter=self._str_query,
|
||||
limit=self._limit,
|
||||
with_row_id=self._with_row_id,
|
||||
offset=self._offset,
|
||||
).explain_plan(verbose)
|
||||
|
||||
def vector(self, vector: Union[np.ndarray, list]) -> LanceQueryBuilder:
|
||||
"""Set the vector to search for.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
vector: np.ndarray or list
|
||||
The vector to search for.
|
||||
|
||||
Returns
|
||||
-------
|
||||
LanceQueryBuilder
|
||||
The LanceQueryBuilder object.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def text(self, text: str) -> LanceQueryBuilder:
|
||||
"""Set the text to search for.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
text: str
|
||||
The text to search for.
|
||||
|
||||
Returns
|
||||
-------
|
||||
LanceQueryBuilder
|
||||
The LanceQueryBuilder object.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class LanceVectorQueryBuilder(LanceQueryBuilder):
|
||||
"""
|
||||
@@ -440,11 +557,12 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
table: "Table",
|
||||
query: Union[np.ndarray, list, "PIL.Image.Image"],
|
||||
vector_column: str,
|
||||
str_query: Optional[str] = None,
|
||||
self,
|
||||
table: "Table",
|
||||
query: Union[np.ndarray, list, "PIL.Image.Image"],
|
||||
vector_column: str,
|
||||
str_query: Optional[str] = None,
|
||||
fast_search: bool = False,
|
||||
):
|
||||
super().__init__(table)
|
||||
self._query = query
|
||||
@@ -455,13 +573,14 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
|
||||
self._prefilter = False
|
||||
self._reranker = None
|
||||
self._str_query = str_query
|
||||
self._fast_search = fast_search
|
||||
|
||||
def metric(self, metric: Literal["L2", "cosine"]) -> LanceVectorQueryBuilder:
|
||||
def metric(self, metric: Literal["L2", "cosine", "dot"]) -> LanceVectorQueryBuilder:
|
||||
"""Set the distance metric to use.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
metric: "L2" or "cosine"
|
||||
metric: "L2" or "cosine" or "dot"
|
||||
The distance metric to use. By default "L2" is used.
|
||||
|
||||
Returns
|
||||
@@ -469,7 +588,7 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
|
||||
LanceVectorQueryBuilder
|
||||
The LanceQueryBuilder object.
|
||||
"""
|
||||
self._metric = metric
|
||||
self._metric = metric.lower()
|
||||
return self
|
||||
|
||||
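"dot" joins "L2" and "cosine" as a metric option, and the new body lowercases the value before use. A sketch; note that dot product ranks like cosine only when the stored vectors are normalized:

    results = (
        table.search([1.0, 2.0])
        .metric("dot")  # stored internally lowercased
        .limit(5)
        .to_list()
    )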
def nprobes(self, nprobes: int) -> LanceVectorQueryBuilder:
|
||||
@@ -494,6 +613,28 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
|
||||
self._nprobes = nprobes
|
||||
return self
|
||||
|
||||
def ef(self, ef: int) -> LanceVectorQueryBuilder:
|
||||
"""Set the number of candidates to consider during search.
|
||||
|
||||
Higher values will yield better recall (more likely to find vectors if
|
||||
they exist) at the expense of latency.
|
||||
|
||||
This only applies to the HNSW-related index.
|
||||
The default value is 1.5 * limit.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ef: int
|
||||
The number of candidates to consider during search.
|
||||
|
||||
Returns
|
||||
-------
|
||||
LanceVectorQueryBuilder
|
||||
The LanceQueryBuilder object.
|
||||
"""
|
||||
self._ef = ef
|
||||
return self
|
||||
|
||||
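A sketch of tuning ef; it only has an effect when the column is served by an HNSW-family index, which this snippet assumes already exists:

    results = (
        table.search([1.0, 2.0])
        .limit(10)
        .ef(100)  # consider more candidates than the default 1.5 * limit
        .to_list()
    )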
def refine_factor(self, refine_factor: int) -> LanceVectorQueryBuilder:
|
||||
"""Set the refine factor to use, increasing the number of vectors sampled.
|
||||
|
||||
@@ -554,15 +695,11 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
|
||||
refine_factor=self._refine_factor,
|
||||
vector_column=self._vector_column,
|
||||
with_row_id=self._with_row_id,
|
||||
offset=self._offset,
|
||||
fast_search=self._fast_search,
|
||||
ef=self._ef,
|
||||
)
|
||||
result_set = self._table._execute_query(query, batch_size)
|
||||
if self._reranker is not None:
|
||||
rs_table = result_set.read_all()
|
||||
result_set = self._reranker.rerank_vector(self._str_query, rs_table)
|
||||
# convert result_set back to RecordBatchReader
|
||||
result_set = pa.RecordBatchReader.from_batches(
|
||||
result_set.schema, result_set.to_batches()
|
||||
)
|
||||
|
||||
return result_set
|
||||
|
||||
@@ -591,7 +728,7 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
|
||||
return self
|
||||
|
||||
def rerank(
|
||||
self, reranker: Reranker, query_string: Optional[str] = None
|
||||
self, reranker: Reranker, query_string: Optional[str] = None
|
||||
) -> LanceVectorQueryBuilder:
|
||||
"""Rerank the results using the specified reranker.
|
||||
|
||||
@@ -756,12 +893,34 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):
|
||||
|
||||
class LanceEmptyQueryBuilder(LanceQueryBuilder):
|
||||
def to_arrow(self) -> pa.Table:
|
||||
ds = self._table.to_lance()
|
||||
return ds.to_table(
|
||||
return self.to_batches().read_all()
|
||||
|
||||
def to_batches(self, /, batch_size: Optional[int] = None) -> pa.RecordBatchReader:
|
||||
query = Query(
|
||||
columns=self._columns,
|
||||
filter=self._where,
|
||||
limit=self._limit,
|
||||
k=self._limit or 10,
|
||||
with_row_id=self._with_row_id,
|
||||
vector=[],
|
||||
# not actually respected in remote query
|
||||
offset=self._offset or 0,
|
||||
)
|
||||
return self._table._execute_query(query)
|
||||
|
||||
def rerank(self, reranker: Reranker) -> LanceEmptyQueryBuilder:
|
||||
"""Rerank the results using the specified reranker.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
reranker: Reranker
|
||||
The reranker to use.
|
||||
|
||||
Returns
|
||||
-------
|
||||
LanceEmptyQueryBuilder
|
||||
The LanceQueryBuilder object.
|
||||
"""
|
||||
raise NotImplementedError("Reranking is not yet supported.")
|
||||
|
||||
|
||||
class LanceHybridQueryBuilder(LanceQueryBuilder):
|
||||
|
||||
@@ -55,11 +55,13 @@ class RestfulLanceDBClient:
    region: str
    api_key: Credential
    host_override: Optional[str] = attrs.field(default=None)
    db_prefix: Optional[str] = attrs.field(default=None)

    closed: bool = attrs.field(default=False, init=False)

    connection_timeout: float = attrs.field(default=120.0, kw_only=True)
    read_timeout: float = attrs.field(default=300.0, kw_only=True)
    storage_options: Optional[Dict[str, str]] = attrs.field(default=None, kw_only=True)

    @functools.cached_property
    def session(self) -> requests.Session:
@@ -92,6 +94,18 @@ class RestfulLanceDBClient:
            headers["Host"] = f"{self.db_name}.{self.region}.api.lancedb.com"
        if self.host_override:
            headers["x-lancedb-database"] = self.db_name
        if self.storage_options:
            if self.storage_options.get("account_name") is not None:
                headers["x-azure-storage-account-name"] = self.storage_options[
                    "account_name"
                ]
            if self.storage_options.get("azure_storage_account_name") is not None:
                headers["x-azure-storage-account-name"] = self.storage_options[
                    "azure_storage_account_name"
                ]
        if self.db_prefix:
            headers["x-lancedb-database-prefix"] = self.db_prefix

        return headers

    @staticmethod
@@ -158,6 +172,7 @@ class RestfulLanceDBClient:
        headers["content-type"] = content_type
        if request_id is not None:
            headers["x-request-id"] = request_id

        with self.session.post(
            urljoin(self.url, uri),
            headers=headers,
@@ -245,7 +260,6 @@ def retry_adapter(options: Dict[str, Any]) -> HTTPAdapter:
        connect=connect_retries,
        read=read_retries,
        backoff_factor=backoff_factor,
        backoff_jitter=backoff_jitter,
        status_forcelist=statuses,
        allowed_methods=methods,
    )

@@ -15,7 +15,7 @@ import inspect
import logging
import uuid
from concurrent.futures import ThreadPoolExecutor
from typing import Iterable, List, Optional, Union
from typing import Dict, Iterable, List, Optional, Union
from urllib.parse import urlparse

from cachetools import TTLCache
@@ -44,20 +44,25 @@ class RemoteDBConnection(DBConnection):
        request_thread_pool: Optional[ThreadPoolExecutor] = None,
        connection_timeout: float = 120.0,
        read_timeout: float = 300.0,
        storage_options: Optional[Dict[str, str]] = None,
    ):
        """Connect to a remote LanceDB database."""
        parsed = urlparse(db_url)
        if parsed.scheme != "db":
            raise ValueError(f"Invalid scheme: {parsed.scheme}, only accepts db://")
        self.db_name = parsed.netloc
        prefix = parsed.path.lstrip("/")
        self.db_prefix = None if not prefix else prefix
        self.api_key = api_key
        self._client = RestfulLanceDBClient(
            self.db_name,
            region,
            api_key,
            host_override,
            self.db_prefix,
            connection_timeout=connection_timeout,
            read_timeout=read_timeout,
            storage_options=storage_options,
        )
        self._request_thread_pool = request_thread_pool
        self._table_cache = TTLCache(maxsize=10000, ttl=300)

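End to end, this plumbing lets a cloud connection carry storage options down to the request headers shown above. A hedged sketch; the database name, API key, and storage account are placeholders:

    import lancedb

    db = lancedb.connect(
        "db://my-database",
        api_key="sk-placeholder",
        storage_options={"azure_storage_account_name": "myaccount"},
    )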
@@ -15,13 +15,14 @@ import logging
import uuid
from concurrent.futures import Future
from functools import cached_property
from typing import Dict, Iterable, Optional, Union
from typing import Dict, Iterable, Optional, Union, Literal

import pyarrow as pa
from lance import json_to_schema

from lancedb.common import DATA, VEC, VECTOR_COLUMN_NAME
from lancedb.merge import LanceMergeInsertBuilder
from lancedb.query import LanceQueryBuilder

from ..query import LanceVectorQueryBuilder
from ..table import Query, Table, _sanitize_data
@@ -81,6 +82,7 @@ class RemoteTable(Table):
    def create_scalar_index(
        self,
        column: str,
        index_type: Literal["BTREE", "BITMAP", "LABEL_LIST", "scalar"] = "scalar",
    ):
        """Creates a scalar index
        Parameters
@@ -89,8 +91,6 @@ class RemoteTable(Table):
            The column to be indexed. Must be a boolean, integer, float,
            or string column.
        """
        index_type = "scalar"

        data = {
            "column": column,
            "index_type": index_type,
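With the hard-coded override removed, remote callers can now choose the index flavor. A sketch; the column name is illustrative and "table" is assumed to be a RemoteTable:

    # Previously index_type was always forced to "scalar".
    table.create_scalar_index("category", index_type="BTREE")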
@@ -228,10 +228,21 @@ class RemoteTable(Table):
            content_type=ARROW_STREAM_CONTENT_TYPE,
        )

    def query(
        self,
        query: Union[VEC, str] = None,
        query_type: str = "vector",
        vector_column_name: Optional[str] = None,
        fast_search: bool = False,
    ) -> LanceVectorQueryBuilder:
        return self.search(query, query_type, vector_column_name, fast_search)

    def search(
        self,
        query: Union[VEC, str],
        query: Union[VEC, str] = None,
        query_type: str = "vector",
        vector_column_name: Optional[str] = None,
        fast_search: bool = False,
    ) -> LanceVectorQueryBuilder:
        """Create a search query to find the nearest neighbors
        of the given query vector. We currently support [vector search][search]
@@ -278,6 +289,11 @@ class RemoteTable(Table):
        - If the table has multiple vector columns then the *vector_column_name*
          needs to be specified. Otherwise, an error is raised.

        fast_search: bool, optional
            Skip a flat search of unindexed data. This may improve
            search performance but search results will not include unindexed data.

            - *default False*.
        Returns
        -------
        LanceQueryBuilder
@@ -293,7 +309,14 @@ class RemoteTable(Table):
        """
        if vector_column_name is None:
            vector_column_name = inf_vector_column_query(self.schema)
        return LanceVectorQueryBuilder(self, query, vector_column_name)

        return LanceQueryBuilder.create(
            self,
            query,
            query_type,
            vector_column_name=vector_column_name,
            fast_search=fast_search,
        )

    def _execute_query(
        self, query: Query, batch_size: Optional[int] = None

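fast_search trades completeness for speed: rows not yet covered by the vector index are skipped. A sketch against a remote table; the query vector is illustrative:

    hits = (
        table.search([0.1, 0.2], fast_search=True)
        .limit(10)
        .to_list()
    )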
@@ -337,7 +337,6 @@ class Table(ABC):
        For example, the following scan will be faster if the column ``my_col`` has
        a scalar index:

        .. code-block:: python

            import lancedb

@@ -348,8 +347,6 @@ class Table(ABC):
        Scalar indices can also speed up scans containing a vector search and a
        prefilter:

        .. code-block::python

            import lancedb

            db = lancedb.connect("/data/lance")
@@ -385,7 +382,6 @@ class Table(ABC):
        Examples
        --------

        .. code-block:: python

            import lance

@@ -1175,6 +1171,7 @@ class LanceTable(Table):
        *,
        replace: bool = False,
        writer_heap_size: Optional[int] = 1024 * 1024 * 1024,
        tokenizer_name: str = "default",
    ):
        """Create a full-text search index on the table.

@@ -1193,6 +1190,10 @@ class LanceTable(Table):
        ordering_field_names:
            A list of unsigned type fields to index to optionally order
            results on at search time
        tokenizer_name: str, default "default"
            The tokenizer to use for the index. Can be "raw", "default" or the 2 letter
            language code followed by "_stem". So for english it would be "en_stem".
            For available languages see: https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html
        """
        from .fts import create_index, populate_index

@@ -1218,6 +1219,7 @@ class LanceTable(Table):
            self._get_fts_index_path(),
            field_names,
            ordering_fields=ordering_field_names,
            tokenizer_name=tokenizer_name,
        )
        populate_index(
            index,

@@ -66,6 +66,17 @@ def test_create_index(tmp_path):
    assert os.path.exists(str(tmp_path / "index"))


def test_create_index_with_stemming(tmp_path, table):
    index = ldb.fts.create_index(
        str(tmp_path / "index"), ["text"], tokenizer_name="en_stem"
    )
    assert isinstance(index, tantivy.Index)
    assert os.path.exists(str(tmp_path / "index"))

    # Check stemming by running tokenizer on non empty table
    table.create_fts_index("text", tokenizer_name="en_stem")


def test_populate_index(tmp_path, table):
    index = ldb.fts.create_index(str(tmp_path / "index"), ["text"])
    assert ldb.fts.populate_index(index, table, ["text"]) == len(table)

@@ -21,6 +21,7 @@ class FakeLanceDBClient:
        pass

    def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
        print(f"{query=}")
        assert table_name == "test"
        t = pa.schema([]).empty_table()
        return VectorQueryResult(t)
@@ -39,3 +40,21 @@ def test_remote_db():
    table = conn["test"]
    table.schema = pa.schema([pa.field("vector", pa.list_(pa.float32(), 2))])
    table.search([1.0, 2.0]).to_pandas()


def test_empty_query_with_filter():
    conn = lancedb.connect("db://client-will-be-injected", api_key="fake")
    setattr(conn, "_client", FakeLanceDBClient())

    table = conn["test"]
    table.schema = pa.schema([pa.field("vector", pa.list_(pa.float32(), 2))])
    print(table.query().select(["vector"]).where("foo == bar").to_arrow())


def test_fast_search_query_with_filter():
    conn = lancedb.connect("db://client-will-be-injected", api_key="fake")
    setattr(conn, "_client", FakeLanceDBClient())

    table = conn["test"]
    table.schema = pa.schema([pa.field("vector", pa.list_(pa.float32(), 2))])
    print(table.query([0, 0], fast_search=True).select(["vector"]).where("foo == bar").to_arrow())

@@ -735,7 +735,7 @@ def test_create_scalar_index(db):
    indices = table.to_lance().list_indices()
    assert len(indices) == 1
    scalar_index = indices[0]
    assert scalar_index["type"] == "Scalar"
    assert scalar_index["type"] == "BTree"

    # Confirm that prefiltering still works with the scalar index column
    results = table.search().where("x = 'c'").to_arrow()

rust-toolchain.toml (new file, 2 lines)
@@ -0,0 +1,2 @@
[toolchain]
channel = "1.79.0"

@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lancedb-node"
|
||||
version = "0.5.2"
|
||||
version = "0.5.2-final.1"
|
||||
description = "Serverless, low-latency vector database for AI applications"
|
||||
license.workspace = true
|
||||
edition.workspace = true
|
||||
|
||||
@@ -463,6 +463,7 @@ impl JsTable {
|
||||
Ok(promise)
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
pub(crate) fn js_index_stats(mut cx: FunctionContext) -> JsResult<JsPromise> {
|
||||
let js_table = cx.this().downcast_or_throw::<JsBox<Self>, _>(&mut cx)?;
|
||||
let rt = runtime(&mut cx)?;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lancedb"
|
||||
version = "0.5.2"
|
||||
version = "0.5.2-final.1"
|
||||
edition.workspace = true
|
||||
description = "LanceDB: A serverless, low-latency vector database for AI applications"
|
||||
license.workspace = true
|
||||
|
||||
@@ -80,6 +80,8 @@ pub enum IndexType {
|
||||
|
||||
/// A description of an index currently configured on a column
|
||||
pub struct IndexConfig {
|
||||
/// The name of the index
|
||||
pub name: String,
|
||||
/// The type of the index
|
||||
pub index_type: IndexType,
|
||||
/// The columns in the index
|
||||
|
||||
@@ -84,7 +84,8 @@ pub fn convert_polars_arrow_array_to_arrow_rs_array(
|
||||
arrow_datatype: arrow_schema::DataType,
|
||||
) -> std::result::Result<arrow_array::ArrayRef, arrow_schema::ArrowError> {
|
||||
let polars_c_array = polars_arrow::ffi::export_array_to_c(polars_array);
|
||||
let arrow_c_array = unsafe { mem::transmute(polars_c_array) };
|
||||
// Safety: `polars_arrow::ffi::ArrowArray` has the same memory layout as `arrow::ffi::FFI_ArrowArray`.
|
||||
let arrow_c_array: arrow_data::ffi::FFI_ArrowArray = unsafe { mem::transmute(polars_c_array) };
|
||||
Ok(arrow_array::make_array(unsafe {
|
||||
arrow::ffi::from_ffi_and_data_type(arrow_c_array, arrow_datatype)
|
||||
}?))
|
||||
@@ -96,7 +97,8 @@ fn convert_arrow_rs_array_to_polars_arrow_array(
    polars_arrow_dtype: polars::datatypes::ArrowDataType,
) -> Result<Box<dyn polars_arrow::array::Array>> {
    let arrow_c_array = arrow::ffi::FFI_ArrowArray::new(&arrow_rs_array.to_data());
    let polars_c_array = unsafe { mem::transmute(arrow_c_array) };
    // Safety: `polars_arrow::ffi::ArrowArray` has the same memory layout as `arrow::ffi::FFI_ArrowArray`.
    let polars_c_array: polars_arrow::ffi::ArrowArray = unsafe { mem::transmute(arrow_c_array) };
    Ok(unsafe { polars_arrow::ffi::import_array_from_c(polars_c_array, polars_arrow_dtype) }?)
}

@@ -104,7 +106,9 @@ fn convert_polars_arrow_field_to_arrow_rs_field(
    polars_arrow_field: polars_arrow::datatypes::Field,
) -> Result<arrow_schema::Field> {
    let polars_c_schema = polars_arrow::ffi::export_field_to_c(&polars_arrow_field);
    let arrow_c_schema: arrow::ffi::FFI_ArrowSchema = unsafe { mem::transmute(polars_c_schema) };
    // Safety: `polars_arrow::ffi::ArrowSchema` has the same memory layout as `arrow::ffi::FFI_ArrowSchema`.
    let arrow_c_schema: arrow::ffi::FFI_ArrowSchema =
        unsafe { mem::transmute::<_, _>(polars_c_schema) };
    let arrow_rs_dtype = arrow_schema::DataType::try_from(&arrow_c_schema)?;
    Ok(arrow_schema::Field::new(
        polars_arrow_field.name,
@@ -118,6 +122,8 @@ fn convert_arrow_rs_field_to_polars_arrow_field(
) -> Result<polars_arrow::datatypes::Field> {
    let arrow_rs_dtype = arrow_rs_field.data_type();
    let arrow_c_schema = arrow::ffi::FFI_ArrowSchema::try_from(arrow_rs_dtype)?;
    let polars_c_schema: polars_arrow::ffi::ArrowSchema = unsafe { mem::transmute(arrow_c_schema) };
    // Safety: `polars_arrow::ffi::ArrowSchema` has the same memory layout as `arrow::ffi::FFI_ArrowSchema`.
    let polars_c_schema: polars_arrow::ffi::ArrowSchema =
        unsafe { mem::transmute::<_, _>(arrow_c_schema) };
    Ok(unsafe { polars_arrow::ffi::import_field_from_c(&polars_c_schema) }?)
}

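The four hunks above all apply the same fix: each `mem::transmute` between the polars-arrow and arrow-rs FFI structs gains an explicit target-type annotation plus a safety comment, so the layout assumption is documented and type inference cannot silently pick a wrong target. A self-contained toy illustrating the pattern (the struct types are invented for the example, standing in for the two crates' C Data Interface types):

use std::mem;

// Two FFI-style structs assumed to share an identical #[repr(C)] layout.
#[repr(C)]
struct TheirFfiArray {
    ptr: *const u8,
    len: usize,
}

#[repr(C)]
struct OurFfiArray {
    ptr: *const u8,
    len: usize,
}

fn convert(theirs: TheirFfiArray) -> OurFfiArray {
    // Safety: both structs are #[repr(C)] with identical fields, so the
    // bit-for-bit reinterpretation is sound; annotating the binding makes
    // the intended target type explicit at the call site.
    let ours: OurFfiArray = unsafe { mem::transmute(theirs) };
    ours
}

fn main() {
    let theirs = TheirFfiArray { ptr: std::ptr::null(), len: 0 };
    let ours = convert(theirs);
    assert_eq!(ours.len, 0);
}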
@@ -1206,28 +1206,36 @@ impl NativeTable {
            .await)
    }

    #[deprecated(since = "0.5.2", note = "Please use `index_stats` instead")]
    pub async fn count_indexed_rows(&self, index_uuid: &str) -> Result<Option<usize>> {
        #[allow(deprecated)]
        match self.load_index_stats(index_uuid).await? {
            Some(stats) => Ok(Some(stats.num_indexed_rows)),
            None => Ok(None),
        }
    }

    #[deprecated(since = "0.5.2", note = "Please use `index_stats` instead")]
    pub async fn count_unindexed_rows(&self, index_uuid: &str) -> Result<Option<usize>> {
        #[allow(deprecated)]
        match self.load_index_stats(index_uuid).await? {
            Some(stats) => Ok(Some(stats.num_unindexed_rows)),
            None => Ok(None),
        }
    }

    #[deprecated(since = "0.5.2", note = "Please use `index_stats` instead")]
    pub async fn get_index_type(&self, index_uuid: &str) -> Result<Option<String>> {
        #[allow(deprecated)]
        match self.load_index_stats(index_uuid).await? {
            Some(stats) => Ok(Some(stats.index_type.unwrap_or_default())),
            None => Ok(None),
        }
    }

    #[deprecated(since = "0.5.2", note = "Please use `index_stats` instead")]
    pub async fn get_distance_type(&self, index_uuid: &str) -> Result<Option<String>> {
        #[allow(deprecated)]
        match self.load_index_stats(index_uuid).await? {
            Some(stats) => Ok(Some(
                stats
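Callers of the deprecated helpers can collapse to a single `index_stats` lookup. A hedged migration sketch, meant to live alongside the code above (it assumes, as that code does, that `IndexStatistics` exposes `num_indexed_rows` and `num_unindexed_rows`; note also that `index_stats` takes an index name, not a UUID):

async fn indexed_and_unindexed(
    table: &NativeTable,
    index_name: &str,
) -> Result<Option<(usize, usize)>> {
    // One stats fetch replaces separate count_indexed_rows /
    // count_unindexed_rows round trips.
    Ok(table
        .index_stats(index_name)
        .await?
        .map(|stats| (stats.num_indexed_rows, stats.num_unindexed_rows)))
}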
@@ -1240,16 +1248,8 @@
        }
    }

    pub async fn load_indices(&self) -> Result<Vec<VectorIndex>> {
        let dataset = self.dataset.get().await?;
        let (indices, mf) = futures::try_join!(dataset.load_indices(), dataset.latest_manifest())?;
        Ok(indices
            .iter()
            .map(|i| VectorIndex::new_from_format(&mf, i))
            .collect())
    }

    async fn load_index_stats(&self, index_uuid: &str) -> Result<Option<IndexStatistics>> {
    #[deprecated(since = "0.5.2", note = "Please use `index_stats` instead")]
    pub async fn load_index_stats(&self, index_uuid: &str) -> Result<Option<IndexStatistics>> {
        let index = self
            .load_indices()
            .await?
@@ -1268,6 +1268,35 @@ impl NativeTable {
        Ok(Some(index_stats))
    }

    /// Get statistics about an index.
    /// Returns an error if the index does not exist.
    pub async fn index_stats<S: AsRef<str>>(
        &self,
        index_name: S,
    ) -> Result<Option<IndexStatistics>> {
        self.dataset
            .get()
            .await?
            .index_statistics(index_name.as_ref())
            .await
            .ok()
            .map(|stats| {
                serde_json::from_str(&stats).map_err(|e| Error::InvalidInput {
                    message: format!("error deserializing index statistics: {}", e),
                })
            })
            .transpose()
    }

    pub async fn load_indices(&self) -> Result<Vec<VectorIndex>> {
        let dataset = self.dataset.get().await?;
        let (indices, mf) = futures::try_join!(dataset.load_indices(), dataset.latest_manifest())?;
        Ok(indices
            .iter()
            .map(|i| VectorIndex::new_from_format(&mf, i))
            .collect())
    }

    async fn create_ivf_pq_index(
        &self,
        index: IvfPqIndexBuilder,
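The `Option`/`Result` plumbing in `index_stats` is worth calling out: `.ok()` converts the statistics-lookup error into `None`, `.map` then yields an `Option<Result<T, E>>`, and `.transpose()` flips that into the `Result<Option<T>, E>` the signature promises. The same idiom in miniature:

// Parse an optional string into an optional integer, surfacing the parse
// error while letting "absent" pass through as Ok(None).
fn parse_opt(raw: Option<&str>) -> Result<Option<i32>, std::num::ParseIntError> {
    raw.map(|s| s.parse::<i32>()).transpose()
}

fn main() {
    assert_eq!(parse_opt(None), Ok(None));
    assert_eq!(parse_opt(Some("42")), Ok(Some(42)));
    assert!(parse_opt(Some("not a number")).is_err());
}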
@@ -1860,12 +1889,20 @@ impl TableInternal for NativeTable {
                }
                columns.push(field.name.clone());
            }
            Ok(IndexConfig { index_type: if is_vector { crate::index::IndexType::IvfPq } else { crate::index::IndexType::BTree }, columns })
            let index_type = if is_vector {
                crate::index::IndexType::IvfPq
            } else {
                crate::index::IndexType::BTree
            };

            let name = idx.name.clone();
            Ok(IndexConfig { index_type, columns, name })
        }).collect::<Result<Vec<_>>>()
    }
}

#[cfg(test)]
#[allow(deprecated)]
mod tests {
    use std::iter;
    use std::sync::atomic::{AtomicBool, Ordering};
@@ -23,6 +23,7 @@ use super::TableInternal;
/// A builder used to create and run a merge insert operation
///
/// See [`super::Table::merge_insert`] for more context
#[derive(Debug, Clone)]
pub struct MergeInsertBuilder {
    table: Arc<dyn TableInternal>,
    pub(super) on: Vec<String>,
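Deriving `Debug` and `Clone` on `MergeInsertBuilder` lets callers branch a partially configured builder and log it. A toy stand-in showing what the derives buy (the struct below is illustrative, not the real builder, whose `Arc`-held `table` field clones cheaply by reference count):

#[derive(Debug, Clone)]
struct BuilderToy {
    on: Vec<String>,
}

fn main() {
    let base = BuilderToy { on: vec!["id".to_string()] };
    // Clone lets a caller keep `base` around while customizing a copy...
    let branched = base.clone();
    // ...and Debug makes builders printable for logging and test assertions.
    println!("base = {:?}, branched = {:?}", base, branched);
}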