Compare commits


2 Commits

Author  SHA1        Message              Date
qzhu    8e25e0c7f0  reformatted          2023-12-07 12:08:05 -08:00
qzhu    5f989e86d2  SaaS python SDK doc  2023-12-07 12:01:03 -08:00
32 changed files with 1299 additions and 1033 deletions


@@ -37,10 +37,14 @@ jobs:
path: |
node/vectordb-*.tgz
node-macos-x86:
node-macos:
runs-on: macos-13
# Only runs on tags that match the make-release action
if: startsWith(github.ref, 'refs/tags/v')
strategy:
fail-fast: false
matrix:
target: [x86_64-apple-darwin, aarch64-apple-darwin]
steps:
- name: Checkout
uses: actions/checkout@v3
@@ -50,8 +54,11 @@ jobs:
run: |
cd node
npm ci
- name: Install rustup target
if: ${{ matrix.target == 'aarch64-apple-darwin' }}
run: rustup target add aarch64-apple-darwin
- name: Build MacOS native node modules
run: bash ci/build_macos_artifacts.sh x86_64-apple-darwin
run: bash ci/build_macos_artifacts.sh ${{ matrix.target }}
- name: Upload Darwin Artifacts
uses: actions/upload-artifact@v3
with:
@@ -59,28 +66,6 @@ jobs:
path: |
node/dist/lancedb-vectordb-darwin*.tgz
node-macos-arm64:
runs-on: macos-13-xlarge
# Only runs on tags that match the make-release action
if: startsWith(github.ref, 'refs/tags/v')
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install system dependencies
run: brew install protobuf
- name: Install npm dependencies
run: |
cd node
npm ci
- name: Build MacOS native node modules
run: bash ci/build_macos_artifacts.sh aarch64-apple-darwin
- name: Upload Darwin Artifacts
uses: actions/upload-artifact@v3
with:
name: native-darwin
path: |
node/dist/lancedb-vectordb-darwin*.tgz
node-linux:
name: node-linux (${{ matrix.config.arch }}-unknown-linux-gnu)
runs-on: ${{ matrix.config.runner }}


@@ -5,10 +5,10 @@ exclude = ["python"]
resolver = "2"
[workspace.dependencies]
lance = { "version" = "=0.8.20", "features" = ["dynamodb"] }
lance-index = { "version" = "=0.8.20" }
lance-linalg = { "version" = "=0.8.20" }
lance-testing = { "version" = "=0.8.20" }
lance = { "version" = "=0.8.17", "features" = ["dynamodb"] }
lance-index = { "version" = "=0.8.17" }
lance-linalg = { "version" = "=0.8.17" }
lance-testing = { "version" = "=0.8.17" }
# Note that this one does not include pyarrow
arrow = { version = "47.0.0", optional = false }
arrow-array = "47.0"


@@ -5,11 +5,10 @@
**Developer-friendly, serverless vector database for AI applications**
<a href='https://github.com/lancedb/vectordb-recipes/tree/main' target="_blank"><img alt='LanceDB' src='https://img.shields.io/badge/VectorDB_Recipes-100000?style=for-the-badge&logo=LanceDB&logoColor=white&labelColor=645cfb&color=645cfb'/></a>
<a href='https://lancedb.github.io/lancedb/' target="_blank"><img alt='lancedb' src='https://img.shields.io/badge/DOCS-100000?style=for-the-badge&logo=lancedb&logoColor=white&labelColor=645cfb&color=645cfb'/></a>
[![Medium](https://img.shields.io/badge/Medium-12100E?style=for-the-badge&logo=medium&logoColor=white)](https://blog.lancedb.com/)
[![Discord](https://img.shields.io/badge/Discord-%235865F2.svg?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/zMM32dvNtd)
[![Twitter](https://img.shields.io/badge/Twitter-%231DA1F2.svg?style=for-the-badge&logo=Twitter&logoColor=white)](https://twitter.com/lancedb)
<a href="https://lancedb.github.io/lancedb/">Documentation</a>
<a href="https://blog.lancedb.com/">Blog</a>
<a href="https://discord.gg/zMM32dvNtd">Discord</a>
<a href="https://twitter.com/lancedb">Twitter</a>
</p>


@@ -80,6 +80,7 @@ nav:
- Ingest Embedding Functions: embeddings/embedding_functions.md
- Available Functions: embeddings/default_embedding_functions.md
- Create Custom Embedding Functions: embeddings/api.md
- Example - Calculate CLIP Embeddings with Roboflow Inference: examples/image_embeddings_roboflow.md
- Example - Multi-lingual semantic search: notebooks/multi_lingual_example.ipynb
- Example - MultiModal CLIP Embeddings: notebooks/DisappearingEmbeddingFunction.ipynb
- 🔍 Python full-text search: fts.md
@@ -98,7 +99,6 @@ nav:
- YouTube Transcript Search: notebooks/youtube_transcript_search.ipynb
- Documentation QA Bot using LangChain: notebooks/code_qa_bot.ipynb
- Multimodal search using CLIP: notebooks/multimodal_search.ipynb
- Example - Calculate CLIP Embeddings with Roboflow Inference: examples/image_embeddings_roboflow.md
- Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md
- Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md
- 🌐 Javascript examples:

node/package-lock.json (generated, 18 lines changed)

@@ -316,18 +316,6 @@
"@jridgewell/sourcemap-codec": "^1.4.10"
}
},
"node_modules/@lancedb/vectordb-darwin-arm64": {
"version": "0.3.9",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.3.9.tgz",
"integrity": "sha512-irtAdfSRQDcfnMnB8T7D0atLFfu1MMZZ1JaxMKu24DDZ8e4IMYKUplxwvWni3241yA9yDE/pliRZCNQbQCEfrg==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"darwin"
]
},
"node_modules/@lancedb/vectordb-darwin-x64": {
"version": "0.3.9",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.3.9.tgz",
@@ -4868,12 +4856,6 @@
"@jridgewell/sourcemap-codec": "^1.4.10"
}
},
"@lancedb/vectordb-darwin-arm64": {
"version": "0.3.9",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.3.9.tgz",
"integrity": "sha512-irtAdfSRQDcfnMnB8T7D0atLFfu1MMZZ1JaxMKu24DDZ8e4IMYKUplxwvWni3241yA9yDE/pliRZCNQbQCEfrg==",
"optional": true
},
"@lancedb/vectordb-darwin-x64": {
"version": "0.3.9",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.3.9.tgz",


@@ -21,23 +21,38 @@ import type { EmbeddingFunction } from './embedding/embedding_function'
import { RemoteConnection } from './remote'
import { Query } from './query'
import { isEmbeddingFunction } from './embedding/embedding_function'
import {
type Connection, type CreateTableOptions, type Table,
type VectorIndexParams, type UpdateArgs, type UpdateSqlArgs,
type VectorIndex, type IndexStats,
type ConnectionOptions, WriteMode, type WriteOptions
} from './types'
import { toSQL } from './util'
export { type WriteMode }
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { databaseNew, databaseTableNames, databaseOpenTable, databaseDropTable, tableCreate, tableAdd, tableCreateVectorIndex, tableCountRows, tableDelete, tableUpdate, tableCleanupOldVersions, tableCompactFiles, tableListIndices, tableIndexStats } = require('../native.js')
const { databaseNew, databaseTableNames, databaseOpenTable, databaseDropTable, tableCreate, tableAdd, tableCreateVectorIndex, tableCountRows, tableDelete, tableCleanupOldVersions, tableCompactFiles, tableListIndices, tableIndexStats } = require('../native.js')
export { Query }
export type { EmbeddingFunction }
export { OpenAIEmbeddingFunction } from './embedding/openai'
export interface AwsCredentials {
accessKeyId: string
secretKey: string
sessionToken?: string
}
export interface ConnectionOptions {
uri: string
awsCredentials?: AwsCredentials
awsRegion?: string
// API key for the remote connections
apiKey?: string
// Region to connect
region?: string
// override the host for the remote connections
hostOverride?: string
}
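For illustration, a minimal sketch of connecting with the expanded `ConnectionOptions`. The bucket, project id, key, and region values are placeholders, and the `vectordb` package name is assumed from the build artifacts elsewhere in this diff:

```ts
import * as lancedb from 'vectordb' // assumed npm package name

async function main (): Promise<void> {
  // S3-backed database with explicit AWS credentials (placeholder values)
  const s3Conn = await lancedb.connect({
    uri: 's3://my-bucket/my-db',
    awsCredentials: { accessKeyId: 'AKIA...', secretKey: '...' }
  })

  // Remote database addressed by a db:// URI, authenticated via apiKey/region
  const remoteConn = await lancedb.connect({
    uri: 'db://my-project-1234',
    apiKey: 'sk_...',
    region: 'us-east-1'
  })
  console.log(s3Conn.uri, remoteConn.uri)
}

main().catch(console.error)
```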
function getAwsArgs (opts: ConnectionOptions): any[] {
const callArgs = []
const awsCredentials = opts.awsCredentials
@@ -55,6 +70,23 @@ function getAwsArgs (opts: ConnectionOptions): any[] {
return callArgs
}
export interface CreateTableOptions<T> {
// Name of Table
name: string
// Data to insert into the Table
data?: Array<Record<string, unknown>> | ArrowTable | undefined
// Optional Arrow Schema for this table
schema?: Schema | undefined
// Optional embedding function used to create embeddings
embeddingFunction?: EmbeddingFunction<T> | undefined
// WriteOptions for this operation
writeOptions?: WriteOptions | undefined
}
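A short sketch of the object-style `createTable` call, mirroring the new test suite later in this diff (top-level await and the `vectordb` package name are assumed):

```ts
import * as lancedb from 'vectordb' // assumed npm package name
import { Schema, Field, Int32, Utf8 } from 'apache-arrow'

const con = await lancedb.connect('./.lancedb')
// Create an empty table from an explicit Arrow schema (no rows yet)
const schema = new Schema([
  new Field('id', new Int32()),
  new Field('name', new Utf8())
])
const table = await con.createTable({ name: 'vectors', schema, data: [] })
console.log(table.name) // 'vectors'
```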
/**
* Connect to a LanceDB instance at the given URI
* @param uri The uri of the database.
@@ -83,6 +115,174 @@ export async function connect (arg: string | Partial<ConnectionOptions>): Promis
return new LocalConnection(db, opts)
}
/**
* A LanceDB Connection that allows you to open tables and create new ones.
*
* Connection could be local against filesystem or remote against a server.
*/
export interface Connection {
uri: string
tableNames(): Promise<string[]>
/**
* Open a table in the database.
*
* @param name The name of the table.
* @param embeddings An embedding function to use on this table
*/
openTable<T>(name: string, embeddings?: EmbeddingFunction<T>): Promise<Table<T>>
/**
* Creates a new Table, optionally initializing it with new data.
*
* @param {string} name - The name of the table.
* @param data - Array of Records to be inserted into the table
* @param schema - An Arrow Schema that describes this table's columns
* @param {EmbeddingFunction} embeddings - An embedding function to use on this table
* @param {WriteOptions} writeOptions - The write options to use when creating the table.
*/
createTable<T> ({ name, data, schema, embeddingFunction, writeOptions }: CreateTableOptions<T>): Promise<Table<T>>
/**
* Creates a new Table and initialize it with new data.
*
* @param {string} name - The name of the table.
* @param data - Non-empty Array of Records to be inserted into the table
*/
createTable (name: string, data: Array<Record<string, unknown>>): Promise<Table>
/**
* Creates a new Table and initialize it with new data.
*
* @param {string} name - The name of the table.
* @param data - Non-empty Array of Records to be inserted into the table
* @param {WriteOptions} options - The write options to use when creating the table.
*/
createTable (name: string, data: Array<Record<string, unknown>>, options: WriteOptions): Promise<Table>
/**
* Creates a new Table and initialize it with new data.
*
* @param {string} name - The name of the table.
* @param data - Non-empty Array of Records to be inserted into the table
* @param {EmbeddingFunction} embeddings - An embedding function to use on this table
*/
createTable<T> (name: string, data: Array<Record<string, unknown>>, embeddings: EmbeddingFunction<T>): Promise<Table<T>>
/**
* Creates a new Table and initialize it with new data.
*
* @param {string} name - The name of the table.
* @param data - Non-empty Array of Records to be inserted into the table
* @param {EmbeddingFunction} embeddings - An embedding function to use on this table
* @param {WriteOptions} options - The write options to use when creating the table.
*/
createTable<T> (name: string, data: Array<Record<string, unknown>>, embeddings: EmbeddingFunction<T>, options: WriteOptions): Promise<Table<T>>
/**
* Drop an existing table.
* @param name The name of the table to drop.
*/
dropTable(name: string): Promise<void>
}
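The positional overloads compose the same way; a hedged sketch, assuming an open `Connection` named `con`, a `WriteMode` import, and a hypothetical `EmbeddingFunction<string>` named `embedder`:

```ts
const rows = [
  { id: 1, vector: [0.1, 0.2], price: 10 },
  { id: 2, vector: [1.1, 1.2], price: 50 }
]
// Plain creation, then an explicit overwrite via WriteOptions
await con.createTable('items', rows)
await con.createTable('items', rows, { writeMode: WriteMode.Overwrite })
// With an embedding function (`embedder` is hypothetical)
await con.createTable('texts', [{ name: 'foo' }], embedder, { writeMode: WriteMode.Create })
```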
/**
* A LanceDB Table is a collection of Records. Each Record has one or more vector fields.
*/
export interface Table<T = number[]> {
name: string
/**
* Creates a search query to find the nearest neighbors of the given search term
* @param query The query search term
*/
search: (query: T) => Query<T>
/**
* Insert records into this Table.
*
* @param data Records to be inserted into the Table
* @return The number of rows added to the table
*/
add: (data: Array<Record<string, unknown>>) => Promise<number>
/**
* Insert records into this Table, replacing its contents.
*
* @param data Records to be inserted into the Table
* @return The number of rows added to the table
*/
overwrite: (data: Array<Record<string, unknown>>) => Promise<number>
/**
* Create an ANN index on a vector column of this Table.
*
* @param indexParams The parameters of this Index, @see VectorIndexParams.
*/
createIndex: (indexParams: VectorIndexParams) => Promise<any>
/**
* Returns the number of rows in this table.
*/
countRows: () => Promise<number>
/**
* Delete rows from this table.
*
* This can be used to delete a single row, many rows, all rows, or
* sometimes no rows (if your predicate matches nothing).
*
* @param filter A filter in the same format used by a sql WHERE clause. The
* filter must not be empty.
*
* @examples
*
* ```ts
* const con = await lancedb.connect("./.lancedb")
* const data = [
* {id: 1, vector: [1, 2]},
* {id: 2, vector: [3, 4]},
* {id: 3, vector: [5, 6]},
* ];
* const tbl = await con.createTable("my_table", data)
* await tbl.delete("id = 2")
* await tbl.countRows() // Returns 2
* ```
*
* If you have a list of values to delete, you can combine them into a
* stringified list and use the `IN` operator:
*
* ```ts
* const to_remove = [1, 5];
* await tbl.delete(`id IN (${to_remove.join(",")})`)
* await tbl.countRows() // Returns 1
* ```
*/
delete: (filter: string) => Promise<void>
/**
* List the indices on this table.
*/
listIndices: () => Promise<VectorIndex[]>
/**
* Get statistics about an index.
*/
indexStats: (indexUuid: string) => Promise<IndexStats>
}
export interface VectorIndex {
columns: string[]
name: string
uuid: string
}
export interface IndexStats {
numIndexedRows: number | null
numUnindexedRows: number | null
}
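Together these surface index metadata; a minimal sketch, assuming an open table `tbl` (the pattern mirrors the list-and-stats test later in this diff):

```ts
const indices = await tbl.listIndices()
for (const index of indices) {
  const stats = await tbl.indexStats(index.uuid)
  // Both counters may be null if statistics are unavailable for the index
  console.log(index.name, index.columns, stats.numIndexedRows, stats.numUnindexedRows)
}
```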
/**
* A connection to a LanceDB database.
*/
@@ -226,16 +426,6 @@ export class LocalTable<T = number[]> implements Table<T> {
return new Query(query, this._tbl, this._embeddings)
}
/**
* Creates a filter query to find all rows matching the specified criteria
* @param value The filter criteria (like SQL where clause syntax)
*/
filter (value: string): Query<T> {
return new Query(undefined, this._tbl, this._embeddings).filter(value)
}
where = this.filter
/**
* Insert records into this Table.
*
@@ -291,31 +481,6 @@ export class LocalTable<T = number[]> implements Table<T> {
return tableDelete.call(this._tbl, filter).then((newTable: any) => { this._tbl = newTable })
}
/**
* Update rows in this table.
*
* @param args see {@link UpdateArgs} and {@link UpdateSqlArgs} for more details
*
* @returns
*/
async update (args: UpdateArgs | UpdateSqlArgs): Promise<void> {
let filter: string | null
let updates: Record<string, string>
if ('valuesSql' in args) {
filter = args.where ?? null
updates = args.valuesSql
} else {
filter = args.where ?? null
updates = {}
for (const [key, value] of Object.entries(args.values)) {
updates[key] = toSQL(value)
}
}
return tableUpdate.call(this._tbl, filter, updates).then((newTable: any) => { this._tbl = newTable })
}
/**
* Clean up old versions of the table, freeing disk space.
*
@@ -430,6 +595,83 @@ export interface CompactionMetrics {
filesAdded: number
}
/// Config to build IVF_PQ index.
///
export interface IvfPQIndexConfig {
/**
* The column to be indexed
*/
column?: string
/**
* A unique name for the index
*/
index_name?: string
/**
* Metric type, L2 or Cosine
*/
metric_type?: MetricType
/**
* The number of partitions in this index
*/
num_partitions?: number
/**
* The max number of iterations for kmeans training.
*/
max_iters?: number
/**
* Train as optimized product quantization.
*/
use_opq?: boolean
/**
* Number of subvectors used to build the PQ code
*/
num_sub_vectors?: number
/**
* The number of bits used to represent one PQ centroid.
*/
num_bits?: number
/**
* Max number of iterations to train OPQ, if `use_opq` is true.
*/
max_opq_iters?: number
/**
* Replace an existing index with the same name if it exists.
*/
replace?: boolean
type: 'ivf_pq'
}
export type VectorIndexParams = IvfPQIndexConfig
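A sketch of an IVF_PQ index request using the knobs above, assuming an open table `tbl`; the small parameter values are chosen purely for illustration (they match the test suite later in this diff):

```ts
await tbl.createIndex({
  type: 'ivf_pq',
  column: 'vector',
  metric_type: MetricType.L2,
  num_partitions: 2,
  num_sub_vectors: 2,
  max_iters: 2,
  replace: true // drop and rebuild an index with the same name
})
```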
/**
* Write mode for writing a table.
*/
export enum WriteMode {
/** Create a new {@link Table}. */
Create = 'create',
/** Overwrite the existing {@link Table} if present. */
Overwrite = 'overwrite',
/** Append new data to the table. */
Append = 'append'
}
/**
* Write options when creating a Table.
*/
export interface WriteOptions {
/** A {@link WriteMode} to use on this operation */
writeMode?: WriteMode
}
export class DefaultWriteOptions implements WriteOptions {
writeMode = WriteMode.Create
}
@@ -438,3 +680,23 @@ export function isWriteOptions (value: any): value is WriteOptions {
return Object.keys(value).length === 1 &&
(value.writeMode === undefined || typeof value.writeMode === 'string')
}
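The guard accepts exactly one `writeMode` key, whether or not a value is set; a sketch of what it returns, derived from the predicate above:

```ts
isWriteOptions({})                                  // false: no writeMode key
isWriteOptions({ writeMode: WriteMode.Overwrite })  // true
isWriteOptions({ writeMode: undefined })            // true: key present, value optional
isWriteOptions(new DefaultWriteOptions())           // true
```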
/**
* Distance metrics type.
*/
export enum MetricType {
/**
* Euclidean distance
*/
L2 = 'l2',
/**
* Cosine distance
*/
Cosine = 'cosine',
/**
* Dot product
*/
Dot = 'dot'
}
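A metric plugs into the query builder alongside the other tuning parameters; a minimal sketch, assuming an open table `tbl` (the chaining mirrors the Query test later in this diff):

```ts
const results = await tbl.search([0.1, 0.3])
  .metricType(MetricType.Cosine)
  .limit(5)
  .nprobes(20)
  .refineFactor(100)
  .select(['name'])
  .execute()
```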


@@ -0,0 +1,180 @@
// Copyright 2023 LanceDB Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import { describe } from 'mocha'
import * as chai from 'chai'
import * as chaiAsPromised from 'chai-as-promised'
import { v4 as uuidv4 } from 'uuid'
import * as lancedb from '../index'
import { tmpdir } from 'os'
import * as fs from 'fs'
import * as path from 'path'
const assert = chai.assert
chai.use(chaiAsPromised)
describe('LanceDB AWS Integration test', function () {
it('s3+ddb schema is processed correctly', async function () {
this.timeout(15000)
// WARNING: specifying engine is NOT a publicly supported feature in lancedb yet
// THE API WILL CHANGE
const conn = await lancedb.connect('s3://lancedb-integtest?engine=ddb&ddbTableName=lancedb-integtest')
const data = [{ vector: Array(128).fill(1.0) }]
const tableName = uuidv4()
let table = await conn.createTable(tableName, data, { writeMode: lancedb.WriteMode.Overwrite })
const futs = [table.add(data), table.add(data), table.add(data), table.add(data), table.add(data)]
await Promise.allSettled(futs)
table = await conn.openTable(tableName)
assert.equal(await table.countRows(), 6)
})
})
describe('LanceDB Mirrored Store Integration test', function () {
it('s3://...?mirroredStore=... param is processed correctly', async function () {
this.timeout(600000)
const dir = tmpdir()
console.log(dir)
const conn = await lancedb.connect(`s3://lancedb-integtest?mirroredStore=${dir}`)
const data = Array(200).fill({ vector: Array(128).fill(1.0), id: 0 })
data.push(...Array(200).fill({ vector: Array(128).fill(1.0), id: 1 }))
data.push(...Array(200).fill({ vector: Array(128).fill(1.0), id: 2 }))
data.push(...Array(200).fill({ vector: Array(128).fill(1.0), id: 3 }))
const tableName = uuidv4()
// try create table and check if it's mirrored
const t = await conn.createTable(tableName, data, { writeMode: lancedb.WriteMode.Overwrite })
const mirroredPath = path.join(dir, `${tableName}.lance`)
fs.readdir(mirroredPath, { withFileTypes: true }, (err, files) => {
if (err != null) throw err
// there should be three dirs
assert.equal(files.length, 3)
assert.isTrue(files[0].isDirectory())
assert.isTrue(files[1].isDirectory())
fs.readdir(path.join(mirroredPath, '_transactions'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].name.endsWith('.txn'))
})
fs.readdir(path.join(mirroredPath, '_versions'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].name.endsWith('.manifest'))
})
fs.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].name.endsWith('.lance'))
})
})
// try create index and check if it's mirrored
await t.createIndex({ column: 'vector', type: 'ivf_pq' })
fs.readdir(mirroredPath, { withFileTypes: true }, (err, files) => {
if (err != null) throw err
// there should be four dirs
assert.equal(files.length, 4)
assert.isTrue(files[0].isDirectory())
assert.isTrue(files[1].isDirectory())
assert.isTrue(files[2].isDirectory())
// Two TXs now
fs.readdir(path.join(mirroredPath, '_transactions'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 2)
assert.isTrue(files[0].name.endsWith('.txn'))
assert.isTrue(files[1].name.endsWith('.txn'))
})
fs.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].name.endsWith('.lance'))
})
fs.readdir(path.join(mirroredPath, '_indices'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].isDirectory())
fs.readdir(path.join(mirroredPath, '_indices', files[0].name), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].isFile())
assert.isTrue(files[0].name.endsWith('.idx'))
})
})
})
// try delete and check if it's mirrored
await t.delete('id = 0')
fs.readdir(mirroredPath, { withFileTypes: true }, (err, files) => {
if (err != null) throw err
// there should be five dirs
assert.equal(files.length, 5)
assert.isTrue(files[0].isDirectory())
assert.isTrue(files[1].isDirectory())
assert.isTrue(files[2].isDirectory())
assert.isTrue(files[3].isDirectory())
assert.isTrue(files[4].isDirectory())
// Three TXs now
fs.readdir(path.join(mirroredPath, '_transactions'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 3)
assert.isTrue(files[0].name.endsWith('.txn'))
assert.isTrue(files[1].name.endsWith('.txn'))
})
fs.readdir(path.join(mirroredPath, 'data'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].name.endsWith('.lance'))
})
fs.readdir(path.join(mirroredPath, '_indices'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].isDirectory())
fs.readdir(path.join(mirroredPath, '_indices', files[0].name), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].isFile())
assert.isTrue(files[0].name.endsWith('.idx'))
})
})
fs.readdir(path.join(mirroredPath, '_deletions'), { withFileTypes: true }, (err, files) => {
if (err != null) throw err
assert.equal(files.length, 1)
assert.isTrue(files[0].name.endsWith('.arrow'))
})
})
})
})


@@ -14,25 +14,19 @@
import { Vector, tableFromIPC } from 'apache-arrow'
import { type EmbeddingFunction } from './embedding/embedding_function'
import { type MetricType } from './types'
import { type MetricType } from '.'
// eslint-disable-next-line @typescript-eslint/no-var-requires
// const { tableSearch } = require('../native.js')
const tableSearch = async function (args: any, arg2: any): Promise<any> {
return await new Promise((resolve, reject) => {
resolve('')
})
}
const { tableSearch } = require('../native.js')
/**
* A builder for nearest neighbor queries for LanceDB.
*/
export class Query<T = number[]> {
private readonly _query?: T
private readonly _query: T
private readonly _tbl?: any
private _queryVector?: number[]
private _limit?: number
private _limit: number
private _refineFactor?: number
private _nprobes: number
private _select?: string[]
@@ -41,10 +35,10 @@ export class Query<T = number[]> {
private _prefilter: boolean
protected readonly _embeddings?: EmbeddingFunction<T>
constructor (query?: T, tbl?: any, embeddings?: EmbeddingFunction<T>) {
constructor (query: T, tbl?: any, embeddings?: EmbeddingFunction<T>) {
this._tbl = tbl
this._query = query
this._limit = undefined
this._limit = 10
this._nprobes = 20
this._refineFactor = undefined
this._select = undefined
@@ -119,12 +113,10 @@ export class Query<T = number[]> {
* Execute the query and return the results as an Array of Objects
*/
async execute<T = Record<string, unknown>> (): Promise<T[]> {
if (this._query !== undefined) {
if (this._embeddings !== undefined) {
this._queryVector = (await this._embeddings.embed([this._query]))[0]
} else {
this._queryVector = this._query as number[]
}
if (this._embeddings !== undefined) {
this._queryVector = (await this._embeddings.embed([this._query]))[0]
} else {
this._queryVector = this._query as number[]
}
const isElectron = this.isElectron()
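The `prefilter` flag decides when the filter is applied relative to the ANN search; a hedged sketch mirroring the prefilter/postfilter test later in this diff (`table` and `queryVector` are placeholders):

```ts
// Post-filter (default): ANN search first, then filter; may return fewer than `limit` rows
let rows = await table.search(queryVector).limit(10).filter('id >= 10').prefilter(false).execute()
// Pre-filter: filter first, then search; fills the limit when enough rows match
rows = await table.search(queryVector).limit(10).filter('id >= 10').prefilter(true).execute()
```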


@@ -13,15 +13,11 @@
// limitations under the License.
import {
type Table, type VectorIndexParams,
type VectorIndex,
type IndexStats,
type UpdateArgs, type UpdateSqlArgs,
type Connection,
type ConnectionOptions, type CreateTableOptions,
type WriteOptions
} from '../types'
import { type EmbeddingFunction } from '../embedding/embedding_function'
type EmbeddingFunction, type Table, type VectorIndexParams, type Connection,
type ConnectionOptions, type CreateTableOptions, type VectorIndex,
type WriteOptions,
type IndexStats
} from '../index'
import { Query } from '../query'
import { Vector, Table as ArrowTable } from 'apache-arrow'
@@ -250,10 +246,6 @@ export class RemoteTable<T = number[]> implements Table<T> {
await this._client.post(`/v1/table/${this._name}/delete/`, { predicate: filter })
}
async update (args: UpdateArgs | UpdateSqlArgs): Promise<void> {
throw new Error('Not implemented')
}
async listIndices (): Promise<VectorIndex[]> {
const results = await this._client.post(`/v1/table/${this._name}/index/list/`)
return results.data.indexes?.map((index: any) => ({


@@ -0,0 +1,57 @@
// Copyright 2023 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import { describe } from 'mocha'
import { assert } from 'chai'
import { OpenAIEmbeddingFunction } from '../../embedding/openai'
import { isEmbeddingFunction } from '../../embedding/embedding_function'
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { OpenAIApi } = require('openai')
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { stub } = require('sinon')
describe('OpenAPIEmbeddings', function () {
const stubValue = {
data: {
data: [
{
embedding: Array(1536).fill(1.0)
},
{
embedding: Array(1536).fill(2.0)
}
]
}
}
describe('#embed', function () {
it('should create vector embeddings', async function () {
const openAIStub = stub(OpenAIApi.prototype, 'createEmbedding').returns(stubValue)
const f = new OpenAIEmbeddingFunction('text', 'sk-key')
const vectors = await f.embed(['abc', 'def'])
assert.isTrue(openAIStub.calledOnce)
assert.equal(vectors.length, 2)
assert.deepEqual(vectors[0], stubValue.data.data[0].embedding)
assert.deepEqual(vectors[1], stubValue.data.data[1].embedding)
})
})
describe('isEmbeddingFunction', function () {
it('should match the isEmbeddingFunction guard', function () {
assert.isTrue(isEmbeddingFunction(new OpenAIEmbeddingFunction('text', 'sk-key')))
})
})
})

node/src/test/io.ts (new file, 76 lines)

@@ -0,0 +1,76 @@
// Copyright 2023 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// IO tests
import { describe } from 'mocha'
import { assert } from 'chai'
import * as lancedb from '../index'
import { type ConnectionOptions } from '../index'
describe('LanceDB S3 client', function () {
if (process.env.TEST_S3_BASE_URL != null) {
const baseUri = process.env.TEST_S3_BASE_URL
it('should have a valid url', async function () {
const opts = { uri: `${baseUri}/valid_url` }
const table = await createTestDB(opts, 2, 20)
const con = await lancedb.connect(opts)
assert.equal(con.uri, opts.uri)
const results = await table.search([0.1, 0.3]).limit(5).execute()
assert.equal(results.length, 5)
}).timeout(10_000)
} else {
describe.skip('Skip S3 test', function () {})
}
if (process.env.TEST_S3_BASE_URL != null && process.env.TEST_AWS_ACCESS_KEY_ID != null && process.env.TEST_AWS_SECRET_ACCESS_KEY != null) {
const baseUri = process.env.TEST_S3_BASE_URL
it('use custom credentials', async function () {
const opts: ConnectionOptions = {
uri: `${baseUri}/custom_credentials`,
awsCredentials: {
accessKeyId: process.env.TEST_AWS_ACCESS_KEY_ID as string,
secretKey: process.env.TEST_AWS_SECRET_ACCESS_KEY as string
}
}
const table = await createTestDB(opts, 2, 20)
console.log(table)
const con = await lancedb.connect(opts)
console.log(con)
assert.equal(con.uri, opts.uri)
const results = await table.search([0.1, 0.3]).limit(5).execute()
assert.equal(results.length, 5)
}).timeout(10_000)
} else {
describe.skip('Skip S3 test', function () {})
}
})
async function createTestDB (opts: ConnectionOptions, numDimensions: number = 2, numRows: number = 2): Promise<lancedb.Table> {
const con = await lancedb.connect(opts)
const data = []
for (let i = 0; i < numRows; i++) {
const vector = []
for (let j = 0; j < numDimensions; j++) {
vector.push(i + (j * 0.1))
}
data.push({ id: i + 1, name: `name_${i}`, price: i + 10, is_active: (i % 2 === 0), vector })
}
return await con.createTable('vectors_2', data)
}

node/src/test/test.ts (new file, 557 lines)

@@ -0,0 +1,557 @@
// Copyright 2023 LanceDB Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import { describe } from 'mocha'
import { track } from 'temp'
import * as chai from 'chai'
import * as chaiAsPromised from 'chai-as-promised'
import * as lancedb from '../index'
import { type AwsCredentials, type EmbeddingFunction, MetricType, Query, WriteMode, DefaultWriteOptions, isWriteOptions, type LocalTable } from '../index'
import { FixedSizeList, Field, Int32, makeVector, Schema, Utf8, Table as ArrowTable, vectorFromArray, Float32 } from 'apache-arrow'
const expect = chai.expect
const assert = chai.assert
chai.use(chaiAsPromised)
describe('LanceDB client', function () {
describe('when creating a connection to lancedb', function () {
it('should have a valid url', async function () {
const uri = await createTestDB()
const con = await lancedb.connect(uri)
assert.equal(con.uri, uri)
})
it('should accept an options object', async function () {
const uri = await createTestDB()
const con = await lancedb.connect({ uri })
assert.equal(con.uri, uri)
})
it('should accept custom aws credentials', async function () {
const uri = await createTestDB()
const awsCredentials: AwsCredentials = {
accessKeyId: '',
secretKey: ''
}
const con = await lancedb.connect({ uri, awsCredentials })
assert.equal(con.uri, uri)
})
it('should return the existing table names', async function () {
const uri = await createTestDB()
const con = await lancedb.connect(uri)
assert.deepEqual(await con.tableNames(), ['vectors'])
})
})
describe('when querying an existing dataset', function () {
it('should open a table', async function () {
const uri = await createTestDB()
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
assert.equal(table.name, 'vectors')
})
it('execute a query', async function () {
const uri = await createTestDB()
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
const results = await table.search([0.1, 0.3]).execute()
assert.equal(results.length, 2)
assert.equal(results[0].price, 10)
const vector = results[0].vector as Float32Array
assert.approximately(vector[0], 0.0, 0.2)
assert.approximately(vector[1], 0.1, 0.3)
})
it('limits # of results', async function () {
const uri = await createTestDB()
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
const results = await table.search([0.1, 0.3]).limit(1).execute()
assert.equal(results.length, 1)
assert.equal(results[0].id, 1)
})
it('uses a filter / where clause', async function () {
// eslint-disable-next-line @typescript-eslint/explicit-function-return-type
const assertResults = (results: Array<Record<string, unknown>>) => {
assert.equal(results.length, 1)
assert.equal(results[0].id, 2)
}
const uri = await createTestDB()
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
let results = await table.search([0.1, 0.1]).filter('id == 2').execute()
assertResults(results)
results = await table.search([0.1, 0.1]).where('id == 2').execute()
assertResults(results)
})
it('should correctly process prefilter/postfilter', async function () {
const uri = await createTestDB(16, 300)
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
await table.createIndex({ type: 'ivf_pq', column: 'vector', num_partitions: 2, max_iters: 2, num_sub_vectors: 2 })
// post filter should return less than the limit
let results = await table.search(new Array(16).fill(0.1)).limit(10).filter('id >= 10').prefilter(false).execute()
assert.isTrue(results.length < 10)
// pre filter should return exactly the limit
results = await table.search(new Array(16).fill(0.1)).limit(10).filter('id >= 10').prefilter(true).execute()
assert.isTrue(results.length === 10)
})
it('select only a subset of columns', async function () {
const uri = await createTestDB()
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
const results = await table.search([0.1, 0.1]).select(['is_active']).execute()
assert.equal(results.length, 2)
// vector and _distance are always returned
assert.isDefined(results[0].vector)
assert.isDefined(results[0]._distance)
assert.isDefined(results[0].is_active)
assert.isUndefined(results[0].id)
assert.isUndefined(results[0].name)
assert.isUndefined(results[0].price)
})
})
describe('when creating a new dataset', function () {
it('create an empty table', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const schema = new Schema(
[new Field('id', new Int32()), new Field('name', new Utf8())]
)
const table = await con.createTable({ name: 'vectors', schema })
assert.equal(table.name, 'vectors')
assert.deepEqual(await con.tableNames(), ['vectors'])
})
it('create a table with a empty data array', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const schema = new Schema(
[new Field('id', new Int32()), new Field('name', new Utf8())]
)
const table = await con.createTable({ name: 'vectors', schema, data: [] })
assert.equal(table.name, 'vectors')
assert.deepEqual(await con.tableNames(), ['vectors'])
})
it('create a table from an Arrow Table', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const i32s = new Int32Array(new Array<number>(10))
const i32 = makeVector(i32s)
const data = new ArrowTable({ vector: i32 })
const table = await con.createTable({ name: 'vectors', data })
assert.equal(table.name, 'vectors')
assert.equal(await table.countRows(), 10)
assert.deepEqual(await con.tableNames(), ['vectors'])
})
it('creates a new table from javascript objects', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const data = [
{ id: 1, vector: [0.1, 0.2], price: 10 },
{ id: 2, vector: [1.1, 1.2], price: 50 }
]
const tableName = `vectors_${Math.floor(Math.random() * 100)}`
const table = await con.createTable(tableName, data)
assert.equal(table.name, tableName)
assert.equal(await table.countRows(), 2)
})
it('fails to create a new table when the vector column is missing', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const data = [
{ id: 1, price: 10 }
]
const create = con.createTable('missing_vector', data)
await expect(create).to.be.rejectedWith(Error, 'column \'vector\' is missing')
})
it('use overwrite flag to overwrite existing table', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const data = [
{ id: 1, vector: [0.1, 0.2], price: 10 },
{ id: 2, vector: [1.1, 1.2], price: 50 }
]
const tableName = 'overwrite'
await con.createTable(tableName, data, { writeMode: WriteMode.Create })
const newData = [
{ id: 1, vector: [0.1, 0.2], price: 10 },
{ id: 2, vector: [1.1, 1.2], price: 50 },
{ id: 3, vector: [1.1, 1.2], price: 50 }
]
await expect(con.createTable(tableName, newData)).to.be.rejectedWith(Error, 'already exists')
const table = await con.createTable(tableName, newData, { writeMode: WriteMode.Overwrite })
assert.equal(table.name, tableName)
assert.equal(await table.countRows(), 3)
})
it('appends records to an existing table ', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const data = [
{ id: 1, vector: [0.1, 0.2], price: 10, name: 'a' },
{ id: 2, vector: [1.1, 1.2], price: 50, name: 'b' }
]
const table = await con.createTable('vectors', data)
assert.equal(await table.countRows(), 2)
const dataAdd = [
{ id: 3, vector: [2.1, 2.2], price: 10, name: 'c' },
{ id: 4, vector: [3.1, 3.2], price: 50, name: 'd' }
]
await table.add(dataAdd)
assert.equal(await table.countRows(), 4)
})
it('overwrite all records in a table', async function () {
const uri = await createTestDB()
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
assert.equal(await table.countRows(), 2)
const dataOver = [
{ vector: [2.1, 2.2], price: 10, name: 'foo' },
{ vector: [3.1, 3.2], price: 50, name: 'bar' }
]
await table.overwrite(dataOver)
assert.equal(await table.countRows(), 2)
})
it('can delete records from a table', async function () {
const uri = await createTestDB()
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
assert.equal(await table.countRows(), 2)
await table.delete('price = 10')
assert.equal(await table.countRows(), 1)
})
})
describe('when searching an empty dataset', function () {
it('should not fail', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const schema = new Schema(
[new Field('vector', new FixedSizeList(128, new Field('float32', new Float32())))]
)
const table = await con.createTable({ name: 'vectors', schema })
const result = await table.search(Array(128).fill(0.1)).execute()
assert.isEmpty(result)
})
})
describe('when searching an empty-after-delete dataset', function () {
it('should not fail', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const schema = new Schema(
[new Field('vector', new FixedSizeList(128, new Field('float32', new Float32())))]
)
const table = await con.createTable({ name: 'vectors', schema })
await table.add([{ vector: Array(128).fill(0.1) }])
// https://github.com/lancedb/lance/issues/1635
await table.delete('true')
const result = await table.search(Array(128).fill(0.1)).execute()
assert.isEmpty(result)
})
})
describe('when creating a vector index', function () {
it('overwrite all records in a table', async function () {
const uri = await createTestDB(32, 300)
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
await table.createIndex({ type: 'ivf_pq', column: 'vector', num_partitions: 2, max_iters: 2, num_sub_vectors: 2 })
}).timeout(10_000) // Timeout is high partially because GH macos runner is pretty slow
it('replace an existing index', async function () {
const uri = await createTestDB(16, 300)
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
await table.createIndex({ type: 'ivf_pq', column: 'vector', num_partitions: 2, max_iters: 2, num_sub_vectors: 2 })
// Replace should fail if the index already exists
await expect(table.createIndex({
type: 'ivf_pq', column: 'vector', num_partitions: 2, max_iters: 2, num_sub_vectors: 2, replace: false
})
).to.be.rejectedWith('LanceError(Index)')
// Default replace = true
await table.createIndex({ type: 'ivf_pq', column: 'vector', num_partitions: 2, max_iters: 2, num_sub_vectors: 2 })
}).timeout(50_000)
it('it should fail when the column is not a vector', async function () {
const uri = await createTestDB(32, 300)
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
const createIndex = table.createIndex({ type: 'ivf_pq', column: 'name', num_partitions: 2, max_iters: 2, num_sub_vectors: 2 })
await expect(createIndex).to.be.rejectedWith(/VectorIndex requires the column data type to be fixed size list of float32s/)
})
it('should fail when num_partitions is invalid', async function () {
const uri = await createTestDB(32, 300)
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
const createIndex = table.createIndex({ type: 'ivf_pq', column: 'name', num_partitions: -1, max_iters: 2, num_sub_vectors: 2 })
await expect(createIndex).to.be.rejectedWith('num_partitions: must be > 0')
})
it('should be able to list index and stats', async function () {
const uri = await createTestDB(32, 300)
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
await table.createIndex({ type: 'ivf_pq', column: 'vector', num_partitions: 2, max_iters: 2, num_sub_vectors: 2 })
const indices = await table.listIndices()
expect(indices).to.have.lengthOf(1)
expect(indices[0].name).to.equal('vector_idx')
expect(indices[0].uuid).to.not.be.equal(undefined)
expect(indices[0].columns).to.have.lengthOf(1)
expect(indices[0].columns[0]).to.equal('vector')
const stats = await table.indexStats(indices[0].uuid)
expect(stats.numIndexedRows).to.equal(300)
expect(stats.numUnindexedRows).to.equal(0)
}).timeout(50_000)
})
describe('when using a custom embedding function', function () {
class TextEmbedding implements EmbeddingFunction<string> {
sourceColumn: string
constructor (targetColumn: string) {
this.sourceColumn = targetColumn
}
_embedding_map = new Map<string, number[]>([
['foo', [2.1, 2.2]],
['bar', [3.1, 3.2]]
])
async embed (data: string[]): Promise<number[][]> {
return data.map(datum => this._embedding_map.get(datum) ?? [0.0, 0.0])
}
}
it('should encode the original data into embeddings', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const embeddings = new TextEmbedding('name')
const data = [
{ price: 10, name: 'foo' },
{ price: 50, name: 'bar' }
]
const table = await con.createTable('vectors', data, embeddings, { writeMode: WriteMode.Create })
const results = await table.search('foo').execute()
assert.equal(results.length, 2)
})
it('should create embeddings for Arrow Table', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const embeddingFunction = new TextEmbedding('name')
const names = vectorFromArray(['foo', 'bar'], new Utf8())
const data = new ArrowTable({ name: names })
const table = await con.createTable({ name: 'vectors', data, embeddingFunction })
assert.equal(table.name, 'vectors')
const results = await table.search('foo').execute()
assert.equal(results.length, 2)
})
})
})
describe('Remote LanceDB client', function () {
describe('when the server is not reachable', function () {
it('produces a network error', async function () {
const con = await lancedb.connect({
uri: 'db://test-1234',
region: 'asdfasfasfdf',
apiKey: 'some-api-key'
})
// GET
try {
await con.tableNames()
} catch (err) {
expect(err).to.have.property('message', 'Network Error: getaddrinfo ENOTFOUND test-1234.asdfasfasfdf.api.lancedb.com')
}
// POST
try {
await con.createTable({ name: 'vectors', schema: new Schema([]) })
} catch (err) {
expect(err).to.have.property('message', 'Network Error: getaddrinfo ENOTFOUND test-1234.asdfasfasfdf.api.lancedb.com')
}
// Search
const table = await con.openTable('vectors')
try {
await table.search([0.1, 0.3]).execute()
} catch (err) {
expect(err).to.have.property('message', 'Network Error: getaddrinfo ENOTFOUND test-1234.asdfasfasfdf.api.lancedb.com')
}
})
})
})
describe('Query object', function () {
it('sets custom parameters', async function () {
const query = new Query([0.1, 0.3])
.limit(1)
.metricType(MetricType.Cosine)
.refineFactor(100)
.select(['a', 'b'])
.nprobes(20) as Record<string, any>
assert.equal(query._limit, 1)
assert.equal(query._metricType, MetricType.Cosine)
assert.equal(query._refineFactor, 100)
assert.equal(query._nprobes, 20)
assert.deepEqual(query._select, ['a', 'b'])
})
})
async function createTestDB (numDimensions: number = 2, numRows: number = 2): Promise<string> {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const data = []
for (let i = 0; i < numRows; i++) {
const vector = []
for (let j = 0; j < numDimensions; j++) {
vector.push(i + (j * 0.1))
}
data.push({ id: i + 1, name: `name_${i}`, price: i + 10, is_active: (i % 2 === 0), vector })
}
await con.createTable('vectors', data)
return dir
}
describe('Drop table', function () {
it('drop a table', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const data = [
{ price: 10, name: 'foo', vector: [1, 2, 3] },
{ price: 50, name: 'bar', vector: [4, 5, 6] }
]
await con.createTable('t1', data)
await con.createTable('t2', data)
assert.deepEqual(await con.tableNames(), ['t1', 't2'])
await con.dropTable('t1')
assert.deepEqual(await con.tableNames(), ['t2'])
})
})
describe('WriteOptions', function () {
context('#isWriteOptions', function () {
it('should not match empty object', function () {
assert.equal(isWriteOptions({}), false)
})
it('should match write options', function () {
assert.equal(isWriteOptions({ writeMode: WriteMode.Create }), true)
})
it('should match undefined write mode', function () {
assert.equal(isWriteOptions({ writeMode: undefined }), true)
})
it('should match default write options', function () {
assert.equal(isWriteOptions(new DefaultWriteOptions()), true)
})
})
})
describe('Compact and cleanup', function () {
it('can cleanup after compaction', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const data = [
{ price: 10, name: 'foo', vector: [1, 2, 3] },
{ price: 50, name: 'bar', vector: [4, 5, 6] }
]
const table = await con.createTable('t1', data) as LocalTable
const newData = [
{ price: 30, name: 'baz', vector: [7, 8, 9] }
]
await table.add(newData)
const compactionMetrics = await table.compactFiles({
numThreads: 2
})
assert.equal(compactionMetrics.fragmentsRemoved, 2)
assert.equal(compactionMetrics.fragmentsAdded, 1)
assert.equal(await table.countRows(), 3)
await table.cleanupOldVersions()
assert.equal(await table.countRows(), 3)
// should have no effect, but this validates the arguments are parsed.
await table.compactFiles({
targetRowsPerFragment: 1024 * 10,
maxRowsPerGroup: 1024,
materializeDeletions: true,
materializeDeletionsThreshold: 0.5,
numThreads: 2
})
const cleanupMetrics = await table.cleanupOldVersions(0, true)
assert.isAtLeast(cleanupMetrics.bytesRemoved, 1)
assert.isAtLeast(cleanupMetrics.oldVersions, 1)
assert.equal(await table.countRows(), 3)
})
})


@@ -1,375 +0,0 @@
import {
type Schema,
type Table as ArrowTable
} from 'apache-arrow'
import { type Literal } from './util'
import type { EmbeddingFunction } from './embedding/embedding_function'
import { type Query } from './query'
export interface AwsCredentials {
accessKeyId: string
secretKey: string
sessionToken?: string
}
/**
* Write options when creating a Table.
*/
export interface WriteOptions {
/** A {@link WriteMode} to use on this operation */
writeMode?: WriteMode
}
/**
* Write mode for writing a table.
*/
export enum WriteMode {
/** Create a new {@link Table}. */
Create = 'create',
/** Overwrite the existing {@link Table} if present. */
Overwrite = 'overwrite',
/** Append new data to the table. */
Append = 'append'
}
/**
* A LanceDB Connection that allows you to open tables and create new ones.
*
* Connection could be local against filesystem or remote against a server.
*/
export interface Connection {
uri: string
tableNames(): Promise<string[]>
/**
* Open a table in the database.
*
* @param name The name of the table.
* @param embeddings An embedding function to use on this table
*/
openTable<T>(name: string, embeddings?: EmbeddingFunction<T>): Promise<Table<T>>
/**
* Creates a new Table, optionally initializing it with new data.
*
* @param {string} name - The name of the table.
* @param data - Array of Records to be inserted into the table
* @param schema - An Arrow Schema that describes this table's columns
* @param {EmbeddingFunction} embeddings - An embedding function to use on this table
* @param {WriteOptions} writeOptions - The write options to use when creating the table.
*/
createTable<T> ({ name, data, schema, embeddingFunction, writeOptions }: CreateTableOptions<T>): Promise<Table<T>>
/**
* Creates a new Table and initialize it with new data.
*
* @param {string} name - The name of the table.
* @param data - Non-empty Array of Records to be inserted into the table
*/
createTable (name: string, data: Array<Record<string, unknown>>): Promise<Table>
/**
* Creates a new Table and initialize it with new data.
*
* @param {string} name - The name of the table.
* @param data - Non-empty Array of Records to be inserted into the table
* @param {WriteOptions} options - The write options to use when creating the table.
*/
createTable (name: string, data: Array<Record<string, unknown>>, options: WriteOptions): Promise<Table>
/**
* Creates a new Table and initialize it with new data.
*
* @param {string} name - The name of the table.
* @param data - Non-empty Array of Records to be inserted into the table
* @param {EmbeddingFunction} embeddings - An embedding function to use on this table
*/
createTable<T> (name: string, data: Array<Record<string, unknown>>, embeddings: EmbeddingFunction<T>): Promise<Table<T>>
/**
* Creates a new Table and initialize it with new data.
*
* @param {string} name - The name of the table.
* @param data - Non-empty Array of Records to be inserted into the table
* @param {EmbeddingFunction} embeddings - An embedding function to use on this table
* @param {WriteOptions} options - The write options to use when creating the table.
*/
createTable<T> (name: string, data: Array<Record<string, unknown>>, embeddings: EmbeddingFunction<T>, options: WriteOptions): Promise<Table<T>>
/**
* Drop an existing table.
* @param name The name of the table to drop.
*/
dropTable(name: string): Promise<void>
}
export interface CreateTableOptions<T> {
// Name of Table
name: string
// Data to insert into the Table
data?: Array<Record<string, unknown>> | ArrowTable | undefined
// Optional Arrow Schema for this table
schema?: Schema | undefined
// Optional embedding function used to create embeddings
embeddingFunction?: EmbeddingFunction<T> | undefined
// WriteOptions for this operation
writeOptions?: WriteOptions | undefined
}
export interface ConnectionOptions {
uri: string
awsCredentials?: AwsCredentials
awsRegion?: string
// API key for the remote connections
apiKey?: string
// Region to connect
region?: string
// override the host for the remote connections
hostOverride?: string
}
/**
* Distance metrics type.
*/
export enum MetricType {
/**
* Euclidean distance
*/
L2 = 'l2',
/**
* Cosine distance
*/
Cosine = 'cosine',
/**
* Dot product
*/
Dot = 'dot'
}
/// Config to build IVF_PQ index.
///
export interface IvfPQIndexConfig {
/**
* The column to be indexed
*/
column?: string
/**
* A unique name for the index
*/
index_name?: string
/**
* Metric type, L2 or Cosine
*/
metric_type?: MetricType
/**
* The number of partitions in this index
*/
num_partitions?: number
/**
* The max number of iterations for kmeans training.
*/
max_iters?: number
/**
* Train as optimized product quantization.
*/
use_opq?: boolean
/**
* Number of subvectors used to build the PQ code
*/
num_sub_vectors?: number
/**
* The number of bits used to represent one PQ centroid.
*/
num_bits?: number
/**
* Max number of iterations to train OPQ, if `use_opq` is true.
*/
max_opq_iters?: number
/**
* Replace an existing index with the same name if it exists.
*/
replace?: boolean
type: 'ivf_pq'
}
export type VectorIndexParams = IvfPQIndexConfig
/**
* A LanceDB Table is a collection of Records. Each Record has one or more vector fields.
*/
export interface Table<T = number[]> {
name: string
/**
* Creates a search query to find the nearest neighbors of the given search term
* @param query The query search term
*/
search: (query: T) => Query<T>
/**
* Insert records into this Table.
*
* @param data Records to be inserted into the Table
* @return The number of rows added to the table
*/
add: (data: Array<Record<string, unknown>>) => Promise<number>
/**
* Insert records into this Table, replacing its contents.
*
* @param data Records to be inserted into the Table
* @return The number of rows added to the table
*/
overwrite: (data: Array<Record<string, unknown>>) => Promise<number>
/**
* Create an ANN index on a vector column of this Table.
*
* @param indexParams The parameters of this Index, @see VectorIndexParams.
*/
createIndex: (indexParams: VectorIndexParams) => Promise<any>
/**
* Returns the number of rows in this table.
*/
countRows: () => Promise<number>
/**
* Delete rows from this table.
*
* This can be used to delete a single row, many rows, all rows, or
* sometimes no rows (if your predicate matches nothing).
*
* @param filter A filter in the same format used by a sql WHERE clause. The
* filter must not be empty.
*
* @examples
*
* ```ts
* const con = await lancedb.connect("./.lancedb")
* const data = [
* {id: 1, vector: [1, 2]},
* {id: 2, vector: [3, 4]},
* {id: 3, vector: [5, 6]},
* ];
* const tbl = await con.createTable("my_table", data)
* await tbl.delete("id = 2")
* await tbl.countRows() // Returns 2
* ```
*
* If you have a list of values to delete, you can combine them into a
* stringified list and use the `IN` operator:
*
* ```ts
* const to_remove = [1, 5];
* await tbl.delete(`id IN (${to_remove.join(",")})`)
* await tbl.countRows() // Returns 1
* ```
*/
delete: (filter: string) => Promise<void>
/**
* Update rows in this table.
*
* This can be used to update a single row, many rows, all rows, or
* sometimes no rows (if your predicate matches nothing).
*
* @param args see {@link UpdateArgs} and {@link UpdateSqlArgs} for more details
*
* @examples
*
* ```ts
* const con = await lancedb.connect("./.lancedb")
* const data = [
* {id: 1, vector: [3, 3], name: 'Ye'},
* {id: 2, vector: [4, 4], name: 'Mike'},
* ];
* const tbl = await con.createTable("my_table", data)
*
* await tbl.update({
* filter: "id = 2",
* updates: { vector: [2, 2], name: "Michael" },
* })
*
* let results = await tbl.search([1, 1]).execute();
* // Returns [
* // {id: 2, vector: [2, 2], name: 'Michael'}
* // {id: 1, vector: [3, 3], name: 'Ye'}
* // ]
* ```
*
*/
update: (args: UpdateArgs | UpdateSqlArgs) => Promise<void>
/**
* List the indices on this table.
*/
listIndices: () => Promise<VectorIndex[]>
/**
* Get statistics about an index.
*/
indexStats: (indexUuid: string) => Promise<IndexStats>
}
export interface UpdateArgs {
/**
* A filter in the same format used by a sql WHERE clause. The filter may be empty,
* in which case all rows will be updated.
*/
where?: string
/**
* A key-value map of updates. The keys are the column names, and the values are the
* new values to set
*/
values: Record<string, Literal>
}
export interface UpdateSqlArgs {
/**
* A filter in the same format used by a sql WHERE clause. The filter may be empty,
* in which case all rows will be updated.
*/
where?: string
/**
* A key-value map of updates. The keys are the column names, and the values are the
* new values to set as SQL expressions.
*/
valuesSql: Record<string, string>
}
export interface VectorIndex {
columns: string[]
name: string
uuid: string
}
export interface IndexStats {
numIndexedRows: number | null
numUnindexedRows: number | null
}
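This file sits on the removed side of the compare, but the update contract it defined pairs a `where` predicate with either literal `values` (converted through `toSQL`) or raw `valuesSql` expressions; a hedged sketch against this interface, assuming a table `tbl` that implements it:

```ts
// UpdateArgs: literal values are converted to SQL via toSQL
await tbl.update({ where: 'id = 2', values: { name: 'Michael', vector: [2, 2] } })
// UpdateSqlArgs: values are raw SQL expressions evaluated per row
await tbl.update({ where: 'price > 10', valuesSql: { price: 'price + 1' } })
```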


@@ -1,44 +0,0 @@
// Copyright 2023 LanceDB Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
export type Literal = string | number | boolean | null | Date | Literal[]
export function toSQL (value: Literal): string {
if (typeof value === 'string') {
return `'${value}'`
}
if (typeof value === 'number') {
return value.toString()
}
if (typeof value === 'boolean') {
return value ? 'TRUE' : 'FALSE'
}
if (value === null) {
return 'NULL'
}
if (value instanceof Date) {
return `'${value.toISOString()}'`
}
if (Array.isArray(value)) {
return `[${value.map(toSQL).join(', ')}]`
}
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
throw new Error(`Unsupported value type: ${typeof value} value: (${value})`)
}
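
For reference, each branch of `toSQL` maps one `Literal` variant to a SQL fragment, recursing through arrays. Illustrative outputs, matching the code as written:

```ts
toSQL('foo')                  // "'foo'"  (embedded single quotes are not escaped)
toSQL(42)                     // "42"
toSQL(true)                   // "TRUE"
toSQL(null)                   // "NULL"
toSQL(new Date('2023-12-07')) // "'2023-12-07T00:00:00.000Z'"
toSQL([1, [2, 3]])            // "[1, [2, 3]]"
```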

View File

@@ -56,7 +56,7 @@ class RemoteDBConnection(DBConnection):
self._loop = asyncio.get_event_loop()
def __repr__(self) -> str:
return f"RemoteConnect(name={self.db_name})"
return f"RemoveConnect(name={self.db_name})"
@override
def table_names(
@@ -167,10 +167,10 @@ class RemoteDBConnection(DBConnection):
Can create with list of tuples or dictionaries:
>>> import lancedb
>>> db = lancedb.connect("db://...", api_key="...", region="...") # doctest: +SKIP
>>> db = lancedb.connect("db://test-project-8f45eb")
>>> data = [{"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7},
... {"vector": [0.2, 1.8], "lat": 40.1, "long": -74.1}]
>>> db.create_table("my_table", data) # doctest: +SKIP
>>> db.create_table("my_table", data)
LanceTable(my_table)
You can also pass a pandas DataFrame:
@@ -181,7 +181,7 @@ class RemoteDBConnection(DBConnection):
... "lat": [45.5, 40.1],
... "long": [-122.7, -74.1]
... })
>>> db.create_table("table2", data) # doctest: +SKIP
>>> db.create_table("table2", data)
LanceTable(table2)
>>> custom_schema = pa.schema([
@@ -189,7 +189,7 @@ class RemoteDBConnection(DBConnection):
... pa.field("lat", pa.float32()),
... pa.field("long", pa.float32())
... ])
>>> db.create_table("table3", data, schema = custom_schema) # doctest: +SKIP
>>> db.create_table("table3", data, schema = custom_schema)
LanceTable(table3)
It is also possible to create an table from `[Iterable[pa.RecordBatch]]`:
@@ -211,7 +211,7 @@ class RemoteDBConnection(DBConnection):
... pa.field("item", pa.utf8()),
... pa.field("price", pa.float32()),
... ])
>>> db.create_table("table4", make_batches(), schema=schema) # doctest: +SKIP
>>> db.create_table("table4", make_batches(), schema=schema)
LanceTable(table4)
"""

View File

@@ -85,7 +85,7 @@ class RemoteTable(Table):
>>> import lancedb
>>> import uuid
>>> from lancedb.schema import vector
>>> db = lancedb.connect("db://...", api_key="...", region="...") # doctest: +SKIP
>>> conn = lancedb.connect("db://...", api_key="...", region="...")
>>> table_name = uuid.uuid4().hex
>>> schema = pa.schema(
... [
@@ -94,11 +94,11 @@ class RemoteTable(Table):
... pa.field("s", pa.string(), False),
... ]
... )
>>> table = db.create_table( # doctest: +SKIP
... table_name, # doctest: +SKIP
... schema=schema, # doctest: +SKIP
... )
>>> table.create_index("L2", "vector") # doctest: +SKIP
>>> table = conn.create_table(
...     table_name,
...     schema=schema,
... )
>>> table.create_index("L2", "vector")
"""
index_type = "vector"
@@ -173,22 +173,22 @@ class RemoteTable(Table):
Examples
--------
>>> import lancedb
>>> db = lancedb.connect("db://...", api_key="...", region="...") # doctest: +SKIP
>>> db = lancedb.connect("db://...", api_key="...", region="...")
>>> data = [
... {"original_width": 100, "caption": "bar", "vector": [0.1, 2.3, 4.5]},
... {"original_width": 2000, "caption": "foo", "vector": [0.5, 3.4, 1.3]},
... {"original_width": 3000, "caption": "test", "vector": [0.3, 6.2, 2.6]}
... ]
>>> table = db.create_table("my_table", data) # doctest: +SKIP
>>> table = db.create_table("my_table", data)
>>> query = [0.4, 1.4, 2.4]
>>> (table.search(query, vector_column_name="vector") # doctest: +SKIP
... .where("original_width > 1000", prefilter=True) # doctest: +SKIP
... .select(["caption", "original_width"]) # doctest: +SKIP
... .limit(2) # doctest: +SKIP
... .to_pandas()) # doctest: +SKIP
caption original_width vector _distance # doctest: +SKIP
0 foo 2000 [0.5, 3.4, 1.3] 5.220000 # doctest: +SKIP
1 test 3000 [0.3, 6.2, 2.6] 23.089996 # doctest: +SKIP
>>> (table.search(query, vector_column_name="vector")
... .where("original_width > 1000", prefilter=True)
... .select(["caption", "original_width"])
... .limit(2)
... .to_pandas())
caption original_width vector _distance
0 foo 2000 [0.5, 3.4, 1.3] 5.220000
1 test 3000 [0.3, 6.2, 2.6] 23.089996
Parameters
----------
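
The same prefiltered query reads almost identically in the node SDK; a sketch, with builder names taken from the TypeScript interface earlier in this diff (treating `.prefilter()` as an assumed builder flag backed by the `_prefilter` field the bindings read):

```ts
const results = await tbl
  .search([0.4, 1.4, 2.4])
  .filter('original_width > 1000')
  .prefilter(true)
  .select(['caption', 'original_width'])
  .limit(2)
  .execute()
```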
@@ -246,28 +246,30 @@ class RemoteTable(Table):
... {"x": 2, "vector": [3, 4]},
... {"x": 3, "vector": [5, 6]}
... ]
>>> db = lancedb.connect("db://...", api_key="...", region="...") # doctest: +SKIP
>>> table = db.create_table("my_table", data) # doctest: +SKIP
>>> table.search([10,10]).to_pandas() # doctest: +SKIP
x vector _distance # doctest: +SKIP
0 3 [5.0, 6.0] 41.0 # doctest: +SKIP
1 2 [3.0, 4.0] 85.0 # doctest: +SKIP
2 1 [1.0, 2.0] 145.0 # doctest: +SKIP
>>> table.delete("x = 2") # doctest: +SKIP
>>> table.search([10,10]).to_pandas() # doctest: +SKIP
x vector _distance # doctest: +SKIP
0 3 [5.0, 6.0] 41.0 # doctest: +SKIP
1 1 [1.0, 2.0] 145.0 # doctest: +SKIP
>>> db = lancedb.connect("db://...", api_key="...", region="...")
>>> table = db.create_table("my_table", data)
>>> table.search([10,10]).to_pandas()
x vector _distance
0 3 [5.0, 6.0] 41.0
1 2 [3.0, 4.0] 85.0
2 1 [1.0, 2.0] 145.0
>>> table.delete("x = 2")
>>> table.search([10,10]).to_pandas()
x vector _distance
0 3 [5.0, 6.0] 41.0
1 1 [1.0, 2.0] 145.0
If you have a list of values to delete, you can combine them into a
stringified list and use the `IN` operator:
>>> to_remove = [1, 3] # doctest: +SKIP
>>> to_remove = ", ".join([str(v) for v in to_remove]) # doctest: +SKIP
>>> table.delete(f"x IN ({to_remove})") # doctest: +SKIP
>>> table.search([10,10]).to_pandas() # doctest: +SKIP
x vector _distance # doctest: +SKIP
0 2 [3.0, 4.0] 85.0 # doctest: +SKIP
>>> to_remove = [1, 3]
>>> to_remove = ", ".join([str(v) for v in to_remove])
>>> to_remove
'1, 3'
>>> table.delete(f"x IN ({to_remove})")
>>> table.search([10,10]).to_pandas()
x vector _distance
0 2 [3.0, 4.0] 85.0
"""
payload = {"predicate": predicate}
self._conn._loop.run_until_complete(

View File

@@ -785,7 +785,7 @@ class LanceTable(Table):
and also the "_distance" column which is the distance between the query
vector and the returned vector.
"""
register_event("search_table")
register_event("search")
return LanceQueryBuilder.create(
self, query, query_type, vector_column_name=vector_column_name
)
@@ -906,8 +906,6 @@ class LanceTable(Table):
f"Table {name} does not exist."
f"Please first call db.create_table({name}, data)"
)
register_event("open_table")
return tbl
def delete(self, where: str):

View File

@@ -64,10 +64,8 @@ class _Events:
Initializes the Events object with default values for events, rate_limit, and metadata.
"""
self.events = [] # events list
self.throttled_event_names = ["search_table"]
self.throttled_events = set()
self.max_events = 5 # max events to store in memory
self.rate_limit = 60.0 * 5 # rate limit (seconds)
self.max_events = 25 # max events to store in memory
self.rate_limit = 60.0 # rate limit (seconds)
self.time = 0.0
if is_git_dir():
@@ -114,21 +112,18 @@ class _Events:
return
if (
len(self.events) < self.max_events
): # Events list limited to self.max_events (drop any events past this)
): # Events list limited to 25 events (drop any events past this)
params.update(self.metadata)
event = {
"event": event_name,
"properties": params,
"timestamp": datetime.datetime.now(
tz=datetime.timezone.utc
).isoformat(),
"distinct_id": CONFIG["uuid"],
}
if event_name not in self.throttled_event_names:
self.events.append(event)
elif event_name not in self.throttled_events:
self.throttled_events.add(event_name)
self.events.append(event)
self.events.append(
{
"event": event_name,
"properties": params,
"timestamp": datetime.datetime.now(
tz=datetime.timezone.utc
).isoformat(),
"distinct_id": CONFIG["uuid"],
}
)
# Check rate limit
t = time.time()
@@ -140,6 +135,7 @@ class _Events:
"distinct_id": CONFIG["uuid"], # posthog needs this to accepts the event
"batch": self.events,
}
# POST equivalent to requests.post(self.url, json=data).
# threaded request is used to avoid blocking, retries are disabled, and verbose is disabled
# to avoid any possible disruption in the console.
@@ -154,7 +150,6 @@ class _Events:
# Flush & Reset
self.events = []
self.throttled_events = set()
self.time = t
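
One side of this hunk implements a throttling rule worth restating: ordinary events are queued until `max_events` is hit, while a name listed in `throttled_event_names` is queued at most once per flush window, and the throttle set resets on flush. A minimal sketch of that rule, in TypeScript for consistency with the rest of this document (names mirror the Python attributes):

```ts
const throttledEventNames = new Set(['search_table'])
let throttledEvents = new Set<string>()
const maxEvents = 5
let events: Array<{ event: string }> = []

function record (eventName: string): void {
  if (events.length >= maxEvents) return // drop anything past the cap
  if (!throttledEventNames.has(eventName)) {
    events.push({ event: eventName })
  } else if (!throttledEvents.has(eventName)) {
    // throttled names are recorded at most once per flush window
    throttledEvents.add(eventName)
    events.push({ event: eventName })
  }
}

function flush (): void {
  // POST the batch, then reset both the queue and the throttle set
  events = []
  throttledEvents = new Set()
}
```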

View File

@@ -237,7 +237,6 @@ fn main(mut cx: ModuleContext) -> NeonResult<()> {
cx.export_function("tableAdd", JsTable::js_add)?;
cx.export_function("tableCountRows", JsTable::js_count_rows)?;
cx.export_function("tableDelete", JsTable::js_delete)?;
cx.export_function("tableUpdate", JsTable::js_update)?;
cx.export_function("tableCleanupOldVersions", JsTable::js_cleanup)?;
cx.export_function("tableCompactFiles", JsTable::js_compact)?;
cx.export_function("tableListIndices", JsTable::js_list_indices)?;

View File

@@ -23,14 +23,8 @@ impl JsQuery {
let query_obj = cx.argument::<JsObject>(0)?;
let limit = query_obj
.get_opt::<JsNumber, _, _>(&mut cx, "_limit")?
.map(|value| {
let limit = value.value(&mut cx) as u64;
if limit <= 0 {
panic!("Limit must be a positive integer");
}
limit
});
.get::<JsNumber, _, _>(&mut cx, "_limit")?
.value(&mut cx);
let select = query_obj
.get_opt::<JsArray, _, _>(&mut cx, "_select")?
.map(|arr| {
@@ -54,9 +48,7 @@ impl JsQuery {
.map(|s| s.value(&mut cx))
.map(|s| MetricType::try_from(s.as_str()).unwrap());
let prefilter = query_obj
.get::<JsBoolean, _, _>(&mut cx, "_prefilter")?
.value(&mut cx);
let prefilter = query_obj.get::<JsBoolean, _, _>(&mut cx, "_prefilter")?.value(&mut cx);
let is_electron = cx
.argument::<JsBoolean>(1)
@@ -67,23 +59,20 @@ impl JsQuery {
let (deferred, promise) = cx.promise();
let channel = cx.channel();
let query_vector = query_obj.get_opt::<JsArray, _, _>(&mut cx, "_queryVector")?;
let query_vector = query_obj.get::<JsArray, _, _>(&mut cx, "_queryVector")?;
let query = convert::js_array_to_vec(query_vector.deref(), &mut cx);
let table = js_table.table.clone();
let query = query_vector.map(|q| convert::js_array_to_vec(q.deref(), &mut cx));
rt.spawn(async move {
let mut builder = table
.search(query.map(|q| Float32Array::from(q)))
let builder = table
.search(Float32Array::from(query))
.limit(limit as usize)
.refine_factor(refine_factor)
.nprobes(nprobes)
.filter(filter)
.metric_type(metric_type)
.select(select)
.prefilter(prefilter);
if let Some(limit) = limit {
builder = builder.limit(limit as usize);
};
let record_batch_stream = builder.execute();
let results = record_batch_stream
.and_then(|stream| {

View File

@@ -165,69 +165,6 @@ impl JsTable {
Ok(promise)
}
pub(crate) fn js_update(mut cx: FunctionContext) -> JsResult<JsPromise> {
let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
let mut table = js_table.table.clone();
let rt = runtime(&mut cx)?;
let (deferred, promise) = cx.promise();
let channel = cx.channel();
// create a vector of updates from the passed map
let updates_arg = cx.argument::<JsObject>(1)?;
let properties = updates_arg.get_own_property_names(&mut cx)?;
let mut updates: Vec<(String, String)> =
Vec::with_capacity(properties.len(&mut cx) as usize);
let len_properties = properties.len(&mut cx);
for i in 0..len_properties {
let property = properties
.get_value(&mut cx, i)?
.downcast_or_throw::<JsString, _>(&mut cx)?;
let value = updates_arg
.get_value(&mut cx, property.clone())?
.downcast_or_throw::<JsString, _>(&mut cx)?;
let property = property.value(&mut cx);
let value = value.value(&mut cx);
updates.push((property, value));
}
// get the filter/predicate if the user passed one
let predicate = cx.argument_opt(0);
let predicate = predicate.unwrap().downcast::<JsString, _>(&mut cx);
let predicate = match predicate {
Ok(_) => {
let val = predicate.map(|s| s.value(&mut cx)).unwrap();
Some(val)
}
Err(_) => {
// if the predicate is not string, check it's null otherwise an invalid
// type was passed
cx.argument::<JsNull>(0)?;
None
}
};
rt.spawn(async move {
let updates_arg = updates
.iter()
.map(|(k, v)| (k.as_str(), v.as_str()))
.collect::<Vec<_>>();
let predicate = predicate.as_ref().map(|s| s.as_str());
let update_result = table.update(predicate, updates_arg).await;
deferred.settle_with(&channel, move |mut cx| {
update_result.or_throw(&mut cx)?;
Ok(cx.boxed(JsTable::from(table)))
})
});
Ok(promise)
}
pub(crate) fn js_cleanup(mut cx: FunctionContext) -> JsResult<JsPromise> {
let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
let rt = runtime(&mut cx)?;

View File

@@ -359,7 +359,7 @@ mod test {
assert_eq!(t.count_rows().await.unwrap(), 100);
let q = t
.search(Some(PrimitiveArray::from_iter_values(vec![0.1, 0.1, 0.1, 0.1])))
.search(PrimitiveArray::from_iter_values(vec![0.1, 0.1, 0.1, 0.1]))
.limit(10)
.execute()
.await

View File

@@ -24,8 +24,8 @@ use crate::error::Result;
/// A builder for nearest neighbor queries for LanceDB.
pub struct Query {
pub dataset: Arc<Dataset>,
pub query_vector: Option<Float32Array>,
pub limit: Option<usize>,
pub query_vector: Float32Array,
pub limit: usize,
pub filter: Option<String>,
pub select: Option<Vec<String>>,
pub nprobes: usize,
@@ -46,11 +46,11 @@ impl Query {
/// # Returns
///
/// * A [Query] object.
pub(crate) fn new(dataset: Arc<Dataset>, vector: Option<Float32Array>) -> Self {
pub(crate) fn new(dataset: Arc<Dataset>, vector: Float32Array) -> Self {
Query {
dataset,
query_vector: vector,
limit: None,
limit: 10,
nprobes: 20,
refine_factor: None,
metric_type: None,
@@ -69,13 +69,11 @@ impl Query {
pub async fn execute(&self) -> Result<DatasetRecordBatchStream> {
let mut scanner: Scanner = self.dataset.scan();
if let Some(query) = self.query_vector.as_ref() {
// If there is a vector query, default to limit=10 if unspecified
scanner.nearest(crate::table::VECTOR_COLUMN_NAME, query, self.limit.unwrap_or(10))?;
} else {
// If there is no vector query, it's ok to not have a limit
scanner.limit(self.limit.map(|limit| limit as i64), None)?;
}
scanner.nearest(
crate::table::VECTOR_COLUMN_NAME,
&self.query_vector,
self.limit,
)?;
scanner.nprobs(self.nprobes);
scanner.use_index(self.use_index);
scanner.prefilter(self.prefilter);
@@ -93,7 +91,7 @@ impl Query {
///
/// * `limit` - The maximum number of results to return.
pub fn limit(mut self, limit: usize) -> Query {
self.limit = Some(limit);
self.limit = limit;
self
}
@@ -103,7 +101,7 @@ impl Query {
///
/// * `vector` - The vector that will be used for search.
pub fn query_vector(mut self, query_vector: Float32Array) -> Query {
self.query_vector = Some(query_vector);
self.query_vector = query_vector;
self
}
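
The Option-based variant in this hunk yields two query styles: with a vector, `execute` takes the `nearest()` path and the limit defaults to 10 when unset; without one, the query becomes a plain filtered scan with no implicit limit. A node-side sketch of the two entry points (assuming `tbl.filter()` is exposed there, mirroring the Rust `Table::filter` further down):

```ts
// ANN search: query vector present, limit falls back to 10 if unspecified.
const hits = await tbl.search([0.1, 0.2]).execute()

// Filter-only scan: no query vector, no implicit limit.
const rows = await tbl.filter('id % 2 == 0').execute()
```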
@@ -176,7 +174,7 @@ mod tests {
use std::sync::Arc;
use super::*;
use arrow_array::{Float32Array, RecordBatch, RecordBatchIterator, RecordBatchReader, cast::AsArray, Int32Array};
use arrow_array::{Float32Array, RecordBatch, RecordBatchIterator, RecordBatchReader};
use arrow_schema::{DataType, Field as ArrowField, Schema as ArrowSchema};
use futures::StreamExt;
use lance::dataset::Dataset;
@@ -189,7 +187,7 @@ mod tests {
let batches = make_test_batches();
let ds = Dataset::write(batches, "memory://foo", None).await.unwrap();
let vector = Some(Float32Array::from_iter_values([0.1, 0.2]));
let vector = Float32Array::from_iter_values([0.1, 0.2]);
let query = Query::new(Arc::new(ds), vector.clone());
assert_eq!(query.query_vector, vector);
@@ -203,8 +201,8 @@ mod tests {
.metric_type(Some(MetricType::Cosine))
.refine_factor(Some(999));
assert_eq!(query.query_vector.unwrap(), new_vector);
assert_eq!(query.limit.unwrap(), 100);
assert_eq!(query.query_vector, new_vector);
assert_eq!(query.limit, 100);
assert_eq!(query.nprobes, 1000);
assert_eq!(query.use_index, true);
assert_eq!(query.metric_type, Some(MetricType::Cosine));
@@ -216,7 +214,7 @@ mod tests {
let batches = make_non_empty_batches();
let ds = Arc::new(Dataset::write(batches, "memory://foo", None).await.unwrap());
let vector = Some(Float32Array::from_iter_values([0.1; 4]));
let vector = Float32Array::from_iter_values([0.1; 4]);
let query = Query::new(ds.clone(), vector.clone());
let result = query
@@ -246,27 +244,6 @@ mod tests {
}
}
#[tokio::test]
async fn test_execute_no_vector() {
// test that it's ok to not specify a query vector (just filter / limit)
let batches = make_non_empty_batches();
let ds = Arc::new(Dataset::write(batches, "memory://foo", None).await.unwrap());
let query = Query::new(ds.clone(), None);
let result = query
.filter(Some("id % 2 == 0".to_string()))
.execute()
.await;
let mut stream = result.expect("should have result");
// should only have one batch
while let Some(batch) = stream.next().await {
let b = batch.expect("should be Ok");
// cast arr into Int32Array
let arr: &Int32Array = b["id"].as_primitive();
assert!(arr.iter().all(|x| x.unwrap() % 2 == 0));
}
}
fn make_non_empty_batches() -> impl RecordBatchReader + Send + 'static {
let vec = Box::new(RandomVector::new().named("vector".to_string()));
let id = Box::new(IncrementingInt32::new().named("id".to_string()));

View File

@@ -23,7 +23,7 @@ use lance::dataset::cleanup::RemovalStats;
use lance::dataset::optimize::{
compact_files, CompactionMetrics, CompactionOptions, IndexRemapperOptions,
};
use lance::dataset::{Dataset, UpdateBuilder, WriteParams};
use lance::dataset::{Dataset, WriteParams};
use lance::index::DatasetIndexExt;
use lance::io::object_store::WrappingObjectStore;
use std::path::Path;
@@ -308,14 +308,10 @@ impl Table {
/// # Returns
///
/// * A [Query] object.
pub fn search(&self, query_vector: Option<Float32Array>) -> Query {
pub fn search(&self, query_vector: Float32Array) -> Query {
Query::new(self.dataset.clone(), query_vector)
}
pub fn filter(&self, expr: String) -> Query {
Query::new(self.dataset.clone(), None).filter(Some(expr))
}
/// Returns the number of rows in this Table
pub async fn count_rows(&self) -> Result<usize> {
Ok(self.dataset.count_rows().await?)
@@ -342,27 +338,6 @@ impl Table {
Ok(())
}
pub async fn update(
&mut self,
predicate: Option<&str>,
updates: Vec<(&str, &str)>,
) -> Result<()> {
let mut builder = UpdateBuilder::new(self.dataset.clone());
if let Some(predicate) = predicate {
builder = builder.update_where(predicate)?;
}
for (column, value) in updates {
builder = builder.set(column, value)?;
}
let operation = builder.build()?;
let new_ds = operation.execute().await?;
self.dataset = new_ds;
Ok(())
}
/// Remove old versions of the dataset from disk.
///
/// # Arguments
@@ -438,14 +413,11 @@ mod tests {
use std::sync::Arc;
use arrow_array::{
Array, BooleanArray, Date32Array, FixedSizeListArray, Float32Array, Float64Array,
Int32Array, Int64Array, LargeStringArray, RecordBatch, RecordBatchIterator,
RecordBatchReader, StringArray, TimestampMillisecondArray, TimestampNanosecondArray,
UInt32Array,
Array, FixedSizeListArray, Float32Array, Int32Array, RecordBatch, RecordBatchIterator,
RecordBatchReader,
};
use arrow_data::ArrayDataBuilder;
use arrow_schema::{DataType, Field, Schema, TimeUnit};
use futures::TryStreamExt;
use arrow_schema::{DataType, Field, Schema};
use lance::dataset::{Dataset, WriteMode};
use lance::index::vector::pq::PQBuildParams;
use lance::io::object_store::{ObjectStoreParams, WrappingObjectStore};
@@ -568,272 +540,6 @@ mod tests {
assert_eq!(table.name, "test");
}
#[tokio::test]
async fn test_update_with_predicate() {
let tmp_dir = tempdir().unwrap();
let dataset_path = tmp_dir.path().join("test.lance");
let uri = dataset_path.to_str().unwrap();
let schema = Arc::new(Schema::new(vec![
Field::new("id", DataType::Int32, false),
Field::new("name", DataType::Utf8, false),
]));
let record_batch_iter = RecordBatchIterator::new(
vec![RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from_iter_values(0..10)),
Arc::new(StringArray::from_iter_values(vec![
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
])),
],
)
.unwrap()]
.into_iter()
.map(Ok),
schema.clone(),
);
Dataset::write(record_batch_iter, uri, None).await.unwrap();
let mut table = Table::open(uri).await.unwrap();
table
.update(Some("id > 5"), vec![("name", "'foo'")])
.await
.unwrap();
let ds_after = Dataset::open(uri).await.unwrap();
let mut batches = ds_after
.scan()
.project(&["id", "name"])
.unwrap()
.try_into_stream()
.await
.unwrap()
.try_collect::<Vec<_>>()
.await
.unwrap();
while let Some(batch) = batches.pop() {
let ids = batch
.column(0)
.as_any()
.downcast_ref::<Int32Array>()
.unwrap()
.iter()
.collect::<Vec<_>>();
let names = batch
.column(1)
.as_any()
.downcast_ref::<StringArray>()
.unwrap()
.iter()
.collect::<Vec<_>>();
for (i, name) in names.iter().enumerate() {
let id = ids[i].unwrap();
let name = name.unwrap();
if id > 5 {
assert_eq!(name, "foo");
} else {
assert_eq!(name, &format!("{}", (b'a' + id as u8) as char));
}
}
}
}
#[tokio::test]
async fn test_update_all_types() {
let tmp_dir = tempdir().unwrap();
let dataset_path = tmp_dir.path().join("test.lance");
let uri = dataset_path.to_str().unwrap();
let schema = Arc::new(Schema::new(vec![
Field::new("int32", DataType::Int32, false),
Field::new("int64", DataType::Int64, false),
Field::new("uint32", DataType::UInt32, false),
Field::new("string", DataType::Utf8, false),
Field::new("large_string", DataType::LargeUtf8, false),
Field::new("float32", DataType::Float32, false),
Field::new("float64", DataType::Float64, false),
Field::new("bool", DataType::Boolean, false),
Field::new("date32", DataType::Date32, false),
Field::new(
"timestamp_ns",
DataType::Timestamp(TimeUnit::Nanosecond, None),
false,
),
Field::new(
"timestamp_ms",
DataType::Timestamp(TimeUnit::Millisecond, None),
false,
),
Field::new(
"vec_f32",
DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float32, true)), 2),
false,
),
Field::new(
"vec_f64",
DataType::FixedSizeList(Arc::new(Field::new("item", DataType::Float64, true)), 2),
false,
),
]));
let record_batch_iter = RecordBatchIterator::new(
vec![RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from_iter_values(0..10)),
Arc::new(Int64Array::from_iter_values(0..10)),
Arc::new(UInt32Array::from_iter_values(0..10)),
Arc::new(StringArray::from_iter_values(vec![
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
])),
Arc::new(LargeStringArray::from_iter_values(vec![
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
])),
Arc::new(Float32Array::from_iter_values(
(0..10).into_iter().map(|i| i as f32),
)),
Arc::new(Float64Array::from_iter_values(
(0..10).into_iter().map(|i| i as f64),
)),
Arc::new(Into::<BooleanArray>::into(vec![
true, false, true, false, true, false, true, false, true, false,
])),
Arc::new(Date32Array::from_iter_values(0..10)),
Arc::new(TimestampNanosecondArray::from_iter_values(0..10)),
Arc::new(TimestampMillisecondArray::from_iter_values(0..10)),
Arc::new(
create_fixed_size_list(
Float32Array::from_iter_values((0..20).into_iter().map(|i| i as f32)),
2,
)
.unwrap(),
),
Arc::new(
create_fixed_size_list(
Float64Array::from_iter_values((0..20).into_iter().map(|i| i as f64)),
2,
)
.unwrap(),
),
],
)
.unwrap()]
.into_iter()
.map(Ok),
schema.clone(),
);
Dataset::write(record_batch_iter, uri, None).await.unwrap();
let mut table = Table::open(uri).await.unwrap();
// check it can do update for each type
let updates: Vec<(&str, &str)> = vec![
("string", "'foo'"),
("large_string", "'large_foo'"),
("int32", "1"),
("int64", "1"),
("uint32", "1"),
("float32", "1.0"),
("float64", "1.0"),
("bool", "true"),
("date32", "1"),
("timestamp_ns", "1"),
("timestamp_ms", "1"),
("vec_f32", "[1.0, 1.0]"),
("vec_f64", "[1.0, 1.0]"),
];
// for (column, value) in test_cases {
table.update(None, updates).await.unwrap();
let ds_after = Dataset::open(uri).await.unwrap();
let mut batches = ds_after
.scan()
.project(&[
"string",
"large_string",
"int32",
"int64",
"uint32",
"float32",
"float64",
"bool",
"date32",
"timestamp_ns",
"timestamp_ms",
"vec_f32",
"vec_f64",
])
.unwrap()
.try_into_stream()
.await
.unwrap()
.try_collect::<Vec<_>>()
.await
.unwrap();
let batch = batches.pop().unwrap();
macro_rules! assert_column {
($column:expr, $array_type:ty, $expected:expr) => {
let array = $column
.as_any()
.downcast_ref::<$array_type>()
.unwrap()
.iter()
.collect::<Vec<_>>();
for v in array {
assert_eq!(v, Some($expected));
}
};
}
assert_column!(batch.column(0), StringArray, "foo");
assert_column!(batch.column(1), LargeStringArray, "large_foo");
assert_column!(batch.column(2), Int32Array, 1);
assert_column!(batch.column(3), Int64Array, 1);
assert_column!(batch.column(4), UInt32Array, 1);
assert_column!(batch.column(5), Float32Array, 1.0);
assert_column!(batch.column(6), Float64Array, 1.0);
assert_column!(batch.column(7), BooleanArray, true);
assert_column!(batch.column(8), Date32Array, 1);
assert_column!(batch.column(9), TimestampNanosecondArray, 1);
assert_column!(batch.column(10), TimestampMillisecondArray, 1);
let array = batch
.column(11)
.as_any()
.downcast_ref::<FixedSizeListArray>()
.unwrap()
.iter()
.collect::<Vec<_>>();
for v in array {
let v = v.unwrap();
let f32array = v.as_any().downcast_ref::<Float32Array>().unwrap();
for v in f32array {
assert_eq!(v, Some(1.0));
}
}
let array = batch
.column(12)
.as_any()
.downcast_ref::<FixedSizeListArray>()
.unwrap()
.iter()
.collect::<Vec<_>>();
for v in array {
let v = v.unwrap();
let f64array = v.as_any().downcast_ref::<Float64Array>().unwrap();
for v in f64array {
assert_eq!(v, Some(1.0));
}
}
}
#[tokio::test]
async fn test_search() {
let tmp_dir = tempdir().unwrap();
@@ -848,8 +554,8 @@ mod tests {
let table = Table::open(uri).await.unwrap();
let vector = Float32Array::from_iter_values([0.1, 0.2]);
let query = table.search(Some(vector.clone()));
assert_eq!(vector, query.query_vector.unwrap());
let query = table.search(vector.clone());
assert_eq!(vector, query.query_vector);
}
#[derive(Default, Debug)]