Mirror of https://github.com/lancedb/lancedb.git
Synced 2025-12-23 05:19:58 +00:00

Compare commits: 18 commits (python-v0. ... v0.2.3)
| SHA1 |
|---|
| cc5e2d3e10 |
| 30f5bc5865 |
| 2737315cb2 |
| d52422603c |
| f35f8e451f |
| 0b9924b432 |
| ba416a571d |
| 13317ffb46 |
| ca961567fe |
| 31a12a141d |
| e3061d4cb4 |
| 1fcc67fd2c |
| ac18812af0 |
| 8324e0f171 |
| f0bcb26f32 |
| b281c5255c |
| d349d2a44a |
| 0699a6fa7b |

@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.1.19
+current_version = 0.2.3
 commit = True
 message = Bump version: {current_version} → {new_version}
 tag = True

@@ -6,7 +6,7 @@ members = [
 resolver = "2"

 [workspace.dependencies]
-lance = "=0.6.1"
+lance = "=0.6.3"
 arrow-array = "43.0"
 arrow-data = "43.0"
 arrow-schema = "43.0"
@@ -14,4 +14,3 @@ arrow-ipc = "43.0"
 half = { "version" = "=2.2.1", default-features = false }
 object_store = "0.6.1"
 snafu = "0.7.4"
-

@@ -67,6 +67,7 @@ nav:
 - LlamaIndex 🦙: https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html
 - Pydantic: python/pydantic.md
 - Voxel51: integrations/voxel51.md
+- PromptTools: integrations/prompttools.md
 - Python examples:
 - YouTube Transcript Search: notebooks/youtube_transcript_search.ipynb
 - Documentation QA Bot using LangChain: notebooks/code_qa_bot.ipynb

@@ -63,6 +63,25 @@ A Table is a collection of Records in a LanceDB Database.
 table = db.create_table("table3", data, schema=custom_schema)
 ```

+### From PyArrow Tables
+You can also create LanceDB tables directly from PyArrow tables.
+
+```python
+table = pa.Table.from_arrays(
+    [
+        pa.array([[3.1, 4.1], [5.9, 26.5]],
+                 pa.list_(pa.float32(), 2)),
+        pa.array(["foo", "bar"]),
+        pa.array([10.0, 20.0]),
+    ],
+    ["vector", "item", "price"],
+)
+
+db = lancedb.connect("db")
+
+tbl = db.create_table("test1", table)
+```
+
 ### From Pydantic Models
 LanceDB supports creating an Apache Arrow Schema from a Pydantic BaseModel via the pydantic_to_schema() method.

@@ -86,10 +105,14 @@ A Table is a collection of Records in a LanceDB Database.
 table = db.create_table(table_name, schema=Content.to_arrow_schema())
 ```
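
The hunk above only shows the tail of the Pydantic example, so here is a minimal sketch of the full flow it references. The `Content` model below is illustrative (the real model sits outside the shown diff context); its field types follow the `LanceModel` and `vector` helpers that appear later in this same diff.

```python
import lancedb
from lancedb.pydantic import LanceModel, vector


# Hypothetical model standing in for the `Content` model cut off by the diff.
class Content(LanceModel):
    vector: vector(2)  # fixed-size list of two float32 values
    item: str
    price: float


db = lancedb.connect("db")
# A LanceModel can emit the Arrow schema derived from its Pydantic fields.
table = db.create_table("table_from_pydantic", schema=Content.to_arrow_schema())
```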

-### Using RecordBatch Iterator / Writing Large Datasets
+### Using Iterators / Writing Large Datasets

-It is recommended to use a RecordBatch iterator to add large datasets in batches when creating your table in one go. This does not create multiple versions of your dataset, unlike manually adding batches using `table.add()`.
+It is recommended to use iterators to add large datasets in batches when creating your table in one go. This does not create multiple versions of your dataset, unlike manually adding batches using `table.add()`.
+
+LanceDB additionally supports PyArrow's `RecordBatch` iterators or other generators producing supported data types.
+
+Here's an example using a `RecordBatch` iterator for creating tables.

 ```python
 import pyarrow as pa

@@ -97,7 +120,8 @@ A Table is a collection of Records in a LanceDB Database.
     for i in range(5):
         yield pa.RecordBatch.from_arrays(
             [
-                pa.array([[3.1, 4.1], [5.9, 26.5]]),
+                pa.array([[3.1, 4.1], [5.9, 26.5]],
+                         pa.list_(pa.float32(), 2)),
                 pa.array(["foo", "bar"]),
                 pa.array([10.0, 20.0]),
             ],
@@ -105,7 +129,7 @@ A Table is a collection of Records in a LanceDB Database.
         )

 schema = pa.schema([
-    pa.field("vector", pa.list_(pa.float32())),
+    pa.field("vector", pa.list_(pa.float32(), 2)),
     pa.field("item", pa.utf8()),
     pa.field("price", pa.float32()),
 ])
@@ -113,20 +137,7 @@ A Table is a collection of Records in a LanceDB Database.
 db.create_table("table4", make_batches(), schema=schema)
 ```

-You can also use a Pandas DataFrame directly in the above example by converting it to a `RecordBatch` object:
-
-```python
-import pandas as pd
-import pyarrow as pa
-
-df = pd.DataFrame({'vector': [[0,1], [2,3], [4,5], [6,7]],
-                   'month': [3, 5, 7, 9],
-                   'day': [1, 5, 9, 13],
-                   'n_legs': [2, 4, 5, 100],
-                   'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]})
-
-batch = pa.RecordBatch.from_pandas(df)
-```
+You can also use iterators of other types, such as Pandas DataFrames or pylists, directly in the above example.
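
A minimal sketch of that, modeled on the `test_ingest_iterator` test that appears later in this diff; the generator name, database path, and table name are illustrative. Any iterator yielding DataFrames, lists of dicts, `RecordBatch`es, or Arrow Tables works against the declared schema.

```python
import lancedb
import pandas as pd
import pyarrow as pa

schema = pa.schema([
    pa.field("vector", pa.list_(pa.float32(), 2)),
    pa.field("item", pa.utf8()),
    pa.field("price", pa.float32()),
])


def make_df_batches():
    # Yield pandas DataFrames; lists of dicts or RecordBatches work the same way.
    for _ in range(5):
        yield pd.DataFrame(
            {
                "vector": [[3.1, 4.1], [5.9, 26.5]],
                "item": ["foo", "bar"],
                "price": [10.0, 20.0],
            }
        )


db = lancedb.connect("db")
tbl = db.create_table("table5", make_df_batches(), schema=schema)
```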

 ## Creating Empty Table
 You can also create empty tables in Python: initialize a table with a schema and ingest data into it later.
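
A minimal sketch of that flow (the table name and sample row are illustrative):

```python
import lancedb
import pyarrow as pa

db = lancedb.connect("db")

schema = pa.schema([
    pa.field("vector", pa.list_(pa.float32(), 2)),
    pa.field("item", pa.utf8()),
])

# Create the table with only a schema, then ingest data later.
tbl = db.create_table("empty_table", schema=schema)
tbl.add([{"vector": [3.1, 4.1], "item": "foo"}])
```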

docs/src/integrations/prompttools.md (new file, 7 lines)

@@ -0,0 +1,7 @@
+
+[PromptTools](https://github.com/hegelai/prompttools) offers a set of free, open-source tools for testing and experimenting with models, prompts, and configurations. The core idea is to enable developers to evaluate prompts using familiar interfaces like code and notebooks. You can use it to experiment with different configurations of LanceDB, and test how LanceDB integrates with the LLM of your choice.
+
+[Evaluating Prompts with PromptTools](./examples/prompttools-eval-prompts/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/prompttools-eval-prompts/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
+
node/package-lock.json (generated, 74 lines)

@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.1.19",
+  "version": "0.2.2",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.1.19",
+      "version": "0.2.2",
       "cpu": [
         "x64",
         "arm64"
@@ -51,11 +51,11 @@
       "typescript": "*"
     },
     "optionalDependencies": {
-      "@lancedb/vectordb-darwin-arm64": "0.1.19",
-      "@lancedb/vectordb-darwin-x64": "0.1.19",
-      "@lancedb/vectordb-linux-arm64-gnu": "0.1.19",
-      "@lancedb/vectordb-linux-x64-gnu": "0.1.19",
-      "@lancedb/vectordb-win32-x64-msvc": "0.1.19"
+      "@lancedb/vectordb-darwin-arm64": "0.2.2",
+      "@lancedb/vectordb-darwin-x64": "0.2.2",
+      "@lancedb/vectordb-linux-arm64-gnu": "0.2.2",
+      "@lancedb/vectordb-linux-x64-gnu": "0.2.2",
+      "@lancedb/vectordb-win32-x64-msvc": "0.2.2"
     }
   },
   "node_modules/@apache-arrow/ts": {
@@ -315,9 +315,9 @@
     }
   },
   "node_modules/@lancedb/vectordb-darwin-arm64": {
-    "version": "0.1.19",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.1.19.tgz",
-    "integrity": "sha512-efQhJkBKvMNhjFq3Sw3/qHo9D9gb9UqiIr98n3STsbNxBQjMnWemXn91Ckl40siRG1O8qXcINW7Qs/EGmus+kg==",
+    "version": "0.2.2",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.2.2.tgz",
+    "integrity": "sha512-ZsIMUQPzWa3jU5DOlsBPsov/pT+EJn9odR7ePKTxa7EUoBcCDOZk49+ehsQotxQlSYxhC211jK7yeUJKGYWOgg==",
     "cpu": [
       "arm64"
     ],
@@ -327,9 +327,9 @@
     ]
   },
   "node_modules/@lancedb/vectordb-darwin-x64": {
-    "version": "0.1.19",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.1.19.tgz",
-    "integrity": "sha512-r6OZNVyemAssABz2w7CRhe7dyREwBEfTytn+ux1zzTnzsgMgDovCQ0rQ3WZcxWvcy7SFCxiemA9IP1b/lsb4tQ==",
+    "version": "0.2.2",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.2.2.tgz",
+    "integrity": "sha512-6H9H6gY7MTo8ijoldGVY2YfGhvjohDwOxceHIj/1HD+p90VWi3FLAMPMHzAlPMYg7ezMJH0qaemqmNaoboStrA==",
     "cpu": [
       "x64"
     ],
@@ -339,9 +339,9 @@
     ]
   },
   "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
-    "version": "0.1.19",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.1.19.tgz",
-    "integrity": "sha512-mL/hRmZp6Kw7hmGJBdOZfp/tTYiCdlOcs8DA/+nr2eiXERv0gIhyiKvr2P5DwbBmut3qXEkDalMHTo95BSdL2A==",
+    "version": "0.2.2",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.2.2.tgz",
+    "integrity": "sha512-iZFsWt2rTLol3nzzObKxEnHhe4a+cmHETHlhKwHzQ+oU7S41UxLkQDd1dCh0XbzbRYjp7T2xPTqFG00o+MXomA==",
     "cpu": [
       "arm64"
     ],
@@ -351,9 +351,9 @@
     ]
   },
   "node_modules/@lancedb/vectordb-linux-x64-gnu": {
-    "version": "0.1.19",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.1.19.tgz",
-    "integrity": "sha512-AG0FHksbbr+cHVKPi4B8cmBtqb6T9E0uaK4kyZkXrX52/xtv9RYVZcykaB/tSSm0XNFPWWRnx9R8UqNZV/hxMA==",
+    "version": "0.2.2",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.2.2.tgz",
+    "integrity": "sha512-UPZxxj+EtMAd4bOFLEGG0GSEsNDICU9PFfXZRe3wAcmj7LomdPDoFQq6uBV8IZT5guuKtHt+NQ876DtormIWSg==",
     "cpu": [
       "x64"
     ],
@@ -363,9 +363,9 @@
     ]
   },
   "node_modules/@lancedb/vectordb-win32-x64-msvc": {
-    "version": "0.1.19",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.1.19.tgz",
-    "integrity": "sha512-PDWZ2hvLVXH4Z4WIO1rsWY8ev3NpNm7aXlaey32P+l1Iz9Hia9+F2GBpp2UiEQKfvbk82ucAvBLRmpSsHY8Tlw==",
+    "version": "0.2.2",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.2.2.tgz",
+    "integrity": "sha512-Moouw4WNUIgz676XpfgXtVT0HnuH+01fTnO3wlhfKZQn7azs8JyJtGtzVzcn67GnMPdw+Xx687qylu8sxRol9A==",
     "cpu": [
       "x64"
     ],
@@ -4852,33 +4852,33 @@
     }
   },
   "@lancedb/vectordb-darwin-arm64": {
-    "version": "0.1.19",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.1.19.tgz",
-    "integrity": "sha512-efQhJkBKvMNhjFq3Sw3/qHo9D9gb9UqiIr98n3STsbNxBQjMnWemXn91Ckl40siRG1O8qXcINW7Qs/EGmus+kg==",
+    "version": "0.2.2",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.2.2.tgz",
+    "integrity": "sha512-ZsIMUQPzWa3jU5DOlsBPsov/pT+EJn9odR7ePKTxa7EUoBcCDOZk49+ehsQotxQlSYxhC211jK7yeUJKGYWOgg==",
     "optional": true
   },
   "@lancedb/vectordb-darwin-x64": {
-    "version": "0.1.19",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.1.19.tgz",
-    "integrity": "sha512-r6OZNVyemAssABz2w7CRhe7dyREwBEfTytn+ux1zzTnzsgMgDovCQ0rQ3WZcxWvcy7SFCxiemA9IP1b/lsb4tQ==",
+    "version": "0.2.2",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.2.2.tgz",
+    "integrity": "sha512-6H9H6gY7MTo8ijoldGVY2YfGhvjohDwOxceHIj/1HD+p90VWi3FLAMPMHzAlPMYg7ezMJH0qaemqmNaoboStrA==",
     "optional": true
   },
   "@lancedb/vectordb-linux-arm64-gnu": {
-    "version": "0.1.19",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.1.19.tgz",
-    "integrity": "sha512-mL/hRmZp6Kw7hmGJBdOZfp/tTYiCdlOcs8DA/+nr2eiXERv0gIhyiKvr2P5DwbBmut3qXEkDalMHTo95BSdL2A==",
+    "version": "0.2.2",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.2.2.tgz",
+    "integrity": "sha512-iZFsWt2rTLol3nzzObKxEnHhe4a+cmHETHlhKwHzQ+oU7S41UxLkQDd1dCh0XbzbRYjp7T2xPTqFG00o+MXomA==",
     "optional": true
   },
   "@lancedb/vectordb-linux-x64-gnu": {
-    "version": "0.1.19",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.1.19.tgz",
-    "integrity": "sha512-AG0FHksbbr+cHVKPi4B8cmBtqb6T9E0uaK4kyZkXrX52/xtv9RYVZcykaB/tSSm0XNFPWWRnx9R8UqNZV/hxMA==",
+    "version": "0.2.2",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.2.2.tgz",
+    "integrity": "sha512-UPZxxj+EtMAd4bOFLEGG0GSEsNDICU9PFfXZRe3wAcmj7LomdPDoFQq6uBV8IZT5guuKtHt+NQ876DtormIWSg==",
     "optional": true
   },
   "@lancedb/vectordb-win32-x64-msvc": {
-    "version": "0.1.19",
-    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.1.19.tgz",
-    "integrity": "sha512-PDWZ2hvLVXH4Z4WIO1rsWY8ev3NpNm7aXlaey32P+l1Iz9Hia9+F2GBpp2UiEQKfvbk82ucAvBLRmpSsHY8Tlw==",
+    "version": "0.2.2",
+    "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.2.2.tgz",
+    "integrity": "sha512-Moouw4WNUIgz676XpfgXtVT0HnuH+01fTnO3wlhfKZQn7azs8JyJtGtzVzcn67GnMPdw+Xx687qylu8sxRol9A==",
    "optional": true
   },
   "@neon-rs/cli": {

@@ -1,6 +1,6 @@
 {
   "name": "vectordb",
-  "version": "0.1.19",
+  "version": "0.2.3",
   "description": " Serverless, low-latency vector database for AI applications",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -78,10 +78,10 @@
     }
   },
   "optionalDependencies": {
-    "@lancedb/vectordb-darwin-arm64": "0.1.19",
-    "@lancedb/vectordb-darwin-x64": "0.1.19",
-    "@lancedb/vectordb-linux-arm64-gnu": "0.1.19",
-    "@lancedb/vectordb-linux-x64-gnu": "0.1.19",
-    "@lancedb/vectordb-win32-x64-msvc": "0.1.19"
+    "@lancedb/vectordb-darwin-arm64": "0.2.3",
+    "@lancedb/vectordb-darwin-x64": "0.2.3",
+    "@lancedb/vectordb-linux-arm64-gnu": "0.2.3",
+    "@lancedb/vectordb-linux-x64-gnu": "0.2.3",
+    "@lancedb/vectordb-win32-x64-msvc": "0.2.3"
   }
 }

@@ -13,18 +13,19 @@
 // limitations under the License.

 import {
-  Field,
+  Field, type FixedSizeListBuilder,
   Float32,
-  List, type ListBuilder,
   makeBuilder,
   RecordBatchFileWriter,
-  Table, Utf8,
+  Utf8,
   type Vector,
-  vectorFromArray
+  FixedSizeList,
+  vectorFromArray, type Schema, Table as ArrowTable
 } from 'apache-arrow'
 import { type EmbeddingFunction } from './index'

-export async function convertToTable<T> (data: Array<Record<string, unknown>>, embeddings?: EmbeddingFunction<T>): Promise<Table> {
+// Converts an Array of records into an Arrow Table, optionally applying an embeddings function to it.
+export async function convertToTable<T> (data: Array<Record<string, unknown>>, embeddings?: EmbeddingFunction<T>): Promise<ArrowTable> {
   if (data.length === 0) {
     throw new Error('At least one record needs to be provided')
   }
@@ -34,8 +35,8 @@ export async function convertToTable<T> (data: Array<Record<string, unknown>>, e

   for (const columnsKey of columns) {
     if (columnsKey === 'vector') {
-      const listBuilder = newVectorListBuilder()
       const vectorSize = (data[0].vector as any[]).length
+      const listBuilder = newVectorBuilder(vectorSize)
       for (const datum of data) {
         if ((datum[columnsKey] as any[]).length !== vectorSize) {
           throw new Error(`Invalid vector size, expected ${vectorSize}`)
@@ -52,9 +53,7 @@ export async function convertToTable<T> (data: Array<Record<string, unknown>>, e

     if (columnsKey === embeddings?.sourceColumn) {
       const vectors = await embeddings.embed(values as T[])
-      const listBuilder = newVectorListBuilder()
-      vectors.map(v => listBuilder.append(v))
-      records.vector = listBuilder.finish().toVector()
+      records.vector = vectorFromArray(vectors, newVectorType(vectors[0].length))
     }

     if (typeof values[0] === 'string') {
@@ -66,20 +65,47 @@ export async function convertToTable<T> (data: Array<Record<string, unknown>>, e
     }
   }

-  return new Table(records)
+  return new ArrowTable(records)
 }

-// Creates a new Arrow ListBuilder that stores a Vector column
-function newVectorListBuilder (): ListBuilder<Float32, any> {
-  const children = new Field<Float32>('item', new Float32())
-  const list = new List(children)
+function newVectorBuilder (dim: number): FixedSizeListBuilder<Float32> {
   return makeBuilder({
-    type: list
+    type: newVectorType(dim)
   })
 }

+// Creates the Arrow Type for a Vector column with dimension `dim`
+function newVectorType (dim: number): FixedSizeList<Float32> {
+  const children = new Field<Float32>('item', new Float32())
+  return new FixedSizeList(dim, children)
+}
+
 // Converts an Array of records into Arrow IPC format
 export async function fromRecordsToBuffer<T> (data: Array<Record<string, unknown>>, embeddings?: EmbeddingFunction<T>): Promise<Buffer> {
   const table = await convertToTable(data, embeddings)
   const writer = RecordBatchFileWriter.writeAll(table)
   return Buffer.from(await writer.toUint8Array())
 }
+
+// Converts an Arrow Table into Arrow IPC format
+export async function fromTableToBuffer<T> (table: ArrowTable, embeddings?: EmbeddingFunction<T>): Promise<Buffer> {
+  if (embeddings !== undefined) {
+    const source = table.getChild(embeddings.sourceColumn)
+
+    if (source === null) {
+      throw new Error(`The embedding source column ${embeddings.sourceColumn} was not found in the Arrow Table`)
+    }
+
+    const vectors = await embeddings.embed(source.toArray() as T[])
+    const column = vectorFromArray(vectors, newVectorType(vectors[0].length))
+    table = table.assign(new ArrowTable({ vector: column }))
+  }
+  const writer = RecordBatchFileWriter.writeAll(table)
+  return Buffer.from(await writer.toUint8Array())
+}
+
+// Creates an empty Arrow Table
+export function createEmptyTable (schema: Schema): ArrowTable {
+  return new ArrowTable(schema)
+}

@@ -13,10 +13,10 @@
 // limitations under the License.

 import {
-  RecordBatchFileWriter,
-  type Table as ArrowTable
+  type Schema,
+  Table as ArrowTable
 } from 'apache-arrow'
-import { fromRecordsToBuffer } from './arrow'
+import { createEmptyTable, fromRecordsToBuffer, fromTableToBuffer } from './arrow'
 import type { EmbeddingFunction } from './embedding/embedding_function'
 import { RemoteConnection } from './remote'
 import { Query } from './query'
@@ -42,6 +42,8 @@ export interface ConnectionOptions {

   awsCredentials?: AwsCredentials

+  awsRegion?: string
+
   // API key for the remote connections
   apiKey?: string
   // Region to connect
@@ -51,6 +53,40 @@ export interface ConnectionOptions {
   hostOverride?: string
 }

+function getAwsArgs (opts: ConnectionOptions): any[] {
+  const callArgs = []
+  const awsCredentials = opts.awsCredentials
+  if (awsCredentials !== undefined) {
+    callArgs.push(awsCredentials.accessKeyId)
+    callArgs.push(awsCredentials.secretKey)
+    callArgs.push(awsCredentials.sessionToken)
+  } else {
+    callArgs.push(undefined)
+    callArgs.push(undefined)
+    callArgs.push(undefined)
+  }
+
+  callArgs.push(opts.awsRegion)
+  return callArgs
+}
+
+export interface CreateTableOptions<T> {
+  // Name of Table
+  name: string
+
+  // Data to insert into the Table
+  data?: Array<Record<string, unknown>> | ArrowTable | undefined
+
+  // Optional Arrow Schema for this table
+  schema?: Schema | undefined
+
+  // Optional embedding function used to create embeddings
+  embeddingFunction?: EmbeddingFunction<T> | undefined
+
+  // WriteOptions for this operation
+  writeOptions?: WriteOptions | undefined
+}
+
 /**
  * Connect to a LanceDB instance at the given URI
  * @param uri The uri of the database.
@@ -97,6 +133,17 @@ export interface Connection {
   */
  openTable<T>(name: string, embeddings?: EmbeddingFunction<T>): Promise<Table<T>>

+  /**
+   * Creates a new Table, optionally initializing it with new data.
+   *
+   * @param {string} name - The name of the table.
+   * @param data - Array of Records to be inserted into the table
+   * @param schema - An Arrow Schema that describes this table's columns
+   * @param {EmbeddingFunction} embeddings - An embedding function to use on this table
+   * @param {WriteOptions} writeOptions - The write options to use when creating the table.
+   */
+  createTable<T> ({ name, data, schema, embeddingFunction, writeOptions }: CreateTableOptions<T>): Promise<Table<T>>
+
  /**
   * Creates a new Table and initialize it with new data.
   *
@@ -132,8 +179,6 @@ export interface Connection {
   */
  createTable<T> (name: string, data: Array<Record<string, unknown>>, embeddings: EmbeddingFunction<T>, options: WriteOptions): Promise<Table<T>>

-  createTableArrow(name: string, table: ArrowTable): Promise<Table>
-
  /**
   * Drop an existing table.
   * @param name The name of the table to drop.
@@ -221,16 +266,16 @@ export interface Table<T = number[]> {
  * A connection to a LanceDB database.
  */
 export class LocalConnection implements Connection {
-  private readonly _options: ConnectionOptions
+  private readonly _options: () => ConnectionOptions
   private readonly _db: any

   constructor (db: any, options: ConnectionOptions) {
-    this._options = options
+    this._options = () => options
     this._db = db
   }

   get uri (): string {
-    return this._options.uri
+    return this._options().uri
   }

   /**
@@ -256,48 +301,66 @@ export class LocalConnection implements Connection {
   async openTable<T> (name: string, embeddings: EmbeddingFunction<T>): Promise<Table<T>>
   async openTable<T> (name: string, embeddings?: EmbeddingFunction<T>): Promise<Table<T>>
   async openTable<T> (name: string, embeddings?: EmbeddingFunction<T>): Promise<Table<T>> {
-    const tbl = await databaseOpenTable.call(this._db, name)
+    const tbl = await databaseOpenTable.call(this._db, name, ...getAwsArgs(this._options()))
     if (embeddings !== undefined) {
-      return new LocalTable(tbl, name, this._options, embeddings)
+      return new LocalTable(tbl, name, this._options(), embeddings)
     } else {
-      return new LocalTable(tbl, name, this._options)
+      return new LocalTable(tbl, name, this._options())
     }
   }

-  async createTable<T> (name: string, data: Array<Record<string, unknown>>, optsOrEmbedding?: WriteOptions | EmbeddingFunction<T>, opt?: WriteOptions): Promise<Table<T>> {
-    let writeOptions: WriteOptions = new DefaultWriteOptions()
-    if (opt !== undefined && isWriteOptions(opt)) {
-      writeOptions = opt
-    } else if (optsOrEmbedding !== undefined && isWriteOptions(optsOrEmbedding)) {
-      writeOptions = optsOrEmbedding
-    }
-
-    let embeddings: undefined | EmbeddingFunction<T>
-    if (optsOrEmbedding !== undefined && isEmbeddingFunction(optsOrEmbedding)) {
-      embeddings = optsOrEmbedding
-    }
-    const createArgs = [this._db, name, await fromRecordsToBuffer(data, embeddings), writeOptions.writeMode?.toString()]
-    if (this._options.awsCredentials !== undefined) {
-      createArgs.push(this._options.awsCredentials.accessKeyId)
-      createArgs.push(this._options.awsCredentials.secretKey)
-      if (this._options.awsCredentials.sessionToken !== undefined) {
-        createArgs.push(this._options.awsCredentials.sessionToken)
-      }
-    }
-
-    const tbl = await tableCreate.call(...createArgs)
-
-    if (embeddings !== undefined) {
-      return new LocalTable(tbl, name, this._options, embeddings)
-    } else {
-      return new LocalTable(tbl, name, this._options)
+  async createTable<T> (name: string | CreateTableOptions<T>, data?: Array<Record<string, unknown>>, optsOrEmbedding?: WriteOptions | EmbeddingFunction<T>, opt?: WriteOptions): Promise<Table<T>> {
+    if (typeof name === 'string') {
+      let writeOptions: WriteOptions = new DefaultWriteOptions()
+      if (opt !== undefined && isWriteOptions(opt)) {
+        writeOptions = opt
+      } else if (optsOrEmbedding !== undefined && isWriteOptions(optsOrEmbedding)) {
+        writeOptions = optsOrEmbedding
+      }
+
+      let embeddings: undefined | EmbeddingFunction<T>
+      if (optsOrEmbedding !== undefined && isEmbeddingFunction(optsOrEmbedding)) {
+        embeddings = optsOrEmbedding
+      }
+      return await this.createTableImpl({ name, data, embeddingFunction: embeddings, writeOptions })
     }
+    return await this.createTableImpl(name)
   }

-  async createTableArrow (name: string, table: ArrowTable): Promise<Table> {
-    const writer = RecordBatchFileWriter.writeAll(table)
-    await tableCreate.call(this._db, name, Buffer.from(await writer.toUint8Array()))
-    return await this.openTable(name)
+  private async createTableImpl<T> ({ name, data, schema, embeddingFunction, writeOptions = new DefaultWriteOptions() }: {
+    name: string
+    data?: Array<Record<string, unknown>> | ArrowTable | undefined
+    schema?: Schema | undefined
+    embeddingFunction?: EmbeddingFunction<T> | undefined
+    writeOptions?: WriteOptions | undefined
+  }): Promise<Table<T>> {
+    let buffer: Buffer
+
+    function isEmpty (data: Array<Record<string, unknown>> | ArrowTable<any>): boolean {
+      if (data instanceof ArrowTable) {
+        return data.data.length === 0
+      }
+      return data.length === 0
+    }
+
+    if ((data === undefined) || isEmpty(data)) {
+      if (schema === undefined) {
+        throw new Error('Either data or schema needs to be defined')
+      }
+      buffer = await fromTableToBuffer(createEmptyTable(schema))
+    } else if (data instanceof ArrowTable) {
+      buffer = await fromTableToBuffer(data, embeddingFunction)
+    } else {
+      // data is Array<Record<...>>
+      buffer = await fromRecordsToBuffer(data, embeddingFunction)
+    }
+
+    const tbl = await tableCreate.call(this._db, name, buffer, writeOptions?.writeMode?.toString(), ...getAwsArgs(this._options()))
+    if (embeddingFunction !== undefined) {
+      return new LocalTable(tbl, name, this._options(), embeddingFunction)
+    } else {
+      return new LocalTable(tbl, name, this._options())
+    }
   }

   /**
@@ -313,7 +376,7 @@ export class LocalTable<T = number[]> implements Table<T> {
   private _tbl: any
   private readonly _name: string
   private readonly _embeddings?: EmbeddingFunction<T>
-  private readonly _options: ConnectionOptions
+  private readonly _options: () => ConnectionOptions

   constructor (tbl: any, name: string, options: ConnectionOptions)
   /**
@@ -327,7 +390,7 @@ export class LocalTable<T = number[]> implements Table<T> {
     this._tbl = tbl
     this._name = name
     this._embeddings = embeddings
-    this._options = options
+    this._options = () => options
   }

   get name (): string {
@@ -349,15 +412,12 @@ export class LocalTable<T = number[]> implements Table<T> {
    * @return The number of rows added to the table
    */
   async add (data: Array<Record<string, unknown>>): Promise<number> {
-    const callArgs = [this._tbl, await fromRecordsToBuffer(data, this._embeddings), WriteMode.Append.toString()]
-    if (this._options.awsCredentials !== undefined) {
-      callArgs.push(this._options.awsCredentials.accessKeyId)
-      callArgs.push(this._options.awsCredentials.secretKey)
-      if (this._options.awsCredentials.sessionToken !== undefined) {
-        callArgs.push(this._options.awsCredentials.sessionToken)
-      }
-    }
-    return tableAdd.call(...callArgs).then((newTable: any) => { this._tbl = newTable })
+    return tableAdd.call(
+      this._tbl,
+      await fromRecordsToBuffer(data, this._embeddings),
+      WriteMode.Append.toString(),
+      ...getAwsArgs(this._options())
+    ).then((newTable: any) => { this._tbl = newTable })
   }

   /**
@@ -367,15 +427,12 @@ export class LocalTable<T = number[]> implements Table<T> {
    * @return The number of rows added to the table
    */
   async overwrite (data: Array<Record<string, unknown>>): Promise<number> {
-    const callArgs = [this._tbl, await fromRecordsToBuffer(data, this._embeddings), WriteMode.Overwrite.toString()]
-    if (this._options.awsCredentials !== undefined) {
-      callArgs.push(this._options.awsCredentials.accessKeyId)
-      callArgs.push(this._options.awsCredentials.secretKey)
-      if (this._options.awsCredentials.sessionToken !== undefined) {
-        callArgs.push(this._options.awsCredentials.sessionToken)
-      }
-    }
-    return tableAdd.call(...callArgs).then((newTable: any) => { this._tbl = newTable })
+    return tableAdd.call(
+      this._tbl,
+      await fromRecordsToBuffer(data, this._embeddings),
+      WriteMode.Overwrite.toString(),
+      ...getAwsArgs(this._options())
+    ).then((newTable: any) => { this._tbl = newTable })
   }

   /**

@@ -112,7 +112,8 @@ export class Query<T = number[]> {
       this._queryVector = this._query as number[]
     }

-    const buffer = await tableSearch.call(this._tbl, this)
+    const isElectron = this.isElectron()
+    const buffer = await tableSearch.call(this._tbl, this, isElectron)
     const data = tableFromIPC(buffer)

     return data.toArray().map((entry: Record<string, unknown>) => {
@@ -127,4 +128,14 @@ export class Query<T = number[]> {
       return newObject as unknown as T
     })
   }
+
+  // See https://github.com/electron/electron/issues/2288
+  private isElectron (): boolean {
+    try {
+      // eslint-disable-next-line no-prototype-builtins
+      return (process?.versions?.hasOwnProperty('electron') || navigator?.userAgent?.toLowerCase()?.includes(' electron'))
+    } catch (e) {
+      return false
+    }
+  }
 }

@@ -14,11 +14,11 @@

 import {
   type EmbeddingFunction, type Table, type VectorIndexParams, type Connection,
-  type ConnectionOptions
+  type ConnectionOptions, type CreateTableOptions, type WriteOptions
 } from '../index'
 import { Query } from '../query'

-import { type Table as ArrowTable, Vector } from 'apache-arrow'
+import { Vector } from 'apache-arrow'
 import { HttpLancedbClient } from './client'

 /**
@@ -66,13 +66,7 @@ export class RemoteConnection implements Connection {
     }
   }

-  async createTable (name: string, data: Array<Record<string, unknown>>): Promise<Table>
-  async createTable<T> (name: string, data: Array<Record<string, unknown>>, embeddings: EmbeddingFunction<T>): Promise<Table<T>>
-  async createTable<T> (name: string, data: Array<Record<string, unknown>>, embeddings?: EmbeddingFunction<T>): Promise<Table<T>> {
-    throw new Error('Not implemented')
-  }
-
-  async createTableArrow (name: string, table: ArrowTable): Promise<Table> {
+  async createTable<T> (name: string | CreateTableOptions<T>, data?: Array<Record<string, unknown>>, optsOrEmbedding?: WriteOptions | EmbeddingFunction<T>, opt?: WriteOptions): Promise<Table<T>> {
     throw new Error('Not implemented')
   }

@@ -47,7 +47,9 @@ describe('LanceDB S3 client', function () {
       }
     }
     const table = await createTestDB(opts, 2, 20)
+    console.log(table)
     const con = await lancedb.connect(opts)
+    console.log(con)
     assert.equal(con.uri, opts.uri)

     const results = await table.search([0.1, 0.3]).limit(5).execute()
@@ -70,5 +72,5 @@ async function createTestDB (opts: ConnectionOptions, numDimensions: number = 2,
     data.push({ id: i + 1, name: `name_${i}`, price: i + 10, is_active: (i % 2 === 0), vector })
   }

-  return await con.createTable('vectors', data)
+  return await con.createTable('vectors_2', data)
 }

@@ -19,6 +19,7 @@ import * as chaiAsPromised from 'chai-as-promised'

 import * as lancedb from '../index'
 import { type AwsCredentials, type EmbeddingFunction, MetricType, Query, WriteMode, DefaultWriteOptions, isWriteOptions } from '../index'
+import { Field, Int32, makeVector, Schema, Utf8, Table as ArrowTable, vectorFromArray } from 'apache-arrow'

 const expect = chai.expect
 const assert = chai.assert
@@ -119,6 +120,45 @@ describe('LanceDB client', function () {
   })

   describe('when creating a new dataset', function () {
+    it('create an empty table', async function () {
+      const dir = await track().mkdir('lancejs')
+      const con = await lancedb.connect(dir)
+
+      const schema = new Schema(
+        [new Field('id', new Int32()), new Field('name', new Utf8())]
+      )
+      const table = await con.createTable({ name: 'vectors', schema })
+      assert.equal(table.name, 'vectors')
+      assert.deepEqual(await con.tableNames(), ['vectors'])
+    })
+
+    it('create a table with an empty data array', async function () {
+      const dir = await track().mkdir('lancejs')
+      const con = await lancedb.connect(dir)
+
+      const schema = new Schema(
+        [new Field('id', new Int32()), new Field('name', new Utf8())]
+      )
+      const table = await con.createTable({ name: 'vectors', schema, data: [] })
+      assert.equal(table.name, 'vectors')
+      assert.deepEqual(await con.tableNames(), ['vectors'])
+    })
+
+    it('create a table from an Arrow Table', async function () {
+      const dir = await track().mkdir('lancejs')
+      const con = await lancedb.connect(dir)
+
+      const i32s = new Int32Array(new Array<number>(10))
+      const i32 = makeVector(i32s)
+
+      const data = new ArrowTable({ vector: i32 })
+
+      const table = await con.createTable({ name: 'vectors', data })
+      assert.equal(table.name, 'vectors')
+      assert.equal(await table.countRows(), 10)
+      assert.deepEqual(await con.tableNames(), ['vectors'])
+    })
+
     it('creates a new table from javascript objects', async function () {
       const dir = await track().mkdir('lancejs')
       const con = await lancedb.connect(dir)
@@ -291,6 +331,20 @@ describe('LanceDB client', function () {
       const results = await table.search('foo').execute()
       assert.equal(results.length, 2)
     })
+
+    it('should create embeddings for Arrow Table', async function () {
+      const dir = await track().mkdir('lancejs')
+      const con = await lancedb.connect(dir)
+      const embeddingFunction = new TextEmbedding('name')
+
+      const names = vectorFromArray(['foo', 'bar'], new Utf8())
+      const data = new ArrowTable({ name: names })
+
+      const table = await con.createTable({ name: 'vectors', data, embeddingFunction })
+      assert.equal(table.name, 'vectors')
+      const results = await table.search('foo').execute()
+      assert.equal(results.length, 2)
+    })
   })
 })

@@ -149,14 +149,14 @@ class DBConnection(ABC):
         ... for i in range(5):
         ...     yield pa.RecordBatch.from_arrays(
         ...         [
-        ...             pa.array([[3.1, 4.1], [5.9, 26.5]]),
+        ...             pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)),
         ...             pa.array(["foo", "bar"]),
         ...             pa.array([10.0, 20.0]),
         ...         ],
         ...         ["vector", "item", "price"],
         ...     )
         >>> schema=pa.schema([
-        ...     pa.field("vector", pa.list_(pa.float32())),
+        ...     pa.field("vector", pa.list_(pa.float32(), 2)),
         ...     pa.field("item", pa.utf8()),
         ...     pa.field("price", pa.float32()),
         ...     ])

@@ -56,11 +56,22 @@ def _sanitize_data(data, schema, on_bad_vectors, fill_value):
         metadata = {k: v for k, v in metadata.items() if k != b"pandas"}
         schema = data.schema.with_metadata(metadata)
         data = pa.Table.from_arrays(data.columns, schema=schema)
+    if isinstance(data, Iterable):
+        data = _to_record_batch_generator(data, schema, on_bad_vectors, fill_value)
     if not isinstance(data, (pa.Table, Iterable)):
         raise TypeError(f"Unsupported data type: {type(data)}")
     return data


+def _to_record_batch_generator(data: Iterable, schema, on_bad_vectors, fill_value):
+    for batch in data:
+        if not isinstance(batch, pa.RecordBatch):
+            table = _sanitize_data(batch, schema, on_bad_vectors, fill_value)
+            for batch in table.to_batches():
+                yield batch
+        else:
+            yield batch
+
+
 class Table(ABC):
     """
     A [Table](Table) is a collection of Records in a LanceDB [Database](Database).

@@ -268,10 +279,11 @@ class LanceTable(Table):
         self.name = name
         self._version = version

-    def _reset_dataset(self):
+    def _reset_dataset(self, version=None):
         try:
             if "_dataset" in self.__dict__:
                 del self.__dict__["_dataset"]
+            self._version = version
         except AttributeError:
             pass

@@ -297,7 +309,9 @@ class LanceTable(Table):
     def checkout(self, version: int):
         """Checkout a version of the table. This is an in-place operation.

-        This allows viewing previous versions of the table.
+        This allows viewing previous versions of the table. If you wish to
+        keep writing to the dataset starting from an old version, then use
+        the `restore` function instead.

         Parameters
         ----------
@@ -325,7 +339,49 @@ class LanceTable(Table):
         max_ver = max([v["version"] for v in self._dataset.versions()])
         if version < 1 or version > max_ver:
             raise ValueError(f"Invalid version {version}")
-        self._version = version
+        self._reset_dataset(version=version)
+
+    def restore(self, version: int):
+        """Restore a version of the table. This is an in-place operation.
+
+        This creates a new version where the data is equivalent to the
+        specified previous version. Note that this creates a new snapshot.
+
+        Parameters
+        ----------
+        version : int
+            The version to restore.
+
+        Examples
+        --------
+        >>> import lancedb
+        >>> db = lancedb.connect("./.lancedb")
+        >>> table = db.create_table("my_table", [{"vector": [1.1, 0.9], "type": "vector"}])
+        >>> table.version
+        1
+        >>> table.to_pandas()
+               vector    type
+        0  [1.1, 0.9]  vector
+        >>> table.add([{"vector": [0.5, 0.2], "type": "vector"}])
+        >>> table.version
+        2
+        >>> table.restore(1)
+        >>> table.to_pandas()
+               vector    type
+        0  [1.1, 0.9]  vector
+        >>> len(table.list_versions())
+        3
+        """
+        max_ver = max([v["version"] for v in self._dataset.versions()])
+        if version < 1 or version >= max_ver:
+            raise ValueError(f"Invalid version {version}")
+        if version == max_ver:
+            self._reset_dataset()
+            return
+        self.checkout(version)
+        data = self.to_arrow()
+        self.checkout(max_ver)
+        self.add(data, mode="overwrite")
+        self._reset_dataset()

     def __len__(self):
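
For orientation, a small sketch of the version workflow these docstrings and the `restore` implementation describe, modeled on the docstring's own doctest (the database path, table name, and rows are illustrative):

```python
import lancedb

db = lancedb.connect("./.lancedb")
tbl = db.create_table("versioned", [{"vector": [1.1, 0.9], "type": "vector"}])  # version 1
tbl.add([{"vector": [0.5, 0.2], "type": "vector"}])                             # version 2

tbl.checkout(1)  # view version 1 in place; reads now see the old data
tbl.restore(1)   # creates a new latest version whose data equals version 1
assert len(tbl.list_versions()) == 3
assert len(tbl) == 1
```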

@@ -17,7 +17,7 @@ import pyarrow as pa
 import pytest

 import lancedb
-from lancedb.pydantic import LanceModel
+from lancedb.pydantic import LanceModel, vector


 def test_basic(tmp_path):
@@ -77,35 +77,78 @@ def test_ingest_pd(tmp_path):
     assert db.open_table("test").name == db["test"].name


-def test_ingest_record_batch_iterator(tmp_path):
-    def batch_reader():
-        for i in range(5):
-            yield pa.RecordBatch.from_arrays(
-                [
-                    pa.array([[3.1, 4.1], [5.9, 26.5]]),
-                    pa.array(["foo", "bar"]),
-                    pa.array([10.0, 20.0]),
-                ],
-                ["vector", "item", "price"],
-            )
-
-    db = lancedb.connect(tmp_path)
-    tbl = db.create_table(
-        "test",
-        batch_reader(),
-        schema=pa.schema(
-            [
-                pa.field("vector", pa.list_(pa.float32())),
-                pa.field("item", pa.utf8()),
-                pa.field("price", pa.float32()),
-            ]
-        ),
-    )
-
-    tbl_len = len(tbl)
-    tbl.add(batch_reader())
-    assert len(tbl) == tbl_len * 2
-    assert len(tbl.list_versions()) == 2
+def test_ingest_iterator(tmp_path):
+    class PydanticSchema(LanceModel):
+        vector: vector(2)
+        item: str
+        price: float
+
+    arrow_schema = pa.schema(
+        [
+            pa.field("vector", pa.list_(pa.float32(), 2)),
+            pa.field("item", pa.utf8()),
+            pa.field("price", pa.float32()),
+        ]
+    )
+
+    def make_batches():
+        for _ in range(5):
+            yield from [
+                # pandas
+                pd.DataFrame(
+                    {
+                        "vector": [[3.1, 4.1], [1, 1]],
+                        "item": ["foo", "bar"],
+                        "price": [10.0, 20.0],
+                    }
+                ),
+                # pylist
+                [
+                    {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
+                    {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
+                ],
+                # recordbatch
+                pa.RecordBatch.from_arrays(
+                    [
+                        pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)),
+                        pa.array(["foo", "bar"]),
+                        pa.array([10.0, 20.0]),
+                    ],
+                    ["vector", "item", "price"],
+                ),
+                # pa Table
+                pa.Table.from_arrays(
+                    [
+                        pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)),
+                        pa.array(["foo", "bar"]),
+                        pa.array([10.0, 20.0]),
+                    ],
+                    ["vector", "item", "price"],
+                ),
+                # pydantic list
+                [
+                    PydanticSchema(vector=[3.1, 4.1], item="foo", price=10.0),
+                    PydanticSchema(vector=[5.9, 26.5], item="bar", price=20.0),
+                ]
+                # TODO: test pydict separately; it has unique constraints on column count and names
+            ]
+
+    def run_tests(schema):
+        db = lancedb.connect(tmp_path)
+        tbl = db.create_table("table2", make_batches(), schema=schema, mode="overwrite")
+
+        tbl.to_pandas()
+        assert tbl.search([3.1, 4.1]).limit(1).to_df()["_distance"][0] == 0.0
+        assert tbl.search([5.9, 26.5]).limit(1).to_df()["_distance"][0] == 0.0
+
+        tbl_len = len(tbl)
+        tbl.add(make_batches())
+        assert len(tbl) == tbl_len * 2
+        assert len(tbl.list_versions()) == 2
+        db.drop_database()
+
+    run_tests(arrow_schema)
+    run_tests(PydanticSchema)


 def test_create_mode(tmp_path):
@@ -268,3 +268,15 @@ def test_add_with_nans(db):
     arrow_tbl = table.to_lance().to_table(filter="item == 'bar'")
     v = arrow_tbl["vector"].to_pylist()[0]
     assert np.allclose(v, np.array([0.0, 0.0]))
+
+
+def test_restore(db):
+    table = LanceTable.create(
+        db,
+        "my_table",
+        data=[{"vector": [1.1, 0.9], "type": "vector"}],
+    )
+    table.add([{"vector": [0.5, 0.2], "type": "vector"}])
+    table.restore(1)
+    assert len(table.list_versions()) == 3
+    assert len(table) == 1

@@ -1,6 +1,6 @@
 [package]
 name = "vectordb-node"
-version = "0.1.19"
+version = "0.2.3"
 description = "Serverless, low-latency vector database for AI applications"
 license = "Apache-2.0"
 edition = "2018"

@@ -14,60 +14,33 @@

 use std::io::Cursor;
 use std::ops::Deref;
 use std::sync::Arc;

-use arrow_array::cast::as_list_array;
-use arrow_array::{Array, ArrayRef, FixedSizeListArray, RecordBatch};
+use arrow_array::RecordBatch;
 use arrow_ipc::reader::FileReader;
 use arrow_ipc::writer::FileWriter;
-use arrow_schema::{DataType, Field, Schema};
-use lance::arrow::{FixedSizeListArrayExt, RecordBatchExt};
+use arrow_schema::SchemaRef;
 use vectordb::table::VECTOR_COLUMN_NAME;

 use crate::error::{MissingColumnSnafu, Result};
 use snafu::prelude::*;

-pub(crate) fn convert_record_batch(record_batch: RecordBatch) -> Result<RecordBatch> {
-    let column = get_column(VECTOR_COLUMN_NAME, &record_batch)?;
-
-    // TODO: we should just consume the underlying js buffer in the future instead of copying this arrow around a bunch of times
-    let arr = as_list_array(column.as_ref());
-    let list_size = arr.values().len() / record_batch.num_rows();
-    let r = FixedSizeListArray::try_new_from_values(arr.values().to_owned(), list_size as i32)?;
-
-    let schema = Arc::new(Schema::new(vec![Field::new(
-        VECTOR_COLUMN_NAME,
-        DataType::FixedSizeList(
-            Arc::new(Field::new("item", DataType::Float32, true)),
-            list_size as i32,
-        ),
-        true,
-    )]));
-
-    let mut new_batch = RecordBatch::try_new(schema.clone(), vec![Arc::new(r)])?;
-
-    if record_batch.num_columns() > 1 {
-        let rb = record_batch.drop_column(VECTOR_COLUMN_NAME)?;
-        new_batch = new_batch.merge(&rb)?;
-    }
-    Ok(new_batch)
-}
-
-fn get_column(column_name: &str, record_batch: &RecordBatch) -> Result<ArrayRef> {
+fn validate_vector_column(record_batch: &RecordBatch) -> Result<()> {
     record_batch
-        .column_by_name(column_name)
-        .cloned()
-        .context(MissingColumnSnafu { name: column_name })
+        .column_by_name(VECTOR_COLUMN_NAME)
+        .map(|_| ())
+        .context(MissingColumnSnafu { name: VECTOR_COLUMN_NAME })
 }

-pub(crate) fn arrow_buffer_to_record_batch(slice: &[u8]) -> Result<Vec<RecordBatch>> {
+pub(crate) fn arrow_buffer_to_record_batch(slice: &[u8]) -> Result<(Vec<RecordBatch>, SchemaRef)> {
     let mut batches: Vec<RecordBatch> = Vec::new();
     let file_reader = FileReader::try_new(Cursor::new(slice), None)?;
+    let schema = file_reader.schema().clone();
     for b in file_reader {
-        let record_batch = convert_record_batch(b?)?;
+        let record_batch = b?;
+        validate_vector_column(&record_batch)?;
         batches.push(record_batch);
     }
-    Ok(batches)
+    Ok((batches, schema))
 }

 pub(crate) fn record_batch_to_buffer(batches: Vec<RecordBatch>) -> Result<Vec<u8>> {

@@ -121,26 +121,28 @@ fn database_table_names(mut cx: FunctionContext) -> JsResult<JsPromise> {
     Ok(promise)
 }

-fn get_aws_creds<T>(
+/// Get AWS creds arguments from the context
+/// Consumes 3 arguments
+fn get_aws_creds(
     cx: &mut FunctionContext,
     arg_starting_location: i32,
-) -> Result<Option<AwsCredentialProvider>, NeonResult<T>> {
+) -> NeonResult<Option<AwsCredentialProvider>> {
     let secret_key_id = cx
         .argument_opt(arg_starting_location)
-        .map(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx).ok())
-        .flatten()
+        .filter(|arg| arg.is_a::<JsString, _>(cx))
+        .and_then(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx).ok())
         .map(|v| v.value(cx));

     let secret_key = cx
         .argument_opt(arg_starting_location + 1)
-        .map(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx).ok())
-        .flatten()
+        .filter(|arg| arg.is_a::<JsString, _>(cx))
+        .and_then(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx).ok())
         .map(|v| v.value(cx));

     let temp_token = cx
         .argument_opt(arg_starting_location + 2)
-        .map(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx).ok())
-        .flatten()
+        .filter(|arg| arg.is_a::<JsString, _>(cx))
+        .and_then(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx).ok())
         .map(|v| v.value(cx));

     match (secret_key_id, secret_key, temp_token) {
@@ -152,7 +154,21 @@ fn get_aws_creds(
         }),
         ))),
         (None, None, None) => Ok(None),
-        _ => Err(cx.throw_error("Invalid credentials configuration")),
+        _ => cx.throw_error("Invalid credentials configuration"),
     }
 }

+/// Get AWS region arguments from the context
+fn get_aws_region(cx: &mut FunctionContext, arg_location: i32) -> NeonResult<Option<String>> {
+    let region = cx
+        .argument_opt(arg_location)
+        .filter(|arg| arg.is_a::<JsString, _>(cx))
+        .map(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx));
+
+    match region {
+        Some(Ok(region)) => Ok(Some(region.value(cx))),
+        None => Ok(None),
+        Some(Err(e)) => Err(e),
+    }
+}

@@ -162,14 +178,14 @@ fn database_open_table(mut cx: FunctionContext) -> JsResult<JsPromise> {
         .downcast_or_throw::<JsBox<JsDatabase>, _>(&mut cx)?;
     let table_name = cx.argument::<JsString>(0)?.value(&mut cx);

-    let aws_creds = match get_aws_creds(&mut cx, 1) {
-        Ok(creds) => creds,
-        Err(err) => return err,
-    };
+    let aws_creds = get_aws_creds(&mut cx, 1)?;
+
+    let aws_region = get_aws_region(&mut cx, 4)?;

     let params = ReadParams {
         store_options: Some(ObjectStoreParams {
             aws_credentials: aws_creds,
+            aws_region,
             ..ObjectStoreParams::default()
         }),
         ..ReadParams::default()

@@ -7,6 +7,7 @@ use lance::index::vector::MetricType;
 use neon::context::FunctionContext;
 use neon::handle::Handle;
 use neon::prelude::*;
+use neon::types::buffer::TypedArray;

 use crate::arrow::record_batch_to_buffer;
 use crate::error::ResultExt;
@@ -47,6 +48,11 @@ impl JsQuery {
         .map(|s| s.value(&mut cx))
         .map(|s| MetricType::try_from(s.as_str()).unwrap());

+    let is_electron = cx
+        .argument::<JsBoolean>(1)
+        .or_throw(&mut cx)?
+        .value(&mut cx);
+
     let rt = runtime(&mut cx)?;

     let (deferred, promise) = cx.promise();
@@ -76,9 +82,26 @@ impl JsQuery {
         deferred.settle_with(&channel, move |mut cx| {
             let results = results.or_throw(&mut cx)?;
             let buffer = record_batch_to_buffer(results).or_throw(&mut cx)?;
-            Ok(JsBuffer::external(&mut cx, buffer))
+            Self::new_js_buffer(buffer, &mut cx, is_electron)
         });
     });
     Ok(promise)
 }

+    // Creates a new JsBuffer from a rust buffer, with special logic for electron
+    fn new_js_buffer<'a>(
+        buffer: Vec<u8>,
+        cx: &mut TaskContext<'a>,
+        is_electron: bool,
+    ) -> NeonResult<Handle<'a, JsBuffer>> {
+        if is_electron {
+            // Electron does not support `external`: https://github.com/neon-bindings/neon/pull/937
+            let mut js_buffer = JsBuffer::new(cx, buffer.len()).or_throw(cx)?;
+            let buffer_data = js_buffer.as_mut_slice(cx);
+            buffer_data.copy_from_slice(buffer.as_slice());
+            Ok(js_buffer)
+        } else {
+            Ok(JsBuffer::external(cx, buffer))
+        }
+    }
 }

@@ -22,7 +22,7 @@ use neon::types::buffer::TypedArray;
 use vectordb::Table;

 use crate::error::ResultExt;
-use crate::{get_aws_creds, runtime, JsDatabase};
+use crate::{get_aws_creds, get_aws_region, runtime, JsDatabase};

 pub(crate) struct JsTable {
     pub table: Table,
@@ -43,8 +43,7 @@ impl JsTable {
         .downcast_or_throw::<JsBox<JsDatabase>, _>(&mut cx)?;
     let table_name = cx.argument::<JsString>(0)?.value(&mut cx);
     let buffer = cx.argument::<JsBuffer>(1)?;
-    let batches = arrow_buffer_to_record_batch(buffer.as_slice(&mut cx)).or_throw(&mut cx)?;
-    let schema = batches[0].schema();
+    let (batches, schema) = arrow_buffer_to_record_batch(buffer.as_slice(&mut cx)).or_throw(&mut cx)?;

     // Write mode
     let mode = match cx.argument::<JsString>(2)?.value(&mut cx).as_str() {
@@ -62,14 +61,13 @@ impl JsTable {
     let (deferred, promise) = cx.promise();
     let database = db.database.clone();

-    let aws_creds = match get_aws_creds(&mut cx, 3) {
-        Ok(creds) => creds,
-        Err(err) => return err,
-    };
+    let aws_creds = get_aws_creds(&mut cx, 3)?;
+    let aws_region = get_aws_region(&mut cx, 6)?;

     let params = WriteParams {
         store_params: Some(ObjectStoreParams {
             aws_credentials: aws_creds,
+            aws_region,
             ..ObjectStoreParams::default()
         }),
         mode: mode,
@@ -94,10 +92,7 @@ impl JsTable {
     let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
     let buffer = cx.argument::<JsBuffer>(0)?;
     let write_mode = cx.argument::<JsString>(1)?.value(&mut cx);
-
-    let batches = arrow_buffer_to_record_batch(buffer.as_slice(&mut cx)).or_throw(&mut cx)?;
-    let schema = batches[0].schema();
-
+    let (batches, schema) = arrow_buffer_to_record_batch(buffer.as_slice(&mut cx)).or_throw(&mut cx)?;
     let rt = runtime(&mut cx)?;
     let channel = cx.channel();
     let mut table = js_table.table.clone();
@@ -109,14 +104,13 @@ impl JsTable {
         "overwrite" => WriteMode::Overwrite,
         s => return cx.throw_error(format!("invalid write mode {}", s)),
     };
-    let aws_creds = match get_aws_creds(&mut cx, 2) {
-        Ok(creds) => creds,
-        Err(err) => return err,
-    };
+    let aws_creds = get_aws_creds(&mut cx, 2)?;
+    let aws_region = get_aws_region(&mut cx, 5)?;

     let params = WriteParams {
         store_params: Some(ObjectStoreParams {
             aws_credentials: aws_creds,
+            aws_region,
             ..ObjectStoreParams::default()
         }),
         mode: write_mode,

@@ -1,6 +1,6 @@
 [package]
 name = "vectordb"
-version = "0.1.19"
+version = "0.2.3"
 edition = "2021"
 description = "Serverless, low-latency vector database for AI applications"
 license = "Apache-2.0"