Compare commits


2 Commits

Author | SHA1 | Message | Date
Will Jones | f2f0739f8c | docs: enhance Update user guide | 2023-12-20 15:13:59 -08:00
Will Jones | 88ce1e574a | docs: update node API reference | 2023-12-20 15:13:37 -08:00
39 changed files with 128 additions and 695 deletions

View File

@@ -1,5 +1,5 @@
[bumpversion]
current_version = 0.4.1
current_version = 0.4.0
commit = True
message = Bump version: {current_version} → {new_version}
tag = True

View File

@@ -1,33 +0,0 @@
name: Bug Report - Node / Typescript
description: File a bug report
title: "bug(node): "
labels: [bug, typescript]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report!
- type: input
id: version
attributes:
label: LanceDB version
description: What version of LanceDB are you using? `npm list | grep vectordb`.
placeholder: v0.3.2
validations:
required: false
- type: textarea
id: what-happened
attributes:
label: What happened?
description: Also tell us, what did you expect to happen?
validations:
required: true
- type: textarea
id: reproduction
attributes:
label: Are there known steps to reproduce?
description: |
Let us know how to reproduce the bug and we may be able to fix it more
quickly. This is not required, but it is helpful.
validations:
required: false

View File

@@ -1,33 +0,0 @@
name: Bug Report - Python
description: File a bug report
title: "bug(python): "
labels: [bug, python]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report!
- type: input
id: version
attributes:
label: LanceDB version
description: What version of LanceDB are you using? `python -c "import lancedb; print(lancedb.__version__)"`.
placeholder: v0.3.2
validations:
required: false
- type: textarea
id: what-happened
attributes:
label: What happened?
description: Also tell us, what did you expect to happen?
validations:
required: true
- type: textarea
id: reproduction
attributes:
label: Are there known steps to reproduce?
description: |
Let us know how to reproduce the bug and we may be able to fix it more
quickly. This is not required, but it is helpful.
validations:
required: false

View File

@@ -1,5 +0,0 @@
blank_issues_enabled: true
contact_links:
- name: Discord Community Support
url: https://discord.com/invite/zMM32dvNtd
about: Please ask and answer questions here.

View File

@@ -1,23 +0,0 @@
name: 'Documentation improvement'
description: Report an issue with the documentation.
labels: [documentation]
body:
- type: textarea
id: description
attributes:
label: Description
description: >
Describe the issue with the documentation and how it can be fixed or improved.
validations:
required: true
- type: input
id: link
attributes:
label: Link
description: >
Provide a link to the existing documentation, if applicable.
placeholder: ex. https://lancedb.github.io/lancedb/guides/tables/...
validations:
required: false

View File

@@ -1,31 +0,0 @@
name: Feature suggestion
description: Suggestion a new feature for LanceDB
title: "Feature: "
labels: [enhancement]
body:
- type: markdown
attributes:
value: |
Share a new idea for a feature or improvement. Be sure to search existing
issues first to avoid duplicates.
- type: dropdown
id: sdk
attributes:
label: SDK
description: Which SDK are you using? This helps us prioritize.
options:
- Python
- Node
- Rust
default: 0
validations:
required: false
- type: textarea
id: description
attributes:
label: Description
description: |
Describe the feature and why it would be useful. If applicable, consider
providing a code example of what it might be like to use the feature.
validations:
required: true

View File

@@ -44,19 +44,12 @@ jobs:
run: pytest -m "not slow" -x -v --durations=30 tests
- name: doctest
run: pytest --doctest-modules lancedb
platform:
name: "Platform: ${{ matrix.config.name }}"
mac:
timeout-minutes: 30
strategy:
matrix:
config:
- name: x86 Mac
runner: macos-13
- name: Arm Mac
runner: macos-13-xlarge
- name: x86 Windows
runner: windows-latest
runs-on: "${{ matrix.config.runner }}"
mac-runner: [ "macos-13", "macos-13-xlarge" ]
runs-on: "${{ matrix.mac-runner }}"
defaults:
run:
shell: bash

View File

@@ -24,29 +24,6 @@ env:
RUST_BACKTRACE: "1"
jobs:
lint:
timeout-minutes: 30
runs-on: ubuntu-22.04
defaults:
run:
shell: bash
working-directory: rust
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
lfs: true
- uses: Swatinem/rust-cache@v2
with:
workspaces: rust
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y protobuf-compiler libssl-dev
- name: Run format
run: cargo fmt --all -- --check
- name: Run clippy
run: cargo clippy --all --all-features -- -D warnings
linux:
timeout-minutes: 30
runs-on: ubuntu-22.04

View File

@@ -5,24 +5,24 @@ exclude = ["python"]
resolver = "2"
[workspace.dependencies]
lance = { "version" = "=0.9.1", "features" = ["dynamodb"] }
lance-index = { "version" = "=0.9.1" }
lance-linalg = { "version" = "=0.9.1" }
lance-testing = { "version" = "=0.9.1" }
lance = { "version" = "=0.9.0", "features" = ["dynamodb"] }
lance-index = { "version" = "=0.9.0" }
lance-linalg = { "version" = "=0.9.0" }
lance-testing = { "version" = "=0.9.0" }
# Note that this one does not include pyarrow
arrow = { version = "49.0.0", optional = false }
arrow-array = "49.0"
arrow-data = "49.0"
arrow-ipc = "49.0"
arrow-ord = "49.0"
arrow-schema = "49.0"
arrow-arith = "49.0"
arrow-cast = "49.0"
arrow = { version = "47.0.0", optional = false }
arrow-array = "47.0"
arrow-data = "47.0"
arrow-ipc = "47.0"
arrow-ord = "47.0"
arrow-schema = "47.0"
arrow-arith = "47.0"
arrow-cast = "47.0"
chrono = "0.4.23"
half = { "version" = "=2.3.1", default-features = false, features = [
"num-traits",
] }
log = "0.4"
object_store = "0.8.0"
object_store = "0.7.1"
snafu = "0.7.4"
url = "2"

View File

@@ -64,26 +64,18 @@ We'll cover the basics of using LanceDB on your local machine in this section.
tbl = db.create_table("table_from_df", data=df)
```
!!! warning
If the table already exists, LanceDB will raise an error by default.
If you want to overwrite the table, you can pass in `mode="overwrite"`
to the `createTable` function.
=== "Javascript"
```javascript
const tb = await db.createTable(
"myTable",
[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0}])
const tb = await db.createTable("my_table",
data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0}])
```
!!! warning
If the table already exists, LanceDB will raise an error by default.
If you want to overwrite the table, you can pass in `"overwrite"`
to the `createTable` function like this: `await con.createTable(tableName, data, { writeMode: WriteMode.Overwrite })`
!!! warning
If the table already exists, LanceDB will raise an error by default.
If you want to overwrite the table, you can pass in `mode="overwrite"`
to the `createTable` function.
??? info "Under the hood, LanceDB is converting the input data into an Apache Arrow table and persisting it to disk in [Lance format](https://www.github.com/lancedb/lance)."
@@ -116,7 +108,7 @@ Once created, you can open a table using the following code:
=== "Javascript"
```javascript
const tbl = await db.openTable("myTable");
const tbl = await db.openTable("my_table");
```
If you forget the name of your table, you can always get a listing of all table names:
@@ -202,17 +194,10 @@ Use the `drop_table()` method on the database to remove a table.
db.drop_table("my_table")
```
This permanently removes the table and is not recoverable, unlike deleting rows.
By default, if the table does not exist an exception is raised. To suppress this,
you can pass in `ignore_missing=True`.
This permanently removes the table and is not recoverable, unlike deleting rows.
By default, if the table does not exist an exception is raised. To suppress this,
you can pass in `ignore_missing=True`.
=== "JavaScript"
```javascript
await db.dropTable('myTable')
```
This permanently removes the table and is not recoverable, unlike deleting rows.
If the table does not exist an exception is raised.
## What's next
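A minimal Python sketch of the table lifecycle covered in the hunk above — the connection path and table name are illustrative, and the `mode="overwrite"` / `ignore_missing=True` arguments are the ones named in the guide text:

```python
import lancedb

db = lancedb.connect("data/sample-lancedb")  # illustrative local path

data = [
    {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
    {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
]

# Creating a table that already exists raises an error by default;
# mode="overwrite" replaces it instead, as the warning above describes.
tbl = db.create_table("my_table", data=data, mode="overwrite")

# Reopen later by name.
tbl = db.open_table("my_table")

# drop_table() is permanent; ignore_missing=True suppresses the error
# raised when the table does not exist.
db.drop_table("my_table", ignore_missing=True)
```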

View File

@@ -1,9 +1,9 @@
There are various Embedding functions available out of the box with LanceDB. We're working on supporting other popular embedding APIs.
There are various Embedding functions available out of the box with lancedb. We're working on supporting other popular embedding APIs.
## Text Embedding Functions
Here are the text embedding functions registered by default.
Embedding functions have an inbuilt rate limit handler wrapper for source and query embedding function calls that retry with exponential standoff.
Each `EmbeddingFunction` implementation automatically takes `max_retries` as an argument which has the default value of 7.
Embedding functions have inbuilt rate limit handler wrapper for source and query embedding function calls that retry with exponential standoff.
Each `EmbeddingFunction` implementation automatically takes `max_retries` as an argument which has the deafult value of 7.
### Sentence Transformers
Here are the parameters that you can set when registering a `sentence-transformers` object, and their default values:
@@ -69,15 +69,15 @@ print(actual.text)
```
### Instructor Embeddings
Instructor is an instruction-finetuned text embedding model that can generate text embeddings tailored to any task (e.g. classification, retrieval, clustering, text evaluation, etc.) and domains (e.g. science, finance, etc.) by simply providing the task instruction, without any finetuning.
Instructor is an instruction-finetuned text embedding model that can generate text embeddings tailored to any task (e.g., classification, retrieval, clustering, text evaluation, etc.) and domains (e.g., science, finance, etc.) by simply providing the task instruction, without any finetuning
If you want to calculate customized embeddings for specific sentences, you may follow the unified template to write instructions:
Represent the `domain` `text_type` for `task_objective`:
* `domain` is optional, and it specifies the domain of the text, e.g. science, finance, medicine, etc.
* `text_type` is required, and it specifies the encoding unit, e.g. sentence, document, paragraph, etc.
* `task_objective` is optional, and it specifies the objective of embedding, e.g. retrieve a document, classify the sentence, etc.
* `domain` is optional, and it specifies the domain of the text, e.g., science, finance, medicine, etc.
* `text_type` is required, and it specifies the encoding unit, e.g., sentence, document, paragraph, etc.
* `task_objective` is optional, and it specifies the objective of embedding, e.g., retrieve a document, classify the sentence, etc.
More information about the model can be found here - https://github.com/xlang-ai/instructor-embedding
@@ -119,10 +119,10 @@ tbl.add(texts)
```
## Multi-modal embedding functions
Multi-modal embedding functions allow you to query your table using both images and text.
Multi-modal embedding functions allow you query your table using both images and text.
### OpenClipEmbeddings
We support CLIP model embeddings using the open source alternative, open-clip which supports various customizations. It is registered as `open-clip` and supports the following customizations:
We support CLIP model embeddings using the open souce alternbative, open-clip which support various customizations. It is registered as `open-clip` and supports following customizations.
| Parameter | Type | Default Value | Description |
@@ -205,4 +205,4 @@ print(actual.label)
```
If you have any questions about the embeddings API, supported models, or see a relevant model missing, please raise an issue.
If you have any questions about the embeddings API, supported models, or see a relevant model missing, please raise an issue.
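As a companion to the hunk above, a sketch of registering one of the built-in text embedding functions — this assumes the `EmbeddingFunctionRegistry` / `LanceModel` pattern from LanceDB's embeddings API, and `max_retries` is the rate-limit setting described above (default 7):

```python
from lancedb.embeddings import EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector

registry = EmbeddingFunctionRegistry.get_instance()
# "sentence-transformers" is one of the text embedding functions registered by
# default; max_retries feeds the built-in rate-limit handler with exponential backoff.
model = registry.get("sentence-transformers").create(max_retries=7)

class Words(LanceModel):
    text: str = model.SourceField()                      # embedded when rows are added
    vector: Vector(model.ndims()) = model.VectorField()  # embedded for queries too
```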

View File

@@ -203,8 +203,8 @@ This guide will show how to create tables, insert data into them, and update the
```javascript
data
const tb = await db.createTable("my_table",
[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0}])
data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0}])
```
!!! info "Note"

View File

@@ -11,13 +11,8 @@ npm install vectordb
```
This will download the appropriate native library for your platform. We currently
support:
* Linux (x86_64 and aarch64)
* MacOS (Intel and ARM/M1/M2)
* Windows (x86_64 only)
We do not yet support musl-based Linux (such as Alpine Linux) or aarch64 Windows.
support x86_64 Linux, aarch64 Linux, Intel MacOS, and ARM (M1/M2) MacOS. We do not
yet support musl-based Linux (such as Alpine Linux).
## Usage

View File

@@ -22,7 +22,7 @@ import numpy as np
uri = "data/sample-lancedb"
db = lancedb.connect(uri)
data = [{"vector": row, "item": f"item {i}", "id": i}
data = [{"vector": row, "item": f"item {i}"}
for i, row in enumerate(np.random.random((10_000, 2)).astype('int'))]
tbl = db.create_table("my_vectors", data=data)
@@ -35,25 +35,33 @@ const db = await vectordb.connect('data/sample-lancedb')
let data = []
for (let i = 0; i < 10_000; i++) {
data.push({vector: Array(1536).fill(i), id: i, item: `item ${i}`, strId: `${i}`})
data.push({vector: Array(1536).fill(i), id: `${i}`, content: "", longId: `${i}`},)
}
const tbl = await db.createTable('myVectors', data)
const tbl = await db.createTable('my_vectors', data)
```
-->
=== "Python"
```python
tbl.search([100, 102]) \
.where("(item IN ('item 0', 'item 2')) AND (id > 10)") \
.to_arrow()
```
.where("""(
(label IN [10, 20])
AND
(note.email IS NOT NULL)
) OR NOT note.created
""")
```
=== "Javascript"
```javascript
await tbl.search(Array(1536).fill(0))
.where("(item IN ('item 0', 'item 2')) AND (id > 10)")
.execute()
tbl.search([100, 102])
.where(`(
(label IN [10, 20])
AND
(note.email IS NOT NULL)
) OR NOT note.created
`)
```
@@ -110,22 +118,3 @@ The mapping from SQL types to Arrow types is:
[^1]: See precision mapping in previous table.
## Filtering without Vector Search
You can also filter your data without search.
=== "Python"
```python
tbl.search().where("id=10").limit(10).to_arrow()
```
=== "JavaScript"
```javascript
await tbl.where('id=10').limit(10).execute()
```
!!! warning
If your table is large, this could potentially return a very large
amount of data. Please be sure to use a `limit` clause unless
you're sure you want to return the whole result set.
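A compact Python sketch tying together the two filtering modes shown in the hunk above (the table and column names come from the snippets above and are illustrative):

```python
import lancedb

db = lancedb.connect("data/sample-lancedb")  # illustrative path
tbl = db.open_table("my_vectors")

# SQL filter applied alongside a vector search.
tbl.search([100, 102]) \
    .where("(item IN ('item 0', 'item 2')) AND (id > 10)") \
    .to_arrow()

# Filtering without a vector search; keep a limit() on large tables,
# per the warning above.
tbl.search().where("id = 10").limit(10).to_arrow()
```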

View File

@@ -9,13 +9,8 @@ npm install vectordb
```
This will download the appropriate native library for your platform. We currently
support:
* Linux (x86_64 and aarch64)
* MacOS (Intel and ARM/M1/M2)
* Windows (x86_64 only)
We do not yet support musl-based Linux (such as Alpine Linux) or aarch64 Windows.
support x86_64 Linux, aarch64 Linux, Intel MacOS, and ARM (M1/M2) MacOS. We do not
yet support musl-based Linux (such as Alpine Linux).
## Usage

View File

@@ -1,6 +1,6 @@
{
"name": "vectordb",
"version": "0.4.1",
"version": "0.4.0",
"description": " Serverless, low-latency vector database for AI applications",
"main": "dist/index.js",
"types": "dist/index.d.ts",
@@ -81,10 +81,10 @@
}
},
"optionalDependencies": {
"@lancedb/vectordb-darwin-arm64": "0.4.1",
"@lancedb/vectordb-darwin-x64": "0.4.1",
"@lancedb/vectordb-linux-arm64-gnu": "0.4.1",
"@lancedb/vectordb-linux-x64-gnu": "0.4.1",
"@lancedb/vectordb-win32-x64-msvc": "0.4.1"
"@lancedb/vectordb-darwin-arm64": "0.4.0",
"@lancedb/vectordb-darwin-x64": "0.4.0",
"@lancedb/vectordb-linux-arm64-gnu": "0.4.0",
"@lancedb/vectordb-linux-x64-gnu": "0.4.0",
"@lancedb/vectordb-win32-x64-msvc": "0.4.0"
}
}

View File

@@ -24,7 +24,7 @@ import { isEmbeddingFunction } from './embedding/embedding_function'
import { type Literal, toSQL } from './util'
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { databaseNew, databaseTableNames, databaseOpenTable, databaseDropTable, tableCreate, tableAdd, tableCreateScalarIndex, tableCreateVectorIndex, tableCountRows, tableDelete, tableUpdate, tableCleanupOldVersions, tableCompactFiles, tableListIndices, tableIndexStats } = require('../native.js')
const { databaseNew, databaseTableNames, databaseOpenTable, databaseDropTable, tableCreate, tableAdd, tableCreateVectorIndex, tableCountRows, tableDelete, tableUpdate, tableCleanupOldVersions, tableCompactFiles, tableListIndices, tableIndexStats } = require('../native.js')
export { Query }
export type { EmbeddingFunction }
@@ -223,56 +223,6 @@ export interface Table<T = number[]> {
*/
createIndex: (indexParams: VectorIndexParams) => Promise<any>
/**
* Create a scalar index on this Table for the given column
*
* @param column The column to index
* @param replace If false, fail if an index already exists on the column
*
* Scalar indices, like vector indices, can be used to speed up scans. A scalar
* index can speed up scans that contain filter expressions on the indexed column.
* For example, the following scan will be faster if the column `my_col` has
* a scalar index:
*
* ```ts
* const con = await lancedb.connect('./.lancedb');
* const table = await con.openTable('images');
* const results = await table.where('my_col = 7').execute();
* ```
*
* Scalar indices can also speed up scans containing a vector search and a
* prefilter:
*
* ```ts
* const con = await lancedb.connect('././lancedb');
* const table = await con.openTable('images');
* const results = await table.search([1.0, 2.0]).where('my_col != 7').prefilter(true);
* ```
*
* Scalar indices can only speed up scans for basic filters using
* equality, comparison, range (e.g. `my_col BETWEEN 0 AND 100`), and set
* membership (e.g. `my_col IN (0, 1, 2)`)
*
* Scalar indices can be used if the filter contains multiple indexed columns and
* the filter criteria are AND'd or OR'd together
* (e.g. `my_col < 0 AND other_col> 100`)
*
* Scalar indices may be used if the filter contains non-indexed columns but,
* depending on the structure of the filter, they may not be usable. For example,
* if the column `not_indexed` does not have a scalar index then the filter
* `my_col = 0 OR not_indexed = 1` will not be able to use any scalar index on
* `my_col`.
*
* @examples
*
* ```ts
* const con = await lancedb.connect('././lancedb')
* const table = await con.openTable('images')
* await table.createScalarIndex('my_col')
* ```
*/
createScalarIndex: (column: string, replace: boolean) => Promise<void>
/**
* Returns the number of rows in this table.
*/
@@ -331,8 +281,8 @@ export interface Table<T = number[]> {
* const tbl = await con.createTable("my_table", data)
*
* await tbl.update({
* where: "id = 2",
* values: { vector: [2, 2], name: "Michael" },
* filter: "id = 2",
* updates: { vector: [2, 2], name: "Michael" },
* })
*
* let results = await tbl.search([1, 1]).execute();
@@ -587,10 +537,6 @@ export class LocalTable<T = number[]> implements Table<T> {
return tableCreateVectorIndex.call(this._tbl, indexParams).then((newTable: any) => { this._tbl = newTable })
}
async createScalarIndex (column: string, replace: boolean): Promise<void> {
return tableCreateScalarIndex.call(this._tbl, column, replace)
}
/**
* Returns the number of rows in this table.
*/

View File

@@ -57,8 +57,8 @@ export class RemoteConnection implements Connection {
return 'db://' + this._client.uri
}
async tableNames (pageToken: string = '', limit: number = 10): Promise<string[]> {
const response = await this._client.get('/v1/table/', { limit, page_token: pageToken })
async tableNames (): Promise<string[]> {
const response = await this._client.get('/v1/table/')
return response.data.tables
}
@@ -283,10 +283,6 @@ export class RemoteTable<T = number[]> implements Table<T> {
}
}
async createScalarIndex (column: string, replace: boolean): Promise<void> {
throw new Error('Not implemented')
}
async countRows (): Promise<number> {
const result = await this._client.post(`/v1/table/${this._name}/describe/`)
return result.data?.stats?.num_rows

View File

@@ -135,17 +135,6 @@ describe('LanceDB client', function () {
assert.isTrue(results.length === 10)
})
it('should allow creation and use of scalar indices', async function () {
const uri = await createTestDB(16, 300)
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
await table.createScalarIndex('id', true)
// Prefiltering should still work the same
const results = await table.search(new Array(16).fill(0.1)).limit(10).filter('id >= 10').prefilter(true).execute()
assert.isTrue(results.length === 10)
})
it('select only a subset of columns', async function () {
const uri = await createTestDB()
const con = await lancedb.connect(uri)

View File

@@ -23,7 +23,7 @@ from overrides import EnforceOverrides, override
from pyarrow import fs
from .table import LanceTable, Table
from .util import fs_from_uri, get_uri_location, get_uri_scheme, join_uri
from .util import fs_from_uri, get_uri_location, get_uri_scheme
if TYPE_CHECKING:
from .common import DATA, URI
@@ -288,13 +288,14 @@ class LanceDBConnection(DBConnection):
A list of table names.
"""
try:
filesystem = fs_from_uri(self.uri)[0]
filesystem, path = fs_from_uri(self.uri)
except pa.ArrowInvalid:
raise NotImplementedError("Unsupported scheme: " + self.uri)
try:
loc = get_uri_location(self.uri)
paths = filesystem.get_file_info(fs.FileSelector(loc))
paths = filesystem.get_file_info(
fs.FileSelector(get_uri_location(self.uri))
)
except FileNotFoundError:
# It is ok if the file does not exist since it will be created
paths = []
@@ -372,7 +373,7 @@ class LanceDBConnection(DBConnection):
"""
try:
filesystem, path = fs_from_uri(self.uri)
table_path = join_uri(path, name + ".lance")
table_path = os.path.join(path, name + ".lance")
filesystem.delete_dir(table_path)
except FileNotFoundError:
if not ignore_missing:

View File

@@ -64,12 +64,6 @@ class RemoteTable(Table):
"""to_pandas() is not supported on the LanceDB cloud"""
return NotImplementedError("to_pandas() is not supported on the LanceDB cloud")
def create_scalar_index(self, *args, **kwargs):
"""Creates a scalar index"""
return NotImplementedError(
"create_scalar_index() is not supported on the LanceDB cloud"
)
def create_index(
self,
metric="L2",

View File

@@ -23,7 +23,6 @@ import lance
import numpy as np
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.fs as pa_fs
from lance import LanceDataset
from lance.vector import vec_to_table
@@ -31,7 +30,7 @@ from .common import DATA, VEC, VECTOR_COLUMN_NAME
from .embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry
from .pydantic import LanceModel, model_to_dict
from .query import LanceQueryBuilder, Query
from .util import fs_from_uri, safe_import_pandas, value_to_sql, join_uri
from .util import fs_from_uri, safe_import_pandas, value_to_sql
from .utils.events import register_event
if TYPE_CHECKING:
@@ -221,77 +220,6 @@ class Table(ABC):
"""
raise NotImplementedError
@abstractmethod
def create_scalar_index(
self,
column: str,
*,
replace: bool = True,
):
"""Create a scalar index on a column.
Scalar indices, like vector indices, can be used to speed up scans. A scalar
index can speed up scans that contain filter expressions on the indexed column.
For example, the following scan will be faster if the column ``my_col`` has
a scalar index:
.. code-block:: python
import lancedb
db = lancedb.connect("/data/lance")
img_table = db.open_table("images")
my_df = img_table.search().where("my_col = 7", prefilter=True).to_pandas()
Scalar indices can also speed up scans containing a vector search and a
prefilter:
.. code-block::python
import lancedb
db = lancedb.connect("/data/lance")
img_table = db.open_table("images")
img_table.search([1, 2, 3, 4], vector_column_name="vector")
.where("my_col != 7", prefilter=True)
.to_pandas()
Scalar indices can only speed up scans for basic filters using
equality, comparison, range (e.g. ``my_col BETWEEN 0 AND 100``), and set
membership (e.g. `my_col IN (0, 1, 2)`)
Scalar indices can be used if the filter contains multiple indexed columns and
the filter criteria are AND'd or OR'd together
(e.g. ``my_col < 0 AND other_col> 100``)
Scalar indices may be used if the filter contains non-indexed columns but,
depending on the structure of the filter, they may not be usable. For example,
if the column ``not_indexed`` does not have a scalar index then the filter
``my_col = 0 OR not_indexed = 1`` will not be able to use any scalar index on
``my_col``.
**Experimental API**
Parameters
----------
column : str
The column to be indexed. Must be a boolean, integer, float,
or string column.
replace : bool, default True
Replace the existing index if it exists.
Examples
--------
.. code-block:: python
import lance
dataset = lance.dataset("/tmp/images.lance")
dataset.create_scalar_index("category")
"""
raise NotImplementedError
@abstractmethod
def add(
self,
@@ -511,7 +439,6 @@ class Table(ABC):
"""
raise NotImplementedError
class LanceTable(Table):
"""
A table in a LanceDB database.
@@ -679,7 +606,7 @@ class LanceTable(Table):
@property
def _dataset_uri(self) -> str:
return join_uri(self._conn.uri, f"{self.name}.lance")
return os.path.join(self._conn.uri, f"{self.name}.lance")
def create_index(
self,
@@ -705,12 +632,7 @@ class LanceTable(Table):
self._reset_dataset()
register_event("create_index")
def create_scalar_index(self, column: str, *, replace: bool = True):
self._dataset.create_scalar_index(column, index_type="BTREE", replace=replace)
def create_fts_index(
self, field_names: Union[str, List[str]], *, replace: bool = False
):
def create_fts_index(self, field_names: Union[str, List[str]]):
"""Create a full-text search index on the table.
Warning - this API is highly experimental and is highly likely to change
@@ -720,31 +642,17 @@ class LanceTable(Table):
----------
field_names: str or list of str
The name(s) of the field to index.
replace: bool, default False
If True, replace the existing index if it exists. Note that this is
not yet an atomic operation; the index will be temporarily
unavailable while the new index is being created.
"""
from .fts import create_index, populate_index
if isinstance(field_names, str):
field_names = [field_names]
fs, path = fs_from_uri(self._get_fts_index_path())
index_exists = fs.get_file_info(path).type != pa_fs.FileType.NotFound
if index_exists:
if not replace:
raise ValueError(
f"Index already exists. Use replace=True to overwrite."
)
fs.delete_dir(path)
index = create_index(self._get_fts_index_path(), field_names)
populate_index(index, self, field_names)
register_event("create_fts_index")
def _get_fts_index_path(self):
return join_uri(self._dataset_uri, "_indices", "tantivy")
return os.path.join(self._dataset_uri, "_indices", "tantivy")
@cached_property
def _dataset(self) -> LanceDataset:
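For reference, a sketch of the `create_scalar_index` usage documented in the docstring shown in the hunk above (database path and column names are illustrative):

```python
import lancedb

db = lancedb.connect("/data/lance")  # illustrative path
img_table = db.open_table("images")

# Builds a BTREE scalar index on the column (experimental API in this version);
# replace=True rebuilds an existing index.
img_table.create_scalar_index("my_col", replace=True)

# The index can speed up plain filtered scans and prefiltered vector searches.
img_table.search().where("my_col = 7", prefilter=True).to_pandas()
img_table.search([1, 2, 3, 4], vector_column_name="vector") \
    .where("my_col != 7", prefilter=True) \
    .to_pandas()
```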

View File

@@ -14,8 +14,7 @@
import os
from datetime import date, datetime
from functools import singledispatch
import pathlib
from typing import Tuple, Union
from typing import Tuple
from urllib.parse import urlparse
import numpy as np
@@ -63,12 +62,6 @@ def get_uri_location(uri: str) -> str:
str: Location part of the URL, without scheme
"""
parsed = urlparse(uri)
if len(parsed.scheme) == 1:
# Windows drive names are parsed as the scheme
# e.g. "c:\path" -> ParseResult(scheme="c", netloc="", path="/path", ...)
# So we add special handling here for schemes that are a single character
return uri
if not parsed.netloc:
return parsed.path
else:
@@ -91,29 +84,6 @@ def fs_from_uri(uri: str) -> Tuple[pa_fs.FileSystem, str]:
return pa_fs.FileSystem.from_uri(uri)
def join_uri(base: Union[str, pathlib.Path], *parts: str) -> str:
"""
Join a URI with multiple parts, handles both local and remote paths
Parameters
----------
base : str
The base URI
parts : str
The parts to join to the base URI, each separated by the
appropriate path separator for the URI scheme and OS
"""
if isinstance(base, pathlib.Path):
return base.joinpath(*parts)
base = str(base)
if get_uri_scheme(base) == "file":
# using pathlib for local paths make this windows compatible
# `get_uri_scheme` returns `file` for windows drive names (e.g. `c:\path`)
return str(pathlib.Path(base, *parts))
# for remote paths, just use os.path.join
return "/".join([p.rstrip("/") for p in [base, *parts]])
def safe_import_pandas():
try:
import pandas as pd
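A quick sketch of how the `join_uri` helper shown above behaves, based on its implementation and the tests later in this comparison:

```python
import pathlib
from lancedb.util import join_uri

# Remote URIs are joined with "/" regardless of OS.
assert join_uri("s3://bucket/path/to/", "table.lance") == "s3://bucket/path/to/table.lance"
assert join_uri("gs://bucket", "path", "to", "table.lance") == "gs://bucket/path/to/table.lance"

# Local paths go through pathlib, which keeps Windows drive paths
# (e.g. "c:\\absolute\\path") working as well.
assert join_uri("/absolute/path", "table.lance") == str(pathlib.Path("/absolute/path") / "table.lance")
```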

View File

@@ -3,7 +3,7 @@ name = "lancedb"
version = "0.4.0"
dependencies = [
"deprecation",
"pylance==0.9.1",
"pylance==0.9.0",
"ratelimiter~=1.0",
"retry>=0.9.2",
"tqdm>=4.27.0",

View File

@@ -83,24 +83,6 @@ def test_create_index_from_table(tmp_path, table):
assert len(df) == 10
assert "text" in df.columns
# Check whether it can be updated
table.add(
[
{
"vector": np.random.randn(128),
"text": "gorilla",
"text2": "gorilla",
"nested": {"text": "gorilla"},
}
]
)
with pytest.raises(ValueError, match="already exists"):
table.create_fts_index("text")
table.create_fts_index("text", replace=True)
assert len(table.search("gorilla").limit(1).to_pandas()) == 1
def test_create_index_multiple_columns(tmp_path, table):
table.create_fts_index(["text", "text2"])

View File

@@ -21,8 +21,8 @@ import lance
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from pydantic import BaseModel
import pytest
from lancedb.conftest import MockTextEmbeddingFunction
from lancedb.db import LanceDBConnection
@@ -532,33 +532,6 @@ def test_multiple_vector_columns(db):
assert result1["text"].iloc[0] != result2["text"].iloc[0]
def test_create_scalar_index(db):
vec_array = pa.array(
[[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]], pa.list_(pa.float32(), 2)
)
test_data = pa.Table.from_pydict(
{"x": ["c", "b", "a", "e", "b"], "y": [1, 2, 3, 4, 5], "vector": vec_array}
)
table = LanceTable.create(
db,
"my_table",
data=test_data,
)
table.create_scalar_index("x")
indices = table.to_lance().list_indices()
assert len(indices) == 1
scalar_index = indices[0]
assert scalar_index["type"] == "Scalar"
# Confirm that prefiltering still works with the scalar index column
results = table.search().where("x = 'c'").to_arrow()
assert results == test_data.slice(0, 1)
results = table.search([5, 5]).to_arrow()
assert results["_distance"][0].as_py() == 0
results = table.search([5, 5]).where("x != 'b'").to_arrow()
assert results["_distance"][0].as_py() > 0
def test_empty_query(db):
table = LanceTable.create(
db,

View File

@@ -11,12 +11,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import pytest
from lancedb.util import get_uri_scheme, join_uri
from lancedb.util import get_uri_scheme
def test_normalize_uri():
@@ -33,55 +28,3 @@ def test_normalize_uri():
for uri, expected_scheme in zip(uris, schemes):
parsed_scheme = get_uri_scheme(uri)
assert parsed_scheme == expected_scheme
def test_join_uri_remote():
schemes = ["s3", "az", "gs"]
for scheme in schemes:
expected = f"{scheme}://bucket/path/to/table.lance"
base_uri = f"{scheme}://bucket/path/to/"
parts = ["table.lance"]
assert join_uri(base_uri, *parts) == expected
base_uri = f"{scheme}://bucket"
parts = ["path", "to", "table.lance"]
assert join_uri(base_uri, *parts) == expected
# skip this test if on windows
@pytest.mark.skipif(os.name == "nt", reason="Windows paths are not POSIX")
def test_join_uri_posix():
for base in [
# relative path
"relative/path",
"relative/path/",
# an absolute path
"/absolute/path",
"/absolute/path/",
# a file URI
"file:///absolute/path",
"file:///absolute/path/",
]:
joined = join_uri(base, "table.lance")
assert joined == str(pathlib.Path(base) / "table.lance")
joined = join_uri(pathlib.Path(base), "table.lance")
assert joined == pathlib.Path(base) / "table.lance"
# skip this test if not on windows
@pytest.mark.skipif(os.name != "nt", reason="Windows paths are not POSIX")
def test_local_join_uri_windows():
# https://learn.microsoft.com/en-us/dotnet/standard/io/file-path-formats
for base in [
# windows relative path
"relative\\path",
"relative\\path\\",
# windows absolute path from current drive
"c:\\absolute\\path",
# relative path from root of current drive
"\\relative\\path",
]:
joined = join_uri(base, "table.lance")
assert joined == str(pathlib.Path(base) / "table.lance")
joined = join_uri(pathlib.Path(base), "table.lance")
assert joined == pathlib.Path(base) / "table.lance"

View File

@@ -1,6 +1,6 @@
[package]
name = "vectordb-node"
version = "0.4.1"
version = "0.4.0"
description = "Serverless, low-latency vector database for AI applications"
license = "Apache-2.0"
edition = "2018"

View File

@@ -23,7 +23,7 @@ pub enum Error {
#[snafu(display("column '{name}' is missing"))]
MissingColumn { name: String },
#[snafu(display("{name}: {message}"))]
OutOfRange { name: String, message: String },
RangeError { name: String, message: String },
#[snafu(display("{index_type} is not a valid index type"))]
InvalidIndexType { index_type: String },

View File

@@ -12,5 +12,4 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod scalar;
pub mod vector;

View File

@@ -1,43 +0,0 @@
// Copyright 2023 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use neon::{
context::{Context, FunctionContext},
result::JsResult,
types::{JsBoolean, JsBox, JsPromise, JsString},
};
use crate::{error::ResultExt, runtime, table::JsTable};
pub(crate) fn table_create_scalar_index(mut cx: FunctionContext) -> JsResult<JsPromise> {
let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
let column = cx.argument::<JsString>(0)?.value(&mut cx);
let replace = cx.argument::<JsBoolean>(1)?.value(&mut cx);
let rt = runtime(&mut cx)?;
let (deferred, promise) = cx.promise();
let channel = cx.channel();
let mut table = js_table.table.clone();
rt.spawn(async move {
let idx_result = table.create_scalar_index(&column, replace).await;
deferred.settle_with(&channel, move |mut cx| {
idx_result.or_throw(&mut cx)?;
Ok(cx.undefined())
});
});
Ok(promise)
}

View File

@@ -65,10 +65,12 @@ fn get_index_params_builder(
obj.get_opt::<JsString, _, _>(cx, "index_name")?
.map(|s| index_builder.index_name(s.value(cx)));
if let Some(metric_type) = obj.get_opt::<JsString, _, _>(cx, "metric_type")? {
let metric_type = MetricType::try_from(metric_type.value(cx).as_str()).unwrap();
index_builder.metric_type(metric_type);
}
obj.get_opt::<JsString, _, _>(cx, "metric_type")?
.map(|s| MetricType::try_from(s.value(cx).as_str()))
.map(|mt| {
let metric_type = mt.unwrap();
index_builder.metric_type(metric_type);
});
let num_partitions = obj.get_opt_usize(cx, "num_partitions")?;
let max_iters = obj.get_opt_usize(cx, "max_iters")?;
@@ -83,29 +85,23 @@ fn get_index_params_builder(
index_builder.ivf_params(ivf_params)
});
if let Some(use_opq) = obj.get_opt::<JsBoolean, _, _>(cx, "use_opq")? {
pq_params.use_opq = use_opq.value(cx);
}
obj.get_opt::<JsBoolean, _, _>(cx, "use_opq")?
.map(|s| pq_params.use_opq = s.value(cx));
if let Some(num_sub_vectors) = obj.get_opt_usize(cx, "num_sub_vectors")? {
pq_params.num_sub_vectors = num_sub_vectors;
}
obj.get_opt_usize(cx, "num_sub_vectors")?
.map(|s| pq_params.num_sub_vectors = s);
if let Some(num_bits) = obj.get_opt_usize(cx, "num_bits")? {
pq_params.num_bits = num_bits;
}
obj.get_opt_usize(cx, "num_bits")?
.map(|s| pq_params.num_bits = s);
if let Some(max_iters) = obj.get_opt_usize(cx, "max_iters")? {
pq_params.max_iters = max_iters;
}
obj.get_opt_usize(cx, "max_iters")?
.map(|s| pq_params.max_iters = s);
if let Some(max_opq_iters) = obj.get_opt_usize(cx, "max_opq_iters")? {
pq_params.max_opq_iters = max_opq_iters;
}
obj.get_opt_usize(cx, "max_opq_iters")?
.map(|s| pq_params.max_opq_iters = s);
if let Some(replace) = obj.get_opt::<JsBoolean, _, _>(cx, "replace")? {
index_builder.replace(replace.value(cx));
}
obj.get_opt::<JsBoolean, _, _>(cx, "replace")?
.map(|s| index_builder.replace(s.value(cx)));
Ok(index_builder)
}

View File

@@ -242,10 +242,6 @@ fn main(mut cx: ModuleContext) -> NeonResult<()> {
cx.export_function("tableCompactFiles", JsTable::js_compact)?;
cx.export_function("tableListIndices", JsTable::js_list_indices)?;
cx.export_function("tableIndexStats", JsTable::js_index_stats)?;
cx.export_function(
"tableCreateScalarIndex",
index::scalar::table_create_scalar_index,
)?;
cx.export_function(
"tableCreateVectorIndex",
index::vector::table_create_vector_index,

View File

@@ -47,15 +47,15 @@ fn f64_to_u32_safe(n: f64, key: &str) -> Result<u32> {
use conv::*;
n.approx_as::<u32>().map_err(|e| match e {
FloatError::NegOverflow(_) => Error::OutOfRange {
FloatError::NegOverflow(_) => Error::RangeError {
name: key.into(),
message: "must be > 0".to_string(),
},
FloatError::PosOverflow(_) => Error::OutOfRange {
FloatError::PosOverflow(_) => Error::RangeError {
name: key.into(),
message: format!("must be < {}", u32::MAX),
},
FloatError::NotANumber(_) => Error::OutOfRange {
FloatError::NotANumber(_) => Error::RangeError {
name: key.into(),
message: "not a valid number".to_string(),
},
@@ -66,15 +66,15 @@ fn f64_to_usize_safe(n: f64, key: &str) -> Result<usize> {
use conv::*;
n.approx_as::<usize>().map_err(|e| match e {
FloatError::NegOverflow(_) => Error::OutOfRange {
FloatError::NegOverflow(_) => Error::RangeError {
name: key.into(),
message: "must be > 0".to_string(),
},
FloatError::PosOverflow(_) => Error::OutOfRange {
FloatError::PosOverflow(_) => Error::RangeError {
name: key.into(),
message: format!("must be < {}", usize::MAX),
},
FloatError::NotANumber(_) => Error::OutOfRange {
FloatError::NotANumber(_) => Error::RangeError {
name: key.into(),
message: "not a valid number".to_string(),
},

View File

@@ -25,11 +25,11 @@ impl JsQuery {
let limit = query_obj
.get_opt::<JsNumber, _, _>(&mut cx, "_limit")?
.map(|value| {
let limit = value.value(&mut cx);
if limit <= 0.0 {
let limit = value.value(&mut cx) as u64;
if limit <= 0 {
panic!("Limit must be a positive integer");
}
limit as u64
limit
});
let select = query_obj
.get_opt::<JsArray, _, _>(&mut cx, "_select")?
@@ -73,7 +73,7 @@ impl JsQuery {
rt.spawn(async move {
let mut builder = table
.search(query.map(Float32Array::from))
.search(query.map(|q| Float32Array::from(q)))
.refine_factor(refine_factor)
.nprobes(nprobes)
.filter(filter)

View File

@@ -45,7 +45,7 @@ impl JsTable {
let table_name = cx.argument::<JsString>(0)?.value(&mut cx);
let buffer = cx.argument::<JsBuffer>(1)?;
let (batches, schema) =
arrow_buffer_to_record_batch(buffer.as_slice(&cx)).or_throw(&mut cx)?;
arrow_buffer_to_record_batch(buffer.as_slice(&mut cx)).or_throw(&mut cx)?;
// Write mode
let mode = match cx.argument::<JsString>(2)?.value(&mut cx).as_str() {
@@ -93,7 +93,7 @@ impl JsTable {
let buffer = cx.argument::<JsBuffer>(0)?;
let write_mode = cx.argument::<JsString>(1)?.value(&mut cx);
let (batches, schema) =
arrow_buffer_to_record_batch(buffer.as_slice(&cx)).or_throw(&mut cx)?;
arrow_buffer_to_record_batch(buffer.as_slice(&mut cx)).or_throw(&mut cx)?;
let rt = runtime(&mut cx)?;
let channel = cx.channel();
let mut table = js_table.table.clone();
@@ -186,7 +186,7 @@ impl JsTable {
.downcast_or_throw::<JsString, _>(&mut cx)?;
let value = updates_arg
.get_value(&mut cx, property)?
.get_value(&mut cx, property.clone())?
.downcast_or_throw::<JsString, _>(&mut cx)?;
let property = property.value(&mut cx);
@@ -216,7 +216,7 @@ impl JsTable {
.map(|(k, v)| (k.as_str(), v.as_str()))
.collect::<Vec<_>>();
let predicate = predicate.as_deref();
let predicate = predicate.as_ref().map(|s| s.as_str());
let update_result = table.update(predicate, updates_arg).await;
deferred.settle_with(&channel, move |mut cx| {

View File

@@ -1,6 +1,6 @@
[package]
name = "vectordb"
version = "0.4.1"
version = "0.4.0"
edition = "2021"
description = "LanceDB: A serverless, low-latency vector database for AI applications"
license = "Apache-2.0"

View File

@@ -26,7 +26,7 @@ use futures::{stream::BoxStream, FutureExt, StreamExt};
use lance::io::object_store::WrappingObjectStore;
use object_store::{
path::Path, Error, GetOptions, GetResult, ListResult, MultipartId, ObjectMeta, ObjectStore,
PutOptions, PutResult, Result,
Result,
};
use async_trait::async_trait;
@@ -72,28 +72,13 @@ impl PrimaryOnly for Path {
/// Note: this object store does not mirror writes to *.manifest files
#[async_trait]
impl ObjectStore for MirroringObjectStore {
async fn put(&self, location: &Path, bytes: Bytes) -> Result<PutResult> {
async fn put(&self, location: &Path, bytes: Bytes) -> Result<()> {
if location.primary_only() {
self.primary.put(location, bytes).await
} else {
self.secondary.put(location, bytes.clone()).await?;
self.primary.put(location, bytes).await
}
}
async fn put_opts(
&self,
location: &Path,
bytes: Bytes,
options: PutOptions,
) -> Result<PutResult> {
if location.primary_only() {
self.primary.put_opts(location, bytes, options).await
} else {
self.secondary
.put_opts(location, bytes.clone(), options.clone())
.await?;
self.primary.put_opts(location, bytes, options).await
self.primary.put(location, bytes).await?;
Ok(())
}
}
@@ -144,8 +129,8 @@ impl ObjectStore for MirroringObjectStore {
self.primary.delete(location).await
}
fn list(&self, prefix: Option<&Path>) -> BoxStream<'_, Result<ObjectMeta>> {
self.primary.list(prefix)
async fn list(&self, prefix: Option<&Path>) -> Result<BoxStream<'_, Result<ObjectMeta>>> {
self.primary.list(prefix).await
}
async fn list_with_delimiter(&self, prefix: Option<&Path>) -> Result<ListResult> {

View File

@@ -14,7 +14,6 @@
use chrono::Duration;
use lance::dataset::builder::DatasetBuilder;
use lance::index::scalar::ScalarIndexParams;
use lance_index::IndexType;
use std::sync::Arc;
@@ -263,16 +262,6 @@ impl Table {
Ok(())
}
/// Create a scalar index on the table
pub async fn create_scalar_index(&mut self, column: &str, replace: bool) -> Result<()> {
let mut dataset = self.dataset.as_ref().clone();
let params = ScalarIndexParams::default();
dataset
.create_index(&[column], IndexType::Scalar, None, &params, replace)
.await?;
Ok(())
}
pub async fn optimize_indices(&mut self) -> Result<()> {
let mut dataset = self.dataset.as_ref().clone();