Compare commits


21 Commits

Author SHA1 Message Date
Lance Release
ef20b2a138 [python] Bump version: 0.3.2 → 0.3.3 2023-11-01 21:15:55 +00:00
Lei Xu
2e0f251bfd chore: bump lance to 8.10 (#622) 2023-11-01 14:14:38 -07:00
Ayush Chaurasia
2cb91e818d Disable posthog on docs & reduce sentry trace factor (#607)
- Posthog charges per event and docs events are registered very frequently; we can keep tracking them on GA
- Reduced the Sentry trace factor
2023-11-02 01:13:16 +05:30
Chang She
2835c76336 doc: node sdk now supports windows (#616) 2023-11-01 10:04:18 -07:00
Bert
8068a2bbc3 ci: cancel in progress runs on new push (#620) 2023-11-01 11:33:48 -04:00
Bert
24111d543a fix!: sort table names (#619)
https://github.com/lancedb/lance/issues/1385
2023-11-01 10:50:09 -04:00
QianZhu
7eec2b8f9a Qian/query option doc (#615)
- API documentation improvement for queries (table.search)
- a small bug fix for the remote API on create_table

![image](https://github.com/lancedb/lancedb/assets/1305083/712e9bd3-deb8-4d81-8cd0-d8e98ef68f4e)

![image](https://github.com/lancedb/lancedb/assets/1305083/ba22125a-8c36-4e34-a07f-e39f0136e62c)
2023-10-31 19:50:05 -07:00
Will Jones
b2b70ea399 increment pylance (#618) 2023-10-31 18:07:03 -07:00
Bert
e50a3c1783 added api docs for prefilter flag (#617)
Added the prefilter flag argument to `LanceQueryBuilder.where`.

This should make it display here:

https://lancedb.github.io/lancedb/python/python/#lancedb.query.LanceQueryBuilder.select

And also in intellisense like this:
<img width="848" alt="image"
src="https://github.com/lancedb/lancedb/assets/5846846/e0c53f4f-96bc-411b-9159-680a6c4d0070">

Also adds some improved documentation about the `where` argument to this
method.

---------

Co-authored-by: Weston Pace <weston.pace@gmail.com>
2023-10-31 16:39:32 -04:00
Weston Pace
b517134309 feat: allow prefiltering with index (#610)
Support for prefiltering with an index was added in lance version 0.8.7.
We can remove the lancedb check that prevents this. Closes #261
2023-10-31 13:11:03 -07:00
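For context, prefiltering applies the SQL filter before the ANN search runs, so the top-k results are drawn only from the filtered rows. A minimal Python sketch of the behavior this commit enables (table and column names are illustrative; the `where(..., prefilter=True)` call mirrors the usage shown in the diffs below):

```python
import lancedb

# Illustrative only: a tiny table with a filterable "price" column.
db = lancedb.connect("./.lancedb")
tbl = db.create_table(
    "prefilter_demo",
    [
        {"vector": [3.1, 4.1], "price": 10.0},
        {"vector": [5.9, 26.5], "price": 200.0},
    ],
)

# prefilter=True: filter on price first, then run the vector search on the
# remaining rows (now also allowed on tables that have an ANN index).
results = (
    tbl.search([5.9, 26.5])
    .where("price > 100", prefilter=True)
    .limit(1)
    .to_pandas()
)
```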
Lei Xu
6fb539b5bf doc: add doc to use GPU for indexing (#611) 2023-10-30 15:25:00 -07:00
Lance Release
f37fe120fd Updating package-lock.json 2023-10-26 22:30:16 +00:00
Lance Release
2e115acb9a Updating package-lock.json 2023-10-26 21:48:01 +00:00
Lance Release
27a638362d Bump version: 0.3.4 → 0.3.5 2023-10-26 21:47:44 +00:00
Bert
22a6695d7a fix conv version (#605) 2023-10-26 17:44:11 -04:00
Lance Release
57eff82ee7 Updating package-lock.json 2023-10-26 21:03:07 +00:00
Lance Release
7732f7d41c Bump version: 0.3.3 → 0.3.4 2023-10-26 21:02:52 +00:00
Bert
5ca98c326f feat: added dataset stats api to node (#604) 2023-10-26 17:00:48 -04:00
Bert
b55db397eb feat: added data stats apis (#596) 2023-10-26 13:10:17 -04:00
Rob Meng
c04d72ac8a expose remap index api (#603)
expose index remap options in `compact_files`
2023-10-25 22:10:37 -04:00
Rob Meng
28b02fb72a feat: expose optimize index api (#602)
expose `optimize_index` api.
2023-10-25 19:40:23 -04:00
33 changed files with 630 additions and 161 deletions


@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.3.3
+current_version = 0.3.5
 commit = True
 message = Bump version: {current_version} → {new_version}
 tag = True


@@ -11,6 +11,10 @@ on:
 - .github/workflows/node.yml
 - docker-compose.yml
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
 env:
 # Disable full debug symbol generation to speed up CI build and keep memory down
 # "1" means line tables only, which is useful for panic tracebacks.


@@ -38,7 +38,7 @@ jobs:
 node/vectordb-*.tgz
 node-macos:
-runs-on: macos-12
+runs-on: macos-13
 # Only runs on tags that matches the make-release action
 if: startsWith(github.ref, 'refs/tags/v')
 strategy:


@@ -8,6 +8,11 @@ on:
 paths:
 - python/**
 - .github/workflows/python.yml
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
 jobs:
 linux:
 timeout-minutes: 30
@@ -43,7 +48,7 @@
 run: pytest --doctest-modules lancedb
 mac:
 timeout-minutes: 30
-runs-on: "macos-12"
+runs-on: "macos-13"
 defaults:
 run:
 shell: bash


@@ -10,6 +10,10 @@ on:
 - rust/**
 - .github/workflows/rust.yml
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
 env:
 # This env var is used by Swatinem/rust-cache@v2 for the cache
 # key, so we set it to make sure it is always consistent.
@@ -44,7 +48,7 @@
 - name: Run tests
 run: cargo test --all-features
 macos:
-runs-on: macos-12
+runs-on: macos-13
 timeout-minutes: 30
 defaults:
 run:


@@ -5,9 +5,9 @@ exclude = ["python"]
resolver = "2" resolver = "2"
[workspace.dependencies] [workspace.dependencies]
lance = { "version" = "=0.8.7", "features" = ["dynamodb"] } lance = { "version" = "=0.8.10", "features" = ["dynamodb"] }
lance-linalg = { "version" = "=0.8.7" } lance-linalg = { "version" = "=0.8.10" }
lance-testing = { "version" = "=0.8.7" } lance-testing = { "version" = "=0.8.10" }
# Note that this one does not include pyarrow # Note that this one does not include pyarrow
arrow = { version = "47.0.0", optional = false } arrow = { version = "47.0.0", optional = false }
arrow-array = "47.0" arrow-array = "47.0"
@@ -19,7 +19,7 @@ arrow-arith = "47.0"
arrow-cast = "47.0" arrow-cast = "47.0"
chrono = "0.4.23" chrono = "0.4.23"
half = { "version" = "=2.3.1", default-features = false, features = [ half = { "version" = "=2.3.1", default-features = false, features = [
"num-traits" "num-traits",
] } ] }
log = "0.4" log = "0.4"
object_store = "0.7.1" object_store = "0.7.1"


@@ -150,8 +150,6 @@ nav:
 extra_css:
 - styles/global.css
-extra_javascript:
-- scripts/posthog.js
 extra:
 analytics:


@@ -71,9 +71,41 @@ a single PQ code.
 ### Use GPU to build vector index
 Lance Python SDK has experimental GPU support for creating IVF index.
+Using GPU for index creation requires [PyTorch>2.0](https://pytorch.org/) to be installed.
 You can specify the GPU device to train IVF partitions via
-- **accelerator**: Specify to `"cuda"` to enable GPU training.
+- **accelerator**: Specify to ``cuda`` or ``mps`` (on Apple Silicon) to enable GPU training.
+
+=== "Linux"
+
+    <!-- skip-test -->
+    ``` { .python .copy }
+    # Create index using CUDA on Nvidia GPUs.
+    tbl.create_index(
+        num_partitions=256,
+        num_sub_vectors=96,
+        accelerator="cuda"
+    )
+    ```
+
+=== "MacOS"
+
+    <!-- skip-test -->
+    ```python
+    # Create index using MPS on Apple Silicon.
+    tbl.create_index(
+        num_partitions=256,
+        num_sub_vectors=96,
+        accelerator="mps"
+    )
+    ```
+
+Troubleshooting:
+
+If you see ``AssertionError: Torch not compiled with CUDA enabled``, you need to [install
+PyTorch with CUDA support](https://pytorch.org/get-started/locally/).
 ## Querying an ANN Index
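A small, hedged companion to the troubleshooting note above (not part of the committed docs): probing PyTorch for an available backend before choosing the `accelerator` value avoids the CUDA assertion on machines without a CUDA build.

```python
import lancedb
import torch  # PyTorch >= 2.0, per the requirement noted above

# Pick an accelerator only when the matching backend is actually available.
if torch.cuda.is_available():
    accelerator = "cuda"
elif torch.backends.mps.is_available():
    accelerator = "mps"  # Apple Silicon
else:
    accelerator = None  # fall back to CPU training

db = lancedb.connect("./.lancedb")
tbl = db.open_table("my_table")  # placeholder table name
tbl.create_index(num_partitions=256, num_sub_vectors=96, accelerator=accelerator)
```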


@@ -22,8 +22,6 @@ pip install lancedb
 ::: lancedb.query.LanceQueryBuilder
-::: lancedb.query.LanceFtsQueryBuilder
 ## Embeddings
 ::: lancedb.embeddings.registry.EmbeddingFunctionRegistry
@@ -56,7 +54,7 @@ pip install lancedb
 ## Utilities
-::: lancedb.vector
+::: lancedb.schema.vector
 ## Integrations


@@ -18,29 +18,45 @@ python_file = ".py"
python_folder = "python" python_folder = "python"
files = glob.glob(glob_string, recursive=True) files = glob.glob(glob_string, recursive=True)
excluded_files = [f for excluded_glob in excluded_globs for f in glob.glob(excluded_glob, recursive=True)] excluded_files = [
f
for excluded_glob in excluded_globs
for f in glob.glob(excluded_glob, recursive=True)
]
def yield_lines(lines: Iterator[str], prefix: str, suffix: str): def yield_lines(lines: Iterator[str], prefix: str, suffix: str):
in_code_block = False in_code_block = False
# Python code has strict indentation # Python code has strict indentation
strip_length = 0 strip_length = 0
skip_test = False
for line in lines: for line in lines:
if "skip-test" in line:
skip_test = True
if line.strip().startswith(prefix + python_prefix): if line.strip().startswith(prefix + python_prefix):
in_code_block = True in_code_block = True
strip_length = len(line) - len(line.lstrip()) strip_length = len(line) - len(line.lstrip())
elif in_code_block and line.strip().startswith(suffix): elif in_code_block and line.strip().startswith(suffix):
in_code_block = False in_code_block = False
yield "\n" if not skip_test:
yield "\n"
skip_test = False
elif in_code_block: elif in_code_block:
yield line[strip_length:] if not skip_test:
yield line[strip_length:]
for file in filter(lambda file: file not in excluded_files, files): for file in filter(lambda file: file not in excluded_files, files):
with open(file, "r") as f: with open(file, "r") as f:
lines = list(yield_lines(iter(f), "```", "```")) lines = list(yield_lines(iter(f), "```", "```"))
if len(lines) > 0: if len(lines) > 0:
out_path = Path(python_folder) / Path(file).name.strip(".md") / (Path(file).name.strip(".md") + python_file) print(lines)
out_path = (
Path(python_folder)
/ Path(file).name.strip(".md")
/ (Path(file).name.strip(".md") + python_file)
)
print(out_path) print(out_path)
out_path.parent.mkdir(exist_ok=True, parents=True) out_path.parent.mkdir(exist_ok=True, parents=True)
with open(out_path, "w") as out: with open(out_path, "w") as out:
out.writelines(lines) out.writelines(lines)


@@ -10,7 +10,7 @@ npm install vectordb
 This will download the appropriate native library for your platform. We currently
 support x86_64 Linux, aarch64 Linux, Intel MacOS, and ARM (M1/M2) MacOS. We do not
-yet support Windows or musl-based Linux (such as Alpine Linux).
+yet support musl-based Linux (such as Alpine Linux).
 ## Usage

node/package-lock.json generated

@@ -1,12 +1,12 @@
{ {
"name": "vectordb", "name": "vectordb",
"version": "0.3.3", "version": "0.3.5",
"lockfileVersion": 2, "lockfileVersion": 2,
"requires": true, "requires": true,
"packages": { "packages": {
"": { "": {
"name": "vectordb", "name": "vectordb",
"version": "0.3.3", "version": "0.3.5",
"cpu": [ "cpu": [
"x64", "x64",
"arm64" "arm64"
@@ -53,11 +53,11 @@
"uuid": "^9.0.0" "uuid": "^9.0.0"
}, },
"optionalDependencies": { "optionalDependencies": {
"@lancedb/vectordb-darwin-arm64": "0.3.3", "@lancedb/vectordb-darwin-arm64": "0.3.5",
"@lancedb/vectordb-darwin-x64": "0.3.3", "@lancedb/vectordb-darwin-x64": "0.3.5",
"@lancedb/vectordb-linux-arm64-gnu": "0.3.3", "@lancedb/vectordb-linux-arm64-gnu": "0.3.5",
"@lancedb/vectordb-linux-x64-gnu": "0.3.3", "@lancedb/vectordb-linux-x64-gnu": "0.3.5",
"@lancedb/vectordb-win32-x64-msvc": "0.3.3" "@lancedb/vectordb-win32-x64-msvc": "0.3.5"
} }
}, },
"node_modules/@apache-arrow/ts": { "node_modules/@apache-arrow/ts": {
@@ -317,9 +317,9 @@
} }
}, },
"node_modules/@lancedb/vectordb-darwin-arm64": { "node_modules/@lancedb/vectordb-darwin-arm64": {
"version": "0.3.3", "version": "0.3.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.3.3.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.3.5.tgz",
"integrity": "sha512-nvyj7xNX2/wb/PH5TjyhLR/NQ1jVuoBw2B5UaSg7qf8Tnm5SSXWQ7F25RVKcKwh72fz1qB+CWW24ftZnRzbT/Q==", "integrity": "sha512-Nnso+WXMSTIUouddDgPDNt40K6d2fF7W5OsfgAMDXAhUrdSMOZbVP0bWklRz9J7JluseBL9/MfLSEYZDTvrACg==",
"cpu": [ "cpu": [
"arm64" "arm64"
], ],
@@ -329,9 +329,9 @@
] ]
}, },
"node_modules/@lancedb/vectordb-darwin-x64": { "node_modules/@lancedb/vectordb-darwin-x64": {
"version": "0.3.3", "version": "0.3.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.3.3.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.3.5.tgz",
"integrity": "sha512-7CW+nILyPHp6cua0Rl0xaTDWw/vajEn/jCsEjFYgDmE+rtf5Z5Fum41FxR9C2TtIAvUK+nWb5mkYeOLqU6vRvg==", "integrity": "sha512-gvg/iq13zAamLL7jueiIw7Q67dygm/NmILkFQ3WrAOUjr0IMxLBCv+XMxt62xajTrA+ObyfmU1uiuhrJL81PWw==",
"cpu": [ "cpu": [
"x64" "x64"
], ],
@@ -341,9 +341,9 @@
] ]
}, },
"node_modules/@lancedb/vectordb-linux-arm64-gnu": { "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
"version": "0.3.3", "version": "0.3.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.3.3.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.3.5.tgz",
"integrity": "sha512-MmhwbacKxZPkLwwOqysVY8mUb8lFoyFIPlYhSLV4xS1C8X4HWALljIul1qMl1RYudp9Uc3PsOzRexl+OvCGfUw==", "integrity": "sha512-6PvCBIXI9zPqF478TibZxxiAehFZ530g0FOFDT49xtp540HvhE9+XQk/yO0w96mvyoCfzB2lK4haDmdhCoehNw==",
"cpu": [ "cpu": [
"arm64" "arm64"
], ],
@@ -353,9 +353,9 @@
] ]
}, },
"node_modules/@lancedb/vectordb-linux-x64-gnu": { "node_modules/@lancedb/vectordb-linux-x64-gnu": {
"version": "0.3.3", "version": "0.3.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.3.3.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.3.5.tgz",
"integrity": "sha512-OrNlsKi/QPw59Po040oRKn8IuqFEk4upc/4FaFKqVkcmQjjZrMg5Kgy9ZfWIhHdAnWXXggZZIPArpt0X1B0ceA==", "integrity": "sha512-e3nqurUeCow4QONeNf/QP50Z90mgrh9xoUfjRSHcCPQcP6WgmFEafbt0jeSVgZ7tbt7+03/MK0YexhHM/5sBjA==",
"cpu": [ "cpu": [
"x64" "x64"
], ],
@@ -365,9 +365,9 @@
] ]
}, },
"node_modules/@lancedb/vectordb-win32-x64-msvc": { "node_modules/@lancedb/vectordb-win32-x64-msvc": {
"version": "0.3.3", "version": "0.3.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.3.3.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.3.5.tgz",
"integrity": "sha512-lIT0A7a6eqX51IfGyhECtpXXgsr//kgbd+HZbcCdPy2GMmNezSch/7V22zExDSpF32hX8WfgcTLYCVWVilggDQ==", "integrity": "sha512-RC1FfgEr6Z9sADuvspT2PG1B2mpKRdckgeiHqTHkIXdq3Qp5V5TeQJAbVvMr2xd1q99W6zreub52QXf+AilLVQ==",
"cpu": [ "cpu": [
"x64" "x64"
], ],
@@ -4869,33 +4869,33 @@
} }
}, },
"@lancedb/vectordb-darwin-arm64": { "@lancedb/vectordb-darwin-arm64": {
"version": "0.3.3", "version": "0.3.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.3.3.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.3.5.tgz",
"integrity": "sha512-nvyj7xNX2/wb/PH5TjyhLR/NQ1jVuoBw2B5UaSg7qf8Tnm5SSXWQ7F25RVKcKwh72fz1qB+CWW24ftZnRzbT/Q==", "integrity": "sha512-Nnso+WXMSTIUouddDgPDNt40K6d2fF7W5OsfgAMDXAhUrdSMOZbVP0bWklRz9J7JluseBL9/MfLSEYZDTvrACg==",
"optional": true "optional": true
}, },
"@lancedb/vectordb-darwin-x64": { "@lancedb/vectordb-darwin-x64": {
"version": "0.3.3", "version": "0.3.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.3.3.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.3.5.tgz",
"integrity": "sha512-7CW+nILyPHp6cua0Rl0xaTDWw/vajEn/jCsEjFYgDmE+rtf5Z5Fum41FxR9C2TtIAvUK+nWb5mkYeOLqU6vRvg==", "integrity": "sha512-gvg/iq13zAamLL7jueiIw7Q67dygm/NmILkFQ3WrAOUjr0IMxLBCv+XMxt62xajTrA+ObyfmU1uiuhrJL81PWw==",
"optional": true "optional": true
}, },
"@lancedb/vectordb-linux-arm64-gnu": { "@lancedb/vectordb-linux-arm64-gnu": {
"version": "0.3.3", "version": "0.3.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.3.3.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.3.5.tgz",
"integrity": "sha512-MmhwbacKxZPkLwwOqysVY8mUb8lFoyFIPlYhSLV4xS1C8X4HWALljIul1qMl1RYudp9Uc3PsOzRexl+OvCGfUw==", "integrity": "sha512-6PvCBIXI9zPqF478TibZxxiAehFZ530g0FOFDT49xtp540HvhE9+XQk/yO0w96mvyoCfzB2lK4haDmdhCoehNw==",
"optional": true "optional": true
}, },
"@lancedb/vectordb-linux-x64-gnu": { "@lancedb/vectordb-linux-x64-gnu": {
"version": "0.3.3", "version": "0.3.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.3.3.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.3.5.tgz",
"integrity": "sha512-OrNlsKi/QPw59Po040oRKn8IuqFEk4upc/4FaFKqVkcmQjjZrMg5Kgy9ZfWIhHdAnWXXggZZIPArpt0X1B0ceA==", "integrity": "sha512-e3nqurUeCow4QONeNf/QP50Z90mgrh9xoUfjRSHcCPQcP6WgmFEafbt0jeSVgZ7tbt7+03/MK0YexhHM/5sBjA==",
"optional": true "optional": true
}, },
"@lancedb/vectordb-win32-x64-msvc": { "@lancedb/vectordb-win32-x64-msvc": {
"version": "0.3.3", "version": "0.3.5",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.3.3.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.3.5.tgz",
"integrity": "sha512-lIT0A7a6eqX51IfGyhECtpXXgsr//kgbd+HZbcCdPy2GMmNezSch/7V22zExDSpF32hX8WfgcTLYCVWVilggDQ==", "integrity": "sha512-RC1FfgEr6Z9sADuvspT2PG1B2mpKRdckgeiHqTHkIXdq3Qp5V5TeQJAbVvMr2xd1q99W6zreub52QXf+AilLVQ==",
"optional": true "optional": true
}, },
"@neon-rs/cli": { "@neon-rs/cli": {


@@ -1,6 +1,6 @@
 {
 "name": "vectordb",
-"version": "0.3.3",
+"version": "0.3.5",
 "description": " Serverless, low-latency vector database for AI applications",
 "main": "dist/index.js",
 "types": "dist/index.d.ts",
@@ -81,10 +81,10 @@
 }
 },
 "optionalDependencies": {
-"@lancedb/vectordb-darwin-arm64": "0.3.3",
-"@lancedb/vectordb-darwin-x64": "0.3.3",
-"@lancedb/vectordb-linux-arm64-gnu": "0.3.3",
-"@lancedb/vectordb-linux-x64-gnu": "0.3.3",
-"@lancedb/vectordb-win32-x64-msvc": "0.3.3"
+"@lancedb/vectordb-darwin-arm64": "0.3.5",
+"@lancedb/vectordb-darwin-x64": "0.3.5",
+"@lancedb/vectordb-linux-arm64-gnu": "0.3.5",
+"@lancedb/vectordb-linux-x64-gnu": "0.3.5",
+"@lancedb/vectordb-win32-x64-msvc": "0.3.5"
 }
 }


@@ -23,7 +23,7 @@ import { Query } from './query'
import { isEmbeddingFunction } from './embedding/embedding_function' import { isEmbeddingFunction } from './embedding/embedding_function'
// eslint-disable-next-line @typescript-eslint/no-var-requires // eslint-disable-next-line @typescript-eslint/no-var-requires
const { databaseNew, databaseTableNames, databaseOpenTable, databaseDropTable, tableCreate, tableAdd, tableCreateVectorIndex, tableCountRows, tableDelete, tableCleanupOldVersions, tableCompactFiles } = require('../native.js') const { databaseNew, databaseTableNames, databaseOpenTable, databaseDropTable, tableCreate, tableAdd, tableCreateVectorIndex, tableCountRows, tableDelete, tableCleanupOldVersions, tableCompactFiles, tableListIndices, tableIndexStats } = require('../native.js')
export { Query } export { Query }
export type { EmbeddingFunction } export type { EmbeddingFunction }
@@ -260,6 +260,27 @@ export interface Table<T = number[]> {
* ``` * ```
*/ */
delete: (filter: string) => Promise<void> delete: (filter: string) => Promise<void>
/**
* List the indicies on this table.
*/
listIndices: () => Promise<VectorIndex[]>
/**
* Get statistics about an index.
*/
indexStats: (indexUuid: string) => Promise<IndexStats>
}
export interface VectorIndex {
columns: string[]
name: string
uuid: string
}
export interface IndexStats {
numIndexedRows: number | null
numUnindexedRows: number | null
} }
/** /**
@@ -502,6 +523,14 @@ export class LocalTable<T = number[]> implements Table<T> {
return res.metrics return res.metrics
}) })
} }
async listIndices (): Promise<VectorIndex[]> {
return tableListIndices.call(this._tbl)
}
async indexStats (indexUuid: string): Promise<IndexStats> {
return tableIndexStats.call(this._tbl, indexUuid)
}
} }
export interface CleanupStats { export interface CleanupStats {


@@ -14,7 +14,9 @@
import { import {
type EmbeddingFunction, type Table, type VectorIndexParams, type Connection, type EmbeddingFunction, type Table, type VectorIndexParams, type Connection,
type ConnectionOptions, type CreateTableOptions, type WriteOptions type ConnectionOptions, type CreateTableOptions, type VectorIndex,
type WriteOptions,
type IndexStats
} from '../index' } from '../index'
import { Query } from '../query' import { Query } from '../query'
@@ -241,4 +243,21 @@ export class RemoteTable<T = number[]> implements Table<T> {
async delete (filter: string): Promise<void> { async delete (filter: string): Promise<void> {
await this._client.post(`/v1/table/${this._name}/delete/`, { predicate: filter }) await this._client.post(`/v1/table/${this._name}/delete/`, { predicate: filter })
} }
async listIndices (): Promise<VectorIndex[]> {
const results = await this._client.post(`/v1/table/${this._name}/index/list/`)
return results.data.indexes?.map((index: any) => ({
columns: index.columns,
name: index.index_name,
uuid: index.index_uuid
}))
}
async indexStats (indexUuid: string): Promise<IndexStats> {
const results = await this._client.post(`/v1/table/${this._name}/index/${indexUuid}/stats/`)
return {
numIndexedRows: results.data.num_indexed_rows,
numUnindexedRows: results.data.num_unindexed_rows
}
}
} }


@@ -328,6 +328,24 @@ describe('LanceDB client', function () {
const createIndex = table.createIndex({ type: 'ivf_pq', column: 'name', num_partitions: -1, max_iters: 2, num_sub_vectors: 2 }) const createIndex = table.createIndex({ type: 'ivf_pq', column: 'name', num_partitions: -1, max_iters: 2, num_sub_vectors: 2 })
await expect(createIndex).to.be.rejectedWith('num_partitions: must be > 0') await expect(createIndex).to.be.rejectedWith('num_partitions: must be > 0')
}) })
it('should be able to list index and stats', async function () {
const uri = await createTestDB(32, 300)
const con = await lancedb.connect(uri)
const table = await con.openTable('vectors')
await table.createIndex({ type: 'ivf_pq', column: 'vector', num_partitions: 2, max_iters: 2, num_sub_vectors: 2 })
const indices = await table.listIndices()
expect(indices).to.have.lengthOf(1)
expect(indices[0].name).to.equal('vector_idx')
expect(indices[0].uuid).to.not.be.equal(undefined)
expect(indices[0].columns).to.have.lengthOf(1)
expect(indices[0].columns[0]).to.equal('vector')
const stats = await table.indexStats(indices[0].uuid)
expect(stats.numIndexedRows).to.equal(300)
expect(stats.numUnindexedRows).to.equal(0)
}).timeout(50_000)
}) })
describe('when using a custom embedding function', function () { describe('when using a custom embedding function', function () {


@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.3.2
+current_version = 0.3.3
 commit = True
 message = [python] Bump version: {current_version} → {new_version}
 tag = True


@@ -84,7 +84,9 @@ def contextualize(raw_df: "pd.DataFrame") -> Contextualizer:
context windows that don't cross document boundaries. In this case, we can context windows that don't cross document boundaries. In this case, we can
pass ``document_id`` as the group by. pass ``document_id`` as the group by.
>>> contextualize(data).window(4).stride(2).text_col('token').groupby('document_id').to_pandas() >>> (contextualize(data)
... .window(4).stride(2).text_col('token').groupby('document_id')
... .to_pandas())
token document_id token document_id
0 The quick brown fox 1 0 The quick brown fox 1
2 brown fox jumped over 1 2 brown fox jumped over 1
@@ -92,18 +94,24 @@ def contextualize(raw_df: "pd.DataFrame") -> Contextualizer:
6 the lazy dog 1 6 the lazy dog 1
9 I love sandwiches 2 9 I love sandwiches 2
``min_window_size`` determines the minimum size of the context windows that are generated ``min_window_size`` determines the minimum size of the context windows
This can be used to trim the last few context windows which have size less than that are generated.This can be used to trim the last few context windows
``min_window_size``. By default context windows of size 1 are skipped. which have size less than ``min_window_size``.
By default context windows of size 1 are skipped.
>>> contextualize(data).window(6).stride(3).text_col('token').groupby('document_id').to_pandas() >>> (contextualize(data)
... .window(6).stride(3).text_col('token').groupby('document_id')
... .to_pandas())
token document_id token document_id
0 The quick brown fox jumped over 1 0 The quick brown fox jumped over 1
3 fox jumped over the lazy dog 1 3 fox jumped over the lazy dog 1
6 the lazy dog 1 6 the lazy dog 1
9 I love sandwiches 2 9 I love sandwiches 2
>>> contextualize(data).window(6).stride(3).min_window_size(4).text_col('token').groupby('document_id').to_pandas() >>> (contextualize(data)
... .window(6).stride(3).min_window_size(4).text_col('token')
... .groupby('document_id')
... .to_pandas())
token document_id token document_id
0 The quick brown fox jumped over 1 0 The quick brown fox jumped over 1
3 fox jumped over the lazy dog 1 3 fox jumped over the lazy dog 1
@@ -113,7 +121,9 @@ def contextualize(raw_df: "pd.DataFrame") -> Contextualizer:
class Contextualizer: class Contextualizer:
"""Create context windows from a DataFrame. See [lancedb.context.contextualize][].""" """Create context windows from a DataFrame.
See [lancedb.context.contextualize][].
"""
def __init__(self, raw_df): def __init__(self, raw_df):
self._text_col = None self._text_col = None
@@ -183,7 +193,7 @@ class Contextualizer:
deprecated_in="0.3.1", deprecated_in="0.3.1",
removed_in="0.4.0", removed_in="0.4.0",
current_version=__version__, current_version=__version__,
details="Use the bar function instead", details="Use to_pandas() instead",
) )
def to_df(self) -> "pd.DataFrame": def to_df(self) -> "pd.DataFrame":
return self.to_pandas() return self.to_pandas()


@@ -52,12 +52,24 @@ class DBConnection(ABC):
---------- ----------
name: str name: str
The name of the table. The name of the table.
data: list, tuple, dict, pd.DataFrame; optional data: The data to initialize the table, *optional*
The data to initialize the table. User must provide at least one of `data` or `schema`. User must provide at least one of `data` or `schema`.
schema: pyarrow.Schema or LanceModel; optional Acceptable types are:
The schema of the table.
- dict or list-of-dict
- pandas.DataFrame
- pyarrow.Table or pyarrow.RecordBatch
schema: The schema of the table, *optional*
Acceptable types are:
- pyarrow.Schema
- [LanceModel][lancedb.pydantic.LanceModel]
mode: str; default "create" mode: str; default "create"
The mode to use when creating the table. Can be either "create" or "overwrite". The mode to use when creating the table.
Can be either "create" or "overwrite".
By default, if the table already exists, an exception is raised. By default, if the table already exists, an exception is raised.
If you want to overwrite the table, use mode="overwrite". If you want to overwrite the table, use mode="overwrite".
on_bad_vectors: str, default "error" on_bad_vectors: str, default "error"
@@ -150,7 +162,8 @@ class DBConnection(ABC):
... for i in range(5): ... for i in range(5):
... yield pa.RecordBatch.from_arrays( ... yield pa.RecordBatch.from_arrays(
... [ ... [
... pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)), ... pa.array([[3.1, 4.1], [5.9, 26.5]],
... pa.list_(pa.float32(), 2)),
... pa.array(["foo", "bar"]), ... pa.array(["foo", "bar"]),
... pa.array([10.0, 20.0]), ... pa.array([10.0, 20.0]),
... ], ... ],
@@ -250,7 +263,7 @@ class LanceDBConnection(DBConnection):
return self._uri return self._uri
def table_names(self) -> list[str]: def table_names(self) -> list[str]:
"""Get the names of all tables in the database. """Get the names of all tables in the database. The names are sorted.
Returns Returns
------- -------
@@ -274,6 +287,7 @@ class LanceDBConnection(DBConnection):
for file_info in paths for file_info in paths
if file_info.extension == "lance" if file_info.extension == "lance"
] ]
tables.sort()
return tables return tables
def __len__(self) -> int: def __len__(self) -> int:
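A short sketch of the newly sorted `table_names()` behavior introduced in this hunk (it mirrors the `test_table_names` test added later in this compare; table names are illustrative): regardless of creation order, names come back alphabetically.

```python
import lancedb
import pandas as pd

db = lancedb.connect("./.lancedb")
data = pd.DataFrame({"vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["foo", "bar"]})

# Create tables out of order...
db.create_table("test2", data=data)
db.create_table("test1", data=data)

# ...and table_names() still returns them sorted.
print(db.table_names())  # ['test1', 'test2']
```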


@@ -30,7 +30,40 @@ pd = safe_import_pandas()
class Query(pydantic.BaseModel): class Query(pydantic.BaseModel):
"""A Query""" """The LanceDB Query
Attributes
----------
vector : List[float]
the vector to search for
filter : Optional[str]
sql filter to refine the query with, optional
prefilter : bool
if True then apply the filter before vector search
k : int
top k results to return
metric : str
the distance metric between a pair of vectors,
can support L2 (default), Cosine and Dot.
[metric definitions][search]
columns : Optional[List[str]]
which columns to return in the results
nprobes : int
The number of probes used - optional
- A higher number makes search more accurate but also slower.
- See discussion in [Querying an ANN Index][querying-an-ann-index] for
tuning advice.
refine_factor : Optional[int]
Refine the results by reading extra elements and re-ranking them in memory - optional
- A higher number makes search more accurate but also slower.
- See discussion in [Querying an ANN Index][querying-an-ann-index] for
tuning advice.
"""
vector_column: str = VECTOR_COLUMN_NAME vector_column: str = VECTOR_COLUMN_NAME
@@ -61,6 +94,10 @@ class Query(pydantic.BaseModel):
class LanceQueryBuilder(ABC): class LanceQueryBuilder(ABC):
"""Build LanceDB query based on specific query type:
vector or full text search.
"""
@classmethod @classmethod
def create( def create(
cls, cls,
@@ -133,11 +170,11 @@ class LanceQueryBuilder(ABC):
deprecated_in="0.3.1", deprecated_in="0.3.1",
removed_in="0.4.0", removed_in="0.4.0",
current_version=__version__, current_version=__version__,
details="Use the bar function instead", details="Use to_pandas() instead",
) )
def to_df(self) -> "pd.DataFrame": def to_df(self) -> "pd.DataFrame":
""" """
Deprecated alias for `to_pandas()`. Please use `to_pandas()` instead. *Deprecated alias for `to_pandas()`. Please use `to_pandas()` instead.*
Execute the query and return the results as a pandas DataFrame. Execute the query and return the results as a pandas DataFrame.
In addition to the selected columns, LanceDB also returns a vector In addition to the selected columns, LanceDB also returns a vector
@@ -226,13 +263,20 @@ class LanceQueryBuilder(ABC):
self._columns = columns self._columns = columns
return self return self
def where(self, where) -> LanceQueryBuilder: def where(self, where: str, prefilter: bool = False) -> LanceQueryBuilder:
"""Set the where clause. """Set the where clause.
Parameters Parameters
---------- ----------
where: str where: str
The where clause. The where clause which is a valid SQL where clause. See
`Lance filter pushdown <https://lancedb.github.io/lance/read_and_write.html#filter-push-down>`_
for valid SQL expressions.
prefilter: bool, default False
If True, apply the filter before vector search, otherwise the
filter is applied on the result of vector search.
This feature is **EXPERIMENTAL** and may be removed and modified
without warning in the future.
Returns Returns
------- -------
@@ -240,13 +284,12 @@ class LanceQueryBuilder(ABC):
The LanceQueryBuilder object. The LanceQueryBuilder object.
""" """
self._where = where self._where = where
self._prefilter = prefilter
return self return self
class LanceVectorQueryBuilder(LanceQueryBuilder): class LanceVectorQueryBuilder(LanceQueryBuilder):
""" """
A builder for nearest neighbor queries for LanceDB.
Examples Examples
-------- --------
>>> import lancedb >>> import lancedb
@@ -302,7 +345,7 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
Higher values will yield better recall (more likely to find vectors if Higher values will yield better recall (more likely to find vectors if
they exist) at the expense of latency. they exist) at the expense of latency.
See discussion in [Querying an ANN Index][../querying-an-ann-index] for See discussion in [Querying an ANN Index][querying-an-ann-index] for
tuning advice. tuning advice.
Parameters Parameters
@@ -369,14 +412,14 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
Parameters Parameters
---------- ----------
where: str where: str
The where clause. The where clause which is a valid SQL where clause. See
`Lance filter pushdown <https://lancedb.github.io/lance/read_and_write.html#filter-push-down>`_
for valid SQL expressions.
prefilter: bool, default False prefilter: bool, default False
If True, apply the filter before vector search, otherwise the If True, apply the filter before vector search, otherwise the
filter is applied on the result of vector search. filter is applied on the result of vector search.
This feature is **EXPERIMENTAL** and may be removed and modified This feature is **EXPERIMENTAL** and may be removed and modified
without warning in the future. Currently this is only supported without warning in the future.
in OSS and can only be used with a table that does not have an ANN
index.
Returns Returns
------- -------
@@ -389,6 +432,8 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
class LanceFtsQueryBuilder(LanceQueryBuilder): class LanceFtsQueryBuilder(LanceQueryBuilder):
"""A builder for full text search for LanceDB."""
def __init__(self, table: "lancedb.table.Table", query: str): def __init__(self, table: "lancedb.table.Table", query: str):
super().__init__(table) super().__init__(table)
self._query = query self._query = query
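Since the new `Query` docstring above documents `nprobes` and `refine_factor` as the main recall/latency knobs, here is a hedged usage sketch (values are illustrative, not recommendations; it assumes a table like the one in the docstring example already exists):

```python
import lancedb

db = lancedb.connect("./.lancedb")
tbl = db.open_table("my_table")  # placeholder table name

# Higher nprobes / refine_factor generally improve recall at the cost of latency.
results = (
    tbl.search([0.4, 1.4, 2.4])
    .metric("cosine")
    .nprobes(20)
    .refine_factor(10)
    .limit(10)
    .to_pandas()
)
```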


@@ -104,7 +104,11 @@ class RemoteDBConnection(DBConnection):
 raise ValueError("Either data or schema must be provided.")
 if data is not None:
 data = _sanitize_data(
-    data, schema, on_bad_vectors=on_bad_vectors, fill_value=fill_value
+    data,
+    schema,
+    metadata=None,
+    on_bad_vectors=on_bad_vectors,
+    fill_value=fill_value,
 )
 else:
 if schema is None:


@@ -149,13 +149,13 @@ class Table(ABC):
@property @property
@abstractmethod @abstractmethod
def schema(self) -> pa.Schema: def schema(self) -> pa.Schema:
"""The [Arrow Schema](https://arrow.apache.org/docs/python/api/datatypes.html#) of """The [Arrow Schema](https://arrow.apache.org/docs/python/api/datatypes.html#)
this Table of this Table
""" """
raise NotImplementedError raise NotImplementedError
def to_pandas(self): def to_pandas(self) -> "pd.DataFrame":
"""Return the table as a pandas DataFrame. """Return the table as a pandas DataFrame.
Returns Returns
@@ -191,17 +191,18 @@ class Table(ABC):
The distance metric to use when creating the index. The distance metric to use when creating the index.
Valid values are "L2", "cosine", or "dot". Valid values are "L2", "cosine", or "dot".
L2 is euclidean distance. L2 is euclidean distance.
num_partitions: int num_partitions: int, default 256
The number of IVF partitions to use when creating the index. The number of IVF partitions to use when creating the index.
Default is 256. Default is 256.
num_sub_vectors: int num_sub_vectors: int, default 96
The number of PQ sub-vectors to use when creating the index. The number of PQ sub-vectors to use when creating the index.
Default is 96. Default is 96.
vector_column_name: str, default "vector" vector_column_name: str, default "vector"
The vector column name to create the index. The vector column name to create the index.
replace: bool, default True replace: bool, default True
If True, replace the existing index if it exists. - If True, replace the existing index if it exists.
If False, raise an error if duplicate index exists.
- If False, raise an error if duplicate index exists.
accelerator: str, default None accelerator: str, default None
If set, use the given accelerator to create the index. If set, use the given accelerator to create the index.
Only support "cuda" for now. Only support "cuda" for now.
@@ -220,8 +221,14 @@ class Table(ABC):
Parameters Parameters
---------- ----------
data: list-of-dict, dict, pd.DataFrame data: DATA
The data to insert into the table. The data to insert into the table. Acceptable types are:
- dict or list-of-dict
- pandas.DataFrame
- pyarrow.Table or pyarrow.RecordBatch
mode: str mode: str
The mode to use when writing the data. Valid values are The mode to use when writing the data. Valid values are
"append" and "overwrite". "append" and "overwrite".
@@ -242,31 +249,70 @@ class Table(ABC):
query_type: str = "auto", query_type: str = "auto",
) -> LanceQueryBuilder: ) -> LanceQueryBuilder:
"""Create a search query to find the nearest neighbors """Create a search query to find the nearest neighbors
of the given query vector. of the given query vector. We currently support [vector search][search]
and [full-text search][experimental-full-text-search].
All query options are defined in [Query][lancedb.query.Query].
Examples
--------
>>> import lancedb
>>> db = lancedb.connect("./.lancedb")
>>> data = [
... {"original_width": 100, "caption": "bar", "vector": [0.1, 2.3, 4.5]},
... {"original_width": 2000, "caption": "foo", "vector": [0.5, 3.4, 1.3]},
... {"original_width": 3000, "caption": "test", "vector": [0.3, 6.2, 2.6]}
... ]
>>> table = db.create_table("my_table", data)
>>> query = [0.4, 1.4, 2.4]
>>> (table.search(query, vector_column_name="vector")
... .where("original_width > 1000", prefilter=True)
... .select(["caption", "original_width"])
... .limit(2)
... .to_pandas())
caption original_width vector _distance
0 foo 2000 [0.5, 3.4, 1.3] 5.220000
1 test 3000 [0.3, 6.2, 2.6] 23.089996
Parameters Parameters
---------- ----------
query: str, list, np.ndarray, PIL.Image.Image, default None query: list/np.ndarray/str/PIL.Image.Image, default None
The query to search for. If None then The targetted vector to search for.
the select/where/limit clauses are applied to filter
- *default None*.
Acceptable types are: list, np.ndarray, PIL.Image.Image
- If None then the select/where/limit clauses are applied to filter
the table the table
vector_column_name: str, default "vector" vector_column_name: str
The name of the vector column to search. The name of the vector column to search.
query_type: str, default "auto" *default "vector"*
"vector", "fts", or "auto" query_type: str
If "auto" then the query type is inferred from the query; *default "auto"*.
If `query` is a list/np.ndarray then the query type is "vector"; Acceptable types are: "vector", "fts", or "auto"
If `query` is a PIL.Image.Image then either do vector search
or raise an error if no corresponding embedding function is found. - If "auto" then the query type is inferred from the query;
If `query` is a string, then the query type is "vector" if the
- If `query` is a list/np.ndarray then the query type is
"vector";
- If `query` is a PIL.Image.Image then either do vector search,
or raise an error if no corresponding embedding function is found.
- If `query` is a string, then the query type is "vector" if the
table has embedding functions else the query type is "fts" table has embedding functions else the query type is "fts"
Returns Returns
------- -------
LanceQueryBuilder LanceQueryBuilder
A query builder object representing the query. A query builder object representing the query.
Once executed, the query returns selected columns, the vector, Once executed, the query returns
and also the "_distance" column which is the distance between the query
- selected columns
- the vector
- and also the "_distance" column which is the distance between the query
vector and the returned vector. vector and the returned vector.
""" """
raise NotImplementedError raise NotImplementedError
@@ -285,14 +331,19 @@ class Table(ABC):
Parameters Parameters
---------- ----------
where: str where: str
The SQL where clause to use when deleting rows. For example, 'x = 2' The SQL where clause to use when deleting rows.
or 'x IN (1, 2, 3)'. The filter must not be empty, or it will error.
- For example, 'x = 2' or 'x IN (1, 2, 3)'.
The filter must not be empty, or it will error.
Examples Examples
-------- --------
>>> import lancedb >>> import lancedb
>>> data = [ >>> data = [
... {"x": 1, "vector": [1, 2]}, {"x": 2, "vector": [3, 4]}, {"x": 3, "vector": [5, 6]} ... {"x": 1, "vector": [1, 2]},
... {"x": 2, "vector": [3, 4]},
... {"x": 3, "vector": [5, 6]}
... ] ... ]
>>> db = lancedb.connect("./.lancedb") >>> db = lancedb.connect("./.lancedb")
>>> table = db.create_table("my_table", data) >>> table = db.create_table("my_table", data)
@@ -377,7 +428,8 @@ class LanceTable(Table):
-------- --------
>>> import lancedb >>> import lancedb
>>> db = lancedb.connect("./.lancedb") >>> db = lancedb.connect("./.lancedb")
>>> table = db.create_table("my_table", [{"vector": [1.1, 0.9], "type": "vector"}]) >>> table = db.create_table("my_table",
... [{"vector": [1.1, 0.9], "type": "vector"}])
>>> table.version >>> table.version
2 2
>>> table.to_pandas() >>> table.to_pandas()
@@ -424,7 +476,8 @@ class LanceTable(Table):
-------- --------
>>> import lancedb >>> import lancedb
>>> db = lancedb.connect("./.lancedb") >>> db = lancedb.connect("./.lancedb")
>>> table = db.create_table("my_table", [{"vector": [1.1, 0.9], "type": "vector"}]) >>> table = db.create_table("my_table", [
... {"vector": [1.1, 0.9], "type": "vector"}])
>>> table.version >>> table.version
2 2
>>> table.to_pandas() >>> table.to_pandas()
@@ -669,14 +722,39 @@ class LanceTable(Table):
query_type: str = "auto", query_type: str = "auto",
) -> LanceQueryBuilder: ) -> LanceQueryBuilder:
"""Create a search query to find the nearest neighbors """Create a search query to find the nearest neighbors
of the given query vector. of the given query vector. We currently support [vector search][search]
and [full-text search][search].
Examples
--------
>>> import lancedb
>>> db = lancedb.connect("./.lancedb")
>>> data = [
... {"original_width": 100, "caption": "bar", "vector": [0.1, 2.3, 4.5]},
... {"original_width": 2000, "caption": "foo", "vector": [0.5, 3.4, 1.3]},
... {"original_width": 3000, "caption": "test", "vector": [0.3, 6.2, 2.6]}
... ]
>>> table = db.create_table("my_table", data)
>>> query = [0.4, 1.4, 2.4]
>>> (table.search(query, vector_column_name="vector")
... .where("original_width > 1000", prefilter=True)
... .select(["caption", "original_width"])
... .limit(2)
... .to_pandas())
caption original_width vector _distance
0 foo 2000 [0.5, 3.4, 1.3] 5.220000
1 test 3000 [0.3, 6.2, 2.6] 23.089996
Parameters Parameters
---------- ----------
query: str, list, np.ndarray, a PIL Image or None query: list/np.ndarray/str/PIL.Image.Image, default None
The query to search for. If None then The targetted vector to search for.
the select/where/limit clauses are applied to filter
the table - *default None*.
Acceptable types are: list, np.ndarray, PIL.Image.Image
- If None then the select/[where][sql]/limit clauses are applied
to filter the table
vector_column_name: str, default "vector" vector_column_name: str, default "vector"
The name of the vector column to search. The name of the vector column to search.
query_type: str, default "auto" query_type: str, default "auto"
@@ -685,7 +763,7 @@ class LanceTable(Table):
If `query` is a list/np.ndarray then the query type is "vector"; If `query` is a list/np.ndarray then the query type is "vector";
If `query` is a PIL.Image.Image then either do vector search If `query` is a PIL.Image.Image then either do vector search
or raise an error if no corresponding embedding function is found. or raise an error if no corresponding embedding function is found.
If the query is a string, then the query type is "vector" if the If the `query` is a string, then the query type is "vector" if the
table has embedding functions, else the query type is "fts" table has embedding functions, else the query type is "fts"
Returns Returns
@@ -720,7 +798,9 @@ class LanceTable(Table):
-------- --------
>>> import lancedb >>> import lancedb
>>> data = [ >>> data = [
... {"x": 1, "vector": [1, 2]}, {"x": 2, "vector": [3, 4]}, {"x": 3, "vector": [5, 6]} ... {"x": 1, "vector": [1, 2]},
... {"x": 2, "vector": [3, 4]},
... {"x": 3, "vector": [5, 6]}
... ] ... ]
>>> db = lancedb.connect("./.lancedb") >>> db = lancedb.connect("./.lancedb")
>>> table = db.create_table("my_table", data) >>> table = db.create_table("my_table", data)
@@ -740,7 +820,8 @@ class LanceTable(Table):
The data to insert into the table. The data to insert into the table.
At least one of `data` or `schema` must be provided. At least one of `data` or `schema` must be provided.
schema: pa.Schema or LanceModel, optional schema: pa.Schema or LanceModel, optional
The schema of the table. If not provided, the schema is inferred from the data. The schema of the table. If not provided,
the schema is inferred from the data.
At least one of `data` or `schema` must be provided. At least one of `data` or `schema` must be provided.
mode: str, default "create" mode: str, default "create"
The mode to use when writing the data. Valid values are The mode to use when writing the data. Valid values are
@@ -811,7 +892,8 @@ class LanceTable(Table):
file_info = fs.get_file_info(path) file_info = fs.get_file_info(path)
if file_info.type != pa.fs.FileType.Directory: if file_info.type != pa.fs.FileType.Directory:
raise FileNotFoundError( raise FileNotFoundError(
f"Table {name} does not exist. Please first call db.create_table({name}, data)" f"Table {name} does not exist."
f"Please first call db.create_table({name}, data)"
) )
return tbl return tbl
@@ -838,7 +920,9 @@ class LanceTable(Table):
-------- --------
>>> import lancedb >>> import lancedb
>>> data = [ >>> data = [
... {"x": 1, "vector": [1, 2]}, {"x": 2, "vector": [3, 4]}, {"x": 3, "vector": [5, 6]} ... {"x": 1, "vector": [1, 2]},
... {"x": 2, "vector": [3, 4]},
... {"x": 3, "vector": [5, 6]}
... ] ... ]
>>> db = lancedb.connect("./.lancedb") >>> db = lancedb.connect("./.lancedb")
>>> table = db.create_table("my_table", data) >>> table = db.create_table("my_table", data)
@@ -872,12 +956,6 @@ class LanceTable(Table):
def _execute_query(self, query: Query) -> pa.Table: def _execute_query(self, query: Query) -> pa.Table:
ds = self.to_lance() ds = self.to_lance()
if query.prefilter:
for idx in ds.list_indices():
if query.vector_column in idx["fields"]:
raise NotImplementedError(
"Prefiltering for indexed vector column is coming soon."
)
return ds.to_table( return ds.to_table(
columns=query.columns, columns=query.columns,
filter=query.filter, filter=query.filter,
@@ -1019,7 +1097,8 @@ def _sanitize_vector_column(
# ChunkedArray is annoying to work with, so we combine chunks here # ChunkedArray is annoying to work with, so we combine chunks here
vec_arr = data[vector_column_name].combine_chunks() vec_arr = data[vector_column_name].combine_chunks()
if pa.types.is_list(data[vector_column_name].type): if pa.types.is_list(data[vector_column_name].type):
# if it's a variable size list array we make sure the dimensions are all the same # if it's a variable size list array,
# we make sure the dimensions are all the same
has_jagged_ndims = len(vec_arr.values) % len(data) != 0 has_jagged_ndims = len(vec_arr.values) % len(data) != 0
if has_jagged_ndims: if has_jagged_ndims:
data = _sanitize_jagged( data = _sanitize_jagged(


@@ -63,7 +63,8 @@ def set_sentry():
""" """
if "exc_info" in hint: if "exc_info" in hint:
exc_type, exc_value, tb = hint["exc_info"] exc_type, exc_value, tb = hint["exc_info"]
if "out of memory" in str(exc_value).lower(): ignored_errors = ["out of memory", "no space left on device", "testing"]
if any(error in str(exc_value).lower() for error in ignored_errors):
return None return None
if is_git_dir(): if is_git_dir():
@@ -97,7 +98,7 @@ def set_sentry():
dsn="https://c63ef8c64e05d1aa1a96513361f3ca2f@o4505950840946688.ingest.sentry.io/4505950933614592", dsn="https://c63ef8c64e05d1aa1a96513361f3ca2f@o4505950840946688.ingest.sentry.io/4505950933614592",
debug=False, debug=False,
include_local_variables=False, include_local_variables=False,
traces_sample_rate=1.0, traces_sample_rate=0.5,
environment="production", # 'dev' or 'production' environment="production", # 'dev' or 'production'
before_send=before_send, before_send=before_send,
ignore_errors=[KeyboardInterrupt, FileNotFoundError, bdb.BdbQuit], ignore_errors=[KeyboardInterrupt, FileNotFoundError, bdb.BdbQuit],


@@ -1,9 +1,9 @@
 [project]
 name = "lancedb"
-version = "0.3.2"
+version = "0.3.3"
 dependencies = [
 "deprecation",
-"pylance==0.8.7",
+"pylance==0.8.10",
 "ratelimiter~=1.0",
 "retry>=0.9.2",
 "tqdm>=4.1.0",


@@ -150,6 +150,21 @@ def test_ingest_iterator(tmp_path):
run_tests(PydanticSchema) run_tests(PydanticSchema)
def test_table_names(tmp_path):
db = lancedb.connect(tmp_path)
data = pd.DataFrame(
{
"vector": [[3.1, 4.1], [5.9, 26.5]],
"item": ["foo", "bar"],
"price": [10.0, 20.0],
}
)
db.create_table("test2", data=data)
db.create_table("test1", data=data)
db.create_table("test3", data=data)
assert db.table_names() == ["test1", "test2", "test3"]
def test_create_mode(tmp_path): def test_create_mode(tmp_path):
db = lancedb.connect(tmp_path) db = lancedb.connect(tmp_path)
data = pd.DataFrame( data = pd.DataFrame(
@@ -287,3 +302,27 @@ def test_replace_index(tmp_path):
num_sub_vectors=4, num_sub_vectors=4,
replace=True, replace=True,
) )
def test_prefilter_with_index(tmp_path):
db = lancedb.connect(uri=tmp_path)
data = [
{"vector": np.random.rand(128), "item": "foo", "price": float(i)}
for i in range(1000)
]
sample_key = data[100]["vector"]
table = db.create_table(
"test",
data,
)
table.create_index(
num_partitions=2,
num_sub_vectors=4,
)
table = (
table.search(sample_key)
.where("price == 500", prefilter=True)
.limit(5)
.to_arrow()
)
assert table.num_rows == 1


@@ -1,6 +1,6 @@
 [package]
 name = "vectordb-node"
-version = "0.3.3"
+version = "0.3.5"
 description = "Serverless, low-latency vector database for AI applications"
 license = "Apache-2.0"
 edition = "2018"


@@ -70,7 +70,6 @@ fn get_index_params_builder(
 .map(|mt| {
 let metric_type = mt.unwrap();
 index_builder.metric_type(metric_type);
-pq_params.metric_type = metric_type;
 });
 let num_partitions = obj.get_opt_usize(cx, "num_partitions")?;


@@ -239,6 +239,8 @@ fn main(mut cx: ModuleContext) -> NeonResult<()> {
cx.export_function("tableDelete", JsTable::js_delete)?; cx.export_function("tableDelete", JsTable::js_delete)?;
cx.export_function("tableCleanupOldVersions", JsTable::js_cleanup)?; cx.export_function("tableCleanupOldVersions", JsTable::js_cleanup)?;
cx.export_function("tableCompactFiles", JsTable::js_compact)?; cx.export_function("tableCompactFiles", JsTable::js_compact)?;
cx.export_function("tableListIndices", JsTable::js_list_indices)?;
cx.export_function("tableIndexStats", JsTable::js_index_stats)?;
cx.export_function( cx.export_function(
"tableCreateVectorIndex", "tableCreateVectorIndex",
index::vector::table_create_vector_index, index::vector::table_create_vector_index,


@@ -247,7 +247,7 @@ impl JsTable {
} }
rt.spawn(async move { rt.spawn(async move {
let stats = table.compact_files(options).await; let stats = table.compact_files(options, None).await;
deferred.settle_with(&channel, move |mut cx| { deferred.settle_with(&channel, move |mut cx| {
let stats = stats.or_throw(&mut cx)?; let stats = stats.or_throw(&mut cx)?;
@@ -276,4 +276,91 @@ impl JsTable {
}); });
Ok(promise) Ok(promise)
} }
pub(crate) fn js_list_indices(mut cx: FunctionContext) -> JsResult<JsPromise> {
let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
let rt = runtime(&mut cx)?;
let (deferred, promise) = cx.promise();
// let predicate = cx.argument::<JsString>(0)?.value(&mut cx);
let channel = cx.channel();
let table = js_table.table.clone();
rt.spawn(async move {
let indices = table.load_indices().await;
deferred.settle_with(&channel, move |mut cx| {
let indices = indices.or_throw(&mut cx)?;
let output = JsArray::new(&mut cx, indices.len() as u32);
for (i, index) in indices.iter().enumerate() {
let js_index = JsObject::new(&mut cx);
let index_name = cx.string(index.index_name.clone());
js_index.set(&mut cx, "name", index_name)?;
let index_uuid = cx.string(index.index_uuid.clone());
js_index.set(&mut cx, "uuid", index_uuid)?;
let js_index_columns = JsArray::new(&mut cx, index.columns.len() as u32);
for (j, column) in index.columns.iter().enumerate() {
let js_column = cx.string(column.clone());
js_index_columns.set(&mut cx, j as u32, js_column)?;
}
js_index.set(&mut cx, "columns", js_index_columns)?;
output.set(&mut cx, i as u32, js_index)?;
}
Ok(output)
})
});
Ok(promise)
}
pub(crate) fn js_index_stats(mut cx: FunctionContext) -> JsResult<JsPromise> {
let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
let rt = runtime(&mut cx)?;
let (deferred, promise) = cx.promise();
let index_uuid = cx.argument::<JsString>(0)?.value(&mut cx);
let channel = cx.channel();
let table = js_table.table.clone();
rt.spawn(async move {
let load_stats = futures::try_join!(
table.count_indexed_rows(&index_uuid),
table.count_unindexed_rows(&index_uuid)
);
deferred.settle_with(&channel, move |mut cx| {
let (indexed_rows, unindexed_rows) = load_stats.or_throw(&mut cx)?;
let output = JsObject::new(&mut cx);
match indexed_rows {
Some(x) => {
let i = cx.number(x as f64);
output.set(&mut cx, "numIndexedRows", i)?;
}
None => {
let null = cx.null();
output.set(&mut cx, "numIndexedRows", null)?;
}
};
match unindexed_rows {
Some(x) => {
let i = cx.number(x as f64);
output.set(&mut cx, "numUnindexedRows", i)?;
}
None => {
let null = cx.null();
output.set(&mut cx, "numUnindexedRows", null)?;
}
};
Ok(output)
})
});
Ok(promise)
}
} }


@@ -1,6 +1,6 @@
 [package]
 name = "vectordb"
-version = "0.3.3"
+version = "0.3.5"
 edition = "2021"
 description = "LanceDB: A serverless, low-latency vector database for AI applications"
 license = "Apache-2.0"


@@ -161,7 +161,7 @@ impl Database {
 ///
 /// * A [Vec<String>] with all table names.
 pub async fn table_names(&self) -> Result<Vec<String>> {
-let f = self
+let mut f = self
 .object_store
 .read_dir(self.base_path.clone())
 .await?
@@ -175,7 +175,8 @@
 is_lance.unwrap_or(false)
 })
 .filter_map(|p| p.file_stem().and_then(|s| s.to_str().map(String::from)))
-.collect();
+.collect::<Vec<String>>();
+f.sort();
 Ok(f)
 }
@@ -312,8 +313,8 @@
 let db = Database::connect(uri).await.unwrap();
 let tables = db.table_names().await.unwrap();
 assert_eq!(tables.len(), 2);
-assert!(tables.contains(&String::from("table1")));
-assert!(tables.contains(&String::from("table2")));
+assert!(tables[0].eq(&String::from("table1")));
+assert!(tables[1].eq(&String::from("table2")));
 }
 #[tokio::test]


@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use lance::format::{Index, Manifest};
use lance::index::vector::ivf::IvfBuildParams; use lance::index::vector::ivf::IvfBuildParams;
use lance::index::vector::pq::PQBuildParams; use lance::index::vector::pq::PQBuildParams;
use lance::index::vector::VectorIndexParams; use lance::index::vector::VectorIndexParams;
@@ -98,7 +99,11 @@ impl VectorIndexBuilder for IvfPQIndexBuilder {
let ivf_params = self.ivf_params.clone().unwrap_or_default(); let ivf_params = self.ivf_params.clone().unwrap_or_default();
let pq_params = self.pq_params.clone().unwrap_or_default(); let pq_params = self.pq_params.clone().unwrap_or_default();
VectorIndexParams::with_ivf_pq_params(pq_params.metric_type, ivf_params, pq_params) VectorIndexParams::with_ivf_pq_params(
self.metric_type.unwrap_or(MetricType::L2),
ivf_params,
pq_params,
)
} }
fn get_replace(&self) -> bool { fn get_replace(&self) -> bool {
@@ -106,6 +111,27 @@ impl VectorIndexBuilder for IvfPQIndexBuilder {
} }
} }
pub struct VectorIndex {
pub columns: Vec<String>,
pub index_name: String,
pub index_uuid: String,
}
impl VectorIndex {
pub fn new_from_format(manifest: &Manifest, index: &Index) -> VectorIndex {
let fields = index
.fields
.iter()
.map(|i| manifest.schema.fields[*i as usize].name.clone())
.collect();
VectorIndex {
columns: fields,
index_name: index.name.clone(),
index_uuid: index.uuid.to_string(),
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@@ -158,7 +184,6 @@ mod tests {
pq_params.max_iters = 1; pq_params.max_iters = 1;
pq_params.num_bits = 8; pq_params.num_bits = 8;
pq_params.num_sub_vectors = 50; pq_params.num_sub_vectors = 50;
pq_params.metric_type = MetricType::Cosine;
pq_params.max_opq_iters = 2; pq_params.max_opq_iters = 2;
index_builder.ivf_params(ivf_params); index_builder.ivf_params(ivf_params);
index_builder.pq_params(pq_params); index_builder.pq_params(pq_params);
@@ -176,7 +201,6 @@ mod tests {
assert_eq!(pq_params.max_iters, 1); assert_eq!(pq_params.max_iters, 1);
assert_eq!(pq_params.num_bits, 8); assert_eq!(pq_params.num_bits, 8);
assert_eq!(pq_params.num_sub_vectors, 50); assert_eq!(pq_params.num_sub_vectors, 50);
assert_eq!(pq_params.metric_type, MetricType::Cosine);
assert_eq!(pq_params.max_opq_iters, 2); assert_eq!(pq_params.max_opq_iters, 2);
} else { } else {
assert!(false, "Expected second stage to be pq") assert!(false, "Expected second stage to be pq")


@@ -18,14 +18,16 @@ use std::sync::Arc;
use arrow_array::{Float32Array, RecordBatchReader}; use arrow_array::{Float32Array, RecordBatchReader};
use arrow_schema::SchemaRef; use arrow_schema::SchemaRef;
use lance::dataset::cleanup::RemovalStats; use lance::dataset::cleanup::RemovalStats;
use lance::dataset::optimize::{compact_files, CompactionMetrics, CompactionOptions}; use lance::dataset::optimize::{
compact_files, CompactionMetrics, CompactionOptions, IndexRemapperOptions,
};
use lance::dataset::{Dataset, WriteParams}; use lance::dataset::{Dataset, WriteParams};
use lance::index::IndexType; use lance::index::{DatasetIndexExt, IndexType};
use lance::io::object_store::WrappingObjectStore; use lance::io::object_store::WrappingObjectStore;
use std::path::Path; use std::path::Path;
use crate::error::{Error, Result}; use crate::error::{Error, Result};
use crate::index::vector::VectorIndexBuilder; use crate::index::vector::{VectorIndex, VectorIndexBuilder};
use crate::query::Query; use crate::query::Query;
use crate::utils::{PatchReadParam, PatchWriteParam}; use crate::utils::{PatchReadParam, PatchWriteParam};
use crate::WriteMode; use crate::WriteMode;
@@ -238,8 +240,6 @@ impl Table {
/// Create index on the table. /// Create index on the table.
pub async fn create_index(&mut self, index_builder: &impl VectorIndexBuilder) -> Result<()> { pub async fn create_index(&mut self, index_builder: &impl VectorIndexBuilder) -> Result<()> {
use lance::index::DatasetIndexExt;
let mut dataset = self.dataset.as_ref().clone(); let mut dataset = self.dataset.as_ref().clone();
dataset dataset
.create_index( .create_index(
@@ -257,6 +257,14 @@ impl Table {
Ok(()) Ok(())
} }
pub async fn optimize_indices(&mut self) -> Result<()> {
let mut dataset = self.dataset.as_ref().clone();
dataset.optimize_indices().await?;
Ok(())
}
/// Insert records into this Table /// Insert records into this Table
/// ///
/// # Arguments /// # Arguments
@@ -353,12 +361,45 @@ impl Table {
/// for faster reads. /// for faster reads.
/// ///
/// This calls into [lance::dataset::optimize::compact_files]. /// This calls into [lance::dataset::optimize::compact_files].
pub async fn compact_files(&mut self, options: CompactionOptions) -> Result<CompactionMetrics> { pub async fn compact_files(
&mut self,
options: CompactionOptions,
remap_options: Option<Arc<dyn IndexRemapperOptions>>,
) -> Result<CompactionMetrics> {
let mut dataset = self.dataset.as_ref().clone(); let mut dataset = self.dataset.as_ref().clone();
let metrics = compact_files(&mut dataset, options, None).await?; let metrics = compact_files(&mut dataset, options, remap_options).await?;
self.dataset = Arc::new(dataset); self.dataset = Arc::new(dataset);
Ok(metrics) Ok(metrics)
} }
pub fn count_fragments(&self) -> usize {
self.dataset.count_fragments()
}
pub fn count_deleted_rows(&self) -> usize {
self.dataset.count_deleted_rows()
}
pub fn num_small_files(&self, max_rows_per_group: usize) -> usize {
self.dataset.num_small_files(max_rows_per_group)
}
pub async fn count_indexed_rows(&self, index_uuid: &str) -> Result<Option<usize>> {
Ok(self.dataset.count_indexed_rows(index_uuid).await?)
}
pub async fn count_unindexed_rows(&self, index_uuid: &str) -> Result<Option<usize>> {
Ok(self.dataset.count_unindexed_rows(index_uuid).await?)
}
pub async fn load_indices(&self) -> Result<Vec<VectorIndex>> {
let (indices, mf) =
futures::try_join!(self.dataset.load_indices(), self.dataset.latest_manifest())?;
Ok(indices
.iter()
.map(|i| VectorIndex::new_from_format(&mf, i))
.collect())
}
} }
#[cfg(test)] #[cfg(test)]