Compare commits

...

41 Commits

Author SHA1 Message Date
Chang She
9441fde2bb reproducibility 2023-09-06 02:24:19 -07:00
Chang She
32163063dc Fix up docs (#477) 2023-09-05 22:29:50 -07:00
Chang She
9a9a73a65d [python] Use pydantic for embedding function persistence (#467)
1. Support persistent embedding function so users can just search using
query string
2. Add fixed size list conversion for multiple vector columns
3. Add support for empty query (just apply select/where/limit).
4. Refactor and simplify some of the data prep code

---------

Co-authored-by: Chang She <chang@lancedb.com>
Co-authored-by: Weston Pace <weston.pace@gmail.com>
2023-09-05 21:30:45 -07:00
Ayush Chaurasia
52fa7f5577 [Docs] Small typo fixes (#460) 2023-09-02 22:17:19 +05:30
Chang She
0cba0f4f92 [python] Temporary update feature (#457)
Combine delete and append to make a temporary update feature that is
only enabled for the local python lancedb.

The reason why this is temporary is because it first has to load the
data that matches the where clause into memory, which is technically
unbounded.

---------

Co-authored-by: Chang She <chang@lancedb.com>
2023-08-30 00:25:26 -07:00
Will Jones
8391ffee84 chore: make crate more discoverable (#443)
A few small changes to make the Rust crate more discoverable.
2023-08-25 08:59:14 -07:00
Lance Release
fe8848efb9 [python] Bump version: 0.2.1 → 0.2.2 2023-08-24 23:18:10 +00:00
Chang She
213c313b99 Revert "Updating package-lock.json" (#455)
This reverts commit ab97e5d632.

Co-authored-by: Chang She <chang@lancedb.com>
2023-08-24 15:54:57 -07:00
Chang She
157e995a43 Revert "Bump version: 0.2.4 → 0.2.5" (#454)
This reverts commit 87e9a0250f.

I triggered the nodejs release commit GHA by mistake. Reverting it.
The tag will be removed manually.

Co-authored-by: Chang She <chang@lancedb.com>
2023-08-24 15:44:37 -07:00
Lance Release
ab97e5d632 Updating package-lock.json 2023-08-24 21:54:35 +00:00
Lance Release
87e9a0250f Bump version: 0.2.4 → 0.2.5 2023-08-24 21:54:18 +00:00
Chang She
e587a17a64 [python] Support schema evolution in local LanceDB (#452)
Previously if you needed to add a column to a table you'd have to
rewrite the whole table. Instead,
we use the merge functionality from Lance format
to incrementally add columns from another table
or dataframe.

---------

Co-authored-by: Chang She <chang@lancedb.com>
Co-authored-by: Weston Pace <weston.pace@gmail.com>
2023-08-24 14:40:49 -07:00
Chang She
2f1f9f6338 [python] improve restore functionality (#451)
Previously the temporary restore feature required copying data. The new
feature in pylance does not.

---------

Co-authored-by: Chang She <chang@lancedb.com>
Co-authored-by: Weston Pace <weston.pace@gmail.com>
2023-08-24 11:00:34 -07:00
Lance Release
a34fa4df26 Updating package-lock.json 2023-08-24 05:23:19 +00:00
Lance Release
e20979b335 Updating package-lock.json 2023-08-24 04:48:11 +00:00
Lance Release
08689c345d Bump version: 0.2.3 → 0.2.4 2023-08-24 04:47:57 +00:00
Lance Release
909b7e90cd [python] Bump version: 0.2.0 → 0.2.1 2023-08-24 04:00:11 +00:00
QianZhu
ae8486cc8f bump lance version to 0.6.5 for lancedb release (#453) 2023-08-23 20:59:03 -07:00
Tevin Wang
b8f32d082f Clean up docs testing - exclude by glob instead of by file (#450) 2023-08-24 07:30:37 +05:30
Jai
ea7522baa5 fix url to image in docs (#444) 2023-08-22 16:21:02 -07:00
Lance Release
8764741116 Updating package-lock.json 2023-08-22 21:11:28 +00:00
Ayush Chaurasia
cc916389a6 [DOCS] Major Docs Revamp (#435) 2023-08-22 14:06:26 -07:00
Lance Release
3d7d903d88 Updating package-lock.json 2023-08-22 20:15:13 +00:00
Lance Release
cc5e2d3e10 Bump version: 0.2.2 → 0.2.3 2023-08-22 20:14:58 +00:00
Rob Meng
30f5bc5865 expose awsRegion to be configurable (#441) 2023-08-22 16:00:14 -04:00
gsilvestrin
2737315cb2 feat(node): Create empty tables / Arrow Tables (#399)
- Supports creating an empty table as long as an Arrow Schema is provided
- Supports creating a table from an Arrow Table (can be passed as data)
- Simplified some Arrow code in the TS/FFI side
- removed createTableArrow method, it was never documented / tested.
2023-08-22 10:57:45 -07:00
Rob Meng
d52422603c use a lambda function to hide the value of credentials when printing a connection/table (#438)
Previously when logging the `LocalConnection` and `LocalTable` classes,
we would expose the aws creds inside them. This PR changes the stored
creds to a anonymous function to hide the creds
2023-08-21 23:06:44 -04:00
Ayush Chaurasia
f35f8e451f [DOCS] Update integrations + small typos (#432)
Depends on - https://github.com/lancedb/lancedb/pull/430

---------

Co-authored-by: Kevin Tse <NivekT@users.noreply.github.com>
2023-08-18 09:59:22 +05:30
Ayush Chaurasia
0b9924b432 Make creating (and adding to) tables via Iterators more flexible & intuitive (#430)
It improves the UX as iterators can be of any type supported by the
table (plus recordbatch) & there is no separate requirement.
Also expands the test cases for pydantic & arrow schema.
If this looks good I'll update the docs.

Example usage:
```
class Content(LanceModel):
    vector: vector(2)
    item: str
    price: float

def make_batches():
    for _ in range(5):
        yield from [ 
        # pandas
        pd.DataFrame({
            "vector": [[3.1, 4.1], [1, 1]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }),
        
        # pylist
        [
            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
            {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
        ],

        # recordbatch
        pa.RecordBatch.from_arrays(
            [
                pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)),
                pa.array(["foo", "bar"]),
                pa.array([10.0, 20.0]),
            ], 
            ["vector", "item", "price"],
        ),

        # pydantic list
        [
            Content(vector=[3.1, 4.1], item="foo", price=10.0),
            Content(vector=[5.9, 26.5], item="bar", price=20.0),
        ]]

db = lancedb.connect("db")
tbl = db.create_table("tabley", make_batches(), schema=Content, mode="overwrite")

tbl.add(make_batches())
```
The same should work with an arrow schema.

---------

Co-authored-by: Weston Pace <weston.pace@gmail.com>
2023-08-18 09:56:30 +05:30
Lance Release
ba416a571d Updating package-lock.json 2023-08-17 23:48:01 +00:00
Lance Release
13317ffb46 Updating package-lock.json 2023-08-17 23:07:51 +00:00
Lance Release
ca961567fe Bump version: 0.2.1 → 0.2.2 2023-08-17 23:07:36 +00:00
gsilvestrin
31a12a141d fix(node) Electron crashes when creating external buffer (#424) 2023-08-17 14:47:54 -07:00
Chang She
e3061d4cb4 [python] Temporary restore feature (#428)
This adds LanceTable.restore as a temporary feature. It reads data from
a previous version and creates
a new snapshot version using that data. This makes the version writeable
unlike checkout. This should be replaced once the feature is implemented
in pylance.

Co-authored-by: Chang She <chang@lancedb.com>
2023-08-14 20:10:29 -07:00
Lance Release
1fcc67fd2c Updating package-lock.json 2023-08-14 23:02:39 +00:00
Rob Meng
ac18812af0 fix moka version (#427) 2023-08-14 18:28:55 -04:00
Lance Release
8324e0f171 Bump version: 0.2.0 → 0.2.1 2023-08-14 22:22:24 +00:00
Rob Meng
f0bcb26f32 Upgrade lance and pass AWS creds when opening a table (#426) 2023-08-14 18:22:02 -04:00
Lance Release
b281c5255c Updating package-lock.json 2023-08-14 17:03:51 +00:00
Lance Release
d349d2a44a Updating package-lock.json 2023-08-14 16:06:52 +00:00
Lance Release
0699a6fa7b Bump version: 0.1.19 → 0.2.0 2023-08-14 16:06:36 +00:00
55 changed files with 3833 additions and 475 deletions

View File

@@ -1,5 +1,5 @@
[bumpversion] [bumpversion]
current_version = 0.1.19 current_version = 0.2.4
commit = True commit = True
message = Bump version: {current_version} → {new_version} message = Bump version: {current_version} → {new_version}
tag = True tag = True

View File

@@ -6,7 +6,7 @@ members = [
resolver = "2" resolver = "2"
[workspace.dependencies] [workspace.dependencies]
lance = "=0.6.1" lance = "=0.6.5"
arrow-array = "43.0" arrow-array = "43.0"
arrow-data = "43.0" arrow-data = "43.0"
arrow-schema = "43.0" arrow-schema = "43.0"
@@ -14,4 +14,3 @@ arrow-ipc = "43.0"
half = { "version" = "=2.2.1", default-features = false } half = { "version" = "=2.2.1", default-features = false }
object_store = "0.6.1" object_store = "0.6.1"
snafu = "0.7.4" snafu = "0.7.4"

View File

@@ -12,6 +12,15 @@ theme:
- content.code.copy - content.code.copy
- content.tabs.link - content.tabs.link
- content.action.edit - content.action.edit
- toc.follow
- toc.integrate
- navigation.top
- navigation.tabs
- navigation.tabs.sticky
- navigation.footer
- navigation.tracking
- navigation.instant
- navigation.indexes
icon: icon:
repo: fontawesome/brands/github repo: fontawesome/brands/github
custom_dir: overrides custom_dir: overrides
@@ -55,11 +64,48 @@ markdown_extensions:
- md_in_html - md_in_html
nav: nav:
- Home: index.md - Home:
- 🏢 Home: index.md
- 💡 Basics: basic.md
- 📚 Guides:
- Tables: guides/tables.md
- Vector Search: search.md
- SQL filters: sql.md
- Indexing: ann_indexes.md
- 🧬 Embeddings: embedding.md
- 🔍 Python full-text search: fts.md
- 🔌 Integrations:
- integrations/index.md
- Pandas and PyArrow: python/arrow.md
- DuckDB: python/duckdb.md
- LangChain 🔗: https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lancedb.html
- LangChain JS/TS 🔗: https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/lancedb
- LlamaIndex 🦙: https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html
- Pydantic: python/pydantic.md
- Voxel51: integrations/voxel51.md
- PromptTools: integrations/prompttools.md
- 🐍 Python examples:
- examples/index.md
- YouTube Transcript Search: notebooks/youtube_transcript_search.ipynb
- Documentation QA Bot using LangChain: notebooks/code_qa_bot.ipynb
- Multimodal search using CLIP: notebooks/multimodal_search.ipynb
- Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md
- Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md
- 🌐 Javascript examples:
- Examples: examples/index_js.md
- Serverless Website Chatbot: examples/serverless_website_chatbot.md
- YouTube Transcript Search: examples/youtube_transcript_bot_with_nodejs.md
- TransformersJS Embedding Search: examples/transformerjs_embedding_search_nodejs.md
- Basics: basic.md - Basics: basic.md
- Guides:
- Tables: guides/tables.md
- Vector Search: search.md
- SQL filters: sql.md
- Indexing: ann_indexes.md
- Embeddings: embedding.md - Embeddings: embedding.md
- Python full-text search: fts.md - Python full-text search: fts.md
- Integrations: - Integrations:
- integrations/index.md
- Pandas and PyArrow: python/arrow.md - Pandas and PyArrow: python/arrow.md
- DuckDB: python/duckdb.md - DuckDB: python/duckdb.md
- LangChain 🦜️🔗: https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lancedb.html - LangChain 🦜️🔗: https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lancedb.html
@@ -67,24 +113,23 @@ nav:
- LlamaIndex 🦙: https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html - LlamaIndex 🦙: https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html
- Pydantic: python/pydantic.md - Pydantic: python/pydantic.md
- Voxel51: integrations/voxel51.md - Voxel51: integrations/voxel51.md
- PromptTools: integrations/prompttools.md
- Python examples: - Python examples:
- examples/index.md
- YouTube Transcript Search: notebooks/youtube_transcript_search.ipynb - YouTube Transcript Search: notebooks/youtube_transcript_search.ipynb
- Documentation QA Bot using LangChain: notebooks/code_qa_bot.ipynb - Documentation QA Bot using LangChain: notebooks/code_qa_bot.ipynb
- Multimodal search using CLIP: notebooks/multimodal_search.ipynb - Multimodal search using CLIP: notebooks/multimodal_search.ipynb
- Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md - Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md
- Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md - Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md
- Javascript examples: - Javascript examples:
- examples/index_js.md
- YouTube Transcript Search: examples/youtube_transcript_bot_with_nodejs.md - YouTube Transcript Search: examples/youtube_transcript_bot_with_nodejs.md
- Serverless Chatbot from any website: examples/serverless_website_chatbot.md
- TransformersJS Embedding Search: examples/transformerjs_embedding_search_nodejs.md - TransformersJS Embedding Search: examples/transformerjs_embedding_search_nodejs.md
- Guides:
- Tables: guides/tables.md
- Vector Search: search.md
- SQL filters: sql.md
- Indexing: ann_indexes.md
- API references: - API references:
- Python API: python/python.md - Python API: python/python.md
- Javascript API: javascript/modules.md - Javascript API: javascript/modules.md
- LanceDB Cloud↗: https://noteforms.com/forms/lancedb-mailing-list-cloud-kty1o5?notionforms=1&utm_source=notionforms
extra_css: extra_css:
- styles/global.css - styles/global.css

Binary file not shown.

After

Width:  |  Height:  |  Size: 104 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 170 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.7 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 205 KiB

View File

@@ -198,3 +198,15 @@ you can pass in `ignore_missing=True`.
This section covered the very basics of the LanceDB API. This section covered the very basics of the LanceDB API.
LanceDB supports many additional features when creating indices to speed up search and options for search. LanceDB supports many additional features when creating indices to speed up search and options for search.
These are contained in the next section of the documentation. These are contained in the next section of the documentation.
## Note: Bundling vectorDB apps with webpack
Since LanceDB contains a prebuilt Node binary, you must configure `next.config.js` to exclude it from webpack. This is required for both using Next.js and deploying on Vercel.
```javascript
/** @type {import('next').NextConfig} */
module.exports = ({
webpack(config) {
config.externals.push({ vectordb: 'vectordb' })
return config;
}
})
```

View File

@@ -66,7 +66,7 @@ You can also use an external API like OpenAI to generate embeddings
to generate embeddings for each row. to generate embeddings for each row.
Say if you have a pandas DataFrame with a `text` column that you want to be embedded, Say if you have a pandas DataFrame with a `text` column that you want to be embedded,
you can use the [with_embeddings](https://lancedb.github.io/lancedb/python/#lancedb.embeddings.with_embeddings) you can use the [with_embeddings](https://lancedb.github.io/lancedb/python/python/#lancedb.embeddings.with_embeddings)
function to generate embeddings and create a combined pyarrow table: function to generate embeddings and create a combined pyarrow table:

View File

@@ -0,0 +1,23 @@
# Examples
Here are some of the examples, projects and applications using LanceDB python library. Some examples are covered in detail in the next sections. You can find more on [VectorDB Recipes](https://github.com/lancedb/vectordb-recipes)
| Example | Interactive Envs | Scripts |
|-------- | ---------------- | ------ |
| | | |
| [Youtube transcript search bot](https://github.com/lancedb/vectordb-recipes/tree/main/examples/youtube_bot/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/youtube_bot/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>| [![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)](https://github.com/lancedb/vectordb-recipes/tree/main/examples/youtube_bot/main.py)|
| [Langchain: Code Docs QA bot](https://github.com/lancedb/vectordb-recipes/tree/main/examples/Code-Documentation-QA-Bot/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Code-Documentation-QA-Bot/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>| [![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)](https://github.com/lancedb/vectordb-recipes/tree/main/examples/Code-Documentation-QA-Bot/main.py) |
| [AI Agents: Reducing Hallucination](https://github.com/lancedb/vectordb-recipes/tree/main/examples/reducing_hallucinations_ai_agents/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/reducing_hallucinations_ai_agents/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>| [![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)](https://github.com/lancedb/vectordb-recipes/tree/main/examples/reducing_hallucinations_ai_agents/main.py)|
| [Multimodal CLIP: DiffusionDB](https://github.com/lancedb/vectordb-recipes/tree/main/examples/multimodal_clip/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/multimodal_clip/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>| [![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)](https://github.com/lancedb/vectordb-recipes/tree/main/examples/multimodal_clip/main.py) |
| [Multimodal CLIP: Youtube videos](https://github.com/lancedb/vectordb-recipes/tree/main/examples/multimodal_video_search/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/multimodal_video_search/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>| [![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)](https://github.com/lancedb/vectordb-recipes/tree/main/examples/multimodal_video_search/main.py) |
| [Movie Recommender](https://github.com/lancedb/vectordb-recipes/tree/main/examples/movie-recommender/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/movie-recommender/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | [![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)](https://github.com/lancedb/vectordb-recipes/tree/main/examples/movie-recommender/main.py) |
| [Audio Search](https://github.com/lancedb/vectordb-recipes/tree/main/examples/audio_search/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/audio_search/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | [![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)](https://github.com/lancedb/vectordb-recipes/tree/main/examples/audio_search/main.py) |
| [Multimodal Image + Text Search](https://github.com/lancedb/vectordb-recipes/tree/main/examples/multimodal_search/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/multimodal_search/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | [![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)](https://github.com/lancedb/vectordb-recipes/tree/main/examples/multimodal_search/main.py) |
| [Evaluating Prompts with Prompttools](https://github.com/lancedb/vectordb-recipes/tree/main/examples/prompttools-eval-prompts/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/prompttools-eval-prompts/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | |
## Projects & Applications powered by LanceDB
| Project Name | Description | Screenshot |
|-----------------------------------------------------|----------------------------------------------------------------------------------------------------------------------|-------------------------------------------|
| [YOLOExplorer](https://github.com/lancedb/yoloexplorer) | Iterate on your YOLO / CV datasets using SQL, Vector semantic search, and more within seconds | ![YOLOExplorer](https://github.com/lancedb/vectordb-recipes/assets/15766192/ae513a29-8f15-4e0b-99a1-ccd8272b6131) |
| [Website Chatbot (Deployable Vercel Template)](https://github.com/lancedb/lancedb-vercel-chatbot) | Create a chatbot from the sitemap of any website/docs of your choice. Built using vectorDB serverless native javascript package. | ![Chatbot](../assets/vercel-template.gif) |

View File

@@ -0,0 +1,19 @@
# Examples
Here are some of the examples, projects and applications using vectordb native javascript library.
Some examples are covered in detail in the next sections. You can find more on [VectorDB Recipes](https://github.com/lancedb/vectordb-recipes)
| Example | Scripts |
|-------- | ------ |
| | |
| [Youtube transcript search bot](https://github.com/lancedb/vectordb-recipes/tree/main/examples/youtube_bot/) | [![JavaScript](https://img.shields.io/badge/javascript-%23323330.svg?style=for-the-badge&logo=javascript&logoColor=%23F7DF1E)](https://github.com/lancedb/vectordb-recipes/tree/main/examples/youtube_bot/index.js)|
| [Langchain: Code Docs QA bot](https://github.com/lancedb/vectordb-recipes/tree/main/examples/Code-Documentation-QA-Bot/) | [![JavaScript](https://img.shields.io/badge/javascript-%23323330.svg?style=for-the-badge&logo=javascript&logoColor=%23F7DF1E)](https://github.com/lancedb/vectordb-recipes/tree/main/examples/Code-Documentation-QA-Bot/index.js)|
| [AI Agents: Reducing Hallucination](https://github.com/lancedb/vectordb-recipes/tree/main/examples/reducing_hallucinations_ai_agents/) | [![JavaScript](https://img.shields.io/badge/javascript-%23323330.svg?style=for-the-badge&logo=javascript&logoColor=%23F7DF1E)](https://github.com/lancedb/vectordb-recipes/tree/main/examples/reducing_hallucinations_ai_agents/index.js)|
| [TransformersJS Embedding example](https://github.com/lancedb/vectordb-recipes/tree/main/examples/js-transformers/) | [![JavaScript](https://img.shields.io/badge/javascript-%23323330.svg?style=for-the-badge&logo=javascript&logoColor=%23F7DF1E)](https://github.com/lancedb/vectordb-recipes/tree/main/examples/js-transformers/index.js) |
## Projects & Applications
| Project Name | Description | Screenshot |
|-----------------------------------------------------|----------------------------------------------------------------------------------------------------------------------|-------------------------------------------|
| [YOLOExplorer](https://github.com/lancedb/yoloexplorer) | Iterate on your YOLO / CV datasets using SQL, Vector semantic search, and more within seconds | ![YOLOExplorer](https://github.com/lancedb/vectordb-recipes/assets/15766192/ae513a29-8f15-4e0b-99a1-ccd8272b6131) |
| [Website Chatbot (Deployable Vercel Template)](https://github.com/lancedb/lancedb-vercel-chatbot) | Create a chatbot from the sitemap of any website/docs of your choice. Built using vectorDB serverless native javascript package. | ![Chatbot](../assets/vercel-template.gif) |

View File

@@ -0,0 +1,61 @@
# LanceDB Chatbot - Vercel Next.js Template
Use an AI chatbot with website context retrieved from a vector store like LanceDB. LanceDB is lightweight and can be embedded directly into Next.js, with data stored on-prem.
## One click deploy on Vercel
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Flancedb%2Flancedb-vercel-chatbot&env=OPENAI_API_KEY&envDescription=OpenAI%20API%20Key%20for%20chat%20completion.&project-name=lancedb-vercel-chatbot&repository-name=lancedb-vercel-chatbot&demo-title=LanceDB%20Chatbot%20Demo&demo-description=Demo%20website%20chatbot%20with%20LanceDB.&demo-url=https%3A%2F%2Flancedb.vercel.app&demo-image=https%3A%2F%2Fi.imgur.com%2FazVJtvr.png)
![Demo website landing page](../assets/vercel-template.gif)
## Development
First, rename `.env.example` to `.env.local`, and fill out `OPENAI_API_KEY` with your OpenAI API key. You can get one [here](https://openai.com/blog/openai-api).
Run the development server:
```bash
npm run dev
# or
yarn dev
# or
pnpm dev
```
Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.
## Learn More
To learn more about LanceDB or Next.js, take a look at the following resources:
- [LanceDB Documentation](https://lancedb.github.io/lancedb/) - learn about LanceDB, the developer-friendly serverless vector database.
- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
## LanceDB on Next.js and Vercel
FYI: these configurations have been pre-implemented in this template.
Since LanceDB contains a prebuilt Node binary, you must configure `next.config.js` to exclude it from webpack. This is required for both using Next.js and deploying on Vercel.
```js
/** @type {import('next').NextConfig} */
module.exports = ({
webpack(config) {
config.externals.push({ vectordb: 'vectordb' })
return config;
}
})
```
To deploy on Vercel, we need to make sure that the NodeJS runtime static file analysis for Vercel can find the binary, since LanceDB uses dynamic imports by default. We can do this by modifying `package.json` in the `scripts` section.
```json
{
...
"scripts": {
...
"vercel-build": "sed -i 's/nativeLib = require(`@lancedb\\/vectordb-\\${currentTarget()}`);/nativeLib = require(`@lancedb\\/vectordb-linux-x64-gnu`);/' node_modules/vectordb/native.js && next build",
...
},
...
}
```

View File

@@ -7,7 +7,7 @@
<a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/youtube_bot/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"> <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/youtube_bot/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab">
Scripts - [![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)](./examples/youtube_bot/main.py) [![JavaScript](https://img.shields.io/badge/javascript-%23323330.svg?style=for-the-badge&logo=javascript&logoColor=%23F7DF1E)](./examples/youtube_bot/index.js) Scripts - [![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)](https://github.com/lancedb/vectordb-recipes/examples/youtube_bot/main.py) [![JavaScript](https://img.shields.io/badge/javascript-%23323330.svg?style=for-the-badge&logo=javascript&logoColor=%23F7DF1E)](https://github.com/lancedb/vectordb-recipes/examples/youtube_bot/index.js)
This example is in a [notebook](https://github.com/lancedb/lancedb/blob/main/docs/src/notebooks/youtube_transcript_search.ipynb) This example is in a [notebook](https://github.com/lancedb/lancedb/blob/main/docs/src/notebooks/youtube_transcript_search.ipynb)

View File

@@ -1,4 +1,5 @@
A Table is a collection of Records in a LanceDB Database. <a href="https://colab.research.google.com/github/lancedb/lancedb/blob/main/docs/src/notebooks/tables_guide.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a><br/>
A Table is a collection of Records in a LanceDB Database. You can follow along on colab!
## Creating a LanceDB Table ## Creating a LanceDB Table
@@ -63,6 +64,25 @@ A Table is a collection of Records in a LanceDB Database.
table = db.create_table("table3", data, schema=custom_schema) table = db.create_table("table3", data, schema=custom_schema)
``` ```
### From PyArrow Tables
You can also create LanceDB tables directly from pyarrow tables
```python
table = pa.Table.from_arrays(
[
pa.array([[3.1, 4.1], [5.9, 26.5]],
pa.list_(pa.float32(), 2)),
pa.array(["foo", "bar"]),
pa.array([10.0, 20.0]),
],
["vector", "item", "price"],
)
db = lancedb.connect("db")
tbl = db.create_table("test1", table)
```
### From Pydantic Models ### From Pydantic Models
LanceDB supports to create Apache Arrow Schema from a Pydantic BaseModel via pydantic_to_schema() method. LanceDB supports to create Apache Arrow Schema from a Pydantic BaseModel via pydantic_to_schema() method.
@@ -86,10 +106,14 @@ A Table is a collection of Records in a LanceDB Database.
table = db.create_table(table_name, schema=Content.to_arrow_schema()) table = db.create_table(table_name, schema=Content.to_arrow_schema())
``` ```
### Using RecordBatch Iterator / Writing Large Datasets ### Using Iterators / Writing Large Datasets
It is recommended to use a RecordBatch iterator to add large datasets in batches when creating your table in one go. This does not create multiple versions of your dataset unlike manually adding batches using `table.add()` It is recommended to use iterators to add large datasets in batches when creating your table in one go. This does not create multiple versions of your dataset unlike manually adding batches using `table.add()`
LanceDB additionally supports pyarrow's `RecordBatch` Iterators or other generators producing supported data types.
Here's an example using a `RecordBatch` iterator for creating tables.
```python ```python
import pyarrow as pa import pyarrow as pa
@@ -97,7 +121,8 @@ A Table is a collection of Records in a LanceDB Database.
for i in range(5): for i in range(5):
yield pa.RecordBatch.from_arrays( yield pa.RecordBatch.from_arrays(
[ [
pa.array([[3.1, 4.1], [5.9, 26.5]]), pa.array([[3.1, 4.1], [5.9, 26.5]],
pa.list_(pa.float32(), 2)),
pa.array(["foo", "bar"]), pa.array(["foo", "bar"]),
pa.array([10.0, 20.0]), pa.array([10.0, 20.0]),
], ],
@@ -105,7 +130,7 @@ A Table is a collection of Records in a LanceDB Database.
) )
schema = pa.schema([ schema = pa.schema([
pa.field("vector", pa.list_(pa.float32())), pa.field("vector", pa.list_(pa.float32(), 2)),
pa.field("item", pa.utf8()), pa.field("item", pa.utf8()),
pa.field("price", pa.float32()), pa.field("price", pa.float32()),
]) ])
@@ -113,20 +138,7 @@ A Table is a collection of Records in a LanceDB Database.
db.create_table("table4", make_batches(), schema=schema) db.create_table("table4", make_batches(), schema=schema)
``` ```
You can also use Pandas dataframe directly in the above example by converting it to `RecordBatch` object You can also use iterators of other types like Pandas dataframe or Pylists directly in the above example.
```python
import pandas as pd
import pyarrow as pa
df = pd.DataFrame({'vector': [[0,1], [2,3], [4,5],[6,7]],
'month': [3, 5, 7, 9],
'day': [1, 5, 9, 13],
'n_legs': [2, 4, 5, 100],
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]})
batch = pa.RecordBatch.from_pandas(df)
```
## Creating Empty Table ## Creating Empty Table
You can also create empty tables in python. Initialize it with schema and later ingest data into it. You can also create empty tables in python. Initialize it with schema and later ingest data into it.
@@ -160,7 +172,6 @@ A Table is a collection of Records in a LanceDB Database.
tbl = db.create_table("table5", schema=Model.to_arrow_schema()) tbl = db.create_table("table5", schema=Model.to_arrow_schema())
``` ```
=== "Javascript/Typescript" === "Javascript/Typescript"
@@ -235,23 +246,21 @@ After a table has been created, you can always add more data to it using
tbl.add(df) tbl.add(df)
``` ```
You can also add a large dataset batch in one go using pyArrow RecordBatch Iterator. You can also add a large dataset batch in one go using Iterator of any supported data types.
### Adding RecordBatch Iterator ### Adding to table using Iterator
```python ```python
import pyarrow as pa import pandas as pd
def make_batches(): def make_batches():
for i in range(5): for i in range(5):
yield pa.RecordBatch.from_arrays( yield pd.DataFrame(
[ {
pa.array([[3.1, 4.1], [5.9, 26.5]]), "vector": [[3.1, 4.1], [1, 1]],
pa.array(["foo", "bar"]), "item": ["foo", "bar"],
pa.array([10.0, 20.0]), "price": [10.0, 20.0],
], })
["vector", "item", "price"],
)
tbl.add(make_batches()) tbl.add(make_batches())
``` ```
@@ -283,8 +292,6 @@ Use the `delete()` method on tables to delete rows from a table. To choose which
tbl.delete('item = "fizz"') tbl.delete('item = "fizz"')
``` ```
## Examples
### Deleting row with specific column value ### Deleting row with specific column value
```python ```python

View File

@@ -1,20 +1,23 @@
# Welcome to LanceDB's Documentation # LanceDB
LanceDB is an open-source database for vector-search built with persistent storage, which greatly simplifies retrevial, filtering and management of embeddings. LanceDB is an open-source database for vector-search built with persistent storage, which greatly simplifies retrieval, filtering and management of embeddings.
![Illustration](/lancedb/assets/ecosystem-illustration.png)
The key features of LanceDB include: The key features of LanceDB include:
* Production-scale vector search with no servers to manage.
* Store, query and filter vectors, metadata and multi-modal data (text, images, videos, point clouds, and more). * Store, query and filter vectors, metadata and multi-modal data (text, images, videos, point clouds, and more).
* Support for vector similarity search, full-text search and SQL. * Support for production-scale vector similarity search, full-text search and SQL, with no servers to manage.
* Native Python and Javascript/Typescript support. * Native Python and Javascript/Typescript support.
* Zero-copy, automatic versioning, manage versions of your data without needing extra infrastructure. * Zero-copy, automatic versioning, manage versions of your data without needing extra infrastructure.
* Ecosystem integrations with [LangChain 🦜️🔗](https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lancedb.html), [LlamaIndex 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way. * Persisted on HDD, allowing scalability without breaking the bank.
* Ingest your favorite data formats directly, like pandas DataFrames, Pydantic objects and more.
LanceDB's core is written in Rust 🦀 and is built using <a href="https://github.com/lancedb/lance">Lance</a>, an open-source columnar format designed for performant ML workloads. LanceDB's core is written in Rust 🦀 and is built using <a href="https://github.com/lancedb/lance">Lance</a>, an open-source columnar format designed for performant ML workloads.

View File

@@ -0,0 +1,21 @@
# Integrations
## Data Formats
LanceDB supports ingesting from your favorite data tools.
![Illustration](/lancedb/assets/ecosystem-illustration.png)
## Tools
LanceDB is integrated with most of the popular AI tools, with more coming soon.
Get started using these examples and quick links.
| Integrations | |
|---|---:|
| <h3> LlamaIndex </h3>LlamaIndex is a simple, flexible data framework for connecting custom data sources to large language models. LlamaIndex integrates with LanceDB as the serverless VectorDB. <h3>[Learn More](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html) </h3> |<img src="../assets/llama-index.jpg" alt="image" width="150" height="auto">|
| <h3>Langchain</h3>Langchain allows building applications with LLMs through composability <h3>[Learn More](https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lancedb.html) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
| <h3>Langchain TS</h3> Javascript bindings for Langchain. It integrates with LanceDB's serverless vectordb allowing you to build powerful AI applications through composability using only serverless functions. <h3>[Learn More]( https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/lancedb) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
| <h3>Voxel51</h3> It is an open source toolkit that enables you to build better computer vision workflows by improving the quality of your datasets and delivering insights about your models.<h3>[Learn More](./voxel51.md) | <img src="../assets/voxel.gif" alt="image" width="150" height="auto">|
| <h3>PromptTools</h3> Offers a set of free, open-source tools for testing and experimenting with models, prompts, and configurations. The core idea is to enable developers to evaluate prompts using familiar interfaces like code and notebooks. You can use it to experiment with different configurations of LanceDB, and test how LanceDB integrates with the LLM of your choice.<h3>[Learn More](./prompttools.md) | <img src="../assets/prompttools.jpeg" alt="image" width="150" height="auto">|

View File

@@ -0,0 +1,7 @@
[PromptTools](https://github.com/hegelai/prompttools) offers a set of free, open-source tools for testing and experimenting with models, prompts, and configurations. The core idea is to enable developers to evaluate prompts using familiar interfaces like code and notebooks. You can use it to experiment with different configurations of LanceDB, and test how LanceDB integrates with the LLM of your choice.
[Evaluating Prompts with PromptTools](./examples/prompttools-eval-prompts/) | <a href="https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/prompttools-eval-prompts/main.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
![Alt text](https://prompttools.readthedocs.io/en/latest/_images/demo.gif "a title")

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,62 @@
id,quote,author
1,"Nobody exists on purpose. Nobody belongs anywhere.",Morty
2,"We're all going to die. Come watch TV.",Morty
3,"Losers look stuff up while the rest of us are carpin' all them diems.",Summer
4,"He's not a hot girl. He can't just bail on his life and set up shop in someone else's.",Beth
5,"When you are an a—hole, it doesn't matter how right you are. Nobody wants to give you the satisfaction.",Morty
6,"God's turning people into insect monsters, Beth. I'm the one beating them to death. Thank me.",Jerry
7,"Camping is just being homeless without the change.",Summer
8,"This seems like a good time for a drink and a cold, calculated speech with sinister overtones. A speech about politics, about order, brotherhood, power ... but speeches are for campaigning. Now is the time for action.",Morty
9,"Having a family doesn't mean that you stop being an individual. You know the best thing you can do for the people that depend on you? Be honest with them, even if it means setting them free.",Mr. Meeseeks
10,"If I've learned one thing, it's that before you get anywhere in life, you gotta stop listening to yourself.",Jerry
11,"I just want to go back to Hell, where everyone thinks I'm smart and funny.",Mr. Needful
12,"Hi Mr. Jellybean, I'm Morty. Im on an adventure with my grandpa.",Morty
13,"You're not the cause of your parents' misery. You're just a symptom of it.",Summer
14,"Don't deify the people who leave you.",Beth
15,"Well, then get your s—t together, get it all together, and put it in a backpack, all your s—t, so it's together. And if you gotta take it somewhere, take it somewhere, you know, take it to the s—t store and sell it, or put it in the s—t museum. I don't care what you do, you just gotta get it together. Get your s—t together.",Morty
16,"At least the devil has a job!",Summer
17,"Life is effort and I'll stop when I die!",Jerry
18,"I just killed my family! I don't care what they were!",Morty
19,"It's funny to say they are small. It's funny to say they are big.",Shrimply Pibbles
20,"You're holding me verbally hostage.",Summer
21,"Honey, stop raising your father's cholesterol so you can take a hot funeral selfie.",Beth
22,"Rick, when you say you made an exact replica of the house, did you mean, like, an exact replica?",Morty
23,"Give a gun to the lady who got pregnant with me too early and constantly makes it our problem.",Summer
24,"Say goodbye to your precious dry land! For soon it will be wet!",Mr. Nimbus
25,"Nobody's smarter than Rick, but nobody else is my dad. You're a genius at that.",Morty
26,"B—h, my generation gets traumatized for breakfast.",Summer
27,"Inception made sense!",Morty
28,"I realize now I'm attracted to you for the same reason I can't be with you: You can't change. And I have no problem with that, but it clearly means I have a problem with myself.",Unity
29,"Mr. President, if I've learned one thing today, it's that sometimes you have to not give a f—k!",Morty
30,"I didn't know freedom meant people doing stuff that sucks.",Summer
31,"How many of these are just horrible mistakes I made? I mean, maybe I'd stop making so many if I let myself learn from them.",Morty
32,"I'm a scientist because I invent, transform, create, and destroy for a living. And when I don't like something about the world, I change it.",Rick
33,"Wubba lubba dub dub!",Rick
34,"I turned myself into a pickle, Morty! I'm Pickle Rick!",Rick
35,"I know about the Yosemite T-shirt, Morty.",Rick
36,"The universe is basically an animal. It grazes on the ordinary. It creates infinite idiots just to eat them.",Rick
37,"If I die in a cage, I lose a bet.",Rick
38,"Sometimes science is more art than science.",Rick
39,"To live is to risk it all—otherwise, you're just an inert chunk of randomly assembled molecules drifting wherever the universe blows you.",Rick
40,"Welcome to the club, pal.",Rick
41,"So I have an emo streak. It's part of what makes me so rad.",Rick
42,"Listen, I'm not the nicest guy in the universe, because I'm the smartest, and being nice is something stupid people do to hedge their bets.",Rick
43,"Wait a minute! Is that Mountain Dew in my quantum-transport-solution?",Rick
44,"Listen, Morty, I hate to break it to you, but what people call 'love' is just a chemical reaction that compels animals to breed.",Rick
45,"Break the cycle, Morty. Rise above. Focus on science.",Rick
46,"Don't get drawn into the culture, Morty. Stealing stuff is about the stuff, not the stealing.",Rick
47,"I'm sorry, but your opinion means very little to me.",Rick
48,"You don't get to tell anyone what's sad. Youre like a one-man Mount Sadmore. So I guess like a Lincoln Sadmorial.",Rick
49,"This pickle doesn't care about your children. I'm not gonna take their dreams. I'm gonna take their parents.",Rick
50,"I programmed you to believe that.",Rick
51,"Have fun with empowerment. It seems to make everyone that gets it really happy.",Rick
52,"Thanks, Mr. Poopybutthole. I always could count on you.",Rick
53,"Weddings are basically funerals with a cake.",Rick
54,"I mean, if you spend all day shuffling words around, you can make anything sound bad, Morty.",Rick
55,"It's your choice to take this personally.",Rick
56,"Excuse me, coming through. What are you here for? Just kidding, I don't care.",Rick
57,"If I let you make me nervous, then we can't get schwifty.",Rick
58,"Oh, boy, so you actually learned something today? What is this, Full House?",Rick
59,"I can't abide bureaucracy. I don't like being told where to go and what to do. I consider it a violation. Did you get those seeds all the way up your butt?",Rick
60,"I think you have to think ahead and live in the moment.",Rick
61,"I know that new situations can be intimidating. You're lookin' around and it's all scary and different, but you know, meeting them head-on, charging into 'em like a bull—that's how we grow as people.",Rick
1 id quote author
2 1 Nobody exists on purpose. Nobody belongs anywhere. Morty
3 2 We're all going to die. Come watch TV. Morty
4 3 Losers look stuff up while the rest of us are carpin' all them diems. Summer
5 4 He's not a hot girl. He can't just bail on his life and set up shop in someone else's. Beth
6 5 When you are an a—hole, it doesn't matter how right you are. Nobody wants to give you the satisfaction. Morty
7 6 God's turning people into insect monsters, Beth. I'm the one beating them to death. Thank me. Jerry
8 7 Camping is just being homeless without the change. Summer
9 8 This seems like a good time for a drink and a cold, calculated speech with sinister overtones. A speech about politics, about order, brotherhood, power ... but speeches are for campaigning. Now is the time for action. Morty
10 9 Having a family doesn't mean that you stop being an individual. You know the best thing you can do for the people that depend on you? Be honest with them, even if it means setting them free. Mr. Meeseeks
11 10 If I've learned one thing, it's that before you get anywhere in life, you gotta stop listening to yourself. Jerry
12 11 I just want to go back to Hell, where everyone thinks I'm smart and funny. Mr. Needful
13 12 Hi Mr. Jellybean, I'm Morty. I’m on an adventure with my grandpa. Morty
14 13 You're not the cause of your parents' misery. You're just a symptom of it. Summer
15 14 Don't deify the people who leave you. Beth
16 15 Well, then get your s—t together, get it all together, and put it in a backpack, all your s—t, so it's together. And if you gotta take it somewhere, take it somewhere, you know, take it to the s—t store and sell it, or put it in the s—t museum. I don't care what you do, you just gotta get it together. Get your s—t together. Morty
17 16 At least the devil has a job! Summer
18 17 Life is effort and I'll stop when I die! Jerry
19 18 I just killed my family! I don't care what they were! Morty
20 19 It's funny to say they are small. It's funny to say they are big. Shrimply Pibbles
21 20 You're holding me verbally hostage. Summer
22 21 Honey, stop raising your father's cholesterol so you can take a hot funeral selfie. Beth
23 22 Rick, when you say you made an exact replica of the house, did you mean, like, an exact replica? Morty
24 23 Give a gun to the lady who got pregnant with me too early and constantly makes it our problem. Summer
25 24 Say goodbye to your precious dry land! For soon it will be wet! Mr. Nimbus
26 25 Nobody's smarter than Rick, but nobody else is my dad. You're a genius at that. Morty
27 26 B—h, my generation gets traumatized for breakfast. Summer
28 27 Inception made sense! Morty
29 28 I realize now I'm attracted to you for the same reason I can't be with you: You can't change. And I have no problem with that, but it clearly means I have a problem with myself. Unity
30 29 Mr. President, if I've learned one thing today, it's that sometimes you have to not give a f—k! Morty
31 30 I didn't know freedom meant people doing stuff that sucks. Summer
32 31 How many of these are just horrible mistakes I made? I mean, maybe I'd stop making so many if I let myself learn from them. Morty
33 32 I'm a scientist because I invent, transform, create, and destroy for a living. And when I don't like something about the world, I change it. Rick
34 33 Wubba lubba dub dub! Rick
35 34 I turned myself into a pickle, Morty! I'm Pickle Rick! Rick
36 35 I know about the Yosemite T-shirt, Morty. Rick
37 36 The universe is basically an animal. It grazes on the ordinary. It creates infinite idiots just to eat them. Rick
38 37 If I die in a cage, I lose a bet. Rick
39 38 Sometimes science is more art than science. Rick
40 39 To live is to risk it all—otherwise, you're just an inert chunk of randomly assembled molecules drifting wherever the universe blows you. Rick
41 40 Welcome to the club, pal. Rick
42 41 So I have an emo streak. It's part of what makes me so rad. Rick
43 42 Listen, I'm not the nicest guy in the universe, because I'm the smartest, and being nice is something stupid people do to hedge their bets. Rick
44 43 Wait a minute! Is that Mountain Dew in my quantum-transport-solution? Rick
45 44 Listen, Morty, I hate to break it to you, but what people call 'love' is just a chemical reaction that compels animals to breed. Rick
46 45 Break the cycle, Morty. Rise above. Focus on science. Rick
47 46 Don't get drawn into the culture, Morty. Stealing stuff is about the stuff, not the stealing. Rick
48 47 I'm sorry, but your opinion means very little to me. Rick
49 48 You don't get to tell anyone what's sad. You’re like a one-man Mount Sadmore. So I guess like a Lincoln Sadmorial. Rick
50 49 This pickle doesn't care about your children. I'm not gonna take their dreams. I'm gonna take their parents. Rick
51 50 I programmed you to believe that. Rick
52 51 Have fun with empowerment. It seems to make everyone that gets it really happy. Rick
53 52 Thanks, Mr. Poopybutthole. I always could count on you. Rick
54 53 Weddings are basically funerals with a cake. Rick
55 54 I mean, if you spend all day shuffling words around, you can make anything sound bad, Morty. Rick
56 55 It's your choice to take this personally. Rick
57 56 Excuse me, coming through. What are you here for? Just kidding, I don't care. Rick
58 57 If I let you make me nervous, then we can't get schwifty. Rick
59 58 Oh, boy, so you actually learned something today? What is this, Full House? Rick
60 59 I can't abide bureaucracy. I don't like being told where to go and what to do. I consider it a violation. Did you get those seeds all the way up your butt? Rick
61 60 I think you have to think ahead and live in the moment. Rick
62 61 I know that new situations can be intimidating. You're lookin' around and it's all scary and different, but you know, meeting them head-on, charging into 'em like a bull—that's how we grow as people. Rick

View File

@@ -0,0 +1,831 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d24eb4c6-e246-44ca-ba7c-6eae7923bd4c",
"metadata": {},
"source": [
"## LanceDB Tables\n",
"A Table is a collection of Records in a LanceDB Database.\n",
"\n",
"![illustration](../assets/ecosystem-illustration.png)"
]
},
{
"cell_type": "code",
"execution_count": 50,
"id": "c1b4e34b-a49c-471d-a343-a5940bb5138a",
"metadata": {},
"outputs": [],
"source": [
"!pip install lancedb -qq"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "4e5a8d07-d9a1-48c1-913a-8e0629289579",
"metadata": {},
"outputs": [],
"source": [
"import lancedb\n",
"db = lancedb.connect(\"./.lancedb\")"
]
},
{
"cell_type": "markdown",
"id": "66fb93d5-3551-406b-99b2-488442d61d06",
"metadata": {},
"source": [
    "LanceDB allows ingesting data from various sources - `dict`, `list[dict]`, `pd.DataFrame`, `pa.Table` or an `Iterator[pa.RecordBatch]`. Let's take a look at some of these.\n",
"\n",
" ### From list of tuples or dictionaries"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "5df12f66-8d99-43ad-8d0b-22189ec0a6b9",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"pyarrow.Table\n",
"vector: fixed_size_list<item: float>[2]\n",
" child 0, item: float\n",
"lat: double\n",
"long: double\n",
"----\n",
"vector: [[[1.1,1.2],[0.2,1.8]]]\n",
"lat: [[45.5,40.1]]\n",
"long: [[-122.7,-74.1]]"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import lancedb\n",
"\n",
"db = lancedb.connect(\"./.lancedb\")\n",
"\n",
"data = [{\"vector\": [1.1, 1.2], \"lat\": 45.5, \"long\": -122.7},\n",
" {\"vector\": [0.2, 1.8], \"lat\": 40.1, \"long\": -74.1}]\n",
"\n",
"db.create_table(\"my_table\", data)\n",
"\n",
"db[\"my_table\"].head()"
]
},
{
"cell_type": "markdown",
"id": "10ce802f-1a10-49ee-8ee3-a9bfb302d86c",
"metadata": {},
"source": [
"## From pandas DataFrame\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "f4d87ae9-0ccb-48eb-b31d-bb8f2370e47e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"pyarrow.Table\n",
"vector: fixed_size_list<item: float>[2]\n",
" child 0, item: float\n",
"lat: double\n",
"long: double\n",
"----\n",
"vector: [[[1.1,1.2],[0.2,1.8]]]\n",
"lat: [[45.5,40.1]]\n",
"long: [[-122.7,-74.1]]"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import pandas as pd\n",
"\n",
"data = pd.DataFrame({\n",
" \"vector\": [[1.1, 1.2], [0.2, 1.8]],\n",
" \"lat\": [45.5, 40.1],\n",
" \"long\": [-122.7, -74.1]\n",
"})\n",
"\n",
"db.create_table(\"table2\", data)\n",
"\n",
"db[\"table2\"].head() "
]
},
{
"cell_type": "markdown",
"id": "4be81469-5b57-4f78-9c72-3938c0378d9d",
"metadata": {},
"source": [
"Data is converted to Arrow before being written to disk. For maximum control over how data is saved, either provide the PyArrow schema to convert to or else provide a PyArrow Table directly.\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "25f34bcf-fca0-4431-8601-eac95d1bd347",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"vector: fixed_size_list<item: float>[2]\n",
" child 0, item: float\n",
"lat: float\n",
"long: float"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import pyarrow as pa\n",
"\n",
"custom_schema = pa.schema([\n",
"pa.field(\"vector\", pa.list_(pa.float32(), 2)),\n",
"pa.field(\"lat\", pa.float32()),\n",
"pa.field(\"long\", pa.float32())\n",
"])\n",
"\n",
"table = db.create_table(\"table3\", data, schema=custom_schema, mode=\"overwrite\")\n",
"table.schema"
]
},
{
"cell_type": "markdown",
"id": "4df51925-7ca2-4005-9c72-38b3d26240c6",
"metadata": {},
"source": [
"### From PyArrow Tables\n",
"\n",
"You can also create LanceDB tables directly from pyarrow tables"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "90a880f6-be43-4c9d-ba65-0b05197c0f6f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"vector: fixed_size_list<item: float>[2]\n",
" child 0, item: float\n",
"item: string\n",
"price: double"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"table = pa.Table.from_arrays(\n",
" [\n",
" pa.array([[3.1, 4.1], [5.9, 26.5]],\n",
" pa.list_(pa.float32(), 2)),\n",
" pa.array([\"foo\", \"bar\"]),\n",
" pa.array([10.0, 20.0]),\n",
" ],\n",
" [\"vector\", \"item\", \"price\"],\n",
" )\n",
"\n",
"db = lancedb.connect(\"db\")\n",
"\n",
"tbl = db.create_table(\"test1\", table, mode=\"overwrite\")\n",
"tbl.schema"
]
},
{
"cell_type": "markdown",
"id": "0f36c51c-d902-449d-8292-700e53990c32",
"metadata": {},
"source": [
"### From Pydantic Models\n",
"\n",
"LanceDB supports to create Apache Arrow Schema from a Pydantic BaseModel."
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "d81121d7-e4b7-447c-a48c-974b6ebb464a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"movie_id: int64 not null\n",
"vector: fixed_size_list<item: float>[128] not null\n",
" child 0, item: float\n",
"genres: string not null\n",
"title: string not null\n",
"imdb_id: int64 not null"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from lancedb.pydantic import vector, LanceModel\n",
"\n",
"class Content(LanceModel):\n",
" movie_id: int\n",
" vector: vector(128)\n",
" genres: str\n",
" title: str\n",
" imdb_id: int\n",
" \n",
" @property\n",
" def imdb_url(self) -> str:\n",
" return f\"https://www.imdb.com/title/tt{self.imdb_id}\"\n",
"\n",
"import pyarrow as pa\n",
"db = lancedb.connect(\"~/.lancedb\")\n",
"table_name = \"movielens_small\"\n",
"table = db.create_table(table_name, schema=Content)\n",
"table.schema"
]
},
{
"cell_type": "markdown",
"id": "860e1f77-e860-46a9-98b7-b2979092ccd6",
"metadata": {},
"source": [
"### Using Iterators / Writing Large Datasets\n",
"\n",
    "It is recommended to use iterators to add large datasets in batches when creating your table in one go. This does not create multiple versions of your dataset unlike manually adding batches using `table.add()`\n",
"\n",
"LanceDB additionally supports pyarrow's `RecordBatch` Iterators or other generators producing supported data types.\n",
"\n",
    "## Here's an example using `RecordBatch` iterator for creating tables."
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "bc247142-4e3c-41a2-b94c-8e00d2c2a508",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"LanceTable(table4)"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import pyarrow as pa\n",
"\n",
"def make_batches():\n",
" for i in range(5):\n",
" yield pa.RecordBatch.from_arrays(\n",
" [\n",
" pa.array([[3.1, 4.1], [5.9, 26.5]],\n",
" pa.list_(pa.float32(), 2)),\n",
" pa.array([\"foo\", \"bar\"]),\n",
" pa.array([10.0, 20.0]),\n",
" ],\n",
" [\"vector\", \"item\", \"price\"],\n",
" )\n",
"\n",
"schema = pa.schema([\n",
" pa.field(\"vector\", pa.list_(pa.float32(), 2)),\n",
" pa.field(\"item\", pa.utf8()),\n",
" pa.field(\"price\", pa.float32()),\n",
"])\n",
"\n",
"db.create_table(\"table4\", make_batches(), schema=schema)"
]
},
{
"cell_type": "markdown",
"id": "94f7dd2b-bae4-4bdf-8534-201437c31027",
"metadata": {},
"source": [
"### Using pandas `DataFrame` Iterator and Pydantic Schema\n",
"\n",
"You can set the schema via pyarrow schema object or using Pydantic object"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "25ad3523-e0c9-4c28-b3df-38189c4e0e5f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"vector: fixed_size_list<item: float>[2] not null\n",
" child 0, item: float\n",
"item: string not null\n",
"price: double not null"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import pyarrow as pa\n",
"import pandas as pd\n",
"\n",
"class PydanticSchema(LanceModel):\n",
" vector: vector(2)\n",
" item: str\n",
" price: float\n",
"\n",
"def make_batches():\n",
" for i in range(5):\n",
" yield pd.DataFrame(\n",
" {\n",
" \"vector\": [[3.1, 4.1], [1, 1]],\n",
" \"item\": [\"foo\", \"bar\"],\n",
" \"price\": [10.0, 20.0],\n",
" })\n",
"\n",
"tbl = db.create_table(\"table5\", make_batches(), schema=PydanticSchema)\n",
"tbl.schema"
]
},
{
"cell_type": "markdown",
"id": "4aa955e9-fcd0-4c99-b644-f218f3bb3f1a",
"metadata": {},
"source": [
"## Creating Empty Table\n",
"\n",
"You can create an empty table by just passing the schema and later add to it using `table.add()`"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "2814173a-eacc-4dd8-a64d-6312b44582cc",
"metadata": {},
"outputs": [],
"source": [
"import lancedb\n",
"from lancedb.pydantic import LanceModel, vector\n",
"\n",
"class Model(LanceModel):\n",
" vector: vector(2)\n",
"\n",
"tbl = db.create_table(\"table6\", schema=Model.to_arrow_schema())"
]
},
{
"cell_type": "markdown",
"id": "1d1b0f5c-a1d9-459f-8614-8376b6f577e1",
"metadata": {},
"source": [
"## Open Existing Tables\n",
"\n",
"If you forget the name of your table, you can always get a listing of all table names:\n"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "df9e13c0-41f6-437f-9dfa-2fd71d3d9c45",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['table6', 'table4', 'table5', 'movielens_small']"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"db.table_names()"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "9343f5ad-6024-42ee-ac2f-6c1471df8679",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>vector</th>\n",
" <th>item</th>\n",
" <th>price</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>[3.1, 4.1]</td>\n",
" <td>foo</td>\n",
" <td>10.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>[5.9, 26.5]</td>\n",
" <td>bar</td>\n",
" <td>20.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>[3.1, 4.1]</td>\n",
" <td>foo</td>\n",
" <td>10.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>[5.9, 26.5]</td>\n",
" <td>bar</td>\n",
" <td>20.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>[3.1, 4.1]</td>\n",
" <td>foo</td>\n",
" <td>10.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>[5.9, 26.5]</td>\n",
" <td>bar</td>\n",
" <td>20.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6</th>\n",
" <td>[3.1, 4.1]</td>\n",
" <td>foo</td>\n",
" <td>10.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7</th>\n",
" <td>[5.9, 26.5]</td>\n",
" <td>bar</td>\n",
" <td>20.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>8</th>\n",
" <td>[3.1, 4.1]</td>\n",
" <td>foo</td>\n",
" <td>10.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>9</th>\n",
" <td>[5.9, 26.5]</td>\n",
" <td>bar</td>\n",
" <td>20.0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" vector item price\n",
"0 [3.1, 4.1] foo 10.0\n",
"1 [5.9, 26.5] bar 20.0\n",
"2 [3.1, 4.1] foo 10.0\n",
"3 [5.9, 26.5] bar 20.0\n",
"4 [3.1, 4.1] foo 10.0\n",
"5 [5.9, 26.5] bar 20.0\n",
"6 [3.1, 4.1] foo 10.0\n",
"7 [5.9, 26.5] bar 20.0\n",
"8 [3.1, 4.1] foo 10.0\n",
"9 [5.9, 26.5] bar 20.0"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tbl = db.open_table(\"table4\")\n",
"tbl.to_pandas()"
]
},
{
"cell_type": "markdown",
"id": "5019246f-12e3-4f78-88a8-9f4939802c76",
"metadata": {},
"source": [
"## Adding to table\n",
"After a table has been created, you can always add more data to it using\n",
"\n",
    "You can add any of the valid data structures accepted by LanceDB table, i.e., `dict`, `list[dict]`, `pd.DataFrame`, or an `Iterator[pa.RecordBatch]`. Here are some examples."
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "8a56250f-73a1-4c26-a6ad-5c7a0ce3a9ab",
"metadata": {},
"outputs": [],
"source": [
"df = pd.DataFrame([{\"vector\": [1.3, 1.4], \"item\": \"fizz\", \"price\": 100.0},\n",
" {\"vector\": [9.5, 56.2], \"item\": \"buzz\", \"price\": 200.0}])\n",
"tbl.add(df)"
]
},
{
"cell_type": "markdown",
"id": "9985f6ee-67e1-45a9-b233-94e3d121ecbf",
"metadata": {},
"source": [
    "You can also add a large dataset batch in one go using an Iterator of supported data types\n",
"\n",
"### Adding via Iterator\n",
"\n",
"here, we'll use pandas DataFrame Iterator"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "030c7057-b98e-4e2f-be14-b8c1f927f83c",
"metadata": {},
"outputs": [],
"source": [
"\n",
"import pandas as pd\n",
"\n",
"def make_batches():\n",
" for i in range(5):\n",
" yield pd.DataFrame(\n",
" {\n",
" \"vector\": [[3.1, 4.1], [1, 1]],\n",
" \"item\": [\"foo\", \"bar\"],\n",
" \"price\": [10.0, 20.0],\n",
" })\n",
"tbl.add(make_batches())"
]
},
{
"cell_type": "markdown",
"id": "b8316d5d-0a23-4675-b0ee-178711db873a",
"metadata": {},
"source": [
"## Deleting from a Table\n",
"\n",
"Use the `delete()` method on tables to delete rows from a table. To choose which rows to delete, provide a filter that matches on the metadata columns. This can delete any number of rows that match the filter, like:\n",
"\n",
"\n",
"```python\n",
"tbl.delete('item = \"fizz\"')\n",
"```\n"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "e7a17de2-08d2-41b7-bd05-f63d1045ab1f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"32\n"
]
},
{
"data": {
"text/plain": [
"17"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"print(len(tbl))\n",
" \n",
"tbl.delete(\"price = 20.0\")\n",
" \n",
"len(tbl)"
]
},
{
"cell_type": "markdown",
"id": "74ac180b-5432-4c14-b1a8-22c35ac83af8",
"metadata": {},
"source": [
"### Delete from a list of values"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "fe3310bd-08f4-4a22-a63b-b3127d22f9f7",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" vector item price\n",
"0 [3.1, 4.1] foo 10.0\n",
"1 [3.1, 4.1] foo 10.0\n",
"2 [3.1, 4.1] foo 10.0\n",
"3 [3.1, 4.1] foo 10.0\n",
"4 [3.1, 4.1] foo 10.0\n",
"5 [1.3, 1.4] fizz 100.0\n",
"6 [9.5, 56.2] buzz 200.0\n",
"7 [3.1, 4.1] foo 10.0\n",
"8 [3.1, 4.1] foo 10.0\n",
"9 [3.1, 4.1] foo 10.0\n",
"10 [3.1, 4.1] foo 10.0\n",
"11 [3.1, 4.1] foo 10.0\n",
"12 [3.1, 4.1] foo 10.0\n",
"13 [3.1, 4.1] foo 10.0\n",
"14 [3.1, 4.1] foo 10.0\n",
"15 [3.1, 4.1] foo 10.0\n",
"16 [3.1, 4.1] foo 10.0\n"
]
},
{
"ename": "OSError",
"evalue": "LanceError(IO): Error during planning: column foo does not exist",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mOSError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[30], line 4\u001b[0m\n\u001b[1;32m 2\u001b[0m to_remove \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m, \u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(\u001b[38;5;28mstr\u001b[39m(v) \u001b[38;5;28;01mfor\u001b[39;00m v \u001b[38;5;129;01min\u001b[39;00m to_remove)\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28mprint\u001b[39m(tbl\u001b[38;5;241m.\u001b[39mto_pandas())\n\u001b[0;32m----> 4\u001b[0m \u001b[43mtbl\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdelete\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mitem IN (\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mto_remove\u001b[49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m)\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 5\u001b[0m tbl\u001b[38;5;241m.\u001b[39mto_pandas()\n",
"File \u001b[0;32m~/Documents/lancedb/lancedb/python/lancedb/table.py:610\u001b[0m, in \u001b[0;36mLanceTable.delete\u001b[0;34m(self, where)\u001b[0m\n\u001b[1;32m 609\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mdelete\u001b[39m(\u001b[38;5;28mself\u001b[39m, where: \u001b[38;5;28mstr\u001b[39m):\n\u001b[0;32m--> 610\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_dataset\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdelete\u001b[49m\u001b[43m(\u001b[49m\u001b[43mwhere\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Documents/lancedb/lancedb/env/lib/python3.11/site-packages/lance/dataset.py:489\u001b[0m, in \u001b[0;36mLanceDataset.delete\u001b[0;34m(self, predicate)\u001b[0m\n\u001b[1;32m 487\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(predicate, pa\u001b[38;5;241m.\u001b[39mcompute\u001b[38;5;241m.\u001b[39mExpression):\n\u001b[1;32m 488\u001b[0m predicate \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mstr\u001b[39m(predicate)\n\u001b[0;32m--> 489\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_ds\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdelete\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpredicate\u001b[49m\u001b[43m)\u001b[49m\n",
"\u001b[0;31mOSError\u001b[0m: LanceError(IO): Error during planning: column foo does not exist"
]
}
],
"source": [
"to_remove = [\"foo\", \"buzz\"]\n",
"to_remove = \", \".join(str(v) for v in to_remove)\n",
"print(tbl.to_pandas())\n",
"tbl.delete(f\"item IN ({to_remove})\")\n"
]
},
{
"cell_type": "code",
"execution_count": 43,
"id": "87d5bc21-847f-4c81-b56e-f6dbe5d05aac",
"metadata": {},
"outputs": [],
"source": [
"df = pd.DataFrame(\n",
" {\n",
" \"vector\": [[3.1, 4.1], [1, 1]],\n",
" \"item\": [\"foo\", \"bar\"],\n",
" \"price\": [10.0, 20.0],\n",
" })\n",
"\n",
"tbl = db.create_table(\"table7\", data=df, mode=\"overwrite\")"
]
},
{
"cell_type": "code",
"execution_count": 44,
"id": "9cba4519-eb3a-4941-ab7e-873d762e750f",
"metadata": {},
"outputs": [],
"source": [
"to_remove = [10.0, 20.0]\n",
"to_remove = \", \".join(str(v) for v in to_remove)\n",
"\n",
"tbl.delete(f\"price IN ({to_remove})\")"
]
},
{
"cell_type": "code",
"execution_count": 46,
"id": "5bdc9801-d5ed-4871-92d0-88b27108e788",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>vector</th>\n",
" <th>item</th>\n",
" <th>price</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
"Empty DataFrame\n",
"Columns: [vector, item, price]\n",
"Index: []"
]
},
"execution_count": 46,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tbl.to_pandas()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "752d33d4-ce1c-48e5-90d2-c85f0982182d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -28,7 +28,13 @@ pip install lancedb
::: lancedb.embeddings.with_embeddings ::: lancedb.embeddings.with_embeddings
::: lancedb.embeddings.EmbeddingFunction ::: lancedb.embeddings.functions.EmbeddingFunctionRegistry
::: lancedb.embeddings.functions.EmbeddingFunctionModel
::: lancedb.embeddings.functions.TextEmbeddingFunctionModel
::: lancedb.embeddings.functions.SentenceTransformerEmbeddingFunction
## Context ## Context

View File

@@ -3,4 +3,13 @@
--md-primary-fg-color--dark: #4338ca; --md-primary-fg-color--dark: #4338ca;
--md-text-font: ui-sans-serif, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"; --md-text-font: ui-sans-serif, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
--md-code-font: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; --md-code-font: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
} }
.md-nav__item, .md-tabs__item {
font-size: large;
}
/* Maximum space for text block */
.md-grid {
max-width: 90%;
}

View File

@@ -2,20 +2,17 @@ const glob = require("glob");
const fs = require("fs"); const fs = require("fs");
const path = require("path"); const path = require("path");
const excludedFiles = [ const globString = "../src/**/*.md";
const excludedGlobs = [
"../src/fts.md", "../src/fts.md",
"../src/embedding.md", "../src/embedding.md",
"../src/ann_indexes.md", "../src/examples/*.md",
"../src/examples/serverless_lancedb_with_s3_and_lambda.md",
"../src/examples/serverless_qa_bot_with_modal_and_langchain.md",
"../src/examples/transformerjs_embedding_search_nodejs.md",
"../src/examples/youtube_transcript_bot_with_nodejs.md",
"../src/guides/tables.md", "../src/guides/tables.md",
]; ];
const nodePrefix = "javascript"; const nodePrefix = "javascript";
const nodeFile = ".js"; const nodeFile = ".js";
const nodeFolder = "node"; const nodeFolder = "node";
const globString = "../src/**/*.md";
const asyncPrefix = "(async () => {\n"; const asyncPrefix = "(async () => {\n";
const asyncSuffix = "})();"; const asyncSuffix = "})();";
@@ -34,6 +31,7 @@ function* yieldLines(lines, prefix, suffix) {
} }
const files = glob.sync(globString, { recursive: true }); const files = glob.sync(globString, { recursive: true });
const excludedFiles = glob.sync(excludedGlobs, { recursive: true });
for (const file of files.filter((file) => !excludedFiles.includes(file))) { for (const file of files.filter((file) => !excludedFiles.includes(file))) {
const lines = []; const lines = [];
@@ -51,4 +49,4 @@ for (const file of files.filter((file) => !excludedFiles.includes(file))) {
fs.mkdirSync(path.dirname(outPath), { recursive: true }); fs.mkdirSync(path.dirname(outPath), { recursive: true });
fs.writeFileSync(outPath, asyncPrefix + "\n" + lines.join("\n") + asyncSuffix); fs.writeFileSync(outPath, asyncPrefix + "\n" + lines.join("\n") + asyncSuffix);
} }
} }

View File

@@ -2,20 +2,22 @@ import glob
from typing import Iterator from typing import Iterator
from pathlib import Path from pathlib import Path
excluded_files = [ glob_string = "../src/**/*.md"
excluded_globs = [
"../src/fts.md", "../src/fts.md",
"../src/embedding.md", "../src/embedding.md",
"../src/examples/serverless_lancedb_with_s3_and_lambda.md", "../src/examples/*.md",
"../src/examples/serverless_qa_bot_with_modal_and_langchain.md",
"../src/examples/youtube_transcript_bot_with_nodejs.md",
"../src/integrations/voxel51.md", "../src/integrations/voxel51.md",
"../src/guides/tables.md" "../src/guides/tables.md",
"../src/python/duckdb.md",
] ]
python_prefix = "py" python_prefix = "py"
python_file = ".py" python_file = ".py"
python_folder = "python" python_folder = "python"
glob_string = "../src/**/*.md"
files = glob.glob(glob_string, recursive=True)
excluded_files = [f for excluded_glob in excluded_globs for f in glob.glob(excluded_glob, recursive=True)]
def yield_lines(lines: Iterator[str], prefix: str, suffix: str): def yield_lines(lines: Iterator[str], prefix: str, suffix: str):
in_code_block = False in_code_block = False
@@ -31,7 +33,7 @@ def yield_lines(lines: Iterator[str], prefix: str, suffix: str):
elif in_code_block: elif in_code_block:
yield line[strip_length:] yield line[strip_length:]
for file in filter(lambda file: file not in excluded_files, glob.glob(glob_string, recursive=True)): for file in filter(lambda file: file not in excluded_files, files):
with open(file, "r") as f: with open(file, "r") as f:
lines = list(yield_lines(iter(f), "```", "```")) lines = list(yield_lines(iter(f), "```", "```"))
@@ -40,4 +42,4 @@ for file in filter(lambda file: file not in excluded_files, glob.glob(glob_strin
print(out_path) print(out_path)
out_path.parent.mkdir(exist_ok=True, parents=True) out_path.parent.mkdir(exist_ok=True, parents=True)
with open(out_path, "w") as out: with open(out_path, "w") as out:
out.writelines(lines) out.writelines(lines)

74
node/package-lock.json generated
View File

@@ -1,12 +1,12 @@
{ {
"name": "vectordb", "name": "vectordb",
"version": "0.1.19", "version": "0.2.4",
"lockfileVersion": 2, "lockfileVersion": 2,
"requires": true, "requires": true,
"packages": { "packages": {
"": { "": {
"name": "vectordb", "name": "vectordb",
"version": "0.1.19", "version": "0.2.4",
"cpu": [ "cpu": [
"x64", "x64",
"arm64" "arm64"
@@ -51,11 +51,11 @@
"typescript": "*" "typescript": "*"
}, },
"optionalDependencies": { "optionalDependencies": {
"@lancedb/vectordb-darwin-arm64": "0.1.19", "@lancedb/vectordb-darwin-arm64": "0.2.4",
"@lancedb/vectordb-darwin-x64": "0.1.19", "@lancedb/vectordb-darwin-x64": "0.2.4",
"@lancedb/vectordb-linux-arm64-gnu": "0.1.19", "@lancedb/vectordb-linux-arm64-gnu": "0.2.4",
"@lancedb/vectordb-linux-x64-gnu": "0.1.19", "@lancedb/vectordb-linux-x64-gnu": "0.2.4",
"@lancedb/vectordb-win32-x64-msvc": "0.1.19" "@lancedb/vectordb-win32-x64-msvc": "0.2.4"
} }
}, },
"node_modules/@apache-arrow/ts": { "node_modules/@apache-arrow/ts": {
@@ -315,9 +315,9 @@
} }
}, },
"node_modules/@lancedb/vectordb-darwin-arm64": { "node_modules/@lancedb/vectordb-darwin-arm64": {
"version": "0.1.19", "version": "0.2.4",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.1.19.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.2.4.tgz",
"integrity": "sha512-efQhJkBKvMNhjFq3Sw3/qHo9D9gb9UqiIr98n3STsbNxBQjMnWemXn91Ckl40siRG1O8qXcINW7Qs/EGmus+kg==", "integrity": "sha512-MqiZXamHYEOfguPsHWLBQ56IabIN6Az8u2Hx8LCyXcxW9gcyJZMSAfJc+CcA4KYHKotv0KsVBhgxZ3kaZQQyiw==",
"cpu": [ "cpu": [
"arm64" "arm64"
], ],
@@ -327,9 +327,9 @@
] ]
}, },
"node_modules/@lancedb/vectordb-darwin-x64": { "node_modules/@lancedb/vectordb-darwin-x64": {
"version": "0.1.19", "version": "0.2.4",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.1.19.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.2.4.tgz",
"integrity": "sha512-r6OZNVyemAssABz2w7CRhe7dyREwBEfTytn+ux1zzTnzsgMgDovCQ0rQ3WZcxWvcy7SFCxiemA9IP1b/lsb4tQ==", "integrity": "sha512-DzL+mw5WhKDwXdEFlPh8M9zSDhGnfks7NvEh6ZqKbU6znH206YB7g3OA4WfFyV579IIEQ8jd4v/XDthNzQKuSA==",
"cpu": [ "cpu": [
"x64" "x64"
], ],
@@ -339,9 +339,9 @@
] ]
}, },
"node_modules/@lancedb/vectordb-linux-arm64-gnu": { "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
"version": "0.1.19", "version": "0.2.4",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.1.19.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.2.4.tgz",
"integrity": "sha512-mL/hRmZp6Kw7hmGJBdOZfp/tTYiCdlOcs8DA/+nr2eiXERv0gIhyiKvr2P5DwbBmut3qXEkDalMHTo95BSdL2A==", "integrity": "sha512-LP1nNfIpFxCgcCMlIQdseDX9dZU27TNhCL41xar8euqcetY5uKvi0YqhiVlpNO85Ss1FRQBgQ/GtnOM6Bo7oBQ==",
"cpu": [ "cpu": [
"arm64" "arm64"
], ],
@@ -351,9 +351,9 @@
] ]
}, },
"node_modules/@lancedb/vectordb-linux-x64-gnu": { "node_modules/@lancedb/vectordb-linux-x64-gnu": {
"version": "0.1.19", "version": "0.2.4",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.1.19.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.2.4.tgz",
"integrity": "sha512-AG0FHksbbr+cHVKPi4B8cmBtqb6T9E0uaK4kyZkXrX52/xtv9RYVZcykaB/tSSm0XNFPWWRnx9R8UqNZV/hxMA==", "integrity": "sha512-m4RhOI5JJWPU9Ip2LlRIzXu4mwIv9M//OyAuTLiLKRm8726jQHhYi5VFUEtNzqY0o0p6pS0b3XbifYQ+cyJn3Q==",
"cpu": [ "cpu": [
"x64" "x64"
], ],
@@ -363,9 +363,9 @@
] ]
}, },
"node_modules/@lancedb/vectordb-win32-x64-msvc": { "node_modules/@lancedb/vectordb-win32-x64-msvc": {
"version": "0.1.19", "version": "0.2.4",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.1.19.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.2.4.tgz",
"integrity": "sha512-PDWZ2hvLVXH4Z4WIO1rsWY8ev3NpNm7aXlaey32P+l1Iz9Hia9+F2GBpp2UiEQKfvbk82ucAvBLRmpSsHY8Tlw==", "integrity": "sha512-lMF/2e3YkKWnTYv0R7cUCfjMkAqepNaHSc/dvJzCNsFVEhfDsFdScQFLToARs5GGxnq4fOf+MKpaHg/W6QTxiA==",
"cpu": [ "cpu": [
"x64" "x64"
], ],
@@ -4852,33 +4852,33 @@
} }
}, },
"@lancedb/vectordb-darwin-arm64": { "@lancedb/vectordb-darwin-arm64": {
"version": "0.1.19", "version": "0.2.4",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.1.19.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.2.4.tgz",
"integrity": "sha512-efQhJkBKvMNhjFq3Sw3/qHo9D9gb9UqiIr98n3STsbNxBQjMnWemXn91Ckl40siRG1O8qXcINW7Qs/EGmus+kg==", "integrity": "sha512-MqiZXamHYEOfguPsHWLBQ56IabIN6Az8u2Hx8LCyXcxW9gcyJZMSAfJc+CcA4KYHKotv0KsVBhgxZ3kaZQQyiw==",
"optional": true "optional": true
}, },
"@lancedb/vectordb-darwin-x64": { "@lancedb/vectordb-darwin-x64": {
"version": "0.1.19", "version": "0.2.4",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.1.19.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.2.4.tgz",
"integrity": "sha512-r6OZNVyemAssABz2w7CRhe7dyREwBEfTytn+ux1zzTnzsgMgDovCQ0rQ3WZcxWvcy7SFCxiemA9IP1b/lsb4tQ==", "integrity": "sha512-DzL+mw5WhKDwXdEFlPh8M9zSDhGnfks7NvEh6ZqKbU6znH206YB7g3OA4WfFyV579IIEQ8jd4v/XDthNzQKuSA==",
"optional": true "optional": true
}, },
"@lancedb/vectordb-linux-arm64-gnu": { "@lancedb/vectordb-linux-arm64-gnu": {
"version": "0.1.19", "version": "0.2.4",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.1.19.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.2.4.tgz",
"integrity": "sha512-mL/hRmZp6Kw7hmGJBdOZfp/tTYiCdlOcs8DA/+nr2eiXERv0gIhyiKvr2P5DwbBmut3qXEkDalMHTo95BSdL2A==", "integrity": "sha512-LP1nNfIpFxCgcCMlIQdseDX9dZU27TNhCL41xar8euqcetY5uKvi0YqhiVlpNO85Ss1FRQBgQ/GtnOM6Bo7oBQ==",
"optional": true "optional": true
}, },
"@lancedb/vectordb-linux-x64-gnu": { "@lancedb/vectordb-linux-x64-gnu": {
"version": "0.1.19", "version": "0.2.4",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.1.19.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.2.4.tgz",
"integrity": "sha512-AG0FHksbbr+cHVKPi4B8cmBtqb6T9E0uaK4kyZkXrX52/xtv9RYVZcykaB/tSSm0XNFPWWRnx9R8UqNZV/hxMA==", "integrity": "sha512-m4RhOI5JJWPU9Ip2LlRIzXu4mwIv9M//OyAuTLiLKRm8726jQHhYi5VFUEtNzqY0o0p6pS0b3XbifYQ+cyJn3Q==",
"optional": true "optional": true
}, },
"@lancedb/vectordb-win32-x64-msvc": { "@lancedb/vectordb-win32-x64-msvc": {
"version": "0.1.19", "version": "0.2.4",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.1.19.tgz", "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.2.4.tgz",
"integrity": "sha512-PDWZ2hvLVXH4Z4WIO1rsWY8ev3NpNm7aXlaey32P+l1Iz9Hia9+F2GBpp2UiEQKfvbk82ucAvBLRmpSsHY8Tlw==", "integrity": "sha512-lMF/2e3YkKWnTYv0R7cUCfjMkAqepNaHSc/dvJzCNsFVEhfDsFdScQFLToARs5GGxnq4fOf+MKpaHg/W6QTxiA==",
"optional": true "optional": true
}, },
"@neon-rs/cli": { "@neon-rs/cli": {

View File

@@ -1,6 +1,6 @@
{ {
"name": "vectordb", "name": "vectordb",
"version": "0.1.19", "version": "0.2.4",
"description": " Serverless, low-latency vector database for AI applications", "description": " Serverless, low-latency vector database for AI applications",
"main": "dist/index.js", "main": "dist/index.js",
"types": "dist/index.d.ts", "types": "dist/index.d.ts",
@@ -78,10 +78,10 @@
} }
}, },
"optionalDependencies": { "optionalDependencies": {
"@lancedb/vectordb-darwin-arm64": "0.1.19", "@lancedb/vectordb-darwin-arm64": "0.2.4",
"@lancedb/vectordb-darwin-x64": "0.1.19", "@lancedb/vectordb-darwin-x64": "0.2.4",
"@lancedb/vectordb-linux-arm64-gnu": "0.1.19", "@lancedb/vectordb-linux-arm64-gnu": "0.2.4",
"@lancedb/vectordb-linux-x64-gnu": "0.1.19", "@lancedb/vectordb-linux-x64-gnu": "0.2.4",
"@lancedb/vectordb-win32-x64-msvc": "0.1.19" "@lancedb/vectordb-win32-x64-msvc": "0.2.4"
} }
} }

View File

@@ -13,18 +13,19 @@
// limitations under the License. // limitations under the License.
import { import {
Field, Field, type FixedSizeListBuilder,
Float32, Float32,
List, type ListBuilder,
makeBuilder, makeBuilder,
RecordBatchFileWriter, RecordBatchFileWriter,
Table, Utf8, Utf8,
type Vector, type Vector,
vectorFromArray FixedSizeList,
vectorFromArray, type Schema, Table as ArrowTable
} from 'apache-arrow' } from 'apache-arrow'
import { type EmbeddingFunction } from './index' import { type EmbeddingFunction } from './index'
export async function convertToTable<T> (data: Array<Record<string, unknown>>, embeddings?: EmbeddingFunction<T>): Promise<Table> { // Converts an Array of records into an Arrow Table, optionally applying an embeddings function to it.
export async function convertToTable<T> (data: Array<Record<string, unknown>>, embeddings?: EmbeddingFunction<T>): Promise<ArrowTable> {
if (data.length === 0) { if (data.length === 0) {
throw new Error('At least one record needs to be provided') throw new Error('At least one record needs to be provided')
} }
@@ -34,8 +35,8 @@ export async function convertToTable<T> (data: Array<Record<string, unknown>>, e
for (const columnsKey of columns) { for (const columnsKey of columns) {
if (columnsKey === 'vector') { if (columnsKey === 'vector') {
const listBuilder = newVectorListBuilder()
const vectorSize = (data[0].vector as any[]).length const vectorSize = (data[0].vector as any[]).length
const listBuilder = newVectorBuilder(vectorSize)
for (const datum of data) { for (const datum of data) {
if ((datum[columnsKey] as any[]).length !== vectorSize) { if ((datum[columnsKey] as any[]).length !== vectorSize) {
throw new Error(`Invalid vector size, expected ${vectorSize}`) throw new Error(`Invalid vector size, expected ${vectorSize}`)
@@ -52,9 +53,7 @@ export async function convertToTable<T> (data: Array<Record<string, unknown>>, e
if (columnsKey === embeddings?.sourceColumn) { if (columnsKey === embeddings?.sourceColumn) {
const vectors = await embeddings.embed(values as T[]) const vectors = await embeddings.embed(values as T[])
const listBuilder = newVectorListBuilder() records.vector = vectorFromArray(vectors, newVectorType(vectors[0].length))
vectors.map(v => listBuilder.append(v))
records.vector = listBuilder.finish().toVector()
} }
if (typeof values[0] === 'string') { if (typeof values[0] === 'string') {
@@ -66,20 +65,47 @@ export async function convertToTable<T> (data: Array<Record<string, unknown>>, e
} }
} }
return new Table(records) return new ArrowTable(records)
} }
// Creates a new Arrow ListBuilder that stores a Vector column // Creates a new Arrow ListBuilder that stores a Vector column
function newVectorListBuilder (): ListBuilder<Float32, any> { function newVectorBuilder (dim: number): FixedSizeListBuilder<Float32> {
const children = new Field<Float32>('item', new Float32())
const list = new List(children)
return makeBuilder({ return makeBuilder({
type: list type: newVectorType(dim)
}) })
} }
// Creates the Arrow Type for a Vector column with dimension `dim`
function newVectorType (dim: number): FixedSizeList<Float32> {
const children = new Field<Float32>('item', new Float32())
return new FixedSizeList(dim, children)
}
// Converts an Array of records into Arrow IPC format
export async function fromRecordsToBuffer<T> (data: Array<Record<string, unknown>>, embeddings?: EmbeddingFunction<T>): Promise<Buffer> { export async function fromRecordsToBuffer<T> (data: Array<Record<string, unknown>>, embeddings?: EmbeddingFunction<T>): Promise<Buffer> {
const table = await convertToTable(data, embeddings) const table = await convertToTable(data, embeddings)
const writer = RecordBatchFileWriter.writeAll(table) const writer = RecordBatchFileWriter.writeAll(table)
return Buffer.from(await writer.toUint8Array()) return Buffer.from(await writer.toUint8Array())
} }
// Converts an Arrow Table into Arrow IPC format
export async function fromTableToBuffer<T> (table: ArrowTable, embeddings?: EmbeddingFunction<T>): Promise<Buffer> {
if (embeddings !== undefined) {
const source = table.getChild(embeddings.sourceColumn)
if (source === null) {
throw new Error(`The embedding source column ${embeddings.sourceColumn} was not found in the Arrow Table`)
}
const vectors = await embeddings.embed(source.toArray() as T[])
const column = vectorFromArray(vectors, newVectorType(vectors[0].length))
table = table.assign(new ArrowTable({ vector: column }))
}
const writer = RecordBatchFileWriter.writeAll(table)
return Buffer.from(await writer.toUint8Array())
}
// Creates an empty Arrow Table
export function createEmptyTable (schema: Schema): ArrowTable {
return new ArrowTable(schema)
}

View File

@@ -13,10 +13,10 @@
// limitations under the License. // limitations under the License.
import { import {
RecordBatchFileWriter, type Schema,
type Table as ArrowTable Table as ArrowTable
} from 'apache-arrow' } from 'apache-arrow'
import { fromRecordsToBuffer } from './arrow' import { createEmptyTable, fromRecordsToBuffer, fromTableToBuffer } from './arrow'
import type { EmbeddingFunction } from './embedding/embedding_function' import type { EmbeddingFunction } from './embedding/embedding_function'
import { RemoteConnection } from './remote' import { RemoteConnection } from './remote'
import { Query } from './query' import { Query } from './query'
@@ -42,6 +42,8 @@ export interface ConnectionOptions {
awsCredentials?: AwsCredentials awsCredentials?: AwsCredentials
awsRegion?: string
// API key for the remote connections // API key for the remote connections
apiKey?: string apiKey?: string
// Region to connect // Region to connect
@@ -51,6 +53,40 @@ export interface ConnectionOptions {
hostOverride?: string hostOverride?: string
} }
function getAwsArgs (opts: ConnectionOptions): any[] {
const callArgs = []
const awsCredentials = opts.awsCredentials
if (awsCredentials !== undefined) {
callArgs.push(awsCredentials.accessKeyId)
callArgs.push(awsCredentials.secretKey)
callArgs.push(awsCredentials.sessionToken)
} else {
callArgs.push(undefined)
callArgs.push(undefined)
callArgs.push(undefined)
}
callArgs.push(opts.awsRegion)
return callArgs
}
export interface CreateTableOptions<T> {
// Name of Table
name: string
// Data to insert into the Table
data?: Array<Record<string, unknown>> | ArrowTable | undefined
// Optional Arrow Schema for this table
schema?: Schema | undefined
// Optional embedding function used to create embeddings
embeddingFunction?: EmbeddingFunction<T> | undefined
// WriteOptions for this operation
writeOptions?: WriteOptions | undefined
}
/** /**
* Connect to a LanceDB instance at the given URI * Connect to a LanceDB instance at the given URI
* @param uri The uri of the database. * @param uri The uri of the database.
@@ -97,6 +133,17 @@ export interface Connection {
*/ */
openTable<T>(name: string, embeddings?: EmbeddingFunction<T>): Promise<Table<T>> openTable<T>(name: string, embeddings?: EmbeddingFunction<T>): Promise<Table<T>>
/**
* Creates a new Table, optionally initializing it with new data.
*
* @param {string} name - The name of the table.
* @param data - Array of Records to be inserted into the table
* @param schema - An Arrow Schema that describe this table columns
* @param {EmbeddingFunction} embeddings - An embedding function to use on this table
* @param {WriteOptions} writeOptions - The write options to use when creating the table.
*/
createTable<T> ({ name, data, schema, embeddingFunction, writeOptions }: CreateTableOptions<T>): Promise<Table<T>>
/** /**
* Creates a new Table and initialize it with new data. * Creates a new Table and initialize it with new data.
* *
@@ -132,8 +179,6 @@ export interface Connection {
*/ */
createTable<T> (name: string, data: Array<Record<string, unknown>>, embeddings: EmbeddingFunction<T>, options: WriteOptions): Promise<Table<T>> createTable<T> (name: string, data: Array<Record<string, unknown>>, embeddings: EmbeddingFunction<T>, options: WriteOptions): Promise<Table<T>>
createTableArrow(name: string, table: ArrowTable): Promise<Table>
/** /**
* Drop an existing table. * Drop an existing table.
* @param name The name of the table to drop. * @param name The name of the table to drop.
@@ -221,16 +266,16 @@ export interface Table<T = number[]> {
* A connection to a LanceDB database. * A connection to a LanceDB database.
*/ */
export class LocalConnection implements Connection { export class LocalConnection implements Connection {
private readonly _options: ConnectionOptions private readonly _options: () => ConnectionOptions
private readonly _db: any private readonly _db: any
constructor (db: any, options: ConnectionOptions) { constructor (db: any, options: ConnectionOptions) {
this._options = options this._options = () => options
this._db = db this._db = db
} }
get uri (): string { get uri (): string {
return this._options.uri return this._options().uri
} }
/** /**
@@ -256,48 +301,66 @@ export class LocalConnection implements Connection {
async openTable<T> (name: string, embeddings: EmbeddingFunction<T>): Promise<Table<T>> async openTable<T> (name: string, embeddings: EmbeddingFunction<T>): Promise<Table<T>>
async openTable<T> (name: string, embeddings?: EmbeddingFunction<T>): Promise<Table<T>> async openTable<T> (name: string, embeddings?: EmbeddingFunction<T>): Promise<Table<T>>
async openTable<T> (name: string, embeddings?: EmbeddingFunction<T>): Promise<Table<T>> { async openTable<T> (name: string, embeddings?: EmbeddingFunction<T>): Promise<Table<T>> {
const tbl = await databaseOpenTable.call(this._db, name) const tbl = await databaseOpenTable.call(this._db, name, ...getAwsArgs(this._options()))
if (embeddings !== undefined) { if (embeddings !== undefined) {
return new LocalTable(tbl, name, this._options, embeddings) return new LocalTable(tbl, name, this._options(), embeddings)
} else { } else {
return new LocalTable(tbl, name, this._options) return new LocalTable(tbl, name, this._options())
} }
} }
async createTable<T> (name: string, data: Array<Record<string, unknown>>, optsOrEmbedding?: WriteOptions | EmbeddingFunction<T>, opt?: WriteOptions): Promise<Table<T>> { async createTable<T> (name: string | CreateTableOptions<T>, data?: Array<Record<string, unknown>>, optsOrEmbedding?: WriteOptions | EmbeddingFunction<T>, opt?: WriteOptions): Promise<Table<T>> {
let writeOptions: WriteOptions = new DefaultWriteOptions() if (typeof name === 'string') {
if (opt !== undefined && isWriteOptions(opt)) { let writeOptions: WriteOptions = new DefaultWriteOptions()
writeOptions = opt if (opt !== undefined && isWriteOptions(opt)) {
} else if (optsOrEmbedding !== undefined && isWriteOptions(optsOrEmbedding)) { writeOptions = opt
writeOptions = optsOrEmbedding } else if (optsOrEmbedding !== undefined && isWriteOptions(optsOrEmbedding)) {
} writeOptions = optsOrEmbedding
let embeddings: undefined | EmbeddingFunction<T>
if (optsOrEmbedding !== undefined && isEmbeddingFunction(optsOrEmbedding)) {
embeddings = optsOrEmbedding
}
const createArgs = [this._db, name, await fromRecordsToBuffer(data, embeddings), writeOptions.writeMode?.toString()]
if (this._options.awsCredentials !== undefined) {
createArgs.push(this._options.awsCredentials.accessKeyId)
createArgs.push(this._options.awsCredentials.secretKey)
if (this._options.awsCredentials.sessionToken !== undefined) {
createArgs.push(this._options.awsCredentials.sessionToken)
} }
}
const tbl = await tableCreate.call(...createArgs) let embeddings: undefined | EmbeddingFunction<T>
if (optsOrEmbedding !== undefined && isEmbeddingFunction(optsOrEmbedding)) {
if (embeddings !== undefined) { embeddings = optsOrEmbedding
return new LocalTable(tbl, name, this._options, embeddings) }
} else { return await this.createTableImpl({ name, data, embeddingFunction: embeddings, writeOptions })
return new LocalTable(tbl, name, this._options)
} }
return await this.createTableImpl(name)
} }
async createTableArrow (name: string, table: ArrowTable): Promise<Table> { private async createTableImpl<T> ({ name, data, schema, embeddingFunction, writeOptions = new DefaultWriteOptions() }: {
const writer = RecordBatchFileWriter.writeAll(table) name: string
await tableCreate.call(this._db, name, Buffer.from(await writer.toUint8Array())) data?: Array<Record<string, unknown>> | ArrowTable | undefined
return await this.openTable(name) schema?: Schema | undefined
embeddingFunction?: EmbeddingFunction<T> | undefined
writeOptions?: WriteOptions | undefined
}): Promise<Table<T>> {
let buffer: Buffer
function isEmpty (data: Array<Record<string, unknown>> | ArrowTable<any>): boolean {
if (data instanceof ArrowTable) {
return data.data.length === 0
}
return data.length === 0
}
if ((data === undefined) || isEmpty(data)) {
if (schema === undefined) {
throw new Error('Either data or schema needs to defined')
}
buffer = await fromTableToBuffer(createEmptyTable(schema))
} else if (data instanceof ArrowTable) {
buffer = await fromTableToBuffer(data, embeddingFunction)
} else {
// data is Array<Record<...>>
buffer = await fromRecordsToBuffer(data, embeddingFunction)
}
const tbl = await tableCreate.call(this._db, name, buffer, writeOptions?.writeMode?.toString(), ...getAwsArgs(this._options()))
if (embeddingFunction !== undefined) {
return new LocalTable(tbl, name, this._options(), embeddingFunction)
} else {
return new LocalTable(tbl, name, this._options())
}
} }
/** /**
@@ -313,7 +376,7 @@ export class LocalTable<T = number[]> implements Table<T> {
private _tbl: any private _tbl: any
private readonly _name: string private readonly _name: string
private readonly _embeddings?: EmbeddingFunction<T> private readonly _embeddings?: EmbeddingFunction<T>
private readonly _options: ConnectionOptions private readonly _options: () => ConnectionOptions
constructor (tbl: any, name: string, options: ConnectionOptions) constructor (tbl: any, name: string, options: ConnectionOptions)
/** /**
@@ -327,7 +390,7 @@ export class LocalTable<T = number[]> implements Table<T> {
this._tbl = tbl this._tbl = tbl
this._name = name this._name = name
this._embeddings = embeddings this._embeddings = embeddings
this._options = options this._options = () => options
} }
get name (): string { get name (): string {
@@ -349,15 +412,12 @@ export class LocalTable<T = number[]> implements Table<T> {
* @return The number of rows added to the table * @return The number of rows added to the table
*/ */
async add (data: Array<Record<string, unknown>>): Promise<number> { async add (data: Array<Record<string, unknown>>): Promise<number> {
const callArgs = [this._tbl, await fromRecordsToBuffer(data, this._embeddings), WriteMode.Append.toString()] return tableAdd.call(
if (this._options.awsCredentials !== undefined) { this._tbl,
callArgs.push(this._options.awsCredentials.accessKeyId) await fromRecordsToBuffer(data, this._embeddings),
callArgs.push(this._options.awsCredentials.secretKey) WriteMode.Append.toString(),
if (this._options.awsCredentials.sessionToken !== undefined) { ...getAwsArgs(this._options())
callArgs.push(this._options.awsCredentials.sessionToken) ).then((newTable: any) => { this._tbl = newTable })
}
}
return tableAdd.call(...callArgs).then((newTable: any) => { this._tbl = newTable })
} }
/** /**
@@ -367,15 +427,12 @@ export class LocalTable<T = number[]> implements Table<T> {
* @return The number of rows added to the table * @return The number of rows added to the table
*/ */
async overwrite (data: Array<Record<string, unknown>>): Promise<number> { async overwrite (data: Array<Record<string, unknown>>): Promise<number> {
const callArgs = [this._tbl, await fromRecordsToBuffer(data, this._embeddings), WriteMode.Overwrite.toString()] return tableAdd.call(
if (this._options.awsCredentials !== undefined) { this._tbl,
callArgs.push(this._options.awsCredentials.accessKeyId) await fromRecordsToBuffer(data, this._embeddings),
callArgs.push(this._options.awsCredentials.secretKey) WriteMode.Overwrite.toString(),
if (this._options.awsCredentials.sessionToken !== undefined) { ...getAwsArgs(this._options())
callArgs.push(this._options.awsCredentials.sessionToken) ).then((newTable: any) => { this._tbl = newTable })
}
}
return tableAdd.call(...callArgs).then((newTable: any) => { this._tbl = newTable })
} }
/** /**

View File

@@ -112,7 +112,8 @@ export class Query<T = number[]> {
this._queryVector = this._query as number[] this._queryVector = this._query as number[]
} }
const buffer = await tableSearch.call(this._tbl, this) const isElectron = this.isElectron()
const buffer = await tableSearch.call(this._tbl, this, isElectron)
const data = tableFromIPC(buffer) const data = tableFromIPC(buffer)
return data.toArray().map((entry: Record<string, unknown>) => { return data.toArray().map((entry: Record<string, unknown>) => {
@@ -127,4 +128,14 @@ export class Query<T = number[]> {
return newObject as unknown as T return newObject as unknown as T
}) })
} }
// See https://github.com/electron/electron/issues/2288
private isElectron (): boolean {
  try {
    // The optional chains below can evaluate to `undefined` (e.g. when
    // `navigator` has no userAgent); coerce explicitly so the method
    // always returns a real boolean as its signature declares.
    // eslint-disable-next-line no-prototype-builtins
    return Boolean(process?.versions?.hasOwnProperty('electron')) ||
      Boolean(navigator?.userAgent?.toLowerCase()?.includes(' electron'))
  } catch (e) {
    // `process`/`navigator` may not exist at all in some runtimes.
    return false
  }
}
} }

View File

@@ -14,11 +14,11 @@
import { import {
type EmbeddingFunction, type Table, type VectorIndexParams, type Connection, type EmbeddingFunction, type Table, type VectorIndexParams, type Connection,
type ConnectionOptions type ConnectionOptions, type CreateTableOptions, type WriteOptions
} from '../index' } from '../index'
import { Query } from '../query' import { Query } from '../query'
import { type Table as ArrowTable, Vector } from 'apache-arrow' import { Vector } from 'apache-arrow'
import { HttpLancedbClient } from './client' import { HttpLancedbClient } from './client'
/** /**
@@ -66,13 +66,7 @@ export class RemoteConnection implements Connection {
} }
} }
async createTable (name: string, data: Array<Record<string, unknown>>): Promise<Table> async createTable<T> (name: string | CreateTableOptions<T>, data?: Array<Record<string, unknown>>, optsOrEmbedding?: WriteOptions | EmbeddingFunction<T>, opt?: WriteOptions): Promise<Table<T>> {
async createTable<T> (name: string, data: Array<Record<string, unknown>>, embeddings: EmbeddingFunction<T>): Promise<Table<T>>
async createTable<T> (name: string, data: Array<Record<string, unknown>>, embeddings?: EmbeddingFunction<T>): Promise<Table<T>> {
throw new Error('Not implemented')
}
async createTableArrow (name: string, table: ArrowTable): Promise<Table> {
throw new Error('Not implemented') throw new Error('Not implemented')
} }

View File

@@ -47,7 +47,9 @@ describe('LanceDB S3 client', function () {
} }
} }
const table = await createTestDB(opts, 2, 20) const table = await createTestDB(opts, 2, 20)
console.log(table)
const con = await lancedb.connect(opts) const con = await lancedb.connect(opts)
console.log(con)
assert.equal(con.uri, opts.uri) assert.equal(con.uri, opts.uri)
const results = await table.search([0.1, 0.3]).limit(5).execute() const results = await table.search([0.1, 0.3]).limit(5).execute()
@@ -70,5 +72,5 @@ async function createTestDB (opts: ConnectionOptions, numDimensions: number = 2,
data.push({ id: i + 1, name: `name_${i}`, price: i + 10, is_active: (i % 2 === 0), vector }) data.push({ id: i + 1, name: `name_${i}`, price: i + 10, is_active: (i % 2 === 0), vector })
} }
return await con.createTable('vectors', data) return await con.createTable('vectors_2', data)
} }

View File

@@ -19,6 +19,7 @@ import * as chaiAsPromised from 'chai-as-promised'
import * as lancedb from '../index' import * as lancedb from '../index'
import { type AwsCredentials, type EmbeddingFunction, MetricType, Query, WriteMode, DefaultWriteOptions, isWriteOptions } from '../index' import { type AwsCredentials, type EmbeddingFunction, MetricType, Query, WriteMode, DefaultWriteOptions, isWriteOptions } from '../index'
import { Field, Int32, makeVector, Schema, Utf8, Table as ArrowTable, vectorFromArray } from 'apache-arrow'
const expect = chai.expect const expect = chai.expect
const assert = chai.assert const assert = chai.assert
@@ -119,6 +120,45 @@ describe('LanceDB client', function () {
}) })
describe('when creating a new dataset', function () { describe('when creating a new dataset', function () {
it('create an empty table', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const schema = new Schema(
[new Field('id', new Int32()), new Field('name', new Utf8())]
)
const table = await con.createTable({ name: 'vectors', schema })
assert.equal(table.name, 'vectors')
assert.deepEqual(await con.tableNames(), ['vectors'])
})
it('create a table with a empty data array', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const schema = new Schema(
[new Field('id', new Int32()), new Field('name', new Utf8())]
)
const table = await con.createTable({ name: 'vectors', schema, data: [] })
assert.equal(table.name, 'vectors')
assert.deepEqual(await con.tableNames(), ['vectors'])
})
it('create a table from an Arrow Table', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const i32s = new Int32Array(new Array<number>(10))
const i32 = makeVector(i32s)
const data = new ArrowTable({ vector: i32 })
const table = await con.createTable({ name: 'vectors', data })
assert.equal(table.name, 'vectors')
assert.equal(await table.countRows(), 10)
assert.deepEqual(await con.tableNames(), ['vectors'])
})
it('creates a new table from javascript objects', async function () { it('creates a new table from javascript objects', async function () {
const dir = await track().mkdir('lancejs') const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir) const con = await lancedb.connect(dir)
@@ -291,6 +331,20 @@ describe('LanceDB client', function () {
const results = await table.search('foo').execute() const results = await table.search('foo').execute()
assert.equal(results.length, 2) assert.equal(results.length, 2)
}) })
it('should create embeddings for Arrow Table', async function () {
const dir = await track().mkdir('lancejs')
const con = await lancedb.connect(dir)
const embeddingFunction = new TextEmbedding('name')
const names = vectorFromArray(['foo', 'bar'], new Utf8())
const data = new ArrowTable({ name: names })
const table = await con.createTable({ name: 'vectors', data, embeddingFunction })
assert.equal(table.name, 'vectors')
const results = await table.search('foo').execute()
assert.equal(results.length, 2)
})
}) })
}) })

View File

@@ -1,5 +1,5 @@
[bumpversion] [bumpversion]
current_version = 0.2.0 current_version = 0.2.2
commit = True commit = True
message = [python] Bump version: {current_version} → {new_version} message = [python] Bump version: {current_version} → {new_version}
tag = True tag = True

View File

@@ -31,9 +31,13 @@ def connect(
---------- ----------
uri: str or Path uri: str or Path
The uri of the database. The uri of the database.
api_token: str, optional api_key: str, optional
If presented, connect to LanceDB cloud. If presented, connect to LanceDB cloud.
Otherwise, connect to a database on file system or cloud storage. Otherwise, connect to a database on file system or cloud storage.
region: str, default "us-west-2"
The region to use for LanceDB Cloud.
host_override: str, optional
The override url for LanceDB Cloud.
Examples Examples
-------- --------

View File

@@ -1,7 +1,10 @@
import os import os
import pyarrow as pa
import pytest import pytest
from lancedb.embeddings import EmbeddingFunctionModel, EmbeddingFunctionRegistry
# import lancedb so we don't have to in every example # import lancedb so we don't have to in every example
@@ -14,3 +17,22 @@ def doctest_setup(monkeypatch, tmpdir):
monkeypatch.setitem(os.environ, "COLUMNS", "80") monkeypatch.setitem(os.environ, "COLUMNS", "80")
# Work in a temporary directory # Work in a temporary directory
monkeypatch.chdir(tmpdir) monkeypatch.chdir(tmpdir)
registry = EmbeddingFunctionRegistry.get_instance()


@registry.register()
class MockEmbeddingFunction(EmbeddingFunctionModel):
    """Deterministic stand-in embedding function used by the doctests."""

    def __call__(self, data):
        # Normalize the input into a plain python list of rows first.
        if isinstance(data, str):
            rows = [data]
        elif isinstance(data, pa.ChunkedArray):
            rows = data.combine_chunks().to_pylist()
        elif isinstance(data, pa.Array):
            rows = data.to_pylist()
        else:
            rows = data
        return [self.embed(row) for row in rows]

    def embed(self, row):
        # Hash the first 10 characters so the "embedding" is reproducible
        # within a single process.
        return [float(hash(c)) for c in row[:10]]

View File

@@ -16,12 +16,13 @@ from __future__ import annotations
import os import os
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from pathlib import Path from pathlib import Path
from typing import Optional from typing import List, Optional, Union
import pyarrow as pa import pyarrow as pa
from pyarrow import fs from pyarrow import fs
from .common import DATA, URI from .common import DATA, URI
from .embeddings import EmbeddingFunctionModel
from .pydantic import LanceModel from .pydantic import LanceModel
from .table import LanceTable, Table from .table import LanceTable, Table
from .util import fs_from_uri, get_uri_location, get_uri_scheme from .util import fs_from_uri, get_uri_location, get_uri_scheme
@@ -40,7 +41,7 @@ class DBConnection(ABC):
self, self,
name: str, name: str,
data: Optional[DATA] = None, data: Optional[DATA] = None,
schema: Optional[pa.Schema, LanceModel] = None, schema: Optional[Union[pa.Schema, LanceModel]] = None,
mode: str = "create", mode: str = "create",
on_bad_vectors: str = "error", on_bad_vectors: str = "error",
fill_value: float = 0.0, fill_value: float = 0.0,
@@ -149,14 +150,14 @@ class DBConnection(ABC):
... for i in range(5): ... for i in range(5):
... yield pa.RecordBatch.from_arrays( ... yield pa.RecordBatch.from_arrays(
... [ ... [
... pa.array([[3.1, 4.1], [5.9, 26.5]]), ... pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)),
... pa.array(["foo", "bar"]), ... pa.array(["foo", "bar"]),
... pa.array([10.0, 20.0]), ... pa.array([10.0, 20.0]),
... ], ... ],
... ["vector", "item", "price"], ... ["vector", "item", "price"],
... ) ... )
>>> schema=pa.schema([ >>> schema=pa.schema([
... pa.field("vector", pa.list_(pa.float32())), ... pa.field("vector", pa.list_(pa.float32(), 2)),
... pa.field("item", pa.utf8()), ... pa.field("item", pa.utf8()),
... pa.field("price", pa.float32()), ... pa.field("price", pa.float32()),
... ]) ... ])
@@ -285,10 +286,11 @@ class LanceDBConnection(DBConnection):
self, self,
name: str, name: str,
data: Optional[DATA] = None, data: Optional[DATA] = None,
schema: Optional[pa.Schema, LanceModel] = None, schema: Optional[Union[pa.Schema, LanceModel]] = None,
mode: str = "create", mode: str = "create",
on_bad_vectors: str = "error", on_bad_vectors: str = "error",
fill_value: float = 0.0, fill_value: float = 0.0,
embedding_functions: Optional[List[EmbeddingFunctionModel]] = None,
) -> LanceTable: ) -> LanceTable:
"""Create a table in the database. """Create a table in the database.
@@ -307,6 +309,7 @@ class LanceDBConnection(DBConnection):
mode=mode, mode=mode,
on_bad_vectors=on_bad_vectors, on_bad_vectors=on_bad_vectors,
fill_value=fill_value, fill_value=fill_value,
embedding_functions=embedding_functions,
) )
return tbl return tbl

View File

@@ -0,0 +1,22 @@
# Copyright (c) 2023. LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .functions import (
REGISTRY,
EmbeddingFunctionModel,
EmbeddingFunctionRegistry,
SentenceTransformerEmbeddingFunction,
TextEmbeddingFunctionModel,
)
from .utils import with_embeddings

View File

@@ -0,0 +1,228 @@
# Copyright (c) 2023. LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from abc import ABC, abstractmethod
from typing import List, Optional, Union
import numpy as np
import pyarrow as pa
from cachetools import cached
from pydantic import BaseModel
class EmbeddingFunctionRegistry:
    """
    Singleton registry of embedding-function classes, keyed by class name.

    It also handles serializing registered functions into arrow table
    metadata and reconstructing them from that metadata.
    """

    @classmethod
    def get_instance(cls):
        """Return the process-wide registry singleton."""
        return REGISTRY

    def __init__(self):
        # class name -> EmbeddingFunctionModel subclass
        self._functions = {}

    def register(self):
        """
        Return a class decorator that registers an EmbeddingFunctionModel
        subclass under its class name.

        Raises TypeError for non-subclasses and KeyError on duplicates.
        """
        def decorator(cls):
            if not issubclass(cls, EmbeddingFunctionModel):
                raise TypeError("Must be a subclass of EmbeddingFunctionModel")
            if cls.__name__ in self._functions:
                raise KeyError(f"{cls.__name__} was already registered")
            self._functions[cls.__name__] = cls
            return cls

        return decorator

    def reset(self):
        """Drop every registered function, restoring the initial state."""
        self._functions = {}

    def load(self, name: str):
        """Fetch a registered class by name (KeyError if unknown)."""
        return self._functions[name]

    def parse_functions(self, metadata: Optional[dict]) -> dict:
        """
        Parse arrow-table metadata into a mapping of vector column name to
        reconstructed embedding function.

        Parameters
        ----------
        metadata : Optional[dict]
            Arrow table metadata; keys and values are bytes.

        Returns
        -------
        dict
            Empty when the input is None or has no b"embedding_functions"
            entry.
        """
        if metadata is None or b"embedding_functions" not in metadata:
            return {}
        raw_list = json.loads(metadata[b"embedding_functions"].decode("utf-8"))
        return {
            obj["model"]["vector_column"]: self.load(obj["schema"]["title"])(
                **obj["model"]
            )
            for obj in raw_list
        }

    def function_to_metadata(self, func):
        """Serialize one embedding function into a json-able config dict."""
        return {
            "schema": func.model_json_schema(),
            "model": func.model_dump(),
        }

    def get_table_metadata(self, func_list):
        """
        Serialize a list of embedding functions into an arrow metadata dict.
        Metadata values must be bytes, hence the json-dump + utf-8 encode.
        """
        entries = [self.function_to_metadata(func) for func in func_list]
        return {"embedding_functions": json.dumps(entries, indent=2).encode("utf-8")}


REGISTRY = EmbeddingFunctionRegistry()
class EmbeddingFunctionModel(BaseModel, ABC):
    """
    Callable abstract base for embedding functions, backed by pydantic so
    instances can be serialized into table metadata.
    """

    # column whose values are embedded; None when only queries are embedded
    source_column: Optional[str]
    # column that stores the produced vectors
    vector_column: str

    @abstractmethod
    def __call__(self, *args, **kwargs) -> List[np.array]:
        ...
TEXT = Union[str, List[str], pa.Array, pa.ChunkedArray, np.ndarray]


class TextEmbeddingFunctionModel(EmbeddingFunctionModel):
    """
    Callable abstract base for embedding functions whose input is text.
    """

    def __call__(self, texts: TEXT, *args, **kwargs) -> List[np.array]:
        # Normalize first, then delegate to the concrete implementation.
        return self.generate_embeddings(self.sanitize_input(texts))

    def sanitize_input(self, texts: TEXT) -> Union[List[str], np.ndarray]:
        """
        Normalize the input before generate_embeddings(): a bare string
        becomes a one-element list and arrow arrays become python lists;
        other inputs pass through unchanged.
        """
        if isinstance(texts, str):
            return [texts]
        if isinstance(texts, pa.Array):
            return texts.to_pylist()
        if isinstance(texts, pa.ChunkedArray):
            return texts.combine_chunks().to_pylist()
        return texts

    @abstractmethod
    def generate_embeddings(
        self, texts: Union[List[str], np.ndarray]
    ) -> List[np.array]:
        """Produce one embedding per input text."""
        ...
@REGISTRY.register()
class SentenceTransformerEmbeddingFunction(TextEmbeddingFunctionModel):
    """
    Text embedding function backed by the sentence-transformers library.
    """

    # model name, target device, and whether encode() should normalize
    name: str = "all-MiniLM-L6-v2"
    device: str = "cpu"
    normalize: bool = False

    @property
    def embedding_model(self):
        """
        The sentence-transformers model for this name/device, loaded at
        most once per process via the class-level cache below.
        """
        return self.__class__.get_embedding_model(self.name, self.device)

    def generate_embeddings(
        self, texts: Union[List[str], np.ndarray]
    ) -> List[np.array]:
        """
        Embed the given texts.

        Parameters
        ----------
        texts: list[str] or np.ndarray (of str)
            The texts to embed
        """
        model = self.embedding_model
        return model.encode(
            list(texts),
            convert_to_numpy=True,
            normalize_embeddings=self.normalize,
        ).tolist()

    @classmethod
    @cached(cache={})
    def get_embedding_model(cls, name, device):
        """
        Load (and cache) the sentence-transformers model for name/device.

        Parameters
        ----------
        name : str
            The name of the model to load
        device : str
            The device to load the model on

        TODO: use lru_cache instead with a reasonable/configurable maxsize
        """
        try:
            from sentence_transformers import SentenceTransformer

            return SentenceTransformer(name, device=device)
        except ImportError:
            raise ValueError("Please install sentence_transformers")

View File

@@ -1,4 +1,4 @@
# Copyright 2023 LanceDB Developers # Copyright (c) 2023. LanceDB Developers
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import pyarrow as pa
from lance.vector import vec_to_table from lance.vector import vec_to_table
from retry import retry from retry import retry
from .util import safe_import_pandas from ..util import safe_import_pandas
pd = safe_import_pandas() pd = safe_import_pandas()
DATA = Union[pa.Table, "pd.DataFrame"] DATA = Union[pa.Table, "pd.DataFrame"]
@@ -58,7 +58,7 @@ def with_embeddings(
pa.Table pa.Table
The input table with a new column called "vector" containing the embeddings. The input table with a new column called "vector" containing the embeddings.
""" """
func = EmbeddingFunction(func) func = FunctionWrapper(func)
if wrap_api: if wrap_api:
func = func.retry().rate_limit() func = func.retry().rate_limit()
func = func.batch_size(batch_size) func = func.batch_size(batch_size)
@@ -71,7 +71,11 @@ def with_embeddings(
return data.append_column("vector", table["vector"]) return data.append_column("vector", table["vector"])
class EmbeddingFunction: class FunctionWrapper:
"""
A wrapper for embedding functions that adds rate limiting, retries, and batching.
"""
def __init__(self, func: Callable): def __init__(self, func: Callable):
self.func = func self.func = func
self.rate_limiter_kwargs = {} self.rate_limiter_kwargs = {}

View File

@@ -13,6 +13,7 @@
from __future__ import annotations from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List, Literal, Optional, Type, Union from typing import List, Literal, Optional, Type, Union
import numpy as np import numpy as np
@@ -54,7 +55,164 @@ class Query(pydantic.BaseModel):
refine_factor: Optional[int] = None refine_factor: Optional[int] = None
class LanceQueryBuilder: class LanceQueryBuilder(ABC):
@classmethod
def create(
cls,
table: "lancedb.table.Table",
query: Optional[Union[np.ndarray, str]],
query_type: str,
vector_column_name: str,
) -> LanceQueryBuilder:
if query is None:
return LanceEmptyQueryBuilder(table)
query, query_type = cls._resolve_query(
table, query, query_type, vector_column_name
)
if isinstance(query, str):
# fts
return LanceFtsQueryBuilder(table, query)
if isinstance(query, list):
query = np.array(query, dtype=np.float32)
elif isinstance(query, np.ndarray):
query = query.astype(np.float32)
else:
raise TypeError(f"Unsupported query type: {type(query)}")
return LanceVectorQueryBuilder(table, query, vector_column_name)
@classmethod
def _resolve_query(cls, table, query, query_type, vector_column_name):
# If query_type is fts, then query must be a string.
# otherwise raise TypeError
if query_type == "fts":
if not isinstance(query, str):
raise TypeError(
f"Query type is 'fts' but query is not a string: {type(query)}"
)
return query, query_type
elif query_type == "vector":
# If query_type is vector, then query must be a list or np.ndarray.
# otherwise raise TypeError
if not isinstance(query, (list, np.ndarray)):
raise TypeError(
f"Query type is 'vector' but query is not a list or np.ndarray: {type(query)}"
)
return query, query_type
elif query_type == "auto":
if isinstance(query, (list, np.ndarray)):
return query, "vector"
elif isinstance(query, str):
func = table.embedding_functions.get(vector_column_name, None)
if func is not None:
query = func(query)[0]
return query, "vector"
else:
return query, "fts"
else:
raise TypeError("Query must be a list, np.ndarray, or str")
else:
raise ValueError(
f"Invalid query_type, must be 'vector', 'fts', or 'auto': {query_type}"
)
def __init__(self, table: "lancedb.table.Table"):
self._table = table
self._limit = 10
self._columns = None
self._where = None
def to_df(self) -> "pd.DataFrame":
"""
Execute the query and return the results as a pandas DataFrame.
In addition to the selected columns, LanceDB also returns a vector
and also the "_distance" column which is the distance between the query
vector and the returned vector.
"""
return self.to_arrow().to_pandas()
@abstractmethod
def to_arrow(self) -> pa.Table:
"""
Execute the query and return the results as an
[Apache Arrow Table](https://arrow.apache.org/docs/python/generated/pyarrow.Table.html#pyarrow.Table).
In addition to the selected columns, LanceDB also returns a vector
and also the "_distance" column which is the distance between the query
vector and the returned vectors.
"""
raise NotImplementedError
def to_pydantic(self, model: Type[LanceModel]) -> List[LanceModel]:
"""Return the table as a list of pydantic models.
Parameters
----------
model: Type[LanceModel]
The pydantic model to use.
Returns
-------
List[LanceModel]
"""
return [
model(**{k: v for k, v in row.items() if k in model.field_names()})
for row in self.to_arrow().to_pylist()
]
def limit(self, limit: int) -> LanceVectorQueryBuilder:
"""Set the maximum number of results to return.
Parameters
----------
limit: int
The maximum number of results to return.
Returns
-------
LanceVectorQueryBuilder
The LanceQueryBuilder object.
"""
self._limit = limit
return self
def select(self, columns: list) -> LanceVectorQueryBuilder:
"""Set the columns to return.
Parameters
----------
columns: list
The columns to return.
Returns
-------
LanceVectorQueryBuilder
The LanceQueryBuilder object.
"""
self._columns = columns
return self
def where(self, where: str) -> LanceVectorQueryBuilder:
"""Set the where clause.
Parameters
----------
where: str
The where clause.
Returns
-------
LanceVectorQueryBuilder
The LanceQueryBuilder object.
"""
self._where = where
return self
class LanceVectorQueryBuilder(LanceQueryBuilder):
""" """
A builder for nearest neighbor queries for LanceDB. A builder for nearest neighbor queries for LanceDB.
@@ -80,68 +238,17 @@ class LanceQueryBuilder:
def __init__( def __init__(
self, self,
table: "lancedb.table.Table", table: "lancedb.table.Table",
query: Union[np.ndarray, str], query: Union[np.ndarray, list],
vector_column: str = VECTOR_COLUMN_NAME, vector_column: str = VECTOR_COLUMN_NAME,
): ):
super().__init__(table)
self._query = query
self._metric = "L2" self._metric = "L2"
self._nprobes = 20 self._nprobes = 20
self._refine_factor = None self._refine_factor = None
self._table = table
self._query = query
self._limit = 10
self._columns = None
self._where = None
self._vector_column = vector_column self._vector_column = vector_column
def limit(self, limit: int) -> LanceQueryBuilder: def metric(self, metric: Literal["L2", "cosine"]) -> LanceVectorQueryBuilder:
"""Set the maximum number of results to return.
Parameters
----------
limit: int
The maximum number of results to return.
Returns
-------
LanceQueryBuilder
The LanceQueryBuilder object.
"""
self._limit = limit
return self
def select(self, columns: list) -> LanceQueryBuilder:
"""Set the columns to return.
Parameters
----------
columns: list
The columns to return.
Returns
-------
LanceQueryBuilder
The LanceQueryBuilder object.
"""
self._columns = columns
return self
def where(self, where: str) -> LanceQueryBuilder:
"""Set the where clause.
Parameters
----------
where: str
The where clause.
Returns
-------
LanceQueryBuilder
The LanceQueryBuilder object.
"""
self._where = where
return self
def metric(self, metric: Literal["L2", "cosine"]) -> LanceQueryBuilder:
"""Set the distance metric to use. """Set the distance metric to use.
Parameters Parameters
@@ -151,13 +258,13 @@ class LanceQueryBuilder:
Returns Returns
------- -------
LanceQueryBuilder LanceVectorQueryBuilder
The LanceQueryBuilder object. The LanceQueryBuilder object.
""" """
self._metric = metric self._metric = metric
return self return self
def nprobes(self, nprobes: int) -> LanceQueryBuilder: def nprobes(self, nprobes: int) -> LanceVectorQueryBuilder:
"""Set the number of probes to use. """Set the number of probes to use.
Higher values will yield better recall (more likely to find vectors if Higher values will yield better recall (more likely to find vectors if
@@ -173,13 +280,13 @@ class LanceQueryBuilder:
Returns Returns
------- -------
LanceQueryBuilder LanceVectorQueryBuilder
The LanceQueryBuilder object. The LanceQueryBuilder object.
""" """
self._nprobes = nprobes self._nprobes = nprobes
return self return self
def refine_factor(self, refine_factor: int) -> LanceQueryBuilder: def refine_factor(self, refine_factor: int) -> LanceVectorQueryBuilder:
"""Set the refine factor to use, increasing the number of vectors sampled. """Set the refine factor to use, increasing the number of vectors sampled.
As an example, a refine factor of 2 will sample 2x as many vectors as As an example, a refine factor of 2 will sample 2x as many vectors as
@@ -195,22 +302,12 @@ class LanceQueryBuilder:
Returns Returns
------- -------
LanceQueryBuilder LanceVectorQueryBuilder
The LanceQueryBuilder object. The LanceQueryBuilder object.
""" """
self._refine_factor = refine_factor self._refine_factor = refine_factor
return self return self
def to_df(self) -> "pd.DataFrame":
"""
Execute the query and return the results as a pandas DataFrame.
In addition to the selected columns, LanceDB also returns a vector
and also the "_distance" column which is the distance between the query
vector and the returned vector.
"""
return self.to_arrow().to_pandas()
def to_arrow(self) -> pa.Table: def to_arrow(self) -> pa.Table:
""" """
Execute the query and return the results as an Execute the query and return the results as an
@@ -233,25 +330,12 @@ class LanceQueryBuilder:
) )
return self._table._execute_query(query) return self._table._execute_query(query)
def to_pydantic(self, model: Type[LanceModel]) -> List[LanceModel]:
"""Return the table as a list of pydantic models.
Parameters
----------
model: Type[LanceModel]
The pydantic model to use.
Returns
-------
List[LanceModel]
"""
return [
model(**{k: v for k, v in row.items() if k in model.field_names()})
for row in self.to_arrow().to_pylist()
]
class LanceFtsQueryBuilder(LanceQueryBuilder): class LanceFtsQueryBuilder(LanceQueryBuilder):
def __init__(self, table: "lancedb.table.Table", query: str):
super().__init__(table)
self._query = query
def to_arrow(self) -> pa.Table: def to_arrow(self) -> pa.Table:
try: try:
import tantivy import tantivy
@@ -275,3 +359,13 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):
output_tbl = self._table.to_lance().take(row_ids, columns=self._columns) output_tbl = self._table.to_lance().take(row_ids, columns=self._columns)
output_tbl = output_tbl.append_column("score", scores) output_tbl = output_tbl.append_column("score", scores)
return output_tbl return output_tbl
class LanceEmptyQueryBuilder(LanceQueryBuilder):
    """Builder for queries with no search vector or text: a plain table
    scan that still honors select/where/limit."""

    def to_arrow(self) -> pa.Table:
        dataset = self._table.to_lance()
        return dataset.to_table(
            columns=self._columns,
            filter=self._where,
            limit=self._limit,
        )

View File

@@ -20,7 +20,7 @@ from lance import json_to_schema
from lancedb.common import DATA, VEC, VECTOR_COLUMN_NAME from lancedb.common import DATA, VEC, VECTOR_COLUMN_NAME
from ..query import LanceQueryBuilder from ..query import LanceVectorQueryBuilder
from ..table import Query, Table, _sanitize_data from ..table import Query, Table, _sanitize_data
from .arrow import to_ipc_binary from .arrow import to_ipc_binary
from .client import ARROW_STREAM_CONTENT_TYPE from .client import ARROW_STREAM_CONTENT_TYPE
@@ -73,7 +73,11 @@ class RemoteTable(Table):
fill_value: float = 0.0, fill_value: float = 0.0,
) -> int: ) -> int:
data = _sanitize_data( data = _sanitize_data(
data, self.schema, on_bad_vectors=on_bad_vectors, fill_value=fill_value data,
self.schema,
metadata=None,
on_bad_vectors=on_bad_vectors,
fill_value=fill_value,
) )
payload = to_ipc_binary(data) payload = to_ipc_binary(data)
@@ -89,9 +93,9 @@ class RemoteTable(Table):
) )
def search( def search(
self, query: Union[VEC, str], vector_column: str = VECTOR_COLUMN_NAME self, query: Union[VEC, str], vector_column_name: str = VECTOR_COLUMN_NAME
) -> LanceQueryBuilder: ) -> LanceVectorQueryBuilder:
return LanceQueryBuilder(self, query, vector_column) return LanceVectorQueryBuilder(self, query, vector_column_name)
def _execute_query(self, query: Query) -> pa.Table: def _execute_query(self, query: Query) -> pa.Table:
result = self._conn._client.query(self._name, query) result = self._conn._client.query(self._name, query)

View File

@@ -17,53 +17,97 @@ import inspect
import os import os
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from functools import cached_property from functools import cached_property
from typing import Iterable, List, Union from typing import Any, Iterable, List, Optional, Union
import lance import lance
import numpy as np import numpy as np
import pyarrow as pa import pyarrow as pa
import pyarrow.compute as pc import pyarrow.compute as pc
from lance import LanceDataset from lance import LanceDataset
from lance.dataset import ReaderLike
from lance.vector import vec_to_table from lance.vector import vec_to_table
from .common import DATA, VEC, VECTOR_COLUMN_NAME from .common import DATA, VEC, VECTOR_COLUMN_NAME
from .embeddings import EmbeddingFunctionModel, EmbeddingFunctionRegistry
from .pydantic import LanceModel from .pydantic import LanceModel
from .query import LanceFtsQueryBuilder, LanceQueryBuilder, Query from .query import LanceQueryBuilder, Query
from .util import fs_from_uri, safe_import_pandas from .util import fs_from_uri, safe_import_pandas
pd = safe_import_pandas() pd = safe_import_pandas()
def _sanitize_data(data, schema, on_bad_vectors, fill_value): def _sanitize_data(
data,
schema: Optional[pa.Schema],
metadata: Optional[dict],
on_bad_vectors: str,
fill_value: Any,
):
if isinstance(data, list): if isinstance(data, list):
# convert to list of dict if data is a bunch of LanceModels # convert to list of dict if data is a bunch of LanceModels
if isinstance(data[0], LanceModel): if isinstance(data[0], LanceModel):
schema = data[0].__class__.to_arrow_schema() schema = data[0].__class__.to_arrow_schema()
data = [dict(d) for d in data] data = [dict(d) for d in data]
data = pa.Table.from_pylist(data) data = pa.Table.from_pylist(data)
data = _sanitize_schema( elif isinstance(data, dict):
data, schema=schema, on_bad_vectors=on_bad_vectors, fill_value=fill_value
)
if isinstance(data, dict):
data = vec_to_table(data) data = vec_to_table(data)
if pd is not None and isinstance(data, pd.DataFrame): elif pd is not None and isinstance(data, pd.DataFrame):
data = pa.Table.from_pandas(data, preserve_index=False) data = pa.Table.from_pandas(data, preserve_index=False)
# Do not serialize Pandas metadata
meta = data.schema.metadata if data.schema.metadata is not None else {}
meta = {k: v for k, v in meta.items() if k != b"pandas"}
data = data.replace_schema_metadata(meta)
if isinstance(data, pa.Table):
if metadata:
data = _append_vector_col(data, metadata, schema)
metadata.update(data.schema.metadata or {})
data = data.replace_schema_metadata(metadata)
data = _sanitize_schema( data = _sanitize_schema(
data, schema=schema, on_bad_vectors=on_bad_vectors, fill_value=fill_value data, schema=schema, on_bad_vectors=on_bad_vectors, fill_value=fill_value
) )
# Do not serialize Pandas metadata elif isinstance(data, Iterable):
metadata = data.schema.metadata if data.schema.metadata is not None else {} data = _to_record_batch_generator(
metadata = {k: v for k, v in metadata.items() if k != b"pandas"} data, schema, metadata, on_bad_vectors, fill_value
schema = data.schema.with_metadata(metadata) )
data = pa.Table.from_arrays(data.columns, schema=schema) else:
if not isinstance(data, (pa.Table, Iterable)):
raise TypeError(f"Unsupported data type: {type(data)}") raise TypeError(f"Unsupported data type: {type(data)}")
return data return data
def _append_vector_col(data: pa.Table, metadata: dict, schema: Optional[pa.Schema]):
"""
Use the embedding function to automatically embed the source column and add the
vector column to the table.
"""
functions = EmbeddingFunctionRegistry.get_instance().parse_functions(metadata)
for vector_col, func in functions.items():
if vector_col not in data.column_names:
col_data = func(data[func.source_column])
if schema is not None:
dtype = schema.field(vector_col).type
else:
dtype = pa.list_(pa.float32(), len(col_data[0]))
data = data.append_column(
pa.field(vector_col, type=dtype), pa.array(col_data, type=dtype)
)
return data
def _to_record_batch_generator(
data: Iterable, schema, metadata, on_bad_vectors, fill_value
):
for batch in data:
if not isinstance(batch, pa.RecordBatch):
table = _sanitize_data(batch, schema, metadata, on_bad_vectors, fill_value)
for batch in table.to_batches():
yield batch
yield batch
class Table(ABC): class Table(ABC):
""" """
A [Table](Table) is a collection of Records in a LanceDB [Database](Database). A Table is a collection of Records in a LanceDB Database.
Examples Examples
-------- --------
@@ -184,17 +228,28 @@ class Table(ABC):
@abstractmethod @abstractmethod
def search( def search(
self, query: Union[VEC, str], vector_column: str = VECTOR_COLUMN_NAME self,
query: Optional[Union[VEC, str]] = None,
vector_column_name: str = VECTOR_COLUMN_NAME,
query_type: str = "auto",
) -> LanceQueryBuilder: ) -> LanceQueryBuilder:
"""Create a search query to find the nearest neighbors """Create a search query to find the nearest neighbors
of the given query vector. of the given query vector.
Parameters Parameters
---------- ----------
query: list, np.ndarray query: str, list, np.ndarray, default None
The query vector. The query to search for. If None then
vector_column: str, default "vector" the select/where/limit clauses are applied to filter
the table
vector_column_name: str, default "vector"
The name of the vector column to search. The name of the vector column to search.
query_type: str, default "auto"
"vector", "fts", or "auto"
If "auto" then the query type is inferred from the query;
If `query` is a list/np.ndarray then the query type is "vector";
If `query` is a string, then the query type is "vector" if the
table has embedding functions else the query type is "fts"
Returns Returns
------- -------
@@ -268,10 +323,11 @@ class LanceTable(Table):
self.name = name self.name = name
self._version = version self._version = version
def _reset_dataset(self): def _reset_dataset(self, version=None):
try: try:
if "_dataset" in self.__dict__: if "_dataset" in self.__dict__:
del self.__dict__["_dataset"] del self.__dict__["_dataset"]
self._version = version
except AttributeError: except AttributeError:
pass pass
@@ -297,7 +353,9 @@ class LanceTable(Table):
def checkout(self, version: int): def checkout(self, version: int):
"""Checkout a version of the table. This is an in-place operation. """Checkout a version of the table. This is an in-place operation.
This allows viewing previous versions of the table. This allows viewing previous versions of the table. If you wish to
keep writing to the dataset starting from an old version, then use
the `restore` function.
Parameters Parameters
---------- ----------
@@ -310,14 +368,14 @@ class LanceTable(Table):
>>> db = lancedb.connect("./.lancedb") >>> db = lancedb.connect("./.lancedb")
>>> table = db.create_table("my_table", [{"vector": [1.1, 0.9], "type": "vector"}]) >>> table = db.create_table("my_table", [{"vector": [1.1, 0.9], "type": "vector"}])
>>> table.version >>> table.version
1 2
>>> table.to_pandas() >>> table.to_pandas()
vector type vector type
0 [1.1, 0.9] vector 0 [1.1, 0.9] vector
>>> table.add([{"vector": [0.5, 0.2], "type": "vector"}]) >>> table.add([{"vector": [0.5, 0.2], "type": "vector"}])
>>> table.version >>> table.version
2 3
>>> table.checkout(1) >>> table.checkout(2)
>>> table.to_pandas() >>> table.to_pandas()
vector type vector type
0 [1.1, 0.9] vector 0 [1.1, 0.9] vector
@@ -325,7 +383,54 @@ class LanceTable(Table):
max_ver = max([v["version"] for v in self._dataset.versions()]) max_ver = max([v["version"] for v in self._dataset.versions()])
if version < 1 or version > max_ver: if version < 1 or version > max_ver:
raise ValueError(f"Invalid version {version}") raise ValueError(f"Invalid version {version}")
self._version = version self._reset_dataset(version=version)
def restore(self, version: int = None):
"""Restore a version of the table. This is an in-place operation.
This creates a new version where the data is equivalent to the
specified previous version. Data is not copied (as of python-v0.2.1).
Parameters
----------
version : int, default None
The version to restore. If unspecified then restores the currently
checked out version. If the currently checked out version is the
latest version then this is a no-op.
Examples
--------
>>> import lancedb
>>> db = lancedb.connect("./.lancedb")
>>> table = db.create_table("my_table", [{"vector": [1.1, 0.9], "type": "vector"}])
>>> table.version
2
>>> table.to_pandas()
vector type
0 [1.1, 0.9] vector
>>> table.add([{"vector": [0.5, 0.2], "type": "vector"}])
>>> table.version
3
>>> table.restore(2)
>>> table.to_pandas()
vector type
0 [1.1, 0.9] vector
>>> len(table.list_versions())
4
"""
max_ver = max([v["version"] for v in self._dataset.versions()])
if version is None:
version = self.version
elif version < 1 or version > max_ver:
raise ValueError(f"Invalid version {version}")
else:
self.checkout(version)
if version == max_ver:
# no-op if restoring the latest version
return
self._dataset.restore()
self._reset_dataset() self._reset_dataset()
def __len__(self): def __len__(self):
@@ -439,23 +544,122 @@ class LanceTable(Table):
""" """
# TODO: manage table listing and metadata separately # TODO: manage table listing and metadata separately
data = _sanitize_data( data = _sanitize_data(
data, self.schema, on_bad_vectors=on_bad_vectors, fill_value=fill_value data,
self.schema,
metadata=self.schema.metadata,
on_bad_vectors=on_bad_vectors,
fill_value=fill_value,
) )
lance.write_dataset(data, self._dataset_uri, schema=self.schema, mode=mode) lance.write_dataset(data, self._dataset_uri, schema=self.schema, mode=mode)
self._reset_dataset() self._reset_dataset()
def merge(
self,
other_table: Union[LanceTable, ReaderLike],
left_on: str,
right_on: Optional[str] = None,
schema: Optional[Union[pa.Schema, LanceModel]] = None,
):
"""Merge another table into this table.
Performs a left join, where the dataset is the left side and other_table
is the right side. Rows existing in the dataset but not on the left will
be filled with null values, unless Lance doesn't support null values for
some types, in which case an error will be raised. The only overlapping
column allowed is the join column. If other overlapping columns exist,
an error will be raised.
Parameters
----------
other_table: LanceTable or Reader-like
The data to be merged. Acceptable types are:
- Pandas DataFrame, Pyarrow Table, Dataset, Scanner,
Iterator[RecordBatch], or RecordBatchReader
- LanceTable
left_on: str
The name of the column in the dataset to join on.
right_on: str or None
The name of the column in other_table to join on. If None, defaults to
left_on.
schema: pa.Schema or LanceModel, optional
The schema of the other_table.
If not provided, the schema is inferred from the data.
Examples
--------
>>> import lancedb
>>> import pyarrow as pa
>>> df = pa.table({'x': [1, 2, 3], 'y': ['a', 'b', 'c']})
>>> db = lancedb.connect("./.lancedb")
>>> table = db.create_table("dataset", df)
>>> table.to_pandas()
x y
0 1 a
1 2 b
2 3 c
>>> new_df = pa.table({'x': [1, 2, 3], 'z': ['d', 'e', 'f']})
>>> table.merge(new_df, 'x')
>>> table.to_pandas()
x y z
0 1 a d
1 2 b e
2 3 c f
"""
if isinstance(schema, LanceModel):
schema = schema.to_arrow_schema()
if isinstance(other_table, LanceTable):
other_table = other_table.to_lance()
if isinstance(other_table, LanceDataset):
other_table = other_table.to_table()
self._dataset.merge(
other_table, left_on=left_on, right_on=right_on, schema=schema
)
self._reset_dataset()
def _get_embedding_function_for_source_col(self, column_name: str):
for k, v in self.embedding_functions.items():
if v.source_column == column_name:
return v
return None
@cached_property
def embedding_functions(self) -> dict:
"""
Get the embedding functions for the table
Returns
-------
funcs: dict
A mapping of the vector column to the embedding function
or empty dict if not configured.
"""
return EmbeddingFunctionRegistry.get_instance().parse_functions(
self.schema.metadata
)
def search( def search(
self, query: Union[VEC, str], vector_column_name=VECTOR_COLUMN_NAME self,
query: Optional[Union[VEC, str]] = None,
vector_column_name: str = VECTOR_COLUMN_NAME,
query_type: str = "auto",
) -> LanceQueryBuilder: ) -> LanceQueryBuilder:
"""Create a search query to find the nearest neighbors """Create a search query to find the nearest neighbors
of the given query vector. of the given query vector.
Parameters Parameters
---------- ----------
query: list, np.ndarray query: str, list, np.ndarray, or None
The query vector. The query to search for. If None then
the select/where/limit clauses are applied to filter
the table
vector_column_name: str, default "vector" vector_column_name: str, default "vector"
The name of the vector column to search. The name of the vector column to search.
query_type: str, default "auto"
"vector", "fts", or "auto"
If "auto" then the query type is inferred from the query;
If the query is a list/np.ndarray then the query type is "vector";
If the query is a string, then the query type is "vector" if the
table has embedding functions else the query type is "fts"
Returns Returns
------- -------
@@ -465,17 +669,9 @@ class LanceTable(Table):
and also the "_distance" column which is the distance between the query and also the "_distance" column which is the distance between the query
vector and the returned vector. vector and the returned vector.
""" """
if isinstance(query, str): return LanceQueryBuilder.create(
# fts self, query, query_type, vector_column_name=vector_column_name
return LanceFtsQueryBuilder(self, query, vector_column_name) )
if isinstance(query, list):
query = np.array(query)
if isinstance(query, np.ndarray):
query = query.astype(np.float32)
else:
raise TypeError(f"Unsupported query type: {type(query)}")
return LanceQueryBuilder(self, query, vector_column_name)
@classmethod @classmethod
def create( def create(
@@ -487,6 +683,7 @@ class LanceTable(Table):
mode="create", mode="create",
on_bad_vectors: str = "error", on_bad_vectors: str = "error",
fill_value: float = 0.0, fill_value: float = 0.0,
embedding_functions: List[EmbeddingFunctionModel] = None,
): ):
""" """
Create a new table. Create a new table.
@@ -524,20 +721,52 @@ class LanceTable(Table):
One of "error", "drop", "fill". One of "error", "drop", "fill".
fill_value: float, default 0. fill_value: float, default 0.
The value to use when filling vectors. Only used if on_bad_vectors="fill". The value to use when filling vectors. Only used if on_bad_vectors="fill".
embedding_functions: list of EmbeddingFunctionModel, default None
The embedding functions to use when creating the table.
""" """
tbl = LanceTable(db, name) tbl = LanceTable(db, name)
if inspect.isclass(schema) and issubclass(schema, LanceModel): if inspect.isclass(schema) and issubclass(schema, LanceModel):
schema = schema.to_arrow_schema() schema = schema.to_arrow_schema()
metadata = None
if embedding_functions is not None:
registry = EmbeddingFunctionRegistry.get_instance()
metadata = registry.get_table_metadata(embedding_functions)
if data is not None: if data is not None:
data = _sanitize_data( data = _sanitize_data(
data, schema, on_bad_vectors=on_bad_vectors, fill_value=fill_value data,
schema,
metadata=metadata,
on_bad_vectors=on_bad_vectors,
fill_value=fill_value,
) )
else:
if schema is None: if schema is None:
if data is None:
raise ValueError("Either data or schema must be provided") raise ValueError("Either data or schema must be provided")
data = pa.Table.from_pylist([], schema=schema) elif hasattr(data, "schema"):
lance.write_dataset(data, tbl._dataset_uri, schema=schema, mode=mode) schema = data.schema
return LanceTable(db, name) elif isinstance(data, Iterable):
if metadata:
raise TypeError(
(
"Persistent embedding functions not yet "
"supported for generator data input"
)
)
if metadata:
schema = schema.with_metadata(metadata)
empty = pa.Table.from_pylist([], schema=schema)
lance.write_dataset(empty, tbl._dataset_uri, schema=schema, mode=mode)
table = LanceTable(db, name)
if data is not None:
table.add(data)
return table
@classmethod @classmethod
def open(cls, db, name): def open(cls, db, name):
@@ -553,6 +782,56 @@ class LanceTable(Table):
def delete(self, where: str): def delete(self, where: str):
self._dataset.delete(where) self._dataset.delete(where)
def update(self, where: str, values: dict):
"""
EXPERIMENTAL: Update rows in the table (not threadsafe).
This can be used to update zero to all rows depending on how many
rows match the where clause.
Parameters
----------
where: str
The SQL where clause to use when updating rows. For example, 'x = 2'
or 'x IN (1, 2, 3)'. The filter must not be empty, or it will error.
values: dict
The values to update. The keys are the column names and the values
are the values to set.
Examples
--------
>>> import lancedb
>>> import pandas as pd
>>> data = pd.DataFrame({"x": [1, 2, 3], "vector": [[1, 2], [3, 4], [5, 6]]})
>>> db = lancedb.connect("./.lancedb")
>>> table = db.create_table("my_table", data)
>>> table.to_pandas()
x vector
0 1 [1.0, 2.0]
1 2 [3.0, 4.0]
2 3 [5.0, 6.0]
>>> table.update(where="x = 2", values={"vector": [10, 10]})
>>> table.to_pandas()
x vector
0 1 [1.0, 2.0]
1 3 [5.0, 6.0]
2 2 [10.0, 10.0]
"""
orig_data = self._dataset.to_table(filter=where).combine_chunks()
if len(orig_data) == 0:
return
for col, val in values.items():
i = orig_data.column_names.index(col)
if i < 0:
raise ValueError(f"Column {col} does not exist")
orig_data = orig_data.set_column(
i, col, pa.array([val] * len(orig_data), type=orig_data[col].type)
)
self.delete(where)
self.add(orig_data, mode="append")
self._reset_dataset()
def _execute_query(self, query: Query) -> pa.Table: def _execute_query(self, query: Query) -> pa.Table:
ds = self.to_lance() ds = self.to_lance()
return ds.to_table( return ds.to_table(
@@ -595,22 +874,38 @@ def _sanitize_schema(
return data return data
# cast the columns to the expected types # cast the columns to the expected types
data = data.combine_chunks() data = data.combine_chunks()
data = _sanitize_vector_column( for field in schema:
# TODO: we're making an assumption that fixed size list of 10 or more
# is a vector column. This is definitely a bit hacky.
likely_vector_col = (
pa.types.is_fixed_size_list(field.type)
and pa.types.is_float32(field.type.value_type)
and field.type.list_size >= 10
)
is_default_vector_col = field.name == VECTOR_COLUMN_NAME
if field.name in data.column_names and (
likely_vector_col or is_default_vector_col
):
data = _sanitize_vector_column(
data,
vector_column_name=field.name,
on_bad_vectors=on_bad_vectors,
fill_value=fill_value,
)
return pa.Table.from_arrays(
[data[name] for name in schema.names], schema=schema
)
# just check the vector column
if VECTOR_COLUMN_NAME in data.column_names:
return _sanitize_vector_column(
data, data,
vector_column_name=VECTOR_COLUMN_NAME, vector_column_name=VECTOR_COLUMN_NAME,
on_bad_vectors=on_bad_vectors, on_bad_vectors=on_bad_vectors,
fill_value=fill_value, fill_value=fill_value,
) )
return pa.Table.from_arrays(
[data[name] for name in schema.names], schema=schema return data
)
# just check the vector column
return _sanitize_vector_column(
data,
vector_column_name=VECTOR_COLUMN_NAME,
on_bad_vectors=on_bad_vectors,
fill_value=fill_value,
)
def _sanitize_vector_column( def _sanitize_vector_column(
@@ -634,8 +929,6 @@ def _sanitize_vector_column(
fill_value: float, default 0.0 fill_value: float, default 0.0
The value to use when filling vectors. Only used if on_bad_vectors="fill". The value to use when filling vectors. Only used if on_bad_vectors="fill".
""" """
if vector_column_name not in data.column_names:
raise ValueError(f"Missing vector column: {vector_column_name}")
# ChunkedArray is annoying to work with, so we combine chunks here # ChunkedArray is annoying to work with, so we combine chunks here
vec_arr = data[vector_column_name].combine_chunks() vec_arr = data[vector_column_name].combine_chunks()
if pa.types.is_list(data[vector_column_name].type): if pa.types.is_list(data[vector_column_name].type):

View File

@@ -1,15 +1,16 @@
[project] [project]
name = "lancedb" name = "lancedb"
version = "0.2.0" version = "0.2.2"
dependencies = [ dependencies = [
"pylance==0.6.1", "pylance==0.6.5",
"ratelimiter", "ratelimiter",
"retry", "retry",
"tqdm", "tqdm",
"aiohttp", "aiohttp",
"pydantic", "pydantic",
"attr", "attr",
"semver>=3.0" "semver>=3.0",
"cachetools"
] ]
description = "lancedb" description = "lancedb"
authors = [{ name = "LanceDB Devs", email = "dev@lancedb.com" }] authors = [{ name = "LanceDB Devs", email = "dev@lancedb.com" }]

View File

@@ -17,7 +17,7 @@ import pyarrow as pa
import pytest import pytest
import lancedb import lancedb
from lancedb.pydantic import LanceModel from lancedb.pydantic import LanceModel, vector
def test_basic(tmp_path): def test_basic(tmp_path):
@@ -77,35 +77,78 @@ def test_ingest_pd(tmp_path):
assert db.open_table("test").name == db["test"].name assert db.open_table("test").name == db["test"].name
def test_ingest_record_batch_iterator(tmp_path): def test_ingest_iterator(tmp_path):
def batch_reader(): class PydanticSchema(LanceModel):
for i in range(5): vector: vector(2)
yield pa.RecordBatch.from_arrays( item: str
[ price: float
pa.array([[3.1, 4.1], [5.9, 26.5]]),
pa.array(["foo", "bar"]),
pa.array([10.0, 20.0]),
],
["vector", "item", "price"],
)
db = lancedb.connect(tmp_path) arrow_schema = pa.schema(
tbl = db.create_table( [
"test", pa.field("vector", pa.list_(pa.float32(), 2)),
batch_reader(), pa.field("item", pa.utf8()),
schema=pa.schema( pa.field("price", pa.float32()),
[ ]
pa.field("vector", pa.list_(pa.float32())),
pa.field("item", pa.utf8()),
pa.field("price", pa.float32()),
]
),
) )
tbl_len = len(tbl) def make_batches():
tbl.add(batch_reader()) for _ in range(5):
assert len(tbl) == tbl_len * 2 yield from [
assert len(tbl.list_versions()) == 2 # pandas
pd.DataFrame(
{
"vector": [[3.1, 4.1], [1, 1]],
"item": ["foo", "bar"],
"price": [10.0, 20.0],
}
),
# pylist
[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
],
# recordbatch
pa.RecordBatch.from_arrays(
[
pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)),
pa.array(["foo", "bar"]),
pa.array([10.0, 20.0]),
],
["vector", "item", "price"],
),
# pa Table
pa.Table.from_arrays(
[
pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)),
pa.array(["foo", "bar"]),
pa.array([10.0, 20.0]),
],
["vector", "item", "price"],
),
# pydantic list
[
PydanticSchema(vector=[3.1, 4.1], item="foo", price=10.0),
PydanticSchema(vector=[5.9, 26.5], item="bar", price=20.0),
]
# TODO: test pydict separately. it is unique column number and names contraint
]
def run_tests(schema):
db = lancedb.connect(tmp_path)
tbl = db.create_table("table2", make_batches(), schema=schema, mode="overwrite")
tbl.to_pandas()
assert tbl.search([3.1, 4.1]).limit(1).to_df()["_distance"][0] == 0.0
assert tbl.search([5.9, 26.5]).limit(1).to_df()["_distance"][0] == 0.0
tbl_len = len(tbl)
tbl.add(make_batches())
assert len(tbl) == tbl_len * 2
assert len(tbl.list_versions()) == 3
db.drop_database()
run_tests(arrow_schema)
run_tests(PydanticSchema)
def test_create_mode(tmp_path): def test_create_mode(tmp_path):

View File

@@ -12,10 +12,12 @@
# limitations under the License. # limitations under the License.
import sys import sys
import lance
import numpy as np import numpy as np
import pyarrow as pa import pyarrow as pa
from lancedb.embeddings import with_embeddings from lancedb.conftest import MockEmbeddingFunction
from lancedb.embeddings import EmbeddingFunctionRegistry, with_embeddings
def mock_embed_func(input_data): def mock_embed_func(input_data):
@@ -40,3 +42,37 @@ def test_with_embeddings():
assert data.column_names == ["text", "price", "vector"] assert data.column_names == ["text", "price", "vector"]
assert data.column("text").to_pylist() == ["foo", "bar"] assert data.column("text").to_pylist() == ["foo", "bar"]
assert data.column("price").to_pylist() == [10.0, 20.0] assert data.column("price").to_pylist() == [10.0, 20.0]
def test_embedding_function(tmp_path):
registry = EmbeddingFunctionRegistry.get_instance()
# let's create a table
table = pa.table(
{
"text": pa.array(["hello world", "goodbye world"]),
"vector": [np.random.randn(10), np.random.randn(10)],
}
)
func = MockEmbeddingFunction(source_column="text", vector_column="vector")
metadata = registry.get_table_metadata([func])
table = table.replace_schema_metadata(metadata)
# Write it to disk
lance.write_dataset(table, tmp_path / "test.lance")
# Load this back
ds = lance.dataset(tmp_path / "test.lance")
# can we get the serialized version back out?
functions = registry.parse_functions(ds.schema.metadata)
func = functions["vector"]
actual = func("hello world")
# We create an instance
expected_func = MockEmbeddingFunction(source_column="text", vector_column="vector")
# And we make sure we can call it
expected = expected_func("hello world")
assert np.allclose(actual, expected)

View File

@@ -21,7 +21,7 @@ import pytest
from lancedb.db import LanceDBConnection from lancedb.db import LanceDBConnection
from lancedb.pydantic import LanceModel, vector from lancedb.pydantic import LanceModel, vector
from lancedb.query import LanceQueryBuilder, Query from lancedb.query import LanceVectorQueryBuilder, Query
from lancedb.table import LanceTable from lancedb.table import LanceTable
@@ -72,7 +72,7 @@ def test_cast(table):
str_field: str str_field: str
float_field: float float_field: float
q = LanceQueryBuilder(table, [0, 0], "vector").limit(1) q = LanceVectorQueryBuilder(table, [0, 0], "vector").limit(1)
results = q.to_pydantic(TestModel) results = q.to_pydantic(TestModel)
assert len(results) == 1 assert len(results) == 1
r0 = results[0] r0 = results[0]
@@ -84,13 +84,15 @@ def test_cast(table):
def test_query_builder(table): def test_query_builder(table):
df = LanceQueryBuilder(table, [0, 0], "vector").limit(1).select(["id"]).to_df() df = (
LanceVectorQueryBuilder(table, [0, 0], "vector").limit(1).select(["id"]).to_df()
)
assert df["id"].values[0] == 1 assert df["id"].values[0] == 1
assert all(df["vector"].values[0] == [1, 2]) assert all(df["vector"].values[0] == [1, 2])
def test_query_builder_with_filter(table): def test_query_builder_with_filter(table):
df = LanceQueryBuilder(table, [0, 0], "vector").where("id = 2").to_df() df = LanceVectorQueryBuilder(table, [0, 0], "vector").where("id = 2").to_df()
assert df["id"].values[0] == 2 assert df["id"].values[0] == 2
assert all(df["vector"].values[0] == [3, 4]) assert all(df["vector"].values[0] == [3, 4])
@@ -98,12 +100,14 @@ def test_query_builder_with_filter(table):
def test_query_builder_with_metric(table): def test_query_builder_with_metric(table):
query = [4, 8] query = [4, 8]
vector_column_name = "vector" vector_column_name = "vector"
df_default = LanceQueryBuilder(table, query, vector_column_name).to_df() df_default = LanceVectorQueryBuilder(table, query, vector_column_name).to_df()
df_l2 = LanceQueryBuilder(table, query, vector_column_name).metric("L2").to_df() df_l2 = (
LanceVectorQueryBuilder(table, query, vector_column_name).metric("L2").to_df()
)
tm.assert_frame_equal(df_default, df_l2) tm.assert_frame_equal(df_default, df_l2)
df_cosine = ( df_cosine = (
LanceQueryBuilder(table, query, vector_column_name) LanceVectorQueryBuilder(table, query, vector_column_name)
.metric("cosine") .metric("cosine")
.limit(1) .limit(1)
.to_df() .to_df()
@@ -120,7 +124,7 @@ def test_query_builder_with_different_vector_column():
query = [4, 8] query = [4, 8]
vector_column_name = "foo_vector" vector_column_name = "foo_vector"
builder = ( builder = (
LanceQueryBuilder(table, query, vector_column_name) LanceVectorQueryBuilder(table, query, vector_column_name)
.metric("cosine") .metric("cosine")
.where("b < 10") .where("b < 10")
.select(["b"]) .select(["b"])

View File

@@ -16,11 +16,13 @@ from pathlib import Path
from typing import List from typing import List
from unittest.mock import PropertyMock, patch from unittest.mock import PropertyMock, patch
import lance
import numpy as np import numpy as np
import pandas as pd import pandas as pd
import pyarrow as pa import pyarrow as pa
import pytest import pytest
from lancedb.conftest import MockEmbeddingFunction
from lancedb.db import LanceDBConnection from lancedb.db import LanceDBConnection
from lancedb.pydantic import LanceModel, vector from lancedb.pydantic import LanceModel, vector
from lancedb.table import LanceTable from lancedb.table import LanceTable
@@ -177,16 +179,16 @@ def test_versioning(db):
], ],
) )
assert len(table.list_versions()) == 1
assert table.version == 1
table.add([{"vector": [6.3, 100.5], "item": "new", "price": 30.0}])
assert len(table.list_versions()) == 2 assert len(table.list_versions()) == 2
assert table.version == 2 assert table.version == 2
table.add([{"vector": [6.3, 100.5], "item": "new", "price": 30.0}])
assert len(table.list_versions()) == 3
assert table.version == 3
assert len(table) == 3 assert len(table) == 3
table.checkout(1) table.checkout(2)
assert table.version == 1 assert table.version == 2
assert len(table) == 2 assert len(table) == 2
@@ -268,3 +270,174 @@ def test_add_with_nans(db):
arrow_tbl = table.to_lance().to_table(filter="item == 'bar'") arrow_tbl = table.to_lance().to_table(filter="item == 'bar'")
v = arrow_tbl["vector"].to_pylist()[0] v = arrow_tbl["vector"].to_pylist()[0]
assert np.allclose(v, np.array([0.0, 0.0])) assert np.allclose(v, np.array([0.0, 0.0]))
def test_restore(db):
table = LanceTable.create(
db,
"my_table",
data=[{"vector": [1.1, 0.9], "type": "vector"}],
)
table.add([{"vector": [0.5, 0.2], "type": "vector"}])
table.restore(2)
assert len(table.list_versions()) == 4
assert len(table) == 1
expected = table.to_arrow()
table.checkout(2)
table.restore()
assert len(table.list_versions()) == 5
assert table.to_arrow() == expected
table.restore(5) # latest version should be no-op
assert len(table.list_versions()) == 5
with pytest.raises(ValueError):
table.restore(6)
with pytest.raises(ValueError):
table.restore(0)
def test_merge(db, tmp_path):
table = LanceTable.create(
db,
"my_table",
data=[{"vector": [1.1, 0.9], "id": 0}, {"vector": [1.2, 1.9], "id": 1}],
)
other_table = pa.table({"document": ["foo", "bar"], "id": [0, 1]})
table.merge(other_table, left_on="id")
assert len(table.list_versions()) == 3
expected = pa.table(
{"vector": [[1.1, 0.9], [1.2, 1.9]], "id": [0, 1], "document": ["foo", "bar"]},
schema=table.schema,
)
assert table.to_arrow() == expected
other_dataset = lance.write_dataset(other_table, tmp_path / "other_table.lance")
table.restore(1)
table.merge(other_dataset, left_on="id")
def test_delete(db):
    """delete() with a SQL-like predicate removes matching rows and bumps the version."""
    tbl = LanceTable.create(
        db,
        "my_table",
        data=[{"vector": [1.1, 0.9], "id": 0}, {"vector": [1.2, 1.9], "id": 1}],
    )
    assert len(tbl) == 2
    assert len(tbl.list_versions()) == 2

    tbl.delete("id=0")

    # One new version, one fewer row; only the non-matching row survives.
    assert len(tbl.list_versions()) == 3
    assert tbl.version == 3
    assert len(tbl) == 1
    assert tbl.to_pandas()["id"].tolist() == [1]
def test_update(db):
    # Exercises the (temporary) update feature: update() rewrites rows that
    # match the `where` predicate with the given column values.
    table = LanceTable.create(
        db,
        "my_table",
        data=[{"vector": [1.1, 0.9], "id": 0}, {"vector": [1.2, 1.9], "id": 1}],
    )
    assert len(table) == 2
    assert len(table.list_versions()) == 2
    table.update(where="id=0", values={"vector": [1.1, 1.1]})
    # NOTE(review): the version jumps by two (2 -> 4) — presumably the update is
    # implemented as delete + append, each creating a version; confirm against
    # the LanceTable.update implementation.
    assert len(table.list_versions()) == 4
    assert table.version == 4
    assert len(table) == 2
    # The updated row is re-appended, so row order is now [id=1, id=0]:
    # the unchanged vector comes first, the new [1.1, 1.1] value second.
    v = table.to_arrow()["vector"].combine_chunks()
    v = v.values.to_numpy().reshape(2, 2)
    assert np.allclose(v, np.array([[1.2, 1.9], [1.1, 1.1]]))
def test_create_with_embedding_function(db):
    """A table created with an embedding function can be searched by raw text."""

    class MyTable(LanceModel):
        text: str
        vector: vector(10)

    func = MockEmbeddingFunction(source_column="text", vector_column="vector")
    texts = ["hello world", "goodbye world", "foo bar baz fizz buzz"]
    frame = pd.DataFrame({"text": texts, "vector": func(texts)})

    tbl = LanceTable.create(
        db,
        "my_table",
        schema=MyTable,
        embedding_functions=[func],
    )
    tbl.add(frame)

    # A string query must be embedded automatically and give the same
    # results as searching with the explicitly computed vector.
    query_str = "hi how are you?"
    by_vector = tbl.search(func(query_str)[0]).limit(2).to_arrow()
    by_text = tbl.search(query_str).limit(2).to_arrow()
    assert by_text == by_vector
def test_add_with_embedding_function(db):
    """Rows added without vectors get them computed by the registered embedding function."""

    class MyTable(LanceModel):
        text: str
        vector: vector(10)

    func = MockEmbeddingFunction(source_column="text", vector_column="vector")
    tbl = LanceTable.create(
        db,
        "my_table",
        schema=MyTable,
        embedding_functions=[func],
    )

    # Add vectorless rows two ways: as a DataFrame and as a list of dicts.
    first_batch = ["hello world", "goodbye world", "foo bar baz fizz buzz"]
    tbl.add(pd.DataFrame({"text": first_batch}))
    second_batch = ["the quick brown fox", "jumped over the lazy dog"]
    tbl.add([{"text": t} for t in second_batch])

    # Text search must match an explicit vector search for the same query.
    query_str = "hi how are you?"
    query_vector = func(query_str)[0]
    expected = tbl.search(query_vector).limit(2).to_arrow()
    actual = tbl.search(query_str).limit(2).to_arrow()
    assert actual == expected
def test_multiple_vector_columns(db):
    """search() can target either of two vector columns via vector_column_name."""

    class MyTable(LanceModel):
        text: str
        vector1: vector(10)
        vector2: vector(10)

    tbl = LanceTable.create(
        db,
        "my_table",
        schema=MyTable,
    )

    # The two rows carry the same pair of vectors, swapped between the
    # columns, so the nearest neighbor differs by searched column.
    v1 = np.random.randn(10)
    v2 = np.random.randn(10)
    rows = [
        {"vector1": v1, "vector2": v2, "text": "foo"},
        {"vector1": v2, "vector2": v1, "text": "bar"},
    ]
    tbl.add(pd.DataFrame(rows))

    q = np.random.randn(10)
    hit1 = tbl.search(q, vector_column_name="vector1").limit(1).to_df()
    hit2 = tbl.search(q, vector_column_name="vector2").limit(1).to_df()
    assert hit1["text"].iloc[0] != hit2["text"].iloc[0]
def test_empty_query(db):
    """search() with no query vector still supports select/where/limit."""
    tbl = LanceTable.create(
        db,
        "my_table",
        data=[{"text": "foo", "id": 0}, {"text": "bar", "id": 1}],
    )
    result = tbl.search().select(["id"]).where("text='bar'").limit(1).to_df()
    assert result.id.iloc[0] == 1

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "vectordb-node" name = "vectordb-node"
version = "0.1.19" version = "0.2.4"
description = "Serverless, low-latency vector database for AI applications" description = "Serverless, low-latency vector database for AI applications"
license = "Apache-2.0" license = "Apache-2.0"
edition = "2018" edition = "2018"

View File

@@ -14,60 +14,33 @@
use std::io::Cursor; use std::io::Cursor;
use std::ops::Deref; use std::ops::Deref;
use std::sync::Arc;
use arrow_array::cast::as_list_array; use arrow_array::RecordBatch;
use arrow_array::{Array, ArrayRef, FixedSizeListArray, RecordBatch};
use arrow_ipc::reader::FileReader; use arrow_ipc::reader::FileReader;
use arrow_ipc::writer::FileWriter; use arrow_ipc::writer::FileWriter;
use arrow_schema::{DataType, Field, Schema}; use arrow_schema::SchemaRef;
use lance::arrow::{FixedSizeListArrayExt, RecordBatchExt};
use vectordb::table::VECTOR_COLUMN_NAME; use vectordb::table::VECTOR_COLUMN_NAME;
use crate::error::{MissingColumnSnafu, Result}; use crate::error::{MissingColumnSnafu, Result};
use snafu::prelude::*; use snafu::prelude::*;
pub(crate) fn convert_record_batch(record_batch: RecordBatch) -> Result<RecordBatch> { fn validate_vector_column(record_batch: &RecordBatch) -> Result<()> {
let column = get_column(VECTOR_COLUMN_NAME, &record_batch)?;
// TODO: we should just consume the underlying js buffer in the future instead of this arrow around a bunch of times
let arr = as_list_array(column.as_ref());
let list_size = arr.values().len() / record_batch.num_rows();
let r = FixedSizeListArray::try_new_from_values(arr.values().to_owned(), list_size as i32)?;
let schema = Arc::new(Schema::new(vec![Field::new(
VECTOR_COLUMN_NAME,
DataType::FixedSizeList(
Arc::new(Field::new("item", DataType::Float32, true)),
list_size as i32,
),
true,
)]));
let mut new_batch = RecordBatch::try_new(schema.clone(), vec![Arc::new(r)])?;
if record_batch.num_columns() > 1 {
let rb = record_batch.drop_column(VECTOR_COLUMN_NAME)?;
new_batch = new_batch.merge(&rb)?;
}
Ok(new_batch)
}
fn get_column(column_name: &str, record_batch: &RecordBatch) -> Result<ArrayRef> {
record_batch record_batch
.column_by_name(column_name) .column_by_name(VECTOR_COLUMN_NAME)
.cloned() .map(|_| ())
.context(MissingColumnSnafu { name: column_name }) .context(MissingColumnSnafu { name: VECTOR_COLUMN_NAME })
} }
pub(crate) fn arrow_buffer_to_record_batch(slice: &[u8]) -> Result<Vec<RecordBatch>> { pub(crate) fn arrow_buffer_to_record_batch(slice: &[u8]) -> Result<(Vec<RecordBatch>, SchemaRef)> {
let mut batches: Vec<RecordBatch> = Vec::new(); let mut batches: Vec<RecordBatch> = Vec::new();
let file_reader = FileReader::try_new(Cursor::new(slice), None)?; let file_reader = FileReader::try_new(Cursor::new(slice), None)?;
let schema = file_reader.schema().clone();
for b in file_reader { for b in file_reader {
let record_batch = convert_record_batch(b?)?; let record_batch = b?;
validate_vector_column(&record_batch)?;
batches.push(record_batch); batches.push(record_batch);
} }
Ok(batches) Ok((batches, schema))
} }
pub(crate) fn record_batch_to_buffer(batches: Vec<RecordBatch>) -> Result<Vec<u8>> { pub(crate) fn record_batch_to_buffer(batches: Vec<RecordBatch>) -> Result<Vec<u8>> {

View File

@@ -121,26 +121,28 @@ fn database_table_names(mut cx: FunctionContext) -> JsResult<JsPromise> {
Ok(promise) Ok(promise)
} }
fn get_aws_creds<T>( /// Get AWS creds arguments from the context
/// Consumes 3 arguments
fn get_aws_creds(
cx: &mut FunctionContext, cx: &mut FunctionContext,
arg_starting_location: i32, arg_starting_location: i32,
) -> Result<Option<AwsCredentialProvider>, NeonResult<T>> { ) -> NeonResult<Option<AwsCredentialProvider>> {
let secret_key_id = cx let secret_key_id = cx
.argument_opt(arg_starting_location) .argument_opt(arg_starting_location)
.map(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx).ok()) .filter(|arg| arg.is_a::<JsString, _>(cx))
.flatten() .and_then(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx).ok())
.map(|v| v.value(cx)); .map(|v| v.value(cx));
let secret_key = cx let secret_key = cx
.argument_opt(arg_starting_location + 1) .argument_opt(arg_starting_location + 1)
.map(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx).ok()) .filter(|arg| arg.is_a::<JsString, _>(cx))
.flatten() .and_then(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx).ok())
.map(|v| v.value(cx)); .map(|v| v.value(cx));
let temp_token = cx let temp_token = cx
.argument_opt(arg_starting_location + 2) .argument_opt(arg_starting_location + 2)
.map(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx).ok()) .filter(|arg| arg.is_a::<JsString, _>(cx))
.flatten() .and_then(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx).ok())
.map(|v| v.value(cx)); .map(|v| v.value(cx));
match (secret_key_id, secret_key, temp_token) { match (secret_key_id, secret_key, temp_token) {
@@ -152,7 +154,21 @@ fn get_aws_creds<T>(
}), }),
))), ))),
(None, None, None) => Ok(None), (None, None, None) => Ok(None),
_ => Err(cx.throw_error("Invalid credentials configuration")), _ => cx.throw_error("Invalid credentials configuration"),
}
}
/// Get AWS region arguments from the context
fn get_aws_region(cx: &mut FunctionContext, arg_location: i32) -> NeonResult<Option<String>> {
let region = cx
.argument_opt(arg_location)
.filter(|arg| arg.is_a::<JsString, _>(cx))
.map(|arg| arg.downcast_or_throw::<JsString, FunctionContext>(cx));
match region {
Some(Ok(region)) => Ok(Some(region.value(cx))),
None => Ok(None),
Some(Err(e)) => Err(e),
} }
} }
@@ -162,14 +178,14 @@ fn database_open_table(mut cx: FunctionContext) -> JsResult<JsPromise> {
.downcast_or_throw::<JsBox<JsDatabase>, _>(&mut cx)?; .downcast_or_throw::<JsBox<JsDatabase>, _>(&mut cx)?;
let table_name = cx.argument::<JsString>(0)?.value(&mut cx); let table_name = cx.argument::<JsString>(0)?.value(&mut cx);
let aws_creds = match get_aws_creds(&mut cx, 1) { let aws_creds = get_aws_creds(&mut cx, 1)?;
Ok(creds) => creds,
Err(err) => return err, let aws_region = get_aws_region(&mut cx, 4)?;
};
let params = ReadParams { let params = ReadParams {
store_options: Some(ObjectStoreParams { store_options: Some(ObjectStoreParams {
aws_credentials: aws_creds, aws_credentials: aws_creds,
aws_region,
..ObjectStoreParams::default() ..ObjectStoreParams::default()
}), }),
..ReadParams::default() ..ReadParams::default()

View File

@@ -7,6 +7,7 @@ use lance::index::vector::MetricType;
use neon::context::FunctionContext; use neon::context::FunctionContext;
use neon::handle::Handle; use neon::handle::Handle;
use neon::prelude::*; use neon::prelude::*;
use neon::types::buffer::TypedArray;
use crate::arrow::record_batch_to_buffer; use crate::arrow::record_batch_to_buffer;
use crate::error::ResultExt; use crate::error::ResultExt;
@@ -47,6 +48,11 @@ impl JsQuery {
.map(|s| s.value(&mut cx)) .map(|s| s.value(&mut cx))
.map(|s| MetricType::try_from(s.as_str()).unwrap()); .map(|s| MetricType::try_from(s.as_str()).unwrap());
let is_electron = cx
.argument::<JsBoolean>(1)
.or_throw(&mut cx)?
.value(&mut cx);
let rt = runtime(&mut cx)?; let rt = runtime(&mut cx)?;
let (deferred, promise) = cx.promise(); let (deferred, promise) = cx.promise();
@@ -76,9 +82,26 @@ impl JsQuery {
deferred.settle_with(&channel, move |mut cx| { deferred.settle_with(&channel, move |mut cx| {
let results = results.or_throw(&mut cx)?; let results = results.or_throw(&mut cx)?;
let buffer = record_batch_to_buffer(results).or_throw(&mut cx)?; let buffer = record_batch_to_buffer(results).or_throw(&mut cx)?;
Ok(JsBuffer::external(&mut cx, buffer)) Self::new_js_buffer(buffer, &mut cx, is_electron)
}); });
}); });
Ok(promise) Ok(promise)
} }
// Creates a new JsBuffer from a rust buffer with a special logic for electron
fn new_js_buffer<'a>(
    buffer: Vec<u8>,
    cx: &mut TaskContext<'a>,
    is_electron: bool,
) -> NeonResult<Handle<'a, JsBuffer>> {
    if is_electron {
        // Electron does not support `external`: https://github.com/neon-bindings/neon/pull/937
        // Fall back to allocating a JS-owned buffer and copying the bytes in.
        let mut js_buffer = JsBuffer::new(cx, buffer.len()).or_throw(cx)?;
        let buffer_data = js_buffer.as_mut_slice(cx);
        buffer_data.copy_from_slice(buffer.as_slice());
        Ok(js_buffer)
    } else {
        // Non-electron path: hand ownership of the Rust Vec to JS without copying.
        Ok(JsBuffer::external(cx, buffer))
    }
}
} }

View File

@@ -22,7 +22,7 @@ use neon::types::buffer::TypedArray;
use vectordb::Table; use vectordb::Table;
use crate::error::ResultExt; use crate::error::ResultExt;
use crate::{get_aws_creds, runtime, JsDatabase}; use crate::{get_aws_creds, get_aws_region, runtime, JsDatabase};
pub(crate) struct JsTable { pub(crate) struct JsTable {
pub table: Table, pub table: Table,
@@ -43,8 +43,7 @@ impl JsTable {
.downcast_or_throw::<JsBox<JsDatabase>, _>(&mut cx)?; .downcast_or_throw::<JsBox<JsDatabase>, _>(&mut cx)?;
let table_name = cx.argument::<JsString>(0)?.value(&mut cx); let table_name = cx.argument::<JsString>(0)?.value(&mut cx);
let buffer = cx.argument::<JsBuffer>(1)?; let buffer = cx.argument::<JsBuffer>(1)?;
let batches = arrow_buffer_to_record_batch(buffer.as_slice(&mut cx)).or_throw(&mut cx)?; let (batches, schema) = arrow_buffer_to_record_batch(buffer.as_slice(&mut cx)).or_throw(&mut cx)?;
let schema = batches[0].schema();
// Write mode // Write mode
let mode = match cx.argument::<JsString>(2)?.value(&mut cx).as_str() { let mode = match cx.argument::<JsString>(2)?.value(&mut cx).as_str() {
@@ -62,14 +61,13 @@ impl JsTable {
let (deferred, promise) = cx.promise(); let (deferred, promise) = cx.promise();
let database = db.database.clone(); let database = db.database.clone();
let aws_creds = match get_aws_creds(&mut cx, 3) { let aws_creds = get_aws_creds(&mut cx, 3)?;
Ok(creds) => creds, let aws_region = get_aws_region(&mut cx, 6)?;
Err(err) => return err,
};
let params = WriteParams { let params = WriteParams {
store_params: Some(ObjectStoreParams { store_params: Some(ObjectStoreParams {
aws_credentials: aws_creds, aws_credentials: aws_creds,
aws_region,
..ObjectStoreParams::default() ..ObjectStoreParams::default()
}), }),
mode: mode, mode: mode,
@@ -94,10 +92,7 @@ impl JsTable {
let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?; let js_table = cx.this().downcast_or_throw::<JsBox<JsTable>, _>(&mut cx)?;
let buffer = cx.argument::<JsBuffer>(0)?; let buffer = cx.argument::<JsBuffer>(0)?;
let write_mode = cx.argument::<JsString>(1)?.value(&mut cx); let write_mode = cx.argument::<JsString>(1)?.value(&mut cx);
let (batches, schema) = arrow_buffer_to_record_batch(buffer.as_slice(&mut cx)).or_throw(&mut cx)?;
let batches = arrow_buffer_to_record_batch(buffer.as_slice(&mut cx)).or_throw(&mut cx)?;
let schema = batches[0].schema();
let rt = runtime(&mut cx)?; let rt = runtime(&mut cx)?;
let channel = cx.channel(); let channel = cx.channel();
let mut table = js_table.table.clone(); let mut table = js_table.table.clone();
@@ -109,14 +104,13 @@ impl JsTable {
"overwrite" => WriteMode::Overwrite, "overwrite" => WriteMode::Overwrite,
s => return cx.throw_error(format!("invalid write mode {}", s)), s => return cx.throw_error(format!("invalid write mode {}", s)),
}; };
let aws_creds = match get_aws_creds(&mut cx, 2) { let aws_creds = get_aws_creds(&mut cx, 2)?;
Ok(creds) => creds, let aws_region = get_aws_region(&mut cx, 5)?;
Err(err) => return err,
};
let params = WriteParams { let params = WriteParams {
store_params: Some(ObjectStoreParams { store_params: Some(ObjectStoreParams {
aws_credentials: aws_creds, aws_credentials: aws_creds,
aws_region,
..ObjectStoreParams::default() ..ObjectStoreParams::default()
}), }),
mode: write_mode, mode: write_mode,

View File

@@ -1,10 +1,12 @@
[package] [package]
name = "vectordb" name = "vectordb"
version = "0.1.19" version = "0.2.4"
edition = "2021" edition = "2021"
description = "Serverless, low-latency vector database for AI applications" description = "LanceDB: A serverless, low-latency vector database for AI applications"
license = "Apache-2.0" license = "Apache-2.0"
repository = "https://github.com/lancedb/lancedb" repository = "https://github.com/lancedb/lancedb"
keywords = ["lancedb", "lance", "database", "search"]
categories = ["database-implementations"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]

3
rust/vectordb/README.md Normal file
View File

@@ -0,0 +1,3 @@
# LanceDB Rust
Rust client for LanceDB, a serverless vector database. Read more at: https://lancedb.com/