Compare commits

...

11 Commits

Author SHA1 Message Date
Lance Release
62c5117def [python] Bump version: 0.5.5 → 0.5.6 2024-02-20 20:45:02 +00:00
Bert
22c196b3e3 lance 0.9.18 (#1000) 2024-02-19 15:20:34 -05:00
Johannes Kolbe
1f4ac71fa3 apply fixes for notebook (#989) 2024-02-19 15:36:52 +05:30
Ayush Chaurasia
b5aad2d856 docs: Add meta tag for image preview (#988)
I think this should work. Need to deploy it to be sure as it can't be
tested locally. Can be tested here.

2 things about this solution:
* All pages have the same meta tag, i.e., the LanceDB banner
* If needed, we can automatically use the first image of each page and
generate meta tags using the ultralytics mkdocs plugin that we built for
this purpose - https://github.com/ultralytics/mkdocs
2024-02-19 14:07:31 +05:30
Chang She
ca6f55b160 doc: update navigation links for embedding functions (#986) 2024-02-17 12:12:11 -08:00
Chang She
6f8cf1e068 doc: improve embedding functions documentation (#983)
Got some user feedback that the `implicit` / `explicit` distinction is
confusing.
Instead, I was thinking we would just deprecate the `with_embeddings` API
and then organize working with embeddings into 3 buckets:

1. manually generate embeddings
2. use a provided embedding function
3. define your own custom embedding function
2024-02-17 10:39:28 -08:00
Chang She
e0277383a5 feat(python): add optional threadpool for batch requests (#981)
Currently, if a batch request is given to the remote API, each query is
sent sequentially. We should allow the user to specify a thread pool.
2024-02-16 20:22:22 -08:00
Will Jones
d6b408e26f fix: use static C runtime on Windows (#979)
We depend on the C runtime, but not all Windows machines have it installed,
so it's worth statically linking it.

https://github.com/reorproject/reor/issues/36#issuecomment-1948876463
2024-02-16 15:54:12 -08:00
Will Jones
2447372c1f docs: show DuckDB with dataset, not table (#974)
Using datasets is the preferred way to allow filter and projection pushdown,
as well as aggregating larger-than-memory tables.
2024-02-16 09:18:18 -08:00
Ayush Chaurasia
f0298d8372 docs: Minimal reranking evaluation benchmarks (#977) 2024-02-15 22:16:53 +05:30
Lance Release
54693e6bec Updating package-lock.json 2024-02-14 23:20:59 +00:00
23 changed files with 423 additions and 218 deletions

View File

@@ -33,3 +33,8 @@ rustflags = ["-C", "target-cpu=haswell", "-C", "target-feature=+avx2,+fma,+f16c"
 [target.aarch64-apple-darwin]
 rustflags = ["-C", "target-cpu=apple-m1", "-C", "target-feature=+neon,+fp16,+fhm,+dotprod"]
+# Not all Windows systems have the C runtime installed, so this avoids library
+# not found errors on systems that are missing it.
+[target.x86_64-pc-windows-msvc]
+rustflags = ["-Ctarget-feature=+crt-static"]

View File

@@ -14,10 +14,10 @@ keywords = ["lancedb", "lance", "database", "vector", "search"]
 categories = ["database-implementations"]
 [workspace.dependencies]
-lance = { "version" = "=0.9.16", "features" = ["dynamodb"] }
-lance-index = { "version" = "=0.9.16" }
-lance-linalg = { "version" = "=0.9.16" }
-lance-testing = { "version" = "=0.9.16" }
+lance = { "version" = "=0.9.18", "features" = ["dynamodb"] }
+lance-index = { "version" = "=0.9.18" }
+lance-linalg = { "version" = "=0.9.18" }
+lance-testing = { "version" = "=0.9.18" }
 # Note that this one does not include pyarrow
 arrow = { version = "50.0", optional = false }
 arrow-array = "50.0"

View File

@@ -92,16 +92,16 @@ nav:
 - Full-text search: fts.md
 - Hybrid search:
 - Overview: hybrid_search/hybrid_search.md
+ - Comparing Rerankers: hybrid_search/eval.md
 - Airbnb financial data example: notebooks/hybrid_search.ipynb
 - Filtering: sql.md
 - Versioning & Reproducibility: notebooks/reproducibility.ipynb
 - Configuring Storage: guides/storage.md
 - 🧬 Managing embeddings:
 - Overview: embeddings/index.md
- - Explicit management: embeddings/embedding_explicit.md
- - Implicit management: embeddings/embedding_functions.md
- - Available Functions: embeddings/default_embedding_functions.md
- - Custom Embedding Functions: embeddings/api.md
+ - Embedding functions: embeddings/embedding_functions.md
+ - Available models: embeddings/default_embedding_functions.md
+ - User-defined embedding functions: embeddings/custom_embedding_function.md
 - "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
 - "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
 - 🔌 Integrations:
@@ -156,16 +156,16 @@ nav:
 - Full-text search: fts.md
 - Hybrid search:
 - Overview: hybrid_search/hybrid_search.md
+ - Comparing Rerankers: hybrid_search/eval.md
 - Airbnb financial data example: notebooks/hybrid_search.ipynb
 - Filtering: sql.md
 - Versioning & Reproducibility: notebooks/reproducibility.ipynb
 - Configuring Storage: guides/storage.md
 - Managing Embeddings:
 - Overview: embeddings/index.md
- - Explicit management: embeddings/embedding_explicit.md
- - Implicit management: embeddings/embedding_functions.md
- - Available Functions: embeddings/default_embedding_functions.md
- - Custom Embedding Functions: embeddings/api.md
+ - Embedding functions: embeddings/embedding_functions.md
+ - Available models: embeddings/default_embedding_functions.md
+ - User-defined embedding functions: embeddings/custom_embedding_function.md
 - "Example: Multi-lingual semantic search": notebooks/multi_lingual_example.ipynb
 - "Example: MultiModal CLIP Embeddings": notebooks/DisappearingEmbeddingFunction.ipynb
 - Integrations:
@@ -206,6 +206,7 @@ extra_css:
 extra_javascript:
 - "extra_js/init_ask_ai_widget.js"
+ - "extra_js/meta_tag.js"
 extra:
 analytics:

View File

@@ -1,141 +0,0 @@
In this workflow, you define your own embedding function and pass it as a callable to LanceDB, invoking it in your code to generate the embeddings. Let's look at some examples.
### Hugging Face
!!! note
Currently, the Hugging Face method is only supported in the Python SDK.
=== "Python"
The most popular open source option is to use the [sentence-transformers](https://www.sbert.net/)
library, which can be installed via pip.
```bash
pip install sentence-transformers
```
The example below shows how to use the `paraphrase-albert-small-v2` model to generate embeddings
for a given document.
```python
from sentence_transformers import SentenceTransformer
name="paraphrase-albert-small-v2"
model = SentenceTransformer(name)
# used for both training and querying
def embed_func(batch):
return [model.encode(sentence) for sentence in batch]
```
### OpenAI
Another popular alternative is to use an external API like OpenAI's [embeddings API](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings).
=== "Python"
```python
import openai
import os
# Configuring the environment variable OPENAI_API_KEY
if "OPENAI_API_KEY" not in os.environ:
# OR set the key here as a variable
openai.api_key = "sk-..."
# verify that the API key is working
assert len(openai.Model.list()["data"]) > 0
def embed_func(c):
rs = openai.Embedding.create(input=c, engine="text-embedding-ada-002")
return [record["embedding"] for record in rs["data"]]
```
=== "JavaScript"
```javascript
const lancedb = require("vectordb");
// You need to provide an OpenAI API key
const apiKey = "sk-..."
// The embedding function will create embeddings for the 'text' column
const embedding = new lancedb.OpenAIEmbeddingFunction('text', apiKey)
```
## Applying an embedding function to data
=== "Python"
Using an embedding function, you can apply it to raw data
to generate embeddings for each record.
Say you have a pandas DataFrame with a `text` column that you want embedded,
you can use the `with_embeddings` function to generate embeddings and add them to
an existing table.
```python
import pandas as pd
from lancedb.embeddings import with_embeddings
df = pd.DataFrame(
[
{"text": "pepperoni"},
{"text": "pineapple"}
]
)
data = with_embeddings(embed_func, df)
# The output is used to create / append to a table
# db.create_table("my_table", data=data)
```
If your data is in a different column, you can specify the `column` kwarg to `with_embeddings`.
By default, LanceDB calls the function with batches of 1000 rows. This can be configured
using the `batch_size` parameter to `with_embeddings`.
LanceDB automatically wraps the function with retry and rate-limit logic to ensure the OpenAI
API call is reliable.
=== "JavaScript"
Using an embedding function, you can apply it to raw data
to generate embeddings for each record.
Simply pass the embedding function created above and LanceDB will use it to generate
embeddings for your data.
```javascript
const db = await lancedb.connect("data/sample-lancedb");
const data = [
{ text: "pepperoni"},
{ text: "pineapple"}
]
const table = await db.createTable("vectors", data, embedding)
```
## Querying using an embedding function
!!! warning
At query time, you **must** use the same embedding function you used to vectorize your data.
If you use a different embedding function, the embeddings will not reside in the same vector
space and the results will be nonsensical.
=== "Python"
```python
query = "What's the best pizza topping?"
query_vector = embed_func([query])[0]
results = (
tbl.search(query_vector)
.limit(10)
.to_pandas()
)
```
The above snippet returns a pandas DataFrame with the 10 closest vectors to the query.
=== "JavaScript"
```javascript
const results = await table
.search("What's the best pizza topping?")
.limit(10)
.execute()
```
The above snippet returns an array of records with the top 10 nearest neighbors to the query.

View File

@@ -3,61 +3,126 @@ Representing multi-modal data as vector embeddings is becoming a standard practi
 For this purpose, LanceDB introduces an **embedding functions API** that allows you to set it up once, during the configuration stage of your project. After this, the table remembers it, effectively making the embedding functions *disappear in the background* so you don't have to worry about manually passing callables, and instead simply focus on the rest of your data engineering pipeline.
 !!! warning
-Using the implicit embeddings management approach means that you can forget about the manually passing around embedding
-functions in your code, as long as you don't intend to change it at a later time. If your embedding function changes,
-you'll have to re-configure your table with the new embedding function and regenerate the embeddings.
+Using the embedding function registry means that you don't have to explicitly generate the embeddings yourself.
+However, if your embedding function changes, you'll have to re-configure your table with the new embedding function
+and regenerate the embeddings. In the future, we plan to support the ability to change the embedding function via
+table metadata and have LanceDB automatically take care of regenerating the embeddings.
 ## 1. Define the embedding function
-We have some pre-defined embedding functions in the global registry, with more coming soon. Here's let's an implementation of CLIP as example.
-```
-registry = EmbeddingFunctionRegistry.get_instance()
-clip = registry.get("open-clip").create()
-```
-You can also define your own embedding function by implementing the `EmbeddingFunction` abstract base interface. It subclasses Pydantic Model which can be utilized to write complex schemas simply as we'll see next!
+=== "Python"
+In the LanceDB Python SDK, we define a global embedding function registry with
+many different embedding models and even more coming soon.
+Here's an implementation of CLIP as an example.
+```python
+from lancedb.embeddings import get_registry
+registry = get_registry()
+clip = registry.get("open-clip").create()
+```
+You can also define your own embedding function by implementing the `EmbeddingFunction`
+abstract base interface. It subclasses Pydantic Model, which can be utilized to write complex schemas simply, as we'll see next!
+=== "JavaScript"
+In the TypeScript SDK, the choices are more limited. For now, only the OpenAI
+embedding function is available.
+```javascript
+const lancedb = require("vectordb");
+// You need to provide an OpenAI API key
+const apiKey = "sk-..."
+// The embedding function will create embeddings for the 'text' column
+const embedding = new lancedb.OpenAIEmbeddingFunction('text', apiKey)
+```
 ## 2. Define the data model or schema
-The embedding function defined above abstracts away all the details about the models and dimensions required to define the schema. You can simply set a field as **source** or **vector** column. Here's how:
-```python
-class Pets(LanceModel):
-vector: Vector(clip.ndims) = clip.VectorField()
-image_uri: str = clip.SourceField()
-```
-`VectorField` tells LanceDB to use the clip embedding function to generate query embeddings for the `vector` column and `SourceField` ensures that when adding data, we automatically use the specified embedding function to encode `image_uri`.
-## 3. Create LanceDB table
-Now that we have chosen/defined our embedding function and the schema, we can create the table:
-```python
-db = lancedb.connect("~/lancedb")
-table = db.create_table("pets", schema=Pets)
-```
-That's it! We've provided all the information needed to embed the source and query inputs. We can now forget about the model and dimension details and start to build our VectorDB pipeline.
-## 4. Ingest lots of data and query your table
-Any new or incoming data can just be added and it'll be vectorized automatically.
-```python
-table.add([{"image_uri": u} for u in uris])
-```
-Our OpenCLIP query embedding function supports querying via both text and images:
-```python
-result = table.search("dog")
-```
-Let's query an image:
-```python
-p = Path("path/to/images/samoyed_100.jpg")
-query_image = Image.open(p)
-table.search(query_image)
-```
+=== "Python"
+The embedding function defined above abstracts away all the details about the models and dimensions required to define the schema. You can simply set a field as the **source** or **vector** column. Here's how:
+```python
+class Pets(LanceModel):
+vector: Vector(clip.ndims) = clip.VectorField()
+image_uri: str = clip.SourceField()
+```
+`VectorField` tells LanceDB to use the clip embedding function to generate query embeddings for the `vector` column, and `SourceField` ensures that when adding data, we automatically use the specified embedding function to encode `image_uri`.
+=== "JavaScript"
+For the TypeScript SDK, a schema can be inferred from input data, or an explicit
+Arrow schema can be provided.
+## 3. Create table and add data
+Now that we have chosen/defined our embedding function and the schema,
+we can create the table and ingest data without needing to explicitly generate
+the embeddings at all:
+=== "Python"
+```python
+db = lancedb.connect("~/lancedb")
+table = db.create_table("pets", schema=Pets)
+table.add([{"image_uri": u} for u in uris])
+```
+=== "JavaScript"
+```javascript
+const db = await lancedb.connect("data/sample-lancedb");
+const data = [
+{ text: "pepperoni"},
+{ text: "pineapple"}
+]
+const table = await db.createTable("vectors", data, embedding)
+```
+## 4. Querying your table
+Not only can you forget about the embeddings during ingestion, you also don't
+need to worry about them when you query the table:
+=== "Python"
+Our OpenCLIP query embedding function supports querying via both text and images:
+```python
+results = (
+table.search("dog")
+.limit(10)
+.to_pandas()
+)
+```
+Or we can search using an image:
+```python
+p = Path("path/to/images/samoyed_100.jpg")
+query_image = Image.open(p)
+results = (
+table.search(query_image)
+.limit(10)
+.to_pandas()
+)
+```
+Both of the above snippets return a pandas DataFrame with the 10 closest vectors to the query.
+=== "JavaScript"
+```javascript
+const results = await table
+.search("What's the best pizza topping?")
+.limit(10)
+.execute()
+```
+The above snippet returns an array of records with the top 10 nearest neighbors to the query.
 ---
@@ -100,4 +165,5 @@ rs[2].image
 ![](../assets/dog_clip_output.png)
-Now that you have the basic idea about implicit management via embedding functions, let's dive deeper into a [custom API](./api.md) that you can use to implement your own embedding functions.
+Now that you have the basic idea about LanceDB embedding functions and the embedding function registry,
+let's dive deeper into defining your own [custom functions](./custom_embedding_function.md).

View File

@@ -1,8 +1,14 @@
-Due to the nature of vector embeddings, they can be used to represent any kind of data, from text to images to audio. This makes them a very powerful tool for machine learning practitioners. However, there's no one-size-fits-all solution for generating embeddings - there are many different libraries and APIs (both commercial and open source) that can be used to generate embeddings from structured/unstructured data.
-LanceDB supports 2 methods of vectorizing your raw data into embeddings.
-1. **Explicit**: By manually calling LanceDB's `with_embedding` function to vectorize your data via an `embed_func` of your choice
-2. **Implicit**: Allow LanceDB to embed the data and queries in the background as they come in, by using the table's `EmbeddingRegistry` information
-See the [explicit](embedding_explicit.md) and [implicit](embedding_functions.md) embedding sections for more details.
+Due to the nature of vector embeddings, they can be used to represent any kind of data, from text to images to audio.
+This makes them a very powerful tool for machine learning practitioners.
+However, there's no one-size-fits-all solution for generating embeddings - there are many different libraries and APIs
+(both commercial and open source) that can be used to generate embeddings from structured/unstructured data.
+LanceDB supports 3 methods of working with embeddings.
+1. You can manually generate embeddings for the data and queries. This is done outside of LanceDB.
+2. You can use the built-in [embedding functions](./embedding_functions.md) to embed the data and queries in the background.
+3. For python users, you can define your own [custom embedding function](./custom_embedding_function.md)
+that extends the default embedding functions.
+For python users, there is also a legacy [with_embeddings API](./legacy.md).
+It is retained for compatibility and will be removed in a future version.

View File

@@ -0,0 +1,99 @@
The legacy `with_embeddings` API is for Python only and is deprecated.
### Hugging Face
The most popular open source option is to use the [sentence-transformers](https://www.sbert.net/)
library, which can be installed via pip.
```bash
pip install sentence-transformers
```
The example below shows how to use the `paraphrase-albert-small-v2` model to generate embeddings
for a given document.
```python
from sentence_transformers import SentenceTransformer
name="paraphrase-albert-small-v2"
model = SentenceTransformer(name)
# used for both training and querying
def embed_func(batch):
return [model.encode(sentence) for sentence in batch]
```
### OpenAI
Another popular alternative is to use an external API like OpenAI's [embeddings API](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings).
```python
import openai
import os
# Configuring the environment variable OPENAI_API_KEY
if "OPENAI_API_KEY" not in os.environ:
# OR set the key here as a variable
openai.api_key = "sk-..."
client = openai.OpenAI()
def embed_func(c):
rs = client.embeddings.create(input=c, model="text-embedding-ada-002")
return [record.embedding for record in rs.data]
```
## Applying an embedding function to data
Using an embedding function, you can apply it to raw data
to generate embeddings for each record.
Say you have a pandas DataFrame with a `text` column that you want embedded,
you can use the `with_embeddings` function to generate embeddings and add them to
an existing table.
```python
import pandas as pd
from lancedb.embeddings import with_embeddings
df = pd.DataFrame(
[
{"text": "pepperoni"},
{"text": "pineapple"}
]
)
data = with_embeddings(embed_func, df)
# The output is used to create / append to a table
tbl = db.create_table("my_table", data=data)
```
If your data is in a different column, you can specify the `column` kwarg to `with_embeddings`.
By default, LanceDB calls the function with batches of 1000 rows. This can be configured
using the `batch_size` parameter to `with_embeddings`.
LanceDB automatically wraps the function with retry and rate-limit logic to ensure the OpenAI
API call is reliable.
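As a concrete sketch of those two kwargs (the column name and batch size are illustrative, and `embed_func` is the function defined earlier on this page):
```python
import pandas as pd
from lancedb.embeddings import with_embeddings

# Suppose the text lives in a "body" column instead of the default "text".
df2 = pd.DataFrame([{"body": "pepperoni"}, {"body": "pineapple"}])

data = with_embeddings(
    embed_func,      # embedding function defined above
    df2,
    column="body",   # embed the "body" column rather than the default "text"
    batch_size=256,  # call embed_func with 256 rows at a time instead of 1000
)
```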
## Querying using an embedding function
!!! warning
At query time, you **must** use the same embedding function you used to vectorize your data.
If you use a different embedding function, the embeddings will not reside in the same vector
space and the results will be nonsensical.
=== "Python"
```python
query = "What's the best pizza topping?"
query_vector = embed_func([query])[0]
results = (
tbl.search(query_vector)
.limit(10)
.to_pandas()
)
```
The above snippet returns a pandas DataFrame with the 10 closest vectors to the query.

View File

@@ -1,6 +1,5 @@
 import pickle
 import re
-import sys
 import zipfile
 from pathlib import Path

View File

@@ -0,0 +1,6 @@
window.addEventListener('load', function() {
var meta = document.createElement('meta');
meta.setAttribute('property', 'og:image');
meta.setAttribute('content', '/assets/lancedb_and_lance.png');
document.head.appendChild(meta);
});

View File

@@ -0,0 +1,49 @@
# Hybrid Search
Hybrid Search is a broad (often misused) term. It can mean anything from combining multiple methods for searching, to applying ranking methods to better sort the results. In this blog, we use the definition of "hybrid search" to mean using a combination of keyword-based and vector search.
## The challenge of (re)ranking search results
Once you have a group of the most relevant search results from multiple search sources, you'd likely standardize the scores and rank them accordingly. This process can also be seen as another independent step: reranking.
There are two approaches for reranking search results from multiple sources.
* <b>Score-based</b>: Calculate final relevance scores as a weighted linear combination of the individual search algorithms' scores. Example: a weighted linear combination of semantic search and keyword-based search scores (a minimal sketch follows this list).
* <b>Relevance-based</b>: Discard the existing scores and calculate the relevance of each result-query pair directly. Example: cross-encoder models.
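To make the score-based approach concrete, here is a minimal, library-agnostic sketch. This is an editor's illustration, not LanceDB's reranker API: the `vector_hits`/`keyword_hits` result shape and the `alpha` weight are assumptions.
```python
def weighted_fusion(vector_hits, keyword_hits, alpha=0.7):
    """Combine two result lists of {"id": ..., "score": ...} dicts."""
    def normalize(hits):
        scores = [h["score"] for h in hits]
        lo, hi = min(scores), max(scores)
        span = (hi - lo) or 1.0
        return {h["id"]: (h["score"] - lo) / span for h in hits}

    v, k = normalize(vector_hits), normalize(keyword_hits)
    combined = {
        doc: alpha * v.get(doc, 0.0) + (1 - alpha) * k.get(doc, 0.0)
        for doc in set(v) | set(k)
    }
    # Highest combined score first.
    return sorted(combined.items(), key=lambda x: x[1], reverse=True)
```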
Even though there are many strategies for reranking search results, none works for all cases. Moreover, evaluating them is itself a challenge, and reranking can be dataset- and application-specific, so it's hard to generalize.
### Example evaluation of hybrid search with Reranking
Here are some evaluation numbers from an experiment comparing these rerankers on about 800 queries. It uses a modified version of an evaluation script from [llama-index](https://github.com/run-llama/finetune-embedding/blob/main/evaluate.ipynb) that measures hit rate at top-k.
<b> With OpenAI ada2 embedding </b>
Vector Search baseline - `0.64`
| Reranker | Top-3 | Top-5 | Top-10 |
| --- | --- | --- | --- |
| Linear Combination | `0.73` | `0.74` | `0.85` |
| Cross Encoder | `0.71` | `0.70` | `0.77` |
| Cohere | `0.81` | `0.81` | `0.85` |
| ColBERT | `0.68` | `0.68` | `0.73` |
<p>
<img src="https://github.com/AyushExel/assets/assets/15766192/d57b1780-ef27-414c-a5c3-73bee7808a45">
</p>
<b> With OpenAI embedding-v3-small </b>
Vector Search baseline - `0.59`
| Reranker | Top-3 | Top-5 | Top-10 |
| --- | --- | --- | --- |
| Linear Combination | `0.68` | `0.70` | `0.84` |
| Cross Encoder | `0.72` | `0.72` | `0.79` |
| Cohere | `0.79` | `0.79` | `0.84` |
| ColBERT | `0.70` | `0.70` | `0.76` |
<p>
<img src="https://github.com/AyushExel/assets/assets/15766192/259adfd2-6ec6-4df6-a77d-1456598970dd">
</p>
### Conclusion
The results show that the reranking methods are able to improve the search results. However, the improvement is not consistent across all rerankers. The choice of reranker depends on the dataset and the application. It is also important to note that the reranking methods are not a replacement for the search methods; they are complementary and should be used together to get the best results. The speed-to-recall tradeoff is also an important factor to consider when choosing a reranker.

View File

@@ -290,7 +290,7 @@
"from lancedb.pydantic import LanceModel, Vector\n", "from lancedb.pydantic import LanceModel, Vector\n",
"\n", "\n",
"class Pets(LanceModel):\n", "class Pets(LanceModel):\n",
" vector: Vector(clip.ndims) = clip.VectorField()\n", " vector: Vector(clip.ndims()) = clip.VectorField()\n",
" image_uri: str = clip.SourceField()\n", " image_uri: str = clip.SourceField()\n",
"\n", "\n",
" @property\n", " @property\n",
@@ -360,7 +360,7 @@
" table = db.create_table(\"pets\", schema=Pets)\n", " table = db.create_table(\"pets\", schema=Pets)\n",
" # use a sampling of 1000 images\n", " # use a sampling of 1000 images\n",
" p = Path(\"~/Downloads/images\").expanduser()\n", " p = Path(\"~/Downloads/images\").expanduser()\n",
" uris = [str(f) for f in p.iterdir()]\n", " uris = [str(f) for f in p.glob(\"*.jpg\")]\n",
" uris = sample(uris, 1000)\n", " uris = sample(uris, 1000)\n",
" table.add(pd.DataFrame({\"image_uri\": uris}))" " table.add(pd.DataFrame({\"image_uri\": uris}))"
] ]
@@ -543,7 +543,7 @@
 ],
 "source": [
 "from PIL import Image\n",
-"p = Path(\"/Users/changshe/Downloads/images/samoyed_100.jpg\")\n",
+"p = Path(\"~/Downloads/images/samoyed_100.jpg\").expanduser()\n",
 "query_image = Image.open(p)\n",
 "query_image"
 ]

View File

@@ -23,10 +23,8 @@ from multiprocessing import Pool
 import lance
 import pyarrow as pa
 from datasets import load_dataset
-from PIL import Image
 from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast
-import lancedb
 MODEL_ID = "openai/clip-vit-base-patch32"

View File

@@ -1,6 +1,9 @@
 # DuckDB
-LanceDB is very well-integrated with [DuckDB](https://duckdb.org/), an in-process SQL OLAP database. This integration is done via [Arrow](https://duckdb.org/docs/guides/python/sql_on_arrow).
+In Python, LanceDB tables can also be queried with [DuckDB](https://duckdb.org/), an in-process SQL OLAP database. This means you can write complex SQL queries to analyze your data in LanceDB.
+This integration is done via [Apache Arrow](https://duckdb.org/docs/guides/python/sql_on_arrow), which provides zero-copy data sharing between LanceDB and DuckDB. DuckDB is capable of passing down column selections and basic filters to LanceDB, reducing the amount of data that needs to be scanned to perform your query. Finally, the integration allows streaming data from LanceDB tables, allowing you to aggregate tables that won't fit into memory. All of this uses the same mechanism described in DuckDB's blog post *[DuckDB quacks Arrow](https://duckdb.org/2021/12/03/duck-arrow.html)*.
 We can demonstrate this by first installing `duckdb` and `lancedb`.
@@ -19,14 +22,15 @@ data = [
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0} {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}
] ]
table = db.create_table("pd_table", data=data) table = db.create_table("pd_table", data=data)
arrow_table = table.to_arrow()
``` ```
DuckDB can directly query the `pyarrow.Table` object: To query the table, first call `to_lance` to convert the table to a "dataset", which is an object that can be queried by DuckDB. Then all you need to do is reference that dataset by the same name in your SQL query.
```python ```python
import duckdb import duckdb
arrow_table = table.to_lance()
duckdb.query("SELECT * FROM arrow_table") duckdb.query("SELECT * FROM arrow_table")
``` ```
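To illustrate the pushdown behavior described above, here is a small sketch that builds on the `pd_table` example; the filter and aggregate are illustrative, while `to_lance` and DuckDB's replacement scan over local variables are as shown in the diff.
```python
import duckdb
import lancedb

db = lancedb.connect("data/sample-lancedb")  # same path as the example above
tbl = db.open_table("pd_table")              # the table created above

# Expose the table as a Lance dataset; DuckDB resolves `pd_table` by variable name.
pd_table = tbl.to_lance()

# The column selection and filter can be pushed down to LanceDB, so only the
# "price" column and the matching rows need to be scanned.
print(duckdb.query("SELECT AVG(price) AS avg_price FROM pd_table WHERE price > 15").to_df())
```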

node/package-lock.json (generated): 60 changed lines
View File

@@ -328,6 +328,66 @@
"@jridgewell/sourcemap-codec": "^1.4.10" "@jridgewell/sourcemap-codec": "^1.4.10"
} }
}, },
"node_modules/@lancedb/vectordb-darwin-arm64": {
"version": "0.4.10",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.4.10.tgz",
"integrity": "sha512-y/uHOGb0g15pvqv5tdTyZ6oN+0QVpBmZDzKFWW6pPbuSZjB2uPqcs+ti0RB+AUdmS21kavVQqaNsw/HLKEGrHA==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"darwin"
]
},
"node_modules/@lancedb/vectordb-darwin-x64": {
"version": "0.4.10",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.4.10.tgz",
"integrity": "sha512-XbfR58OkQpAe0xMSTrwJh9ZjGSzG9EZ7zwO6HfYem8PxcLYAcC6eWRWoSG/T0uObyrPTcYYyvHsp0eNQWYBFAQ==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"darwin"
]
},
"node_modules/@lancedb/vectordb-linux-arm64-gnu": {
"version": "0.4.10",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.4.10.tgz",
"integrity": "sha512-x40WKH9b+KxorRmKr9G7fv8p5mMj8QJQvRMA0v6v+nbZHr2FLlAZV+9mvhHOnm4AGIkPP5335cUgv6Qz6hgwkQ==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"linux"
]
},
"node_modules/@lancedb/vectordb-linux-x64-gnu": {
"version": "0.4.10",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.4.10.tgz",
"integrity": "sha512-CTGPpuzlqq2nVjUxI9gAJOT1oBANIovtIaFsOmBSnEAHgX7oeAxKy2b6L/kJzsgqSzvR5vfLwYcWFrr6ZmBxSA==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"linux"
]
},
"node_modules/@lancedb/vectordb-win32-x64-msvc": {
"version": "0.4.10",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.4.10.tgz",
"integrity": "sha512-Fd7r74coZyrKzkfXg4WthqOL+uKyJyPTia6imcrMNqKOlTGdKmHf02Qi2QxWZrFaabkRYo4Tpn5FeRJ3yYX8CA==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"win32"
]
},
"node_modules/@neon-rs/cli": { "node_modules/@neon-rs/cli": {
"version": "0.0.160", "version": "0.0.160",
"resolved": "https://registry.npmjs.org/@neon-rs/cli/-/cli-0.0.160.tgz", "resolved": "https://registry.npmjs.org/@neon-rs/cli/-/cli-0.0.160.tgz",

View File

@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.5.5
+current_version = 0.5.6
 commit = True
 message = [python] Bump version: {current_version} → {new_version}
 tag = True

View File

@@ -13,8 +13,9 @@
 import importlib.metadata
 import os
+from concurrent.futures import ThreadPoolExecutor
 from datetime import timedelta
-from typing import Optional
+from typing import Optional, Union
 __version__ = importlib.metadata.version("lancedb")
@@ -32,6 +33,7 @@ def connect(
region: str = "us-east-1", region: str = "us-east-1",
host_override: Optional[str] = None, host_override: Optional[str] = None,
read_consistency_interval: Optional[timedelta] = None, read_consistency_interval: Optional[timedelta] = None,
request_thread_pool: Optional[Union[int, ThreadPoolExecutor]] = None,
) -> DBConnection: ) -> DBConnection:
"""Connect to a LanceDB database. """Connect to a LanceDB database.
@@ -58,7 +60,14 @@ def connect(
 the last check, then the table will be checked for updates. Note: this
 consistency only applies to read operations. Write operations are
 always consistent.
+request_thread_pool: int or ThreadPoolExecutor, optional
+The thread pool to use for making batch requests to the LanceDB Cloud API.
+If an integer, then a ThreadPoolExecutor will be created with that
+number of threads. If None, then a ThreadPoolExecutor will be created
+with the default number of threads. If a ThreadPoolExecutor, then that
+executor will be used for making requests. This is for LanceDB Cloud
+only and is only used when making batch requests (i.e., passing in
+multiple queries to the search method at once).
 Examples
 --------
@@ -86,5 +95,9 @@ def connect(
 api_key = os.environ.get("LANCEDB_API_KEY")
 if api_key is None:
 raise ValueError(f"api_key is required to connected LanceDB cloud: {uri}")
-return RemoteDBConnection(uri, api_key, region, host_override)
+if isinstance(request_thread_pool, int):
+request_thread_pool = ThreadPoolExecutor(request_thread_pool)
+return RemoteDBConnection(
+uri, api_key, region, host_override, request_thread_pool=request_thread_pool
+)
 return LanceDBConnection(uri, read_consistency_interval=read_consistency_interval)
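For context, here is a minimal sketch of how the new parameter is intended to be used against LanceDB Cloud, based on the docstring above. The URI, API key, and table name are placeholders, and passing a list of query vectors to `search` is assumed from the batch-request path changed later in this diff.
```python
from concurrent.futures import ThreadPoolExecutor

import lancedb

# Either an integer (a pool is created for you) or an existing executor works.
db = lancedb.connect(
    "db://my-project",          # placeholder LanceDB Cloud URI
    api_key="...",              # placeholder API key
    request_thread_pool=ThreadPoolExecutor(8),
)

tbl = db.open_table("my_table")  # placeholder table name

# A batch request: multiple query vectors in one call. With the thread pool
# configured, the per-vector requests are submitted concurrently instead of
# sequentially.
queries = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
results = tbl.search(queries).limit(5).to_arrow()
```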

View File

@@ -26,7 +26,7 @@ import pyarrow as pa
 from lance.vector import vec_to_table
 from retry import retry
-from ..util import safe_import_pandas
+from ..util import deprecated, safe_import_pandas
 from ..utils.general import LOGGER
 pd = safe_import_pandas()
@@ -38,6 +38,7 @@ IMAGES = Union[
 ]
+@deprecated
 def with_embeddings(
 func: Callable,
 data: DATA,

View File

@@ -14,6 +14,7 @@
 import inspect
 import logging
 import uuid
+from concurrent.futures import ThreadPoolExecutor
 from typing import Iterable, List, Optional, Union
 from urllib.parse import urlparse
@@ -39,6 +40,7 @@ class RemoteDBConnection(DBConnection):
 api_key: str,
 region: str,
 host_override: Optional[str] = None,
+request_thread_pool: Optional[ThreadPoolExecutor] = None,
 ):
 """Connect to a remote LanceDB database."""
 parsed = urlparse(db_url)
@@ -49,6 +51,7 @@ class RemoteDBConnection(DBConnection):
 self._client = RestfulLanceDBClient(
 self.db_name, region, api_key, host_override
 )
+self._request_thread_pool = request_thread_pool
 def __repr__(self) -> str:
 return f"RemoteConnect(name={self.db_name})"

View File

@@ -13,6 +13,7 @@
 import logging
 import uuid
+from concurrent.futures import Future
 from functools import cached_property
 from typing import Dict, Optional, Union
@@ -270,15 +271,28 @@ class RemoteTable(Table):
 and len(query.vector) > 0
 and not isinstance(query.vector[0], float)
 ):
+if self._conn._request_thread_pool is None:
+def submit(name, q):
+f = Future()
+f.set_result(self._conn._client.query(name, q))
+return f
+else:
+def submit(name, q):
+return self._conn._request_thread_pool.submit(
+self._conn._client.query, name, q
+)
 results = []
 for v in query.vector:
 v = list(v)
 q = query.copy()
 q.vector = v
-results.append(self._conn._client.query(self._name, q))
+results.append(submit(self._name, q))
 return pa.concat_tables(
-[add_index(r.to_arrow(), i) for i, r in enumerate(results)]
+[add_index(r.result().to_arrow(), i) for i, r in enumerate(results)]
 )
 else:
 result = self._conn._client.query(self._name, query)

View File

@@ -11,9 +11,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import functools
 import importlib
 import os
 import pathlib
+import warnings
 from datetime import date, datetime
 from functools import singledispatch
 from typing import Tuple, Union
@@ -239,3 +241,25 @@ def _(value: list):
 @value_to_sql.register(np.ndarray)
 def _(value: np.ndarray):
 return value_to_sql(value.tolist())
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter("always", DeprecationWarning) # turn off filter
warnings.warn(
(
f"Function {func.__name__} is deprecated and will be "
"removed in a future version"
),
category=DeprecationWarning,
stacklevel=2,
)
warnings.simplefilter("default", DeprecationWarning) # reset filter
return func(*args, **kwargs)
return new_func
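A small usage sketch for the decorator added above (the `old_add` helper is made up for illustration):
```python
@deprecated
def old_add(a, b):
    """A pretend legacy helper kept only for backwards compatibility."""
    return a + b

# Still works, but emits:
# DeprecationWarning: Function old_add is deprecated and will be removed in a future version
old_add(1, 2)
```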

View File

@@ -1,6 +1,6 @@
 [project]
 name = "lancedb"
-version = "0.5.5"
+version = "0.5.6"
 dependencies = [
 "deprecation",
 "pylance==0.9.16",

View File

@@ -803,10 +803,8 @@ def test_count_rows(db):
 assert table.count_rows(filter="text='bar'") == 1
-def test_hybrid_search(db):
-# hardcoding temporarily.. this test is failing with tmp_path mockdb.
-# Probably not being parsed right by the fts
-db = MockDB("~/lancedb_")
+def test_hybrid_search(db, tmp_path):
+db = MockDB(str(tmp_path))
 # Create a LanceDB table schema with a vector and a text column
 emb = EmbeddingFunctionRegistry.get_instance().get("test")()