Compare commits


27 Commits

Author  SHA1  Message  Date
ayush chaurasia  db67b27a42  ruff  2024-04-16 12:11:57 +05:30
ayush chaurasia  4b0820ef15  ruff  2024-04-16 12:09:18 +05:30
ayush chaurasia  c7bb919561  update benchmark script  2024-04-16 11:13:52 +05:30
ayush chaurasia  df404b726e  ruff  2024-04-16 10:00:47 +05:30
ayush chaurasia  ffbb104648  remove protected namespaces  2024-04-16 09:51:08 +05:30
ayush chaurasia  3ebd561fd9  ruff  2024-04-16 09:26:47 +05:30
ayush chaurasia  6bc488f674  update  2024-04-16 09:24:29 +05:30
ayush chaurasia  ea34c0b4c4  update  2024-04-16 09:07:40 +05:30
ayush chaurasia  1a827925eb  update docs  2024-04-16 08:59:36 +05:30
ayush chaurasia  fe5888d661  update usage  2024-04-16 08:24:40 +05:30
ayush chaurasia  6074e6b7ee  update  2024-04-15 17:19:04 +05:30
ayush chaurasia  fd8de238bb  ruff  2024-04-15 17:11:59 +05:30
ayush chaurasia  d0c1113417  add test  2024-04-15 17:07:29 +05:30
ayush chaurasia  3ca96a852f  remove test file  2024-04-15 17:02:58 +05:30
ayush chaurasia  9428c6b565  update  2024-04-15 16:59:16 +05:30
ayush chaurasia  ff00a3242c  update  2024-04-15 07:52:04 +05:30
ayush chaurasia  878deb73a0  update  2024-04-15 07:51:05 +05:30
ayush chaurasia  c75bb65609  update  2024-04-15 05:59:26 +05:30
Weston Pace  c7fbc4aaee  docs: fix minor typo (#1220)  2024-04-14 03:32:57 +05:30
Lance Release  7e023c1ef2  [python] Bump version: 0.6.8 → 0.6.9  2024-04-12 22:09:12 +00:00
Weston Pace  1d0dd9a8b8  feat: bump lance version from 0.10.10 to 0.10.12 (#1219)  2024-04-12 15:08:39 -07:00
Weston Pace  deb947ddbd  doc: fix typo, broken links (#1218)  2024-04-11 14:58:51 -07:00
Ayush Chaurasia  b039765d50  docs : Embedding functions quickstart and minor fixes (#1217)  2024-04-11 17:30:45 +05:30
Prashanth Rao  d155e82723  [docs] Fix broken links and clarify language in integrations docs (#1209)  2024-04-11 15:32:08 +05:30
    This PR does the following:
    - Fixes broken/outdated URLs
    - Adds clarity to the way the DuckDB/LanceDB integration works via Arrow
Ayush Chaurasia  5d8c91256c  fix(python): Update to latest cohere reranking api (#1212)  2024-04-11 15:20:29 +05:30
    Fixes https://github.com/lancedb/lancedb/issues/1196
    Cohere introduced a breaking change in their reranker API starting with version 5.0.0. More context in the discussion here: https://github.com/cohere-ai/cohere-python/issues/446
Ayush Chaurasia  44c03ebef3  docs : Update Reranking docs (#1213)  2024-04-11 15:20:00 +05:30
Will Jones  8ea06fe7f3  ci: fix failures in release scripts (#1215)  2024-04-10 13:09:39 -07:00
    * The Python release has been running when we create a Node release: https://github.com/lancedb/lancedb/actions/runs/8635662585
    * Rust is missing new enough compilers to check the kernels feature: https://github.com/lancedb/lancedb/actions/runs/8635662578
31 changed files with 1722 additions and 43 deletions

View File

@@ -8,6 +8,9 @@ env:
# This env var is used by Swatinem/rust-cache@v2 for the cache
# key, so we set it to make sure it is always consistent.
CARGO_TERM_COLOR: always
# Up-to-date compilers needed for fp16kernels.
CC: gcc-12
CXX: g++-12
jobs:
build:

View File

@@ -6,6 +6,8 @@ on:
jobs:
linux:
# Only runs on tags that match the python-make-release action
if: startsWith(github.ref, 'refs/tags/python-v')
name: Python ${{ matrix.config.platform }} manylinux${{ matrix.config.manylinux }}
timeout-minutes: 60
strategy:
@@ -44,6 +46,8 @@ jobs:
token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
repo: "pypi"
mac:
# Only runs on tags that match the python-make-release action
if: startsWith(github.ref, 'refs/tags/python-v')
timeout-minutes: 60
runs-on: ${{ matrix.config.runner }}
strategy:
@@ -76,6 +80,8 @@ jobs:
token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
repo: "pypi"
windows:
# Only runs on tags that match the python-make-release action
if: startsWith(github.ref, 'refs/tags/python-v')
timeout-minutes: 60
runs-on: windows-latest
strategy:

View File

@@ -14,10 +14,10 @@ keywords = ["lancedb", "lance", "database", "vector", "search"]
categories = ["database-implementations"]
[workspace.dependencies]
lance = { "version" = "=0.10.10", "features" = ["dynamodb"] }
lance-index = { "version" = "=0.10.10" }
lance-linalg = { "version" = "=0.10.10" }
lance-testing = { "version" = "=0.10.10" }
lance = { "version" = "=0.10.12", "features" = ["dynamodb"] }
lance-index = { "version" = "=0.10.12" }
lance-linalg = { "version" = "=0.10.12" }
lance-testing = { "version" = "=0.10.12" }
# Note that this one does not include pyarrow
arrow = { version = "50.0", optional = false }
arrow-array = "50.0"

View File

@@ -0,0 +1,280 @@
import argparse
import os
from llama_index.core import SimpleDirectoryReader
from llama_index.core.llama_dataset import LabelledRagDataset
from llama_index.core.node_parser import SentenceSplitter
from lancedb.embeddings.fine_tuner.dataset import QADataset, TextChunk
from lancedb.embeddings.fine_tuner.llm import Openai
from lancedb.embeddings import get_registry
from llama_index.vector_stores.lancedb import LanceDBVectorStore
from llama_index.core import ServiceContext, VectorStoreIndex, StorageContext
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.llama_pack import download_llama_pack
import time
import wandb
import pandas as pd
def get_paths_from_dataset(dataset: str, split=True):
"""
Returns paths of:
- downloaded dataset, lance train dataset, lance test dataset, finetuned model
"""
if split:
return (
f"./data/{dataset}",
f"./data/{dataset}_lance_train",
f"./data/{dataset}_lance_test",
f"./data/tuned_{dataset}",
)
return f"./data/{dataset}", f"./data/{dataset}_lance", f"./data/tuned_{dataset}"
def get_llama_dataset(dataset: str):
"""
returns:
- nodes, documents, rag_dataset
"""
if not os.path.exists(f"./data/{dataset}"):
os.system(
f"llamaindex-cli download-llamadataset {dataset} --download-dir ./data/{dataset}" # noqa
)
rag_dataset = LabelledRagDataset.from_json(f"./data/{dataset}/rag_dataset.json")
docs = SimpleDirectoryReader(input_dir=f"./data/{dataset}/source_files").load_data()
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(docs)
return nodes, docs, rag_dataset
def lance_dataset_from_llama_nodes(nodes: list, name: str, split=True):
llm = Openai()
chunks = [TextChunk.from_llama_index_node(node) for node in nodes]
# train test split 65-35
if not split:
if os.path.exists(f"./data/{name}_lance"):
ds = QADataset.load(f"./data/{name}_lance")
return ds
ds = QADataset.from_llm(chunks, llm)
ds.save(f"./data/{name}_lance")
return ds
if os.path.exists(f"./data/{name}_lance_train") and os.path.exists(
f"./data/{name}_lance_test"
):
train_ds = QADataset.load(f"./data/{name}_lance_train")
test_ds = QADataset.load(f"./data/{name}_lance_test")
return train_ds, test_ds
# split chunks into train and test (65-35)
train_size = int(len(chunks) * 0.65)
train_chunks = chunks[:train_size]
test_chunks = chunks[train_size:]
train_ds = QADataset.from_llm(train_chunks, llm)
test_ds = QADataset.from_llm(test_chunks, llm)
train_ds.save(f"./data/{name}_lance_train")
test_ds.save(f"./data/{name}_lance_test")
return train_ds, test_ds
def finetune(
trainset: QADataset, model: str, epochs: int, path: str, valset: QADataset = None, top_k=5
):
valset = valset if valset is not None else trainset
print(f"Finetuning {model} for {epochs} epochs")
print(f"trainset query instances: {len(trainset.queries)}")
print(f"valset query instances: {len(valset.queries)}")
model = get_registry().get("sentence-transformers").create(name=model)
base_result = model.evaluate(valset, path="./data/eval/", top_k=top_k)
base_hit_rate = pd.DataFrame(base_result)["is_hit"].mean()
model.finetune(trainset=trainset, valset=valset, path=path, epochs=epochs)
tuned = get_registry().get("sentence-transformers").create(name=path)
tuned_result = tuned.evaluate(
valset, path=f"./data/eval/{str(time.time())}", top_k=top_k
)
tuned_hit_rate = pd.DataFrame(tuned_result)["is_hit"].mean()
return base_hit_rate, tuned_hit_rate
def do_eval_rag(dataset: str, model: str):
# Requires - pip install llama-index-vector-stores-lancedb
# Requires - pip install llama-index-embeddings-huggingface
nodes, docs, rag_dataset = get_llama_dataset(dataset)
embed_model = HuggingFaceEmbedding(model)
vector_store = LanceDBVectorStore(uri="/tmp/lancedb")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
index = VectorStoreIndex(
nodes,
service_context=service_context,
show_progress=True,
storage_context=storage_context,
)
# build basic RAG system
index = VectorStoreIndex.from_documents(documents=docs)
query_engine = index.as_query_engine()
# evaluate using the RagEvaluatorPack
RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./rag_evaluator_pack")
rag_evaluator_pack = RagEvaluatorPack(
rag_dataset=rag_dataset, query_engine=query_engine
)
metrics = rag_evaluator_pack.run(
batch_size=20, # batches the number of openai api calls to make
sleep_time_in_seconds=1, # seconds to sleep before making an api call
)
return metrics
def main(
dataset,
model,
epochs,
top_k=5,
eval_rag=False,
split=True,
project: str = "lancedb_finetune",
):
nodes, _, _ = get_llama_dataset(dataset)
trainset = None
valset = None
if split:
trainset, valset = lance_dataset_from_llama_nodes(nodes, dataset, split)
data_path, lance_train_path, lance_test_path, tuned_path = (
get_paths_from_dataset(dataset, split=split)
)
else:
trainset = lance_dataset_from_llama_nodes(nodes, dataset, split)
valset = trainset
data_path, lance_path, tuned_path = get_paths_from_dataset(dataset, split=split)
base_hit_rate, tuned_hit_rate = finetune(
trainset, model, epochs, tuned_path, valset, top_k=top_k
)
# Base model metrics
metrics = do_eval_rag(dataset, model) if eval_rag else {}
# Tuned model metrics
metrics_tuned = do_eval_rag(dataset, tuned_path) if eval_rag else {}
wandb.init(project=project, name=f"{dataset}_{model}_{epochs}")
wandb.log(
{
"hit_rate": tuned_hit_rate,
}
)
wandb.log(metrics_tuned)
wandb.finish()
wandb.init(project=project, name=f"{dataset}_{model}_base")
wandb.log(
{
"hit_rate": base_hit_rate,
}
)
wandb.log(metrics)
wandb.finish()
def benchmark_all():
datasets = [
"Uber10KDataset2021",
"MiniTruthfulQADataset",
"MiniSquadV2Dataset",
"MiniEsgBenchDataset",
"MiniCovidQaDataset",
"Llama2PaperDataset",
"HistoryOfAlexnetDataset",
"PatronusAIFinanceBenchDataset",
]
models = ["BAAI/bge-small-en-v1.5"]
top_ks = [5]
for top_k in top_ks:
for model in models:
for dataset in datasets:
main(dataset, model, 5, top_k=top_k)
if __name__ == "__main__":
"""
Benchmark the fine-tuning process for a given dataset and model.
Usage:
- For a single dataset
python lancedb/docs/benchmarks/llama-index-datasets.py --dataset Uber10KDataset2021 --model BAAI/bge-small-en-v1.5 --epochs 4 --top_k 5 --split 1 --project lancedb_finetune
- For all datasets and models across all top_ks
python lancedb/docs/benchmarks/llama-index-datasets.py --benchmark-all
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset",
type=str,
default="BraintrustCodaHelpDeskDataset",
help="The dataset to use for fine-tuning",
)
parser.add_argument(
"--model",
type=str,
default="BAAI/bge-small-en-v1.5",
help="The model to use for fine-tuning",
)
parser.add_argument(
"--epochs",
type=int,
default=4,
help="The number of epochs to fine-tune the model",
)
parser.add_argument(
"--project",
type=str,
default="lancedb_finetune",
help="The wandb project to log the results",
)
parser.add_argument(
"--top_k", type=int, default=5, help="The number of top results to evaluate"
)
parser.add_argument(
"--split",
type=int,
default=1,
help="Whether to split the dataset into train and test(65-35 split), default is 1",
)
parser.add_argument(
"--eval-rag",
action="store_true",
default=False,
help="Whether to evaluate the model using RAG",
)
parser.add_argument(
"--benchmark-all",
action="store_true",
default=False,
help="Benchmark all datasets across all models and top_ks",
)
args = parser.parse_args()
if args.benchmark_all:
benchmark_all()
else:
main(
args.dataset,
args.model,
args.epochs,
args.top_k,
args.eval_rag,
args.split,
args.project,
)

View File

@@ -57,16 +57,6 @@ plugins:
- https://arrow.apache.org/docs/objects.inv
- https://pandas.pydata.org/docs/objects.inv
- mkdocs-jupyter
- ultralytics:
verbose: True
enabled: True
default_image: "assets/lancedb_and_lance.png" # Default image for all pages
add_image: True # Automatically add meta image
add_keywords: True # Add page keywords in the header tag
add_share_buttons: True # Add social share buttons
add_authors: False # Display page authors
add_desc: False
add_dates: False
markdown_extensions:
- admonition
@@ -104,6 +94,14 @@ nav:
- Overview: hybrid_search/hybrid_search.md
- Comparing Rerankers: hybrid_search/eval.md
- Airbnb financial data example: notebooks/hybrid_search.ipynb
- Reranking:
- Quickstart: reranking/index.md
- Cohere Reranker: reranking/cohere.md
- Linear Combination Reranker: reranking/linear_combination.md
- Cross Encoder Reranker: reranking/cross_encoder.md
- ColBERT Reranker: reranking/colbert.md
- OpenAI Reranker: reranking/openai.md
- Building Custom Rerankers: reranking/custom_reranker.md
- Filtering: sql.md
- Versioning & Reproducibility: notebooks/reproducibility.ipynb
- Configuring Storage: guides/storage.md
@@ -120,9 +118,10 @@ nav:
- Pandas and PyArrow: python/pandas_and_pyarrow.md
- Polars: python/polars_arrow.md
- DuckDB: python/duckdb.md
- LangChain 🔗: https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lancedb.html
- LangChain JS/TS 🔗: https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/lancedb
- LlamaIndex 🦙: https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html
- LangChain:
- LangChain 🔗: https://python.langchain.com/docs/integrations/vectorstores/lancedb/
- LangChain JS/TS 🔗: https://js.langchain.com/docs/integrations/vectorstores/lancedb
- LlamaIndex 🦙: https://docs.llamaindex.ai/en/stable/examples/vector_stores/LanceDBIndexDemo/
- Pydantic: python/pydantic.md
- Voxel51: integrations/voxel51.md
- PromptTools: integrations/prompttools.md
@@ -170,6 +169,14 @@ nav:
- Overview: hybrid_search/hybrid_search.md
- Comparing Rerankers: hybrid_search/eval.md
- Airbnb financial data example: notebooks/hybrid_search.ipynb
- Reranking:
- Quickstart: reranking/index.md
- Cohere Reranker: reranking/cohere.md
- Linear Combination Reranker: reranking/linear_combination.md
- Cross Encoder Reranker: reranking/cross_encoder.md
- ColBERT Reranker: reranking/colbert.md
- OpenAI Reranker: reranking/openai.md
- Building Custom Rerankers: reranking/custom_reranker.md
- Filtering: sql.md
- Versioning & Reproducibility: notebooks/reproducibility.ipynb
- Configuring Storage: guides/storage.md
@@ -186,8 +193,8 @@ nav:
- Pandas and PyArrow: python/pandas_and_pyarrow.md
- Polars: python/polars_arrow.md
- DuckDB: python/duckdb.md
- LangChain 🦜️🔗↗: https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lancedb.html
- LangChain.js 🦜️🔗↗: https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/lancedb
- LangChain 🦜️🔗↗: https://python.langchain.com/docs/integrations/vectorstores/lancedb
- LangChain.js 🦜️🔗↗: https://js.langchain.com/docs/integrations/vectorstores/lancedb
- LlamaIndex 🦙↗: https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html
- Pydantic: python/pydantic.md
- Voxel51: integrations/voxel51.md

View File

@@ -2,5 +2,4 @@ mkdocs==1.5.3
mkdocs-jupyter==0.24.1
mkdocs-material==9.5.3
mkdocstrings[python]==0.20.0
pydantic
mkdocs-ultralytics-plugin==0.0.44
pydantic

View File

@@ -154,9 +154,12 @@ Allows you to set parameters when registering a `sentence-transformers` object.
!!! note "BAAI Embeddings example"
Here is an example that uses a BAAI embedding model from the HuggingFace Hub [supported models](https://huggingface.co/models?library=sentence-transformers).
```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
db = lancedb.connect("/tmp/db")
registry = EmbeddingFunctionRegistry.get_instance()
model = registry.get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
model = get_registry().get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
class Words(LanceModel):
text: str = model.SourceField()
@@ -165,7 +168,7 @@ Allows you to set parameters when registering a `sentence-transformers` object.
table = db.create_table("words", schema=Words)
table.add(
[
{"text": "hello world"}
{"text": "hello world"},
{"text": "goodbye world"}
]
)
@@ -213,18 +216,21 @@ LanceDB registers the OpenAI embeddings function in the registry by default, as
```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
db = lancedb.connect("/tmp/db")
registry = EmbeddingFunctionRegistry.get_instance()
func = registry.get("openai").create()
func = get_registry().get("openai").create(name="text-embedding-ada-002")
class Words(LanceModel):
text: str = func.SourceField()
vector: Vector(func.ndims()) = func.VectorField()
table = db.create_table("words", schema=Words)
table = db.create_table("words", schema=Words, mode="overwrite")
table.add(
[
{"text": "hello world"}
{"text": "hello world"},
{"text": "goodbye world"}
]
)
@@ -353,6 +359,10 @@ Supported parameters (to be passed in `create` method) are:
Usage Example:
```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
model = get_registry().get("bedrock-text").create()
class TextModel(LanceModel):
@@ -387,10 +397,12 @@ This embedding function supports ingesting images as both bytes and urls. You ca
LanceDB supports ingesting images directly from accessible links.
```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
db = lancedb.connect(tmp_path)
registry = EmbeddingFunctionRegistry.get_instance()
func = registry.get("open-clip").create()
func = get_registry().get("open-clip").create()
class Images(LanceModel):
label: str
@@ -465,9 +477,12 @@ This function is registered as `imagebind` and supports Audio, Video and Text mo
Below is an example demonstrating how the API works:
```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
db = lancedb.connect(tmp_path)
registry = EmbeddingFunctionRegistry.get_instance()
func = registry.get("imagebind").create()
func = get_registry().get("imagebind").create()
class ImageBindModel(LanceModel):
text: str

View File

@@ -11,4 +11,64 @@ LanceDB supports 3 methods of working with embeddings.
that extends the default embedding functions.
For python users, there is also a legacy [with_embeddings API](./legacy.md).
It is retained for compatibility and will be removed in a future version.
## Quickstart
To get started with embeddings, you can use the built-in embedding functions.
### OpenAI Embedding function
LanceDB registers the OpenAI embeddings function in the registry as `openai`. You can pass any supported model name to the `create` method. By default, it uses `"text-embedding-ada-002"`.
```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
db = lancedb.connect("/tmp/db")
func = get_registry().get("openai").create(name="text-embedding-ada-002")
class Words(LanceModel):
text: str = func.SourceField()
vector: Vector(func.ndims()) = func.VectorField()
table = db.create_table("words", schema=Words, mode="overwrite")
table.add(
[
{"text": "hello world"},
{"text": "goodbye world"}
]
)
query = "greetings"
actual = table.search(query).limit(1).to_pydantic(Words)[0]
print(actual.text)
```
### Sentence Transformers Embedding function
LanceDB registers the Sentence Transformers embeddings function in the registry as `sentence-transformers`. You can pass any supported model name to the `create` method. By default, it uses `"sentence-transformers/paraphrase-MiniLM-L6-v2"`.
```python
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
db = lancedb.connect("/tmp/db")
model = get_registry().get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
class Words(LanceModel):
text: str = model.SourceField()
vector: Vector(model.ndims()) = model.VectorField()
table = db.create_table("words", schema=Words)
table.add(
[
{"text": "hello world"},
{"text": "goodbye world"}
]
)
query = "greetings"
actual = table.search(query).limit(1).to_pydantic(Words)[0]
print(actual.text)
```

View File

@@ -24,7 +24,8 @@ data = [
table = db.create_table("pd_table", data=data)
```
To query the table, first call `to_lance` to convert the table to a "dataset", which is an object that can be queried by DuckDB. Then all you need to do is reference that dataset by the same name in your SQL query.
The `to_lance` method converts the LanceDB table to a `LanceDataset`, which is accessible to DuckDB through the Arrow compatibility layer.
To query the resulting Lance dataset in DuckDB, all you need to do is reference the dataset by the same name in your SQL query.
```python
import duckdb
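# (The hunk above is truncated.) A hedged sketch of the full pattern,
# assuming the `table` object created earlier on this page:
arrow_table = table.to_lance()
# DuckDB's Arrow integration lets SQL reference the Python variable
# `arrow_table` directly in the FROM clause.
print(duckdb.query("SELECT * FROM arrow_table").to_df())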

View File

@@ -0,0 +1,75 @@
# Cohere Reranker
This re-ranker uses the [Cohere](https://cohere.ai/) API to rerank the search results. You can use this re-ranker by passing `CohereReranker()` to the `rerank()` method. Note that you'll either need to set the `COHERE_API_KEY` environment variable or pass the `api_key` argument to use this re-ranker.
!!! note
Supported Query Types: Hybrid, Vector, FTS
```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import CohereReranker
embedder = get_registry().get("sentence-transformers").create()
db = lancedb.connect("~/.lancedb")
class Schema(LanceModel):
text: str = embedder.SourceField()
vector: Vector(embedder.ndims()) = embedder.VectorField()
data = [
{"text": "hello world"},
{"text": "goodbye world"}
]
tbl = db.create_table("test", schema=Schema, mode="overwrite")
tbl.add(data)
reranker = CohereReranker(api_key="key")
# Create the FTS index used by full-text and hybrid search
tbl.create_fts_index("text", replace=True)
# Run vector search with a reranker
result = tbl.search("hello").rerank(reranker=reranker).to_list()
# Run FTS search with a reranker
result = tbl.search("hello", query_type="fts").rerank(reranker=reranker).to_list()
# Run hybrid search with a reranker
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
```
Accepted Arguments
----------------
| Argument | Type | Default | Description |
| --- | --- | --- | --- |
| `model_name` | `str` | `"rerank-english-v2.0"` | The name of the reranker model to use. Available cohere models are: rerank-english-v2.0, rerank-multilingual-v2.0 |
| `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
| `top_n` | `str` | `None` | The number of results to return. If None, will return all results. |
| `api_key` | `str` | `None` | The API key for the Cohere API. If not provided, the `COHERE_API_KEY` environment variable is used. |
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
## Supported Scores for each query type
You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:
### Hybrid Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ❌ Not Supported | Would return the vector (`_distance`) and FTS (`score`) scores along with the relevance score (`_relevance_score`) |
### Vector Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the vector score (`_distance`) along with the relevance score (`_relevance_score`) |
### FTS Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the FTS score (`score`) along with the relevance score (`_relevance_score`) |

View File

@@ -0,0 +1,71 @@
# ColBERT Reranker
This re-ranker uses the ColBERT model to rerank the search results. You can use this re-ranker by passing `ColbertReranker()` to the `rerank()` method.
!!! note
Supported Query Types: Hybrid, Vector, FTS
```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import ColbertReranker
embedder = get_registry().get("sentence-transformers").create()
db = lancedb.connect("~/.lancedb")
class Schema(LanceModel):
text: str = embedder.SourceField()
vector: Vector(embedder.ndims()) = embedder.VectorField()
data = [
{"text": "hello world"},
{"text": "goodbye world"}
]
tbl = db.create_table("test", schema=Schema, mode="overwrite")
tbl.add(data)
reranker = ColbertReranker()
# Create the FTS index used by full-text and hybrid search
tbl.create_fts_index("text", replace=True)
# Run vector search with a reranker
result = tbl.search("hello").rerank(reranker=reranker).to_list()
# Run FTS search with a reranker
result = tbl.search("hello", query_type="fts").rerank(reranker=reranker).to_list()
# Run hybrid search with a reranker
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
```
Accepted Arguments
----------------
| Argument | Type | Default | Description |
| --- | --- | --- | --- |
| `model_name` | `str` | `"colbert-ir/colbertv2.0"` | The name of the reranker model to use.|
| `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
| `device` | `str` | `None` | The device to use for the cross encoder model. If None, will use "cuda" if available, otherwise "cpu". |
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
## Supported Scores for each query type
You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:
### Hybrid Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ❌ Not Supported | Would return the vector (`_distance`) and FTS (`score`) scores along with the relevance score (`_relevance_score`) |
### Vector Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the vector score (`_distance`) along with the relevance score (`_relevance_score`) |
### FTS Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the FTS score (`score`) along with the relevance score (`_relevance_score`) |

View File

@@ -0,0 +1,70 @@
# Cross Encoder Reranker
This re-ranker uses Cross Encoder models from sentence-transformers to rerank the search results. You can use this re-ranker by passing `CrossEncoderReranker()` to the `rerank()` method.
!!! note
Supported Query Types: Hybrid, Vector, FTS
```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import CrossEncoderReranker
embedder = get_registry().get("sentence-transformers").create()
db = lancedb.connect("~/.lancedb")
class Schema(LanceModel):
text: str = embedder.SourceField()
vector: Vector(embedder.ndims()) = embedder.VectorField()
data = [
{"text": "hello world"},
{"text": "goodbye world"}
]
tbl = db.create_table("test", schema=Schema, mode="overwrite")
tbl.add(data)
reranker = CrossEncoderReranker()
# Create the FTS index used by full-text and hybrid search
tbl.create_fts_index("text", replace=True)
# Run vector search with a reranker
result = tbl.search("hello").rerank(reranker=reranker).to_list()
# Run FTS search with a reranker
result = tbl.search("hello", query_type="fts").rerank(reranker=reranker).to_list()
# Run hybrid search with a reranker
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
```
Accepted Arguments
----------------
| Argument | Type | Default | Description |
| --- | --- | --- | --- |
| `model_name` | `str` | `""cross-encoder/ms-marco-TinyBERT-L-6"` | The name of the reranker model to use.|
| `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
| `device` | `str` | `None` | The device to use for the cross encoder model. If None, will use "cuda" if available, otherwise "cpu". |
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
## Supported Scores for each query type
You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:
### Hybrid Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ❌ Not Supported | Would return the vector (`_distance`) and FTS (`score`) scores along with the relevance score (`_relevance_score`) |
### Vector Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the vector score (`_distance`) along with the relevance score (`_relevance_score`) |
### FTS Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the FTS score (`score`) along with the relevance score (`_relevance_score`) |

View File

@@ -0,0 +1,88 @@
## Building Custom Rerankers
You can build your own custom reranker by subclassing the `Reranker` class and implementing the `rerank_hybrid()` method. Optionally, you can also implement the `rerank_vector()` and `rerank_fts()` methods if you want to support reranking for vector and FTS search separately.
Here's an example of a custom reranker that combines the results of semantic and full-text search using a linear combination of the scores.
The `Reranker` base interface comes with a `merge_results()` method that can be used to combine the results of semantic and full-text search. This is a vanilla merging algorithm that simply concatenates the results and removes the duplicates without taking the scores into consideration. It only keeps the first copy of the row encountered. This works well in cases that don't require the scores of semantic and full-text search to combine the results. If you want to use the scores or want to support `return_score="all"`, you'll need to implement your own merging algorithm.
```python
from lancedb.rerankers import Reranker
import pyarrow as pa
class MyReranker(Reranker):
def __init__(self, param1, param2, ..., return_score="relevance"):
super().__init__(return_score)
self.param1 = param1
self.param2 = param2
def rerank_hybrid(self, query: str, vector_results: pa.Table, fts_results: pa.Table):
# Use the built-in merging function
combined_result = self.merge_results(vector_results, fts_results)
# Do something with the combined results
# ...
# Return the combined results
return combined_result
def rerank_vector(self, query: str, vector_results: pa.Table):
# Do something with the vector results
# ...
# Return the vector results
return vector_results
def rerank_fts(self, query: str, fts_results: pa.Table):
# Do something with the FTS results
# ...
# Return the FTS results
return fts_results
```
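As a rough sketch of what that vanilla merge does, over plain id lists (the `naive_merge` helper is hypothetical; the real method operates on pyarrow tables):

```python
def naive_merge(vector_ids: list, fts_ids: list) -> list:
    # Concatenate both result lists, drop duplicates, and keep the first
    # occurrence of each row; scores are ignored entirely.
    seen, merged = set(), []
    for rid in vector_ids + fts_ids:
        if rid not in seen:
            seen.add(rid)
            merged.append(rid)
    return merged

print(naive_merge(["a", "b"], ["b", "c"]))  # ['a', 'b', 'c']
```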
### Example of a Custom Reranker
For the sake of simplicity, let's build a custom reranker that enhances the Cohere Reranker by accepting a filter query, and accepts the other CohereReranker params as kwargs.
```python
from typing import List, Union
import pandas as pd
import pyarrow as pa
from lancedb.rerankers import CohereReranker
class ModifiedCohereReranker(CohereReranker):
def __init__(self, filters: Union[str, List[str]], **kwargs):
super().__init__(**kwargs)
filters = filters if isinstance(filters, list) else [filters]
self.filters = filters
def rerank_hybrid(self, query: str, vector_results: pa.Table, fts_results: pa.Table) -> pa.Table:
combined_result = super().rerank_hybrid(query, vector_results, fts_results)
df = combined_result.to_pandas()
for filter in self.filters:
df = df.query("not text.str.contains(@filter)")
return pa.Table.from_pandas(df)
def rerank_vector(self, query: str, vector_results: pa.Table) -> pa.Table:
vector_results = super().rerank_vector(query, vector_results)
df = vector_results.to_pandas()
for filter in self.filters:
df = df.query("not text.str.contains(@filter)")
return pa.Table.from_pandas(df)
def rerank_fts(self, query: str, fts_results: pa.Table) -> pa.Table:
fts_results = super().rerank_fts(query, fts_results)
df = fts_results.to_pandas()
for filter in self.filters:
df = df.query("not text.str.contains(@filter)")
return pa.Table.from_pandas(df)
```
!!! tip
The `vector_results` and `fts_results` are pyarrow tables. Learn more about pyarrow tables [here](https://arrow.apache.org/docs/python). They can be converted to other data types like pandas DataFrames, pydicts, pylists, etc.
For example, you can convert them to pandas DataFrames using the `to_pandas()` method and perform any operations you want. After you are done, you can convert the DataFrame back to a pyarrow table using `pa.Table.from_pandas()` and return it.
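As a small, hedged sketch of that round trip (the `drop_empty_text` helper is hypothetical):

```python
import pyarrow as pa

def drop_empty_text(results: pa.Table) -> pa.Table:
    # Hop to pandas, filter out rows with empty text, hop back to pyarrow.
    df = results.to_pandas()
    df = df[df["text"].str.len() > 0]
    return pa.Table.from_pandas(df)
```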

View File

@@ -0,0 +1,60 @@
Reranking is the process of reordering a list of items based on some criteria. In the context of search, reranking is used to reorder the search results returned by a search engine based on some criteria. This can be useful when the initial ranking of the search results is not satisfactory or when the user has provided additional information that can be used to improve the ranking of the search results.
LanceDB comes with several built-in rerankers:
| Reranker | Description | Supported Query Types |
| --- | --- | --- |
| `LinearCombinationReranker` | Reranks search results based on a linear combination of FTS and vector search scores | Hybrid |
| `CohereReranker` | Uses the Cohere Rerank API to rerank results | Vector, FTS, Hybrid |
| `CrossEncoderReranker` | Uses a cross-encoder model to rerank search results | Vector, FTS, Hybrid |
| `ColbertReranker` | Uses a ColBERT model to rerank search results | Vector, FTS, Hybrid |
| `OpenaiReranker`(Experimental) | Uses OpenAI's chat model to rerank search results | Vector, FTS, Hybrid |
## Using a Reranker
Using a reranker is optional for vector and FTS search, but required for hybrid search. To use a reranker, create an instance of it and pass it to the `rerank` method of the query builder.
```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import CohereReranker
embedder = get_registry().get("sentence-transformers").create()
db = lancedb.connect("~/.lancedb")
class Schema(LanceModel):
text: str = embedder.SourceField()
vector: Vector(embedder.ndims()) = embedder.VectorField()
data = [
{"text": "hello world"},
{"text": "goodbye world"}
]
tbl = db.create_table("test", data)
reranker = CohereReranker(api_key="your_api_key")
# Run vector search with a reranker
result = tbl.query("hello").rerank(reranker).to_list()
# Run FTS search with a reranker
result = tbl.query("hello", query_type="fts").rerank(reranker).to_list()
# Run hybrid search with a reranker
tbl.create_fts_index("text")
result = tbl.query("hello", query_type="hybrid").rerank(reranker).to_list()
```
## Available Rerankers
LanceDB ships with the following built-in rerankers:
- [Cohere Reranker](./cohere.md)
- [Cross Encoder Reranker](./cross_encoder.md)
- [ColBERT Reranker](./colbert.md)
- [OpenAI Reranker](./openai.md)
- [Linear Combination Reranker](./linear_combination.md)
## Creating Custom Rerankers
LanceDB also allows you to create custom rerankers by extending the base `Reranker` class. A custom reranker implements the `rerank_hybrid` method, which takes the vector and FTS results and returns a reranked table. This is covered in more detail in the [Creating Custom Rerankers](./custom_reranker.md) section; a minimal sketch follows below.
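As a minimal, hedged sketch (the `PassthroughReranker` name is hypothetical):

```python
import pyarrow as pa
from lancedb.rerankers import Reranker

class PassthroughReranker(Reranker):
    # Merge vector and FTS results with the base class helper and
    # apply no re-scoring at all.
    def rerank_hybrid(
        self, query: str, vector_results: pa.Table, fts_results: pa.Table
    ) -> pa.Table:
        return self.merge_results(vector_results, fts_results)
```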

View File

@@ -0,0 +1,52 @@
# Linear Combination Reranker
This is the default re-ranker used by LanceDB hybrid search. It combines the results of semantic and full-text search using a linear combination of the scores. The weight for the linear combination can be specified; it defaults to 0.7, i.e., 70% weight for semantic search and 30% weight for full-text search.
!!! note
Supported Query Types: Hybrid
```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import LinearCombinationReranker
embedder = get_registry().get("sentence-transformers").create()
db = lancedb.connect("~/.lancedb")
class Schema(LanceModel):
text: str = embedder.SourceField()
vector: Vector(embedder.ndims()) = embedder.VectorField()
data = [
{"text": "hello world"},
{"text": "goodbye world"}
]
tbl = db.create_table("test", schema=Schema, mode="overwrite")
tbl.add(data)
reranker = LinearCombinationReranker()
# Run hybrid search with a reranker
tbl.create_fts_index("text", replace=True)
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
```
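The combination itself is a weighted sum; here is a simplified, hedged sketch of the idea (the actual implementation also normalizes the raw scores before combining):

```python
def linear_combination(vector_score: float, fts_score: float, weight: float = 0.7) -> float:
    # 70% semantic, 30% full-text by default; assumes both scores are
    # already normalized to [0, 1], higher meaning more relevant.
    return weight * vector_score + (1 - weight) * fts_score

print(linear_combination(0.9, 0.4))  # 0.7 * 0.9 + 0.3 * 0.4 = 0.75
```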
Accepted Arguments
----------------
| Argument | Type | Default | Description |
| --- | --- | --- | --- |
| `weight` | `float` | `0.7` | The weight to use for the semantic search score. The weight for the full-text search score is `1 - weight`. |
| `return_score` | `str` | `"relevance"` | Options are `"relevance"` or `"all"`. The type of score to return. If `"relevance"`, returns only the `_relevance_score` column. If `"all"`, returns all scores from the vector and FTS search along with the relevance score. |
## Supported Scores for each query type
You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:
### Hybrid Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the vector (`_distance`) and FTS (`score`) scores along with the relevance score (`_relevance_score`) |

View File

@@ -0,0 +1,73 @@
# OpenAI Reranker (Experimental)
This re-ranker uses an OpenAI chat model to rerank the search results. You can use this re-ranker by passing `OpenaiReranker()` to the `rerank()` method.
!!! note
Supported Query Types: Hybrid, Vector, FTS
!!! warning
This re-ranker is experimental. OpenAI doesn't have a dedicated reranking model, so we are using the chat model for reranking.
```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import OpenaiReranker
embedder = get_registry().get("sentence-transformers").create()
db = lancedb.connect("~/.lancedb")
class Schema(LanceModel):
text: str = embedder.SourceField()
vector: Vector(embedder.ndims()) = embedder.VectorField()
data = [
{"text": "hello world"},
{"text": "goodbye world"}
]
tbl = db.create_table("test", schema=Schema, mode="overwrite")
tbl.add(data)
reranker = OpenaiReranker()
# Create the FTS index used by full-text and hybrid search
tbl.create_fts_index("text", replace=True)
# Run vector search with a reranker
result = tbl.search("hello").rerank(reranker=reranker).to_list()
# Run FTS search with a reranker
result = tbl.search("hello", query_type="fts").rerank(reranker=reranker).to_list()
# Run hybrid search with a reranker
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
```
Accepted Arguments
----------------
| Argument | Type | Default | Description |
| --- | --- | --- | --- |
| `model_name` | `str` | `"gpt-4-turbo-preview"` | The name of the reranker model to use.|
| `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
| `return_score` | str | `"relevance"` | Options are "relevance" or "all". The type of score to return. If "relevance", will return only the `_relevance_score. If "all" is supported, will return relevance score along with the vector and/or fts scores depending on query type |
| `api_key` | str | `None` | The API key to use. If None, will use the OPENAI_API_KEY environment variable.
## Supported Scores for each query type
You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:
### Hybrid Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ❌ Not Supported | Would return the vector (`_distance`) and FTS (`score`) scores along with the relevance score (`_relevance_score`) |
### Vector Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the vector score (`_distance`) along with the relevance score (`_relevance_score`) |
### FTS Search
|`return_score`| Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the FTS score (`score`) along with the relevance score (`_relevance_score`) |

View File

@@ -15,6 +15,7 @@ excluded_globs = [
"../src/ann_indexes.md",
"../src/basic.md",
"../src/hybrid_search/hybrid_search.md",
"../src/reranking/*.md",
]
python_prefix = "py"

View File

@@ -163,7 +163,7 @@ export interface CreateTableOptions<T> {
/**
* Connect to a LanceDB instance at the given URI.
*
* Accpeted formats:
* Accepted formats:
*
* - `/path/to/database` - local database
* - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage

View File

@@ -20,7 +20,7 @@ import { Table as ArrowTable, Schema } from "apache-arrow";
/**
* Connect to a LanceDB instance at the given URI.
*
* Accpeted formats:
* Accepted formats:
*
* - `/path/to/database` - local database
* - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage

View File

@@ -1,5 +1,5 @@
[bumpversion]
current_version = 0.6.8
current_version = 0.6.9
commit = True
message = [python] Bump version: {current_version} → {new_version}
tag = True

View File

@@ -1,9 +1,9 @@
[project]
name = "lancedb"
version = "0.6.8"
version = "0.6.9"
dependencies = [
"deprecation",
"pylance==0.10.10",
"pylance==0.10.12",
"ratelimiter~=1.0",
"requests>=2.31.0",
"retry>=0.9.2",
@@ -65,7 +65,6 @@ docs = [
"mkdocs-jupyter",
"mkdocs-material",
"mkdocstrings[python]",
"mkdocs-ultralytics-plugin==0.0.44",
]
clip = ["torch", "pillow", "open-clip"]
embeddings = [

View File

@@ -83,7 +83,7 @@ def connect(
>>> db = lancedb.connect("s3://my-bucket/lancedb")
Connect to LancdDB cloud:
Connect to LanceDB cloud:
>>> db = lancedb.connect("db://my_database", api_key="ldb_...")

View File

@@ -10,13 +10,18 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC, abstractmethod
from typing import List, Union
import numpy as np
import pyarrow as pa
from pydantic import BaseModel, Field, PrivateAttr
from tqdm import tqdm
import lancedb
from .fine_tuner import QADataset
from .utils import TEXT, retry_with_exponential_backoff
@@ -126,6 +131,22 @@ class EmbeddingFunction(BaseModel, ABC):
def __hash__(self) -> int:
return hash(frozenset(vars(self).items()))
def finetune(self, dataset: QADataset, *args, **kwargs):
"""
Finetune the embedding function on a dataset
"""
raise NotImplementedError(
"Finetuning is not supported for this embedding function"
)
def evaluate(self, dataset: QADataset, top_k=5, path=None, *args, **kwargs):
"""
Evaluate the embedding function on a dataset
"""
raise NotImplementedError(
"Evaluation is not supported for this embedding function"
)
class EmbeddingFunctionConfig(BaseModel):
"""
@@ -159,3 +180,52 @@ class TextEmbeddingFunction(EmbeddingFunction):
Generate the embeddings for the given texts
"""
pass
def evaluate(self, dataset: QADataset, top_k=5, path=None, *args, **kwargs):
"""
Evaluate the embedding function on a dataset. This calculates the hit-rate for
the top-k retrieved documents for each query in the dataset. Assumes that the
first relevant document is the expected document.
Pro - Should work for any embedding model
Con - Returns a very simple metric.
Parameters
----------
dataset: QADataset
The dataset to evaluate on
Returns
-------
dict
The evaluation results
"""
corpus = dataset.corpus
queries = dataset.queries
relevant_docs = dataset.relevant_docs
path = path or os.path.join(os.getcwd(), "eval")
db = lancedb.connect(path)
class Schema(lancedb.pydantic.LanceModel):
id: str
text: str = self.SourceField()
vector: lancedb.pydantic.Vector(self.ndims()) = self.VectorField()
retriever = db.create_table("eval", schema=Schema, mode="overwrite")
pylist = [{"id": str(k), "text": v} for k, v in corpus.items()]
retriever.add(pylist)
eval_results = []
for query_id, query in tqdm(queries.items()):
retrieved_nodes = retriever.search(query).limit(top_k).to_list()
retrieved_ids = [node["id"] for node in retrieved_nodes]
expected_id = relevant_docs[query_id][0]
is_hit = expected_id in retrieved_ids # assume 1 relevant doc
eval_result = {
"is_hit": is_hit,
"retrieved": retrieved_ids,
"expected": expected_id,
"query": query_id,
}
eval_results.append(eval_result)
return eval_results

View File

@@ -0,0 +1,147 @@
### Fine-tuning workflow
The fine-tuning workflow is as follows:
1. Create a `QADataset` object.
2. Initialize any embedding function using the LanceDB embedding API.
3. Call the `finetune` method on the embedding object with the `QADataset` object as an argument.
4. Evaluate the fine-tuned model using the `evaluate` method in the embedding API.
# End-to-End Examples
The following is an example of how to fine-tune an embedding model using the LanceDB embedding API.
## Example 1: Fine-tuning from a synthetic dataset
```python
import os
import pandas as pd
from lancedb.embeddings.fine_tuner.llm import Openai
from lancedb.embeddings.fine_tuner.dataset import QADataset, TextChunk
from lancedb.pydantic import LanceModel, Vector
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import MetadataMode
from lancedb.embeddings import get_registry
dataset = "Uber10KDataset2021"
lance_dataset_dir = dataset + "_lance"
valset_dir = dataset + "_lance_val"
finetuned_model_path = "./model_finetuned"
# 1. Create a QADataset object. See all datasets on llama-index here: https://github.com/run-llama/llama_index/tree/main/llama-datasets
if not os.path.exists(f"./data/{dataset}"):
os.system(
f"llamaindex-cli download-llamadataset {dataset} --download-dir ./data/{dataset}"
)
docs = SimpleDirectoryReader(input_dir=f"./data/{dataset}/source_files").load_data()
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(docs)
# convert Llama-index TextNode to TextChunk
chunks = [TextChunk.from_llama_index_node(node) for node in nodes]
llm = Openai()
if os.path.exists(lance_dataset_dir):
trainset = QADataset.load(lance_dataset_dir)
else:
trainset = QADataset.from_llm(chunks, llm, num_questions_per_chunk=2)
trainset.save(lance_dataset_dir)
# Ideally, we should have a standard dataset for validation, but here we're just generating a synthetic dataset.
if os.path.exists(valset_dir):
valset = QADataset.load(valset_dir)
else:
valset = QADataset.from_llm(chunks, llm, num_questions_per_chunk=4)
valset.save(valset_dir)
# 2. Initialize the embedding model
model = get_registry().get("sentence-transformers").create(name="sentence-transformers/multi-qa-MiniLM-L6-cos-v1")
# 3. Fine-tune the model
model.finetune(trainset=trainset, path=finetuned_model_path, epochs=4)
# 4. Evaluate the fine-tuned model
base = get_registry().get("sentence-transformers").create(name="sentence-transformers/multi-qa-MiniLM-L6-cos-v1")
base_results = base.evaluate(valset, top_k=5)
tuned = get_registry().get("sentence-transformers").create(name=finetuned_model_path)
tuned_results = tuned.evaluate(valset, top_k=5)
openai = get_registry().get("openai").create(name="text-embedding-3-small")
openai_results = openai.evaluate(valset, top_k=5)
print("openai-embedding-v3 hit-rate - ", pd.DataFrame(openai_results)["is_hit"].mean())
print("fine-tuned hit-rate - ", pd.DataFrame(tuned_results)["is_hit"].mean())
print("Base model hite-rate - ", pd.DataFrame(base_results)["is_hit"].mean())
```
The fine-tuning workflow for embeddings consists of the following parts:
### QADataset
This class is used for managing the data for fine-tuning. It contains the following builder methods:
```
- from_llm(
nodes: 'List[TextChunk]' ,
llm: BaseLLM,
qa_generate_prompt_tmpl: str = DEFAULT_PROMPT_TMPL,
num_questions_per_chunk: int = 2,
) -> "QADataset"
```
Create synthetic data from a language model and text chunks of the original document on which the model is to be fine-tuned.
```python
from_responses(docs: List['TextChunk'], queries: Dict[str, str], relevant_docs: Dict[str, List[str]])-> "QADataset"
```
Create a dataset from queries and responses based on a real-world scenario. Designed to be used for knowledge distillation from a larger LLM to a smaller one.
It also contains the following data attributes:
```
queries (Dict[str, str]): Dict id -> query.
corpus (Dict[str, str]): Dict id -> string.
relevant_docs (Dict[str, List[str]]): Dict query id -> list of doc ids.
```
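For instance, a tiny dataset can be constructed directly from these attributes and round-tripped through `save`/`load` (a hedged sketch; the ids are arbitrary):

```python
from lancedb.embeddings.fine_tuner.dataset import QADataset

ds = QADataset(
    queries={"q1": "What is LanceDB?"},
    corpus={"d1": "LanceDB is an embedded vector database built on Lance."},
    relevant_docs={"q1": ["d1"]},
)
ds.save("./data/tiny_lance")
assert QADataset.load("./data/tiny_lance").queries == ds.queries
```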
### TextChunk
This class is used for managing the data for fine-tuning. It is designed to allow working with and standardize various text splitting/pre-processing tools like llama-index and langchain. It contains the following attributes:
```
text: str
id: str
metadata: Dict[str, Any] = {}
```
Builder Methods:
```python
from_llama_index_node(node) -> "TextChunk"
```
Create a text chunk from a llama index node.
```python
from_langchain_node(node) -> "TextChunk"
```
Create a text chunk from a langchain node.
```python
from_chunk(cls, chunk: str, metadata: dict = {}) -> "TextChunk"
```
Create a text chunk from a string.
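As a quick, hedged usage sketch of these builders (the chunk text is arbitrary):

```python
from lancedb.embeddings.fine_tuner.dataset import TextChunk

chunk = TextChunk.from_chunk("LanceDB is an embedded vector database.")
print(chunk.id)    # auto-generated uuid4 string
print(chunk.text)  # the wrapped text
```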
### FineTuner
This class is used for fine-tuning embeddings. It is exposed to the user via a high-level function in the base embedding api.
```python
class BaseEmbeddingTuner(ABC):
"""Base Embedding finetuning engine."""
@abstractmethod
def finetune(self) -> None:
"""Goes off and does stuff."""
def helper(self) -> None:
"""A helper method."""
pass
```
### Embedding API finetuning implementation
Each embedding API needs to implement the `finetune` method in order to support fine-tuning. A vanilla evaluation technique has been implemented in the `BaseEmbedding` class that calculates hit_rate @ `top_k`; a small sketch of the metric follows.
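Hit rate @ `top_k` is simply the fraction of queries whose expected document id appears among the `top_k` retrieved ids. A hedged sketch over result rows shaped like the ones `evaluate` returns:

```python
# Rows mirror the dicts produced by the vanilla `evaluate` implementation.
results = [
    {"is_hit": True, "expected": "d1", "retrieved": ["d1", "d7"], "query": "q1"},
    {"is_hit": False, "expected": "d2", "retrieved": ["d9", "d4"], "query": "q2"},
]
hit_rate = sum(r["is_hit"] for r in results) / len(results)
print(hit_rate)  # 0.5
```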

View File

@@ -0,0 +1,4 @@
from .dataset import QADataset, TextChunk
from .llm import Gemini, Openai
__all__ = ["QADataset", "TextChunk", "Openai", "Gemini"]

View File

@@ -0,0 +1,19 @@
from abc import ABC, abstractmethod
class BaseEmbeddingTuner(ABC):
"""Base Embedding finetuning engine."""
@abstractmethod
def finetune(self) -> None:
"""
Finetune the embedding model.
"""
pass
def helper(self) -> None:
"""
A helper method called after finetuning. This is meant to provide
usage instructions or other helpful information.
"""
pass

View File

@@ -0,0 +1,283 @@
import re
import uuid
from pathlib import Path
from typing import Any, Dict, List, Tuple
import lance
import pyarrow as pa
from pydantic import BaseModel
from tqdm import tqdm
from .llm import BaseLLM
DEFAULT_PROMPT_TMPL = """\
Context information is below.
---------------------
{context_str}
---------------------
Given the context information and no prior knowledge,
generate only questions based on the below query.
You are a Teacher/ Professor. Your task is to setup \
{num_questions_per_chunk} questions for an upcoming \
quiz/examination. The questions should be diverse in nature \
across the document. Restrict the questions to the \
context information provided.
"""
class QADataset(BaseModel):
"""Embedding QA Finetuning Dataset.
Args:
queries (Dict[str, str]): Dict id -> query.
corpus (Dict[str, str]): Dict id -> string.
relevant_docs (Dict[str, List[str]]): Dict query id -> list of doc ids.
"""
queries: Dict[str, str] # id -> query
corpus: Dict[str, str] # id -> text
relevant_docs: Dict[str, List[str]] # query id -> list of retrieved doc ids
mode: str = "text"
@property
def query_docid_pairs(self) -> List[Tuple[str, List[str]]]:
"""Get query, relevant doc ids."""
return [
(query, self.relevant_docs[query_id])
for query_id, query in self.queries.items()
]
def save(self, path: str, mode: str = "overwrite") -> None:
"""
Save the current dataset to a directory as .lance files.
Parameters
----------
path : str
The path to save the dataset.
mode : str, optional
The mode to save the dataset, by default "overwrite". Accepts
lance modes.
"""
save_dir = Path(path)
save_dir.mkdir(parents=True, exist_ok=True)
# convert to pydict {"id": []}
queries = {
"id": list(self.queries.keys()),
"query": list(self.queries.values()),
}
corpus = {
"id": list(self.corpus.keys()),
"text": [
val or " " for val in self.corpus.values()
], # lance saves empty strings as null
}
relevant_docs = {
"query_id": list(self.relevant_docs.keys()),
"doc_id": list(self.relevant_docs.values()),
}
# write to lance
lance.write_dataset(
pa.Table.from_pydict(queries), save_dir / "queries.lance", mode=mode
)
lance.write_dataset(
pa.Table.from_pydict(corpus), save_dir / "corpus.lance", mode=mode
)
lance.write_dataset(
pa.Table.from_pydict(relevant_docs),
save_dir / "relevant_docs.lance",
mode=mode,
)
@classmethod
def load(cls, path: str) -> "QADataset":
"""
Load QADataset from a directory.
Parameters
----------
path : str
The path to load the dataset from.
Returns
-------
QADataset
The loaded QADataset.
"""
load_dir = Path(path)
queries = lance.dataset(load_dir / "queries.lance").to_table().to_pydict()
corpus = lance.dataset(load_dir / "corpus.lance").to_table().to_pydict()
relevant_docs = (
lance.dataset(load_dir / "relevant_docs.lance").to_table().to_pydict()
)
return cls(
queries=dict(zip(queries["id"], queries["query"])),
corpus=dict(zip(corpus["id"], corpus["text"])),
relevant_docs=dict(zip(relevant_docs["query_id"], relevant_docs["doc_id"])),
)
# generate queries as a convenience function
@classmethod
def from_llm(
cls,
nodes: "List[TextChunk]",
llm: BaseLLM,
qa_generate_prompt_tmpl: str = DEFAULT_PROMPT_TMPL,
num_questions_per_chunk: int = 2,
) -> "QADataset":
"""
Generate a QADataset from a list of TextChunks.
Parameters
----------
nodes : List[TextChunk]
The list of text chunks.
llm : BaseLLM
The language model to generate questions.
qa_generate_prompt_tmpl : str, optional
The template for generating questions, by default DEFAULT_PROMPT_TMPL.
num_questions_per_chunk : int, optional
The number of questions to generate per chunk, by default 2.
Returns
-------
QADataset
The generated QADataset.
"""
node_dict = {node.id: node.text for node in nodes}
queries = {}
relevant_docs = {}
for node_id, text in tqdm(node_dict.items()):
query = qa_generate_prompt_tmpl.format(
context_str=text, num_questions_per_chunk=num_questions_per_chunk
)
response = llm.chat_completion(query)
result = str(response).strip().split("\n")
questions = [
re.sub(r"^\d+[\).\s]", "", question).strip() for question in result
]
questions = [question for question in questions if len(question) > 0]
for question in questions:
question_id = str(uuid.uuid4())
queries[question_id] = question
relevant_docs[question_id] = [node_id]
return cls(queries=queries, corpus=node_dict, relevant_docs=relevant_docs)
@classmethod
def from_responses(
cls,
nodes: List["TextChunk"],
queries: Dict[str, str],
relevant_docs: Dict[str, List[str]],
) -> "QADataset":
"""
Create a QADataset from a list of TextChunks and a list of
questions, queries, and relevant docs.
Parameters
----------
nodes : List[TextChunk]
The list of text chunks.
queries : Dict[str, str]
The queries. query id -> query.
relevant_docs : Dict[str, List[str]]
The relevant docs. Dict query id -> list of doc ids.
Returns
-------
QADataset
The QADataset.
"""
node_dict = {node.id: node.text for node in nodes}
return cls(queries=queries, corpus=node_dict, relevant_docs=relevant_docs)
class TextChunk(BaseModel):
"""
Simple text chunk for storing text nodes. Acts as a wrapper around text.
Allows interoperability between different text-processing libraries.
Args:
text (str): The text of the chunk.
id (str): The id of the chunk.
metadata (Dict[str, Any], optional): The metadata of the chunk. Defaults to {}.
"""
text: str
id: str
metadata: Dict[str, Any] = {}
@classmethod
def from_chunk(cls, chunk: str, metadata: dict = {}) -> "TextChunk":
"""
Create a TextChunk from a raw text string.
Parameters
----------
chunk : str
The text chunk.
metadata : dict, optional
The metadata, by default {}.
Returns
-------
TextChunk
The text chunk.
"""
# generate a unique id; copy metadata so callers never share the mutable default
return cls(text=chunk, id=str(uuid.uuid4()), metadata=dict(metadata))
@classmethod
def from_llama_index_node(cls, node):
"""
Generate a TextChunk from a llama index node.
Parameters
----------
node : llama_index.core.TextNode
The llama index node.
"""
return cls(text=node.text, id=node.node_id, metadata=node.metadata)
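# Interop sketch (assumes llama-index is installed and `documents` is already
# loaded; the SentenceSplitter parser is just one possible choice):
#
#     from llama_index.core.node_parser import SentenceSplitter
#     nodes = SentenceSplitter().get_nodes_from_documents(documents)
#     chunks = [TextChunk.from_llama_index_node(n) for n in nodes]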
@classmethod
def from_langchain_node(cls, node):
"""
Generate a TextChunk from a langchain node.
Parameters
----------
node : langchain_core.documents.Document
The langchain node.
"""
raise NotImplementedError("Not implemented yet.")
def to_dict(self) -> Dict[str, Any]:
"""
Convert to a dictionary.
Returns
-------
Dict[str, Any]
The dictionary.
"""
return self.dict()
def __str__(self) -> str:
return self.text
def __repr__(self) -> str:
return (
f"TextChunk(text={self.text!r}, id={self.id!r}, "
f"metadata={self.metadata!r})"
)

View File

@@ -0,0 +1,88 @@
import os
import re
from functools import cached_property
from typing import Optional
from pydantic import BaseModel
from ...util import attempt_import_or_raise
from ..utils import api_key_not_found_help
class BaseLLM(BaseModel):
"""
TODO:
Base class for language models used by the embedding fine-tuners.
This class is loosely designed for now and will be updated as the
usage gets clearer.
"""
class Config:
protected_namespaces = () # Disable protected namespace check
model_name: str
model_kwargs: dict = {}
@cached_property
def _client(self):
"""
Get the client for the language model
"""
raise NotImplementedError
def chat_completion(self, prompt: str, **kwargs):
"""
Get the chat completion for the given prompt
"""
raise NotImplementedError
class Openai(BaseLLM):
model_name: str = "gpt-3.5-turbo"
kwargs: dict = {}
api_key: Optional[str] = None
@cached_property
def _client(self):
"""
Get the client for the language model
"""
openai = attempt_import_or_raise("openai")
if not os.environ.get("OPENAI_API_KEY"):
api_key_not_found_help("openai")
return openai.OpenAI()
def chat_completion(self, prompt: str) -> str:
"""
Get the chat completion for the given prompt
"""
# TODO: this is the legacy OpenAI chat API; replace with the completions API
completion = self._client.chat.completions.create(
model=self.model_name,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
],
**self.kwargs,
)
text = completion.choices[0].message.content
return text
def get_questions(self, prompt: str) -> str:
"""
Generate questions for the given prompt and parse them into a list
"""
response = self.chat_completion(prompt)
result = str(response).strip().split("\n")
questions = [
re.sub(r"^\d+[\).\s]", "", question).strip() for question in result
]
questions = [question for question in questions if len(question) > 0]
return questions
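# Illustrative call (assumes OPENAI_API_KEY is set; the prompt is made up):
#
#     llm = Openai()
#     questions = llm.get_questions(
#         "Generate 2 questions about: LanceDB stores vectors in Lance format."
#     )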
class Gemini(BaseLLM):
pass

View File

@@ -10,12 +10,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Union
import numpy as np
import logging
from lancedb.embeddings.fine_tuner import QADataset
from ..util import attempt_import_or_raise
from .base import TextEmbeddingFunction
from .fine_tuner.basetuner import BaseEmbeddingTuner
from .registry import register
from .utils import weak_lru
@@ -80,3 +83,151 @@ class SentenceTransformerEmbeddings(TextEmbeddingFunction):
"sentence_transformers", "sentence-transformers"
)
return sentence_transformers.SentenceTransformer(self.name, device=self.device)
def finetune(self, trainset: QADataset, *args, **kwargs):
"""
Finetune the Sentence Transformers model
Parameters
----------
trainset: QADataset
The dataset to use for finetuning.
**kwargs
Additional keyword arguments forwarded to SentenceTransformersTuner.
"""
tuner = SentenceTransformersTuner(
model=self.embedding_model,
trainset=trainset,
**kwargs,
)
tuner.finetune()
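# Sketch of the intended finetuning flow (paths are illustrative; extra
# kwargs are forwarded to SentenceTransformersTuner):
#
#     model = get_registry().get("sentence-transformers").create()
#     model.finetune(trainset=ds, valset=ds, path="./tuned_model", epochs=1)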
class SentenceTransformersTuner(BaseEmbeddingTuner):
"""Sentence Transformers Embedding Finetuning Engine."""
def __init__(
self,
model: Any,
trainset: QADataset,
valset: Optional[QADataset] = None,
path: Optional[str] = "~/.lancedb/embeddings/models",
batch_size: int = 8,
epochs: int = 1,
show_progress: bool = True,
eval_steps: int = 50,
max_input_per_doc: int = -1,
loss: Optional[Any] = None,
evaluator: Optional[Any] = None,
run_name: Optional[str] = None,
log_wandb: bool = False,
) -> None:
"""
Parameters
----------
model: Any
The sentence-transformers model instance to finetune.
trainset: QADataset
The training dataset.
valset: Optional[QADataset]
The validation dataset.
path: Optional[str]
The path to save the model.
batch_size: int, default=8
The batch size.
epochs: int, default=1
The number of epochs.
show_progress: bool, default=True
Whether to show a progress bar.
eval_steps: int, default=50
The number of steps between evaluations.
max_input_per_doc: int, default=-1
The maximum number of relevant docs used per query;
if -1, use all relevant docs.
loss: Optional[Any]
The loss to optimize; defaults to MultipleNegativesRankingLoss.
evaluator: Optional[Any]
The evaluator class; defaults to InformationRetrievalEvaluator.
run_name: Optional[str]
The wandb run name, used when log_wandb is True.
log_wandb: bool, default=False
Whether to log evaluation scores to wandb.
"""
from sentence_transformers import InputExample, losses
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from torch.utils.data import DataLoader
self.model = model
self.trainset = trainset
self.valset = valset
self.path = path
self.batch_size = batch_size
self.epochs = epochs
self.show_progress = show_progress
self.eval_steps = eval_steps
self.max_input_per_doc = max_input_per_doc
self.evaluator = None
self.run_name = run_name
self.log_wandb = log_wandb
if self.max_input_per_doc == 0 or self.max_input_per_doc < -1:
raise ValueError("max_input_per_doc must be -1 or greater than 0.")
examples: Any = []
for query_id, query in self.trainset.queries.items():
if max_input_per_doc == -1:
for node_id in self.trainset.relevant_docs[query_id]:
text = self.trainset.corpus[node_id]
example = InputExample(texts=[query, text])
examples.append(example)
else:
# cap the number of relevant docs used per query at max_input_per_doc
for node_id in self.trainset.relevant_docs[query_id][:max_input_per_doc]:
text = self.trainset.corpus[node_id]
example = InputExample(texts=[query, text])
examples.append(example)
self.examples = examples
self.loader: DataLoader = DataLoader(examples, batch_size=batch_size)
if self.valset is not None:
eval_engine = evaluator or InformationRetrievalEvaluator
self.evaluator = eval_engine(
valset.queries, valset.corpus, valset.relevant_docs
)
# define loss
self.loss = loss or losses.MultipleNegativesRankingLoss(self.model)
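# warm up the learning rate over roughly 10% of the total training steps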
self.warmup_steps = int(len(self.loader) * epochs * 0.1)
def finetune(self) -> None:
"""Finetune the Sentence Transformers model."""
self.model.fit(
train_objectives=[(self.loader, self.loss)],
epochs=self.epochs,
warmup_steps=self.warmup_steps,
output_path=self.path,
show_progress_bar=self.show_progress,
evaluator=self.evaluator,
evaluation_steps=self.eval_steps,
callback=self._wandb_callback if self.log_wandb else None,
)
self.helper()
def helper(self) -> None:
"""A helper method."""
logging.info("Finetuning complete.")
logging.info(f"Model saved to {self.path}.") # noqa
logging.info("You can now use the model as follows:")
logging.info(
f"model = get_registry().get('sentence-transformers').create(name='./{self.path}')" # noqa
)
def _wandb_callback(self, score, epoch, steps):
try:
import wandb
except ImportError:
raise ImportError(
"wandb is not installed. Please install it using `pip install wandb`"
)
run = wandb.run or wandb.init(
project="sbert_lancedb_finetune", name=self.run_name
)
run.log({"epoch": epoch, "steps": steps, "score": score})

View File

@@ -1,4 +1,5 @@
import os
import semver
from functools import cached_property
from typing import Union
@@ -42,6 +43,14 @@ class CohereReranker(Reranker):
@cached_property
def _client(self):
cohere = attempt_import_or_raise("cohere")
# ensure version is at least 5.0.0
if (
hasattr(cohere, "__version__")
and semver.compare(cohere.__version__, "5.0.0") < 0
):
raise ValueError(
f"cohere version must be at least 0.5.0, found {cohere.__version__}"
)
if os.environ.get("COHERE_API_KEY") is None and self.api_key is None:
raise ValueError(
"COHERE_API_KEY not set. Either set it in your environment or \
@@ -51,11 +60,14 @@ class CohereReranker(Reranker):
def _rerank(self, result_set: pa.Table, query: str):
docs = result_set[self.column].to_pylist()
response = self._client.rerank(
query=query,
documents=docs,
top_n=self.top_n,
model=self.model_name,
)
# results carry (text, index, relevance_score) attributes, sorted descending by score
results = response.results
indices, scores = list(
zip(*[(result.index, result.relevance_score) for result in results])

View File

@@ -0,0 +1,45 @@
import uuid
import pytest
from lancedb.embeddings import get_registry
from lancedb.embeddings.fine_tuner import QADataset, TextChunk
from tqdm import tqdm
@pytest.mark.slow
def test_finetuning_sentence_transformers(tmp_path):
queries = {}
relevant_docs = {}
chunks = [
"This is a chunk related to legal docs",
"This is another chunk related financial docs",
"This is a chunk related to sports docs",
"This is another chunk related to fashion docs",
]
text_chunks = [TextChunk.from_chunk(chunk) for chunk in chunks]
for chunk in tqdm(text_chunks):
questions = [
"What is this chunk about?",
"What is the main topic of this chunk?",
]
for question in questions:
question_id = str(uuid.uuid4())
queries[question_id] = question
relevant_docs[question_id] = [chunk.id]
ds = QADataset.from_responses(text_chunks, queries, relevant_docs)
assert len(ds.queries) == 8
assert len(ds.corpus) == 4
model = get_registry().get("sentence-transformers").create()
model.finetune(trainset=ds, valset=ds, path=str(tmp_path / "model"), epochs=1)
model = (
get_registry().get("sentence-transformers").create(name=str(tmp_path / "model"))
)
res = model.evaluate(ds)
assert res is not None
def test_text_chunk():
# TODO
pass