Compare commits

...

12 Commits

Author SHA1 Message Date
Lance Release
4744640bd2 [python] Bump version: 0.1.6 → 0.1.7 2023-06-12 21:39:16 +00:00
gsilvestrin
094b5e643c bugfix(python) Make release action has invalid name (#180) 2023-06-12 14:24:15 -07:00
gsilvestrin
a318778d2a feat(python): add action to tag python releases (#172) 2023-06-12 13:59:08 -07:00
Tevin Wang
9b83ce3d2a add black to python CI (#178)
Closes #48
2023-06-12 11:22:34 -07:00
Nithin PS
7bad676f30 [Python] Fix Contextualizer validation to arguments (#168)
Closes #164

---------

Co-authored-by: Will Jones <willjones127@gmail.com>
2023-06-12 09:20:09 -07:00
gsilvestrin
0e981e782b [nodejs] bumping version to 0.1.5 (#171) 2023-06-09 12:33:17 -07:00
Utkarsh Gautam
e18cdfc7cf [docs] Fixed Minor typo in embedding.md (#167)
Added missing tab to python snippet
2023-06-08 22:01:51 -07:00
Will Jones
fed33a51d5 wip: make the python API reference a bit nicer (#162)
Adds:

* Make `mkdocstrings` aware we are using numpy-style docstrings
* Fixes broken link on `index.md` to Python API docs (and added link to
node ones)
* Added examples to various classes.
* Added doctest to verify examples work.
2023-06-08 16:07:06 -07:00
Jai
a56b65db84 rename examples for slugs (#159) 2023-06-07 16:44:54 -07:00
gsilvestrin
f21caebeda Update links in README.md (#161)
Current one 404s
2023-06-07 13:16:00 -07:00
gsilvestrin
12da77a9f7 [doc] removed index creation from quickstart (#160) 2023-06-07 09:29:38 -07:00
gsilvestrin
131b2dc57b [nodejs] Added completed youtube transcript example / docs (#156) 2023-06-06 16:26:21 -07:00
35 changed files with 875 additions and 181 deletions

View File

@@ -0,0 +1,56 @@
name: Python - Create release commit
on:
  workflow_dispatch:
    inputs:
      dry_run:
        description: 'Dry run (create the local commit/tags but do not push it)'
        required: true
        default: "false"
        type: choice
        options:
          - "true"
          - "false"
      part:
        description: 'What kind of release is this?'
        required: true
        default: 'patch'
        type: choice
        options:
          - patch
          - minor
          - major
jobs:
  bump-version:
    runs-on: ubuntu-latest
    steps:
      - name: Check out main
        uses: actions/checkout@v3
        with:
          ref: main
          persist-credentials: false
          fetch-depth: 0
          lfs: true
      - name: Set git configs for bumpversion
        shell: bash
        run: |
          git config user.name 'Lance Release'
          git config user.email 'lance-dev@lancedb.com'
      - name: Set up Python 3.10
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Bump version, create tag and commit
        working-directory: python
        run: |
          pip install bump2version
          bumpversion --verbose ${{ inputs.part }}
      - name: Push new version and tag
        # note: the comparison must live inside the expression;
        # `${{ inputs.dry_run }} == "false"` would always be truthy
        if: ${{ inputs.dry_run == 'false' }}
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
          branch: main
          tags: true
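For reference, this job is roughly equivalent to the following local session (a sketch; it assumes `bump2version` is installed and picks up the `python/.bumpversion.cfg` shown later in this diff):

```bash
pip install bump2version
cd python
# For part=patch this bumps 0.1.6 -> 0.1.7, commits the change,
# and creates the tag python-v0.1.7 (per .bumpversion.cfg)
bumpversion --verbose patch
# The workflow then pushes the commit and tags unless dry_run is "true"
git push origin main --tags
```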

View File

@@ -32,9 +32,13 @@ jobs:
         run: |
           pip install -e .
           pip install tantivy@git+https://github.com/quickwit-oss/tantivy-py#164adc87e1a033117001cf70e38c82a53014d985
-          pip install pytest pytest-mock
+          pip install pytest pytest-mock black
+      - name: Black
+        run: black --check --diff --no-color --quiet .
       - name: Run tests
         run: pytest -x -v --durations=30 tests
+      - name: doctest
+        run: pytest --doctest-modules lancedb
   mac:
     timeout-minutes: 30
     runs-on: "macos-12"

View File

@@ -75,4 +75,4 @@ result = table.search([100, 100]).limit(2).to_df()
 ## Blogs, Tutorials & Videos
 * 📈 <a href="https://blog.eto.ai/benchmarking-random-access-in-lance-ed690757a826">2000x better performance with Lance over Parquet</a>
-* 🤖 <a href="https://github.com/lancedb/lancedb/blob/main/notebooks/youtube_transcript_search.ipynb">Build a question and answer bot with LanceDB</a>
+* 🤖 <a href="https://github.com/lancedb/lancedb/blob/main/docs/src/notebooks/youtube_transcript_search.ipynb">Build a question and answer bot with LanceDB</a>

View File

@@ -14,10 +14,24 @@ theme:
 plugins:
   - search
+  - autorefs
   - mkdocstrings:
       handlers:
         python:
           paths: [../python]
+          selection:
+            docstring_style: numpy
+          rendering:
+            heading_level: 4
+            show_source: false
+            show_symbol_type_in_heading: true
+            show_signature_annotations: true
+            show_root_heading: true
+            members_order: source
+          import:
+            # for cross references
+            - https://arrow.apache.org/docs/objects.inv
+            - https://pandas.pydata.org/docs/objects.inv
   - mkdocs-jupyter
 markdown_extensions:
@@ -41,9 +55,13 @@ nav:
     - Python full-text search: fts.md
     - Python integrations: integrations.md
     - Python examples:
-      - YouTube Transcript Search using OpenAI: notebooks/youtube_transcript_search.ipynb
+      - YouTube Transcript Search: notebooks/youtube_transcript_search.ipynb
       - Documentation QA Bot using LangChain: notebooks/code_qa_bot.ipynb
       - Multimodal search using CLIP: notebooks/multimodal_search.ipynb
+      - Serverless QA Bot with S3 and Lambda: examples/serverless_lancedb_with_s3_and_lambda.md
+      - Serverless QA Bot with Modal: examples/serverless_qa_bot_with_modal_and_langchain.md
+    - Javascript examples:
+      - YouTube Transcript Search: examples/youtube_transcript_bot_with_nodejs.md
   - References:
     - Vector Search: search.md
     - Indexing: ann_indexes.md

View File

@@ -15,13 +15,7 @@ from langchain.llms import OpenAI
 from langchain.chains import RetrievalQA

 lancedb_image = Image.debian_slim().pip_install(
-    "lancedb",
-    "langchain",
-    "openai",
-    "pandas",
-    "tiktoken",
-    "unstructured",
-    "tabulate"
+    "lancedb", "langchain", "openai", "pandas", "tiktoken", "unstructured", "tabulate"
 )

 stub = Stub(
@@ -34,21 +28,26 @@ docsearch = None
 docs_path = Path("docs.pkl")
 db_path = Path("lancedb")

+
 def get_document_title(document):
     m = str(document.metadata["source"])
     title = re.findall("pandas.documentation(.*).html", m)
     if title[0] is not None:
-        return(title[0])
-    return ''
+        return title[0]
+    return ""
+

 def download_docs():
-    pandas_docs = requests.get("https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip")
+    pandas_docs = requests.get(
+        "https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip"
+    )
     with open(Path("pandas.documentation.zip"), "wb") as f:
         f.write(pandas_docs.content)
     file = zipfile.ZipFile(Path("pandas.documentation.zip"))
     file.extractall(path=Path("pandas_docs"))
+

 def store_docs():
     docs = []
@@ -74,6 +73,7 @@ def store_docs():
     return docs

+
 def qanda_langchain(query):
     download_docs()
     docs = store_docs()
@@ -86,13 +86,24 @@ def qanda_langchain(query):
     embeddings = OpenAIEmbeddings()

     db = lancedb.connect(db_path)
-    table = db.create_table("pandas_docs", data=[
-        {"vector": embeddings.embed_query("Hello World"), "text": "Hello World", "id": "1"}
-    ], mode="overwrite")
+    table = db.create_table(
+        "pandas_docs",
+        data=[
+            {
+                "vector": embeddings.embed_query("Hello World"),
+                "text": "Hello World",
+                "id": "1",
+            }
+        ],
+        mode="overwrite",
+    )
     docsearch = LanceDB.from_documents(documents, embeddings, connection=table)
-    qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever())
+    qa = RetrievalQA.from_chain_type(
+        llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever()
+    )
     return qa.run(query)

+
 @stub.function()
 @web_endpoint(method="GET")
 def web(query: str):
@@ -101,6 +112,7 @@ def web(query: str):
         "answer": answer,
     }

+
 @stub.function()
 def cli(query: str):
     answer = qanda_langchain(query)

View File

@@ -1,99 +0,0 @@
# YouTube transcript QA bot with NodeJS
## Use LanceDB's Javascript API and OpenAI to build a QA bot for YouTube transcripts

<img id="splash" width="400" alt="nodejs" src="https://github.com/lancedb/lancedb/assets/917119/3a140e75-bf8e-438a-a1e4-af14a72bcf98">

This Q&A bot will allow you to search through YouTube transcripts using natural language! We'll introduce how you can use LanceDB's Javascript API to store and manage your data easily.

For this example, we're using a HuggingFace dataset that contains YouTube transcriptions: `jamescalam/youtube-transcriptions`. To make it easier, we've converted it to a LanceDB `db` already, which you can download and extract into a working directory:

```bash
wget -c https://eto-public.s3.us-west-2.amazonaws.com/lancedb_demo.tar.gz -O - | tar -xz -C .
```

Now, we'll create a simple app that can:
1. Take a text-based query and search for contexts in our corpus, using embeddings generated from the OpenAI Embedding API.
2. Create a prompt with the contexts, and call the OpenAI Completion API to answer the text-based query.

Dependencies and setup of OpenAI API:

```javascript
const lancedb = require("vectordb");
const { Configuration, OpenAIApi } = require("openai");

const configuration = new Configuration({
  apiKey: process.env.OPENAI_API_KEY,
});
const openai = new OpenAIApi(configuration);
```

First, let's set our question and the context amount. The context amount will be used to query similar documents in our corpus.

```javascript
const QUESTION = "who was the 12th person on the moon and when did they land?";
const CONTEXT_AMOUNT = 3;
```

Now, let's generate an embedding from this question:

```javascript
const embeddingResponse = await openai.createEmbedding({
  model: "text-embedding-ada-002",
  input: QUESTION,
});
const embedding = embeddingResponse.data["data"][0]["embedding"];
```

Once we have the embedding, we can connect to LanceDB (using the database we downloaded earlier), and search through the chatbot table.
We'll extract the 3 most similar documents found.

```javascript
const db = await lancedb.connect('./lancedb');
const tbl = await db.openTable('chatbot');
const query = tbl.search(embedding);
query.limit = CONTEXT_AMOUNT;
const context = await query.execute();
```

Let's combine the context together so we can pass it into our prompt:

```javascript
for (let i = 1; i < context.length; i++) {
  context[0]["text"] += " " + context[i]["text"];
}
```

Lastly, let's construct the prompt. You can play around with this to craft prompts that yield more accurate results.

```javascript
const prompt = "Answer the question based on the context below.\n\n" +
  "Context:\n" +
  `${context[0]["text"]}\n` +
  `\n\nQuestion: ${QUESTION}\nAnswer:`;
```

We pass the prompt, along with the context, to the Completion API.

```javascript
const completion = await openai.createCompletion({
  model: "text-davinci-003",
  prompt,
  temperature: 0,
  max_tokens: 400,
  top_p: 1,
  frequency_penalty: 0,
  presence_penalty: 0,
});
```

And that's it!

```javascript
console.log(completion.data.choices[0].text);
```

The response (which is non-deterministic) is:

```
The 12th person on the moon was Harrison Schmitt and he landed on December 11, 1972.
```

View File

@@ -0,0 +1,139 @@
# YouTube transcript QA bot with NodeJS
## Use LanceDB's Javascript API and OpenAI to build a QA bot for YouTube transcripts

<img id="splash" width="400" alt="nodejs" src="https://github.com/lancedb/lancedb/assets/917119/3a140e75-bf8e-438a-a1e4-af14a72bcf98">

This Q&A bot will allow you to search through YouTube transcripts using natural language! We'll introduce how to use LanceDB's Javascript API to store and manage your data easily.

```bash
npm install vectordb
```

## Download the data

For this example, we're using a sample of a HuggingFace dataset that contains YouTube transcriptions: `jamescalam/youtube-transcriptions`. Download this file into the `data` folder:

```bash
wget -c https://eto-public.s3.us-west-2.amazonaws.com/datasets/youtube_transcript/youtube-transcriptions_sample.jsonl
```

## Prepare Context

Each item in the dataset contains just a short chunk of text. We'll need to merge a bunch of these chunks together on a rolling basis. For this demo, we'll look back 20 records to create a more complete context for each sentence.

First, we need to read and parse the input file.

```javascript
const lines = (await fs.readFile(INPUT_FILE_NAME, 'utf-8'))
  .toString()
  .split('\n')
  .filter(line => line.length > 0)
  .map(line => JSON.parse(line))

const data = contextualize(lines, 20, 'video_id')
```

The `contextualize` function groups the transcripts by `video_id` and then creates the expanded context for each item.

```javascript
function contextualize (rows, contextSize, groupColumn) {
  const grouped = []
  rows.forEach(row => {
    if (!grouped[row[groupColumn]]) {
      grouped[row[groupColumn]] = []
    }
    grouped[row[groupColumn]].push(row)
  })

  const data = []
  Object.keys(grouped).forEach(key => {
    for (let i = 0; i < grouped[key].length; i++) {
      const start = i - contextSize > 0 ? i - contextSize : 0
      grouped[key][i].context = grouped[key].slice(start, i + 1).map(r => r.text).join(' ')
    }
    data.push(...grouped[key])
  })
  return data
}
```
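To make the rolling window concrete, here is a tiny hypothetical input and the contexts it produces (a `contextSize` of 2):

```javascript
const rows = [
  { video_id: 'v1', text: 'hello' },
  { video_id: 'v1', text: 'and' },
  { video_id: 'v1', text: 'welcome' }
]
// Each row's context includes up to the two preceding rows of the same video:
console.log(contextualize(rows, 2, 'video_id').map(r => r.context))
// => [ 'hello', 'hello and', 'hello and welcome' ]
```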
## Create the LanceDB Table

To load our data into LanceDB, we need to create embeddings (vectors) for each item. For this example, we will use the OpenAI embedding function, which has a native integration with LanceDB.

```javascript
// You need to provide an OpenAI API key, here we read it from the OPENAI_API_KEY environment variable
const apiKey = process.env.OPENAI_API_KEY
// The embedding function will create embeddings for the 'context' column
const embedFunction = new lancedb.OpenAIEmbeddingFunction('context', apiKey)
// Connects to LanceDB
const db = await lancedb.connect('data/youtube-lancedb')
const tbl = await db.createTable('vectors', data, embedFunction)
```

## Create and answer the prompt

We will accept questions in natural language and use our corpus stored in LanceDB to answer them. First, we need to set up the OpenAI client:

```javascript
const configuration = new Configuration({ apiKey })
const openai = new OpenAIApi(configuration)
```

Then we can prompt for questions and use LanceDB to retrieve the three most relevant transcripts for each prompt.

```javascript
const query = await rl.question('Prompt: ')
const results = await tbl
  .search(query)
  .select(['title', 'text', 'context'])
  .limit(3)
  .execute()
```

The query and the transcripts' context are then combined into a single prompt:

```javascript
function createPrompt (query, context) {
  let prompt =
    'Answer the question based on the context below.\n\n' +
    'Context:\n'

  // need to make sure our prompt is not larger than max size
  prompt = prompt + context.map(c => c.context).join('\n\n---\n\n').substring(0, 3750)
  prompt = prompt + `\n\nQuestion: ${query}\nAnswer:`
  return prompt
}
```

We can now use the OpenAI Completion API to process our custom prompt and give us an answer.

```javascript
const response = await openai.createCompletion({
  model: 'text-davinci-003',
  prompt: createPrompt(query, results),
  max_tokens: 400,
  temperature: 0,
  top_p: 1,
  frequency_penalty: 0,
  presence_penalty: 0
})

console.log(response.data.choices[0].text)
```

## Let's put it all together now

Now we can provide queries and have them answered based on our local LanceDB data.

```bash
Prompt: who was the 12th person on the moon and when did they land?
The 12th person on the moon was Harrison Schmitt and he landed on December 11, 1972.
Prompt: Which training method should I use for sentence transformers when I only have pairs of related sentences?
NLI with multiple negative ranking loss.
```

## That's a wrap

In this example, you learned how to use LanceDB to store and query embedding representations of your local data. The complete example code is on [GitHub](https://github.com/lancedb/lancedb/tree/main/node/examples), and you can also download the LanceDB dataset using [this link](https://eto-public.s3.us-west-2.amazonaws.com/datasets/youtube_transcript/youtube-lancedb.zip).

View File

@@ -8,6 +8,8 @@ The key features of LanceDB include:
 * Store, query and filter vectors, metadata and multi-modal data (text, images, videos, point clouds, and more).
+* Support for vector similarity search, full-text search and SQL.
 * Native Python and Javascript/Typescript support.
 * Zero-copy, automatic versioning, manage versions of your data without needing extra infrastructure.
@@ -54,6 +56,11 @@ LanceDB's core is written in Rust 🦀 and is built using <a href="https://githu
 - [YouTube Transcript Search](notebooks/youtube_transcript_search.ipynb)
 - [Documentation QA Bot using LangChain](notebooks/code_qa_bot.ipynb)
 - [Multimodal search using CLIP](notebooks/multimodal_search.ipynb)
+- [Serverless QA Bot with S3 and Lambda](examples/serverless_lancedb_with_s3_and_lambda.md)
+- [Serverless QA Bot with Modal](examples/serverless_qa_bot_with_modal_and_langchain.md)
+
+## Complete Demos (JavaScript)
+- [YouTube Transcript Search](examples/youtube_transcript_bot_with_nodejs.md)

 ## Documentation Quick Links
 * [`Basic Operations`](basic.md) - basic functionality of LanceDB.
@@ -61,4 +68,5 @@ LanceDB's core is written in Rust 🦀 and is built using <a href="https://githu
 * [`Indexing`](ann_indexes.md) - create vector indexes to speed up queries.
 * [`Full text search`](fts.md) - [EXPERIMENTAL] full-text search API
 * [`Ecosystem Integrations`](integrations.md) - integrating LanceDB with python data tooling ecosystem.
-* [`API Reference`](python.md) - detailed documentation for the LanceDB Python SDK.
+* [`Python API Reference`](python/python.md) - detailed documentation for the LanceDB Python SDK.
+* [`Node API Reference`](javascript/modules.md) - detailed documentation for the LanceDB JavaScript SDK.

View File

@@ -24,9 +24,6 @@ data = pd.DataFrame({
     "price": [10.0, 20.0]
 })
 table = db.create_table("pd_table", data=data)
-
-# Optionally, create a IVF_PQ index
-table.create_index(num_partitions=256, num_sub_vectors=96)
 ```
 You will find detailed instructions of creating dataset and index in [Basic Operations](basic.md) and [Indexing](indexing.md)

View File

@@ -1,11 +1,12 @@
 {
  "cells": [
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "42bf01fb",
    "metadata": {},
    "source": [
-    "# We're going to build question and answer bot\n",
+    "# Youtube Transcript Search QA Bot\n",
     "\n",
     "This Q&A bot will allow you to search through youtube transcripts using natural language! By going through this notebook, we'll introduce how you can use LanceDB to store and manage your data easily."
    ]
@@ -35,6 +36,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "22e570f4",
    "metadata": {},
@@ -87,6 +89,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "5ac2b6a3",
    "metadata": {},
@@ -181,6 +184,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "3044e0b0",
    "metadata": {},
@@ -209,6 +213,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "db586267",
    "metadata": {},
@@ -229,6 +234,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "2106b5bb",
    "metadata": {},
@@ -338,6 +344,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "53e4bff1",
    "metadata": {},
@@ -371,6 +378,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "8ef34fca",
    "metadata": {},
@@ -459,6 +467,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "23afc2f9",
    "metadata": {},
@@ -541,6 +550,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "28705959",
    "metadata": {},
@@ -571,6 +581,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "559a095b",
    "metadata": {},

View File

@@ -6,9 +6,38 @@
 pip install lancedb
 ```

-## ::: lancedb
-## ::: lancedb.db
-## ::: lancedb.table
-## ::: lancedb.query
-## ::: lancedb.embeddings
-## ::: lancedb.context
+## Connection
+
+::: lancedb.connect
+
+::: lancedb.LanceDBConnection
+
+## Table
+
+::: lancedb.table.LanceTable
+
+## Querying
+
+::: lancedb.query.LanceQueryBuilder
+
+::: lancedb.query.LanceFtsQueryBuilder
+
+## Embeddings
+
+::: lancedb.embeddings.with_embeddings
+
+::: lancedb.embeddings.EmbeddingFunction
+
+## Context
+
+::: lancedb.context.contextualize
+
+::: lancedb.context.Contextualizer
+
+## Full text search
+
+::: lancedb.fts.create_index
+
+::: lancedb.fts.populate_index
+
+::: lancedb.fts.search_index

View File

@@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+## [0.1.5] - 2023-06-00
+
+### Added
+
+- Support for macOS X86
+
 ## [0.1.4] - 2023-06-03

 ### Added

View File

@@ -0,0 +1,122 @@
// Copyright 2023 Lance Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'use strict'
const lancedb = require('vectordb')
const fs = require('fs/promises')
const readline = require('readline/promises')
const { stdin: input, stdout: output } = require('process')
const { Configuration, OpenAIApi } = require('openai')
// Download file from XYZ
const INPUT_FILE_NAME = 'data/youtube-transcriptions_sample.jsonl';
(async () => {
  // You need to provide an OpenAI API key, here we read it from the OPENAI_API_KEY environment variable
  const apiKey = process.env.OPENAI_API_KEY
  // The embedding function will create embeddings for the 'context' column
  const embedFunction = new lancedb.OpenAIEmbeddingFunction('context', apiKey)

  // Connects to LanceDB
  const db = await lancedb.connect('data/youtube-lancedb')

  // Open the vectors table or create one if it does not exist
  let tbl
  if ((await db.tableNames()).includes('vectors')) {
    tbl = await db.openTable('vectors', embedFunction)
  } else {
    tbl = await createEmbeddingsTable(db, embedFunction)
  }

  // Use OpenAI Completion API to generate and answer based on the context that LanceDB provides
  const configuration = new Configuration({ apiKey })
  const openai = new OpenAIApi(configuration)

  const rl = readline.createInterface({ input, output })
  try {
    while (true) {
      const query = await rl.question('Prompt: ')
      const results = await tbl
        .search(query)
        .select(['title', 'text', 'context'])
        .limit(3)
        .execute()
      // console.table(results)

      const response = await openai.createCompletion({
        model: 'text-davinci-003',
        prompt: createPrompt(query, results),
        max_tokens: 400,
        temperature: 0,
        top_p: 1,
        frequency_penalty: 0,
        presence_penalty: 0
      })
      console.log(response.data.choices[0].text)
    }
  } catch (err) {
    console.log('Error: ', err)
  } finally {
    rl.close()
  }
  process.exit(1)
})()

async function createEmbeddingsTable (db, embedFunction) {
  console.log(`Creating embeddings from ${INPUT_FILE_NAME}`)

  // read the input file into a JSON array, skipping empty lines
  const lines = (await fs.readFile(INPUT_FILE_NAME, 'utf-8'))
    .toString()
    .split('\n')
    .filter(line => line.length > 0)
    .map(line => JSON.parse(line))

  const data = contextualize(lines, 20, 'video_id')
  return await db.createTable('vectors', data, embedFunction)
}

// Each transcript has a small text column, we include previous transcripts in order to
// have more context information when creating embeddings
function contextualize (rows, contextSize, groupColumn) {
  const grouped = []
  rows.forEach(row => {
    if (!grouped[row[groupColumn]]) {
      grouped[row[groupColumn]] = []
    }
    grouped[row[groupColumn]].push(row)
  })

  const data = []
  Object.keys(grouped).forEach(key => {
    for (let i = 0; i < grouped[key].length; i++) {
      const start = i - contextSize > 0 ? i - contextSize : 0
      grouped[key][i].context = grouped[key].slice(start, i + 1).map(r => r.text).join(' ')
    }
    data.push(...grouped[key])
  })
  return data
}

// Creates a prompt by aggregating all relevant contexts
function createPrompt (query, context) {
  let prompt =
    'Answer the question based on the context below.\n\n' +
    'Context:\n'

  // need to make sure our prompt is not larger than max size
  prompt = prompt + context.map(c => c.context).join('\n\n---\n\n').substring(0, 3750)
  prompt = prompt + `\n\nQuestion: ${query}\nAnswer:`
  return prompt
}

View File

@@ -0,0 +1,15 @@
{
  "name": "vectordb-example-js-openai",
  "version": "1.0.0",
  "description": "",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "author": "Lance Devs",
  "license": "Apache-2.0",
  "dependencies": {
    "vectordb": "file:../..",
    "openai": "^3.2.1"
  }
}

View File

@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.1.4",
+  "version": "0.1.5",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.1.4",
+      "version": "0.1.5",
       "license": "Apache-2.0",
       "dependencies": {
         "@apache-arrow/ts": "^12.0.0",

View File

@@ -1,6 +1,6 @@
 {
   "name": "vectordb",
-  "version": "0.1.4",
+  "version": "0.1.5",
   "description": " Serverless, low-latency vector database for AI applications",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",

python/.bumpversion.cfg (new file, 8 lines)
View File

@@ -0,0 +1,8 @@
[bumpversion]
current_version = 0.1.7
commit = True
message = [python] Bump version: {current_version} → {new_version}
tag = True
tag_name = python-v{new_version}
[bumpversion:file:pyproject.toml]
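With this config, `bumpversion` rewrites the `version` field in `python/pyproject.toml` (see the pyproject diff below), then commits and tags in one step. A sketch of the result for the 0.1.6 → 0.1.7 bump:

```bash
git log -1 --format=%s      # [python] Bump version: 0.1.6 → 0.1.7
git tag --list 'python-v*'  # ... python-v0.1.7
```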

View File

@@ -22,8 +22,21 @@ def connect(uri: URI) -> LanceDBConnection:
     uri: str or Path
         The uri of the database.

+    Examples
+    --------
+    For a local directory, provide a path for the database:
+
+    >>> import lancedb
+    >>> db = lancedb.connect("~/.lancedb")
+
+    For object storage, use a URI prefix:
+
+    >>> db = lancedb.connect("s3://my-bucket/lancedb")
+
     Returns
     -------
+    conn : LanceDBConnection
         A connection to a LanceDB database.
     """
     return LanceDBConnection(uri)

View File

@@ -0,0 +1,18 @@
import builtins
import os

import pytest

# import lancedb so we don't have to in every example
import lancedb


@pytest.fixture(autouse=True)
def doctest_setup(monkeypatch, tmpdir):
    # disable color for doctests so we don't have to include
    # escape codes in docstrings
    monkeypatch.setitem(os.environ, "NO_COLOR", "1")
    # Explicitly set the column width
    monkeypatch.setitem(os.environ, "COLUMNS", "80")
    # Work in a temporary directory
    monkeypatch.chdir(tmpdir)

View File

@@ -13,16 +13,80 @@
 from __future__ import annotations

 import pandas as pd

+from .exceptions import MissingValueError, MissingColumnError
+

 def contextualize(raw_df: pd.DataFrame) -> Contextualizer:
     """Create a Contextualizer object for the given DataFrame.
-    Used to create context windows.
+
+    Used to create context windows. Context windows are rolling subsets of text
+    data.
+
+    The input text column should already be separated into rows that will be the
+    unit of the window. So to create a context window over tokens, start with
+    a DataFrame with one token per row. To create a context window over sentences,
+    start with a DataFrame with one sentence per row.
+
+    Examples
+    --------
+    >>> from lancedb.context import contextualize
+    >>> import pandas as pd
+    >>> data = pd.DataFrame({
+    ...    'token': ['The', 'quick', 'brown', 'fox', 'jumped', 'over',
+    ...              'the', 'lazy', 'dog', 'I', 'love', 'sandwiches'],
+    ...    'document_id': [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2]
+    ... })
+
+    ``window`` determines how many rows to include in each window. In our case
+    this is how many tokens, but depending on the input data, it could be sentences,
+    paragraphs, messages, etc.
+
+    >>> contextualize(data).window(3).stride(1).text_col('token').to_df()
+                  token  document_id
+    0   The quick brown            1
+    1   quick brown fox            1
+    2  brown fox jumped            1
+    3   fox jumped over            1
+    4   jumped over the            1
+    5     over the lazy            1
+    6      the lazy dog            1
+    7        lazy dog I            1
+    8        dog I love            1
+    >>> contextualize(data).window(7).stride(1).text_col('token').to_df()
+                                      token  document_id
+    0   The quick brown fox jumped over the            1
+    1  quick brown fox jumped over the lazy            1
+    2    brown fox jumped over the lazy dog            1
+    3        fox jumped over the lazy dog I            1
+    4       jumped over the lazy dog I love            1
+
+    ``stride`` determines how many rows to skip between each window start. This can
+    be used to reduce the total number of windows generated.
+
+    >>> contextualize(data).window(4).stride(2).text_col('token').to_df()
+                       token  document_id
+    0    The quick brown fox            1
+    2  brown fox jumped over            1
+    4   jumped over the lazy            1
+    6         the lazy dog I            1
+
+    ``groupby`` determines how to group the rows. For example, we would like to have
+    context windows that don't cross document boundaries. In this case, we can
+    pass ``document_id`` as the group by.
+
+    >>> contextualize(data).window(4).stride(2).text_col('token').groupby('document_id').to_df()
+                       token  document_id
+    0    The quick brown fox            1
+    2  brown fox jumped over            1
+    4   jumped over the lazy            1
+    """
     return Contextualizer(raw_df)


 class Contextualizer:
+    """Create context windows from a DataFrame. See [lancedb.context.contextualize][]."""
+
     def __init__(self, raw_df):
         self._text_col = None
         self._groupby = None
@@ -78,6 +142,21 @@ class Contextualizer:
     def to_df(self) -> pd.DataFrame:
         """Create the context windows and return a DataFrame."""
+        if self._text_col not in self._raw_df.columns.tolist():
+            raise MissingColumnError(self._text_col)
+        if self._window is None or self._window < 1:
+            raise MissingValueError(
+                "The value of window is None or less than 1. Specify the "
+                "window size (number of rows to include in each window)"
+            )
+        if self._stride is None or self._stride < 1:
+            raise MissingValueError(
+                "The value of stride is None or less than 1. Specify the "
+                "stride (number of rows to skip between each window)"
+            )
+
         def process_group(grp):
             # For each group, create the text rolling window
             text = grp[self._text_col].values
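A quick sketch of what the new `to_df` validation surfaces (hypothetical data; `MissingColumnError` and `MissingValueError` are defined in the new exceptions module below):

```python
import pandas as pd
from lancedb.context import contextualize

df = pd.DataFrame({"token": ["the", "quick", "brown", "fox"]})

# Naming a column that isn't in the DataFrame raises MissingColumnError (a KeyError)
try:
    contextualize(df).window(2).stride(1).text_col("text").to_df()
except KeyError as err:
    print(err)

# Leaving window/stride unset raises MissingValueError (a ValueError)
try:
    contextualize(df).text_col("token").to_df()
except ValueError as err:
    print(err)
```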

View File

@@ -28,6 +28,31 @@ from .util import get_uri_scheme, get_uri_location

 class LanceDBConnection:
     """
     A connection to a LanceDB database.
+
+    Parameters
+    ----------
+    uri: str or Path
+        The root uri of the database.
+
+    Examples
+    --------
+    >>> import lancedb
+    >>> db = lancedb.connect("./.lancedb")
+    >>> db.create_table("my_table", data=[{"vector": [1.1, 1.2], "b": 2},
+    ...                                   {"vector": [0.5, 1.3], "b": 4}])
+    LanceTable(my_table)
+    >>> db.create_table("another_table", data=[{"vector": [0.4, 0.4], "b": 6}])
+    LanceTable(another_table)
+    >>> db.table_names()
+    ['another_table', 'my_table']
+    >>> len(db)
+    2
+    >>> db["my_table"]
+    LanceTable(my_table)
+    >>> "my_table" in db
+    True
+    >>> db.drop_table("my_table")
+    >>> db.drop_table("another_table")
     """

     def __init__(self, uri: URI):
@@ -48,21 +73,26 @@ class LanceDBConnection:

         Returns
         -------
+        list of str
             A list of table names.
         """
         try:
             filesystem, path = fs.FileSystem.from_uri(self.uri)
         except pa.ArrowInvalid:
-            raise NotImplementedError(
-                "Unsupported scheme: " + self.uri
-            )
+            raise NotImplementedError("Unsupported scheme: " + self.uri)

         try:
-            paths = filesystem.get_file_info(fs.FileSelector(get_uri_location(self.uri)))
+            paths = filesystem.get_file_info(
+                fs.FileSelector(get_uri_location(self.uri))
+            )
         except FileNotFoundError:
             # It is ok if the file does not exist since it will be created
             paths = []
-        tables = [os.path.splitext(file_info.base_name)[0] for file_info in paths if file_info.extension == 'lance']
+        tables = [
+            os.path.splitext(file_info.base_name)[0]
+            for file_info in paths
+            if file_info.extension == "lance"
+        ]
         return tables

     def __len__(self) -> int:
@@ -103,7 +133,73 @@ class LanceDBConnection:

         Returns
         -------
-        A LanceTable object representing the table.
+        LanceTable
+            A reference to the newly created table.
+
+        Examples
+        --------
+        Can create with list of tuples or dictionaries:
+
+        >>> import lancedb
+        >>> db = lancedb.connect("./.lancedb")
+        >>> data = [{"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7},
+        ...         {"vector": [0.2, 1.8], "lat": 40.1, "long": -74.1}]
+        >>> db.create_table("my_table", data)
+        LanceTable(my_table)
+        >>> db["my_table"].head()
+        pyarrow.Table
+        vector: fixed_size_list<item: float>[2]
+          child 0, item: float
+        lat: double
+        long: double
+        ----
+        vector: [[[1.1,1.2],[0.2,1.8]]]
+        lat: [[45.5,40.1]]
+        long: [[-122.7,-74.1]]
+
+        You can also pass a pandas DataFrame:
+
+        >>> import pandas as pd
+        >>> data = pd.DataFrame({
+        ...    "vector": [[1.1, 1.2], [0.2, 1.8]],
+        ...    "lat": [45.5, 40.1],
+        ...    "long": [-122.7, -74.1]
+        ... })
+        >>> db.create_table("table2", data)
+        LanceTable(table2)
+        >>> db["table2"].head()
+        pyarrow.Table
+        vector: fixed_size_list<item: float>[2]
+          child 0, item: float
+        lat: double
+        long: double
+        ----
+        vector: [[[1.1,1.2],[0.2,1.8]]]
+        lat: [[45.5,40.1]]
+        long: [[-122.7,-74.1]]
+
+        Data is converted to Arrow before being written to disk. For maximum
+        control over how data is saved, either provide the PyArrow schema to
+        convert to or else provide a PyArrow table directly.
+
+        >>> custom_schema = pa.schema([
+        ...    pa.field("vector", pa.list_(pa.float32(), 2)),
+        ...    pa.field("lat", pa.float32()),
+        ...    pa.field("long", pa.float32())
+        ... ])
+        >>> db.create_table("table3", data, schema = custom_schema)
+        LanceTable(table3)
+        >>> db["table3"].head()
+        pyarrow.Table
+        vector: fixed_size_list<item: float>[2]
+          child 0, item: float
+        lat: float
+        long: float
+        ----
+        vector: [[[1.1,1.2],[0.2,1.8]]]
+        lat: [[45.5,40.1]]
+        long: [[-122.7,-74.1]]
         """
         if data is not None:
             tbl = LanceTable.create(self, name, data, schema, mode=mode)

View File

@@ -29,7 +29,31 @@ def with_embeddings(
     wrap_api: bool = True,
     show_progress: bool = False,
     batch_size: int = 1000,
-):
+) -> pa.Table:
+    """Add a vector column to a table using the given embedding function.
+
+    The new columns will be called "vector".
+
+    Parameters
+    ----------
+    func : Callable
+        A function that takes a list of strings and returns a list of vectors.
+    data : pa.Table or pd.DataFrame
+        The data to add an embedding column to.
+    column : str, default "text"
+        The name of the column to use as input to the embedding function.
+    wrap_api : bool, default True
+        Whether to wrap the embedding function in a retry and rate limiter.
+    show_progress : bool, default False
+        Whether to show a progress bar.
+    batch_size : int, default 1000
+        The number of row values to pass to each call of the embedding function.
+
+    Returns
+    -------
+    pa.Table
+        The input table with a new column called "vector" containing the embeddings.
+    """
     func = EmbeddingFunction(func)
     if wrap_api:
         func = func.retry().rate_limit()
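A usage sketch for the documented signature (the embedder here is a stand-in; any callable from a list of strings to a list of vectors works):

```python
import pandas as pd
from lancedb.embeddings import with_embeddings

def embed(batch):
    # stand-in embedder; swap in a real model call
    return [[float(len(s)), 0.0] for s in batch]

df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
# Returns a pyarrow Table with a new "vector" column appended
tbl = with_embeddings(embed, df, column="text", wrap_api=False)
```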

View File

@@ -0,0 +1,22 @@
"""Custom exception handling"""
class MissingValueError(ValueError):
"""Exception raised when a required value is missing."""
pass
class MissingColumnError(KeyError):
"""
Exception raised when a column name specified is not in
the DataFrame object
"""
def __init__(self, column_name):
self.column_name = column_name
def __str__(self):
return (
f"Error: Column '{self.column_name}' does not exist in the DataFrame object"
)

View File

@@ -68,6 +68,11 @@ def populate_index(index: tantivy.Index, table: LanceTable, fields: List[str]) -
         The table to index
     fields : List[str]
         List of fields to index
+
+    Returns
+    -------
+    int
+        The number of rows indexed
     """
     # first check the fields exist and are string or large string type
     for name in fields:

View File

@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import annotations
+from typing import Literal

 import numpy as np
 import pandas as pd
@@ -22,6 +23,24 @@ from .common import VECTOR_COLUMN_NAME
 class LanceQueryBuilder:
     """
     A builder for nearest neighbor queries for LanceDB.
+
+    Examples
+    --------
+    >>> import lancedb
+    >>> data = [{"vector": [1.1, 1.2], "b": 2},
+    ...         {"vector": [0.5, 1.3], "b": 4},
+    ...         {"vector": [0.4, 0.4], "b": 6},
+    ...         {"vector": [0.4, 0.4], "b": 10}]
+    >>> db = lancedb.connect("./.lancedb")
+    >>> table = db.create_table("my_table", data=data)
+    >>> (table.search([0.4, 0.4])
+    ...        .metric("cosine")
+    ...        .where("b < 10")
+    ...        .select(["b"])
+    ...        .limit(2)
+    ...        .to_df())
+       b      vector  score
+    0  6  [0.4, 0.4]    0.0
     """

     def __init__(self, table: "lancedb.table.LanceTable", query: np.ndarray):
@@ -44,6 +63,7 @@ class LanceQueryBuilder:

         Returns
         -------
+        LanceQueryBuilder
             The LanceQueryBuilder object.
         """
         self._limit = limit
@@ -59,6 +79,7 @@ class LanceQueryBuilder:

         Returns
         -------
+        LanceQueryBuilder
             The LanceQueryBuilder object.
         """
         self._columns = columns
@@ -74,21 +95,23 @@ class LanceQueryBuilder:

         Returns
         -------
+        LanceQueryBuilder
             The LanceQueryBuilder object.
         """
         self._where = where
         return self

-    def metric(self, metric: str) -> LanceQueryBuilder:
+    def metric(self, metric: Literal["L2", "cosine"]) -> LanceQueryBuilder:
         """Set the distance metric to use.

         Parameters
         ----------
-        metric: str
-            The distance metric to use. By default "l2" is used.
+        metric: "L2" or "cosine"
+            The distance metric to use. By default "L2" is used.

         Returns
         -------
+        LanceQueryBuilder
             The LanceQueryBuilder object.
         """
         self._metric = metric
@@ -97,6 +120,12 @@ class LanceQueryBuilder:
     def nprobes(self, nprobes: int) -> LanceQueryBuilder:
         """Set the number of probes to use.

+        Higher values will yield better recall (more likely to find vectors if
+        they exist) at the expense of latency.
+
+        See discussion in [Querying an ANN Index][../querying-an-ann-index] for
+        tuning advice.
+
         Parameters
         ----------
         nprobes: int
@@ -104,13 +133,20 @@ class LanceQueryBuilder:

         Returns
         -------
+        LanceQueryBuilder
             The LanceQueryBuilder object.
         """
         self._nprobes = nprobes
         return self

     def refine_factor(self, refine_factor: int) -> LanceQueryBuilder:
-        """Set the refine factor to use.
+        """Set the refine factor to use, increasing the number of vectors sampled.
+
+        As an example, a refine factor of 2 will sample 2x as many vectors as
+        requested, re-ranks them, and returns the top half most relevant results.
+
+        See discussion in [Querying an ANN Index][querying-an-ann-index] for
+        tuning advice.

         Parameters
         ----------
@@ -119,6 +155,7 @@ class LanceQueryBuilder:

         Returns
         -------
+        LanceQueryBuilder
             The LanceQueryBuilder object.
         """
         self._refine_factor = refine_factor
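The knobs documented above compose on a single query; a sketch with illustrative values:

```python
# assumes `table` is a LanceTable with an ANN index built
(table.search([0.4, 0.4])
      .metric("cosine")
      .nprobes(20)         # probe more partitions -> better recall, higher latency
      .refine_factor(2)    # over-fetch 2x and re-rank for accuracy
      .limit(10)
      .to_df())
```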

View File

@@ -47,6 +47,40 @@ def _sanitize_data(data, schema):
 class LanceTable:
     """
     A table in a LanceDB database.
+
+    Examples
+    --------
+    Create using [LanceDBConnection.create_table][lancedb.LanceDBConnection.create_table]
+    (more examples in that method's documentation).
+
+    >>> import lancedb
+    >>> db = lancedb.connect("./.lancedb")
+    >>> table = db.create_table("my_table", data=[{"vector": [1.1, 1.2], "b": 2}])
+    >>> table.head()
+    pyarrow.Table
+    vector: fixed_size_list<item: float>[2]
+      child 0, item: float
+    b: int64
+    ----
+    vector: [[[1.1,1.2]]]
+    b: [[2]]
+
+    Can append new data with [LanceTable.add][lancedb.table.LanceTable.add].
+
+    >>> table.add([{"vector": [0.5, 1.3], "b": 4}])
+    2
+
+    Can query the table with [LanceTable.search][lancedb.table.LanceTable.search].
+
+    >>> table.search([0.4, 0.4]).select(["b"]).to_df()
+       b      vector  score
+    0  4  [0.5, 1.3]   0.82
+    1  2  [1.1, 1.2]   1.13
+
+    Search queries are much faster when an index is created. See
+    [LanceTable.create_index][lancedb.table.LanceTable.create_index].
     """

     def __init__(
@@ -64,7 +98,12 @@ class LanceTable:
     @property
     def schema(self) -> pa.Schema:
-        """Return the schema of the table."""
+        """Return the schema of the table.
+
+        Returns
+        -------
+        pa.Schema
+            A PyArrow schema object."""
         return self._dataset.schema

     def list_versions(self):
@@ -72,12 +111,39 @@ class LanceTable:
         return self._dataset.versions()

     @property
-    def version(self):
+    def version(self) -> int:
         """Get the current version of the table"""
         return self._dataset.version

     def checkout(self, version: int):
-        """Checkout a version of the table"""
+        """Checkout a version of the table. This is an in-place operation.
+
+        This allows viewing previous versions of the table.
+
+        Parameters
+        ----------
+        version : int
+            The version to checkout.
+
+        Examples
+        --------
+        >>> import lancedb
+        >>> db = lancedb.connect("./.lancedb")
+        >>> table = db.create_table("my_table", [{"vector": [1.1, 0.9], "type": "vector"}])
+        >>> table.version
+        1
+        >>> table.to_pandas()
+               vector    type
+        0  [1.1, 0.9]  vector
+        >>> table.add([{"vector": [0.5, 0.2], "type": "vector"}])
+        2
+        >>> table.version
+        2
+        >>> table.checkout(1)
+        >>> table.to_pandas()
+               vector    type
+        0  [1.1, 0.9]  vector
+        """
         max_ver = max([v["version"] for v in self._dataset.versions()])
         if version < 1 or version > max_ver:
             raise ValueError(f"Invalid version {version}")
@@ -98,11 +164,20 @@ class LanceTable:
         return self._dataset.head(n)

     def to_pandas(self) -> pd.DataFrame:
-        """Return the table as a pandas DataFrame."""
+        """Return the table as a pandas DataFrame.
+
+        Returns
+        -------
+        pd.DataFrame
+        """
         return self.to_arrow().to_pandas()

     def to_arrow(self) -> pa.Table:
-        """Return the table as a pyarrow Table."""
+        """Return the table as a pyarrow Table.
+
+        Returns
+        -------
+        pa.Table"""
         return self._dataset.to_table()

     @property
@@ -175,7 +250,8 @@ class LanceTable:

         Returns
         -------
-        The number of vectors added to the table.
+        int
+            The number of vectors in the table.
         """
         data = _sanitize_data(data, self.schema)
         lance.write_dataset(data, self._dataset_uri, mode=mode)
@@ -193,7 +269,8 @@ class LanceTable:

         Returns
         -------
-        A LanceQueryBuilder object representing the query.
+        LanceQueryBuilder
+            A query builder object representing the query.
             Once executed, the query returns selected columns, the vector,
             and also the "score" column which is the distance between the query
             vector and the returned vector.
@@ -265,4 +342,6 @@ def _sanitize_vector_column(data: pa.Table, vector_column_name: str) -> pa.Table
     values = values.cast(pa.float32())
     list_size = len(values) / len(data)
     vec_arr = pa.FixedSizeListArray.from_arrays(values, list_size)
-    return data.set_column(data.column_names.index(vector_column_name), vector_column_name, vec_arr)
+    return data.set_column(
+        data.column_names.index(vector_column_name), vector_column_name, vec_arr
+    )

View File

@@ -1,6 +1,6 @@
 [project]
 name = "lancedb"
-version = "0.1.6"
+version = "0.1.7"
 dependencies = ["pylance>=0.4.17", "ratelimiter", "retry", "tqdm"]
 description = "lancedb"
 authors = [
@@ -37,7 +37,7 @@ repository = "https://github.com/lancedb/lancedb"
 [project.optional-dependencies]
 tests = [
-    "pytest", "pytest-mock"
+    "pytest", "pytest-mock", "doctest"
 ]
 dev = [
     "ruff", "pre-commit", "black"

View File

@@ -19,6 +19,7 @@ import lancedb

 # You need to setup AWS credentials an a base path to run this test. Example
 # AWS_PROFILE=default TEST_S3_BASE_URL=s3://my_bucket/dataset pytest tests/test_io.py
+
 @pytest.mark.skipif(
     (os.environ.get("TEST_S3_BASE_URL") is None),
     reason="please setup s3 base url",

View File

@@ -30,23 +30,17 @@ class MockTable:

 @pytest.fixture
 def table(tmp_path) -> MockTable:
-    df = pd.DataFrame(
+    df = pa.table(
         {
-            "vector": [[1, 2], [3, 4]],
-            "id": [1, 2],
-            "str_field": ["a", "b"],
-            "float_field": [1.0, 2.0],
+            "vector": pa.array(
+                [[1, 2], [3, 4]], type=pa.list_(pa.float32(), list_size=2)
+            ),
+            "id": pa.array([1, 2]),
+            "str_field": pa.array(["a", "b"]),
+            "float_field": pa.array([1.0, 2.0]),
         }
     )
-    schema = pa.schema(
-        [
-            pa.field("vector", pa.list_(pa.float32(), list_size=2)),
-            pa.field("id", pa.int32()),
-            pa.field("str_field", pa.string()),
-            pa.field("float_field", pa.float64()),
-        ]
-    )
-    lance.write_dataset(df, tmp_path, schema)
+    lance.write_dataset(df, tmp_path)
     return MockTable(tmp_path)

@@ -65,7 +59,7 @@ def test_query_builder_with_filter(table):
 def test_query_builder_with_metric(table):
     query = [4, 8]
     df_default = LanceQueryBuilder(table, query).to_df()
-    df_l2 = LanceQueryBuilder(table, query).metric("l2").to_df()
+    df_l2 = LanceQueryBuilder(table, query).metric("L2").to_df()
     tm.assert_frame_equal(df_default, df_l2)

     df_cosine = LanceQueryBuilder(table, query).metric("cosine").limit(1).to_df()