Compare commits

...

11 Commits

Author SHA1 Message Date
Lance Release
c3059dc689 [python] Bump version: 0.4.2 → 0.4.3 2023-12-30 00:52:54 +00:00
Lei Xu
a9caa5f2d4 chore: bump pylance to 0.9.2 (#754) 2023-12-29 16:39:45 -08:00
Xin Hao
8411c36b96 docs: fix link (#752) 2023-12-29 15:33:24 -08:00
Chang She
7773bda7ee feat(python): first cut batch queries for remote api (#753)
Issues separate requests under the hood and concatenates the results.
2023-12-29 15:33:03 -08:00
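To illustrate what this enables, here is a hypothetical sketch (not from the commit — the connection URI, table name, and vectors are illustrative; the `query_index` column comes from the `add_index` helper added in this change):

```python
import lancedb

# Remote ("db://") connections go through the LanceDB Cloud REST client.
db = lancedb.connect("db://my-project", api_key="...")
tbl = db.open_table("my_table")

# Passing multiple query vectors fans out into one request per vector;
# the results are concatenated into a single pyarrow Table with a
# "query_index" column marking which query each row belongs to.
results = tbl.search([[0.1, 0.2], [0.3, 0.4]]).limit(5).to_arrow()
```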
Lance Release
392777952f [python] Bump version: 0.4.1 → 0.4.2 2023-12-29 00:19:21 +00:00
Chang She
7e75e50d3a chore(python): update embedding API to use openai 1.6.1 (#751)
The API has changed significantly; notably, `openai.Embedding.create` no
longer exists.
https://github.com/openai/openai-python/discussions/742

Update the OpenAI embedding function and set a minimum version on the
openai sdk.
2023-12-28 15:05:57 -08:00
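For reference, a minimal sketch of the migration (the model name is illustrative; the client-based calls mirror what `embeddings.py` now does in the diff below):

```python
import openai

texts = ["Frodo was a happy puppy"]

# Pre-1.0 style, which no longer exists:
#   rs = openai.Embedding.create(input=texts, model="text-embedding-ada-002")["data"]
#   embeddings = [v["embedding"] for v in rs]

# openai>=1.0 client-based style:
client = openai.OpenAI()  # reads OPENAI_API_KEY from the environment
rs = client.embeddings.create(input=texts, model="text-embedding-ada-002")
embeddings = [v.embedding for v in rs.data]
```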
Chang She
4b8af261a3 feat: add timezone handling for datetime in pydantic (#578)
If you add timezone information in the Field annotation for a datetime,
it will now be passed through to the pyarrow data type.

I'm not sure how pyarrow enforces timezones; right now it silently
coerces to the timezone given in the column regardless of whether the
input had a matching timezone. This is probably not the right behavior.
Alternatively, we could make the user do the validation in the pydantic
model instead of doing it at the pyarrow conversion layer.
2023-12-28 11:02:56 -08:00
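A minimal sketch of the new behavior (model and field names are illustrative):

```python
from datetime import datetime

from lancedb.pydantic import LanceModel, pydantic_to_schema
from pydantic import Field

class Event(LanceModel):
    # The "tz" extra is picked up during pydantic -> pyarrow conversion.
    ts: datetime = Field(json_schema_extra={"tz": "America/New_York"})

schema = pydantic_to_schema(Event)
# "ts" now maps to pa.timestamp("us", tz="America/New_York")
# instead of the timezone-naive pa.timestamp("us").
```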
Chang She
c8728d4ca1 feat(python): add post filtering for full text search (#739)
Closes #721 

fts returns results as a pyarrow Table. Pyarrow Tables have a `filter`
method, but it does not accept SQL filter strings (only pyarrow compute
expressions). Instead, we do one of two things to support
`tbl.search("keywords").where("foo=5").limit(10).to_arrow()`:

- Default path: if duckdb is available, use duckdb to execute the SQL
  filter string on the pyarrow table.
- Backup path: otherwise, write the pyarrow table to a lance dataset and
  then do `to_table(filter=<filter>)`.

Neither is ideal.

The default path has two issues:
1. it requires installing an extra library (duckdb);
2. duckdb mangles some fields (e.g., fixed size list => list).

The backup path incurs a latency penalty (~20ms on SSD) to write the
result set to disk.

In the short term, once #676 is addressed, we can write the dataset to
"memory://" instead of disk, which makes the post-filter evaluate much
more quickly (ETA next week).

In the longer term, we'd like to evaluate the filter string on the
pyarrow Table directly. One possibility is using Substrait to generate
pyarrow compute expressions from the SQL string; or, if there's enough
progress in pyarrow, it could support Substrait expressions directly
(no ETA).

---------

Co-authored-by: Will Jones <willjones127@gmail.com>
2023-12-27 09:31:04 -08:00
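End to end, the feature looks like this (a sketch; the database path, table, and column names are illustrative):

```python
import lancedb

db = lancedb.connect("data/sample-lancedb")
tbl = db.open_table("my_table")

# The SQL predicate is applied after the full text search — via duckdb
# when it is installed, otherwise via a temporary lance dataset.
results = tbl.search("keywords").where("foo=5").limit(10).to_arrow()
```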
Aidan
446f837335 fix: createIndex index cache size (#741) 2023-12-27 09:25:13 -08:00
Chang She
8f9ad978f5 feat(python): support list of list fields from pydantic schema (#747)
For object detection, each row may correspond to an image, and each
image can have multiple bounding boxes of x-y coordinates. This means a
`bbox` field is potentially "list of list of float". This adds support
for nested lists to our pydantic-pyarrow conversion.
2023-12-27 09:10:09 -08:00
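A sketch of the kind of schema this enables (names are illustrative):

```python
from lancedb.pydantic import LanceModel, Vector, pydantic_to_schema

class Detection(LanceModel):
    image_uri: str
    vector: Vector(512)
    # One [x1, y1, x2, y2] box per detected object, i.e. list of list of float.
    bbox: list[list[float]]

schema = pydantic_to_schema(Detection)
# "bbox" converts to pa.list_(pa.list_(pa.float64()))
```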
Lance Release
0df38341d5 Updating package-lock.json 2023-12-26 17:21:51 +00:00
16 changed files with 269 additions and 70 deletions

View File

@@ -5,10 +5,10 @@ exclude = ["python"]
 resolver = "2"

 [workspace.dependencies]
-lance = { "version" = "=0.9.1", "features" = ["dynamodb"] }
-lance-index = { "version" = "=0.9.1" }
-lance-linalg = { "version" = "=0.9.1" }
-lance-testing = { "version" = "=0.9.1" }
+lance = { "version" = "=0.9.2", "features" = ["dynamodb"] }
+lance-index = { "version" = "=0.9.2" }
+lance-linalg = { "version" = "=0.9.2" }
+lance-testing = { "version" = "=0.9.2" }
 # Note that this one does not include pyarrow
 arrow = { version = "49.0.0", optional = false }
 arrow-array = "49.0"

View File

@@ -29,8 +29,9 @@ uri = "data/sample-lancedb"
 db = lancedb.connect(uri)
 table = db.create_table("my_table",
-            data=[{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy"},
-                  {"vector": [5.9, 26.5], "text": "There are several kittens playing"}])
+            data=[{"vector": [3.1, 4.1], "text": "Frodo was a happy puppy", "meta": "foo"},
+                  {"vector": [5.9, 26.5], "text": "Sam was a loyal puppy", "meta": "bar"},
+                  {"vector": [15.9, 6.5], "text": "There are several kittens playing"}])
 ```
@@ -64,10 +65,23 @@ table.create_fts_index(["text1", "text2"])
 Note that the search API call does not change - you can search over all indexed columns at once.

+## Filtering
+
+Currently the LanceDB full text search feature supports *post-filtering*, meaning filters are
+applied on top of the full text search results. This can be invoked via the familiar
+`where` syntax:
+
+```python
+table.search("puppy").limit(10).where("meta='foo'").to_list()
+```
+
 ## Current limitations

 1. Currently we do not yet support incremental writes.
    If you add data after fts index creation, it won't be reflected
    in search results until you do a full reindex.
 2. We currently only support local filesystem paths for the fts index.
+   This is a tantivy limitation. We've implemented an object store plugin,
+   but there's no way in tantivy-py to specify using it.

View File

@@ -118,6 +118,84 @@ This guide will show how to create tables, insert data into them, and update the
     table = db.create_table(table_name, schema=Content)
 ```

+#### Nested schemas
+
+Sometimes your data model may contain nested objects.
+For example, you may want to store the document string
+and the document source name as a nested Document object:
+
+```python
+class Document(BaseModel):
+    content: str
+    source: str
+```
+
+This can be used as the type of a LanceDB table column:
+
+```python
+class NestedSchema(LanceModel):
+    id: str
+    vector: Vector(1536)
+    document: Document
+
+tbl = db.create_table("nested_table", schema=NestedSchema, mode="overwrite")
+```
+
+This creates a struct column called "document" that has two subfields
+called "content" and "source":
+
+```
+In [28]: tbl.schema
+Out[28]:
+id: string not null
+vector: fixed_size_list<item: float>[1536] not null
+    child 0, item: float
+document: struct<content: string not null, source: string not null> not null
+    child 0, content: string not null
+    child 1, source: string not null
+```
+
+#### Validators
+
+Note that neither pydantic nor pyarrow automatically validates that input data
+is of the *correct* timezone, but this is easy to add as a custom field validator:
+
+```python
+from datetime import datetime
+from zoneinfo import ZoneInfo
+
+from lancedb.pydantic import LanceModel
+from pydantic import Field, field_validator, ValidationError, ValidationInfo
+
+tzname = "America/New_York"
+tz = ZoneInfo(tzname)
+
+
+class TestModel(LanceModel):
+    dt_with_tz: datetime = Field(json_schema_extra={"tz": tzname})
+
+    @field_validator("dt_with_tz")
+    @classmethod
+    def tz_must_match(cls, dt: datetime) -> datetime:
+        assert dt.tzinfo == tz
+        return dt
+
+
+ok = TestModel(dt_with_tz=datetime.now(tz))
+
+try:
+    TestModel(dt_with_tz=datetime.now(ZoneInfo("Asia/Shanghai")))
+    assert 0 == 1, "this should raise ValidationError"
+except ValidationError:
+    print("A ValidationError was raised.")
+```
+
+When you run this code it should print "A ValidationError was raised."
+
+#### Pydantic custom types
+
+LanceDB does NOT yet support converting pydantic custom types. If this is something you need,
+please file a feature request on the [LanceDB Github repo](https://github.com/lancedb/lancedb/issues/new).
+
 ### Using Iterators / Writing Large Datasets

 It is recommended to use iterators to add large datasets in batches when creating your table in one go. Unlike manually adding batches using `table.add()`, this does not create multiple versions of your dataset.
@@ -153,7 +231,7 @@ This guide will show how to create tables, insert data into them, and update the
 You can also use iterators of other types like Pandas dataframe or Pylists directly in the above example.

 ## Creating Empty Table

-You can also create empty tables in python. Initialize it with schema and later ingest data into it.
+You can create empty tables in python. Initialize it with a schema and ingest data into it later.

 ```python
 import lancedb

View File

@@ -7,7 +7,7 @@ LanceDB integrates with Pydantic for schema inference, data ingestion, and queries.
 LanceDB supports creating an Apache Arrow Schema from a
 [Pydantic BaseModel](https://docs.pydantic.dev/latest/api/main/#pydantic.main.BaseModel)
-via the [pydantic_to_schema()](python.md##lancedb.pydantic.pydantic_to_schema) method.
+via the [pydantic_to_schema()](python.md#lancedb.pydantic.pydantic_to_schema) method.

 ::: lancedb.pydantic.pydantic_to_schema

node/package-lock.json (generated)
View File

@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.4.0",
+  "version": "0.4.1",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.4.0",
+      "version": "0.4.1",
       "cpu": [
         "x64",
         "arm64"
@@ -53,11 +53,11 @@
         "uuid": "^9.0.0"
       },
       "optionalDependencies": {
-        "@lancedb/vectordb-darwin-arm64": "0.4.0",
-        "@lancedb/vectordb-darwin-x64": "0.4.0",
-        "@lancedb/vectordb-linux-arm64-gnu": "0.4.0",
-        "@lancedb/vectordb-linux-x64-gnu": "0.4.0",
-        "@lancedb/vectordb-win32-x64-msvc": "0.4.0"
+        "@lancedb/vectordb-darwin-arm64": "0.4.1",
+        "@lancedb/vectordb-darwin-x64": "0.4.1",
+        "@lancedb/vectordb-linux-arm64-gnu": "0.4.1",
+        "@lancedb/vectordb-linux-x64-gnu": "0.4.1",
+        "@lancedb/vectordb-win32-x64-msvc": "0.4.1"
       }
     },
     "node_modules/@apache-arrow/ts": {
@@ -317,9 +317,9 @@
       }
     },
     "node_modules/@lancedb/vectordb-darwin-arm64": {
-      "version": "0.4.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.4.0.tgz",
-      "integrity": "sha512-cP6zGtBWXEcJHCI4uLNIP5ILtRvexvwmL8Uri1dnHG8dT8g12Ykug3BHO6Wt6wp/xASd2jJRIF/VAJsN9IeP1A==",
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.4.1.tgz",
+      "integrity": "sha512-ul/Hvv5RX2RThpKSuiUjJRVrmXuBPvpU+HrLjcBmu4dzpuWN4+IeHIUM6xe79gLxOKlwkscVweTOuZnmMfsZeg==",
       "cpu": [
         "arm64"
       ],
@@ -329,9 +329,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-darwin-x64": {
-      "version": "0.4.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.4.0.tgz",
-      "integrity": "sha512-ig0gV5ol1sFe2lb1HOatK0rizyj9I91WbnH79i7OdUl3nAQIcWm70CnxrPLtx0DS2NTGh2kFJbYCWcaUlu6YfA==",
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.4.1.tgz",
+      "integrity": "sha512-sJtF2Cv6T9RhUpdeHNkryiJwPuW9QPQ3aMs5fID1hMCJA2U3BX27t/WlkiPT2+kTLeUcwF1JvAOgsfvZkfvI8w==",
       "cpu": [
         "x64"
       ],
@@ -341,9 +341,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
-      "version": "0.4.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.4.0.tgz",
-      "integrity": "sha512-gMXIDT2kriAPDwWIRKXdaTCNdOeFGEok1S9Y30AOruHXddW1vCIo4JNJIYbBqHnwAeI4wI3ae6GRCFaf1UxO3g==",
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.4.1.tgz",
+      "integrity": "sha512-tNnziT0BRjPsznKI4GgWROFdCOsCGx0inFu0z+WV1UomwXKcMWGslpWBqKE8IUiCq14duPVx/ie7Wwcf51IeJQ==",
       "cpu": [
         "arm64"
       ],
@@ -353,9 +353,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-linux-x64-gnu": {
-      "version": "0.4.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.4.0.tgz",
-      "integrity": "sha512-ZQ3lDrDSz1IKdx/mS9Lz08agFO+OD5oSFrrcFNCoT1+H93eS1mCLdmCoEARu3jKbx0tMs38l5J9yXZ2QmJye3w==",
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.4.1.tgz",
+      "integrity": "sha512-PAcF2p1FUsC0AD+qkLfgE5+ZlQwlHe9eTP9dSsX43V/NGPDQ9+gBzaBTEDbvyHj1wl2Wft2NwOqB1HAFhilSDg==",
       "cpu": [
         "x64"
       ],
@@ -365,9 +365,9 @@
       ]
     },
     "node_modules/@lancedb/vectordb-win32-x64-msvc": {
-      "version": "0.4.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.4.0.tgz",
-      "integrity": "sha512-toNcNwBRE1sdsSf5hr7W8QiqZ33csc/knVEek4CyvYkZHJGh4Z6WI+DJUIASo5wzUez4TX7qUPpRPL9HuaPMCg==",
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.4.1.tgz",
+      "integrity": "sha512-8mvThCppI/AfSPby6Y3t6xpCfbo8IY6JH5exO8fDGTwBFHOqgwR4Izb2K7FgXxkwUYcN4EfGSsk/6B1GpwMudg==",
       "cpu": [
         "x64"
       ],
@@ -4869,33 +4869,33 @@
       }
     },
     "@lancedb/vectordb-darwin-arm64": {
-      "version": "0.4.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.4.0.tgz",
-      "integrity": "sha512-cP6zGtBWXEcJHCI4uLNIP5ILtRvexvwmL8Uri1dnHG8dT8g12Ykug3BHO6Wt6wp/xASd2jJRIF/VAJsN9IeP1A==",
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.4.1.tgz",
+      "integrity": "sha512-ul/Hvv5RX2RThpKSuiUjJRVrmXuBPvpU+HrLjcBmu4dzpuWN4+IeHIUM6xe79gLxOKlwkscVweTOuZnmMfsZeg==",
       "optional": true
     },
     "@lancedb/vectordb-darwin-x64": {
-      "version": "0.4.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.4.0.tgz",
-      "integrity": "sha512-ig0gV5ol1sFe2lb1HOatK0rizyj9I91WbnH79i7OdUl3nAQIcWm70CnxrPLtx0DS2NTGh2kFJbYCWcaUlu6YfA==",
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.4.1.tgz",
+      "integrity": "sha512-sJtF2Cv6T9RhUpdeHNkryiJwPuW9QPQ3aMs5fID1hMCJA2U3BX27t/WlkiPT2+kTLeUcwF1JvAOgsfvZkfvI8w==",
       "optional": true
     },
     "@lancedb/vectordb-linux-arm64-gnu": {
-      "version": "0.4.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.4.0.tgz",
-      "integrity": "sha512-gMXIDT2kriAPDwWIRKXdaTCNdOeFGEok1S9Y30AOruHXddW1vCIo4JNJIYbBqHnwAeI4wI3ae6GRCFaf1UxO3g==",
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.4.1.tgz",
+      "integrity": "sha512-tNnziT0BRjPsznKI4GgWROFdCOsCGx0inFu0z+WV1UomwXKcMWGslpWBqKE8IUiCq14duPVx/ie7Wwcf51IeJQ==",
      "optional": true
     },
     "@lancedb/vectordb-linux-x64-gnu": {
-      "version": "0.4.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.4.0.tgz",
-      "integrity": "sha512-ZQ3lDrDSz1IKdx/mS9Lz08agFO+OD5oSFrrcFNCoT1+H93eS1mCLdmCoEARu3jKbx0tMs38l5J9yXZ2QmJye3w==",
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.4.1.tgz",
+      "integrity": "sha512-PAcF2p1FUsC0AD+qkLfgE5+ZlQwlHe9eTP9dSsX43V/NGPDQ9+gBzaBTEDbvyHj1wl2Wft2NwOqB1HAFhilSDg==",
       "optional": true
     },
     "@lancedb/vectordb-win32-x64-msvc": {
-      "version": "0.4.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.4.0.tgz",
-      "integrity": "sha512-toNcNwBRE1sdsSf5hr7W8QiqZ33csc/knVEek4CyvYkZHJGh4Z6WI+DJUIASo5wzUez4TX7qUPpRPL9HuaPMCg==",
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.4.1.tgz",
+      "integrity": "sha512-8mvThCppI/AfSPby6Y3t6xpCfbo8IY6JH5exO8fDGTwBFHOqgwR4Izb2K7FgXxkwUYcN4EfGSsk/6B1GpwMudg==",
       "optional": true
     },
     "@neon-rs/cli": {

View File

@@ -267,7 +267,7 @@ export class RemoteTable<T = number[]> implements Table<T> {
     const column = indexParams.column ?? 'vector'
     const indexType = 'vector' // only vector index is supported for remote connections
     const metricType = indexParams.metric_type ?? 'L2'
-    const indexCacheSize = indexParams ?? null
+    const indexCacheSize = indexParams.index_cache_size ?? null
     const data = {
       column,

View File

@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.4.1
+current_version = 0.4.3
 commit = True
 message = [python] Bump version: {current_version} → {new_version}
 tag = True

View File

@@ -10,6 +10,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from functools import cached_property
 from typing import List, Union

 import numpy as np
@@ -44,6 +45,10 @@ class OpenAIEmbeddings(TextEmbeddingFunction):
             The texts to embed
         """
         # TODO retry, rate limit, token limit
-        openai = self.safe_import("openai")
-        rs = openai.Embedding.create(input=texts, model=self.name)["data"]
-        return [v["embedding"] for v in rs]
+        rs = self._openai_client.embeddings.create(input=texts, model=self.name)
+        return [v.embedding for v in rs.data]
+
+    @cached_property
+    def _openai_client(self):
+        openai = self.safe_import("openai")
+        return openai.OpenAI()

View File

@@ -249,7 +249,7 @@ def retry_with_exponential_backoff(
             if num_retries > max_retries:
                 raise Exception(
-                    f"Maximum number of retries ({max_retries}) exceeded."
+                    f"Maximum number of retries ({max_retries}) exceeded.", e
                 )
             delay *= exponential_base * (1 + jitter * random.random())

View File

@@ -26,6 +26,7 @@ import numpy as np
 import pyarrow as pa
 import pydantic
 import semver
+from pydantic.fields import FieldInfo

 from .embeddings import EmbeddingFunctionRegistry
@@ -142,8 +143,8 @@
     return FixedSizeList

-def _py_type_to_arrow_type(py_type: Type[Any]) -> pa.DataType:
-    """Convert Python Type to Arrow DataType.
+def _py_type_to_arrow_type(py_type: Type[Any], field: FieldInfo) -> pa.DataType:
+    """Convert a field with native Python type to Arrow data type.

     Raises
     ------
@@ -163,9 +164,13 @@ def _py_type_to_arrow_type(py_type: Type[Any]) -> pa.DataType:
     elif py_type == date:
         return pa.date32()
     elif py_type == datetime:
-        return pa.timestamp("us")
+        tz = get_extras(field, "tz")
+        return pa.timestamp("us", tz=tz)
+    elif getattr(py_type, "__origin__", None) in (list, tuple):
+        child = py_type.__args__[0]
+        return pa.list_(_py_type_to_arrow_type(child, field))
     raise TypeError(
-        f"Converting Pydantic type to Arrow Type: unsupported type {py_type}"
+        f"Converting Pydantic type to Arrow Type: unsupported type {py_type}."
     )
@@ -194,10 +199,10 @@ def _pydantic_to_arrow_type(field: pydantic.fields.FieldInfo) -> pa.DataType:
         args = field.annotation.__args__
         if origin == list:
             child = args[0]
-            return pa.list_(_py_type_to_arrow_type(child))
+            return pa.list_(_py_type_to_arrow_type(child, field))
         elif origin == Union:
             if len(args) == 2 and args[1] == type(None):
-                return _py_type_to_arrow_type(args[0])
+                return _py_type_to_arrow_type(args[0], field)
     elif inspect.isclass(field.annotation):
         if issubclass(field.annotation, pydantic.BaseModel):
             # Struct
@@ -205,7 +210,7 @@ def _pydantic_to_arrow_type(field: pydantic.fields.FieldInfo) -> pa.DataType:
             return pa.struct(fields)
         elif issubclass(field.annotation, FixedSizeListMixin):
             return pa.list_(field.annotation.value_arrow_type(), field.annotation.dim())
-    return _py_type_to_arrow_type(field.annotation)
+    return _py_type_to_arrow_type(field.annotation, field)

 def is_nullable(field: pydantic.fields.FieldInfo) -> bool:

View File

@@ -70,7 +70,7 @@ class Query(pydantic.BaseModel):
     vector_column: str = VECTOR_COLUMN_NAME

     # vector to search for
-    vector: List[float]
+    vector: Union[List[float], List[List[float]]]

     # sql filter to refine the query with
     filter: Optional[str] = None
@@ -421,6 +421,8 @@ class LanceVectorQueryBuilder(LanceQueryBuilder):
             vector and the returned vectors.
         """
         vector = self._query if isinstance(self._query, list) else self._query.tolist()
+        if isinstance(vector[0], np.ndarray):
+            vector = [v.tolist() for v in vector]
         query = Query(
             vector=vector,
             filter=self._where,
@@ -488,6 +490,27 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):
         scores = pa.array(scores)
         output_tbl = self._table.to_lance().take(row_ids, columns=self._columns)
         output_tbl = output_tbl.append_column("score", scores)
+        if self._where is not None:
+            try:
+                # TODO would be great to have Substrait generate pyarrow compute expressions
+                # or conversely have pyarrow support SQL expressions using Substrait
+                import duckdb
+
+                output_tbl = (
+                    duckdb.sql("SELECT * FROM output_tbl")
+                    .filter(self._where)
+                    .to_arrow_table()
+                )
+            except ImportError:
+                import tempfile
+
+                import lance
+
+                # TODO Use "memory://" instead once that's supported
+                with tempfile.TemporaryDirectory() as tmp:
+                    ds = lance.write_dataset(output_tbl, tmp)
+                    output_tbl = ds.to_table(filter=self._where)
         return output_tbl

View File

@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import asyncio
 import uuid
 from functools import cached_property
 from typing import Dict, Optional, Union
@@ -227,8 +228,24 @@ class RemoteTable(Table):
         return LanceVectorQueryBuilder(self, query, vector_column_name)

     def _execute_query(self, query: Query) -> pa.Table:
-        result = self._conn._client.query(self._name, query)
-        return self._conn._loop.run_until_complete(result).to_arrow()
+        if (
+            query.vector is not None
+            and len(query.vector) > 0
+            and not isinstance(query.vector[0], float)
+        ):
+            futures = []
+            for v in query.vector:
+                v = list(v)
+                q = query.copy()
+                q.vector = v
+                futures.append(self._conn._client.query(self._name, q))
+            result = self._conn._loop.run_until_complete(asyncio.gather(*futures))
+            return pa.concat_tables(
+                [add_index(r.to_arrow(), i) for i, r in enumerate(result)]
+            )
+        else:
+            result = self._conn._client.query(self._name, query)
+            return self._conn._loop.run_until_complete(result).to_arrow()

     def delete(self, predicate: str):
         """Delete rows from the table.
@@ -342,3 +359,11 @@ class RemoteTable(Table):
         self._conn._loop.run_until_complete(
             self._conn._client.post(f"/v1/table/{self._name}/update/", data=payload)
         )
+
+
+def add_index(tbl: pa.Table, i: int) -> pa.Table:
+    return tbl.add_column(
+        0,
+        pa.field("query_index", pa.uint32()),
+        pa.array([i] * len(tbl), pa.uint32()),
+    )

View File

@@ -1,9 +1,9 @@
 [project]
 name = "lancedb"
-version = "0.4.1"
+version = "0.4.3"
 dependencies = [
     "deprecation",
-    "pylance==0.9.1",
+    "pylance==0.9.2",
     "ratelimiter~=1.0",
     "retry>=0.9.2",
     "tqdm>=4.27.0",
@@ -49,11 +49,11 @@ classifiers = [
 repository = "https://github.com/lancedb/lancedb"

 [project.optional-dependencies]
-tests = ["pandas>=1.4", "pytest", "pytest-mock", "pytest-asyncio", "requests"]
+tests = ["pandas>=1.4", "pytest", "pytest-mock", "pytest-asyncio", "requests", "duckdb", "pytz"]
 dev = ["ruff", "pre-commit", "black"]
 docs = ["mkdocs", "mkdocs-jupyter", "mkdocs-material", "mkdocstrings[python]"]
 clip = ["torch", "pillow", "open-clip"]
-embeddings = ["openai", "sentence-transformers", "torch", "pillow", "open-clip-torch", "cohere", "InstructorEmbedding"]
+embeddings = ["openai>=1.6.1", "sentence-transformers", "torch", "pillow", "open-clip-torch", "cohere", "InstructorEmbedding"]

 [project.scripts]
 lancedb = "lancedb.cli.cli:cli"

View File

@@ -29,7 +29,7 @@ from lancedb.pydantic import LanceModel, Vector

 @pytest.mark.slow
 @pytest.mark.parametrize("alias", ["sentence-transformers", "openai"])
-def test_sentence_transformer(alias, tmp_path):
+def test_basic_text_embeddings(alias, tmp_path):
     db = lancedb.connect(tmp_path)
     registry = get_registry()
     func = registry.get(alias).create(max_retries=0)

View File

@@ -12,6 +12,7 @@
 # limitations under the License.
 import os
 import random
+from unittest import mock

 import numpy as np
 import pandas as pd
@@ -47,6 +48,7 @@ def table(tmp_path) -> ldb.table.LanceTable:
         data=pd.DataFrame(
             {
                 "vector": vectors,
+                "id": [i % 2 for i in range(100)],
                 "text": text,
                 "text2": text,
                 "nested": [{"text": t} for t in text],
@@ -88,6 +90,7 @@ def test_create_index_from_table(tmp_path, table):
         [
             {
                 "vector": np.random.randn(128),
+                "id": 101,
                 "text": "gorilla",
                 "text2": "gorilla",
                 "nested": {"text": "gorilla"},
@@ -121,3 +124,26 @@ def test_nested_schema(tmp_path, table):
     table.create_fts_index("nested.text")
     rs = table.search("puppy").limit(10).to_list()
     assert len(rs) == 10
+
+
+def test_search_index_with_filter(table):
+    table.create_fts_index("text")
+    orig_import = __import__
+
+    def import_mock(name, *args):
+        if name == "duckdb":
+            raise ImportError
+        return orig_import(name, *args)
+
+    # no duckdb
+    with mock.patch("builtins.__import__", side_effect=import_mock):
+        rs = table.search("puppy").where("id=1").limit(10).to_list()
+        for r in rs:
+            assert r["id"] == 1
+
+    # yes duckdb
+    rs2 = table.search("puppy").where("id=1").limit(10).to_list()
+    for r in rs2:
+        assert r["id"] == 1
+
+    assert rs == rs2

View File

@@ -13,9 +13,10 @@
 import json
+import pytz
 import sys
 from datetime import date, datetime
-from typing import List, Optional
+from typing import List, Optional, Tuple

 import pyarrow as pa
 import pydantic
@@ -38,11 +39,14 @@ def test_pydantic_to_arrow():
         id: int
         s: str
         vec: list[float]
-        li: List[int]
+        li: list[int]
+        lili: list[list[float]]
+        litu: list[tuple[float, float]]
         opt: Optional[str] = None
         st: StructModel
         dt: date
         dtt: datetime
+        dt_with_tz: datetime = Field(json_schema_extra={"tz": "Asia/Shanghai"})
         # d: dict

     m = TestModel(
@@ -50,9 +54,12 @@ def test_pydantic_to_arrow():
         s="hello",
         vec=[1.0, 2.0, 3.0],
         li=[2, 3, 4],
+        lili=[[2.5, 1.5], [3.5, 4.5], [5.5, 6.5]],
+        litu=[(2.5, 1.5), (3.5, 4.5), (5.5, 6.5)],
         st=StructModel(a="a", b=1.0),
         dt=date.today(),
         dtt=datetime.now(),
+        dt_with_tz=datetime.now(pytz.timezone("Asia/Shanghai")),
     )

     schema = pydantic_to_schema(TestModel)
@@ -63,6 +70,8 @@ def test_pydantic_to_arrow():
             pa.field("s", pa.utf8(), False),
             pa.field("vec", pa.list_(pa.float64()), False),
             pa.field("li", pa.list_(pa.int64()), False),
+            pa.field("lili", pa.list_(pa.list_(pa.float64())), False),
+            pa.field("litu", pa.list_(pa.list_(pa.float64())), False),
             pa.field("opt", pa.utf8(), True),
             pa.field(
                 "st",
@@ -73,11 +82,16 @@ def test_pydantic_to_arrow():
             ),
             pa.field("dt", pa.date32(), False),
             pa.field("dtt", pa.timestamp("us"), False),
+            pa.field("dt_with_tz", pa.timestamp("us", tz="Asia/Shanghai"), False),
         ]
     )
     assert schema == expect_schema

+@pytest.mark.skipif(
+    sys.version_info > (3, 8),
+    reason="using native type alias requires python3.9 or higher",
+)
 def test_pydantic_to_arrow_py38():
     class StructModel(pydantic.BaseModel):
         a: str
@@ -88,10 +102,13 @@ def test_pydantic_to_arrow_py38():
         s: str
         vec: List[float]
         li: List[int]
+        lili: List[List[float]]
+        litu: List[Tuple[float, float]]
         opt: Optional[str] = None
         st: StructModel
         dt: date
         dtt: datetime
+        dt_with_tz: datetime = Field(json_schema_extra={"tz": "Asia/Shanghai"})
         # d: dict

     m = TestModel(
@@ -99,9 +116,12 @@ def test_pydantic_to_arrow_py38():
         s="hello",
         vec=[1.0, 2.0, 3.0],
         li=[2, 3, 4],
+        lili=[[2.5, 1.5], [3.5, 4.5], [5.5, 6.5]],
+        litu=[(2.5, 1.5), (3.5, 4.5), (5.5, 6.5)],
         st=StructModel(a="a", b=1.0),
         dt=date.today(),
         dtt=datetime.now(),
+        dt_with_tz=datetime.now(pytz.timezone("Asia/Shanghai")),
     )

     schema = pydantic_to_schema(TestModel)
@@ -112,6 +132,8 @@ def test_pydantic_to_arrow_py38():
             pa.field("s", pa.utf8(), False),
             pa.field("vec", pa.list_(pa.float64()), False),
             pa.field("li", pa.list_(pa.int64()), False),
+            pa.field("lili", pa.list_(pa.list_(pa.float64())), False),
+            pa.field("litu", pa.list_(pa.list_(pa.float64())), False),
             pa.field("opt", pa.utf8(), True),
             pa.field(
                 "st",
@@ -122,6 +144,7 @@ def test_pydantic_to_arrow_py38():
             ),
             pa.field("dt", pa.date32(), False),
             pa.field("dtt", pa.timestamp("us"), False),
+            pa.field("dt_with_tz", pa.timestamp("us", tz="Asia/Shanghai"), False),
         ]
     )
     assert schema == expect_schema