Mirror of https://github.com/lancedb/lancedb.git (synced 2025-12-23 05:19:58 +00:00)

Compare commits: docs/quick ... python-v0. (29 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | e3f2fd3892 |  |
|  | f401ccc599 |  |
|  | 81b59139f8 |  |
|  | 1026781ab6 |  |
|  | 9c699b8cd9 |  |
|  | 34bec59bc3 |  |
|  | a5fbbf0d66 |  |
|  | b42721167b |  |
|  | 543dec9ff0 |  |
|  | 04f962f6b0 |  |
|  | 19e896ff69 |  |
|  | 272e4103b2 |  |
|  | 75c257ebb6 |  |
|  | 9ee152eb42 |  |
|  | c9ae1b1737 |  |
|  | 89dc80c42a |  |
|  | 7b020ac799 |  |
|  | 529e774bbb |  |
|  | 7c12239305 |  |
|  | d83424d6b4 |  |
|  | 8bf89f887c |  |
|  | b2160b2304 |  |
|  | 1bb82597be |  |
|  | e4eee38b3c |  |
|  | 64fc2be503 |  |
|  | dc8054e90d |  |
|  | 1684940946 |  |
|  | 695813463c |  |
|  | ed594b0f76 |  |
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.19.1-beta.1"
+current_version = "0.19.1-beta.4"
 parse = """(?x)
 (?P<major>0|[1-9]\\d*)\\.
 (?P<minor>0|[1-9]\\d*)\\.

Cargo.lock (generated, 180 lines changed)
@@ -223,7 +223,7 @@ dependencies = [
 "chrono",
 "chrono-tz 0.10.3",
 "half",
-"hashbrown 0.15.2",
+"hashbrown 0.15.3",
 "num",
 ]

@@ -602,9 +602,9 @@ dependencies = [

 [[package]]
 name = "aws-sdk-bedrockruntime"
-version = "1.85.0"
+version = "1.86.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f6c003cd82739447a18d7616468b047341c125efff11fdafc77a5e777a861c9"
+checksum = "db14a0566037a6c686ef075c406dec4b067537af3d76950522e9e89848ce7a5a"
 dependencies = [
 "aws-credential-types",
 "aws-runtime",
@@ -628,9 +628,9 @@ dependencies = [

 [[package]]
 name = "aws-sdk-dynamodb"
-version = "1.72.1"
+version = "1.73.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b14d5b5d6849d1caa7b404ea57cbe25ed8ba25c3c7d47f45bcbd5b51e098ceac"
+checksum = "8d954f3581bd7254f42bbaa3a21dfd99d40a14d82a324d2012b8f3ea0d15f12b"
 dependencies = [
 "aws-credential-types",
 "aws-runtime",
@@ -651,9 +651,9 @@ dependencies = [

 [[package]]
 name = "aws-sdk-kms"
-version = "1.66.0"
+version = "1.67.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "655097cd83ab1f15575890943135192560f77097413c6dd1733fdbdc453e81ac"
+checksum = "2b650cf9e1e153ab13acd3aa1f73b271dac14e019353ec0b0c176f24a21bad03"
 dependencies = [
 "aws-credential-types",
 "aws-runtime",
@@ -674,9 +674,9 @@ dependencies = [

 [[package]]
 name = "aws-sdk-s3"
-version = "1.83.0"
+version = "1.84.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51384750334005f40e1a334b0d54eca822a77eacdcf3c50fdf38f583c5eee7a2"
+checksum = "2111975ef21dc06542918479df0df861b273eb8d99e6bb987da469b546dce32c"
 dependencies = [
 "aws-credential-types",
 "aws-runtime",
@@ -709,9 +709,9 @@ dependencies = [

 [[package]]
 name = "aws-sdk-sso"
-version = "1.65.0"
+version = "1.66.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8efec445fb78df585327094fcef4cad895b154b58711e504db7a93c41aa27151"
+checksum = "858007b14d0f1ade2e0124473c2126b24d334dc9486ad12eb7c0ed14757be464"
 dependencies = [
 "aws-credential-types",
 "aws-runtime",
@@ -732,9 +732,9 @@ dependencies = [

 [[package]]
 name = "aws-sdk-ssooidc"
-version = "1.66.0"
+version = "1.67.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e49cca619c10e7b002dc8e66928ceed66ab7f56c1a3be86c5437bf2d8d89bba"
+checksum = "b83abf3ae8bd10a014933cc2383964a12ca5a3ebbe1948ad26b1b808e7d0d1f2"
 dependencies = [
 "aws-credential-types",
 "aws-runtime",
@@ -755,9 +755,9 @@ dependencies = [

 [[package]]
 name = "aws-sdk-sts"
-version = "1.66.0"
+version = "1.67.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7420479eac0a53f776cc8f0d493841ffe58ad9d9783f3947be7265784471b47a"
+checksum = "74e8e9ac4a837859c8f1d747054172e1e55933f02ed34728b0b34dea0591ec84"
 dependencies = [
 "aws-credential-types",
 "aws-runtime",
@@ -879,7 +879,7 @@ dependencies = [
 "aws-smithy-async",
 "aws-smithy-runtime-api",
 "aws-smithy-types",
-"h2 0.4.9",
+"h2 0.4.10",
 "http 0.2.12",
 "http 1.3.1",
 "http-body 0.4.6",
@@ -890,7 +890,7 @@ dependencies = [
 "hyper-util",
 "pin-project-lite",
 "rustls 0.21.12",
-"rustls 0.23.26",
+"rustls 0.23.27",
 "rustls-native-certs 0.8.1",
 "rustls-pki-types",
 "tokio",
@@ -1326,9 +1326,9 @@ dependencies = [

 [[package]]
 name = "cc"
-version = "1.2.20"
+version = "1.2.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04da6a0d40b948dfc4fa8f5bbf402b0fc1a64a28dbf7d12ffd683550f2c1b63a"
+checksum = "8691782945451c1c383942c4874dbe63814f61cb57ef773cda2972682b7bb3c0"
 dependencies = [
 "jobserver",
 "libc",
@@ -2737,8 +2737,8 @@ checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"

 [[package]]
 name = "fsst"
-version = "0.27.0"
-source = "git+https://github.com/lancedb/lance.git?tag=v0.27.0-beta.2#cf903b470be1aaff2998830bd0358226f27f4185"
+version = "0.27.1"
+source = "git+https://github.com/lancedb/lance.git?tag=v0.27.1-beta.1#fe14671f1a5bdca3c135abd1a5d7baa5fe1faeae"
 dependencies = [
 "rand 0.8.5",
 ]
@@ -3065,9 +3065,9 @@ dependencies = [

 [[package]]
 name = "h2"
-version = "0.4.9"
+version = "0.4.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75249d144030531f8dee69fe9cea04d3edf809a017ae445e2abdff6629e86633"
+checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5"
 dependencies = [
 "atomic-waker",
 "bytes",
@@ -3115,9 +3115,9 @@ dependencies = [

 [[package]]
 name = "hashbrown"
-version = "0.15.2"
+version = "0.15.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
+checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3"
 dependencies = [
 "allocator-api2",
 "equivalent",
@@ -3302,7 +3302,7 @@ dependencies = [
 "bytes",
 "futures-channel",
 "futures-util",
-"h2 0.4.9",
+"h2 0.4.10",
 "http 1.3.1",
 "http-body 1.0.1",
 "httparse",
@@ -3339,7 +3339,7 @@ dependencies = [
 "http 1.3.1",
 "hyper 1.6.0",
 "hyper-util",
-"rustls 0.23.26",
+"rustls 0.23.27",
 "rustls-native-certs 0.8.1",
 "rustls-pki-types",
 "tokio",
@@ -3564,7 +3564,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e"
 dependencies = [
 "equivalent",
-"hashbrown 0.15.2",
+"hashbrown 0.15.3",
 "serde",
 ]

@@ -3661,9 +3661,9 @@ checksum = "9028f49264629065d057f340a86acb84867925865f73bbf8d47b4d149a7e88b8"

 [[package]]
 name = "jiff"
-version = "0.2.10"
+version = "0.2.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a064218214dc6a10fbae5ec5fa888d80c45d611aba169222fc272072bf7aef6"
+checksum = "f02000660d30638906021176af16b17498bd0d12813dbfe7b276d8bc7f3c0806"
 dependencies = [
 "jiff-static",
 "log",
@@ -3674,9 +3674,9 @@ dependencies = [

 [[package]]
 name = "jiff-static"
-version = "0.2.10"
+version = "0.2.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "199b7932d97e325aff3a7030e141eafe7f2c6268e1d1b24859b753a627f45254"
+checksum = "f3c30758ddd7188629c6713fc45d1188af4f44c90582311d0c8d8c9907f60c48"
 dependencies = [
 "proc-macro2",
 "quote",
@@ -3727,8 +3727,8 @@ dependencies = [

 [[package]]
 name = "lance"
-version = "0.27.0"
-source = "git+https://github.com/lancedb/lance.git?tag=v0.27.0-beta.2#cf903b470be1aaff2998830bd0358226f27f4185"
+version = "0.27.1"
+source = "git+https://github.com/lancedb/lance.git?tag=v0.27.1-beta.1#fe14671f1a5bdca3c135abd1a5d7baa5fe1faeae"
 dependencies = [
 "arrow",
 "arrow-arith",
@@ -3752,6 +3752,7 @@ dependencies = [
 "datafusion-expr",
 "datafusion-functions",
 "datafusion-physical-expr",
+"datafusion-physical-plan",
 "deepsize",
 "either",
 "futures",
@@ -3790,8 +3791,8 @@ dependencies = [

 [[package]]
 name = "lance-arrow"
-version = "0.27.0"
-source = "git+https://github.com/lancedb/lance.git?tag=v0.27.0-beta.2#cf903b470be1aaff2998830bd0358226f27f4185"
+version = "0.27.1"
+source = "git+https://github.com/lancedb/lance.git?tag=v0.27.1-beta.1#fe14671f1a5bdca3c135abd1a5d7baa5fe1faeae"
 dependencies = [
 "arrow-array",
 "arrow-buffer",
@@ -3808,8 +3809,8 @@ dependencies = [

 [[package]]
 name = "lance-core"
-version = "0.27.0"
-source = "git+https://github.com/lancedb/lance.git?tag=v0.27.0-beta.2#cf903b470be1aaff2998830bd0358226f27f4185"
+version = "0.27.1"
+source = "git+https://github.com/lancedb/lance.git?tag=v0.27.1-beta.1#fe14671f1a5bdca3c135abd1a5d7baa5fe1faeae"
 dependencies = [
 "arrow-array",
 "arrow-buffer",
@@ -3845,8 +3846,8 @@ dependencies = [

 [[package]]
 name = "lance-datafusion"
-version = "0.27.0"
-source = "git+https://github.com/lancedb/lance.git?tag=v0.27.0-beta.2#cf903b470be1aaff2998830bd0358226f27f4185"
+version = "0.27.1"
+source = "git+https://github.com/lancedb/lance.git?tag=v0.27.1-beta.1#fe14671f1a5bdca3c135abd1a5d7baa5fe1faeae"
 dependencies = [
 "arrow",
 "arrow-array",
@@ -3875,8 +3876,8 @@ dependencies = [

 [[package]]
 name = "lance-datagen"
-version = "0.27.0"
-source = "git+https://github.com/lancedb/lance.git?tag=v0.27.0-beta.2#cf903b470be1aaff2998830bd0358226f27f4185"
+version = "0.27.1"
+source = "git+https://github.com/lancedb/lance.git?tag=v0.27.1-beta.1#fe14671f1a5bdca3c135abd1a5d7baa5fe1faeae"
 dependencies = [
 "arrow",
 "arrow-array",
@@ -3891,8 +3892,8 @@ dependencies = [

 [[package]]
 name = "lance-encoding"
-version = "0.27.0"
-source = "git+https://github.com/lancedb/lance.git?tag=v0.27.0-beta.2#cf903b470be1aaff2998830bd0358226f27f4185"
+version = "0.27.1"
+source = "git+https://github.com/lancedb/lance.git?tag=v0.27.1-beta.1#fe14671f1a5bdca3c135abd1a5d7baa5fe1faeae"
 dependencies = [
 "arrayref",
 "arrow",
@@ -3931,8 +3932,8 @@ dependencies = [

 [[package]]
 name = "lance-file"
-version = "0.27.0"
-source = "git+https://github.com/lancedb/lance.git?tag=v0.27.0-beta.2#cf903b470be1aaff2998830bd0358226f27f4185"
+version = "0.27.1"
+source = "git+https://github.com/lancedb/lance.git?tag=v0.27.1-beta.1#fe14671f1a5bdca3c135abd1a5d7baa5fe1faeae"
 dependencies = [
 "arrow-arith",
 "arrow-array",
@@ -3966,8 +3967,8 @@ dependencies = [

 [[package]]
 name = "lance-index"
-version = "0.27.0"
-source = "git+https://github.com/lancedb/lance.git?tag=v0.27.0-beta.2#cf903b470be1aaff2998830bd0358226f27f4185"
+version = "0.27.1"
+source = "git+https://github.com/lancedb/lance.git?tag=v0.27.1-beta.1#fe14671f1a5bdca3c135abd1a5d7baa5fe1faeae"
 dependencies = [
 "arrow",
 "arrow-array",
@@ -4020,8 +4021,8 @@ dependencies = [

 [[package]]
 name = "lance-io"
-version = "0.27.0"
-source = "git+https://github.com/lancedb/lance.git?tag=v0.27.0-beta.2#cf903b470be1aaff2998830bd0358226f27f4185"
+version = "0.27.1"
+source = "git+https://github.com/lancedb/lance.git?tag=v0.27.1-beta.1#fe14671f1a5bdca3c135abd1a5d7baa5fe1faeae"
 dependencies = [
 "arrow",
 "arrow-arith",
@@ -4050,6 +4051,7 @@ dependencies = [
 "pin-project",
 "prost",
 "rand 0.8.5",
+"serde",
 "shellexpand",
 "snafu",
 "tokio",
@@ -4059,8 +4061,8 @@ dependencies = [

 [[package]]
 name = "lance-linalg"
-version = "0.27.0"
-source = "git+https://github.com/lancedb/lance.git?tag=v0.27.0-beta.2#cf903b470be1aaff2998830bd0358226f27f4185"
+version = "0.27.1"
+source = "git+https://github.com/lancedb/lance.git?tag=v0.27.1-beta.1#fe14671f1a5bdca3c135abd1a5d7baa5fe1faeae"
 dependencies = [
 "arrow-array",
 "arrow-ord",
@@ -4083,8 +4085,8 @@ dependencies = [

 [[package]]
 name = "lance-table"
-version = "0.27.0"
-source = "git+https://github.com/lancedb/lance.git?tag=v0.27.0-beta.2#cf903b470be1aaff2998830bd0358226f27f4185"
+version = "0.27.1"
+source = "git+https://github.com/lancedb/lance.git?tag=v0.27.1-beta.1#fe14671f1a5bdca3c135abd1a5d7baa5fe1faeae"
 dependencies = [
 "arrow",
 "arrow-array",
@@ -4123,8 +4125,8 @@ dependencies = [

 [[package]]
 name = "lance-testing"
-version = "0.27.0"
-source = "git+https://github.com/lancedb/lance.git?tag=v0.27.0-beta.2#cf903b470be1aaff2998830bd0358226f27f4185"
+version = "0.27.1"
+source = "git+https://github.com/lancedb/lance.git?tag=v0.27.1-beta.1#fe14671f1a5bdca3c135abd1a5d7baa5fe1faeae"
 dependencies = [
 "arrow-array",
 "arrow-schema",
@@ -4135,7 +4137,7 @@ dependencies = [

 [[package]]
 name = "lancedb"
-version = "0.19.1-beta.1"
+version = "0.19.1-beta.4"
 dependencies = [
 "arrow",
 "arrow-array",
@@ -4222,7 +4224,7 @@ dependencies = [

 [[package]]
 name = "lancedb-node"
-version = "0.19.1-beta.1"
+version = "0.19.1-beta.4"
 dependencies = [
 "arrow-array",
 "arrow-ipc",
@@ -4247,7 +4249,7 @@ dependencies = [

 [[package]]
 name = "lancedb-nodejs"
-version = "0.19.1-beta.1"
+version = "0.19.1-beta.4"
 dependencies = [
 "arrow-array",
 "arrow-ipc",
@@ -4266,7 +4268,7 @@ dependencies = [

 [[package]]
 name = "lancedb-python"
-version = "0.22.1-beta.1"
+version = "0.22.1-beta.4"
 dependencies = [
 "arrow",
 "env_logger",
@@ -4389,9 +4391,9 @@ dependencies = [

 [[package]]
 name = "libm"
-version = "0.2.13"
+version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9627da5196e5d8ed0b0495e61e518847578da83483c37288316d9b2e03a7f72"
+checksum = "a25169bd5913a4b437588a7e3d127cd6e90127b60e0ffbd834a38f1599e016b8"

 [[package]]
 name = "libredox"
@@ -4456,7 +4458,7 @@ version = "0.12.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
 dependencies = [
-"hashbrown 0.15.2",
+"hashbrown 0.15.3",
 ]

 [[package]]
@@ -5848,7 +5850,7 @@ dependencies = [
 "quinn-proto",
 "quinn-udp",
 "rustc-hash 2.1.1",
-"rustls 0.23.26",
+"rustls 0.23.27",
 "socket2",
 "thiserror 2.0.12",
 "tokio",
@@ -5867,7 +5869,7 @@ dependencies = [
 "rand 0.9.1",
 "ring",
 "rustc-hash 2.1.1",
-"rustls 0.23.26",
+"rustls 0.23.27",
 "rustls-pki-types",
 "slab",
 "thiserror 2.0.12",
@@ -5878,9 +5880,9 @@ dependencies = [

 [[package]]
 name = "quinn-udp"
-version = "0.5.11"
+version = "0.5.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "541d0f57c6ec747a90738a52741d3221f7960e8ac2f0ff4b1a63680e033b4ab5"
+checksum = "ee4e529991f949c5e25755532370b8af5d114acae52326361d68d47af64aa842"
 dependencies = [
 "cfg_aliases",
 "libc",
@@ -6086,9 +6088,9 @@ dependencies = [

 [[package]]
 name = "redox_syscall"
-version = "0.5.11"
+version = "0.5.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2f103c6d277498fbceb16e84d317e2a400f160f46904d5f5410848c829511a3"
+checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af"
 dependencies = [
 "bitflags 2.9.0",
 ]
@@ -6183,7 +6185,7 @@ dependencies = [
 "encoding_rs",
 "futures-core",
 "futures-util",
-"h2 0.4.9",
+"h2 0.4.10",
 "http 1.3.1",
 "http-body 1.0.1",
 "http-body-util",
@@ -6199,7 +6201,7 @@ dependencies = [
 "percent-encoding",
 "pin-project-lite",
 "quinn",
-"rustls 0.23.26",
+"rustls 0.23.27",
 "rustls-native-certs 0.8.1",
 "rustls-pemfile 2.2.0",
 "rustls-pki-types",
@@ -6355,9 +6357,9 @@ dependencies = [

 [[package]]
 name = "rustix"
-version = "1.0.5"
+version = "1.0.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d97817398dd4bb2e6da002002db259209759911da105da92bec29ccb12cf58bf"
+checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266"
 dependencies = [
 "bitflags 2.9.0",
 "errno",
@@ -6380,16 +6382,16 @@ dependencies = [

 [[package]]
 name = "rustls"
-version = "0.23.26"
+version = "0.23.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df51b5869f3a441595eac5e8ff14d486ff285f7b8c0df8770e49c3b56351f0f0"
+checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321"
 dependencies = [
 "aws-lc-rs",
 "log",
 "once_cell",
 "ring",
 "rustls-pki-types",
-"rustls-webpki 0.103.1",
+"rustls-webpki 0.103.2",
 "subtle",
 "zeroize",
 ]
@@ -6457,9 +6459,9 @@ dependencies = [

 [[package]]
 name = "rustls-webpki"
-version = "0.103.1"
+version = "0.103.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03"
+checksum = "7149975849f1abb3832b246010ef62ccc80d3a76169517ada7188252b9cfb437"
 dependencies = [
 "aws-lc-rs",
 "ring",
@@ -6712,9 +6714,9 @@ dependencies = [

 [[package]]
 name = "sha2"
-version = "0.10.8"
+version = "0.10.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8"
+checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
 dependencies = [
 "cfg-if",
 "cpufeatures",
@@ -7033,9 +7035,9 @@ dependencies = [

 [[package]]
 name = "synstructure"
-version = "0.13.1"
+version = "0.13.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971"
+checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2"
 dependencies = [
 "proc-macro2",
 "quote",
@@ -7265,7 +7267,7 @@ dependencies = [
 "fastrand",
 "getrandom 0.3.2",
 "once_cell",
-"rustix 1.0.5",
+"rustix 1.0.7",
 "windows-sys 0.59.0",
 ]

@@ -7460,7 +7462,7 @@ version = "0.26.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b"
 dependencies = [
-"rustls 0.23.26",
+"rustls 0.23.27",
 "tokio",
 ]

@@ -7685,7 +7687,7 @@ dependencies = [
 "flate2",
 "log",
 "once_cell",
-"rustls 0.23.26",
+"rustls 0.23.27",
 "rustls-pki-types",
 "serde",
 "serde_json",
@@ -7905,9 +7907,9 @@ dependencies = [

 [[package]]
 name = "webpki-roots"
-version = "0.26.9"
+version = "0.26.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "29aad86cec885cafd03e8305fd727c418e970a521322c91688414d5b8efba16b"
+checksum = "37493cadf42a2a939ed404698ded7fb378bf301b5011f973361779a3a74f8c93"
 dependencies = [
 "rustls-pki-types",
 ]
@@ -8397,9 +8399,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"

 [[package]]
 name = "winnow"
-version = "0.7.7"
+version = "0.7.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6cb8234a863ea0e8cd7284fcdd4f145233eb00fee02bbdd9861aec44e6477bc5"
+checksum = "d9fb597c990f03753e08d3c29efbfcf2019a003b4bf4ba19225c158e1549f0f3"
 dependencies = [
 "memchr",
 ]
Cargo.toml (16 lines changed)
@@ -21,14 +21,14 @@ categories = ["database-implementations"]
 rust-version = "1.78.0"

 [workspace.dependencies]
-lance = { "version" = "=0.27.0", "features" = ["dynamodb"], tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-io = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-index = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-linalg = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-table = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-testing = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-datafusion = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-encoding = { version = "=0.27.0", tag = "v0.27.0-beta.2", git="https://github.com/lancedb/lance.git" }
+lance = { "version" = "=0.27.1", "features" = ["dynamodb"], tag = "v0.27.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-io = { version = "=0.27.1", tag = "v0.27.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-index = { version = "=0.27.1", tag = "v0.27.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-linalg = { version = "=0.27.1", tag = "v0.27.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-table = { version = "=0.27.1", tag = "v0.27.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-testing = { version = "=0.27.1", tag = "v0.27.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-datafusion = { version = "=0.27.1", tag = "v0.27.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-encoding = { version = "=0.27.1", tag = "v0.27.1-beta.1", git="https://github.com/lancedb/lance.git" }
 # Note that this one does not include pyarrow
 arrow = { version = "54.1", optional = false }
 arrow-array = "54.1"
@@ -105,8 +105,7 @@ markdown_extensions:
 nav:
 - Home:
 - LanceDB: index.md
-- 👉 Quickstart: quickstart.md
-- 🏃🏼‍♂️ Basic Usage: basic.md
+- 🏃🏼‍♂️ Quick start: basic.md
 - 📚 Concepts:
 - Vector search: concepts/vector_search.md
 - Indexing:
@@ -206,6 +205,7 @@ nav:
 - PromptTools: integrations/prompttools.md
 - dlt: integrations/dlt.md
 - phidata: integrations/phidata.md
+- Genkit: integrations/genkit.md
 - 🎯 Examples:
 - Overview: examples/index.md
 - 🐍 Python:
@@ -238,9 +238,7 @@ nav:
 - 👾 JavaScript (lancedb): js/globals.md
 - 🦀 Rust: https://docs.rs/lancedb/latest/lancedb/
-
 - Getting Started:
-- Quickstart: quickstart.md
-- Basic Usage: basic.md
+- Quick start: basic.md
 - Concepts:
 - Vector search: concepts/vector_search.md
 - Indexing:
@@ -334,6 +332,7 @@ nav:
 - PromptTools: integrations/prompttools.md
 - dlt: integrations/dlt.md
 - phidata: integrations/phidata.md
+- Genkit: integrations/genkit.md
 - Examples:
 - examples/index.md
 - 🐍 Python:

@@ -1,4 +1,4 @@
-# Basic Usage
+# Quick start

 !!! info "LanceDB can be run in a number of ways:"

docs/src/integrations/genkit.md (new file, 181 lines)
@@ -0,0 +1,181 @@
### genkitx-lancedb

This is a LanceDB plugin for the Genkit framework. It allows you to use LanceDB for ingesting and retrieving data in Genkit pipelines.

### Installation

```bash
pnpm install genkitx-lancedb
```

### Usage

Add the LanceDB plugin to your Genkit instance:

```ts
import { lancedbIndexerRef, lancedb, lancedbRetrieverRef, WriteMode } from 'genkitx-lancedb';
import { textEmbedding004, vertexAI } from '@genkit-ai/vertexai';
import { gemini } from '@genkit-ai/vertexai';
import { z, genkit } from 'genkit';
import { Document } from 'genkit/retriever';
import { chunk } from 'llm-chunk';
import { readFile } from 'fs/promises';
import path from 'path';
import pdf from 'pdf-parse/lib/pdf-parse';

const ai = genkit({
  plugins: [
    // vertexAI provides the textEmbedding004 embedder
    vertexAI(),

    // the local vector store requires an embedder to translate from text to vector
    lancedb([
      {
        dbUri: '.db', // optional lancedb uri, defaults to .db
        tableName: 'table', // optional table name, defaults to table
        embedder: textEmbedding004,
      },
    ]),
  ],
});
```

You can run this app with the following command:

```bash
genkit start -- tsx --watch src/index.ts
```

This adds LanceDB as a retriever and indexer to the Genkit instance. You can see it in the GUI view:

<img width="1710" alt="Screenshot 2025-05-11 at 7 21 05 PM" src="https://github.com/user-attachments/assets/e752f7f4-785b-4797-a11e-72ab06a531b7" />

**Testing retrieval on a sample table**

Let's look at the raw retrieval results:

<img width="1710" alt="Screenshot 2025-05-11 at 7 21 05 PM" src="https://github.com/user-attachments/assets/b8d356ed-8421-4790-8fc0-d6af563b9657" />

On running this query, you'll see 5 results fetched from the LanceDB table, where each result looks something like this:

<img width="1417" alt="Screenshot 2025-05-11 at 7 21 18 PM" src="https://github.com/user-attachments/assets/77429525-36e2-4da6-a694-e58c1cf9eb83" />

## Creating a custom RAG flow

Now that we've seen how you can use LanceDB in a Genkit pipeline, let's refine the flow and create a RAG. A RAG flow consists of an indexer and a retriever, with the retriever's outputs post-processed and fed into an LLM for the final response.

### Creating custom indexer flows

You can also create custom indexer flows, utilizing more options and features provided by LanceDB.

```ts
export const menuPdfIndexer = lancedbIndexerRef({
  // Using all defaults for dbUri, tableName, embedder, etc.
});

const chunkingConfig = {
  minLength: 1000,
  maxLength: 2000,
  splitter: 'sentence',
  overlap: 100,
  delimiters: '',
} as any;

async function extractTextFromPdf(filePath: string) {
  const pdfFile = path.resolve(filePath);
  const dataBuffer = await readFile(pdfFile);
  const data = await pdf(dataBuffer);
  return data.text;
}

export const indexMenu = ai.defineFlow(
  {
    name: 'indexMenu',
    inputSchema: z.string().describe('PDF file path'),
    outputSchema: z.void(),
  },
  async (filePath: string) => {
    filePath = path.resolve(filePath);

    // Read the pdf.
    const pdfTxt = await ai.run('extract-text', () =>
      extractTextFromPdf(filePath)
    );

    // Divide the pdf text into segments.
    const chunks = await ai.run('chunk-it', async () =>
      chunk(pdfTxt, chunkingConfig)
    );

    // Convert chunks of text into documents to store in the index.
    const documents = chunks.map((text) => {
      return Document.fromText(text, { filePath });
    });

    // Add documents to the index.
    await ai.index({
      indexer: menuPdfIndexer,
      documents,
      options: {
        writeMode: WriteMode.Overwrite,
      } as any
    });
  }
);
```

<img width="1316" alt="Screenshot 2025-05-11 at 8 35 56 PM" src="https://github.com/user-attachments/assets/e2a20ce4-d1d0-4fa2-9a84-f2cc26e3a29f" />

In your console, you can see the logs:

<img width="511" alt="Screenshot 2025-05-11 at 7 19 14 PM" src="https://github.com/user-attachments/assets/243f26c5-ed38-40b6-b661-002f40f0423a" />

### Creating custom retriever flows

You can also create custom retriever flows, utilizing more options and features provided by LanceDB.

```ts
export const menuRetriever = lancedbRetrieverRef({
  tableName: "table", // Use the same table name as the indexer.
  displayName: "Menu", // Use a custom display name.
});

export const menuQAFlow = ai.defineFlow(
  { name: "Menu", inputSchema: z.string(), outputSchema: z.string() },
  async (input: string) => {
    // retrieve relevant documents
    const docs = await ai.retrieve({
      retriever: menuRetriever,
      query: input,
      options: {
        k: 3,
      },
    });

    const extractedContent = docs.map(doc => {
      if (doc.content && Array.isArray(doc.content) && doc.content.length > 0) {
        if (doc.content[0].media && doc.content[0].media.url) {
          return doc.content[0].media.url;
        }
      }
      return "No content found";
    });

    console.log("Extracted content:", extractedContent);

    const { text } = await ai.generate({
      model: gemini('gemini-2.0-flash'),
      prompt: `
You are acting as a helpful AI assistant that can answer
questions about the food available on the menu at Genkit Grub Pub.

Use only the context provided to answer the question.
If you don't know, do not make up an answer.
Do not add or change items on the menu.

Context:
${extractedContent.join('\n\n')}

Question: ${input}`,
      docs,
    });

    return text;
  }
);
```

Now, using our retrieval flow, we can ask questions about the ingested PDF:

<img width="1306" alt="Screenshot 2025-05-11 at 7 18 45 PM" src="https://github.com/user-attachments/assets/86c66b13-7c12-4d5f-9d81-ae36bfb1c346" />
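Since `ai.defineFlow` returns a callable function, the flow can also be exercised programmatically. A minimal sketch, assuming `menuQAFlow` is exported from `src/index.ts` as above (the question string is a hypothetical example):

```ts
import { menuQAFlow } from './index';

// Ask the RAG flow a question against the indexed menu table.
const answer = await menuQAFlow("What desserts are on the menu?");
console.log(answer);
```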
@@ -33,7 +33,7 @@ Construct a MergeInsertBuilder. __Internal use only.__
 ### execute()

 ```ts
-execute(data): Promise<MergeStats>
+execute(data, execOptions?): Promise<MergeResult>
 ```

 Executes the merge insert operation
@@ -42,11 +42,13 @@ Executes the merge insert operation

 * **data**: [`Data`](../type-aliases/Data.md)

+* **execOptions?**: `Partial`<[`WriteExecutionOptions`](../interfaces/WriteExecutionOptions.md)>
+
 #### Returns

-`Promise`<[`MergeStats`](../interfaces/MergeStats.md)>
+`Promise`<[`MergeResult`](../interfaces/MergeResult.md)>

-Statistics about the merge operation: counts of inserted, updated, and deleted rows
+the merge result

 ***
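For illustration, a minimal sketch of the new signature in use, mirroring the merge-insert tests later in this changeset (`table` and `newData` are assumed from the surrounding test setup):

```ts
// Run a merge insert with an explicit 10-second write timeout.
const mergeResult = await table
  .mergeInsert("a")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute(newData, { timeoutMs: 10_000 });

// MergeResult now reports plain numbers (the old MergeStats used bigints)
// plus the new table version produced by the write.
console.log(mergeResult.numInsertedRows, mergeResult.numUpdatedRows, mergeResult.numDeletedRows);
console.log(mergeResult.version);
```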
@@ -40,7 +40,7 @@ Returns the name of the table
 ### add()

 ```ts
-abstract add(data, options?): Promise<void>
+abstract add(data, options?): Promise<AddResult>
 ```

 Insert records into this Table.
@@ -54,14 +54,17 @@ Insert records into this Table.

 #### Returns

-`Promise`<`void`>
+`Promise`<[`AddResult`](../interfaces/AddResult.md)>
+
+A promise that resolves to an object
+containing the new version number of the table

 ***

 ### addColumns()

 ```ts
-abstract addColumns(newColumnTransforms): Promise<void>
+abstract addColumns(newColumnTransforms): Promise<AddColumnsResult>
 ```

 Add new columns with defined values.
@@ -76,14 +79,17 @@ Add new columns with defined values.

 #### Returns

-`Promise`<`void`>
+`Promise`<[`AddColumnsResult`](../interfaces/AddColumnsResult.md)>
+
+A promise that resolves to an object
+containing the new version number of the table after adding the columns.

 ***

 ### alterColumns()

 ```ts
-abstract alterColumns(columnAlterations): Promise<void>
+abstract alterColumns(columnAlterations): Promise<AlterColumnsResult>
 ```

 Alter the name or nullability of columns.
@@ -96,7 +102,10 @@ Alter the name or nullability of columns.

 #### Returns

-`Promise`<`void`>
+`Promise`<[`AlterColumnsResult`](../interfaces/AlterColumnsResult.md)>
+
+A promise that resolves to an object
+containing the new version number of the table after altering the columns.

 ***
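A short sketch of how these new result objects read in practice, following the schema-evolution tests later in this changeset (`table` is assumed to exist):

```ts
// addColumns now resolves to an AddColumnsResult carrying the new table version.
const addColumnsRes = await table.addColumns([
  { name: "price", valueSql: "cast(10.0 as double)" },
]);
console.log(addColumnsRes.version);

// alterColumns likewise returns the version produced by the alteration.
const alterColumnsRes = await table.alterColumns([
  { path: "id", rename: "new_id" },
  { path: "price", nullable: true },
]);
console.log(alterColumnsRes.version);
```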
@@ -252,7 +261,7 @@ await table.createIndex("my_float_col");
 ### delete()

 ```ts
-abstract delete(predicate): Promise<void>
+abstract delete(predicate): Promise<DeleteResult>
 ```

 Delete the rows that satisfy the predicate.
@@ -263,7 +272,10 @@ Delete the rows that satisfy the predicate.

 #### Returns

-`Promise`<`void`>
+`Promise`<[`DeleteResult`](../interfaces/DeleteResult.md)>
+
+A promise that resolves to an object
+containing the new version number of the table

 ***

@@ -284,7 +296,7 @@ Return a brief description of the table
 ### dropColumns()

 ```ts
-abstract dropColumns(columnNames): Promise<void>
+abstract dropColumns(columnNames): Promise<DropColumnsResult>
 ```

 Drop one or more columns from the dataset
@@ -303,7 +315,10 @@ then call ``cleanup_files`` to remove the old files.

 #### Returns

-`Promise`<`void`>
+`Promise`<[`DropColumnsResult`](../interfaces/DropColumnsResult.md)>
+
+A promise that resolves to an object
+containing the new version number of the table after dropping the columns.

 ***

@@ -678,7 +693,7 @@ Return the table as an arrow table
 #### update(opts)

 ```ts
-abstract update(opts): Promise<void>
+abstract update(opts): Promise<UpdateResult>
 ```

 Update existing records in the Table
@@ -689,7 +704,10 @@ Update existing records in the Table

 ##### Returns

-`Promise`<`void`>
+`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>
+
+A promise that resolves to an object containing
+the number of rows updated and the new version number

 ##### Example

@@ -700,7 +718,7 @@ table.update({where:"x = 2", values:{"vector": [10, 10]}})
 #### update(opts)

 ```ts
-abstract update(opts): Promise<void>
+abstract update(opts): Promise<UpdateResult>
 ```

 Update existing records in the Table
@@ -711,7 +729,10 @@ Update existing records in the Table

 ##### Returns

-`Promise`<`void`>
+`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>
+
+A promise that resolves to an object containing
+the number of rows updated and the new version number

 ##### Example

@@ -722,7 +743,7 @@ table.update({where:"x = 2", valuesSql:{"x": "x + 1"}})
 #### update(updates, options)

 ```ts
-abstract update(updates, options?): Promise<void>
+abstract update(updates, options?): Promise<UpdateResult>
 ```

 Update existing records in the Table
@@ -745,10 +766,6 @@ repeatedly calling this method.

 * **updates**: `Record`<`string`, `string`> \| `Map`<`string`, `string`>
 the
 columns to update
-Keys in the map should specify the name of the column to update.
-Values in the map provide the new value of the column. These can
-be SQL literal strings (e.g. "7" or "'foo'") or they can be expressions
-based on the row being updated (e.g. "my_col + 1")

 * **options?**: `Partial`<[`UpdateOptions`](../interfaces/UpdateOptions.md)>
 additional options to control
@@ -756,7 +773,15 @@ repeatedly calling this method.

 ##### Returns

-`Promise`<`void`>
+`Promise`<[`UpdateResult`](../interfaces/UpdateResult.md)>
+
+A promise that resolves to an object
+containing the number of rows updated and the new version number
+
+Keys in the map should specify the name of the column to update.
+Values in the map provide the new value of the column. These can
+be SQL literal strings (e.g. "7" or "'foo'") or they can be expressions
+based on the row being updated (e.g. "my_col + 1")

 ***
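A brief sketch of the updated `update()` contract, matching the behavior exercised in the tests later in this changeset (`table` is assumed to exist):

```ts
// update() now resolves to an UpdateResult instead of void.
const updateRes = await table.update({ id: "7" });
console.log(updateRes.rowsUpdated); // number of rows changed
console.log(updateRes.version); // new table version
```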
@@ -34,13 +34,18 @@

 ## Interfaces

+- [AddColumnsResult](interfaces/AddColumnsResult.md)
 - [AddColumnsSql](interfaces/AddColumnsSql.md)
 - [AddDataOptions](interfaces/AddDataOptions.md)
+- [AddResult](interfaces/AddResult.md)
+- [AlterColumnsResult](interfaces/AlterColumnsResult.md)
 - [ClientConfig](interfaces/ClientConfig.md)
 - [ColumnAlteration](interfaces/ColumnAlteration.md)
 - [CompactionStats](interfaces/CompactionStats.md)
 - [ConnectionOptions](interfaces/ConnectionOptions.md)
 - [CreateTableOptions](interfaces/CreateTableOptions.md)
+- [DeleteResult](interfaces/DeleteResult.md)
+- [DropColumnsResult](interfaces/DropColumnsResult.md)
 - [ExecutableQuery](interfaces/ExecutableQuery.md)
 - [FragmentStatistics](interfaces/FragmentStatistics.md)
 - [FragmentSummaryStats](interfaces/FragmentSummaryStats.md)
@@ -54,7 +59,7 @@
 - [IndexStatistics](interfaces/IndexStatistics.md)
 - [IvfFlatOptions](interfaces/IvfFlatOptions.md)
 - [IvfPqOptions](interfaces/IvfPqOptions.md)
-- [MergeStats](interfaces/MergeStats.md)
+- [MergeResult](interfaces/MergeResult.md)
 - [OpenTableOptions](interfaces/OpenTableOptions.md)
 - [OptimizeOptions](interfaces/OptimizeOptions.md)
 - [OptimizeStats](interfaces/OptimizeStats.md)
@@ -65,7 +70,9 @@
 - [TableStatistics](interfaces/TableStatistics.md)
 - [TimeoutConfig](interfaces/TimeoutConfig.md)
 - [UpdateOptions](interfaces/UpdateOptions.md)
+- [UpdateResult](interfaces/UpdateResult.md)
 - [Version](interfaces/Version.md)
+- [WriteExecutionOptions](interfaces/WriteExecutionOptions.md)

 ## Type Aliases
docs/src/js/interfaces/AddColumnsResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / AddColumnsResult

# Interface: AddColumnsResult

## Properties

### version

```ts
version: number;
```

docs/src/js/interfaces/AddResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / AddResult

# Interface: AddResult

## Properties

### version

```ts
version: number;
```

docs/src/js/interfaces/AlterColumnsResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / AlterColumnsResult

# Interface: AlterColumnsResult

## Properties

### version

```ts
version: number;
```

docs/src/js/interfaces/DeleteResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / DeleteResult

# Interface: DeleteResult

## Properties

### version

```ts
version: number;
```

docs/src/js/interfaces/DropColumnsResult.md (new file, 15 lines)
@@ -0,0 +1,15 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / DropColumnsResult

# Interface: DropColumnsResult

## Properties

### version

```ts
version: number;
```

docs/src/js/interfaces/MergeResult.md (new file, 39 lines)
@@ -0,0 +1,39 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / MergeResult

# Interface: MergeResult

## Properties

### numDeletedRows

```ts
numDeletedRows: number;
```

***

### numInsertedRows

```ts
numInsertedRows: number;
```

***

### numUpdatedRows

```ts
numUpdatedRows: number;
```

***

### version

```ts
version: number;
```

docs/src/js/interfaces/MergeStats.md (deleted, 31 lines)
@@ -1,31 +0,0 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / MergeStats

# Interface: MergeStats

## Properties

### numDeletedRows

```ts
numDeletedRows: bigint;
```

***

### numInsertedRows

```ts
numInsertedRows: bigint;
```

***

### numUpdatedRows

```ts
numUpdatedRows: bigint;
```

docs/src/js/interfaces/UpdateResult.md (new file, 23 lines)
@@ -0,0 +1,23 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / UpdateResult

# Interface: UpdateResult

## Properties

### rowsUpdated

```ts
rowsUpdated: number;
```

***

### version

```ts
version: number;
```

docs/src/js/interfaces/WriteExecutionOptions.md (new file, 26 lines)
@@ -0,0 +1,26 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / WriteExecutionOptions

# Interface: WriteExecutionOptions

## Properties

### timeoutMs?

```ts
optional timeoutMs: number;
```

Maximum time to run the operation before cancelling it.

By default, there is a 30-second timeout that is only enforced after the
first attempt. This is to prevent spending too long retrying to resolve
conflicts. For example, if a write attempt takes 20 seconds and fails,
the second attempt will be cancelled after 10 seconds, hitting the
30-second timeout. However, a write that takes one hour and succeeds on the
first attempt will not be cancelled.

When this is set, the timeout is enforced on all attempts, including the first.
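A sketch of how this option is passed through, using the merge-insert API from this changeset (the 5-second budget is an arbitrary example; `table` and `newData` are assumed):

```ts
// With timeoutMs set, the budget applies to every attempt, including the first.
await table
  .mergeInsert("a")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute(newData, { timeoutMs: 5_000 });
```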
@@ -1,101 +0,0 @@

# Getting Started with LanceDB: A Minimal Vector Search Tutorial

Let's set up a LanceDB database, insert vector data, and perform a simple vector search. We'll use simple character classes like "knight" and "rogue" to illustrate semantic relevance.

## 1. Install Dependencies

Before starting, make sure you have the necessary packages:

```bash
pip install lancedb pandas numpy
```

## 2. Import Required Libraries

```python
import lancedb
import pandas as pd
import numpy as np
```

## 3. Connect to LanceDB

You can use a local directory to store your database:

```python
db = lancedb.connect("./lancedb")
```

## 4. Create Sample Data

Add sample text data and corresponding 4D vectors:

```python
data = pd.DataFrame([
    {"id": "1", "vector": [1.0, 0.0, 0.0, 0.0], "text": "knight"},
    {"id": "2", "vector": [0.9, 0.1, 0.0, 0.0], "text": "warrior"},
    {"id": "3", "vector": [0.0, 1.0, 0.0, 0.0], "text": "rogue"},
    {"id": "4", "vector": [0.0, 0.9, 0.1, 0.0], "text": "thief"},
    {"id": "5", "vector": [0.5, 0.5, 0.0, 0.0], "text": "ranger"},
])
```

## 5. Create a Table in LanceDB

```python
table = db.create_table("rpg_classes", data=data, mode="overwrite")
```

Let's see how the table looks:

```python
print(data)
```

| id | vector | text |
|----|--------|------|
| 1 | [1.0, 0.0, 0.0, 0.0] | knight |
| 2 | [0.9, 0.1, 0.0, 0.0] | warrior |
| 3 | [0.0, 1.0, 0.0, 0.0] | rogue |
| 4 | [0.0, 0.9, 0.1, 0.0] | thief |
| 5 | [0.5, 0.5, 0.0, 0.0] | ranger |

## 6. Perform a Vector Search

Search for the most similar character classes to our query vector:

```python
# Query as if we are searching for "rogue"
results = table.search([0.05, 0.95, 0.0, 0.0]).limit(3).to_df()
print(results)
```

This will return the top 3 closest classes to the vector, effectively showing how LanceDB can be used for semantic search.

| id | vector | text | _distance |
|------|------------------------|----------|-----------|
| 3 | [0.0, 1.0, 0.0, 0.0] | rogue | 0.00 |
| 4 | [0.0, 0.9, 0.1, 0.0] | thief | 0.02 |
| 5 | [0.5, 0.5, 0.0, 0.0] | ranger | 0.50 |

Let's try searching for "knight":

```python
query_vector = [1.0, 0.0, 0.0, 0.0]
results = table.search(query_vector).limit(3).to_pandas()
print(results)
```

| id | vector | text | _distance |
|------|------------------------|----------|-----------|
| 1 | [1.0, 0.0, 0.0, 0.0] | knight | 0.00 |
| 2 | [0.9, 0.1, 0.0, 0.0] | warrior | 0.02 |
| 5 | [0.5, 0.5, 0.0, 0.0] | ranger | 0.50 |

## Next Steps

That's it - you just conducted vector search!

For more beginner tips, check out the [Basic Usage](basic.md) guide.
@@ -8,7 +8,7 @@
 <parent>
 <groupId>com.lancedb</groupId>
 <artifactId>lancedb-parent</artifactId>
-<version>0.19.1-beta.1</version>
+<version>0.19.1-beta.4</version>
 <relativePath>../pom.xml</relativePath>
 </parent>

@@ -6,7 +6,7 @@

 <groupId>com.lancedb</groupId>
 <artifactId>lancedb-parent</artifactId>
-<version>0.19.1-beta.1</version>
+<version>0.19.1-beta.4</version>
 <packaging>pom</packaging>

 <name>LanceDB Parent</name>
node/package-lock.json (generated, 44 lines changed)
@@ -1,12 +1,12 @@
 {
 "name": "vectordb",
-"version": "0.19.1-beta.1",
+"version": "0.19.1-beta.4",
 "lockfileVersion": 3,
 "requires": true,
 "packages": {
 "": {
 "name": "vectordb",
-"version": "0.19.1-beta.1",
+"version": "0.19.1-beta.4",
 "cpu": [
 "x64",
 "arm64"
@@ -52,11 +52,11 @@
 "uuid": "^9.0.0"
 },
 "optionalDependencies": {
-"@lancedb/vectordb-darwin-arm64": "0.19.1-beta.1",
-"@lancedb/vectordb-darwin-x64": "0.19.1-beta.1",
-"@lancedb/vectordb-linux-arm64-gnu": "0.19.1-beta.1",
-"@lancedb/vectordb-linux-x64-gnu": "0.19.1-beta.1",
-"@lancedb/vectordb-win32-x64-msvc": "0.19.1-beta.1"
+"@lancedb/vectordb-darwin-arm64": "0.19.1-beta.4",
+"@lancedb/vectordb-darwin-x64": "0.19.1-beta.4",
+"@lancedb/vectordb-linux-arm64-gnu": "0.19.1-beta.4",
+"@lancedb/vectordb-linux-x64-gnu": "0.19.1-beta.4",
+"@lancedb/vectordb-win32-x64-msvc": "0.19.1-beta.4"
 },
 "peerDependencies": {
 "@apache-arrow/ts": "^14.0.2",
@@ -327,9 +327,9 @@
 }
 },
 "node_modules/@lancedb/vectordb-darwin-arm64": {
-"version": "0.19.1-beta.1",
-"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.19.1-beta.1.tgz",
-"integrity": "sha512-Epvel0pF5TM6MtIWQ2KhqezqSSHTL3Wr7a2rGAwz6X/XY23i6DbMPpPs0HyeIDzDrhxNfE3cz3S+SiCA6xpR0g==",
+"version": "0.19.1-beta.4",
+"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.19.1-beta.4.tgz",
+"integrity": "sha512-qoo816HSK2VegdSEZwRI5AUCYbyFSNizzKi90Q7jaZFo2YWSym658WHrVv0OMFsyvJlYmy66VTBMf+r6O6BoWg==",
 "cpu": [
 "arm64"
 ],
@@ -340,9 +340,9 @@
 ]
 },
 "node_modules/@lancedb/vectordb-darwin-x64": {
-"version": "0.19.1-beta.1",
-"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.19.1-beta.1.tgz",
-"integrity": "sha512-hOiUSlIoISbiXytp46hToi/r6sF5pImAsfbzCsIq8ExDV4TPa8fjbhcIT80vxxOwc2mpSSK4HsVJYod95RSbEQ==",
+"version": "0.19.1-beta.4",
+"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.19.1-beta.4.tgz",
+"integrity": "sha512-ETvT238GWu88mBNFqIAJCyKhlD0bzUgVFSAp0/wKOxcxWRtHZKgkjMBeYlmqk5kM/sXvpRJjKbdQc5UfsQzq7w==",
 "cpu": [
 "x64"
 ],
@@ -353,9 +353,9 @@
 ]
 },
 "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
-"version": "0.19.1-beta.1",
-"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.19.1-beta.1.tgz",
-"integrity": "sha512-/1JhGVDEngwrlM8o2TNW8G6nJ9U/VgHKAORmj/cTA7O30helJIoo9jfvUAUy+vZ4VoEwRXQbMI+gaYTg0l3MTg==",
+"version": "0.19.1-beta.4",
+"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.19.1-beta.4.tgz",
+"integrity": "sha512-uevuU+P3DwRcVnPovbjAgr7drq8DTGhz+n4EfDHRuCpBKCAJqk4xe1+laFIVZWGEsR5ISsd2A2uoZBtjLLIhFA==",
 "cpu": [
 "arm64"
 ],
@@ -366,9 +366,9 @@
 ]
 },
 "node_modules/@lancedb/vectordb-linux-x64-gnu": {
-"version": "0.19.1-beta.1",
-"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.19.1-beta.1.tgz",
-"integrity": "sha512-zNRGSSUt8nTJMmll4NdxhQjwxR8Rezq3T4dsRoiDts5ienMam5HFjYiZ3FkDZQo16rgq2BcbFuH1G8u1chywlg==",
+"version": "0.19.1-beta.4",
+"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.19.1-beta.4.tgz",
+"integrity": "sha512-tAc1Api1Z9WurYMDWIU5tB/cATcqHlsATUd5geM3m5n77acpp7i2EufA1BmLnZ6fws88klK5raRtLdedwTi6Hw==",
 "cpu": [
 "x64"
 ],
@@ -379,9 +379,9 @@
 ]
 },
 "node_modules/@lancedb/vectordb-win32-x64-msvc": {
-"version": "0.19.1-beta.1",
-"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.19.1-beta.1.tgz",
-"integrity": "sha512-yV550AJGlsIFdm1KoHQPJ1TZx121ZXCIdebBtBZj3wOObIhyB/i0kZAtGvwjkmr7EYyfzt1EHZzbjSGVdehIAA==",
+"version": "0.19.1-beta.4",
+"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.19.1-beta.4.tgz",
+"integrity": "sha512-LCtbvivrFvK56bq5kWuWcp4wznSPLvzVXYSNxODOzTFYGA/G4/XfA+IEWwlK4olfR2c2SljPXfhWYEX9E9XwVw==",
 "cpu": [
 "x64"
 ],
@@ -1,6 +1,6 @@
 {
 "name": "vectordb",
-"version": "0.19.1-beta.1",
+"version": "0.19.1-beta.4",
 "description": " Serverless, low-latency vector database for AI applications",
 "private": false,
 "main": "dist/index.js",
@@ -89,10 +89,10 @@
 }
 },
 "optionalDependencies": {
-"@lancedb/vectordb-darwin-x64": "0.19.1-beta.1",
-"@lancedb/vectordb-darwin-arm64": "0.19.1-beta.1",
-"@lancedb/vectordb-linux-x64-gnu": "0.19.1-beta.1",
-"@lancedb/vectordb-linux-arm64-gnu": "0.19.1-beta.1",
-"@lancedb/vectordb-win32-x64-msvc": "0.19.1-beta.1"
+"@lancedb/vectordb-darwin-x64": "0.19.1-beta.4",
+"@lancedb/vectordb-darwin-arm64": "0.19.1-beta.4",
+"@lancedb/vectordb-linux-x64-gnu": "0.19.1-beta.4",
+"@lancedb/vectordb-linux-arm64-gnu": "0.19.1-beta.4",
+"@lancedb/vectordb-win32-x64-msvc": "0.19.1-beta.4"
 }
 }
@@ -1,7 +1,7 @@
 [package]
 name = "lancedb-nodejs"
 edition.workspace = true
-version = "0.19.1-beta.1"
+version = "0.19.1-beta.4"
 license.workspace = true
 description.workspace = true
 repository.workspace = true
@@ -34,6 +34,7 @@ import {
} from "../lancedb/embedding";
import { Index } from "../lancedb/indices";
import { instanceOfFullTextQuery } from "../lancedb/query";
import exp = require("constants");

describe.each([arrow15, arrow16, arrow17, arrow18])(
"Given a table",
@@ -95,7 +96,9 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
});

it("should overwrite data if asked", async () => {
await table.add([{ id: 1 }, { id: 2 }]);
const addRes = await table.add([{ id: 1 }, { id: 2 }]);
expect(addRes).toHaveProperty("version");
expect(addRes.version).toBe(2);
await table.add([{ id: 1 }], { mode: "overwrite" });
await expect(table.countRows()).resolves.toBe(1);
});
@@ -111,7 +114,11 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
await table.add([{ id: 1 }]);
expect(await table.countRows("id == 1")).toBe(1);
expect(await table.countRows("id == 7")).toBe(0);
await table.update({ id: "7" });
const updateRes = await table.update({ id: "7" });
expect(updateRes).toHaveProperty("version");
expect(updateRes.version).toBe(3);
expect(updateRes).toHaveProperty("rowsUpdated");
expect(updateRes.rowsUpdated).toBe(1);
expect(await table.countRows("id == 1")).toBe(0);
expect(await table.countRows("id == 7")).toBe(1);
await table.add([{ id: 2 }]);
@@ -338,15 +345,16 @@ describe("merge insert", () => {
{ a: 3, b: "y" },
{ a: 4, b: "z" },
];
const stats = await table
const mergeInsertRes = await table
.mergeInsert("a")
.whenMatchedUpdateAll()
.whenNotMatchedInsertAll()
.execute(newData);

expect(stats.numInsertedRows).toBe(1n);
expect(stats.numUpdatedRows).toBe(2n);
expect(stats.numDeletedRows).toBe(0n);
.execute(newData, { timeoutMs: 10_000 });
expect(mergeInsertRes).toHaveProperty("version");
expect(mergeInsertRes.version).toBe(2);
expect(mergeInsertRes.numInsertedRows).toBe(1);
expect(mergeInsertRes.numUpdatedRows).toBe(2);
expect(mergeInsertRes.numDeletedRows).toBe(0);

const expected = [
{ a: 1, b: "a" },
@@ -365,10 +373,12 @@ describe("merge insert", () => {
{ a: 3, b: "y" },
{ a: 4, b: "z" },
];
await table
const mergeInsertRes = await table
.mergeInsert("a")
.whenMatchedUpdateAll({ where: "target.b = 'b'" })
.execute(newData);
expect(mergeInsertRes).toHaveProperty("version");
expect(mergeInsertRes.version).toBe(2);

const expected = [
{ a: 1, b: "a" },
@@ -453,6 +463,20 @@ describe("merge insert", () => {
res = res.sort((a, b) => a.a - b.a);
expect(res).toEqual(expected);
});

test("timeout", async () => {
const newData = [
{ a: 2, b: "x" },
{ a: 4, b: "z" },
];
await expect(
table
.mergeInsert("a")
.whenMatchedUpdateAll()
.whenNotMatchedInsertAll()
.execute(newData, { timeoutMs: 0 }),
).rejects.toThrow("merge insert timed out");
});
});

describe("When creating an index", () => {
@@ -1028,15 +1052,19 @@ describe("schema evolution", function () {
{ id: 1n, vector: [0.1, 0.2] },
]);
// Can create a non-nullable column only through addColumns at the moment.
await table.addColumns([
const addColumnsRes = await table.addColumns([
{ name: "price", valueSql: "cast(10.0 as double)" },
]);
expect(addColumnsRes).toHaveProperty("version");
expect(addColumnsRes.version).toBe(2);
expect(await table.schema()).toEqual(schema);

await table.alterColumns([
const alterColumnsRes = await table.alterColumns([
{ path: "id", rename: "new_id" },
{ path: "price", nullable: true },
]);
expect(alterColumnsRes).toHaveProperty("version");
expect(alterColumnsRes.version).toBe(3);

const expectedSchema = new Schema([
new Field("new_id", new Int64(), true),
@@ -1154,7 +1182,9 @@ describe("schema evolution", function () {
const table = await con.createTable("vectors", [
{ id: 1n, vector: [0.1, 0.2] },
]);
await table.dropColumns(["vector"]);
const dropColumnsRes = await table.dropColumns(["vector"]);
expect(dropColumnsRes).toHaveProperty("version");
expect(dropColumnsRes.version).toBe(2);

const expectedSchema = new Schema([new Field("id", new Int64(), true)]);
expect(await table.schema()).toEqual(expectedSchema);
@@ -1271,6 +1301,32 @@ describe("when dealing with tags", () => {
await table.checkoutLatest();
expect(await table.version()).toBe(4);
});

it("can checkout and restore tags", async () => {
const conn = await connect(tmpDir.name, {
readConsistencyInterval: 0,
});

const table = await conn.createTable("my_table", [
{ id: 1n, vector: [0.1, 0.2] },
]);
expect(await table.version()).toBe(1);
expect(await table.countRows()).toBe(1);
const tagsManager = await table.tags();
const tag1 = "tag1";
await tagsManager.create(tag1, 1);
await table.add([{ id: 2n, vector: [0.3, 0.4] }]);
const tag2 = "tag2";
await tagsManager.create(tag2, 2);
expect(await table.version()).toBe(2);
await table.checkout(tag1);
expect(await table.version()).toBe(1);
await table.restore();
expect(await table.version()).toBe(3);
expect(await table.countRows()).toBe(1);
await table.add([{ id: 3n, vector: [0.5, 0.6] }]);
expect(await table.countRows()).toBe(2);
});
});

describe("when optimizing a dataset", () => {

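The tag test above compresses to a small API surface. As a sketch (reusing the table and tags-manager objects exercised in the test; the snippet itself is illustrative, not part of this diff):

const tags = await table.tags();
await tags.create("v1", await table.version()); // pin the current version under a tag
await table.checkout("v1"); // time travel: checkout accepts a tag name as well as a version number
await table.restore(); // write a new version whose data matches the checked-out tag
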
@@ -28,7 +28,13 @@ export {
FragmentSummaryStats,
Tags,
TagContents,
MergeStats,
MergeResult,
AddResult,
AddColumnsResult,
AlterColumnsResult,
DeleteResult,
DropColumnsResult,
UpdateResult,
} from "./native.js";

export {
@@ -80,7 +86,7 @@ export {
ColumnAlteration,
} from "./table";

export { MergeInsertBuilder } from "./merge";
export { MergeInsertBuilder, WriteExecutionOptions } from "./merge";

export * as embedding from "./embedding";
export * as rerankers from "./rerankers";

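For orientation, a sketch of how a downstream TypeScript consumer could import the newly re-exported result types (the package name matches the lock file below; the snippet itself is illustrative and not part of this diff):

import {
  connect,
  type AddResult,
  type MergeResult,
  type UpdateResult,
  type WriteExecutionOptions,
} from "@lancedb/lancedb"; // hypothetical consumer code
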
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
import { Data, Schema, fromDataToBuffer } from "./arrow";
import { MergeStats, NativeMergeInsertBuilder } from "./native";
import { MergeResult, NativeMergeInsertBuilder } from "./native";

/** A builder used to create and run a merge insert operation */
export class MergeInsertBuilder {
@@ -73,9 +73,12 @@ export class MergeInsertBuilder {
/**
* Executes the merge insert operation
*
* @returns Statistics about the merge operation: counts of inserted, updated, and deleted rows
* @returns {Promise<MergeResult>} the merge result
*/
async execute(data: Data): Promise<MergeStats> {
async execute(
data: Data,
execOptions?: Partial<WriteExecutionOptions>,
): Promise<MergeResult> {
let schema: Schema;
if (this.#schema instanceof Promise) {
schema = await this.#schema;
@@ -83,7 +86,28 @@ export class MergeInsertBuilder {
} else {
schema = this.#schema;
}

if (execOptions?.timeoutMs !== undefined) {
this.#native.setTimeout(execOptions.timeoutMs);
}

const buffer = await fromDataToBuffer(data, undefined, schema);
return await this.#native.execute(buffer);
}
}

export interface WriteExecutionOptions {
/**
* Maximum time to run the operation before cancelling it.
*
* By default, there is a 30-second timeout that is only enforced after the
* first attempt. This is to prevent spending too long retrying to resolve
* conflicts. For example, if a write attempt takes 20 seconds and fails,
* the second attempt will be cancelled after 10 seconds, hitting the
* 30-second timeout. However, a write that takes one hour and succeeds on the
* first attempt will not be cancelled.
*
* When this is set, the timeout is enforced on all attempts, including the first.
*/
timeoutMs?: number;
}

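Putting the new execute signature and WriteExecutionOptions together, a minimal usage sketch (assumes an open table; newRows is illustrative data):

const newRows = [{ id: 1 }, { id: 2 }];
const res = await table
  .mergeInsert("id")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  // With timeoutMs set, the 5s budget applies to every attempt, including the first.
  .execute(newRows, { timeoutMs: 5_000 });
console.log(res.version, res.numInsertedRows, res.numUpdatedRows);
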
@@ -16,12 +16,18 @@ import { EmbeddingFunctionConfig, getRegistry } from "./embedding/registry";
import { IndexOptions } from "./indices";
import { MergeInsertBuilder } from "./merge";
import {
AddColumnsResult,
AddColumnsSql,
AddResult,
AlterColumnsResult,
DeleteResult,
DropColumnsResult,
IndexConfig,
IndexStatistics,
OptimizeStats,
TableStatistics,
Tags,
UpdateResult,
Table as _NativeTable,
} from "./native";
import {
@@ -126,12 +132,19 @@ export abstract class Table {
/**
* Insert records into this Table.
* @param {Data} data Records to be inserted into the Table
* @returns {Promise<AddResult>} A promise that resolves to an object
* containing the new version number of the table
*/
abstract add(data: Data, options?: Partial<AddDataOptions>): Promise<void>;
abstract add(
data: Data,
options?: Partial<AddDataOptions>,
): Promise<AddResult>;
/**
* Update existing records in the Table
* @param opts.values The values to update. The keys are the column names and the values
* are the values to set.
* @returns {Promise<UpdateResult>} A promise that resolves to an object containing
* the number of rows updated and the new version number
* @example
* ```ts
* table.update({where:"x = 2", values:{"vector": [10, 10]}})
@@ -141,11 +154,13 @@ export abstract class Table {
opts: {
values: Map<string, IntoSql> | Record<string, IntoSql>;
} & Partial<UpdateOptions>,
): Promise<void>;
): Promise<UpdateResult>;
/**
* Update existing records in the Table
* @param opts.valuesSql The values to update. The keys are the column names and the values
* are the values to set. The values are SQL expressions.
* @returns {Promise<UpdateResult>} A promise that resolves to an object containing
* the number of rows updated and the new version number
* @example
* ```ts
* table.update({where:"x = 2", valuesSql:{"x": "x + 1"}})
@@ -155,7 +170,7 @@ export abstract class Table {
opts: {
valuesSql: Map<string, string> | Record<string, string>;
} & Partial<UpdateOptions>,
): Promise<void>;
): Promise<UpdateResult>;
/**
* Update existing records in the Table
*
@@ -173,6 +188,8 @@ export abstract class Table {
* repeatedly calling this method.
* @param {Map<string, string> | Record<string, string>} updates - the
* columns to update
* @returns {Promise<UpdateResult>} A promise that resolves to an object
* containing the number of rows updated and the new version number
*
* Keys in the map should specify the name of the column to update.
* Values in the map provide the new value of the column. These can
@@ -184,12 +201,16 @@ export abstract class Table {
abstract update(
updates: Map<string, string> | Record<string, string>,
options?: Partial<UpdateOptions>,
): Promise<void>;
): Promise<UpdateResult>;

/** Count the total number of rows in the dataset. */
abstract countRows(filter?: string): Promise<number>;
/** Delete the rows that satisfy the predicate. */
abstract delete(predicate: string): Promise<void>;
/**
* Delete the rows that satisfy the predicate.
* @returns {Promise<DeleteResult>} A promise that resolves to an object
* containing the new version number of the table
*/
abstract delete(predicate: string): Promise<DeleteResult>;
/**
* Create an index to speed up queries.
*
@@ -343,15 +364,23 @@ export abstract class Table {
* the SQL expression to use to calculate the value of the new column. These
* expressions will be evaluated for each row in the table, and can
* reference existing columns in the table.
* @returns {Promise<AddColumnsResult>} A promise that resolves to an object
* containing the new version number of the table after adding the columns.
*/
abstract addColumns(newColumnTransforms: AddColumnsSql[]): Promise<void>;
abstract addColumns(
newColumnTransforms: AddColumnsSql[],
): Promise<AddColumnsResult>;

/**
* Alter the name or nullability of columns.
* @param {ColumnAlteration[]} columnAlterations One or more alterations to
* apply to columns.
* @returns {Promise<AlterColumnsResult>} A promise that resolves to an object
* containing the new version number of the table after altering the columns.
*/
abstract alterColumns(columnAlterations: ColumnAlteration[]): Promise<void>;
abstract alterColumns(
columnAlterations: ColumnAlteration[],
): Promise<AlterColumnsResult>;
/**
* Drop one or more columns from the dataset
*
@@ -362,8 +391,10 @@ export abstract class Table {
* @param {string[]} columnNames The names of the columns to drop. These can
* be nested column references (e.g. "a.b.c") or top-level column names
* (e.g. "a").
* @returns {Promise<DropColumnsResult>} A promise that resolves to an object
* containing the new version number of the table after dropping the columns.
*/
abstract dropColumns(columnNames: string[]): Promise<void>;
abstract dropColumns(columnNames: string[]): Promise<DropColumnsResult>;
/** Retrieve the version of the table */

abstract version(): Promise<number>;
@@ -529,12 +560,12 @@ export class LocalTable extends Table {
return tbl.schema;
}

async add(data: Data, options?: Partial<AddDataOptions>): Promise<void> {
async add(data: Data, options?: Partial<AddDataOptions>): Promise<AddResult> {
const mode = options?.mode ?? "append";
const schema = await this.schema();

const buffer = await fromDataToBuffer(data, undefined, schema);
await this.inner.add(buffer, mode);
return await this.inner.add(buffer, mode);
}

async update(
@@ -547,7 +578,7 @@ export class LocalTable extends Table {
valuesSql: Map<string, string> | Record<string, string>;
} & Partial<UpdateOptions>),
options?: Partial<UpdateOptions>,
) {
): Promise<UpdateResult> {
const isValues =
"values" in optsOrUpdates && typeof optsOrUpdates.values !== "string";
const isValuesSql =
@@ -594,15 +625,15 @@ export class LocalTable extends Table {
columns = Object.entries(optsOrUpdates as Record<string, string>);
predicate = options?.where;
}
await this.inner.update(predicate, columns);
return await this.inner.update(predicate, columns);
}

async countRows(filter?: string): Promise<number> {
return await this.inner.countRows(filter);
}

async delete(predicate: string): Promise<void> {
await this.inner.delete(predicate);
async delete(predicate: string): Promise<DeleteResult> {
return await this.inner.delete(predicate);
}

async createIndex(column: string, options?: Partial<IndexOptions>) {
@@ -690,11 +721,15 @@ export class LocalTable extends Table {

// TODO: Support BatchUDF

async addColumns(newColumnTransforms: AddColumnsSql[]): Promise<void> {
await this.inner.addColumns(newColumnTransforms);
async addColumns(
newColumnTransforms: AddColumnsSql[],
): Promise<AddColumnsResult> {
return await this.inner.addColumns(newColumnTransforms);
}

async alterColumns(columnAlterations: ColumnAlteration[]): Promise<void> {
async alterColumns(
columnAlterations: ColumnAlteration[],
): Promise<AlterColumnsResult> {
const processedAlterations = columnAlterations.map((alteration) => {
if (typeof alteration.dataType === "string") {
return {
@@ -715,11 +750,11 @@ export class LocalTable extends Table {
}
});

await this.inner.alterColumns(processedAlterations);
return await this.inner.alterColumns(processedAlterations);
}

async dropColumns(columnNames: string[]): Promise<void> {
await this.inner.dropColumns(columnNames);
async dropColumns(columnNames: string[]): Promise<DropColumnsResult> {
return await this.inner.dropColumns(columnNames);
}

async version(): Promise<number> {

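A short sketch of what the new return types buy callers (illustrative usage, not part of the diff): every mutating call now reports the version it created, so callers no longer need a follow-up version() round trip:

const addRes = await table.add([{ id: 1 }]); // AddResult { version }
const updRes = await table.update({ id: "2" }); // UpdateResult { version, rowsUpdated }
const delRes = await table.delete("id = 2"); // DeleteResult { version }
console.log(addRes.version, updRes.rowsUpdated, delRes.version);
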
@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-darwin-arm64",
"version": "0.19.1-beta.1",
"version": "0.19.1-beta.4",
"os": ["darwin"],
"cpu": ["arm64"],
"main": "lancedb.darwin-arm64.node",

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-darwin-x64",
"version": "0.19.1-beta.1",
"version": "0.19.1-beta.4",
"os": ["darwin"],
"cpu": ["x64"],
"main": "lancedb.darwin-x64.node",

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-linux-arm64-gnu",
"version": "0.19.1-beta.1",
"version": "0.19.1-beta.4",
"os": ["linux"],
"cpu": ["arm64"],
"main": "lancedb.linux-arm64-gnu.node",

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-linux-arm64-musl",
"version": "0.19.1-beta.1",
"version": "0.19.1-beta.4",
"os": ["linux"],
"cpu": ["arm64"],
"main": "lancedb.linux-arm64-musl.node",

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-linux-x64-gnu",
"version": "0.19.1-beta.1",
"version": "0.19.1-beta.4",
"os": ["linux"],
"cpu": ["x64"],
"main": "lancedb.linux-x64-gnu.node",

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-linux-x64-musl",
"version": "0.19.1-beta.1",
"version": "0.19.1-beta.4",
"os": ["linux"],
"cpu": ["x64"],
"main": "lancedb.linux-x64-musl.node",

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-win32-arm64-msvc",
"version": "0.19.1-beta.1",
"version": "0.19.1-beta.4",
"os": [
"win32"
],

@@ -1,6 +1,6 @@
{
"name": "@lancedb/lancedb-win32-x64-msvc",
"version": "0.19.1-beta.1",
"version": "0.19.1-beta.4",
"os": ["win32"],
"cpu": ["x64"],
"main": "lancedb.win32-x64-msvc.node",

4
nodejs/package-lock.json
generated
@@ -1,12 +1,12 @@
{
"name": "@lancedb/lancedb",
"version": "0.19.1-beta.1",
"version": "0.19.1-beta.4",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@lancedb/lancedb",
"version": "0.19.1-beta.1",
"version": "0.19.1-beta.4",
"cpu": [
"x64",
"arm64"

@@ -11,7 +11,7 @@
"ann"
],
"private": false,
"version": "0.19.1-beta.1",
"version": "0.19.1-beta.4",
"main": "dist/index.js",
"exports": {
".": "./dist/index.js",

@@ -1,11 +1,13 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use std::time::Duration;

use lancedb::{arrow::IntoArrow, ipc::ipc_file_to_batches, table::merge::MergeInsertBuilder};
use napi::bindgen_prelude::*;
use napi_derive::napi;

use crate::error::convert_error;
use crate::{error::convert_error, table::MergeResult};

#[napi]
#[derive(Clone)]
@@ -36,8 +38,13 @@ impl NativeMergeInsertBuilder {
this
}

#[napi]
pub fn set_timeout(&mut self, timeout: u32) {
self.inner.timeout(Duration::from_millis(timeout as u64));
}

#[napi(catch_unwind)]
pub async fn execute(&self, buf: Buffer) -> napi::Result<MergeStats> {
pub async fn execute(&self, buf: Buffer) -> napi::Result<MergeResult> {
let data = ipc_file_to_batches(buf.to_vec())
.and_then(IntoArrow::into_arrow)
.map_err(|e| {
@@ -46,14 +53,13 @@ impl NativeMergeInsertBuilder {

let this = self.clone();

let stats = this.inner.execute(data).await.map_err(|e| {
let res = this.inner.execute(data).await.map_err(|e| {
napi::Error::from_reason(format!(
"Failed to execute merge insert: {}",
convert_error(&e)
))
})?;

Ok(stats.into())
Ok(res.into())
}
}

@@ -62,20 +68,3 @@ impl From<MergeInsertBuilder> for NativeMergeInsertBuilder {
Self { inner }
}
}

#[napi(object)]
pub struct MergeStats {
pub num_inserted_rows: BigInt,
pub num_updated_rows: BigInt,
pub num_deleted_rows: BigInt,
}

impl From<lancedb::table::MergeStats> for MergeStats {
fn from(stats: lancedb::table::MergeStats) -> Self {
Self {
num_inserted_rows: stats.num_inserted_rows.into(),
num_updated_rows: stats.num_updated_rows.into(),
num_deleted_rows: stats.num_deleted_rows.into(),
}
}
}

@@ -75,7 +75,7 @@ impl Table {
}

#[napi(catch_unwind)]
pub async fn add(&self, buf: Buffer, mode: String) -> napi::Result<()> {
pub async fn add(&self, buf: Buffer, mode: String) -> napi::Result<AddResult> {
let batches = ipc_file_to_batches(buf.to_vec())
.map_err(|e| napi::Error::from_reason(format!("Failed to read IPC file: {}", e)))?;
let mut op = self.inner_ref()?.add(batches);
@@ -88,7 +88,8 @@ impl Table {
return Err(napi::Error::from_reason(format!("Invalid mode: {}", mode)));
};

op.execute().await.default_error()
let res = op.execute().await.default_error()?;
Ok(res.into())
}

#[napi(catch_unwind)]
@@ -101,8 +102,9 @@ impl Table {
}

#[napi(catch_unwind)]
pub async fn delete(&self, predicate: String) -> napi::Result<()> {
self.inner_ref()?.delete(&predicate).await.default_error()
pub async fn delete(&self, predicate: String) -> napi::Result<DeleteResult> {
let res = self.inner_ref()?.delete(&predicate).await.default_error()?;
Ok(res.into())
}

#[napi(catch_unwind)]
@@ -168,7 +170,7 @@ impl Table {
&self,
only_if: Option<String>,
columns: Vec<(String, String)>,
) -> napi::Result<u64> {
) -> napi::Result<UpdateResult> {
let mut op = self.inner_ref()?.update();
if let Some(only_if) = only_if {
op = op.only_if(only_if);
@@ -176,7 +178,8 @@ impl Table {
for (column_name, value) in columns {
op = op.column(column_name, value);
}
op.execute().await.default_error()
let res = op.execute().await.default_error()?;
Ok(res.into())
}

#[napi(catch_unwind)]
@@ -190,21 +193,28 @@ impl Table {
}

#[napi(catch_unwind)]
pub async fn add_columns(&self, transforms: Vec<AddColumnsSql>) -> napi::Result<()> {
pub async fn add_columns(
&self,
transforms: Vec<AddColumnsSql>,
) -> napi::Result<AddColumnsResult> {
let transforms = transforms
.into_iter()
.map(|sql| (sql.name, sql.value_sql))
.collect::<Vec<_>>();
let transforms = NewColumnTransform::SqlExpressions(transforms);
self.inner_ref()?
let res = self
.inner_ref()?
.add_columns(transforms, None)
.await
.default_error()?;
Ok(())
Ok(res.into())
}

#[napi(catch_unwind)]
pub async fn alter_columns(&self, alterations: Vec<ColumnAlteration>) -> napi::Result<()> {
pub async fn alter_columns(
&self,
alterations: Vec<ColumnAlteration>,
) -> napi::Result<AlterColumnsResult> {
for alteration in &alterations {
if alteration.rename.is_none()
&& alteration.nullable.is_none()
@@ -221,21 +231,23 @@ impl Table {
.collect::<std::result::Result<Vec<_>, String>>()
.map_err(napi::Error::from_reason)?;

self.inner_ref()?
let res = self
.inner_ref()?
.alter_columns(&alterations)
.await
.default_error()?;
Ok(())
Ok(res.into())
}

#[napi(catch_unwind)]
pub async fn drop_columns(&self, columns: Vec<String>) -> napi::Result<()> {
pub async fn drop_columns(&self, columns: Vec<String>) -> napi::Result<DropColumnsResult> {
let col_refs = columns.iter().map(String::as_str).collect::<Vec<_>>();
self.inner_ref()?
let res = self
.inner_ref()?
.drop_columns(&col_refs)
.await
.default_error()?;
Ok(())
Ok(res.into())
}

#[napi(catch_unwind)]
@@ -642,6 +654,105 @@ pub struct Version {
pub metadata: HashMap<String, String>,
}

#[napi(object)]
pub struct UpdateResult {
pub rows_updated: i64,
pub version: i64,
}

impl From<lancedb::table::UpdateResult> for UpdateResult {
fn from(value: lancedb::table::UpdateResult) -> Self {
Self {
rows_updated: value.rows_updated as i64,
version: value.version as i64,
}
}
}

#[napi(object)]
pub struct AddResult {
pub version: i64,
}

impl From<lancedb::table::AddResult> for AddResult {
fn from(value: lancedb::table::AddResult) -> Self {
Self {
version: value.version as i64,
}
}
}

#[napi(object)]
pub struct DeleteResult {
pub version: i64,
}

impl From<lancedb::table::DeleteResult> for DeleteResult {
fn from(value: lancedb::table::DeleteResult) -> Self {
Self {
version: value.version as i64,
}
}
}

#[napi(object)]
pub struct MergeResult {
pub version: i64,
pub num_inserted_rows: i64,
pub num_updated_rows: i64,
pub num_deleted_rows: i64,
}

impl From<lancedb::table::MergeResult> for MergeResult {
fn from(value: lancedb::table::MergeResult) -> Self {
Self {
version: value.version as i64,
num_inserted_rows: value.num_inserted_rows as i64,
num_updated_rows: value.num_updated_rows as i64,
num_deleted_rows: value.num_deleted_rows as i64,
}
}
}

#[napi(object)]
pub struct AddColumnsResult {
pub version: i64,
}

impl From<lancedb::table::AddColumnsResult> for AddColumnsResult {
fn from(value: lancedb::table::AddColumnsResult) -> Self {
Self {
version: value.version as i64,
}
}
}

#[napi(object)]
pub struct AlterColumnsResult {
pub version: i64,
}

impl From<lancedb::table::AlterColumnsResult> for AlterColumnsResult {
fn from(value: lancedb::table::AlterColumnsResult) -> Self {
Self {
version: value.version as i64,
}
}
}

#[napi(object)]
pub struct DropColumnsResult {
pub version: i64,
}

impl From<lancedb::table::DropColumnsResult> for DropColumnsResult {
fn from(value: lancedb::table::DropColumnsResult) -> Self {
Self {
version: value.version as i64,
}
}
}

#[napi]
pub struct TagContents {
pub version: i64,

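One observable consequence of swapping the BigInt-backed MergeStats for the i64-backed MergeResult, mirrored in the test change from 1n to 1 earlier in this diff: merge counts now surface in JavaScript as plain numbers and can join ordinary arithmetic. A hedged sketch (rows is illustrative data):

const rows = [{ id: 1 }];
const res = await table
  .mergeInsert("id")
  .whenMatchedUpdateAll()
  .whenNotMatchedInsertAll()
  .execute(rows); // MergeResult: version and counts are number, not BigInt
const touched: number = res.numInsertedRows + res.numUpdatedRows;
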
@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.22.1-beta.1"
current_version = "0.22.1-beta.5"
parse = """(?x)
(?P<major>0|[1-9]\\d*)\\.
(?P<minor>0|[1-9]\\d*)\\.

@@ -1,6 +1,6 @@
[package]
name = "lancedb-python"
version = "0.22.1-beta.1"
version = "0.22.1-beta.5"
edition.workspace = true
description = "Python bindings for LanceDB"
license.workspace = true

@@ -36,8 +36,10 @@ class Table:
async def schema(self) -> pa.Schema: ...
async def add(
self, data: pa.RecordBatchReader, mode: Literal["append", "overwrite"]
) -> None: ...
async def update(self, updates: Dict[str, str], where: Optional[str]) -> None: ...
) -> AddResult: ...
async def update(
self, updates: Dict[str, str], where: Optional[str]
) -> UpdateResult: ...
async def count_rows(self, filter: Optional[str]) -> int: ...
async def create_index(
self,
@@ -49,12 +51,14 @@ class Table:
async def version(self) -> int: ...
async def checkout(self, version: Union[int, str]): ...
async def checkout_latest(self): ...
async def restore(self, version: Optional[int] = None): ...
async def restore(self, version: Optional[Union[int, str]] = None): ...
async def list_indices(self) -> list[IndexConfig]: ...
async def delete(self, filter: str): ...
async def add_columns(self, columns: list[tuple[str, str]]) -> None: ...
async def add_columns_with_schema(self, schema: pa.Schema) -> None: ...
async def alter_columns(self, columns: list[dict[str, Any]]) -> None: ...
async def delete(self, filter: str) -> DeleteResult: ...
async def add_columns(self, columns: list[tuple[str, str]]) -> AddColumnsResult: ...
async def add_columns_with_schema(self, schema: pa.Schema) -> AddColumnsResult: ...
async def alter_columns(
self, columns: list[dict[str, Any]]
) -> AlterColumnsResult: ...
async def optimize(
self,
*,
@@ -208,3 +212,28 @@ class OptimizeStats:
class Tag(TypedDict):
version: int
manifest_size: int

class AddResult:
version: int

class DeleteResult:
version: int

class UpdateResult:
rows_updated: int
version: int

class MergeResult:
version: int
num_updated_rows: int
num_inserted_rows: int
num_deleted_rows: int

class AddColumnsResult:
version: int

class AlterColumnsResult:
version: int

class DropColumnsResult:
version: int

@@ -4,10 +4,14 @@

from __future__ import annotations

from datetime import timedelta
from typing import TYPE_CHECKING, List, Optional

if TYPE_CHECKING:
from .common import DATA
from ._lancedb import (
MergeInsertResult,
)


class LanceMergeInsertBuilder(object):
@@ -28,6 +32,7 @@ class LanceMergeInsertBuilder(object):
self._when_not_matched_insert_all = False
self._when_not_matched_by_source_delete = False
self._when_not_matched_by_source_condition = None
self._timeout = None

def when_matched_update_all(
self, *, where: Optional[str] = None
@@ -78,7 +83,8 @@ class LanceMergeInsertBuilder(object):
new_data: DATA,
on_bad_vectors: str = "error",
fill_value: float = 0.0,
):
timeout: Optional[timedelta] = None,
) -> MergeInsertResult:
"""
Executes the merge insert operation

@@ -95,5 +101,24 @@ class LanceMergeInsertBuilder(object):
One of "error", "drop", "fill".
fill_value: float, default 0.
The value to use when filling vectors. Only used if on_bad_vectors="fill".
timeout: Optional[timedelta], default None
Maximum time to run the operation before cancelling it.

By default, there is a 30-second timeout that is only enforced after the
first attempt. This is to prevent spending too long retrying to resolve
conflicts. For example, if a write attempt takes 20 seconds and fails,
the second attempt will be cancelled after 10 seconds, hitting the
30-second timeout. However, a write that takes one hour and succeeds on the
first attempt will not be cancelled.

When this is set, the timeout is enforced on all attempts, including
the first.

Returns
-------
MergeInsertResult
version: the new version number of the table after doing merge insert.
"""
if timeout is not None:
self._timeout = timeout
return self._table._do_merge(self, new_data, on_bad_vectors, fill_value)

@@ -415,6 +415,7 @@ class LanceModel(pydantic.BaseModel):
>>> table.add([
... TestModel(name="test", vector=[1.0, 2.0])
... ])
AddResult(version=2)
>>> table.search([0., 0.]).limit(1).to_pydantic(TestModel)
[TestModel(name='test', vector=FixedSizeList(dim=2))]
"""

@@ -7,7 +7,16 @@ from functools import cached_property
from typing import Dict, Iterable, List, Optional, Union, Literal
import warnings

from lancedb._lancedb import IndexConfig
from lancedb._lancedb import (
AddColumnsResult,
AddResult,
AlterColumnsResult,
DeleteResult,
DropColumnsResult,
IndexConfig,
MergeResult,
UpdateResult,
)
from lancedb.embeddings.base import EmbeddingFunctionConfig
from lancedb.index import FTS, BTree, Bitmap, HnswPq, HnswSq, IvfFlat, IvfPq, LabelList
from lancedb.remote.db import LOOP
@@ -38,9 +47,6 @@ class RemoteTable(Table):
def __repr__(self) -> str:
return f"RemoteTable({self.db_name}.{self.name})"

def __len__(self) -> int:
self.count_rows(None)

@property
def schema(self) -> pa.Schema:
"""The [Arrow Schema](https://arrow.apache.org/docs/python/api/datatypes.html#)
@@ -91,7 +97,7 @@ class RemoteTable(Table):
def checkout_latest(self):
return LOOP.run(self._table.checkout_latest())

def restore(self, version: Optional[int] = None):
def restore(self, version: Optional[Union[int, str]] = None):
return LOOP.run(self._table.restore(version))

def list_indices(self) -> Iterable[IndexConfig]:
@@ -263,7 +269,7 @@ class RemoteTable(Table):
mode: str = "append",
on_bad_vectors: str = "error",
fill_value: float = 0.0,
) -> int:
) -> AddResult:
"""Add more data to the [Table](Table). It has the same API signature as
the OSS version.

@@ -286,8 +292,12 @@ class RemoteTable(Table):
fill_value: float, default 0.
The value to use when filling vectors. Only used if on_bad_vectors="fill".

Returns
-------
AddResult
An object containing the new version number of the table after adding data.
"""
LOOP.run(
return LOOP.run(
self._table.add(
data, mode=mode, on_bad_vectors=on_bad_vectors, fill_value=fill_value
)
@@ -413,10 +423,12 @@ class RemoteTable(Table):
new_data: DATA,
on_bad_vectors: str,
fill_value: float,
):
LOOP.run(self._table._do_merge(merge, new_data, on_bad_vectors, fill_value))
) -> MergeResult:
return LOOP.run(
self._table._do_merge(merge, new_data, on_bad_vectors, fill_value)
)

def delete(self, predicate: str):
def delete(self, predicate: str) -> DeleteResult:
"""Delete rows from the table.

This can be used to delete a single row, many rows, all rows, or
@@ -431,6 +443,11 @@ class RemoteTable(Table):

The filter must not be empty, or it will error.

Returns
-------
DeleteResult
An object containing the new version number of the table after deletion.

Examples
--------
>>> import lancedb
@@ -463,7 +480,7 @@ class RemoteTable(Table):
x vector _distance # doctest: +SKIP
0 2 [3.0, 4.0] 85.0 # doctest: +SKIP
"""
LOOP.run(self._table.delete(predicate))
return LOOP.run(self._table.delete(predicate))

def update(
self,
@@ -471,7 +488,7 @@ class RemoteTable(Table):
values: Optional[dict] = None,
*,
values_sql: Optional[Dict[str, str]] = None,
):
) -> UpdateResult:
"""
This can be used to update zero to all rows depending on how many
rows match the where clause.
@@ -489,6 +506,12 @@ class RemoteTable(Table):
reference existing columns. For example, {"x": "x + 1"} will increment
the x column by 1.

Returns
-------
UpdateResult
- rows_updated: The number of rows that were updated
- version: The new version number of the table after the update

Examples
--------
>>> import lancedb
@@ -513,7 +536,7 @@ class RemoteTable(Table):
2 2 [10.0, 10.0] # doctest: +SKIP

"""
LOOP.run(
return LOOP.run(
self._table.update(where=where, updates=values, updates_sql=values_sql)
)

@@ -561,13 +584,15 @@ class RemoteTable(Table):
def count_rows(self, filter: Optional[str] = None) -> int:
return LOOP.run(self._table.count_rows(filter))

def add_columns(self, transforms: Dict[str, str]):
def add_columns(self, transforms: Dict[str, str]) -> AddColumnsResult:
return LOOP.run(self._table.add_columns(transforms))

def alter_columns(self, *alterations: Iterable[Dict[str, str]]):
def alter_columns(
self, *alterations: Iterable[Dict[str, str]]
) -> AlterColumnsResult:
return LOOP.run(self._table.alter_columns(*alterations))

def drop_columns(self, columns: Iterable[str]):
def drop_columns(self, columns: Iterable[str]) -> DropColumnsResult:
return LOOP.run(self._table.drop_columns(columns))

def drop_index(self, index_name: str):

@@ -78,6 +78,13 @@ if TYPE_CHECKING:
CleanupStats,
CompactionStats,
Tag,
AddColumnsResult,
AddResult,
AlterColumnsResult,
DeleteResult,
DropColumnsResult,
MergeResult,
UpdateResult,
)
from .db import LanceDBConnection
from .index import IndexConfig
@@ -550,6 +557,7 @@ class Table(ABC):
Can append new data with [Table.add()][lancedb.table.Table.add].

>>> table.add([{"vector": [0.5, 1.3], "b": 4}])
AddResult(version=2)

Can query the table with [Table.search][lancedb.table.Table.search].

@@ -612,6 +620,10 @@ class Table(ABC):
"""
raise NotImplementedError

def __len__(self) -> int:
"""The number of rows in this Table"""
return self.count_rows(None)

@property
@abstractmethod
def embedding_functions(self) -> Dict[str, EmbeddingFunctionConfig]:
@@ -894,7 +906,7 @@ class Table(ABC):
mode: AddMode = "append",
on_bad_vectors: OnBadVectorsType = "error",
fill_value: float = 0.0,
):
) -> AddResult:
"""Add more data to the [Table](Table).

Parameters
@@ -916,6 +928,10 @@ class Table(ABC):
fill_value: float, default 0.
The value to use when filling vectors. Only used if on_bad_vectors="fill".

Returns
-------
AddResult
An object containing the new version number of the table after adding data.
"""
raise NotImplementedError

@@ -962,12 +978,12 @@ class Table(ABC):
>>> table = db.create_table("my_table", data)
>>> new_data = pa.table({"a": [2, 3, 4], "b": ["x", "y", "z"]})
>>> # Perform an "upsert" operation
>>> stats = table.merge_insert("a") \\
>>> res = table.merge_insert("a") \\
... .when_matched_update_all() \\
... .when_not_matched_insert_all() \\
... .execute(new_data)
>>> stats
{'num_inserted_rows': 1, 'num_updated_rows': 2, 'num_deleted_rows': 0}
>>> res
MergeResult(version=2, num_updated_rows=2, num_inserted_rows=1, num_deleted_rows=0)
>>> # The order of new rows is non-deterministic since we use
>>> # a hash-join as part of this operation and so we sort here
>>> table.to_arrow().sort_by("a").to_pandas()
@@ -976,7 +992,7 @@ class Table(ABC):
1 2 x
2 3 y
3 4 z
"""
""" # noqa: E501
on = [on] if isinstance(on, str) else list(iter(on))

return LanceMergeInsertBuilder(self, on)
@@ -1091,10 +1107,10 @@ class Table(ABC):
new_data: DATA,
on_bad_vectors: OnBadVectorsType,
fill_value: float,
): ...
) -> MergeResult: ...

@abstractmethod
def delete(self, where: str):
def delete(self, where: str) -> DeleteResult:
"""Delete rows from the table.

This can be used to delete a single row, many rows, all rows, or
@@ -1109,6 +1125,11 @@ class Table(ABC):

The filter must not be empty, or it will error.

Returns
-------
DeleteResult
An object containing the new version number of the table after deletion.

Examples
--------
>>> import lancedb
@@ -1125,6 +1146,7 @@ class Table(ABC):
1 2 [3.0, 4.0]
2 3 [5.0, 6.0]
>>> table.delete("x = 2")
DeleteResult(version=2)
>>> table.to_pandas()
x vector
0 1 [1.0, 2.0]
@@ -1138,6 +1160,7 @@ class Table(ABC):
>>> to_remove
'1, 5'
>>> table.delete(f"x IN ({to_remove})")
DeleteResult(version=3)
>>> table.to_pandas()
x vector
0 3 [5.0, 6.0]
@@ -1151,7 +1174,7 @@ class Table(ABC):
values: Optional[dict] = None,
*,
values_sql: Optional[Dict[str, str]] = None,
):
) -> UpdateResult:
"""
This can be used to update zero to all rows depending on how many
rows match the where clause. If no where clause is provided, then
@@ -1173,6 +1196,12 @@ class Table(ABC):
reference existing columns. For example, {"x": "x + 1"} will increment
the x column by 1.

Returns
-------
UpdateResult
- rows_updated: The number of rows that were updated
- version: The new version number of the table after the update

Examples
--------
>>> import lancedb
@@ -1186,12 +1215,14 @@ class Table(ABC):
1 2 [3.0, 4.0]
2 3 [5.0, 6.0]
>>> table.update(where="x = 2", values={"vector": [10.0, 10]})
UpdateResult(rows_updated=1, version=2)
>>> table.to_pandas()
x vector
0 1 [1.0, 2.0]
1 3 [5.0, 6.0]
2 2 [10.0, 10.0]
>>> table.update(values_sql={"x": "x + 1"})
UpdateResult(rows_updated=3, version=3)
>>> table.to_pandas()
x vector
0 2 [1.0, 2.0]
@@ -1354,6 +1385,11 @@ class Table(ABC):
Alternatively, a pyarrow Field or Schema can be provided to add
new columns with the specified data types. The new columns will
be initialized with null values.

Returns
-------
AddColumnsResult
version: the new version number of the table after adding columns.
"""

@abstractmethod
@@ -1379,10 +1415,15 @@ class Table(ABC):
nullability is not changed. Only non-nullable columns can be changed
to nullable. Currently, you cannot change a nullable column to
non-nullable.

Returns
-------
AlterColumnsResult
version: the new version number of the table after the alteration.
"""

@abstractmethod
def drop_columns(self, columns: Iterable[str]):
def drop_columns(self, columns: Iterable[str]) -> DropColumnsResult:
"""
Drop columns from the table.

@@ -1390,6 +1431,11 @@ class Table(ABC):
----------
columns : Iterable[str]
The names of the columns to drop.

Returns
-------
DropColumnsResult
version: the new version number of the table after dropping the columns.
"""

@abstractmethod
@@ -1428,7 +1474,7 @@ class Table(ABC):
"""

@abstractmethod
def restore(self, version: Optional[int] = None):
def restore(self, version: Optional[Union[int, str]] = None):
"""Restore a version of the table. This is an in-place operation.

This creates a new version where the data is equivalent to the
@@ -1436,9 +1482,10 @@ class Table(ABC):

Parameters
----------
version : int, default None
The version to restore. If unspecified then restores the currently
checked out version. If the currently checked out version is the
version : int or str, default None
The version number or version tag to restore.
If unspecified then restores the currently checked out version.
If the currently checked out version is the
latest version then this is a no-op.
"""

@@ -1611,6 +1658,7 @@ class LanceTable(Table):
... [{"vector": [1.1, 0.9], "type": "vector"}])
>>> table.tags.create("v1", table.version)
>>> table.add([{"vector": [0.5, 0.2], "type": "vector"}])
AddResult(version=2)
>>> tags = table.tags.list()
>>> print(tags["v1"]["version"])
1
@@ -1649,6 +1697,7 @@ class LanceTable(Table):
vector type
0 [1.1, 0.9] vector
>>> table.add([{"vector": [0.5, 0.2], "type": "vector"}])
AddResult(version=2)
>>> table.version
2
>>> table.checkout(1)
@@ -1666,7 +1715,7 @@ class LanceTable(Table):
"""
LOOP.run(self._table.checkout_latest())

def restore(self, version: Optional[int] = None):
def restore(self, version: Optional[Union[int, str]] = None):
"""Restore a version of the table. This is an in-place operation.

This creates a new version where the data is equivalent to the
@@ -1674,9 +1723,10 @@ class LanceTable(Table):

Parameters
----------
version : int, default None
The version to restore. If unspecified then restores the currently
checked out version. If the currently checked out version is the
version : int or str, default None
The version number or version tag to restore.
If unspecified then restores the currently checked out version.
If the currently checked out version is the
latest version then this is a no-op.

Examples
@@ -1691,14 +1741,23 @@ class LanceTable(Table):
vector type
0 [1.1, 0.9] vector
>>> table.add([{"vector": [0.5, 0.2], "type": "vector"}])
AddResult(version=2)
>>> table.version
2
>>> table.tags.create("v2", 2)
>>> table.restore(1)
>>> table.to_pandas()
vector type
0 [1.1, 0.9] vector
>>> len(table.list_versions())
3
>>> table.restore("v2")
>>> table.to_pandas()
vector type
0 [1.1, 0.9] vector
1 [0.5, 0.2] vector
>>> len(table.list_versions())
4
"""
if version is not None:
LOOP.run(self._table.checkout(version))
@@ -1707,9 +1766,6 @@ class LanceTable(Table):
def count_rows(self, filter: Optional[str] = None) -> int:
return LOOP.run(self._table.count_rows(filter))

def __len__(self) -> int:
return self.count_rows()

def __repr__(self) -> str:
val = f"{self.__class__.__name__}(name={self.name!r}, version={self.version}"
if self._conn.read_consistency_interval is not None:
@@ -2055,7 +2111,7 @@ class LanceTable(Table):
mode: AddMode = "append",
on_bad_vectors: OnBadVectorsType = "error",
fill_value: float = 0.0,
):
) -> AddResult:
"""Add data to the table.
If vector columns are missing and the table
has embedding functions, then the vector columns
@@ -2079,7 +2135,7 @@ class LanceTable(Table):
int
The number of vectors in the table.
"""
LOOP.run(
return LOOP.run(
self._table.add(
data, mode=mode, on_bad_vectors=on_bad_vectors, fill_value=fill_value
)
@@ -2409,8 +2465,8 @@ class LanceTable(Table):
)
return self

def delete(self, where: str):
LOOP.run(self._table.delete(where))
def delete(self, where: str) -> DeleteResult:
return LOOP.run(self._table.delete(where))

def update(
self,
@@ -2418,7 +2474,7 @@ class LanceTable(Table):
values: Optional[dict] = None,
*,
values_sql: Optional[Dict[str, str]] = None,
):
) -> UpdateResult:
"""
This can be used to update zero to all rows depending on how many
rows match the where clause.
@@ -2436,6 +2492,12 @@ class LanceTable(Table):
reference existing columns. For example, {"x": "x + 1"} will increment
the x column by 1.

Returns
-------
UpdateResult
- rows_updated: The number of rows that were updated
- version: The new version number of the table after the update

Examples
--------
>>> import lancedb
@@ -2449,6 +2511,7 @@ class LanceTable(Table):
1 2 [3.0, 4.0]
2 3 [5.0, 6.0]
>>> table.update(where="x = 2", values={"vector": [10.0, 10]})
UpdateResult(rows_updated=1, version=2)
>>> table.to_pandas()
x vector
0 1 [1.0, 2.0]
@@ -2456,7 +2519,7 @@ class LanceTable(Table):
2 2 [10.0, 10.0]

"""
LOOP.run(self._table.update(values, where=where, updates_sql=values_sql))
return LOOP.run(self._table.update(values, where=where, updates_sql=values_sql))

def _execute_query(
|
||||
self,
|
||||
@@ -2490,7 +2553,7 @@ class LanceTable(Table):
|
||||
new_data: DATA,
|
||||
on_bad_vectors: OnBadVectorsType,
|
||||
fill_value: float,
|
||||
):
|
||||
) -> MergeResult:
|
||||
return LOOP.run(
|
||||
self._table._do_merge(merge, new_data, on_bad_vectors, fill_value)
|
||||
)
|
||||
@@ -2635,14 +2698,16 @@ class LanceTable(Table):
|
||||
|
||||
def add_columns(
|
||||
self, transforms: Dict[str, str] | pa.field | List[pa.field] | pa.Schema
|
||||
):
|
||||
LOOP.run(self._table.add_columns(transforms))
|
||||
) -> AddColumnsResult:
|
||||
return LOOP.run(self._table.add_columns(transforms))
|
||||
|
||||
def alter_columns(self, *alterations: Iterable[Dict[str, str]]):
|
||||
LOOP.run(self._table.alter_columns(*alterations))
|
||||
def alter_columns(
|
||||
self, *alterations: Iterable[Dict[str, str]]
|
||||
) -> AlterColumnsResult:
|
||||
return LOOP.run(self._table.alter_columns(*alterations))
|
||||
|
||||
def drop_columns(self, columns: Iterable[str]):
|
||||
LOOP.run(self._table.drop_columns(columns))
|
||||
def drop_columns(self, columns: Iterable[str]) -> DropColumnsResult:
|
||||
return LOOP.run(self._table.drop_columns(columns))
|
||||
|
||||
def uses_v2_manifest_paths(self) -> bool:
|
||||
"""
|
||||
@@ -3197,7 +3262,7 @@ class AsyncTable:
|
||||
mode: Optional[Literal["append", "overwrite"]] = "append",
|
||||
on_bad_vectors: Optional[OnBadVectorsType] = None,
|
||||
fill_value: Optional[float] = None,
|
||||
):
|
||||
) -> AddResult:
|
||||
"""Add more data to the [Table](Table).
|
||||
|
||||
Parameters
|
||||
@@ -3236,7 +3301,7 @@ class AsyncTable:
|
||||
if isinstance(data, pa.Table):
|
||||
data = data.to_reader()
|
||||
|
||||
await self._inner.add(data, mode or "append")
|
||||
return await self._inner.add(data, mode or "append")
|
||||
|
||||
def merge_insert(self, on: Union[str, Iterable[str]]) -> LanceMergeInsertBuilder:
|
||||
"""
|
||||
@@ -3281,12 +3346,12 @@ class AsyncTable:
|
||||
>>> table = db.create_table("my_table", data)
|
||||
>>> new_data = pa.table({"a": [2, 3, 4], "b": ["x", "y", "z"]})
|
||||
>>> # Perform a "upsert" operation
|
||||
>>> stats = table.merge_insert("a") \\
|
||||
>>> res = table.merge_insert("a") \\
|
||||
... .when_matched_update_all() \\
|
||||
... .when_not_matched_insert_all() \\
|
||||
... .execute(new_data)
|
||||
>>> stats
|
||||
{'num_inserted_rows': 1, 'num_updated_rows': 2, 'num_deleted_rows': 0}
|
||||
>>> res
|
||||
MergeResult(version=2, num_updated_rows=2, num_inserted_rows=1, num_deleted_rows=0)
|
||||
>>> # The order of new rows is non-deterministic since we use
|
||||
>>> # a hash-join as part of this operation and so we sort here
|
||||
>>> table.to_arrow().sort_by("a").to_pandas()
|
||||
@@ -3295,7 +3360,7 @@ class AsyncTable:
|
||||
1 2 x
|
||||
2 3 y
|
||||
3 4 z
|
||||
"""
|
||||
""" # noqa: E501
|
||||
on = [on] if isinstance(on, str) else list(iter(on))
|
||||
|
||||
return LanceMergeInsertBuilder(self, on)
|
||||
@@ -3626,7 +3691,7 @@ class AsyncTable:
|
||||
new_data: DATA,
|
        on_bad_vectors: OnBadVectorsType,
        fill_value: float,
    ):
    ) -> MergeResult:
        schema = await self.schema()
        if on_bad_vectors is None:
            on_bad_vectors = "error"
@@ -3651,10 +3716,11 @@ class AsyncTable:
                when_not_matched_insert_all=merge._when_not_matched_insert_all,
                when_not_matched_by_source_delete=merge._when_not_matched_by_source_delete,
                when_not_matched_by_source_condition=merge._when_not_matched_by_source_condition,
                timeout=merge._timeout,
            ),
        )

    async def delete(self, where: str):
    async def delete(self, where: str) -> DeleteResult:
        """Delete rows from the table.

        This can be used to delete a single row, many rows, all rows, or
@@ -3685,6 +3751,7 @@ class AsyncTable:
        1  2  [3.0, 4.0]
        2  3  [5.0, 6.0]
        >>> table.delete("x = 2")
        DeleteResult(version=2)
        >>> table.to_pandas()
           x      vector
        0  1  [1.0, 2.0]
@@ -3698,6 +3765,7 @@ class AsyncTable:
        >>> to_remove
        '1, 5'
        >>> table.delete(f"x IN ({to_remove})")
        DeleteResult(version=3)
        >>> table.to_pandas()
           x      vector
        0  3  [5.0, 6.0]
@@ -3710,7 +3778,7 @@ class AsyncTable:
        *,
        where: Optional[str] = None,
        updates_sql: Optional[Dict[str, str]] = None,
    ):
    ) -> UpdateResult:
        """
        This can be used to update zero to all rows in the table.

@@ -3732,6 +3800,13 @@ class AsyncTable:
        literals (e.g. "7" or "'foo'") or they can be expressions based on the
        previous value of the row (e.g. "x + 1" to increment the x column by 1)

        Returns
        -------
        UpdateResult
            An object containing:
            - rows_updated: The number of rows that were updated
            - version: The new version number of the table after the update

        Examples
        --------
        >>> import asyncio
@@ -3760,7 +3835,7 @@ class AsyncTable:

    async def add_columns(
        self, transforms: dict[str, str] | pa.field | List[pa.field] | pa.Schema
    ):
    ) -> AddColumnsResult:
        """
        Add new columns with defined values.

@@ -3772,6 +3847,12 @@ class AsyncTable:
            each row in the table, and can reference existing columns.
            Alternatively, you can pass a pyarrow field or schema to add
            new columns with NULLs.

        Returns
        -------
        AddColumnsResult
            version: the new version number of the table after adding columns.

        """
        if isinstance(transforms, pa.Field):
            transforms = [transforms]
@@ -3780,11 +3861,13 @@ class AsyncTable:
        ):
            transforms = pa.schema(transforms)
        if isinstance(transforms, pa.Schema):
            await self._inner.add_columns_with_schema(transforms)
            return await self._inner.add_columns_with_schema(transforms)
        else:
            await self._inner.add_columns(list(transforms.items()))
            return await self._inner.add_columns(list(transforms.items()))

    async def alter_columns(self, *alterations: Iterable[dict[str, Any]]):
    async def alter_columns(
        self, *alterations: Iterable[dict[str, Any]]
    ) -> AlterColumnsResult:
        """
        Alter column names and nullability.

@@ -3804,8 +3887,13 @@ class AsyncTable:
        nullability is not changed. Only non-nullable columns can be changed
        to nullable. Currently, you cannot change a nullable column to
        non-nullable.

        Returns
        -------
        AlterColumnsResult
            version: the new version number of the table after the alteration.
        """
        await self._inner.alter_columns(alterations)
        return await self._inner.alter_columns(alterations)

    async def drop_columns(self, columns: Iterable[str]):
        """
@@ -3816,7 +3904,7 @@ class AsyncTable:
        columns : Iterable[str]
            The names of the columns to drop.
        """
        await self._inner.drop_columns(columns)
        return await self._inner.drop_columns(columns)

    async def version(self) -> int:
        """
@@ -3886,7 +3974,7 @@ class AsyncTable:
        """
        await self._inner.checkout_latest()

    async def restore(self, version: Optional[int] = None):
    async def restore(self, version: Optional[int | str] = None):
        """
        Restore the table to the currently checked out version

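Taken together, these signature changes mean every mutating call on AsyncTable now reports the resulting table version instead of returning None. A minimal sketch of consuming the new return values (the database path, table name, and data below are illustrative, not from the diff; version numbers follow the create=1, one-bump-per-mutation pattern the tests assert):

    import asyncio
    import lancedb

    async def main():
        db = await lancedb.connect_async("data/example-db")  # illustrative path
        table = await db.create_table("items", data=[{"id": 1}, {"id": 2}])  # version 1

        delete_res = await table.delete("id = 2")    # DeleteResult(version=2)
        update_res = await table.update({"id": 10})  # UpdateResult(rows_updated=1, version=3)
        print(delete_res.version, update_res.rows_updated)

    asyncio.run(main())
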
@@ -18,19 +18,19 @@ def test_upsert(mem_db):
        {"id": 1, "name": "Bobby"},
        {"id": 2, "name": "Charlie"},
    ]
    stats = (
    res = (
        table.merge_insert("id")
        .when_matched_update_all()
        .when_not_matched_insert_all()
        .execute(new_users)
    )
    table.count_rows()  # 3
    stats  # {'num_inserted_rows': 1, 'num_updated_rows': 1, 'num_deleted_rows': 0}
    res  # {'num_inserted_rows': 1, 'num_updated_rows': 1, 'num_deleted_rows': 0}
    # --8<-- [end:upsert_basic]
    assert table.count_rows() == 3
    assert stats["num_inserted_rows"] == 1
    assert stats["num_updated_rows"] == 1
    assert stats["num_deleted_rows"] == 0
    assert res.num_inserted_rows == 1
    assert res.num_deleted_rows == 0
    assert res.num_updated_rows == 1


@pytest.mark.asyncio
@@ -48,19 +48,22 @@ async def test_upsert_async(mem_db_async):
        {"id": 1, "name": "Bobby"},
        {"id": 2, "name": "Charlie"},
    ]
    stats = await (
    res = await (
        table.merge_insert("id")
        .when_matched_update_all()
        .when_not_matched_insert_all()
        .execute(new_users)
    )
    await table.count_rows()  # 3
    stats  # {'num_inserted_rows': 1, 'num_updated_rows': 1, 'num_deleted_rows': 0}
    res
    # MergeResult(version=2, num_updated_rows=1,
    # num_inserted_rows=1, num_deleted_rows=0)
    # --8<-- [end:upsert_basic_async]
    assert await table.count_rows() == 3
    assert stats["num_inserted_rows"] == 1
    assert stats["num_updated_rows"] == 1
    assert stats["num_deleted_rows"] == 0
    assert res.version == 2
    assert res.num_inserted_rows == 1
    assert res.num_deleted_rows == 0
    assert res.num_updated_rows == 1


def test_insert_if_not_exists(mem_db):
@@ -77,16 +80,19 @@ def test_insert_if_not_exists(mem_db):
        {"domain": "google.com", "name": "Google"},
        {"domain": "facebook.com", "name": "Facebook"},
    ]
    stats = (
    res = (
        table.merge_insert("domain").when_not_matched_insert_all().execute(new_domains)
    )
    table.count_rows()  # 3
    stats  # {'num_inserted_rows': 1, 'num_updated_rows': 0, 'num_deleted_rows': 0}
    res
    # MergeResult(version=2, num_updated_rows=0,
    # num_inserted_rows=1, num_deleted_rows=0)
    # --8<-- [end:insert_if_not_exists]
    assert table.count_rows() == 3
    assert stats["num_inserted_rows"] == 1
    assert stats["num_updated_rows"] == 0
    assert stats["num_deleted_rows"] == 0
    assert res.version == 2
    assert res.num_inserted_rows == 1
    assert res.num_deleted_rows == 0
    assert res.num_updated_rows == 0


@pytest.mark.asyncio
@@ -104,16 +110,19 @@ async def test_insert_if_not_exists_async(mem_db_async):
        {"domain": "google.com", "name": "Google"},
        {"domain": "facebook.com", "name": "Facebook"},
    ]
    stats = await (
    res = await (
        table.merge_insert("domain").when_not_matched_insert_all().execute(new_domains)
    )
    await table.count_rows()  # 3
    stats  # {'num_inserted_rows': 1, 'num_updated_rows': 0, 'num_deleted_rows': 0}
    # --8<-- [end:insert_if_not_exists_async]
    res
    # MergeResult(version=2, num_updated_rows=0,
    # num_inserted_rows=1, num_deleted_rows=0)
    # --8<-- [end:insert_if_not_exists_async]
    assert await table.count_rows() == 3
    assert stats["num_inserted_rows"] == 1
    assert stats["num_updated_rows"] == 0
    assert stats["num_deleted_rows"] == 0
    assert res.version == 2
    assert res.num_inserted_rows == 1
    assert res.num_deleted_rows == 0
    assert res.num_updated_rows == 0


def test_replace_range(mem_db):
@@ -131,7 +140,7 @@ def test_replace_range(mem_db):
    new_chunks = [
        {"doc_id": 1, "chunk_id": 0, "text": "Baz"},
    ]
    stats = (
    res = (
        table.merge_insert(["doc_id", "chunk_id"])
        .when_matched_update_all()
        .when_not_matched_insert_all()
@@ -139,12 +148,15 @@ def test_replace_range(mem_db):
        .execute(new_chunks)
    )
    table.count_rows("doc_id = 1")  # 1
    stats  # {'num_inserted_rows': 0, 'num_updated_rows': 1, 'num_deleted_rows': 1}
    # --8<-- [end:replace_range]
    res
    # MergeResult(version=2, num_updated_rows=1,
    # num_inserted_rows=0, num_deleted_rows=1)
    # --8<-- [end:replace_range]
    assert table.count_rows("doc_id = 1") == 1
    assert stats["num_inserted_rows"] == 0
    assert stats["num_updated_rows"] == 1
    assert stats["num_deleted_rows"] == 1
    assert res.version == 2
    assert res.num_inserted_rows == 0
    assert res.num_deleted_rows == 1
    assert res.num_updated_rows == 1


@pytest.mark.asyncio
@@ -163,7 +175,7 @@ async def test_replace_range_async(mem_db_async):
    new_chunks = [
        {"doc_id": 1, "chunk_id": 0, "text": "Baz"},
    ]
    stats = await (
    res = await (
        table.merge_insert(["doc_id", "chunk_id"])
        .when_matched_update_all()
        .when_not_matched_insert_all()
@@ -171,9 +183,12 @@ async def test_replace_range_async(mem_db_async):
        .execute(new_chunks)
    )
    await table.count_rows("doc_id = 1")  # 1
    stats  # {'num_inserted_rows': 0, 'num_updated_rows': 1, 'num_deleted_rows': 1}
    # --8<-- [end:replace_range_async]
    res
    # MergeResult(version=2, num_updated_rows=1,
    # num_inserted_rows=0, num_deleted_rows=1)
    # --8<-- [end:replace_range_async]
    assert await table.count_rows("doc_id = 1") == 1
    assert stats["num_inserted_rows"] == 0
    assert stats["num_updated_rows"] == 1
    assert stats["num_deleted_rows"] == 1
    assert res.version == 2
    assert res.num_inserted_rows == 0
    assert res.num_deleted_rows == 1
    assert res.num_updated_rows == 1

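The replace-range tests above exercise a pattern worth calling out: scoping when_not_matched_by_source_delete to a key predicate turns merge_insert into an atomic "replace everything in this range" operation. A sketch under assumed fixtures (the database path and seed rows are illustrative; the API calls mirror the tests):

    import lancedb

    db = lancedb.connect("data/chunks-db")  # illustrative path
    table = db.create_table(
        "chunks",
        data=[
            {"doc_id": 1, "chunk_id": 0, "text": "Foo"},
            {"doc_id": 1, "chunk_id": 1, "text": "Bar"},
        ],
    )

    # Re-chunk doc 1 down to a single chunk: matching rows are updated, new
    # rows inserted, and stale chunks of doc 1 (and only doc 1) deleted.
    res = (
        table.merge_insert(["doc_id", "chunk_id"])
        .when_matched_update_all()
        .when_not_matched_insert_all()
        .when_not_matched_by_source_delete("doc_id = 1")
        .execute([{"doc_id": 1, "chunk_id": 0, "text": "Baz"}])
    )
    assert res.num_updated_rows == 1 and res.num_deleted_rows == 1
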
@@ -149,6 +149,24 @@ async def test_async_checkout():
    assert await table.count_rows() == 300


def test_table_len_sync():
    def handler(request):
        if request.path == "/v1/table/test/create/?mode=create":
            request.send_response(200)
            request.send_header("Content-Type", "application/json")
            request.end_headers()
            request.wfile.write(b"{}")

        request.send_response(200)
        request.send_header("Content-Type", "application/json")
        request.end_headers()
        request.wfile.write(json.dumps(1).encode())

    with mock_lancedb_connection(handler) as db:
        table = db.create_table("test", [{"id": 1}])
        assert len(table) == 1


@pytest.mark.asyncio
async def test_http_error():
    request_id_holder = {"request_id": None}

@@ -106,15 +106,22 @@ async def test_update_async(mem_db_async: AsyncConnection):
    table = await mem_db_async.create_table("some_table", data=[{"id": 0}])
    assert await table.count_rows("id == 0") == 1
    assert await table.count_rows("id == 7") == 0
    await table.update({"id": 7})
    update_res = await table.update({"id": 7})
    assert update_res.rows_updated == 1
    assert update_res.version == 2
    assert await table.count_rows("id == 7") == 1
    assert await table.count_rows("id == 0") == 0
    await table.add([{"id": 2}])
    await table.update(where="id % 2 == 0", updates_sql={"id": "5"})
    add_res = await table.add([{"id": 2}])
    assert add_res.version == 3
    update_res = await table.update(where="id % 2 == 0", updates_sql={"id": "5"})
    assert update_res.rows_updated == 1
    assert update_res.version == 4
    assert await table.count_rows("id == 7") == 1
    assert await table.count_rows("id == 2") == 0
    assert await table.count_rows("id == 5") == 1
    await table.update({"id": 10}, where="id == 5")
    update_res = await table.update({"id": 10}, where="id == 5")
    assert update_res.rows_updated == 1
    assert update_res.version == 5
    assert await table.count_rows("id == 10") == 1


@@ -437,7 +444,8 @@ def test_add_pydantic_model(mem_db: DBConnection):
            content="foo", meta=Metadata(source="bar", timestamp=datetime.now())
        ),
    )
    tbl.add([expected])
    add_res = tbl.add([expected])
    assert add_res.version == 2

    result = tbl.search([0.0, 0.0]).limit(1).to_pydantic(LanceSchema)[0]
    assert result == expected
@@ -459,11 +467,12 @@ async def test_add_async(mem_db_async: AsyncConnection):
        ],
    )
    assert await table.count_rows() == 2
    await table.add(
    add_res = await table.add(
        data=[
            {"vector": [10.0, 11.0], "item": "baz", "price": 30.0},
        ],
    )
    assert add_res.version == 2
    assert await table.count_rows() == 3


@@ -760,6 +769,29 @@ def test_restore(mem_db: DBConnection):
    table.restore(0)


def test_restore_with_tags(mem_db: DBConnection):
    table = mem_db.create_table(
        "my_table",
        data=[{"vector": [1.1, 0.9], "type": "vector"}],
    )
    tag = "tag1"
    table.tags.create(tag, 1)
    table.add([{"vector": [0.5, 0.2], "type": "vector"}])
    table.restore(tag)
    assert len(table.list_versions()) == 3
    assert len(table) == 1
    expected = table.to_arrow()

    table.add([{"vector": [0.3, 0.3], "type": "vector"}])
    table.checkout("tag1")
    table.restore()
    assert len(table.list_versions()) == 5
    assert table.to_arrow() == expected

    with pytest.raises(ValueError):
        table.restore("tag_unknown")


def test_merge(tmp_db: DBConnection, tmp_path):
    pytest.importorskip("lance")
    import lance
@@ -795,7 +827,8 @@ def test_delete(mem_db: DBConnection):
    )
    assert len(table) == 2
    assert len(table.list_versions()) == 1
    table.delete("id=0")
    delete_res = table.delete("id=0")
    assert delete_res.version == 2
    assert len(table.list_versions()) == 2
    assert table.version == 2
    assert len(table) == 1
@@ -809,7 +842,9 @@ def test_update(mem_db: DBConnection):
    )
    assert len(table) == 2
    assert len(table.list_versions()) == 1
    table.update(where="id=0", values={"vector": [1.1, 1.1]})
    update_res = table.update(where="id=0", values={"vector": [1.1, 1.1]})
    assert update_res.version == 2
    assert update_res.rows_updated == 1
    assert len(table.list_versions()) == 2
    assert table.version == 2
    assert len(table) == 2
@@ -898,9 +933,16 @@ def test_merge_insert(mem_db: DBConnection):
    new_data = pa.table({"a": [2, 3, 4], "b": ["x", "y", "z"]})

    # upsert
    table.merge_insert(
        "a"
    ).when_matched_update_all().when_not_matched_insert_all().execute(new_data)
    merge_insert_res = (
        table.merge_insert("a")
        .when_matched_update_all()
        .when_not_matched_insert_all()
        .execute(new_data, timeout=timedelta(seconds=10))
    )
    assert merge_insert_res.version == 2
    assert merge_insert_res.num_inserted_rows == 1
    assert merge_insert_res.num_updated_rows == 2
    assert merge_insert_res.num_deleted_rows == 0

    expected = pa.table({"a": [1, 2, 3, 4], "b": ["a", "x", "y", "z"]})
    assert table.to_arrow().sort_by("a") == expected
@@ -908,17 +950,28 @@ def test_merge_insert(mem_db: DBConnection):
    table.restore(version)

    # conditional update
    table.merge_insert("a").when_matched_update_all(where="target.b = 'b'").execute(
        new_data
    merge_insert_res = (
        table.merge_insert("a")
        .when_matched_update_all(where="target.b = 'b'")
        .execute(new_data)
    )
    assert merge_insert_res.version == 4
    assert merge_insert_res.num_inserted_rows == 0
    assert merge_insert_res.num_updated_rows == 1
    assert merge_insert_res.num_deleted_rows == 0
    expected = pa.table({"a": [1, 2, 3], "b": ["a", "x", "c"]})
    assert table.to_arrow().sort_by("a") == expected

    table.restore(version)

    # insert-if-not-exists
    table.merge_insert("a").when_not_matched_insert_all().execute(new_data)

    merge_insert_res = (
        table.merge_insert("a").when_not_matched_insert_all().execute(new_data)
    )
    assert merge_insert_res.version == 6
    assert merge_insert_res.num_inserted_rows == 1
    assert merge_insert_res.num_updated_rows == 0
    assert merge_insert_res.num_deleted_rows == 0
    expected = pa.table({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "z"]})
    assert table.to_arrow().sort_by("a") == expected

@@ -927,13 +980,17 @@ def test_merge_insert(mem_db: DBConnection):
    new_data = pa.table({"a": [2, 4], "b": ["x", "z"]})

    # replace-range
    (
    merge_insert_res = (
        table.merge_insert("a")
        .when_matched_update_all()
        .when_not_matched_insert_all()
        .when_not_matched_by_source_delete("a > 2")
        .execute(new_data)
    )
    assert merge_insert_res.version == 8
    assert merge_insert_res.num_inserted_rows == 1
    assert merge_insert_res.num_updated_rows == 1
    assert merge_insert_res.num_deleted_rows == 1

    expected = pa.table({"a": [1, 2, 4], "b": ["a", "x", "z"]})
    assert table.to_arrow().sort_by("a") == expected
@@ -941,15 +998,27 @@ def test_merge_insert(mem_db: DBConnection):
    table.restore(version)

    # replace-range no condition
    table.merge_insert(
        "a"
    ).when_matched_update_all().when_not_matched_insert_all().when_not_matched_by_source_delete().execute(
        new_data
    merge_insert_res = (
        table.merge_insert("a")
        .when_matched_update_all()
        .when_not_matched_insert_all()
        .when_not_matched_by_source_delete()
        .execute(new_data)
    )
    assert merge_insert_res.version == 10
    assert merge_insert_res.num_inserted_rows == 1
    assert merge_insert_res.num_updated_rows == 1
    assert merge_insert_res.num_deleted_rows == 2

    expected = pa.table({"a": [2, 4], "b": ["x", "z"]})
    assert table.to_arrow().sort_by("a") == expected

    # timeout
    with pytest.raises(Exception, match="merge insert timed out"):
        table.merge_insert("a").when_matched_update_all().execute(
            new_data, timeout=timedelta(0)
        )


# We vary the data format because there are slight differences in how
# subschemas are handled in different formats
@@ -1478,11 +1547,13 @@ def test_restore_consistency(tmp_path):
def test_add_columns(mem_db: DBConnection):
    data = pa.table({"id": [0, 1]})
    table = LanceTable.create(mem_db, "my_table", data=data)
    table.add_columns({"new_col": "id + 2"})
    add_columns_res = table.add_columns({"new_col": "id + 2"})
    assert add_columns_res.version == 2
    assert table.to_arrow().column_names == ["id", "new_col"]
    assert table.to_arrow()["new_col"].to_pylist() == [2, 3]

    table.add_columns({"null_int": "cast(null as bigint)"})
    add_columns_res = table.add_columns({"null_int": "cast(null as bigint)"})
    assert add_columns_res.version == 3
    assert table.schema.field("null_int").type == pa.int64()


@@ -1490,7 +1561,8 @@ def test_add_columns(mem_db: DBConnection):
async def test_add_columns_async(mem_db_async: AsyncConnection):
    data = pa.table({"id": [0, 1]})
    table = await mem_db_async.create_table("my_table", data=data)
    await table.add_columns({"new_col": "id + 2"})
    add_columns_res = await table.add_columns({"new_col": "id + 2"})
    assert add_columns_res.version == 2
    data = await table.to_arrow()
    assert data.column_names == ["id", "new_col"]
    assert data["new_col"].to_pylist() == [2, 3]
@@ -1500,9 +1572,10 @@ async def test_add_columns_async(mem_db_async: AsyncConnection):
async def test_add_columns_with_schema(mem_db_async: AsyncConnection):
    data = pa.table({"id": [0, 1]})
    table = await mem_db_async.create_table("my_table", data=data)
    await table.add_columns(
    add_columns_res = await table.add_columns(
        [pa.field("x", pa.int64()), pa.field("vector", pa.list_(pa.float32(), 8))]
    )
    assert add_columns_res.version == 2

    assert await table.schema() == pa.schema(
        [
@@ -1513,11 +1586,12 @@ async def test_add_columns_with_schema(mem_db_async: AsyncConnection):
    )

    table = await mem_db_async.create_table("table2", data=data)
    await table.add_columns(
    add_columns_res = await table.add_columns(
        pa.schema(
            [pa.field("y", pa.int64()), pa.field("emb", pa.list_(pa.float32(), 8))]
        )
    )
    assert add_columns_res.version == 2
    assert await table.schema() == pa.schema(
        [
            pa.field("id", pa.int64()),
@@ -1530,7 +1604,8 @@ async def test_add_columns_with_schema(mem_db_async: AsyncConnection):
def test_alter_columns(mem_db: DBConnection):
    data = pa.table({"id": [0, 1]})
    table = mem_db.create_table("my_table", data=data)
    table.alter_columns({"path": "id", "rename": "new_id"})
    alter_columns_res = table.alter_columns({"path": "id", "rename": "new_id"})
    assert alter_columns_res.version == 2
    assert table.to_arrow().column_names == ["new_id"]


@@ -1538,9 +1613,13 @@ def test_alter_columns(mem_db: DBConnection):
async def test_alter_columns_async(mem_db_async: AsyncConnection):
    data = pa.table({"id": [0, 1]})
    table = await mem_db_async.create_table("my_table", data=data)
    await table.alter_columns({"path": "id", "rename": "new_id"})
    alter_columns_res = await table.alter_columns({"path": "id", "rename": "new_id"})
    assert alter_columns_res.version == 2
    assert (await table.to_arrow()).column_names == ["new_id"]
    await table.alter_columns(dict(path="new_id", data_type=pa.int16(), nullable=True))
    alter_columns_res = await table.alter_columns(
        dict(path="new_id", data_type=pa.int16(), nullable=True)
    )
    assert alter_columns_res.version == 3
    data = await table.to_arrow()
    assert data.column(0).type == pa.int16()
    assert data.schema.field(0).nullable
@@ -1549,7 +1628,8 @@ async def test_alter_columns_async(mem_db_async: AsyncConnection):
def test_drop_columns(mem_db: DBConnection):
    data = pa.table({"id": [0, 1], "category": ["a", "b"]})
    table = mem_db.create_table("my_table", data=data)
    table.drop_columns(["category"])
    drop_columns_res = table.drop_columns(["category"])
    assert drop_columns_res.version == 2
    assert table.to_arrow().column_names == ["id"]


@@ -1557,7 +1637,8 @@ def test_drop_columns(mem_db: DBConnection):
async def test_drop_columns_async(mem_db_async: AsyncConnection):
    data = pa.table({"id": [0, 1], "category": ["a", "b"]})
    table = await mem_db_async.create_table("my_table", data=data)
    await table.drop_columns(["category"])
    drop_columns_res = await table.drop_columns(["category"])
    assert drop_columns_res.version == 2
    assert (await table.to_arrow()).column_names == ["id"]

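What these sync tests collectively pin down is that the table version increases by exactly one per successful mutation, starting from 1 at creation, and that every mutation now hands that version back. A condensed sketch of the invariant (fresh table assumed; names illustrative):

    import lancedb

    db = lancedb.connect("data/versions-db")  # illustrative path
    table = db.create_table("t", data=[{"id": 0}])                 # version 1
    assert table.add([{"id": 1}]).version == 2
    assert table.update(where="id=0", values={"id": 5}).version == 3
    assert table.delete("id=5").version == 4
    assert table.add_columns({"x": "id + 1"}).version == 5
    assert table.drop_columns(["x"]).version == 6
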
@@ -11,7 +11,10 @@ use pyo3::{
    wrap_pyfunction, Bound, PyResult, Python,
};
use query::{FTSQuery, HybridQuery, Query, VectorQuery};
use table::Table;
use table::{
    AddColumnsResult, AddResult, AlterColumnsResult, DeleteResult, DropColumnsResult, MergeResult,
    Table, UpdateResult,
};

pub mod arrow;
pub mod connection;
@@ -35,6 +38,13 @@ pub fn _lancedb(_py: Python, m: &Bound<'_, PyModule>) -> PyResult<()> {
    m.add_class::<HybridQuery>()?;
    m.add_class::<VectorQuery>()?;
    m.add_class::<RecordBatchStream>()?;
    m.add_class::<AddColumnsResult>()?;
    m.add_class::<AlterColumnsResult>()?;
    m.add_class::<AddResult>()?;
    m.add_class::<MergeResult>()?;
    m.add_class::<DeleteResult>()?;
    m.add_class::<DropColumnsResult>()?;
    m.add_class::<UpdateResult>()?;
    m.add_function(wrap_pyfunction!(connect, m)?)?;
    m.add_function(wrap_pyfunction!(util::validate_table_name, m)?)?;
    m.add("__version__", env!("CARGO_PKG_VERSION"))?;

@@ -17,10 +17,10 @@ use lancedb::table::{
    Table as LanceDbTable,
};
use pyo3::{
    exceptions::{PyIOError, PyKeyError, PyRuntimeError, PyValueError},
    exceptions::{PyKeyError, PyRuntimeError, PyValueError},
    pyclass, pymethods,
    types::{IntoPyDict, PyAnyMethods, PyDict, PyDictMethods, PyInt, PyString},
    Bound, FromPyObject, PyAny, PyObject, PyRef, PyResult, Python,
    types::{IntoPyDict, PyAnyMethods, PyDict, PyDictMethods},
    Bound, FromPyObject, PyAny, PyRef, PyResult, Python,
};
use pyo3_async_runtimes::tokio::future_into_py;

@@ -58,6 +58,170 @@ pub struct OptimizeStats {
    pub prune: RemovalStats,
}

#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct UpdateResult {
    pub rows_updated: u64,
    pub version: u64,
}

#[pymethods]
impl UpdateResult {
    pub fn __repr__(&self) -> String {
        format!(
            "UpdateResult(rows_updated={}, version={})",
            self.rows_updated, self.version
        )
    }
}

impl From<lancedb::table::UpdateResult> for UpdateResult {
    fn from(result: lancedb::table::UpdateResult) -> Self {
        Self {
            rows_updated: result.rows_updated,
            version: result.version,
        }
    }
}

#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct AddResult {
    pub version: u64,
}

#[pymethods]
impl AddResult {
    pub fn __repr__(&self) -> String {
        format!("AddResult(version={})", self.version)
    }
}

impl From<lancedb::table::AddResult> for AddResult {
    fn from(result: lancedb::table::AddResult) -> Self {
        Self {
            version: result.version,
        }
    }
}

#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct DeleteResult {
    pub version: u64,
}

#[pymethods]
impl DeleteResult {
    pub fn __repr__(&self) -> String {
        format!("DeleteResult(version={})", self.version)
    }
}

impl From<lancedb::table::DeleteResult> for DeleteResult {
    fn from(result: lancedb::table::DeleteResult) -> Self {
        Self {
            version: result.version,
        }
    }
}

#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct MergeResult {
    pub version: u64,
    pub num_updated_rows: u64,
    pub num_inserted_rows: u64,
    pub num_deleted_rows: u64,
}

#[pymethods]
impl MergeResult {
    pub fn __repr__(&self) -> String {
        format!(
            "MergeResult(version={}, num_updated_rows={}, num_inserted_rows={}, num_deleted_rows={})",
            self.version,
            self.num_updated_rows,
            self.num_inserted_rows,
            self.num_deleted_rows
        )
    }
}

impl From<lancedb::table::MergeResult> for MergeResult {
    fn from(result: lancedb::table::MergeResult) -> Self {
        Self {
            version: result.version,
            num_updated_rows: result.num_updated_rows,
            num_inserted_rows: result.num_inserted_rows,
            num_deleted_rows: result.num_deleted_rows,
        }
    }
}

#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct AddColumnsResult {
    pub version: u64,
}

#[pymethods]
impl AddColumnsResult {
    pub fn __repr__(&self) -> String {
        format!("AddColumnsResult(version={})", self.version)
    }
}

impl From<lancedb::table::AddColumnsResult> for AddColumnsResult {
    fn from(result: lancedb::table::AddColumnsResult) -> Self {
        Self {
            version: result.version,
        }
    }
}

#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct AlterColumnsResult {
    pub version: u64,
}

#[pymethods]
impl AlterColumnsResult {
    pub fn __repr__(&self) -> String {
        format!("AlterColumnsResult(version={})", self.version)
    }
}

impl From<lancedb::table::AlterColumnsResult> for AlterColumnsResult {
    fn from(result: lancedb::table::AlterColumnsResult) -> Self {
        Self {
            version: result.version,
        }
    }
}

#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct DropColumnsResult {
    pub version: u64,
}

#[pymethods]
impl DropColumnsResult {
    pub fn __repr__(&self) -> String {
        format!("DropColumnsResult(version={})", self.version)
    }
}

impl From<lancedb::table::DropColumnsResult> for DropColumnsResult {
    fn from(result: lancedb::table::DropColumnsResult) -> Self {
        Self {
            version: result.version,
        }
    }
}

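Because each result type is a #[pyclass(get_all)] with a __repr__, Python callers get attribute access and readable reprs without any extra wrapping. A quick interactive sketch of what that looks like (values illustrative; table is any open LanceDB table):

    >>> res = table.delete("id = 2")
    >>> res
    DeleteResult(version=2)
    >>> res.version
    2
    >>> table.update(values={"id": 9}, where="id = 1")
    UpdateResult(rows_updated=1, version=3)
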
#[pyclass]
pub struct Table {
    // We keep a copy of the name to use if the inner table is dropped
@@ -132,15 +296,16 @@ impl Table {
        }

        future_into_py(self_.py(), async move {
            op.execute().await.infer_error()?;
            Ok(())
            let result = op.execute().await.infer_error()?;
            Ok(AddResult::from(result))
        })
    }

    pub fn delete(self_: PyRef<'_, Self>, condition: String) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            inner.delete(&condition).await.infer_error()
            let result = inner.delete(&condition).await.infer_error()?;
            Ok(DeleteResult::from(result))
        })
    }

@@ -160,8 +325,8 @@ impl Table {
            op = op.column(column_name, value);
        }
        future_into_py(self_.py(), async move {
            op.execute().await.infer_error()?;
            Ok(())
            let result = op.execute().await.infer_error()?;
            Ok(UpdateResult::from(result))
        })
    }

@@ -355,25 +520,15 @@ impl Table {
        })
    }

    pub fn checkout(self_: PyRef<'_, Self>, version: PyObject) -> PyResult<Bound<'_, PyAny>> {
    pub fn checkout(self_: PyRef<'_, Self>, version: LanceVersion) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner_ref()?.clone();
        let py = self_.py();
        let (is_int, int_value, string_value) = if let Ok(i) = version.downcast_bound::<PyInt>(py) {
            let num: u64 = i.extract()?;
            (true, num, String::new())
        } else if let Ok(s) = version.downcast_bound::<PyString>(py) {
            let str_value = s.to_string();
            (false, 0, str_value)
        } else {
            return Err(PyIOError::new_err(
                "version must be an integer or a string.",
            ));
        };
        future_into_py(py, async move {
            if is_int {
                inner.checkout(int_value).await.infer_error()
            } else {
                inner.checkout_tag(&string_value).await.infer_error()
            match version {
                LanceVersion::Version(version_num) => {
                    inner.checkout(version_num).await.infer_error()
                }
                LanceVersion::Tag(tag) => inner.checkout_tag(&tag).await.infer_error(),
            }
        })
    }
@@ -386,12 +541,19 @@ impl Table {
    }

    #[pyo3(signature = (version=None))]
    pub fn restore(self_: PyRef<'_, Self>, version: Option<u64>) -> PyResult<Bound<'_, PyAny>> {
    pub fn restore(
        self_: PyRef<'_, Self>,
        version: Option<LanceVersion>,
    ) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner_ref()?.clone();
        let py = self_.py();

        future_into_py(self_.py(), async move {
        future_into_py(py, async move {
            if let Some(version) = version {
                inner.checkout(version).await.infer_error()?;
                match version {
                    LanceVersion::Version(num) => inner.checkout(num).await.infer_error()?,
                    LanceVersion::Tag(tag) => inner.checkout_tag(&tag).await.infer_error()?,
                }
            }
            inner.restore().await.infer_error()
        })
@@ -487,16 +649,13 @@ impl Table {
            builder
                .when_not_matched_by_source_delete(parameters.when_not_matched_by_source_condition);
        }
        if let Some(timeout) = parameters.timeout {
            builder.timeout(timeout);
        }

        future_into_py(self_.py(), async move {
            let stats = builder.execute(Box::new(batches)).await.infer_error()?;
            Python::with_gil(|py| {
                let dict = PyDict::new(py);
                dict.set_item("num_inserted_rows", stats.num_inserted_rows)?;
                dict.set_item("num_updated_rows", stats.num_updated_rows)?;
                dict.set_item("num_deleted_rows", stats.num_deleted_rows)?;
                Ok(dict.unbind())
            })
            let res = builder.execute(Box::new(batches)).await.infer_error()?;
            Ok(MergeResult::from(res))
        })
    }

@@ -532,8 +691,8 @@ impl Table {

        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            inner.add_columns(definitions, None).await.infer_error()?;
            Ok(())
            let result = inner.add_columns(definitions, None).await.infer_error()?;
            Ok(AddColumnsResult::from(result))
        })
    }

@@ -546,8 +705,8 @@ impl Table {

        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            inner.add_columns(transform, None).await.infer_error()?;
            Ok(())
            let result = inner.add_columns(transform, None).await.infer_error()?;
            Ok(AddColumnsResult::from(result))
        })
    }

@@ -590,8 +749,8 @@ impl Table {

        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            inner.alter_columns(&alterations).await.infer_error()?;
            Ok(())
            let result = inner.alter_columns(&alterations).await.infer_error()?;
            Ok(AlterColumnsResult::from(result))
        })
    }

@@ -599,8 +758,8 @@ impl Table {
        let inner = self_.inner_ref()?.clone();
        future_into_py(self_.py(), async move {
            let column_refs = columns.iter().map(String::as_str).collect::<Vec<&str>>();
            inner.drop_columns(&column_refs).await.infer_error()?;
            Ok(())
            let result = inner.drop_columns(&column_refs).await.infer_error()?;
            Ok(DropColumnsResult::from(result))
        })
    }

@@ -636,6 +795,12 @@ impl Table {
    }
}

#[derive(FromPyObject)]
pub enum LanceVersion {
    Version(u64),
    Tag(String),
}

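The FromPyObject derive on LanceVersion is what lets a single Python argument accept either a numeric version or a string tag, replacing the hand-rolled PyInt/PyString downcasts above. From the Python side (a sketch; the tag usage mirrors test_restore_with_tags earlier in this diff):

    table.tags.create("tag1", 1)   # tag version 1
    table.checkout(1)              # check out by number...
    table.checkout("tag1")         # ...or by tag; both feed the same argument
    table.restore()                # make the checked-out version current again
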
#[derive(FromPyObject)]
#[pyo3(from_item_all)]
pub struct MergeInsertParams {
@@ -645,6 +810,7 @@ pub struct MergeInsertParams {
    when_not_matched_insert_all: bool,
    when_not_matched_by_source_delete: bool,
    when_not_matched_by_source_condition: Option<String>,
    timeout: Option<std::time::Duration>,
}

#[pyclass]

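The new timeout field means the Python keyword argument now survives the FFI boundary instead of being silently dropped. A sketch of the call site, mirroring the sync test earlier in this diff (table and new_data as defined there):

    from datetime import timedelta

    res = (
        table.merge_insert("a")
        .when_matched_update_all()
        .when_not_matched_insert_all()
        .execute(new_data, timeout=timedelta(seconds=10))
    )
    # A zero timeout fails fast; the test expects "merge insert timed out".
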
@@ -1,6 +1,6 @@
[package]
name = "lancedb-node"
version = "0.19.1-beta.1"
version = "0.19.1-beta.4"
description = "Serverless, low-latency vector database for AI applications"
license.workspace = true
edition.workspace = true

@@ -1,6 +1,6 @@
[package]
name = "lancedb"
version = "0.19.1-beta.1"
version = "0.19.1-beta.4"
edition.workspace = true
description = "LanceDB: A serverless, low-latency vector database for AI applications"
license.workspace = true

@@ -4,7 +4,14 @@
use crate::index::Index;
use crate::index::IndexStatistics;
use crate::query::{QueryFilter, QueryRequest, Select, VectorQueryRequest};
use crate::table::AddColumnsResult;
use crate::table::AddResult;
use crate::table::AlterColumnsResult;
use crate::table::DeleteResult;
use crate::table::DropColumnsResult;
use crate::table::MergeResult;
use crate::table::Tags;
use crate::table::UpdateResult;
use crate::table::{AddDataMode, AnyQuery, Filter, TableStatistics};
use crate::utils::{supported_btree_data_type, supported_vector_data_type};
use crate::{DistanceType, Error, Table};
@@ -47,7 +54,6 @@ use crate::{
        TableDefinition, UpdateBuilder,
    },
};
use lance::dataset::MergeStats;

const REQUEST_TIMEOUT_HEADER: HeaderName = HeaderName::from_static("x-request-timeout-ms");

@@ -735,7 +741,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
        &self,
        add: AddDataBuilder<NoData>,
        data: Box<dyn RecordBatchReader + Send>,
    ) -> Result<()> {
    ) -> Result<AddResult> {
        self.check_mutable().await?;
        let mut request = self
            .client
@@ -750,9 +756,19 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
        }

        let (request_id, response) = self.send_streaming(request, data, true).await?;
        self.check_table_response(&request_id, response).await?;
        let response = self.check_table_response(&request_id, response).await?;
        let body = response.text().await.err_to_http(request_id.clone())?;
        if body.trim().is_empty() {
            // Backward compatible with old servers
            return Ok(AddResult { version: 0 });
        }

        Ok(())
        let add_response: AddResult = serde_json::from_str(&body).map_err(|e| Error::Http {
            source: format!("Failed to parse add response: {}", e).into(),
            request_id,
            status_code: None,
        })?;
        Ok(add_response)
    }

    async fn create_plan(
@@ -885,7 +901,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
        Ok(final_analyze)
    }

    async fn update(&self, update: UpdateBuilder) -> Result<u64> {
    async fn update(&self, update: UpdateBuilder) -> Result<UpdateResult> {
        self.check_mutable().await?;
        let request = self
            .client
@@ -902,13 +918,28 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
        }));

        let (request_id, response) = self.send(request, true).await?;
        let response = self.check_table_response(&request_id, response).await?;
        let body = response.text().await.err_to_http(request_id.clone())?;

        self.check_table_response(&request_id, response).await?;
        if body.trim().is_empty() {
            // Backward compatible with old servers
            return Ok(UpdateResult {
                rows_updated: 0,
                version: 0,
            });
        }

        Ok(0) // TODO: support returning number of modified rows once supported in SaaS.
        let update_response: UpdateResult =
            serde_json::from_str(&body).map_err(|e| Error::Http {
                source: format!("Failed to parse update response: {}", e).into(),
                request_id,
                status_code: None,
            })?;

        Ok(update_response)
    }

    async fn delete(&self, predicate: &str) -> Result<()> {
    async fn delete(&self, predicate: &str) -> Result<DeleteResult> {
        self.check_mutable().await?;
        let body = serde_json::json!({ "predicate": predicate });
        let request = self
@@ -916,8 +947,19 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
            .post(&format!("/v1/table/{}/delete/", self.name))
            .json(&body);
        let (request_id, response) = self.send(request, true).await?;
        self.check_table_response(&request_id, response).await?;
        Ok(())
        let response = self.check_table_response(&request_id, response).await?;
        let body = response.text().await.err_to_http(request_id.clone())?;
        if body.trim().is_empty() {
            // Backward compatible with old servers
            return Ok(DeleteResult { version: 0 });
        }
        let delete_response: DeleteResult =
            serde_json::from_str(&body).map_err(|e| Error::Http {
                source: format!("Failed to parse delete response: {}", e).into(),
                request_id,
                status_code: None,
            })?;
        Ok(delete_response)
    }

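Each remote mutation above follows the same backward-compatibility contract: an empty response body from an older server maps to a zeroed result, while a newer server returns JSON that is parsed into the result type. The contract itself, sketched in Python pseudocode (this is an illustration of the protocol, not the actual client code):

    import json

    def parse_mutation_response(body: str, default: dict) -> dict:
        # Older servers reply with an empty body; report version 0 in that case.
        if not body.strip():
            return default
        return json.loads(body)  # e.g. {"rows_updated": 5, "version": 43}

    parse_mutation_response("", {"rows_updated": 0, "version": 0})
    parse_mutation_response('{"rows_updated": 5, "version": 43}', {})
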
||||
async fn create_index(&self, mut index: IndexBuilder) -> Result<()> {
|
||||
@@ -1023,23 +1065,48 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
&self,
|
||||
params: MergeInsertBuilder,
|
||||
new_data: Box<dyn RecordBatchReader + Send>,
|
||||
) -> Result<MergeStats> {
|
||||
) -> Result<MergeResult> {
|
||||
self.check_mutable().await?;
|
||||
|
||||
let timeout = params.timeout;
|
||||
|
||||
let query = MergeInsertRequest::try_from(params)?;
|
||||
let request = self
|
||||
let mut request = self
|
||||
.client
|
||||
.post(&format!("/v1/table/{}/merge_insert/", self.name))
|
||||
.query(&query)
|
||||
.header(CONTENT_TYPE, ARROW_STREAM_CONTENT_TYPE);
|
||||
|
||||
if let Some(timeout) = timeout {
|
||||
// (If it doesn't fit into u64, it's not worth sending anyways.)
|
||||
if let Ok(timeout_ms) = u64::try_from(timeout.as_millis()) {
|
||||
request = request.header(REQUEST_TIMEOUT_HEADER, timeout_ms);
|
||||
}
|
||||
}
|
||||
|
||||
let (request_id, response) = self.send_streaming(request, new_data, true).await?;
|
||||
|
||||
// TODO: server can response with these stats in response body.
|
||||
// We should test that we can handle both empty response from old server
|
||||
// and response with stats from new server.
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
Ok(MergeStats::default())
|
||||
let response = self.check_table_response(&request_id, response).await?;
|
||||
let body = response.text().await.err_to_http(request_id.clone())?;
|
||||
|
||||
if body.trim().is_empty() {
|
||||
// Backward compatible with old servers
|
||||
return Ok(MergeResult {
|
||||
version: 0,
|
||||
num_deleted_rows: 0,
|
||||
num_inserted_rows: 0,
|
||||
num_updated_rows: 0,
|
||||
});
|
||||
}
|
||||
|
||||
let merge_insert_response: MergeResult =
|
||||
serde_json::from_str(&body).map_err(|e| Error::Http {
|
||||
source: format!("Failed to parse merge_insert response: {}", e).into(),
|
||||
request_id,
|
||||
status_code: None,
|
||||
})?;
|
||||
|
||||
Ok(merge_insert_response)
|
||||
}
|
||||
|
||||
async fn tags(&self) -> Result<Box<dyn Tags + '_>> {
|
||||
@@ -1062,7 +1129,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
&self,
|
||||
transforms: NewColumnTransform,
|
||||
_read_columns: Option<Vec<String>>,
|
||||
) -> Result<()> {
|
||||
) -> Result<AddColumnsResult> {
|
||||
self.check_mutable().await?;
|
||||
match transforms {
|
||||
NewColumnTransform::SqlExpressions(expressions) => {
|
||||
@@ -1080,9 +1147,23 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
.client
|
||||
.post(&format!("/v1/table/{}/add_columns/", self.name))
|
||||
.json(&body);
|
||||
let (request_id, response) = self.send(request, true).await?; // todo:
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
Ok(())
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
let response = self.check_table_response(&request_id, response).await?;
|
||||
let body = response.text().await.err_to_http(request_id.clone())?;
|
||||
|
||||
if body.trim().is_empty() {
|
||||
// Backward compatible with old servers
|
||||
return Ok(AddColumnsResult { version: 0 });
|
||||
}
|
||||
|
||||
let result: AddColumnsResult =
|
||||
serde_json::from_str(&body).map_err(|e| Error::Http {
|
||||
source: format!("Failed to parse add_columns response: {}", e).into(),
|
||||
request_id,
|
||||
status_code: None,
|
||||
})?;
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
_ => {
|
||||
return Err(Error::NotSupported {
|
||||
@@ -1092,7 +1173,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
}
|
||||
}
|
||||
|
||||
async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<()> {
|
||||
async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<AlterColumnsResult> {
|
||||
self.check_mutable().await?;
|
||||
let body = alterations
|
||||
.iter()
|
||||
@@ -1120,11 +1201,24 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
.post(&format!("/v1/table/{}/alter_columns/", self.name))
|
||||
.json(&body);
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
Ok(())
|
||||
let response = self.check_table_response(&request_id, response).await?;
|
||||
let body = response.text().await.err_to_http(request_id.clone())?;
|
||||
|
||||
if body.trim().is_empty() {
|
||||
// Backward compatible with old servers
|
||||
return Ok(AlterColumnsResult { version: 0 });
|
||||
}
|
||||
|
||||
let result: AlterColumnsResult = serde_json::from_str(&body).map_err(|e| Error::Http {
|
||||
source: format!("Failed to parse alter_columns response: {}", e).into(),
|
||||
request_id,
|
||||
status_code: None,
|
||||
})?;
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn drop_columns(&self, columns: &[&str]) -> Result<()> {
|
||||
async fn drop_columns(&self, columns: &[&str]) -> Result<DropColumnsResult> {
|
||||
self.check_mutable().await?;
|
||||
let body = serde_json::json!({ "columns": columns });
|
||||
let request = self
|
||||
@@ -1132,8 +1226,21 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
.post(&format!("/v1/table/{}/drop_columns/", self.name))
|
||||
.json(&body);
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
Ok(())
|
||||
let response = self.check_table_response(&request_id, response).await?;
|
||||
let body = response.text().await.err_to_http(request_id.clone())?;
|
||||
|
||||
if body.trim().is_empty() {
|
||||
// Backward compatible with old servers
|
||||
return Ok(DropColumnsResult { version: 0 });
|
||||
}
|
||||
|
||||
let result: DropColumnsResult = serde_json::from_str(&body).map_err(|e| Error::Http {
|
||||
source: format!("Failed to parse drop_columns response: {}", e).into(),
|
||||
request_id,
|
||||
status_code: None,
|
||||
})?;
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn list_indices(&self) -> Result<Vec<IndexConfig>> {
|
||||
@@ -1227,7 +1334,12 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
self.name, index_name
|
||||
));
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
if response.status() == StatusCode::NOT_FOUND {
|
||||
return Err(Error::IndexNotFound {
|
||||
name: index_name.to_string(),
|
||||
});
|
||||
};
|
||||
self.client.check_response(&request_id, response).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1357,16 +1469,20 @@ mod tests {
|
||||
.execute(example_data())
|
||||
.map_ok(|_| ()),
|
||||
),
|
||||
Box::pin(table.delete("false")),
|
||||
Box::pin(table.add_columns(
|
||||
NewColumnTransform::SqlExpressions(vec![("x".into(), "y".into())]),
|
||||
None,
|
||||
)),
|
||||
Box::pin(table.delete("false").map_ok(|_| ())),
|
||||
Box::pin(
|
||||
table
|
||||
.add_columns(
|
||||
NewColumnTransform::SqlExpressions(vec![("x".into(), "y".into())]),
|
||||
None,
|
||||
)
|
||||
.map_ok(|_| ()),
|
||||
),
|
||||
Box::pin(async {
|
||||
let alterations = vec![ColumnAlteration::new("x".into()).rename("y".into())];
|
||||
table.alter_columns(&alterations).await
|
||||
table.alter_columns(&alterations).await.map(|_| ())
|
||||
}),
|
||||
Box::pin(table.drop_columns(&["a"])),
|
||||
Box::pin(table.drop_columns(&["a"]).map_ok(|_| ())),
|
||||
// TODO: other endpoints.
|
||||
];
|
||||
|
||||
@@ -1497,52 +1613,66 @@ mod tests {
|
||||
body
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case("", 0)]
|
||||
#[case("{}", 0)]
|
||||
#[case(r#"{"request_id": "test-request-id"}"#, 0)]
|
||||
#[case(r#"{"version": 43}"#, 43)]
|
||||
#[tokio::test]
|
||||
async fn test_add_append() {
|
||||
async fn test_add_append(#[case] response_body: &str, #[case] expected_version: u64) {
|
||||
let data = RecordBatch::try_new(
|
||||
Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
|
||||
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Clone response_body to give it 'static lifetime for the closure
|
||||
let response_body = response_body.to_string();
|
||||
|
||||
let (sender, receiver) = std::sync::mpsc::channel();
|
||||
let table = Table::new_with_handler("my_table", move |mut request| {
|
||||
assert_eq!(request.method(), "POST");
|
||||
assert_eq!(request.url().path(), "/v1/table/my_table/insert/");
|
||||
// If mode is specified, it should be "append". Append is default
|
||||
// so it's not required.
|
||||
assert!(request
|
||||
.url()
|
||||
.query_pairs()
|
||||
.filter(|(k, _)| k == "mode")
|
||||
.all(|(_, v)| v == "append"));
|
||||
|
||||
assert_eq!(
|
||||
request.headers().get("Content-Type").unwrap(),
|
||||
ARROW_STREAM_CONTENT_TYPE
|
||||
);
|
||||
|
||||
let mut body_out = reqwest::Body::from(Vec::new());
|
||||
std::mem::swap(request.body_mut().as_mut().unwrap(), &mut body_out);
|
||||
sender.send(body_out).unwrap();
|
||||
|
||||
http::Response::builder().status(200).body("").unwrap()
|
||||
if request.url().path() == "/v1/table/my_table/insert/" {
|
||||
assert_eq!(request.method(), "POST");
|
||||
assert!(request
|
||||
.url()
|
||||
.query_pairs()
|
||||
.filter(|(k, _)| k == "mode")
|
||||
.all(|(_, v)| v == "append"));
|
||||
assert_eq!(
|
||||
request.headers().get("Content-Type").unwrap(),
|
||||
ARROW_STREAM_CONTENT_TYPE
|
||||
);
|
||||
let mut body_out = reqwest::Body::from(Vec::new());
|
||||
std::mem::swap(request.body_mut().as_mut().unwrap(), &mut body_out);
|
||||
sender.send(body_out).unwrap();
|
||||
http::Response::builder()
|
||||
.status(200)
|
||||
.body(response_body.clone())
|
||||
.unwrap()
|
||||
} else {
|
||||
panic!("Unexpected request path: {}", request.url().path());
|
||||
}
|
||||
});
|
||||
|
||||
table
|
||||
let result = table
|
||||
.add(RecordBatchIterator::new([Ok(data.clone())], data.schema()))
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Check version matches expected value
|
||||
assert_eq!(result.version, expected_version);
|
||||
|
||||
let body = receiver.recv().unwrap();
|
||||
let body = collect_body(body).await;
|
||||
let expected_body = write_ipc_stream(&data);
|
||||
assert_eq!(&body, &expected_body);
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case(true)]
|
||||
#[case(false)]
|
||||
#[tokio::test]
|
||||
async fn test_add_overwrite() {
|
||||
async fn test_add_overwrite(#[case] old_server: bool) {
|
||||
let data = RecordBatch::try_new(
|
||||
Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
|
||||
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
|
||||
@@ -1573,56 +1703,78 @@ mod tests {
|
||||
std::mem::swap(request.body_mut().as_mut().unwrap(), &mut body_out);
|
||||
sender.send(body_out).unwrap();
|
||||
|
||||
http::Response::builder().status(200).body("").unwrap()
|
||||
if old_server {
|
||||
http::Response::builder().status(200).body("").unwrap()
|
||||
} else {
|
||||
http::Response::builder()
|
||||
.status(200)
|
||||
.body(r#"{"version": 43}"#)
|
||||
.unwrap()
|
||||
}
|
||||
});
|
||||
|
||||
table
|
||||
let result = table
|
||||
.add(RecordBatchIterator::new([Ok(data.clone())], data.schema()))
|
||||
.mode(AddDataMode::Overwrite)
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(result.version, if old_server { 0 } else { 43 });
|
||||
|
||||
let body = receiver.recv().unwrap();
|
||||
let body = collect_body(body).await;
|
||||
let expected_body = write_ipc_stream(&data);
|
||||
assert_eq!(&body, &expected_body);
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case(true)]
|
||||
#[case(false)]
|
||||
#[tokio::test]
|
||||
async fn test_update() {
|
||||
let table = Table::new_with_handler("my_table", |request| {
|
||||
assert_eq!(request.method(), "POST");
|
||||
assert_eq!(request.url().path(), "/v1/table/my_table/update/");
|
||||
assert_eq!(
|
||||
request.headers().get("Content-Type").unwrap(),
|
||||
JSON_CONTENT_TYPE
|
||||
);
|
||||
async fn test_update(#[case] old_server: bool) {
|
||||
let table = Table::new_with_handler("my_table", move |request| {
|
||||
if request.url().path() == "/v1/table/my_table/update/" {
|
||||
assert_eq!(request.method(), "POST");
|
||||
assert_eq!(
|
||||
request.headers().get("Content-Type").unwrap(),
|
||||
JSON_CONTENT_TYPE
|
||||
);
|
||||
|
||||
if let Some(body) = request.body().unwrap().as_bytes() {
|
||||
let body = std::str::from_utf8(body).unwrap();
|
||||
let value: serde_json::Value = serde_json::from_str(body).unwrap();
|
||||
let updates = value.get("updates").unwrap().as_array().unwrap();
|
||||
assert!(updates.len() == 2);
|
||||
if let Some(body) = request.body().unwrap().as_bytes() {
|
||||
let body = std::str::from_utf8(body).unwrap();
|
||||
let value: serde_json::Value = serde_json::from_str(body).unwrap();
|
||||
let updates = value.get("updates").unwrap().as_array().unwrap();
|
||||
assert!(updates.len() == 2);
|
||||
|
||||
let col_name = updates[0][0].as_str().unwrap();
|
||||
let expression = updates[0][1].as_str().unwrap();
|
||||
assert_eq!(col_name, "a");
|
||||
assert_eq!(expression, "a + 1");
|
||||
let col_name = updates[0][0].as_str().unwrap();
|
||||
let expression = updates[0][1].as_str().unwrap();
|
||||
assert_eq!(col_name, "a");
|
||||
assert_eq!(expression, "a + 1");
|
||||
|
||||
let col_name = updates[1][0].as_str().unwrap();
|
||||
let expression = updates[1][1].as_str().unwrap();
|
||||
assert_eq!(col_name, "b");
|
||||
assert_eq!(expression, "b - 1");
|
||||
let col_name = updates[1][0].as_str().unwrap();
|
||||
let expression = updates[1][1].as_str().unwrap();
|
||||
assert_eq!(col_name, "b");
|
||||
assert_eq!(expression, "b - 1");
|
||||
|
||||
let only_if = value.get("predicate").unwrap().as_str().unwrap();
|
||||
assert_eq!(only_if, "b > 10");
|
||||
let only_if = value.get("predicate").unwrap().as_str().unwrap();
|
||||
assert_eq!(only_if, "b > 10");
|
||||
}
|
||||
|
||||
if old_server {
|
||||
http::Response::builder().status(200).body("").unwrap()
|
||||
} else {
|
||||
http::Response::builder()
|
||||
.status(200)
|
||||
.body(r#"{"rows_updated": 5, "version": 43}"#)
|
||||
.unwrap()
|
||||
}
|
||||
} else {
|
||||
panic!("Unexpected request path: {}", request.url().path());
|
||||
}
|
||||
|
||||
http::Response::builder().status(200).body("{}").unwrap()
|
||||
});
|
||||
|
||||
table
|
||||
let result = table
|
||||
.update()
|
||||
.column("a", "a + 1")
|
||||
.column("b", "b - 1")
|
||||
@@ -1630,10 +1782,73 @@ mod tests {
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(result.version, if old_server { 0 } else { 43 });
|
||||
assert_eq!(result.rows_updated, if old_server { 0 } else { 5 });
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case(true)]
|
||||
#[case(false)]
|
||||
#[tokio::test]
|
||||
async fn test_merge_insert() {
|
||||
async fn test_alter_columns(#[case] old_server: bool) {
|
||||
let table = Table::new_with_handler("my_table", move |request| {
|
||||
if request.url().path() == "/v1/table/my_table/alter_columns/" {
|
||||
assert_eq!(request.method(), "POST");
|
||||
assert_eq!(
|
||||
request.headers().get("Content-Type").unwrap(),
|
||||
JSON_CONTENT_TYPE
|
||||
);
|
||||
|
||||
let body = request.body().unwrap().as_bytes().unwrap();
|
||||
let body = std::str::from_utf8(body).unwrap();
|
||||
let value: serde_json::Value = serde_json::from_str(body).unwrap();
|
||||
let alterations = value.get("alterations").unwrap().as_array().unwrap();
|
||||
assert!(alterations.len() == 2);
|
||||
|
||||
let path = alterations[0]["path"].as_str().unwrap();
|
||||
let data_type = alterations[0]["data_type"]["type"].as_str().unwrap();
|
||||
assert_eq!(path, "b.c");
|
||||
assert_eq!(data_type, "int32");
|
||||
|
||||
let path = alterations[1]["path"].as_str().unwrap();
|
||||
let nullable = alterations[1]["nullable"].as_bool().unwrap();
|
||||
let rename = alterations[1]["rename"].as_str().unwrap();
|
||||
assert_eq!(path, "x");
|
||||
assert!(nullable);
|
||||
assert_eq!(rename, "y");
|
||||
|
||||
if old_server {
|
||||
http::Response::builder().status(200).body("{}").unwrap()
|
||||
} else {
|
||||
http::Response::builder()
|
||||
.status(200)
|
||||
.body(r#"{"version": 43}"#)
|
||||
.unwrap()
|
||||
}
|
||||
} else {
|
||||
panic!("Unexpected request path: {}", request.url().path());
|
||||
}
|
||||
});
|
||||
|
||||
let result = table
|
||||
.alter_columns(&[
|
||||
ColumnAlteration::new("b.c".into()).cast_to(DataType::Int32),
|
||||
ColumnAlteration::new("x".into())
|
||||
.rename("y".into())
|
||||
.set_nullable(true),
|
||||
])
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(result.version, if old_server { 0 } else { 43 });
|
||||
}

#[rstest]
#[case(true)]
#[case(false)]
#[tokio::test]
async fn test_merge_insert(#[case] old_server: bool) {
    let batch = RecordBatch::try_new(
        Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
        vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
@@ -1644,66 +1859,43 @@ mod tests {
        batch.schema(),
    ));

    // Default parameters
    let table = Table::new_with_handler("my_table", |request| {
        assert_eq!(request.method(), "POST");
        assert_eq!(request.url().path(), "/v1/table/my_table/merge_insert/");
    let table = Table::new_with_handler("my_table", move |request| {
        if request.url().path() == "/v1/table/my_table/merge_insert/" {
            assert_eq!(request.method(), "POST");

        let params = request.url().query_pairs().collect::<HashMap<_, _>>();
        assert_eq!(params["on"], "some_col");
        assert_eq!(params["when_matched_update_all"], "false");
        assert_eq!(params["when_not_matched_insert_all"], "false");
        assert_eq!(params["when_not_matched_by_source_delete"], "false");
        assert!(!params.contains_key("when_matched_update_all_filt"));
        assert!(!params.contains_key("when_not_matched_by_source_delete_filt"));
            let params = request.url().query_pairs().collect::<HashMap<_, _>>();
            assert_eq!(params["on"], "some_col");
            assert_eq!(params["when_matched_update_all"], "false");
            assert_eq!(params["when_not_matched_insert_all"], "false");
            assert_eq!(params["when_not_matched_by_source_delete"], "false");
            assert!(!params.contains_key("when_matched_update_all_filt"));
            assert!(!params.contains_key("when_not_matched_by_source_delete_filt"));

        http::Response::builder().status(200).body("").unwrap()
            if old_server {
                http::Response::builder().status(200).body("{}").unwrap()
            } else {
                http::Response::builder()
                    .status(200)
                    .body(r#"{"version": 43, "num_deleted_rows": 0, "num_inserted_rows": 3, "num_updated_rows": 0}"#)
                    .unwrap()
            }
        } else {
            panic!("Unexpected request path: {}", request.url().path());
        }
    });

    table
    let result = table
        .merge_insert(&["some_col"])
        .execute(data)
        .await
        .unwrap();

    // All parameters specified
    let (sender, receiver) = std::sync::mpsc::channel();
    let table = Table::new_with_handler("my_table", move |mut request| {
        assert_eq!(request.method(), "POST");
        assert_eq!(request.url().path(), "/v1/table/my_table/merge_insert/");
        assert_eq!(
            request.headers().get("Content-Type").unwrap(),
            ARROW_STREAM_CONTENT_TYPE
        );

        let params = request.url().query_pairs().collect::<HashMap<_, _>>();
        assert_eq!(params["on"], "some_col");
        assert_eq!(params["when_matched_update_all"], "true");
        assert_eq!(params["when_not_matched_insert_all"], "false");
        assert_eq!(params["when_not_matched_by_source_delete"], "true");
        assert_eq!(params["when_matched_update_all_filt"], "a = 1");
        assert_eq!(params["when_not_matched_by_source_delete_filt"], "b = 2");

        let mut body_out = reqwest::Body::from(Vec::new());
        std::mem::swap(request.body_mut().as_mut().unwrap(), &mut body_out);
        sender.send(body_out).unwrap();

        http::Response::builder().status(200).body("").unwrap()
    });
    let mut builder = table.merge_insert(&["some_col"]);
    builder
        .when_matched_update_all(Some("a = 1".into()))
        .when_not_matched_by_source_delete(Some("b = 2".into()));
    let data = Box::new(RecordBatchIterator::new(
        [Ok(batch.clone())],
        batch.schema(),
    ));
    builder.execute(data).await.unwrap();

    let body = receiver.recv().unwrap();
    let body = collect_body(body).await;
    let expected_body = write_ipc_stream(&batch);
    assert_eq!(&body, &expected_body);
    assert_eq!(result.version, if old_server { 0 } else { 43 });
    if !old_server {
        assert_eq!(result.num_deleted_rows, 0);
        assert_eq!(result.num_inserted_rows, 3);
        assert_eq!(result.num_updated_rows, 0);
    }
}

#[tokio::test]
@@ -1742,25 +1934,80 @@ mod tests {
    assert!(e.to_string().contains("Hit retry limit"));
}

#[rstest]
#[case(true)]
#[case(false)]
#[tokio::test]
async fn test_delete() {
    let table = Table::new_with_handler("my_table", |request| {
        assert_eq!(request.method(), "POST");
        assert_eq!(request.url().path(), "/v1/table/my_table/delete/");
        assert_eq!(
            request.headers().get("Content-Type").unwrap(),
            JSON_CONTENT_TYPE
        );
async fn test_delete(#[case] old_server: bool) {
    let table = Table::new_with_handler("my_table", move |request| {
        if request.url().path() == "/v1/table/my_table/delete/" {
            assert_eq!(request.method(), "POST");
            assert_eq!(
                request.headers().get("Content-Type").unwrap(),
                JSON_CONTENT_TYPE
            );

        let body = request.body().unwrap().as_bytes().unwrap();
        let body: serde_json::Value = serde_json::from_slice(body).unwrap();
        let predicate = body.get("predicate").unwrap().as_str().unwrap();
        assert_eq!(predicate, "id in (1, 2, 3)");
            let body = request.body().unwrap().as_bytes().unwrap();
            let body: serde_json::Value = serde_json::from_slice(body).unwrap();
            let predicate = body.get("predicate").unwrap().as_str().unwrap();
            assert_eq!(predicate, "id in (1, 2, 3)");

        http::Response::builder().status(200).body("").unwrap()
            if old_server {
                http::Response::builder().status(200).body("{}").unwrap()
            } else {
                http::Response::builder()
                    .status(200)
                    .body(r#"{"version": 43}"#)
                    .unwrap()
            }
        } else {
            panic!("Unexpected request path: {}", request.url().path());
        }
    });

    table.delete("id in (1, 2, 3)").await.unwrap();
    let result = table.delete("id in (1, 2, 3)").await.unwrap();
    assert_eq!(result.version, if old_server { 0 } else { 43 });
}

#[rstest]
#[case(true)]
#[case(false)]
#[tokio::test]
async fn test_drop_columns(#[case] old_server: bool) {
    let table = Table::new_with_handler("my_table", move |request| {
        if request.url().path() == "/v1/table/my_table/drop_columns/" {
            assert_eq!(request.method(), "POST");
            assert_eq!(
                request.headers().get("Content-Type").unwrap(),
                JSON_CONTENT_TYPE
            );

            let body = request.body().unwrap().as_bytes().unwrap();
            let body = std::str::from_utf8(body).unwrap();
            let value: serde_json::Value = serde_json::from_str(body).unwrap();
            let columns = value.get("columns").unwrap().as_array().unwrap();
            assert!(columns.len() == 2);

            let col1 = columns[0].as_str().unwrap();
            let col2 = columns[1].as_str().unwrap();
            assert_eq!(col1, "a");
            assert_eq!(col2, "b");

            if old_server {
                http::Response::builder().status(200).body("{}").unwrap()
            } else {
                http::Response::builder()
                    .status(200)
                    .body(r#"{"version": 43}"#)
                    .unwrap()
            }
        } else {
            panic!("Unexpected request path: {}", request.url().path());
        }
    });

    let result = table.drop_columns(&["a", "b"]).await.unwrap();
    assert_eq!(result.version, if old_server { 0 } else { 43 });
}

#[tokio::test]
@@ -2577,36 +2824,49 @@ mod tests {
    assert!(matches!(res, Err(Error::NotSupported { .. })));
}

#[rstest]
#[case(true)]
#[case(false)]
#[tokio::test]
async fn test_add_columns() {
    let table = Table::new_with_handler("my_table", |request| {
        assert_eq!(request.method(), "POST");
        assert_eq!(request.url().path(), "/v1/table/my_table/add_columns/");
        assert_eq!(
            request.headers().get("Content-Type").unwrap(),
            JSON_CONTENT_TYPE
        );
async fn test_add_columns(#[case] old_server: bool) {
    let table = Table::new_with_handler("my_table", move |request| {
        if request.url().path() == "/v1/table/my_table/add_columns/" {
            assert_eq!(request.method(), "POST");
            assert_eq!(
                request.headers().get("Content-Type").unwrap(),
                JSON_CONTENT_TYPE
            );

        let body = request.body().unwrap().as_bytes().unwrap();
        let body = std::str::from_utf8(body).unwrap();
        let value: serde_json::Value = serde_json::from_str(body).unwrap();
        let new_columns = value.get("new_columns").unwrap().as_array().unwrap();
        assert!(new_columns.len() == 2);
            let body = request.body().unwrap().as_bytes().unwrap();
            let body = std::str::from_utf8(body).unwrap();
            let value: serde_json::Value = serde_json::from_str(body).unwrap();
            let new_columns = value.get("new_columns").unwrap().as_array().unwrap();
            assert!(new_columns.len() == 2);

        let col_name = new_columns[0]["name"].as_str().unwrap();
        let expression = new_columns[0]["expression"].as_str().unwrap();
        assert_eq!(col_name, "b");
        assert_eq!(expression, "a + 1");
            let col_name = new_columns[0]["name"].as_str().unwrap();
            let expression = new_columns[0]["expression"].as_str().unwrap();
            assert_eq!(col_name, "b");
            assert_eq!(expression, "a + 1");

        let col_name = new_columns[1]["name"].as_str().unwrap();
        let expression = new_columns[1]["expression"].as_str().unwrap();
        assert_eq!(col_name, "x");
        assert_eq!(expression, "cast(NULL as int32)");
            let col_name = new_columns[1]["name"].as_str().unwrap();
            let expression = new_columns[1]["expression"].as_str().unwrap();
            assert_eq!(col_name, "x");
            assert_eq!(expression, "cast(NULL as int32)");

        http::Response::builder().status(200).body("{}").unwrap()
            if old_server {
                http::Response::builder().status(200).body("{}").unwrap()
            } else {
                http::Response::builder()
                    .status(200)
                    .body(r#"{"version": 43}"#)
                    .unwrap()
            }
        } else {
            panic!("Unexpected request path: {}", request.url().path());
        }
    });

    table
    let result = table
        .add_columns(
            NewColumnTransform::SqlExpressions(vec![
                ("b".into(), "a + 1".into()),
@@ -2616,75 +2876,8 @@ mod tests {
        )
        .await
        .unwrap();
}

#[tokio::test]
async fn test_alter_columns() {
    let table = Table::new_with_handler("my_table", |request| {
        assert_eq!(request.method(), "POST");
        assert_eq!(request.url().path(), "/v1/table/my_table/alter_columns/");
        assert_eq!(
            request.headers().get("Content-Type").unwrap(),
            JSON_CONTENT_TYPE
        );

        let body = request.body().unwrap().as_bytes().unwrap();
        let body = std::str::from_utf8(body).unwrap();
        let value: serde_json::Value = serde_json::from_str(body).unwrap();
        let alterations = value.get("alterations").unwrap().as_array().unwrap();
        assert!(alterations.len() == 2);

        let path = alterations[0]["path"].as_str().unwrap();
        let data_type = alterations[0]["data_type"]["type"].as_str().unwrap();
        assert_eq!(path, "b.c");
        assert_eq!(data_type, "int32");

        let path = alterations[1]["path"].as_str().unwrap();
        let nullable = alterations[1]["nullable"].as_bool().unwrap();
        let rename = alterations[1]["rename"].as_str().unwrap();
        assert_eq!(path, "x");
        assert!(nullable);
        assert_eq!(rename, "y");

        http::Response::builder().status(200).body("{}").unwrap()
    });

    table
        .alter_columns(&[
            ColumnAlteration::new("b.c".into()).cast_to(DataType::Int32),
            ColumnAlteration::new("x".into())
                .rename("y".into())
                .set_nullable(true),
        ])
        .await
        .unwrap();
}

#[tokio::test]
async fn test_drop_columns() {
    let table = Table::new_with_handler("my_table", |request| {
        assert_eq!(request.method(), "POST");
        assert_eq!(request.url().path(), "/v1/table/my_table/drop_columns/");
        assert_eq!(
            request.headers().get("Content-Type").unwrap(),
            JSON_CONTENT_TYPE
        );

        let body = request.body().unwrap().as_bytes().unwrap();
        let body = std::str::from_utf8(body).unwrap();
        let value: serde_json::Value = serde_json::from_str(body).unwrap();
        let columns = value.get("columns").unwrap().as_array().unwrap();
        assert!(columns.len() == 2);

        let col1 = columns[0].as_str().unwrap();
        let col2 = columns[1].as_str().unwrap();
        assert_eq!(col1, "a");
        assert_eq!(col2, "b");

        http::Response::builder().status(200).body("{}").unwrap()
    });

    table.drop_columns(&["a", "b"]).await.unwrap();
    assert_eq!(result.version, if old_server { 0 } else { 43 });
}

#[tokio::test]
@@ -2700,6 +2893,22 @@ mod tests {
    table.drop_index("my_index").await.unwrap();
}

#[tokio::test]
async fn test_drop_index_not_exists() {
    let table = Table::new_with_handler("my_table", |request| {
        assert_eq!(request.method(), "POST");
        assert_eq!(
            request.url().path(),
            "/v1/table/my_table/index/my_index/drop/"
        );
        http::Response::builder().status(404).body("{}").unwrap()
    });

    // Assert that the error is IndexNotFound
    let e = table.drop_index("my_index").await.unwrap_err();
    assert!(matches!(e, Error::IndexNotFound { .. }));
}

#[tokio::test]
async fn test_wait_for_index() {
    let table = _make_table_with_indices(0);

@@ -14,13 +14,12 @@ use datafusion_physical_plan::projection::ProjectionExec;
use datafusion_physical_plan::repartition::RepartitionExec;
use datafusion_physical_plan::union::UnionExec;
use datafusion_physical_plan::ExecutionPlan;
use futures::{StreamExt, TryStreamExt};
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt};
use lance::dataset::builder::DatasetBuilder;
use lance::dataset::cleanup::RemovalStats;
use lance::dataset::optimize::{compact_files, CompactionMetrics, IndexRemapperOptions};
use lance::dataset::scanner::Scanner;
pub use lance::dataset::ColumnAlteration;
pub use lance::dataset::MergeStats;
pub use lance::dataset::NewColumnTransform;
pub use lance::dataset::ReadParams;
pub use lance::dataset::Version;
@@ -81,7 +80,7 @@ pub mod merge;

use crate::index::waiter::wait_for_index;
pub use chrono::Duration;
use futures::future::join_all;
use futures::future::{join_all, Either};
pub use lance::dataset::optimize::CompactionOptions;
pub use lance::dataset::refs::{TagContents, Tags as LanceTags};
pub use lance::dataset::scanner::DatasetRecordBatchStream;
@@ -312,7 +311,7 @@ impl<T: IntoArrow> AddDataBuilder<T> {
        self
    }

    pub async fn execute(self) -> Result<()> {
    pub async fn execute(self) -> Result<AddResult> {
        let parent = self.parent.clone();
        let data = self.data.into_arrow()?;
        let without_data = AddDataBuilder::<NoData> {
@@ -380,8 +379,8 @@ impl UpdateBuilder {
    }

    /// Executes the update operation.
    /// Returns the number of rows that were updated.
    pub async fn execute(self) -> Result<u64> {
    /// Returns the update result.
    pub async fn execute(self) -> Result<UpdateResult> {
        if self.columns.is_empty() {
            Err(Error::InvalidInput {
                message: "at least one column must be specified in an update operation".to_string(),
@@ -424,6 +423,82 @@ pub trait Tags: Send + Sync {
    async fn update(&mut self, tag: &str, version: u64) -> Result<()>;
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct UpdateResult {
    #[serde(default)]
    pub rows_updated: u64,
    /// The commit version associated with the operation.
    /// A version of `0` indicates compatibility with legacy servers that do not return
    /// a commit version.
    #[serde(default)]
    pub version: u64,
}
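The `#[serde(default)]` attributes are what make the legacy-server story work: an old server that replies with an empty JSON object still deserializes cleanly, with every field zeroed. A minimal sketch, assuming `UpdateResult` and `serde_json` are in scope:

// Hedged illustration, not part of the diff.
let legacy: UpdateResult = serde_json::from_str("{}").unwrap();
assert_eq!(legacy.version, 0); // 0 means the server reported no commit version
assert_eq!(legacy.rows_updated, 0);
let modern: UpdateResult = serde_json::from_str(r#"{"rows_updated": 5, "version": 43}"#).unwrap();
assert_eq!(modern.version, 43);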

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct AddResult {
    /// The commit version associated with the operation.
    /// A version of `0` indicates compatibility with legacy servers that do not return
    /// a commit version.
    #[serde(default)]
    pub version: u64,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct DeleteResult {
    /// The commit version associated with the operation.
    /// A version of `0` indicates compatibility with legacy servers that do not return
    /// a commit version.
    #[serde(default)]
    pub version: u64,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct MergeResult {
    /// The commit version associated with the operation.
    /// A version of `0` indicates compatibility with legacy servers that do not return
    /// a commit version.
    #[serde(default)]
    pub version: u64,
    /// Number of inserted rows (for user statistics)
    #[serde(default)]
    pub num_inserted_rows: u64,
    /// Number of updated rows (for user statistics)
    #[serde(default)]
    pub num_updated_rows: u64,
    /// Number of deleted rows (for user statistics)
    /// Note: This is different from internal references to 'deleted_rows', since updated rows
    /// are technically "deleted" during processing; those internal deletions are not reported
    /// to the user.
    #[serde(default)]
    pub num_deleted_rows: u64,
}
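For orientation, a hedged sketch of how a caller might consume a `MergeResult`; `table` and `new_data` are assumed to exist, built as in the tests above, inside an async function that returns `Result`:

// Illustrative only.
let result = table.merge_insert(&["id"]).execute(new_data).await?;
if result.version == 0 {
    // Legacy server: no commit version reported.
} else {
    println!("committed as table version {}", result.version);
}
println!(
    "inserted={} updated={} deleted={}",
    result.num_inserted_rows, result.num_updated_rows, result.num_deleted_rows
);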

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct AddColumnsResult {
    /// The commit version associated with the operation.
    /// A version of `0` indicates compatibility with legacy servers that do not return
    /// a commit version.
    #[serde(default)]
    pub version: u64,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct AlterColumnsResult {
    /// The commit version associated with the operation.
    /// A version of `0` indicates compatibility with legacy servers that do not return
    /// a commit version.
    #[serde(default)]
    pub version: u64,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct DropColumnsResult {
    /// The commit version associated with the operation.
    /// A version of `0` indicates compatibility with legacy servers that do not return
    /// a commit version.
    #[serde(default)]
    pub version: u64,
}

/// A trait for anything "table-like". This is used for both native tables (which target
/// Lance datasets) and remote tables (which target LanceDB cloud).
///
@@ -468,11 +543,11 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {
        &self,
        add: AddDataBuilder<NoData>,
        data: Box<dyn arrow_array::RecordBatchReader + Send>,
    ) -> Result<()>;
    ) -> Result<AddResult>;
    /// Delete rows from the table.
    async fn delete(&self, predicate: &str) -> Result<()>;
    async fn delete(&self, predicate: &str) -> Result<DeleteResult>;
    /// Update rows in the table.
    async fn update(&self, update: UpdateBuilder) -> Result<u64>;
    async fn update(&self, update: UpdateBuilder) -> Result<UpdateResult>;
    /// Create an index on the provided column(s).
    async fn create_index(&self, index: IndexBuilder) -> Result<()>;
    /// List the indices on the table.
@@ -488,7 +563,7 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {
        &self,
        params: MergeInsertBuilder,
        new_data: Box<dyn RecordBatchReader + Send>,
    ) -> Result<MergeStats>;
    ) -> Result<MergeResult>;
    /// Gets the table tag manager.
    async fn tags(&self) -> Result<Box<dyn Tags + '_>>;
    /// Optimize the dataset.
@@ -498,11 +573,11 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {
        &self,
        transforms: NewColumnTransform,
        read_columns: Option<Vec<String>>,
    ) -> Result<()>;
    ) -> Result<AddColumnsResult>;
    /// Alter columns in the table.
    async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<()>;
    async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<AlterColumnsResult>;
    /// Drop columns from the table.
    async fn drop_columns(&self, columns: &[&str]) -> Result<()>;
    async fn drop_columns(&self, columns: &[&str]) -> Result<DropColumnsResult>;
    /// Get the version of the table.
    async fn version(&self) -> Result<u64>;
    /// Checkout a specific version of the table.
@@ -731,7 +806,7 @@ impl Table {
    /// tbl.delete("id > 5").await.unwrap();
    /// # });
    /// ```
    pub async fn delete(&self, predicate: &str) -> Result<()> {
    pub async fn delete(&self, predicate: &str) -> Result<DeleteResult> {
        self.inner.delete(predicate).await
    }

@@ -1046,17 +1121,20 @@ impl Table {
        &self,
        transforms: NewColumnTransform,
        read_columns: Option<Vec<String>>,
    ) -> Result<()> {
    ) -> Result<AddColumnsResult> {
        self.inner.add_columns(transforms, read_columns).await
    }

    /// Change a column's name or nullability.
    pub async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<()> {
    pub async fn alter_columns(
        &self,
        alterations: &[ColumnAlteration],
    ) -> Result<AlterColumnsResult> {
        self.inner.alter_columns(alterations).await
    }

    /// Remove columns from the table.
    pub async fn drop_columns(&self, columns: &[&str]) -> Result<()> {
    pub async fn drop_columns(&self, columns: &[&str]) -> Result<DropColumnsResult> {
        self.inner.drop_columns(columns).await
    }

@@ -1936,7 +2014,7 @@ impl NativeTable {
    /// more information.
    pub async fn uses_v2_manifest_paths(&self) -> Result<bool> {
        let dataset = self.dataset.get().await?;
        Ok(dataset.manifest_naming_scheme == ManifestNamingScheme::V2)
        Ok(dataset.manifest_location().naming_scheme == ManifestNamingScheme::V2)
    }

    /// Migrate the table to use the new manifest path scheme.
@@ -2089,7 +2167,7 @@ impl BaseTable for NativeTable {
        &self,
        add: AddDataBuilder<NoData>,
        data: Box<dyn RecordBatchReader + Send>,
    ) -> Result<()> {
    ) -> Result<AddResult> {
        let data = Box::new(MaybeEmbedded::try_new(
            data,
            self.table_definition().await?,
@@ -2112,9 +2190,9 @@ impl BaseTable for NativeTable {
            .execute_stream(data)
            .await?
        };

        let version = dataset.manifest().version;
        self.dataset.set_latest(dataset).await;
        Ok(())
        Ok(AddResult { version })
    }

    async fn create_index(&self, opts: IndexBuilder) -> Result<()> {
@@ -2160,7 +2238,7 @@ impl BaseTable for NativeTable {
        Ok(dataset.prewarm_index(index_name).await?)
    }

    async fn update(&self, update: UpdateBuilder) -> Result<u64> {
    async fn update(&self, update: UpdateBuilder) -> Result<UpdateResult> {
        let dataset = self.dataset.get().await?.clone();
        let mut builder = LanceUpdateBuilder::new(Arc::new(dataset));
        if let Some(predicate) = update.filter {
@@ -2176,7 +2254,10 @@ impl BaseTable for NativeTable {
        self.dataset
            .set_latest(res.new_dataset.as_ref().clone())
            .await;
        Ok(res.rows_updated)
        Ok(UpdateResult {
            rows_updated: res.rows_updated,
            version: res.new_dataset.version().version,
        })
    }

    async fn create_plan(
@@ -2368,7 +2449,7 @@ impl BaseTable for NativeTable {
        &self,
        params: MergeInsertBuilder,
        new_data: Box<dyn RecordBatchReader + Send>,
    ) -> Result<MergeStats> {
    ) -> Result<MergeResult> {
        let dataset = Arc::new(self.dataset.get().await?.clone());
        let mut builder = LanceMergeInsertBuilder::try_new(dataset.clone(), params.on)?;
        match (
@@ -2394,16 +2475,43 @@ impl BaseTable for NativeTable {
        } else {
            builder.when_not_matched_by_source(WhenNotMatchedBySource::Keep);
        }
        let job = builder.try_build()?;
        let (new_dataset, stats) = job.execute_reader(new_data).await?;

        let future = if let Some(timeout) = params.timeout {
            // The default retry timeout is 30s, so we pass the full timeout down
            // as well in case it is longer than that.
            let future = builder
                .retry_timeout(timeout)
                .try_build()?
                .execute_reader(new_data);
            Either::Left(tokio::time::timeout(timeout, future).map(|res| match res {
                Ok(Ok((new_dataset, stats))) => Ok((new_dataset, stats)),
                Ok(Err(e)) => Err(e.into()),
                Err(_) => Err(Error::Runtime {
                    message: "merge insert timed out".to_string(),
                }),
            }))
        } else {
            let job = builder.try_build()?;
            Either::Right(job.execute_reader(new_data).map_err(|e| e.into()))
        };
        let (new_dataset, stats) = future.await?;
        let version = new_dataset.manifest().version;
        self.dataset.set_latest(new_dataset.as_ref().clone()).await;
        Ok(stats)
        Ok(MergeResult {
            version,
            num_updated_rows: stats.num_updated_rows,
            num_inserted_rows: stats.num_inserted_rows,
            num_deleted_rows: stats.num_deleted_rows,
        })
    }
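The `Either` above exists because the timeout and no-timeout branches produce different future types that must share one `.await`. A standalone sketch of the same pattern with a trivial stand-in job (names here are illustrative, not from the diff):

use futures::{future::Either, FutureExt};
use std::time::Duration;

async fn run_with_optional_timeout(limit: Option<Duration>) -> Result<u64, String> {
    let job = async { Ok::<u64, String>(42) }; // stand-in for the merge job
    let fut = if let Some(limit) = limit {
        // Map the timeout error into the same error type the job itself returns.
        Either::Left(tokio::time::timeout(limit, job).map(|res| match res {
            Ok(inner) => inner,
            Err(_) => Err("merge insert timed out".to_string()),
        }))
    } else {
        Either::Right(job)
    };
    // Both arms are futures with the same Output, so one .await serves both.
    fut.await
}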

    /// Delete rows from the table.
    async fn delete(&self, predicate: &str) -> Result<()> {
        self.dataset.get_mut().await?.delete(predicate).await?;
        Ok(())
    async fn delete(&self, predicate: &str) -> Result<DeleteResult> {
        let mut dataset = self.dataset.get_mut().await?;
        dataset.delete(predicate).await?;
        Ok(DeleteResult {
            version: dataset.version().version,
        })
    }

    async fn tags(&self) -> Result<Box<dyn Tags + '_>> {
@@ -2470,27 +2578,28 @@ impl BaseTable for NativeTable {
        &self,
        transforms: NewColumnTransform,
        read_columns: Option<Vec<String>>,
    ) -> Result<()> {
        self.dataset
            .get_mut()
            .await?
            .add_columns(transforms, read_columns, None)
            .await?;
        Ok(())
    ) -> Result<AddColumnsResult> {
        let mut dataset = self.dataset.get_mut().await?;
        dataset.add_columns(transforms, read_columns, None).await?;
        Ok(AddColumnsResult {
            version: dataset.version().version,
        })
    }

    async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<()> {
        self.dataset
            .get_mut()
            .await?
            .alter_columns(alterations)
            .await?;
        Ok(())
    async fn alter_columns(&self, alterations: &[ColumnAlteration]) -> Result<AlterColumnsResult> {
        let mut dataset = self.dataset.get_mut().await?;
        dataset.alter_columns(alterations).await?;
        Ok(AlterColumnsResult {
            version: dataset.version().version,
        })
    }

    async fn drop_columns(&self, columns: &[&str]) -> Result<()> {
        self.dataset.get_mut().await?.drop_columns(columns).await?;
        Ok(())
    async fn drop_columns(&self, columns: &[&str]) -> Result<DropColumnsResult> {
        let mut dataset = self.dataset.get_mut().await?;
        dataset.drop_columns(columns).await?;
        Ok(DropColumnsResult {
            version: dataset.version().version,
        })
    }

    async fn list_indices(&self) -> Result<Vec<IndexConfig>> {

@@ -1,14 +1,13 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use std::sync::Arc;
use std::{sync::Arc, time::Duration};

use arrow_array::RecordBatchReader;
use lance::dataset::MergeStats;

use crate::Result;

use super::BaseTable;
use super::{BaseTable, MergeResult};

/// A builder used to create and run a merge insert operation
///
@@ -22,6 +21,7 @@ pub struct MergeInsertBuilder {
    pub(crate) when_not_matched_insert_all: bool,
    pub(crate) when_not_matched_by_source_delete: bool,
    pub(crate) when_not_matched_by_source_delete_filt: Option<String>,
    pub(crate) timeout: Option<Duration>,
}

impl MergeInsertBuilder {
@@ -34,6 +34,7 @@ impl MergeInsertBuilder {
            when_not_matched_insert_all: false,
            when_not_matched_by_source_delete: false,
            when_not_matched_by_source_delete_filt: None,
            timeout: None,
        }
    }

@@ -85,11 +86,26 @@ impl MergeInsertBuilder {
        self
    }

    /// Maximum time to run the operation before cancelling it.
    ///
    /// By default, there is a 30-second timeout that is only enforced after the
    /// first attempt. This is to prevent spending too long retrying to resolve
    /// conflicts. For example, if a write attempt takes 20 seconds and fails,
    /// the second attempt will be cancelled after 10 seconds, hitting the
    /// 30-second timeout. However, a write that takes one hour and succeeds on the
    /// first attempt will not be cancelled.
    ///
    /// When this is set, the timeout is enforced on all attempts, including the first.
    pub fn timeout(&mut self, timeout: Duration) -> &mut Self {
        self.timeout = Some(timeout);
        self
    }

    /// Executes the merge insert operation
    ///
    /// Returns statistics about the merge operation including the number of rows
    /// Returns the version and statistics about the merge operation, including the number of rows
    /// inserted, updated, and deleted.
    pub async fn execute(self, new_data: Box<dyn RecordBatchReader + Send>) -> Result<MergeStats> {
    pub async fn execute(self, new_data: Box<dyn RecordBatchReader + Send>) -> Result<MergeResult> {
        self.table.clone().merge_insert(self, new_data).await
    }
}
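A hedged usage sketch of the new `timeout` knob, assuming a `table` handle, a `new_data` reader as in the tests, and `Duration` in scope, inside an async function that returns `Result`; with the timeout set, the limit applies to every attempt, including the first:

// Illustrative only.
let mut builder = table.merge_insert(&["some_col"]);
builder
    .when_matched_update_all(None)
    .timeout(Duration::from_secs(10));
let result = builder.execute(new_data).await?;
println!("merge committed at version {}", result.version);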