mirror of
https://github.com/lancedb/lancedb.git
synced 2025-12-24 13:59:58 +00:00
Compare commits

30 commits, comparing python-v0. ... python-v0.

Commit SHA1s (author and date not shown):
419a433244, a9311c4dc0, 178bcf9c90, b9be092cb1, e8c0c52315, a60fa0d3b7,
726d629b9b, b493f56dee, a8b5ad7e74, f8f6264883, d8517117f1, ab66dd5ed2,
cbb9a7877c, b7fc223535, 1fdaf7a1a4, d11819c90c, 9b902272f1, 8c0622fa2c,
2191f948c3, acc3b03004, 7f091b8c8e, c19bdd9a24, dad0ff5cd2, a705621067,
39614fdb7d, 96d534d4bc, 5051d30d09, db853c4041, 76d1d22bdc, d8746c61c6
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.19.0-beta.8"
+current_version = "0.19.0"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.

.github/workflows/docs.yml (vendored, 13 lines changed)
@@ -18,17 +18,24 @@ concurrency:
  group: "pages"
  cancel-in-progress: true

+env:
+  # This reduces the disk space needed for the build
+  RUSTFLAGS: "-C debuginfo=0"
+  # according to: https://matklad.github.io/2021/09/04/fast-rust-builds.html
+  # CI builds are faster with incremental disabled.
+  CARGO_INCREMENTAL: "0"
+
jobs:
  # Single deploy job since we're just deploying
  build:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
-    runs-on: buildjet-8vcpu-ubuntu-2204
+    runs-on: ubuntu-24.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
-      - name: Install dependecies needed for ubuntu
+      - name: Install dependencies needed for ubuntu
        run: |
          sudo apt install -y protobuf-compiler libssl-dev
          rustup update && rustup default

@@ -38,6 +45,7 @@ jobs:
          python-version: "3.10"
          cache: "pip"
          cache-dependency-path: "docs/requirements.txt"
+      - uses: Swatinem/rust-cache@v2
      - name: Build Python
        working-directory: python
        run: |

@@ -49,7 +57,6 @@ jobs:
          node-version: 20
          cache: 'npm'
          cache-dependency-path: node/package-lock.json
-      - uses: Swatinem/rust-cache@v2
      - name: Install node dependencies
        working-directory: node
        run: |
Cargo.lock (generated, 280 lines changed)

Routine lockfile refresh; each bumped crate also gets a matching checksum update.

General dependency updates: anyhow 1.0.97 → 1.0.98, async-compression 0.4.22 → 0.4.23,
blake3 1.8.1 → 1.8.2, cc 1.2.19 → 1.2.20, ethnum 1.5.0 → 1.5.1, getrandom 0.2.15 → 0.2.16
(all dependent references updated as well), h2 0.4.8 → 0.4.9 (references updated),
jiff and jiff-static 0.2.6 → 0.2.10, libc 0.2.171 → 0.2.172, libm 0.2.11 → 0.2.13,
proc-macro2 1.0.94 → 1.0.95, quinn-proto 0.11.10 → 0.11.11, rand 0.9.0 → 0.9.1
(no longer depends on zerocopy), shellexpand 3.1.0 → 3.1.1, signal-hook-registry
1.4.2 → 1.4.5, tokio-util 0.7.14 → 0.7.15, winnow 0.7.6 → 0.7.7, and zerocopy plus
zerocopy-derive 0.8.24 → 0.8.25.

AWS SDK updates: aws-config 1.6.1 → 1.6.2, aws-credential-types 1.2.2 → 1.2.3,
aws-runtime 1.5.6 → 1.5.7, aws-sdk-bedrockruntime 1.82.0 → 1.83.0,
aws-sdk-dynamodb 1.71.2 → 1.72.0, aws-sdk-kms 1.65.0 → 1.66.0, aws-sdk-s3 1.82.0 → 1.83.0,
aws-sdk-sso 1.64.0 → 1.65.0, aws-sdk-ssooidc 1.65.0 → 1.66.0, aws-sdk-sts 1.65.0 → 1.66.0,
aws-sigv4 1.3.0 → 1.3.1, aws-smithy-http 0.62.0 → 0.62.1,
aws-smithy-observability 0.1.2 → 0.1.3, aws-smithy-runtime 1.8.1 → 1.8.3,
aws-smithy-runtime-api 1.7.4 → 1.8.0, aws-smithy-types 1.3.0 → 1.3.1, and
aws-types 1.3.6 → 1.3.7; a few of these hunks also drop a single entry from the
affected crate's dependency list.

dirs/dirs-sys/redox_users: the existing dirs 5.0.1 entry now references dirs-sys 0.4.1
and redox_users 0.4.6, and new dirs 6.0.0, dirs-sys 0.5.0, and redox_users 0.5.0 entries
are added; shellexpand 3.1.1 switches to dirs 6.0.0 while other consumers (including
lance-index) keep dirs 5.0.1.

Lance crates (fsst, lance, lance-arrow, lance-core, lance-datafusion, lance-datagen,
lance-encoding, lance-file, lance-index, lance-io, lance-linalg, lance-table,
lance-testing, all at 0.26.0): the source changes from the git tag v0.26.0-beta.1 of
https://github.com/lancedb/lance to the crates.io registry; lance gains an "either"
dependency and lance-datafusion gains "pin-project" and "tempfile".

Workspace crates: lancedb, lancedb-node, and lancedb-nodejs move from 0.19.0-beta.8 to
0.19.0-beta.11 (lancedb-nodejs also adds an aws-lc-sys dependency), and lancedb-python
moves from 0.22.0-beta.8 to 0.22.0-beta.11.
Cargo.toml (18 lines changed)

@@ -21,16 +21,14 @@ categories = ["database-implementations"]
rust-version = "1.78.0"

[workspace.dependencies]
-lance = { "version" = "=0.26.0", "features" = [
-    "dynamodb",
-], tag = "v0.26.0-beta.1", git = "https://github.com/lancedb/lance" }
-lance-io = { version = "=0.26.0", tag = "v0.26.0-beta.1", git = "https://github.com/lancedb/lance" }
-lance-index = { version = "=0.26.0", tag = "v0.26.0-beta.1", git = "https://github.com/lancedb/lance" }
-lance-linalg = { version = "=0.26.0", tag = "v0.26.0-beta.1", git = "https://github.com/lancedb/lance" }
-lance-table = { version = "=0.26.0", tag = "v0.26.0-beta.1", git = "https://github.com/lancedb/lance" }
-lance-testing = { version = "=0.26.0", tag = "v0.26.0-beta.1", git = "https://github.com/lancedb/lance" }
-lance-datafusion = { version = "=0.26.0", tag = "v0.26.0-beta.1", git = "https://github.com/lancedb/lance" }
-lance-encoding = { version = "=0.26.0", tag = "v0.26.0-beta.1", git = "https://github.com/lancedb/lance" }
+lance = { "version" = "=0.26.0", "features" = ["dynamodb"] }
+lance-io = "=0.26.0"
+lance-index = "=0.26.0"
+lance-linalg = "=0.26.0"
+lance-table = "=0.26.0"
+lance-testing = "=0.26.0"
+lance-datafusion = "=0.26.0"
+lance-encoding = "=0.26.0"
# Note that this one does not include pyarrow
arrow = { version = "54.1", optional = false }
arrow-array = "54.1"
@@ -117,8 +117,8 @@ wish to return to standard mode, call `checkoutLatest`.

#### Parameters

-* **version**: `number`
-  The version to checkout
+* **version**: `string` \| `number`
+  The version to checkout, could be version number or tag

#### Returns
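With `checkout` now accepting either a numeric version or a tag name, callers can use both interchangeably. A minimal, hypothetical sketch (the database path, table name, and tag "v1" are assumptions, not part of the diff):

```typescript
import * as lancedb from "@lancedb/lancedb";

// Assumed setup: a local database at ./data containing "my_table",
// with at least two versions and a tag "v1" already created.
const db = await lancedb.connect("./data");
const table = await db.openTable("my_table");

await table.checkout(2); // checkout by numeric version, as before
await table.checkout("v1"); // checkout by tag name, new in this change

// Either way the table is now in time-travel mode; return to the
// latest version before writing again.
await table.checkoutLatest();
```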
@@ -615,6 +615,34 @@ of the given query

***

### tags()

```ts
abstract tags(): Promise<Tags>
```

Get a tags manager for this table.

Tags allow you to label specific versions of a table with a human-readable name.
The returned tags manager can be used to list, create, update, or delete tags.

#### Returns

`Promise`<[`Tags`](Tags.md)>

A tags manager for this table

#### Example

```typescript
const tagsManager = await table.tags();
await tagsManager.create("v1", 1);
const tags = await tagsManager.list();
console.log(tags); // { "v1": { version: 1, manifestSize: ... } }
```

***

### toArrow()

```ts
docs/src/js/classes/TagContents.md (new file, 35 lines)

@@ -0,0 +1,35 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / TagContents

# Class: TagContents

## Constructors

### new TagContents()

```ts
new TagContents(): TagContents
```

#### Returns

[`TagContents`](TagContents.md)

## Properties

### manifestSize

```ts
manifestSize: number;
```

***

### version

```ts
version: number;
```
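The two properties above are what `Tags.list()` returns for each tag name. A short sketch of reading them (a hypothetical helper; it assumes an open `Table` that already has tags created):

```typescript
import { Table, TagContents } from "@lancedb/lancedb";

// Hypothetical helper: print every tag together with its TagContents fields.
async function printTags(table: Table): Promise<void> {
  const tags = await table.tags();
  const contents: Record<string, TagContents> = await tags.list();
  for (const [name, info] of Object.entries(contents)) {
    console.log(`${name}: version ${info.version}, manifest size ${info.manifestSize}`);
  }
}
```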
docs/src/js/classes/Tags.md (new file, 99 lines)

@@ -0,0 +1,99 @@
[**@lancedb/lancedb**](../README.md) • **Docs**

***

[@lancedb/lancedb](../globals.md) / Tags

# Class: Tags

## Constructors

### new Tags()

```ts
new Tags(): Tags
```

#### Returns

[`Tags`](Tags.md)

## Methods

### create()

```ts
create(tag, version): Promise<void>
```

#### Parameters

* **tag**: `string`

* **version**: `number`

#### Returns

`Promise`<`void`>

***

### delete()

```ts
delete(tag): Promise<void>
```

#### Parameters

* **tag**: `string`

#### Returns

`Promise`<`void`>

***

### getVersion()

```ts
getVersion(tag): Promise<number>
```

#### Parameters

* **tag**: `string`

#### Returns

`Promise`<`number`>

***

### list()

```ts
list(): Promise<Record<string, TagContents>>
```

#### Returns

`Promise`<`Record`<`string`, [`TagContents`](TagContents.md)>>

***

### update()

```ts
update(tag, version): Promise<void>
```

#### Parameters

* **tag**: `string`

* **version**: `number`

#### Returns

`Promise`<`void`>
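Taken together, the methods above cover the full tag lifecycle. A hypothetical walkthrough (it assumes an open `table` whose latest version is 2; the tag name "stable" is illustrative):

```typescript
// Hypothetical usage of the Tags methods documented above.
const tags = await table.tags();

await tags.create("stable", 1); // label version 1
console.log(await tags.getVersion("stable")); // -> 1

await tags.update("stable", 2); // move the label to version 2
await table.checkout("stable"); // tag names also work with checkout()
await table.checkoutLatest();

await tags.delete("stable"); // remove the label again
```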
@@ -27,6 +27,8 @@
 - [QueryBase](classes/QueryBase.md)
 - [RecordBatchIterator](classes/RecordBatchIterator.md)
 - [Table](classes/Table.md)
+- [TagContents](classes/TagContents.md)
+- [Tags](classes/Tags.md)
 - [VectorColumnOptions](classes/VectorColumnOptions.md)
 - [VectorQuery](classes/VectorQuery.md)
@@ -8,7 +8,7 @@
  <parent>
    <groupId>com.lancedb</groupId>
    <artifactId>lancedb-parent</artifactId>
-    <version>0.19.0-beta.8</version>
+    <version>0.19.0-final.0</version>
    <relativePath>../pom.xml</relativePath>
  </parent>

@@ -6,7 +6,7 @@
  <groupId>com.lancedb</groupId>
  <artifactId>lancedb-parent</artifactId>
-  <version>0.19.0-beta.8</version>
+  <version>0.19.0-final.0</version>
  <packaging>pom</packaging>

  <name>LanceDB Parent</name>
node/package-lock.json (generated, 44 lines changed)

The "vectordb" package version moves from 0.19.0-beta.8 to 0.19.0 in both the top-level
entry and packages[""]; the optionalDependencies on @lancedb/vectordb-darwin-arm64,
-darwin-x64, -linux-arm64-gnu, -linux-x64-gnu, and -win32-x64-msvc are bumped from
0.19.0-beta.8 to 0.19.0; and each of those platform packages' "version", "resolved"
tarball URL, and "integrity" hash are updated to the 0.19.0 release.
@@ -1,6 +1,6 @@
{
  "name": "vectordb",
-  "version": "0.19.0-beta.8",
+  "version": "0.19.0",
  "description": " Serverless, low-latency vector database for AI applications",
  "private": false,
  "main": "dist/index.js",

@@ -89,10 +89,10 @@
  },
  "optionalDependencies": {
-    "@lancedb/vectordb-darwin-x64": "0.19.0-beta.8",
-    "@lancedb/vectordb-darwin-arm64": "0.19.0-beta.8",
-    "@lancedb/vectordb-linux-x64-gnu": "0.19.0-beta.8",
-    "@lancedb/vectordb-linux-arm64-gnu": "0.19.0-beta.8",
-    "@lancedb/vectordb-win32-x64-msvc": "0.19.0-beta.8"
+    "@lancedb/vectordb-darwin-x64": "0.19.0",
+    "@lancedb/vectordb-darwin-arm64": "0.19.0",
+    "@lancedb/vectordb-linux-x64-gnu": "0.19.0",
+    "@lancedb/vectordb-linux-arm64-gnu": "0.19.0",
+    "@lancedb/vectordb-win32-x64-msvc": "0.19.0"
  }
}
@@ -1,7 +1,7 @@
[package]
name = "lancedb-nodejs"
edition.workspace = true
-version = "0.19.0-beta.8"
+version = "0.19.0"
license.workspace = true
description.workspace = true
repository.workspace = true

@@ -28,6 +28,9 @@ napi-derive = "2.16.4"
lzma-sys = { version = "*", features = ["static"] }
log.workspace = true

+# Workaround for build failure until we can fix it.
+aws-lc-sys = "=0.28.0"
+
[build-dependencies]
napi-build = "2.1"
@@ -1178,6 +1178,73 @@ describe("when dealing with versioning", () => {
  });
});

describe("when dealing with tags", () => {
  let tmpDir: tmp.DirResult;
  beforeEach(() => {
    tmpDir = tmp.dirSync({ unsafeCleanup: true });
  });
  afterEach(() => {
    tmpDir.removeCallback();
  });

  it("can manage tags", async () => {
    const conn = await connect(tmpDir.name, {
      readConsistencyInterval: 0,
    });

    const table = await conn.createTable("my_table", [
      { id: 1n, vector: [0.1, 0.2] },
    ]);
    expect(await table.version()).toBe(1);

    await table.add([{ id: 2n, vector: [0.3, 0.4] }]);
    expect(await table.version()).toBe(2);

    const tagsManager = await table.tags();

    const initialTags = await tagsManager.list();
    expect(Object.keys(initialTags).length).toBe(0);

    const tag1 = "tag1";
    await tagsManager.create(tag1, 1);
    expect(await tagsManager.getVersion(tag1)).toBe(1);

    const tagsAfterFirst = await tagsManager.list();
    expect(Object.keys(tagsAfterFirst).length).toBe(1);
    expect(tagsAfterFirst).toHaveProperty(tag1);
    expect(tagsAfterFirst[tag1].version).toBe(1);

    await tagsManager.create("tag2", 2);
    expect(await tagsManager.getVersion("tag2")).toBe(2);

    const tagsAfterSecond = await tagsManager.list();
    expect(Object.keys(tagsAfterSecond).length).toBe(2);
    expect(tagsAfterSecond).toHaveProperty(tag1);
    expect(tagsAfterSecond[tag1].version).toBe(1);
    expect(tagsAfterSecond).toHaveProperty("tag2");
    expect(tagsAfterSecond["tag2"].version).toBe(2);

    await table.add([{ id: 3n, vector: [0.5, 0.6] }]);
    await tagsManager.update(tag1, 3);
    expect(await tagsManager.getVersion(tag1)).toBe(3);

    await tagsManager.delete("tag2");
    const tagsAfterDelete = await tagsManager.list();
    expect(Object.keys(tagsAfterDelete).length).toBe(1);
    expect(tagsAfterDelete).toHaveProperty(tag1);
    expect(tagsAfterDelete[tag1].version).toBe(3);

    await table.add([{ id: 4n, vector: [0.7, 0.8] }]);
    expect(await table.version()).toBe(4);

    await table.checkout(tag1);
    expect(await table.version()).toBe(3);

    await table.checkoutLatest();
    expect(await table.version()).toBe(4);
  });
});

describe("when optimizing a dataset", () => {
  let tmpDir: tmp.DirResult;
  let table: Table;
@@ -23,6 +23,8 @@ export {
  OptimizeStats,
  CompactionStats,
  RemovalStats,
+  Tags,
+  TagContents,
} from "./native.js";

export {
|
||||
IndexConfig,
|
||||
IndexStatistics,
|
||||
OptimizeStats,
|
||||
Tags,
|
||||
Table as _NativeTable,
|
||||
} from "./native";
|
||||
import {
|
||||
@@ -374,7 +375,7 @@ export abstract class Table {
|
||||
*
|
||||
* Calling this method will set the table into time-travel mode. If you
|
||||
* wish to return to standard mode, call `checkoutLatest`.
|
||||
* @param {number} version The version to checkout
|
||||
* @param {number | string} version The version to checkout, could be version number or tag
|
||||
* @example
|
||||
* ```typescript
|
||||
* import * as lancedb from "@lancedb/lancedb"
|
||||
@@ -390,7 +391,8 @@ export abstract class Table {
|
||||
* console.log(await table.version()); // 2
|
||||
* ```
|
||||
*/
|
||||
abstract checkout(version: number): Promise<void>;
|
||||
abstract checkout(version: number | string): Promise<void>;
|
||||
|
||||
/**
|
||||
* Checkout the latest version of the table. _This is an in-place operation._
|
||||
*
|
||||
@@ -404,6 +406,23 @@ export abstract class Table {
|
||||
*/
|
||||
abstract listVersions(): Promise<Version[]>;
|
||||
|
||||
/**
|
||||
* Get a tags manager for this table.
|
||||
*
|
||||
* Tags allow you to label specific versions of a table with a human-readable name.
|
||||
* The returned tags manager can be used to list, create, update, or delete tags.
|
||||
*
|
||||
* @returns {Tags} A tags manager for this table
|
||||
* @example
|
||||
* ```typescript
|
||||
* const tagsManager = await table.tags();
|
||||
* await tagsManager.create("v1", 1);
|
||||
* const tags = await tagsManager.list();
|
||||
* console.log(tags); // { "v1": { version: 1, manifestSize: ... } }
|
||||
* ```
|
||||
*/
|
||||
abstract tags(): Promise<Tags>;
|
||||
|
||||
/**
|
||||
* Restore the table to the currently checked out version
|
||||
*
|
||||
@@ -699,8 +718,11 @@ export class LocalTable extends Table {
|
||||
return await this.inner.version();
|
||||
}
|
||||
|
||||
async checkout(version: number): Promise<void> {
|
||||
await this.inner.checkout(version);
|
||||
async checkout(version: number | string): Promise<void> {
|
||||
if (typeof version === "string") {
|
||||
return this.inner.checkoutTag(version);
|
||||
}
|
||||
return this.inner.checkout(version);
|
||||
}
|
||||
|
||||
async checkoutLatest(): Promise<void> {
|
||||
@@ -719,6 +741,10 @@ export class LocalTable extends Table {
|
||||
await this.inner.restore();
|
||||
}
|
||||
|
||||
async tags(): Promise<Tags> {
|
||||
return await this.inner.tags();
|
||||
}
|
||||
|
||||
async optimize(options?: Partial<OptimizeOptions>): Promise<OptimizeStats> {
|
||||
let cleanupOlderThanMs;
|
||||
if (
|
||||
|
||||
In each of the platform binary packages @lancedb/lancedb-darwin-arm64,
@lancedb/lancedb-darwin-x64, @lancedb/lancedb-linux-arm64-gnu,
@lancedb/lancedb-linux-arm64-musl, @lancedb/lancedb-linux-x64-gnu,
@lancedb/lancedb-linux-x64-musl, @lancedb/lancedb-win32-arm64-msvc, and
@lancedb/lancedb-win32-x64-msvc, the package.json version field changes:

@@ -1,6 +1,6 @@
-  "version": "0.19.0-beta.8",
+  "version": "0.19.0",

The surrounding "name", "os", "cpu", and "main" fields are unchanged context.
nodejs/package-lock.json (generated, 4 lines changed)

@@ -1,12 +1,12 @@
{
  "name": "@lancedb/lancedb",
-  "version": "0.19.0-beta.8",
+  "version": "0.19.0",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "@lancedb/lancedb",
-      "version": "0.19.0-beta.8",
+      "version": "0.19.0",
      "cpu": [
        "x64",
        "arm64"

@@ -11,7 +11,7 @@
    "ann"
  ],
  "private": false,
-  "version": "0.19.0-beta.8",
+  "version": "0.19.0",
  "main": "dist/index.js",
  "exports": {
    ".": "./dist/index.js",
@@ -249,6 +249,14 @@ impl Table {
            .default_error()
    }

+    #[napi(catch_unwind)]
+    pub async fn checkout_tag(&self, tag: String) -> napi::Result<()> {
+        self.inner_ref()?
+            .checkout_tag(tag.as_str())
+            .await
+            .default_error()
+    }
+
    #[napi(catch_unwind)]
    pub async fn checkout_latest(&self) -> napi::Result<()> {
        self.inner_ref()?.checkout_latest().await.default_error()

@@ -281,6 +289,13 @@
        self.inner_ref()?.restore().await.default_error()
    }

+    #[napi(catch_unwind)]
+    pub async fn tags(&self) -> napi::Result<Tags> {
+        Ok(Tags {
+            inner: self.inner_ref()?.clone(),
+        })
+    }
+
    #[napi(catch_unwind)]
    pub async fn optimize(
        &self,

@@ -546,3 +561,78 @@ pub struct Version {
    pub timestamp: i64,
    pub metadata: HashMap<String, String>,
}

+#[napi]
+pub struct TagContents {
+    pub version: i64,
+    pub manifest_size: i64,
+}
+
+#[napi]
+pub struct Tags {
+    inner: LanceDbTable,
+}
+
+#[napi]
+impl Tags {
+    #[napi]
+    pub async fn list(&self) -> napi::Result<HashMap<String, TagContents>> {
+        let rust_tags = self.inner.tags().await.default_error()?;
+        let tag_list = rust_tags.as_ref().list().await.default_error()?;
+        let tag_contents = tag_list
+            .into_iter()
+            .map(|(k, v)| {
+                (
+                    k,
+                    TagContents {
+                        version: v.version as i64,
+                        manifest_size: v.manifest_size as i64,
+                    },
+                )
+            })
+            .collect();
+
+        Ok(tag_contents)
+    }
+
+    #[napi]
+    pub async fn get_version(&self, tag: String) -> napi::Result<i64> {
+        let rust_tags = self.inner.tags().await.default_error()?;
+        rust_tags
+            .as_ref()
+            .get_version(tag.as_str())
+            .await
+            .map(|v| v as i64)
+            .default_error()
+    }
+
+    #[napi]
+    pub async unsafe fn create(&mut self, tag: String, version: i64) -> napi::Result<()> {
+        let mut rust_tags = self.inner.tags().await.default_error()?;
+        rust_tags
+            .as_mut()
+            .create(tag.as_str(), version as u64)
+            .await
+            .default_error()
+    }
+
+    #[napi]
+    pub async unsafe fn delete(&mut self, tag: String) -> napi::Result<()> {
+        let mut rust_tags = self.inner.tags().await.default_error()?;
+        rust_tags
+            .as_mut()
+            .delete(tag.as_str())
+            .await
+            .default_error()
+    }
+
+    #[napi]
+    pub async unsafe fn update(&mut self, tag: String, version: i64) -> napi::Result<()> {
+        let mut rust_tags = self.inner.tags().await.default_error()?;
+        rust_tags
+            .as_mut()
+            .update(tag.as_str(), version as u64)
+            .await
+            .default_error()
+    }
+}
@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.22.0-beta.9"
current_version = "0.22.1-beta.0"
parse = """(?x)
(?P<major>0|[1-9]\\d*)\\.
(?P<minor>0|[1-9]\\d*)\\.

@@ -1,6 +1,6 @@
[package]
name = "lancedb-python"
version = "0.22.0-beta.9"
version = "0.22.1-beta.0"
edition.workspace = true
description = "Python bindings for LanceDB"
license.workspace = true
@@ -1,5 +1,5 @@
from datetime import timedelta
from typing import Dict, List, Optional, Tuple, Any, Union, Literal
from typing import Dict, List, Optional, Tuple, Any, TypedDict, Union, Literal

import pyarrow as pa

@@ -47,7 +47,7 @@ class Table:
): ...
async def list_versions(self) -> List[Dict[str, Any]]: ...
async def version(self) -> int: ...
async def checkout(self, version: int): ...
async def checkout(self, version: Union[int, str]): ...
async def checkout_latest(self): ...
async def restore(self, version: Optional[int] = None): ...
async def list_indices(self) -> list[IndexConfig]: ...

@@ -61,9 +61,18 @@ class Table:
cleanup_since_ms: Optional[int] = None,
delete_unverified: Optional[bool] = None,
) -> OptimizeStats: ...
@property
def tags(self) -> Tags: ...
def query(self) -> Query: ...
def vector_search(self) -> VectorQuery: ...

class Tags:
    async def list(self) -> Dict[str, Tag]: ...
    async def get_version(self, tag: str) -> int: ...
    async def create(self, tag: str, version: int): ...
    async def delete(self, tag: str): ...
    async def update(self, tag: str, version: int): ...

class IndexConfig:
index_type: str
columns: List[str]

@@ -195,3 +204,7 @@ class RemovalStats:
class OptimizeStats:
compaction: CompactionStats
prune: RemovalStats

class Tag(TypedDict):
    version: int
    manifest_size: int
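The stubs above describe the tag API surface exposed by the native module. A minimal sketch of how these types line up in user code, assuming an already-created async table named `table` (the tag name is illustrative):

```python
from typing import Dict

async def snapshot(table) -> None:
    version: int = await table.version()
    await table.tags.create("baseline", version)      # tag the current version
    tags: Dict[str, dict] = await table.tags.list()   # {"baseline": {"version": ..., "manifest_size": ...}}
    assert tags["baseline"]["version"] == version
```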
@@ -28,6 +28,8 @@ import pyarrow.compute as pc
import pyarrow.fs as pa_fs
import pydantic

from lancedb.pydantic import PYDANTIC_VERSION

from . import __version__
from .arrow import AsyncRecordBatchReader
from .dependencies import pandas as pd

@@ -498,10 +500,14 @@ class Query(pydantic.BaseModel):
)
return query

class Config:
# This tells pydantic to allow custom types (needed for the `vector` query since
# pa.Array wouln't be allowed otherwise)
arbitrary_types_allowed = True
# This tells pydantic to allow custom types (needed for the `vector` query since
# pa.Array wouln't be allowed otherwise)
if PYDANTIC_VERSION.major < 2:  # Pydantic 1.x compat

    class Config:
        arbitrary_types_allowed = True
else:
    model_config = {"arbitrary_types_allowed": True}


class LanceQueryBuilder(ABC):
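The hunk above makes the query model's "allow arbitrary types" setting version-aware. A minimal, self-contained sketch of the same pattern (the model name is illustrative; `PYDANTIC_VERSION` is the import added above):

```python
import pydantic
import pyarrow as pa
from lancedb.pydantic import PYDANTIC_VERSION

class ExampleQuery(pydantic.BaseModel):
    vector: pa.Array  # non-pydantic type, hence the config below

    if PYDANTIC_VERSION.major < 2:  # Pydantic 1.x: nested Config class
        class Config:
            arbitrary_types_allowed = True
    else:  # Pydantic 2.x: model_config dict
        model_config = {"arbitrary_types_allowed": True}
```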
@@ -1586,6 +1592,8 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
self._refine_factor = None
self._distance_type = None
self._phrase_query = None
self._lower_bound = None
self._upper_bound = None

def _validate_query(self, query, vector=None, text=None):
if query is not None and (vector is not None or text is not None):

@@ -1628,47 +1636,7 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
raise NotImplementedError("to_query_object not yet supported on a hybrid query")

def to_arrow(self, *, timeout: Optional[timedelta] = None) -> pa.Table:
vector_query, fts_query = self._validate_query(
self._query, self._vector, self._text
)
self._fts_query = LanceFtsQueryBuilder(
self._table, fts_query, fts_columns=self._fts_columns
)
vector_query = self._query_to_vector(
self._table, vector_query, self._vector_column
)
self._vector_query = LanceVectorQueryBuilder(
self._table, vector_query, self._vector_column
)

if self._limit:
self._vector_query.limit(self._limit)
self._fts_query.limit(self._limit)
if self._columns:
self._vector_query.select(self._columns)
self._fts_query.select(self._columns)
if self._where:
self._vector_query.where(self._where, self._postfilter)
self._fts_query.where(self._where, self._postfilter)
if self._with_row_id:
self._vector_query.with_row_id(True)
self._fts_query.with_row_id(True)
if self._phrase_query:
self._fts_query.phrase_query(True)
if self._distance_type:
self._vector_query.metric(self._distance_type)
if self._nprobes:
self._vector_query.nprobes(self._nprobes)
if self._refine_factor:
self._vector_query.refine_factor(self._refine_factor)
if self._ef:
self._vector_query.ef(self._ef)
if self._bypass_vector_index:
self._vector_query.bypass_vector_index()

if self._reranker is None:
self._reranker = RRFReranker()

self._create_query_builders()
with ThreadPoolExecutor() as executor:
fts_future = executor.submit(
self._fts_query.with_row_id(True).to_arrow, timeout=timeout

@@ -1991,6 +1959,112 @@ class LanceHybridQueryBuilder(LanceQueryBuilder):
self._bypass_vector_index = True
return self

def explain_plan(self, verbose: Optional[bool] = False) -> str:
"""Return the execution plan for this query.

Examples
--------
>>> import lancedb
>>> db = lancedb.connect("./.lancedb")
>>> table = db.create_table("my_table", [{"vector": [99.0, 99]}])
>>> query = [100, 100]
>>> plan = table.search(query).explain_plan(True)
>>> print(plan)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
ProjectionExec: expr=[vector@0 as vector, _distance@2 as _distance]
GlobalLimitExec: skip=0, fetch=10
FilterExec: _distance@2 IS NOT NULL
SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], preserve_partitioning=[false]
KNNVectorDistance: metric=l2
LanceScan: uri=..., projection=[vector], row_id=true, row_addr=false, ordered=false

Parameters
----------
verbose : bool, default False
    Use a verbose output format.

Returns
-------
plan : str
"""  # noqa: E501
self._create_query_builders()

results = ["Vector Search Plan:"]
results.append(
self._table._explain_plan(
self._vector_query.to_query_object(), verbose=verbose
)
)
results.append("FTS Search Plan:")
results.append(
self._table._explain_plan(
self._fts_query.to_query_object(), verbose=verbose
)
)
return "\n".join(results)

def analyze_plan(self):
"""Execute the query and display with runtime metrics.

Returns
-------
plan : str
"""
self._create_query_builders()

results = ["Vector Search Plan:"]
results.append(self._table._analyze_plan(self._vector_query.to_query_object()))
results.append("FTS Search Plan:")
results.append(self._table._analyze_plan(self._fts_query.to_query_object()))
return "\n".join(results)
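A minimal sketch of calling the new hybrid-query plan methods; the table setup is illustrative and assumes a `vector` column plus an FTS index on `text`:

```python
# Sketch only: `table` is a lancedb table created elsewhere.
query = (
    table.search(query_type="hybrid")
    .vector([0.0, 0.4])
    .text("cat and dog")
    .limit(5)
)
print(query.explain_plan(verbose=True))  # prints "Vector Search Plan:" then "FTS Search Plan:"
print(query.analyze_plan())              # executes the query and includes runtime metrics
```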
def _create_query_builders(self):
"""Set up and configure the vector and FTS query builders."""
vector_query, fts_query = self._validate_query(
self._query, self._vector, self._text
)
self._fts_query = LanceFtsQueryBuilder(
self._table, fts_query, fts_columns=self._fts_columns
)
vector_query = self._query_to_vector(
self._table, vector_query, self._vector_column
)
self._vector_query = LanceVectorQueryBuilder(
self._table, vector_query, self._vector_column
)

# Apply common configurations
if self._limit:
self._vector_query.limit(self._limit)
self._fts_query.limit(self._limit)
if self._columns:
self._vector_query.select(self._columns)
self._fts_query.select(self._columns)
if self._where:
self._vector_query.where(self._where, self._postfilter)
self._fts_query.where(self._where, self._postfilter)
if self._with_row_id:
self._vector_query.with_row_id(True)
self._fts_query.with_row_id(True)
if self._phrase_query:
self._fts_query.phrase_query(True)
if self._distance_type:
self._vector_query.metric(self._distance_type)
if self._nprobes:
self._vector_query.nprobes(self._nprobes)
if self._refine_factor:
self._vector_query.refine_factor(self._refine_factor)
if self._ef:
self._vector_query.ef(self._ef)
if self._bypass_vector_index:
self._vector_query.bypass_vector_index()
if self._lower_bound or self._upper_bound:
self._vector_query.distance_range(
lower_bound=self._lower_bound, upper_bound=self._upper_bound
)

if self._reranker is None:
self._reranker = RRFReranker()


class AsyncQueryBase(object):
def __init__(self, inner: Union[LanceQuery, LanceVectorQuery]):
@@ -18,7 +18,7 @@ from lancedb.merge import LanceMergeInsertBuilder
from lancedb.embeddings import EmbeddingFunctionRegistry

from ..query import LanceVectorQueryBuilder, LanceQueryBuilder
from ..table import AsyncTable, IndexStatistics, Query, Table
from ..table import AsyncTable, IndexStatistics, Query, Table, Tags


class RemoteTable(Table):

@@ -54,6 +54,10 @@ class RemoteTable(Table):
"""Get the current version of the table"""
return LOOP.run(self._table.version())

@property
def tags(self) -> Tags:
    return Tags(self._table)

@cached_property
def embedding_functions(self) -> Dict[str, EmbeddingFunctionConfig]:
"""

@@ -81,7 +85,7 @@ class RemoteTable(Table):
"""to_pandas() is not yet supported on LanceDB cloud."""
return NotImplementedError("to_pandas() is not yet supported on LanceDB cloud.")

def checkout(self, version: int):
def checkout(self, version: Union[int, str]):
return LOOP.run(self._table.checkout(version))

def checkout_latest(self):
@@ -77,6 +77,7 @@ if TYPE_CHECKING:
OptimizeStats,
CleanupStats,
CompactionStats,
Tag,
)
from .db import LanceDBConnection
from .index import IndexConfig

@@ -582,6 +583,35 @@ class Table(ABC):
"""
raise NotImplementedError

@property
@abstractmethod
def tags(self) -> Tags:
    """Tag management for the table.

    Similar to Git, tags are a way to add metadata to a specific version of the
    table.

    .. warning::

        Tagged versions are exempted from the :py:meth:`cleanup_old_versions()`
        process.

        To remove a version that has been tagged, you must first
        :py:meth:`~Tags.delete` the associated tag.

    Examples
    --------

    .. code-block:: python

        table = db.open_table("my_table")
        table.tags.create("v2-prod-20250203", 10)

        tags = table.tags.list()

    """
    raise NotImplementedError

@property
@abstractmethod
def embedding_functions(self) -> Dict[str, EmbeddingFunctionConfig]:

@@ -1354,7 +1384,7 @@ class Table(ABC):
"""

@abstractmethod
def checkout(self, version: int):
def checkout(self, version: Union[int, str]):
"""
Checks out a specific version of the Table

@@ -1369,6 +1399,12 @@ class Table(ABC):
Any operation that modifies the table will fail while the table is in a checked
out state.

Parameters
----------
version: int | str,
    The version to check out. A version number (`int`) or a tag
    (`str`) can be provided.

To return the table to a normal state use `[Self::checkout_latest]`
"""
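A minimal sketch of the int-or-str checkout described above (the tag name is illustrative, and `table` is a table opened elsewhere):

```python
table.tags.create("v1", table.version)   # remember the current version under a tag
table.add([{"vector": [0.5, 0.2]}])      # writes create a new version

table.checkout(1)        # check out by version number
table.checkout("v1")     # or by tag name
table.checkout_latest()  # return the table to its normal (latest) state
```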
@@ -1538,7 +1574,45 @@ class LanceTable(Table):
"""Get the current version of the table"""
return LOOP.run(self._table.version())

def checkout(self, version: int):
@property
def tags(self) -> Tags:
    """Tag management for the table.

    Similar to Git, tags are a way to add metadata to a specific version of the
    table.

    .. warning::

        Tagged versions are exempted from the :py:meth:`cleanup_old_versions()`
        process.

        To remove a version that has been tagged, you must first
        :py:meth:`~Tags.delete` the associated tag.

    Returns
    -------
    Tags
        The tag manager for managing tags for the table.

    Examples
    --------
    >>> import lancedb
    >>> db = lancedb.connect("./.lancedb")
    >>> table = db.create_table("my_table",
    ...     [{"vector": [1.1, 0.9], "type": "vector"}])
    >>> table.tags.create("v1", table.version)
    >>> table.add([{"vector": [0.5, 0.2], "type": "vector"}])
    >>> tags = table.tags.list()
    >>> print(tags["v1"]["version"])
    1
    >>> table.checkout("v1")
    >>> table.to_pandas()
           vector    type
    0  [1.1, 0.9]  vector
    """
    return Tags(self._table)

def checkout(self, version: Union[int, str]):
"""Checkout a version of the table. This is an in-place operation.

This allows viewing previous versions of the table. If you wish to

@@ -1550,8 +1624,9 @@ class LanceTable(Table):

Parameters
----------
version : int
    The version to checkout.
version: int | str,
    The version to check out. A version number (`int`) or a tag
    (`str`) can be provided.

Examples
--------
@@ -3746,7 +3821,7 @@ class AsyncTable:

return versions

async def checkout(self, version: int):
async def checkout(self, version: int | str):
"""
Checks out a specific version of the Table

@@ -3761,6 +3836,12 @@ class AsyncTable:
Any operation that modifies the table will fail while the table is in a checked
out state.

Parameters
----------
version: int | str,
    The version to check out. A version number (`int`) or a tag
    (`str`) can be provided.

To return the table to a normal state use `[Self::checkout_latest]`
"""
try:

@@ -3798,6 +3879,24 @@ class AsyncTable:
"""
await self._inner.restore(version)

@property
def tags(self) -> AsyncTags:
    """Tag management for the dataset.

    Similar to Git, tags are a way to add metadata to a specific version of the
    dataset.

    .. warning::

        Tagged versions are exempted from the
        :py:meth:`optimize(cleanup_older_than)` process.

        To remove a version that has been tagged, you must first
        :py:meth:`~Tags.delete` the associated tag.

    """
    return AsyncTags(self._inner)

async def optimize(
self,
*,
@@ -3967,3 +4066,141 @@ class IndexStatistics:
# a dictionary instead of a class.
def __getitem__(self, key):
return getattr(self, key)


class Tags:
    """
    Table tag manager.
    """

    def __init__(self, table):
        self._table = table

    def list(self) -> Dict[str, Tag]:
        """
        List all table tags.

        Returns
        -------
        dict[str, Tag]
            A dictionary mapping tag names to version numbers.
        """
        return LOOP.run(self._table.tags.list())

    def get_version(self, tag: str) -> int:
        """
        Get the version of a tag.

        Parameters
        ----------
        tag: str,
            The name of the tag to get the version for.
        """
        return LOOP.run(self._table.tags.get_version(tag))

    def create(self, tag: str, version: int) -> None:
        """
        Create a tag for a given table version.

        Parameters
        ----------
        tag: str,
            The name of the tag to create. This name must be unique among all tag
            names for the table.
        version: int,
            The table version to tag.
        """
        LOOP.run(self._table.tags.create(tag, version))

    def delete(self, tag: str) -> None:
        """
        Delete tag from the table.

        Parameters
        ----------
        tag: str,
            The name of the tag to delete.
        """
        LOOP.run(self._table.tags.delete(tag))

    def update(self, tag: str, version: int) -> None:
        """
        Update tag to a new version.

        Parameters
        ----------
        tag: str,
            The name of the tag to update.
        version: int,
            The new table version to tag.
        """
        LOOP.run(self._table.tags.update(tag, version))


class AsyncTags:
    """
    Async table tag manager.
    """

    def __init__(self, table):
        self._table = table

    async def list(self) -> Dict[str, Tag]:
        """
        List all table tags.

        Returns
        -------
        dict[str, Tag]
            A dictionary mapping tag names to version numbers.
        """
        return await self._table.tags.list()

    async def get_version(self, tag: str) -> int:
        """
        Get the version of a tag.

        Parameters
        ----------
        tag: str,
            The name of the tag to get the version for.
        """
        return await self._table.tags.get_version(tag)

    async def create(self, tag: str, version: int) -> None:
        """
        Create a tag for a given table version.

        Parameters
        ----------
        tag: str,
            The name of the tag to create. This name must be unique among all tag
            names for the table.
        version: int,
            The table version to tag.
        """
        await self._table.tags.create(tag, version)

    async def delete(self, tag: str) -> None:
        """
        Delete tag from the table.

        Parameters
        ----------
        tag: str,
            The name of the tag to delete.
        """
        await self._table.tags.delete(tag)

    async def update(self, tag: str, version: int) -> None:
        """
        Update tag to a new version.

        Parameters
        ----------
        tag: str,
            The name of the tag to update.
        version: int,
            The new table version to tag.
        """
        await self._table.tags.update(tag, version)
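A minimal async sketch of the same tag operations end to end; the connection path, table name, and tag name are illustrative:

```python
import asyncio
import lancedb

async def main():
    db = await lancedb.connect_async("./.lancedb")
    table = await db.open_table("my_table")              # assumes the table already exists
    await table.tags.create("release", await table.version())
    print(await table.tags.list())                       # {"release": {"version": ..., "manifest_size": ...}}
    await table.checkout("release")                       # read the tagged version
    await table.checkout_latest()

asyncio.run(main())
```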
@@ -4,13 +4,32 @@
import lancedb

from lancedb.query import LanceHybridQueryBuilder
from lancedb.rerankers.rrf import RRFReranker
import pyarrow as pa
import pyarrow.compute as pc
import pytest
import pytest_asyncio

from lancedb.index import FTS
from lancedb.table import AsyncTable
from lancedb.table import AsyncTable, Table


@pytest.fixture
def sync_table(tmpdir_factory) -> Table:
    tmp_path = str(tmpdir_factory.mktemp("data"))
    db = lancedb.connect(tmp_path)
    data = pa.table(
        {
            "text": pa.array(["a", "b", "cat", "dog"]),
            "vector": pa.array(
                [[0.1, 0.1], [2, 2], [-0.1, -0.1], [0.5, -0.5]],
                type=pa.list_(pa.float32(), list_size=2),
            ),
        }
    )
    table = db.create_table("test", data)
    table.create_fts_index("text", with_position=False, use_tantivy=False)
    return table


@pytest_asyncio.fixture

@@ -102,6 +121,42 @@ async def test_async_hybrid_query_default_limit(table: AsyncTable):
assert texts.count("a") == 1


def test_hybrid_query_distance_range(sync_table: Table):
    reranker = RRFReranker(return_score="all")
    result = (
        sync_table.search(query_type="hybrid")
        .vector([0.0, 0.4])
        .text("cat and dog")
        .distance_range(lower_bound=0.2, upper_bound=0.5)
        .rerank(reranker)
        .limit(2)
        .to_arrow()
    )
    assert len(result) == 2
    print(result)
    for dist in result["_distance"]:
        if dist.is_valid:
            assert 0.2 <= dist.as_py() <= 0.5


@pytest.mark.asyncio
async def test_hybrid_query_distance_range_async(table: AsyncTable):
    reranker = RRFReranker(return_score="all")
    result = await (
        table.query()
        .nearest_to([0.0, 0.4])
        .nearest_to_text("cat and dog")
        .distance_range(lower_bound=0.2, upper_bound=0.5)
        .rerank(reranker)
        .limit(2)
        .to_arrow()
    )
    assert len(result) == 2
    for dist in result["_distance"]:
        if dist.is_valid:
            assert 0.2 <= dist.as_py() <= 0.5


@pytest.mark.asyncio
async def test_explain_plan(table: AsyncTable):
plan = await (
@@ -529,6 +529,113 @@ def test_versioning(mem_db: DBConnection):
assert len(table) == 2


def test_tags(mem_db: DBConnection):
    table = mem_db.create_table(
        "test",
        data=[
            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
            {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
        ],
    )

    table.tags.create("tag1", 1)
    tags = table.tags.list()
    assert "tag1" in tags
    assert tags["tag1"]["version"] == 1

    table.add(
        data=[
            {"vector": [10.0, 11.0], "item": "baz", "price": 30.0},
        ],
    )

    table.tags.create("tag2", 2)
    tags = table.tags.list()
    assert "tag1" in tags
    assert "tag2" in tags
    assert tags["tag1"]["version"] == 1
    assert tags["tag2"]["version"] == 2

    table.tags.delete("tag2")
    table.tags.update("tag1", 2)
    tags = table.tags.list()
    assert "tag1" in tags
    assert tags["tag1"]["version"] == 2

    table.tags.update("tag1", 1)
    tags = table.tags.list()
    assert "tag1" in tags
    assert tags["tag1"]["version"] == 1

    table.checkout("tag1")
    assert table.version == 1
    assert table.count_rows() == 2
    table.tags.create("tag2", 2)
    table.checkout("tag2")
    assert table.version == 2
    assert table.count_rows() == 3
    table.checkout_latest()
    table.add(
        data=[
            {"vector": [12.0, 13.0], "item": "baz", "price": 40.0},
        ],
    )


@pytest.mark.asyncio
async def test_async_tags(mem_db_async: AsyncConnection):
    table = await mem_db_async.create_table(
        "test",
        data=[
            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
            {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
        ],
    )

    await table.tags.create("tag1", 1)
    tags = await table.tags.list()
    assert "tag1" in tags
    assert tags["tag1"]["version"] == 1

    await table.add(
        data=[
            {"vector": [10.0, 11.0], "item": "baz", "price": 30.0},
        ],
    )

    await table.tags.create("tag2", 2)
    tags = await table.tags.list()
    assert "tag1" in tags
    assert "tag2" in tags
    assert tags["tag1"]["version"] == 1
    assert tags["tag2"]["version"] == 2

    await table.tags.delete("tag2")
    await table.tags.update("tag1", 2)
    tags = await table.tags.list()
    assert "tag1" in tags
    assert tags["tag1"]["version"] == 2

    await table.tags.update("tag1", 1)
    tags = await table.tags.list()
    assert "tag1" in tags
    assert tags["tag1"]["version"] == 1

    await table.checkout("tag1")
    assert await table.version() == 1
    assert await table.count_rows() == 2
    await table.tags.create("tag2", 2)
    await table.checkout("tag2")
    assert await table.version() == 2
    assert await table.count_rows() == 3
    await table.checkout_latest()
    await table.add(
        data=[
            {"vector": [12.0, 13.0], "item": "baz", "price": 40.0},
        ],
    )


@patch("lancedb.table.AsyncTable.create_index")
def test_create_index_method(mock_create_index, mem_db: DBConnection):
table = mem_db.create_table(
@@ -652,6 +652,11 @@ impl HybridQuery {
self.inner_vec.bypass_vector_index();
}

#[pyo3(signature = (lower_bound=None, upper_bound=None))]
pub fn distance_range(&mut self, lower_bound: Option<f32>, upper_bound: Option<f32>) {
self.inner_vec.distance_range(lower_bound, upper_bound);
}

pub fn to_vector_query(&mut self) -> PyResult<VectorQuery> {
Ok(VectorQuery {
inner: self.inner_vec.inner.clone(),
@@ -2,6 +2,11 @@
// SPDX-FileCopyrightText: Copyright The LanceDB Authors
use std::{collections::HashMap, sync::Arc};

use crate::{
error::PythonErrorExt,
index::{extract_index_params, IndexConfig},
query::Query,
};
use arrow::{
datatypes::{DataType, Schema},
ffi_stream::ArrowArrayStreamReader,

@@ -12,19 +17,13 @@ use lancedb::table::{
Table as LanceDbTable,
};
use pyo3::{
exceptions::{PyKeyError, PyRuntimeError, PyValueError},
exceptions::{PyIOError, PyKeyError, PyRuntimeError, PyValueError},
pyclass, pymethods,
types::{IntoPyDict, PyAnyMethods, PyDict, PyDictMethods},
Bound, FromPyObject, PyAny, PyRef, PyResult, Python,
types::{IntoPyDict, PyAnyMethods, PyDict, PyDictMethods, PyInt, PyString},
Bound, FromPyObject, PyAny, PyObject, PyRef, PyResult, Python,
};
use pyo3_async_runtimes::tokio::future_into_py;

use crate::{
error::PythonErrorExt,
index::{extract_index_params, IndexConfig},
query::Query,
};

/// Statistics about a compaction operation.
#[pyclass(get_all)]
#[derive(Clone, Debug)]

@@ -322,10 +321,26 @@ impl Table {
})
}

pub fn checkout(self_: PyRef<'_, Self>, version: u64) -> PyResult<Bound<'_, PyAny>> {
pub fn checkout(self_: PyRef<'_, Self>, version: PyObject) -> PyResult<Bound<'_, PyAny>> {
let inner = self_.inner_ref()?.clone();
future_into_py(self_.py(), async move {
inner.checkout(version).await.infer_error()
let py = self_.py();
let (is_int, int_value, string_value) = if let Ok(i) = version.downcast_bound::<PyInt>(py) {
let num: u64 = i.extract()?;
(true, num, String::new())
} else if let Ok(s) = version.downcast_bound::<PyString>(py) {
let str_value = s.to_string();
(false, 0, str_value)
} else {
return Err(PyIOError::new_err(
"version must be an integer or a string.",
));
};
future_into_py(py, async move {
if is_int {
inner.checkout(int_value).await.infer_error()
} else {
inner.checkout_tag(&string_value).await.infer_error()
}
})
}
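Seen from the Python side, the dispatch above keeps a single `checkout` entry point that accepts either form; a minimal sketch (the tag name is illustrative):

```python
# Sketch: `table` is an AsyncTable obtained from an async connection.
await table.checkout(3)      # downcast succeeds as PyInt  -> inner.checkout(3)
await table.checkout("v1")   # downcast succeeds as PyString -> inner.checkout_tag("v1")
# Any other type raises: "version must be an integer or a string."
```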
@@ -352,6 +367,11 @@ impl Table {
Query::new(self.inner_ref().unwrap().query())
}

#[getter]
pub fn tags(&self) -> PyResult<Tags> {
Ok(Tags::new(self.inner_ref()?.clone()))
}

/// Optimize the on-disk data by compacting and pruning old data, for better performance.
#[pyo3(signature = (cleanup_since_ms=None, delete_unverified=None, retrain=None))]
pub fn optimize(

@@ -586,3 +606,72 @@ pub struct MergeInsertParams {
when_not_matched_by_source_delete: bool,
when_not_matched_by_source_condition: Option<String>,
}

#[pyclass]
pub struct Tags {
inner: LanceDbTable,
}

impl Tags {
pub fn new(table: LanceDbTable) -> Self {
Self { inner: table }
}
}

#[pymethods]
impl Tags {
pub fn list(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
let inner = self_.inner.clone();
future_into_py(self_.py(), async move {
let tags = inner.tags().await.infer_error()?;
let res = tags.list().await.infer_error()?;

Python::with_gil(|py| {
let py_dict = PyDict::new(py);
for (key, contents) in res {
let value_dict = PyDict::new(py);
value_dict.set_item("version", contents.version)?;
value_dict.set_item("manifest_size", contents.manifest_size)?;
py_dict.set_item(key, value_dict)?;
}
Ok(py_dict.unbind())
})
})
}

pub fn get_version(self_: PyRef<'_, Self>, tag: String) -> PyResult<Bound<'_, PyAny>> {
let inner = self_.inner.clone();
future_into_py(self_.py(), async move {
let tags = inner.tags().await.infer_error()?;
let res = tags.get_version(tag.as_str()).await.infer_error()?;
Ok(res)
})
}

pub fn create(self_: PyRef<Self>, tag: String, version: u64) -> PyResult<Bound<PyAny>> {
let inner = self_.inner.clone();
future_into_py(self_.py(), async move {
let mut tags = inner.tags().await.infer_error()?;
tags.create(tag.as_str(), version).await.infer_error()?;
Ok(())
})
}

pub fn delete(self_: PyRef<Self>, tag: String) -> PyResult<Bound<PyAny>> {
let inner = self_.inner.clone();
future_into_py(self_.py(), async move {
let mut tags = inner.tags().await.infer_error()?;
tags.delete(tag.as_str()).await.infer_error()?;
Ok(())
})
}

pub fn update(self_: PyRef<Self>, tag: String, version: u64) -> PyResult<Bound<PyAny>> {
let inner = self_.inner.clone();
future_into_py(self_.py(), async move {
let mut tags = inner.tags().await.infer_error()?;
tags.update(tag.as_str(), version).await.infer_error()?;
Ok(())
})
}
}
@@ -1,6 +1,6 @@
[package]
name = "lancedb-node"
version = "0.19.0-beta.8"
version = "0.19.0"
description = "Serverless, low-latency vector database for AI applications"
license.workspace = true
edition.workspace = true

@@ -1,6 +1,6 @@
[package]
name = "lancedb"
version = "0.19.0-beta.8"
version = "0.19.0"
edition.workspace = true
description = "LanceDB: A serverless, low-latency vector database for AI applications"
license.workspace = true
@@ -81,7 +81,7 @@ impl ListingCatalogOptionsBuilder {
/// [`crate::database::listing::ListingDatabase`]
#[derive(Debug)]
pub struct ListingCatalog {
object_store: ObjectStore,
object_store: Arc<ObjectStore>,

uri: String,

@@ -105,7 +105,7 @@ impl ListingCatalog {
}

async fn open_path(path: &str) -> Result<Self> {
let (object_store, base_path) = ObjectStore::from_path(path).unwrap();
let (object_store, base_path) = ObjectStore::from_uri(path).await.unwrap();
if object_store.is_local() {
Self::try_create_dir(path).context(CreateDirSnafu { path })?;
}

@@ -201,7 +201,7 @@ impl ListingDatabaseOptionsBuilder {
/// We will have two tables named `table1` and `table2`.
#[derive(Debug)]
pub struct ListingDatabase {
object_store: ObjectStore,
object_store: Arc<ObjectStore>,
query_string: Option<String>,

pub(crate) uri: String,

@@ -20,7 +20,7 @@ pub async fn wait_for_index(
) -> Result<()> {
if timeout > MAX_WAIT {
return Err(Error::InvalidInput {
message: format!("timeout must be less than {:?}", MAX_WAIT).to_string(),
message: format!("timeout must be less than {:?}", MAX_WAIT),
});
}
let start = Instant::now();

@@ -84,7 +84,6 @@ pub async fn wait_for_index(
message: format!(
"timed out waiting for indices: {:?} after {:?}",
remaining, timeout
)
.to_string(),
),
})
}
@@ -8,6 +8,7 @@

pub(crate) mod client;
pub(crate) mod db;
mod retry;
pub(crate) mod table;
pub(crate) mod util;

@@ -1,17 +1,17 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use std::{collections::HashMap, future::Future, str::FromStr, time::Duration};

use http::HeaderName;
use log::debug;
use reqwest::{
header::{HeaderMap, HeaderValue},
Request, RequestBuilder, Response,
Body, Request, RequestBuilder, Response,
};
use std::{collections::HashMap, future::Future, str::FromStr, time::Duration};

use crate::error::{Error, Result};
use crate::remote::db::RemoteOptions;
use crate::remote::retry::{ResolvedRetryConfig, RetryCounter};

const REQUEST_ID_HEADER: HeaderName = HeaderName::from_static("x-request-id");
@@ -118,41 +118,14 @@ pub struct RetryConfig {
/// You can also set the `LANCE_CLIENT_RETRY_STATUSES` environment variable
/// to set this value. Use a comma-separated list of integer values.
///
/// The default is 429, 500, 502, 503.
/// Note that write operations will never be retried on 5xx errors as this may
/// result in duplicated writes.
///
/// The default is 409, 429, 500, 502, 503, 504.
pub statuses: Option<Vec<u16>>,
// TODO: should we allow customizing methods?
}

#[derive(Debug, Clone)]
struct ResolvedRetryConfig {
retries: u8,
connect_retries: u8,
read_retries: u8,
backoff_factor: f32,
backoff_jitter: f32,
statuses: Vec<reqwest::StatusCode>,
}

impl TryFrom<RetryConfig> for ResolvedRetryConfig {
type Error = Error;

fn try_from(retry_config: RetryConfig) -> Result<Self> {
Ok(Self {
retries: retry_config.retries.unwrap_or(3),
connect_retries: retry_config.connect_retries.unwrap_or(3),
read_retries: retry_config.read_retries.unwrap_or(3),
backoff_factor: retry_config.backoff_factor.unwrap_or(0.25),
backoff_jitter: retry_config.backoff_jitter.unwrap_or(0.25),
statuses: retry_config
.statuses
.unwrap_or_else(|| vec![429, 500, 502, 503])
.into_iter()
.map(|status| reqwest::StatusCode::from_u16(status).unwrap())
.collect(),
})
}
}

// We use the `HttpSend` trait to abstract over the `reqwest::Client` so that
// we can mock responses in tests. Based on the patterns from this blog post:
// https://write.as/balrogboogie/testing-reqwest-based-clients
@@ -160,8 +133,8 @@ impl TryFrom<RetryConfig> for ResolvedRetryConfig {
pub struct RestfulLanceDbClient<S: HttpSend = Sender> {
client: reqwest::Client,
host: String,
retry_config: ResolvedRetryConfig,
sender: S,
pub(crate) retry_config: ResolvedRetryConfig,
pub(crate) sender: S,
}

pub trait HttpSend: Clone + Send + Sync + std::fmt::Debug + 'static {
@@ -375,74 +348,69 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
self.client.post(full_uri)
}

pub async fn send(&self, req: RequestBuilder, with_retry: bool) -> Result<(String, Response)> {
pub async fn send(&self, req: RequestBuilder) -> Result<(String, Response)> {
let (client, request) = req.build_split();
let mut request = request.unwrap();
let request_id = self.extract_request_id(&mut request);
self.log_request(&request, &request_id);

// Set a request id.
// TODO: allow the user to supply this, through middleware?
let request_id = if let Some(request_id) = request.headers().get(REQUEST_ID_HEADER) {
request_id.to_str().unwrap().to_string()
} else {
let request_id = uuid::Uuid::new_v4().to_string();
let header = HeaderValue::from_str(&request_id).unwrap();
request.headers_mut().insert(REQUEST_ID_HEADER, header);
request_id
};

if log::log_enabled!(log::Level::Debug) {
let content_type = request
.headers()
.get("content-type")
.map(|v| v.to_str().unwrap());
if content_type == Some("application/json") {
let body = request.body().as_ref().unwrap().as_bytes().unwrap();
let body = String::from_utf8_lossy(body);
debug!(
"Sending request_id={}: {:?} with body {}",
request_id, request, body
);
} else {
debug!("Sending request_id={}: {:?}", request_id, request);
}
}

if with_retry {
self.send_with_retry_impl(client, request, request_id).await
} else {
let response = self
.sender
.send(&client, request)
.await
.err_to_http(request_id.clone())?;
debug!(
"Received response for request_id={}: {:?}",
request_id, &response
);
Ok((request_id, response))
}
let response = self
.sender
.send(&client, request)
.await
.err_to_http(request_id.clone())?;
debug!(
"Received response for request_id={}: {:?}",
request_id, &response
);
Ok((request_id, response))
}

async fn send_with_retry_impl(
/// Send the request using retries configured in the RetryConfig.
/// If retry_5xx is false, 5xx requests will not be retried regardless of the statuses configured
/// in the RetryConfig.
/// Since this requires arrow serialization, this is implemented here instead of in RestfulLanceDbClient
pub async fn send_with_retry(
&self,
client: reqwest::Client,
req: Request,
request_id: String,
req_builder: RequestBuilder,
mut make_body: Option<Box<dyn FnMut() -> Result<Body> + Send + 'static>>,
retry_5xx: bool,
) -> Result<(String, Response)> {
let mut retry_counter = RetryCounter::new(&self.retry_config, request_id);
let retry_config = &self.retry_config;
let non_5xx_statuses = retry_config
.statuses
.iter()
.filter(|s| !s.is_server_error())
.cloned()
.collect::<Vec<_>>();

// clone and build the request to extract the request id
let tmp_req = req_builder.try_clone().ok_or_else(|| Error::Runtime {
message: "Attempted to retry a request that cannot be cloned".to_string(),
})?;
let (_, r) = tmp_req.build_split();
let mut r = r.unwrap();
let request_id = self.extract_request_id(&mut r);
let mut retry_counter = RetryCounter::new(retry_config, request_id.clone());

loop {
// This only works if the request body is not a stream. If it is
// a stream, we can't use the retry path. We would need to implement
// an outer retry.
let request = req.try_clone().ok_or_else(|| Error::Runtime {
let mut req_builder = req_builder.try_clone().ok_or_else(|| Error::Runtime {
message: "Attempted to retry a request that cannot be cloned".to_string(),
})?;
let response = self
.sender
.send(&client, request)
.await
.map(|r| (r.status(), r));

// set the streaming body on the request builder after clone
if let Some(body_gen) = make_body.as_mut() {
let body = body_gen()?;
req_builder = req_builder.body(body);
}

let (c, request) = req_builder.build_split();
let mut request = request.unwrap();
self.set_request_id(&mut request, &request_id.clone());
self.log_request(&request, &request_id);

let response = self.sender.send(&c, request).await.map(|r| (r.status(), r));

match response {
Ok((status, response)) if status.is_success() => {
debug!(
@@ -451,7 +419,10 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
);
return Ok((retry_counter.request_id, response));
}
Ok((status, response)) if self.retry_config.statuses.contains(&status) => {
Ok((status, response))
if (retry_5xx && retry_config.statuses.contains(&status))
|| non_5xx_statuses.contains(&status) =>
{
let source = self
.check_response(&retry_counter.request_id, response)
.await
@@ -480,6 +451,47 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
}
}

fn log_request(&self, request: &Request, request_id: &String) {
if log::log_enabled!(log::Level::Debug) {
let content_type = request
.headers()
.get("content-type")
.map(|v| v.to_str().unwrap());
if content_type == Some("application/json") {
let body = request.body().as_ref().unwrap().as_bytes().unwrap();
let body = String::from_utf8_lossy(body);
debug!(
"Sending request_id={}: {:?} with body {}",
request_id, request, body
);
} else {
debug!("Sending request_id={}: {:?}", request_id, request);
}
}
}

/// Extract the request ID from the request headers.
/// If the request ID header is not set, this will generate a new one and set
/// it on the request headers
pub fn extract_request_id(&self, request: &mut Request) -> String {
// Set a request id.
// TODO: allow the user to supply this, through middleware?
let request_id = if let Some(request_id) = request.headers().get(REQUEST_ID_HEADER) {
request_id.to_str().unwrap().to_string()
} else {
let request_id = uuid::Uuid::new_v4().to_string();
self.set_request_id(request, &request_id);
request_id
};
request_id
}

/// Set the request ID header
pub fn set_request_id(&self, request: &mut Request, request_id: &str) {
let header = HeaderValue::from_str(request_id).unwrap();
request.headers_mut().insert(REQUEST_ID_HEADER, header);
}

pub async fn check_response(&self, request_id: &str, response: Response) -> Result<Response> {
// Try to get the response text, but if that fails, just return the status code
let status = response.status();
@@ -501,91 +513,6 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
}
}

struct RetryCounter<'a> {
request_failures: u8,
connect_failures: u8,
read_failures: u8,
config: &'a ResolvedRetryConfig,
request_id: String,
}

impl<'a> RetryCounter<'a> {
fn new(config: &'a ResolvedRetryConfig, request_id: String) -> Self {
Self {
request_failures: 0,
connect_failures: 0,
read_failures: 0,
config,
request_id,
}
}

fn check_out_of_retries(
&self,
source: Box<dyn std::error::Error + Send + Sync>,
status_code: Option<reqwest::StatusCode>,
) -> Result<()> {
if self.request_failures >= self.config.retries
|| self.connect_failures >= self.config.connect_retries
|| self.read_failures >= self.config.read_retries
{
Err(Error::Retry {
request_id: self.request_id.clone(),
request_failures: self.request_failures,
max_request_failures: self.config.retries,
connect_failures: self.connect_failures,
max_connect_failures: self.config.connect_retries,
read_failures: self.read_failures,
max_read_failures: self.config.read_retries,
source,
status_code,
})
} else {
Ok(())
}
}

fn increment_request_failures(&mut self, source: crate::Error) -> Result<()> {
self.request_failures += 1;
let status_code = if let crate::Error::Http { status_code, .. } = &source {
*status_code
} else {
None
};
self.check_out_of_retries(Box::new(source), status_code)
}

fn increment_connect_failures(&mut self, source: reqwest::Error) -> Result<()> {
self.connect_failures += 1;
let status_code = source.status();
self.check_out_of_retries(Box::new(source), status_code)
}

fn increment_read_failures(&mut self, source: reqwest::Error) -> Result<()> {
self.read_failures += 1;
let status_code = source.status();
self.check_out_of_retries(Box::new(source), status_code)
}

fn next_sleep_time(&self) -> Duration {
let backoff = self.config.backoff_factor * (2.0f32.powi(self.request_failures as i32));
let jitter = rand::random::<f32>() * self.config.backoff_jitter;
let sleep_time = Duration::from_secs_f32(backoff + jitter);
debug!(
"Retrying request {:?} ({}/{} connect, {}/{} read, {}/{} read) in {:?}",
self.request_id,
self.connect_failures,
self.config.connect_retries,
self.request_failures,
self.config.retries,
self.read_failures,
self.config.read_retries,
sleep_time
);
sleep_time
}
}

pub trait RequestResultExt {
type Output;
fn err_to_http(self, request_id: String) -> Result<Self::Output>;
@@ -255,7 +255,7 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
if let Some(start_after) = request.start_after {
req = req.query(&[("page_token", start_after)]);
}
let (request_id, rsp) = self.client.send(req, true).await?;
let (request_id, rsp) = self.client.send_with_retry(req, None, true).await?;
let rsp = self.client.check_response(&request_id, rsp).await?;
let version = parse_server_version(&request_id, &rsp)?;
let tables = rsp

@@ -302,7 +302,7 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
.body(data_buffer)
.header(CONTENT_TYPE, ARROW_STREAM_CONTENT_TYPE);

let (request_id, rsp) = self.client.send(req, false).await?;
let (request_id, rsp) = self.client.send(req).await?;

if rsp.status() == StatusCode::BAD_REQUEST {
let body = rsp.text().await.err_to_http(request_id.clone())?;

@@ -362,7 +362,7 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
let req = self
.client
.post(&format!("/v1/table/{}/describe/", request.name));
let (request_id, rsp) = self.client.send(req, true).await?;
let (request_id, rsp) = self.client.send_with_retry(req, None, true).await?;
if rsp.status() == StatusCode::NOT_FOUND {
return Err(crate::Error::TableNotFound { name: request.name });
}

@@ -383,7 +383,7 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
.client
.post(&format!("/v1/table/{}/rename/", current_name));
let req = req.json(&serde_json::json!({ "new_table_name": new_name }));
let (request_id, resp) = self.client.send(req, false).await?;
let (request_id, resp) = self.client.send(req).await?;
self.client.check_response(&request_id, resp).await?;
let table = self.table_cache.remove(current_name).await;
if let Some(table) = table {

@@ -394,7 +394,7 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {

async fn drop_table(&self, name: &str) -> Result<()> {
let req = self.client.post(&format!("/v1/table/{}/drop/", name));
let (request_id, resp) = self.client.send(req, true).await?;
let (request_id, resp) = self.client.send(req).await?;
self.client.check_response(&request_id, resp).await?;
self.table_cache.remove(name).await;
Ok(())
122 rust/lancedb/src/remote/retry.rs Normal file
@@ -0,0 +1,122 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use crate::remote::RetryConfig;
use crate::Error;
use log::debug;
use std::time::Duration;

pub struct RetryCounter<'a> {
pub request_failures: u8,
pub connect_failures: u8,
pub read_failures: u8,
pub config: &'a ResolvedRetryConfig,
pub request_id: String,
}

impl<'a> RetryCounter<'a> {
pub(crate) fn new(config: &'a ResolvedRetryConfig, request_id: String) -> Self {
Self {
request_failures: 0,
connect_failures: 0,
read_failures: 0,
config,
request_id,
}
}

fn check_out_of_retries(
&self,
source: Box<dyn std::error::Error + Send + Sync>,
status_code: Option<reqwest::StatusCode>,
) -> crate::Result<()> {
if self.request_failures >= self.config.retries
|| self.connect_failures >= self.config.connect_retries
|| self.read_failures >= self.config.read_retries
{
Err(Error::Retry {
request_id: self.request_id.clone(),
request_failures: self.request_failures,
max_request_failures: self.config.retries,
connect_failures: self.connect_failures,
max_connect_failures: self.config.connect_retries,
read_failures: self.read_failures,
max_read_failures: self.config.read_retries,
source,
status_code,
})
} else {
Ok(())
}
}

pub fn increment_request_failures(&mut self, source: crate::Error) -> crate::Result<()> {
self.request_failures += 1;
let status_code = if let crate::Error::Http { status_code, .. } = &source {
*status_code
} else {
None
};
self.check_out_of_retries(Box::new(source), status_code)
}

pub fn increment_connect_failures(&mut self, source: reqwest::Error) -> crate::Result<()> {
self.connect_failures += 1;
let status_code = source.status();
self.check_out_of_retries(Box::new(source), status_code)
}

pub fn increment_read_failures(&mut self, source: reqwest::Error) -> crate::Result<()> {
self.read_failures += 1;
let status_code = source.status();
self.check_out_of_retries(Box::new(source), status_code)
}

pub fn next_sleep_time(&self) -> Duration {
let backoff = self.config.backoff_factor * (2.0f32.powi(self.request_failures as i32));
let jitter = rand::random::<f32>() * self.config.backoff_jitter;
let sleep_time = Duration::from_secs_f32(backoff + jitter);
debug!(
"Retrying request {:?} ({}/{} connect, {}/{} read, {}/{} read) in {:?}",
self.request_id,
self.connect_failures,
self.config.connect_retries,
self.request_failures,
self.config.retries,
self.read_failures,
self.config.read_retries,
sleep_time
);
sleep_time
}
}

#[derive(Debug, Clone)]
pub struct ResolvedRetryConfig {
pub retries: u8,
pub connect_retries: u8,
pub read_retries: u8,
pub backoff_factor: f32,
pub backoff_jitter: f32,
pub statuses: Vec<reqwest::StatusCode>,
}

impl TryFrom<RetryConfig> for ResolvedRetryConfig {
type Error = Error;

fn try_from(retry_config: RetryConfig) -> crate::Result<Self> {
Ok(Self {
retries: retry_config.retries.unwrap_or(3),
connect_retries: retry_config.connect_retries.unwrap_or(3),
read_retries: retry_config.read_retries.unwrap_or(3),
backoff_factor: retry_config.backoff_factor.unwrap_or(0.25),
backoff_jitter: retry_config.backoff_jitter.unwrap_or(0.25),
statuses: retry_config
.statuses
.unwrap_or_else(|| vec![409, 429, 500, 502, 503, 504])
.into_iter()
.map(|status| reqwest::StatusCode::from_u16(status).unwrap())
.collect(),
})
}
}
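The sleep computed by `next_sleep_time` above is `backoff_factor * 2^request_failures` plus a random jitter of up to `backoff_jitter` seconds. A small Python sketch of the schedule implied by the defaults (0.25 factor, 0.25 jitter, 3 retries):

```python
import random

def next_sleep_time(request_failures: int,
                    backoff_factor: float = 0.25,
                    backoff_jitter: float = 0.25) -> float:
    # mirrors: backoff_factor * 2^failures + uniform(0, backoff_jitter)
    return backoff_factor * (2 ** request_failures) + random.random() * backoff_jitter

# With the defaults, successive retries sleep roughly 0.5s, 1s, 2s (plus jitter).
for failures in (1, 2, 3):
    print(round(next_sleep_time(failures), 2))
```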
@@ -4,10 +4,11 @@
use crate::index::Index;
use crate::index::IndexStatistics;
use crate::query::{QueryFilter, QueryRequest, Select, VectorQueryRequest};
use crate::table::Tags;
use crate::table::{AddDataMode, AnyQuery, Filter};
use crate::utils::{supported_btree_data_type, supported_vector_data_type};
use crate::{DistanceType, Error, Table};
use arrow_array::RecordBatchReader;
use arrow_array::{RecordBatch, RecordBatchIterator, RecordBatchReader};
use arrow_ipc::reader::FileReader;
use arrow_schema::{DataType, SchemaRef};
use async_trait::async_trait;

@@ -18,10 +19,13 @@ use futures::TryStreamExt;
use http::header::CONTENT_TYPE;
use http::{HeaderName, StatusCode};
use lance::arrow::json::{JsonDataType, JsonSchema};
use lance::dataset::refs::TagContents;
use lance::dataset::scanner::DatasetRecordBatchStream;
use lance::dataset::{ColumnAlteration, NewColumnTransform, Version};
use lance_datafusion::exec::{execute_plan, OneShotExec};
use reqwest::{RequestBuilder, Response};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::io::Cursor;
use std::pin::Pin;
use std::sync::{Arc, Mutex};

@@ -46,6 +50,137 @@ use crate::{

const REQUEST_TIMEOUT_HEADER: HeaderName = HeaderName::from_static("x-request-timeout-ms");
pub struct RemoteTags<'a, S: HttpSend = Sender> {
inner: &'a RemoteTable<S>,
}

#[async_trait]
impl<S: HttpSend + 'static> Tags for RemoteTags<'_, S> {
async fn list(&self) -> Result<HashMap<String, TagContents>> {
let request = self
.inner
.client
.post(&format!("/v1/table/{}/tags/list/", self.inner.name));
let (request_id, response) = self.inner.send(request, true).await?;
let response = self
.inner
.check_table_response(&request_id, response)
.await?;

match response.text().await {
Ok(body) => {
// Explicitly tell serde_json what type we want to deserialize into
let tags_map: HashMap<String, TagContents> =
serde_json::from_str(&body).map_err(|e| Error::Http {
source: format!("Failed to parse tags list: {}", e).into(),
request_id,
status_code: None,
})?;

Ok(tags_map)
}
Err(err) => {
let status_code = err.status();
Err(Error::Http {
source: Box::new(err),
request_id,
status_code,
})
}
}
}

async fn get_version(&self, tag: &str) -> Result<u64> {
let request = self
.inner
.client
.post(&format!("/v1/table/{}/tags/version/", self.inner.name))
.json(&serde_json::json!({ "tag": tag }));

let (request_id, response) = self.inner.send(request, true).await?;
let response = self
.inner
.check_table_response(&request_id, response)
.await?;

match response.text().await {
Ok(body) => {
let value: serde_json::Value =
serde_json::from_str(&body).map_err(|e| Error::Http {
source: format!("Failed to parse tag version: {}", e).into(),
request_id: request_id.clone(),
status_code: None,
})?;

value
.get("version")
.and_then(|v| v.as_u64())
.ok_or_else(|| Error::Http {
source: format!("Invalid tag version response: {}", body).into(),
request_id,
status_code: None,
})
}
Err(err) => {
let status_code = err.status();
Err(Error::Http {
source: Box::new(err),
request_id,
status_code,
})
}
}
}

async fn create(&mut self, tag: &str, version: u64) -> Result<()> {
let request = self
.inner
.client
.post(&format!("/v1/table/{}/tags/create/", self.inner.name))
.json(&serde_json::json!({
"tag": tag,
"version": version
}));

let (request_id, response) = self.inner.send(request, true).await?;
self.inner
.check_table_response(&request_id, response)
.await?;
Ok(())
}

async fn delete(&mut self, tag: &str) -> Result<()> {
let request = self
.inner
.client
.post(&format!("/v1/table/{}/tags/delete/", self.inner.name))
.json(&serde_json::json!({ "tag": tag }));

let (request_id, response) = self.inner.send(request, true).await?;
self.inner
.check_table_response(&request_id, response)
.await?;
Ok(())
}

async fn update(&mut self, tag: &str, version: u64) -> Result<()> {
let request = self
.inner
.client
.post(&format!("/v1/table/{}/tags/update/", self.inner.name))
.json(&serde_json::json!({
"tag": tag,
"version": version
}));

let (request_id, response) = self.inner.send(request, true).await?;
self.inner
.check_table_response(&request_id, response)
.await?;
Ok(())
}
}
#[derive(Debug)]
|
||||
pub struct RemoteTable<S: HttpSend = Sender> {
|
||||
#[allow(dead_code)]
|
||||
@@ -83,7 +218,7 @@ impl<S: HttpSend> RemoteTable<S> {
|
||||
let body = serde_json::json!({ "version": version });
|
||||
request = request.json(&body);
|
||||
|
||||
let (request_id, response) = self.client.send(request, true).await?;
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
|
||||
let response = self.check_table_response(&request_id, response).await?;
|
||||
|
||||
@@ -127,6 +262,61 @@ impl<S: HttpSend> RemoteTable<S> {
|
||||
Ok(reqwest::Body::wrap_stream(body_stream))
|
||||
}
|
||||
|
||||
/// Buffer the reader into memory
|
||||
async fn buffer_reader<R: RecordBatchReader + ?Sized>(
|
||||
reader: &mut R,
|
||||
) -> Result<(SchemaRef, Vec<RecordBatch>)> {
|
||||
let schema = reader.schema();
|
||||
let mut batches = Vec::new();
|
||||
for batch in reader {
|
||||
batches.push(batch?);
|
||||
}
|
||||
Ok((schema, batches))
|
||||
}
|
||||
|
||||
/// Create a new RecordBatchReader from buffered data
|
||||
fn make_reader(schema: SchemaRef, batches: Vec<RecordBatch>) -> impl RecordBatchReader {
|
||||
let iter = batches.into_iter().map(Ok);
|
||||
RecordBatchIterator::new(iter, schema)
|
||||
}
|
||||
|
||||
async fn send(&self, req: RequestBuilder, with_retry: bool) -> Result<(String, Response)> {
|
||||
let res = if with_retry {
|
||||
self.client.send_with_retry(req, None, true).await?
|
||||
} else {
|
||||
self.client.send(req).await?
|
||||
};
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Send the request with streaming body.
|
||||
/// This will use retries if with_retry is set and the number of configured retries is > 0.
|
||||
/// If retries are enabled, the stream will be buffered into memory.
|
||||
async fn send_streaming(
|
||||
&self,
|
||||
req: RequestBuilder,
|
||||
mut data: Box<dyn RecordBatchReader + Send>,
|
||||
with_retry: bool,
|
||||
) -> Result<(String, Response)> {
|
||||
if !with_retry || self.client.retry_config.retries == 0 {
|
||||
let body = Self::reader_as_body(data)?;
|
||||
return self.client.send(req.body(body)).await;
|
||||
}
|
||||
|
||||
// to support retries, buffer into memory and clone the batches on each retry
|
||||
let (schema, batches) = Self::buffer_reader(&mut *data).await?;
|
||||
let make_body = Box::new(move || {
|
||||
let reader = Self::make_reader(schema.clone(), batches.clone());
|
||||
Self::reader_as_body(Box::new(reader))
|
||||
});
|
||||
let res = self
|
||||
.client
|
||||
.send_with_retry(req, Some(make_body), false)
|
||||
.await?;
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
async fn check_table_response(
|
||||
&self,
|
||||
request_id: &str,
|
||||
@@ -168,7 +358,8 @@ impl<S: HttpSend> RemoteTable<S> {
|
||||
}
|
||||
|
||||
// Server requires k.
|
||||
let limit = params.limit.unwrap_or(usize::MAX);
|
||||
// use isize::MAX as usize to avoid overflow: https://github.com/lancedb/lancedb/issues/2211
|
||||
let limit = params.limit.unwrap_or(isize::MAX as usize);
|
||||
body["k"] = serde_json::Value::Number(serde_json::Number::from(limit));
|
||||
|
||||
if let Some(filter) = ¶ms.filter {
|
||||
@@ -353,7 +544,7 @@ impl<S: HttpSend> RemoteTable<S> {
|
||||
.collect();
|
||||
|
||||
let futures = requests.into_iter().map(|req| async move {
|
||||
let (request_id, response) = self.client.send(req, true).await?;
|
||||
let (request_id, response) = self.send(req, true).await?;
|
||||
self.read_arrow_stream(&request_id, response).await
|
||||
});
|
||||
let streams = futures::future::try_join_all(futures);
|
||||
@@ -471,7 +662,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
let body = serde_json::json!({ "version": version });
|
||||
request = request.json(&body);
|
||||
|
||||
let (request_id, response) = self.client.send(request, true).await?;
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
self.checkout_latest().await?;
|
||||
Ok(())
|
||||
@@ -481,7 +672,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
let request = self
|
||||
.client
|
||||
.post(&format!("/v1/table/{}/version/list/", self.name));
|
||||
let (request_id, response) = self.client.send(request, true).await?;
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
let response = self.check_table_response(&request_id, response).await?;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -527,7 +718,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
request = request.json(&body);
|
||||
}
|
||||
|
||||
let (request_id, response) = self.client.send(request, true).await?;
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
|
||||
let response = self.check_table_response(&request_id, response).await?;
|
||||
|
||||
@@ -545,12 +736,10 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
data: Box<dyn RecordBatchReader + Send>,
|
||||
) -> Result<()> {
|
||||
self.check_mutable().await?;
|
||||
let body = Self::reader_as_body(data)?;
|
||||
let mut request = self
|
||||
.client
|
||||
.post(&format!("/v1/table/{}/insert/", self.name))
|
||||
.header(CONTENT_TYPE, ARROW_STREAM_CONTENT_TYPE)
|
||||
.body(body);
|
||||
.header(CONTENT_TYPE, ARROW_STREAM_CONTENT_TYPE);
|
||||
|
||||
match add.mode {
|
||||
AddDataMode::Append => {}
|
||||
@@ -559,8 +748,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
}
|
||||
}
|
||||
|
||||
let (request_id, response) = self.client.send(request, false).await?;
|
||||
|
||||
let (request_id, response) = self.send_streaming(request, data, true).await?;
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
|
||||
Ok(())
|
||||
@@ -628,7 +816,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let futures = requests.into_iter().map(|req| async move {
|
||||
let (request_id, response) = self.client.send(req, true).await?;
|
||||
let (request_id, response) = self.send(req, true).await?;
|
||||
let response = self.check_table_response(&request_id, response).await?;
|
||||
let body = response.text().await.err_to_http(request_id.clone())?;
|
||||
|
||||
@@ -670,7 +858,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
.collect();
|
||||
|
||||
let futures = requests.into_iter().map(|req| async move {
|
||||
let (request_id, response) = self.client.send(req, true).await?;
|
||||
let (request_id, response) = self.send(req, true).await?;
|
||||
let response = self.check_table_response(&request_id, response).await?;
|
||||
let body = response.text().await.err_to_http(request_id.clone())?;
|
||||
|
||||
@@ -712,7 +900,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
"predicate": update.filter,
|
||||
}));
|
||||
|
||||
let (request_id, response) = self.client.send(request, false).await?;
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
|
||||
@@ -726,7 +914,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
.client
|
||||
.post(&format!("/v1/table/{}/delete/", self.name))
|
||||
.json(&body);
|
||||
let (request_id, response) = self.client.send(request, false).await?;
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -812,7 +1000,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
|
||||
let request = request.json(&body);
|
||||
|
||||
let (request_id, response) = self.client.send(request, false).await?;
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
|
||||
@@ -836,21 +1024,31 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
new_data: Box<dyn RecordBatchReader + Send>,
|
||||
) -> Result<()> {
|
||||
self.check_mutable().await?;
|
||||
|
||||
let query = MergeInsertRequest::try_from(params)?;
|
||||
let body = Self::reader_as_body(new_data)?;
|
||||
let request = self
|
||||
.client
|
||||
.post(&format!("/v1/table/{}/merge_insert/", self.name))
|
||||
.query(&query)
|
||||
.header(CONTENT_TYPE, ARROW_STREAM_CONTENT_TYPE)
|
||||
.body(body);
|
||||
.header(CONTENT_TYPE, ARROW_STREAM_CONTENT_TYPE);
|
||||
|
||||
let (request_id, response) = self.client.send(request, false).await?;
|
||||
let (request_id, response) = self.send_streaming(request, new_data, true).await?;
|
||||
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn tags(&self) -> Result<Box<dyn Tags + '_>> {
|
||||
Ok(Box::new(RemoteTags { inner: self }))
|
||||
}
|
||||
async fn checkout_tag(&self, tag: &str) -> Result<()> {
|
||||
let tags = self.tags().await?;
|
||||
let version = tags.get_version(tag).await?;
|
||||
let mut write_guard = self.version.write().await;
|
||||
*write_guard = Some(version);
|
||||
Ok(())
|
||||
}
|
||||
async fn optimize(&self, _action: OptimizeAction) -> Result<OptimizeStats> {
|
||||
self.check_mutable().await?;
|
||||
Err(Error::NotSupported {
|
||||
@@ -879,7 +1077,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
.client
|
||||
.post(&format!("/v1/table/{}/add_columns/", self.name))
|
||||
.json(&body);
|
||||
let (request_id, response) = self.client.send(request, false).await?;
|
||||
let (request_id, response) = self.send(request, true).await?; // todo:
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -918,7 +1116,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
.client
|
||||
.post(&format!("/v1/table/{}/alter_columns/", self.name))
|
||||
.json(&body);
|
||||
let (request_id, response) = self.client.send(request, false).await?;
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -930,7 +1128,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
.client
|
||||
.post(&format!("/v1/table/{}/drop_columns/", self.name))
|
||||
.json(&body);
|
||||
let (request_id, response) = self.client.send(request, false).await?;
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -944,7 +1142,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
let body = serde_json::json!({ "version": version });
|
||||
request = request.json(&body);
|
||||
|
||||
let (request_id, response) = self.client.send(request, true).await?;
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
let response = self.check_table_response(&request_id, response).await?;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -1001,7 +1199,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
let body = serde_json::json!({ "version": version });
|
||||
request = request.json(&body);
|
||||
|
||||
let (request_id, response) = self.client.send(request, true).await?;
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
|
||||
if response.status() == StatusCode::NOT_FOUND {
|
||||
return Ok(None);
|
||||
@@ -1011,7 +1209,6 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
|
||||
let body = response.text().await.err_to_http(request_id.clone())?;
|
||||
|
||||
println!("body: {:?}", body);
|
||||
let stats = serde_json::from_str(&body).map_err(|e| Error::Http {
|
||||
source: format!("Failed to parse index statistics: {}", e).into(),
|
||||
request_id,
|
||||
@@ -1026,7 +1223,7 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
|
||||
"/v1/table/{}/index/{}/drop/",
|
||||
self.name, index_name
|
||||
));
|
||||
let (request_id, response) = self.client.send(request, true).await?;
|
||||
let (request_id, response) = self.send(request, true).await?;
|
||||
self.check_table_response(&request_id, response).await?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1487,6 +1684,42 @@ mod tests {
|
||||
assert_eq!(&body, &expected_body);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_merge_insert_retries_on_409() {
|
||||
let batch = RecordBatch::try_new(
|
||||
Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)])),
|
||||
vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
|
||||
)
|
||||
.unwrap();
|
||||
let data = Box::new(RecordBatchIterator::new(
|
||||
[Ok(batch.clone())],
|
||||
batch.schema(),
|
||||
));
|
||||
|
||||
// Default parameters
|
||||
let table = Table::new_with_handler("my_table", |request| {
|
||||
assert_eq!(request.method(), "POST");
|
||||
assert_eq!(request.url().path(), "/v1/table/my_table/merge_insert/");
|
||||
|
||||
let params = request.url().query_pairs().collect::<HashMap<_, _>>();
|
||||
assert_eq!(params["on"], "some_col");
|
||||
assert_eq!(params["when_matched_update_all"], "false");
|
||||
assert_eq!(params["when_not_matched_insert_all"], "false");
|
||||
assert_eq!(params["when_not_matched_by_source_delete"], "false");
|
||||
assert!(!params.contains_key("when_matched_update_all_filt"));
|
||||
assert!(!params.contains_key("when_not_matched_by_source_delete_filt"));
|
||||
|
||||
http::Response::builder().status(409).body("").unwrap()
|
||||
});
|
||||
|
||||
let e = table
|
||||
.merge_insert(&["some_col"])
|
||||
.execute(data)
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert!(e.to_string().contains("Hit retry limit"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_delete() {
|
||||
let table = Table::new_with_handler("my_table", |request| {
|
||||
@@ -1528,7 +1761,7 @@ mod tests {
|
||||
let body = request.body().unwrap().as_bytes().unwrap();
|
||||
let body: serde_json::Value = serde_json::from_slice(body).unwrap();
|
||||
let expected_body = serde_json::json!({
|
||||
"k": usize::MAX,
|
||||
"k": isize::MAX as usize,
|
||||
"prefilter": true,
|
||||
"vector": [], // Empty vector means no vector query.
|
||||
"version": null,
|
||||
|
||||
@@ -81,6 +81,7 @@ pub mod merge;
|
||||
use crate::index::waiter::wait_for_index;
|
||||
pub use chrono::Duration;
|
||||
pub use lance::dataset::optimize::CompactionOptions;
|
||||
pub use lance::dataset::refs::{TagContents, Tags as LanceTags};
|
||||
pub use lance::dataset::scanner::DatasetRecordBatchStream;
|
||||
pub use lance_index::optimize::OptimizeOptions;
|
||||
|
||||
@@ -401,6 +402,24 @@ pub enum AnyQuery {
|
||||
VectorQuery(VectorQueryRequest),
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait Tags: Send + Sync {
|
||||
/// List the tags of the table.
|
||||
async fn list(&self) -> Result<HashMap<String, TagContents>>;
|
||||
|
||||
/// Get the version of the table referenced by a tag.
|
||||
async fn get_version(&self, tag: &str) -> Result<u64>;
|
||||
|
||||
/// Create a new tag for the given version of the table.
|
||||
async fn create(&mut self, tag: &str, version: u64) -> Result<()>;
|
||||
|
||||
/// Delete a tag from the table.
|
||||
async fn delete(&mut self, tag: &str) -> Result<()>;
|
||||
|
||||
/// Update an existing tag to point to a new version of the table.
|
||||
async fn update(&mut self, tag: &str, version: u64) -> Result<()>;
|
||||
}
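
The trait above is the whole tagging surface that both the native and remote tables implement; the test_tags test further down exercises it end to end. A minimal usage sketch, not taken from the diff (the tag name is hypothetical, and the public lancedb::Table / lancedb::Result names are assumed crate re-exports):

// Illustrative sketch: tag the current version, then read the tags back.
async fn pin_current_version(table: &lancedb::Table) -> lancedb::Result<()> {
    let mut tags = table.tags().await?;
    let version = table.version().await?;
    tags.create("prod-release", version).await?;   // hypothetical tag name
    assert_eq!(tags.get_version("prod-release").await?, version);
    for (name, contents) in tags.list().await? {
        println!("{name} -> version {}", contents.version);
    }
    Ok(())
}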

/// A trait for anything "table-like". This is used for both native tables (which target
/// Lance datasets) and remote tables (which target LanceDB cloud)
///
@@ -466,6 +485,8 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {
params: MergeInsertBuilder,
new_data: Box<dyn RecordBatchReader + Send>,
) -> Result<()>;
/// Gets the table tag manager.
async fn tags(&self) -> Result<Box<dyn Tags + '_>>;
/// Optimize the dataset.
async fn optimize(&self, action: OptimizeAction) -> Result<OptimizeStats>;
/// Add columns to the table.
@@ -482,6 +503,9 @@ pub trait BaseTable: std::fmt::Display + std::fmt::Debug + Send + Sync {
async fn version(&self) -> Result<u64>;
/// Checkout a specific version of the table.
async fn checkout(&self, version: u64) -> Result<()>;
/// Checkout a table version referenced by a tag.
/// Tags provide a human-readable way to reference specific versions of the table.
async fn checkout_tag(&self, tag: &str) -> Result<()>;
/// Checkout the latest version of the table.
async fn checkout_latest(&self) -> Result<()>;
/// Restore the table to the currently checked out version.
@@ -1058,6 +1082,24 @@ impl Table {
self.inner.checkout(version).await
}

/// Checks out a specific version of the Table by tag
///
/// Any read operation on the table will now access the data at the version referenced by the tag.
/// As a consequence, calling this method will disable any read consistency interval
/// that was previously set.
///
/// This is a read-only operation that turns the table into a sort of "view"
/// or "detached head". Other table instances will not be affected. To make the change
/// permanent you can use the `[Self::restore]` method.
///
/// Any operation that modifies the table will fail while the table is in a checked
/// out state.
///
/// To return the table to a normal state use `[Self::checkout_latest]`
pub async fn checkout_tag(&self, tag: &str) -> Result<()> {
self.inner.checkout_tag(tag).await
}
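
As the doc comment explains, checking out a tag is a read-only, detached-head style operation on this handle only. A short sketch of the intended round trip (illustrative; the tag name is hypothetical and count_rows stands in for any read call):

// Illustrative sketch of the workflow described above.
async fn inspect_tagged_version(table: &lancedb::Table) -> lancedb::Result<()> {
    table.checkout_tag("prod-release").await?; // reads now see the tagged version
    let rows = table.count_rows(None).await?;  // any read works while checked out
    println!("rows at prod-release: {rows}");
    table.checkout_latest().await?;            // return to the normal, writable state
    Ok(())
}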

/// Ensures the table is pointing at the latest version
///
/// This can be used to manually update a table when the read_consistency_interval is None
@@ -1144,6 +1186,11 @@ impl Table {
self.inner.wait_for_index(index_names, timeout).await
}

/// Get the tags manager.
pub async fn tags(&self) -> Result<Box<dyn Tags + '_>> {
self.inner.tags().await
}

// Take many execution plans and map them into a single plan that adds
// a query_index column and unions them.
pub(crate) fn multi_vector_plan(
@@ -1196,6 +1243,35 @@ impl Table {
}
}

pub struct NativeTags {
inner: LanceTags,
}
#[async_trait]
impl Tags for NativeTags {
async fn list(&self) -> Result<HashMap<String, TagContents>> {
Ok(self.inner.list().await?)
}

async fn get_version(&self, tag: &str) -> Result<u64> {
Ok(self.inner.get_version(tag).await?)
}

async fn create(&mut self, tag: &str, version: u64) -> Result<()> {
self.inner.create(tag, version).await?;
Ok(())
}

async fn delete(&mut self, tag: &str) -> Result<()> {
self.inner.delete(tag).await?;
Ok(())
}

async fn update(&mut self, tag: &str, version: u64) -> Result<()> {
self.inner.update(tag, version).await?;
Ok(())
}
}

impl From<NativeTable> for Table {
fn from(table: NativeTable) -> Self {
Self::new(Arc::new(table))
@@ -1940,6 +2016,10 @@ impl BaseTable for NativeTable {
self.dataset.as_time_travel(version).await
}

async fn checkout_tag(&self, tag: &str) -> Result<()> {
self.dataset.as_time_travel(tag).await
}

async fn checkout_latest(&self) -> Result<()> {
self.dataset
.as_latest(self.read_consistency_interval)
@@ -2315,6 +2395,14 @@ impl BaseTable for NativeTable {
Ok(())
}

async fn tags(&self) -> Result<Box<dyn Tags + '_>> {
let dataset = self.dataset.get().await?;

Ok(Box::new(NativeTags {
inner: dataset.tags.clone(),
}))
}

async fn optimize(&self, action: OptimizeAction) -> Result<OptimizeStats> {
let mut stats = OptimizeStats {
compaction: None,
@@ -3081,6 +3169,60 @@ mod tests {
)
}

#[tokio::test]
async fn test_tags() {
let tmp_dir = tempdir().unwrap();
let uri = tmp_dir.path().to_str().unwrap();

let conn = ConnectBuilder::new(uri)
.read_consistency_interval(Duration::from_secs(0))
.execute()
.await
.unwrap();
let table = conn
.create_table("my_table", some_sample_data())
.execute()
.await
.unwrap();
assert_eq!(table.version().await.unwrap(), 1);
table.add(some_sample_data()).execute().await.unwrap();
assert_eq!(table.version().await.unwrap(), 2);
let mut tags_manager = table.tags().await.unwrap();
let tags = tags_manager.list().await.unwrap();
assert!(tags.is_empty(), "Tags should be empty initially");
let tag1 = "tag1";
tags_manager.create(tag1, 1).await.unwrap();
assert_eq!(tags_manager.get_version(tag1).await.unwrap(), 1);
let tags = tags_manager.list().await.unwrap();
assert_eq!(tags.len(), 1);
assert!(tags.contains_key(tag1));
assert_eq!(tags.get(tag1).unwrap().version, 1);
tags_manager.create("tag2", 2).await.unwrap();
assert_eq!(tags_manager.get_version("tag2").await.unwrap(), 2);
let tags = tags_manager.list().await.unwrap();
assert_eq!(tags.len(), 2);
assert!(tags.contains_key(tag1));
assert_eq!(tags.get(tag1).unwrap().version, 1);
assert!(tags.contains_key("tag2"));
assert_eq!(tags.get("tag2").unwrap().version, 2);
// Test update and delete
table.add(some_sample_data()).execute().await.unwrap();
tags_manager.update(tag1, 3).await.unwrap();
assert_eq!(tags_manager.get_version(tag1).await.unwrap(), 3);
tags_manager.delete("tag2").await.unwrap();
let tags = tags_manager.list().await.unwrap();
assert_eq!(tags.len(), 1);
assert!(tags.contains_key(tag1));
assert_eq!(tags.get(tag1).unwrap().version, 3);
// Test checkout tag
table.add(some_sample_data()).execute().await.unwrap();
assert_eq!(table.version().await.unwrap(), 4);
table.checkout_tag(tag1).await.unwrap();
assert_eq!(table.version().await.unwrap(), 3);
table.checkout_latest().await.unwrap();
assert_eq!(table.version().await.unwrap(), 4);
}

#[tokio::test]
async fn test_create_index() {
use arrow_array::RecordBatch;

@@ -7,7 +7,7 @@ use std::{
time::{self, Duration, Instant},
};

use lance::Dataset;
use lance::{dataset::refs, Dataset};
use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};

use crate::error::Result;
@@ -83,19 +83,32 @@ impl DatasetRef {
}
}

async fn as_time_travel(&mut self, target_version: u64) -> Result<()> {
async fn as_time_travel(&mut self, target_version: impl Into<refs::Ref>) -> Result<()> {
let target_ref = target_version.into();

match self {
Self::Latest { dataset, .. } => {
let new_dataset = dataset.checkout_version(target_ref.clone()).await?;
let version_value = new_dataset.version().version;

*self = Self::TimeTravel {
dataset: dataset.checkout_version(target_version).await?,
version: target_version,
dataset: new_dataset,
version: version_value,
};
}
Self::TimeTravel { dataset, version } => {
if *version != target_version {
let should_checkout = match &target_ref {
refs::Ref::Version(target_ver) => version != target_ver,
refs::Ref::Tag(_) => true, // Always checkout for tags
};

if should_checkout {
let new_dataset = dataset.checkout_version(target_ref).await?;
let version_value = new_dataset.version().version;

*self = Self::TimeTravel {
dataset: dataset.checkout_version(target_version).await?,
version: target_version,
dataset: new_dataset,
version: version_value,
};
}
}
@@ -175,7 +188,7 @@ impl DatasetConsistencyWrapper {
write_guard.as_latest(read_consistency_interval).await
}

pub async fn as_time_travel(&self, target_version: u64) -> Result<()> {
pub async fn as_time_travel(&self, target_version: impl Into<refs::Ref>) -> Result<()> {
self.0.write().await.as_time_travel(target_version).await
}
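
The widened `impl Into<refs::Ref>` signature is what lets the same wrapper serve both numeric checkouts and the new tag checkouts; the conversions come from lance's refs module, which this hunk relies on. Roughly, the two call shapes now supported are:

// Illustrative only: both forms funnel into the same method.
// wrapper.as_time_travel(42u64).await?           -- becomes refs::Ref::Version(..)
// wrapper.as_time_travel("prod-release").await?  -- becomes refs::Ref::Tag(..)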