Mirror of https://github.com/lancedb/lancedb.git (synced 2025-12-23 05:19:58 +00:00)

Compare commits: python-v0. ... v0.22.0 (1 commit)

Commit: 04cf7a85ae
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.22.1-beta.3"
+current_version = "0.22.0"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
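For illustration, a minimal sketch of how a bump-my-version parse pattern like the one above decomposes a version string. The major/minor alternatives are taken from the hunk; the patch group is an assumed continuation, since the rest of the pattern is truncated in this diff:

```python
import re

# major/minor mirror the hunk above; the patch group is an assumption,
# as the remainder of the parse pattern is not shown in this diff.
VERSION_RE = re.compile(
    r"""(?x)
    (?P<major>0|[1-9]\d*)\.
    (?P<minor>0|[1-9]\d*)\.
    (?P<patch>0|[1-9]\d*)
    """
)

match = VERSION_RE.match("0.22.0")
assert match is not None
print(match.groupdict())  # {'major': '0', 'minor': '22', 'patch': '0'}
```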
.github/workflows/docs_test.yml (vendored) — 3 changes

@@ -24,8 +24,7 @@ env:
 jobs:
   test-python:
     name: Test doc python code
-    runs-on: warp-ubuntu-2204-x64-8x
-    timeout-minutes: 60
+    runs-on: ubuntu-24.04
     steps:
       - name: Checkout
         uses: actions/checkout@v4
.github/workflows/pypi-publish.yml (vendored) — 4 changes

@@ -56,7 +56,7 @@ jobs:
     pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
     fury_token: ${{ secrets.FURY_TOKEN }}
   mac:
-    timeout-minutes: 90
+    timeout-minutes: 60
     runs-on: ${{ matrix.config.runner }}
     strategy:
       matrix:
@@ -64,7 +64,7 @@ jobs:
         - target: x86_64-apple-darwin
          runner: macos-13
         - target: aarch64-apple-darwin
-          runner: warp-macos-14-arm64-6x
+          runner: macos-14
     env:
       MACOSX_DEPLOYMENT_TARGET: 10.15
     steps:
Cargo.lock (generated) — 231 changes

@@ -713,9 +713,9 @@ dependencies = [

[[package]]
name = "aws-sdk-s3"
-version = "1.105.0"
+version = "1.104.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c99789e929b5e1d9a5aa3fa1d81317f3a789afc796141d11b0eaafd9d9f47e38"
+checksum = "38c488cd6abb0ec9811c401894191932e941c5f91dc226043edacd0afa1634bc"
dependencies = [
 "aws-credential-types",
 "aws-runtime",

@@ -963,9 +963,9 @@ dependencies = [

[[package]]
name = "aws-smithy-runtime"
-version = "1.9.2"
+version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fa63ad37685ceb7762fa4d73d06f1d5493feb88e3f27259b9ed277f4c01b185"
+checksum = "d3946acbe1ead1301ba6862e712c7903ca9bb230bdf1fbd1b5ac54158ef2ab1f"
dependencies = [
 "aws-smithy-async",
 "aws-smithy-http",

@@ -1966,9 +1966,9 @@ dependencies = [

[[package]]
name = "datafusion"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69dfeda1633bf8ec75b068d9f6c27cdc392ffcf5ff83128d5dbab65b73c1fd02"
+checksum = "8a11e19a7ccc5bb979c95c1dceef663eab39c9061b3bbf8d1937faf0f03bf41f"
dependencies = [
 "arrow",
 "arrow-ipc",

@@ -2014,9 +2014,9 @@ dependencies = [

[[package]]
name = "datafusion-catalog"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2848fd1e85e2953116dab9cc2eb109214b0888d7bbd2230e30c07f1794f642c0"
+checksum = "94985e67cab97b1099db2a7af11f31a45008b282aba921c1e1d35327c212ec18"
dependencies = [
 "arrow",
 "async-trait",

@@ -2040,9 +2040,9 @@ dependencies = [

[[package]]
name = "datafusion-catalog-listing"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "051a1634628c2d1296d4e326823e7536640d87a118966cdaff069b68821ad53b"
+checksum = "e002df133bdb7b0b9b429d89a69aa77b35caeadee4498b2ce1c7c23a99516988"
dependencies = [
 "arrow",
 "async-trait",

@@ -2063,15 +2063,14 @@ dependencies = [

[[package]]
name = "datafusion-common"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "765e4ad4ef7a4500e389a3f1e738791b71ff4c29fd00912c2f541d62b25da096"
+checksum = "e13242fc58fd753787b0a538e5ae77d356cb9d0656fa85a591a33c5f106267f6"
dependencies = [
 "ahash",
 "arrow",
 "arrow-ipc",
 "base64 0.22.1",
 "chrono",
 "half",
 "hashbrown 0.14.5",
 "indexmap 2.11.0",

@@ -2086,9 +2085,9 @@ dependencies = [

[[package]]
name = "datafusion-common-runtime"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40a2ae8393051ce25d232a6065c4558ab5a535c9637d5373bacfd464ac88ea12"
+checksum = "d2239f964e95c3a5d6b4a8cde07e646de8995c1396a7fd62c6e784f5341db499"
dependencies = [
 "futures",
 "log",

@@ -2097,9 +2096,9 @@ dependencies = [

[[package]]
name = "datafusion-datasource"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90cd841a77f378bc1a5c4a1c37345e1885a9203b008203f9f4b3a769729bf330"
+checksum = "2cf792579bc8bf07d1b2f68c2d5382f8a63679cce8fbebfd4ba95742b6e08864"
dependencies = [
 "arrow",
 "async-trait",

@@ -2125,9 +2124,9 @@ dependencies = [

[[package]]
name = "datafusion-datasource-csv"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77f4a2c64939c6f0dd15b246723a699fa30d59d0133eb36a86e8ff8c6e2a8dc6"
+checksum = "cfc114f9a1415174f3e8d2719c371fc72092ef2195a7955404cfe6b2ba29a706"
dependencies = [
 "arrow",
 "async-trait",

@@ -2150,9 +2149,9 @@ dependencies = [

[[package]]
name = "datafusion-datasource-json"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11387aaf931b2993ad9273c63ddca33f05aef7d02df9b70fb757429b4b71cdae"
+checksum = "d88dd5e215c420a52362b9988ecd4cefd71081b730663d4f7d886f706111fc75"
dependencies = [
 "arrow",
 "async-trait",

@@ -2175,15 +2174,15 @@ dependencies = [

[[package]]
name = "datafusion-doc"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ff336d1d755399753a9e4fbab001180e346fc8bfa063a97f1214b82274c00f8"
+checksum = "e0e7b648387b0c1937b83cb328533c06c923799e73a9e3750b762667f32662c0"

[[package]]
name = "datafusion-execution"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "042ea192757d1b2d7dcf71643e7ff33f6542c7704f00228d8b85b40003fd8e0f"
+checksum = "9609d83d52ff8315283c6dad3b97566e877d8f366fab4c3297742f33dcd636c7"
dependencies = [
 "arrow",
 "dashmap",

@@ -2200,12 +2199,11 @@ dependencies = [

[[package]]
name = "datafusion-expr"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "025222545d6d7fab71e2ae2b356526a1df67a2872222cbae7535e557a42abd2e"
+checksum = "e75230cd67f650ef0399eb00f54d4a073698f2c0262948298e5299fc7324da63"
dependencies = [
 "arrow",
 "async-trait",
 "chrono",
 "datafusion-common",
 "datafusion-doc",

@@ -2221,9 +2219,9 @@ dependencies = [

[[package]]
name = "datafusion-expr-common"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d5c267104849d5fa6d81cf5ba88f35ecd58727729c5eb84066c25227b644ae2"
+checksum = "70fafb3a045ed6c49cfca0cd090f62cf871ca6326cc3355cb0aaf1260fa760b6"
dependencies = [
 "arrow",
 "datafusion-common",

@@ -2234,9 +2232,9 @@ dependencies = [

[[package]]
name = "datafusion-functions"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c620d105aa208fcee45c588765483314eb415f5571cfd6c1bae3a59c5b4d15bb"
+checksum = "cdf9a9cf655265861a20453b1e58357147eab59bdc90ce7f2f68f1f35104d3bb"
dependencies = [
 "arrow",
 "arrow-buffer",

@@ -2263,9 +2261,9 @@ dependencies = [

[[package]]
name = "datafusion-functions-aggregate"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35f61d5198a35ed368bf3aacac74f0d0fa33de7a7cb0c57e9f68ab1346d2f952"
+checksum = "7f07e49733d847be0a05235e17b884d326a2fd402c97a89fe8bcf0bfba310005"
dependencies = [
 "ahash",
 "arrow",

@@ -2284,9 +2282,9 @@ dependencies = [

[[package]]
name = "datafusion-functions-aggregate-common"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13efdb17362be39b5024f6da0d977ffe49c0212929ec36eec550e07e2bc7812f"
+checksum = "4512607e10d72b0b0a1dc08f42cb5bd5284cb8348b7fea49dc83409493e32b1b"
dependencies = [
 "ahash",
 "arrow",

@@ -2297,9 +2295,9 @@ dependencies = [

[[package]]
name = "datafusion-functions-nested"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9187678af567d7c9e004b72a0b6dc5b0a00ebf4901cb3511ed2db4effe092e66"
+checksum = "2ab331806e34f5545e5f03396e4d5068077395b1665795d8f88c14ec4f1e0b7a"
dependencies = [
 "arrow",
 "arrow-ord",

@@ -2309,7 +2307,6 @@ dependencies = [
 "datafusion-expr",
 "datafusion-functions",
 "datafusion-functions-aggregate",
 "datafusion-functions-aggregate-common",
 "datafusion-macros",
 "datafusion-physical-expr-common",
 "itertools 0.14.0",

@@ -2319,9 +2316,9 @@ dependencies = [

[[package]]
name = "datafusion-functions-table"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ecf156589cc21ef59fe39c7a9a841b4a97394549643bbfa88cc44e8588cf8fe5"
+checksum = "d4ac2c0be983a06950ef077e34e0174aa0cb9e346f3aeae459823158037ade37"
dependencies = [
 "arrow",
 "async-trait",

@@ -2335,9 +2332,9 @@ dependencies = [

[[package]]
name = "datafusion-functions-window"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "edcb25e3e369f1366ec9a261456e45b5aad6ea1c0c8b4ce546587207c501ed9e"
+checksum = "36f3d92731de384c90906941d36dcadf6a86d4128409a9c5cd916662baed5f53"
dependencies = [
 "arrow",
 "datafusion-common",

@@ -2353,9 +2350,9 @@ dependencies = [

[[package]]
name = "datafusion-functions-window-common"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8996a8e11174d0bd7c62dc2f316485affc6ae5ffd5b8a68b508137ace2310294"
+checksum = "c679f8bf0971704ec8fd4249fcbb2eb49d6a12cc3e7a840ac047b4928d3541b5"
dependencies = [
 "datafusion-common",
 "datafusion-physical-expr-common",

@@ -2363,9 +2360,9 @@ dependencies = [

[[package]]
name = "datafusion-macros"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95ee8d1be549eb7316f437035f2cec7ec42aba8374096d807c4de006a3b5d78a"
+checksum = "2821de7cb0362d12e75a5196b636a59ea3584ec1e1cc7dc6f5e34b9e8389d251"
dependencies = [
 "datafusion-expr",
 "quote",

@@ -2374,15 +2371,14 @@ dependencies = [

[[package]]
name = "datafusion-optimizer"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9fa98671458254928af854e5f6c915e66b860a8bde505baea0ff2892deab74d"
+checksum = "1594c7a97219ede334f25347ad8d57056621e7f4f35a0693c8da876e10dd6a53"
dependencies = [
 "arrow",
 "chrono",
 "datafusion-common",
 "datafusion-expr",
 "datafusion-expr-common",
 "datafusion-physical-expr",
 "indexmap 2.11.0",
 "itertools 0.14.0",

@@ -2393,9 +2389,9 @@ dependencies = [

[[package]]
name = "datafusion-physical-expr"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3515d51531cca5f7b5a6f3ea22742b71bb36fc378b465df124ff9a2fa349b002"
+checksum = "dc6da0f2412088d23f6b01929dedd687b5aee63b19b674eb73d00c3eb3c883b7"
dependencies = [
 "ahash",
 "arrow",

@@ -2415,9 +2411,9 @@ dependencies = [

[[package]]
name = "datafusion-physical-expr-common"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24485475d9c618a1d33b2a3dad003d946dc7a7bbf0354d125301abc0a5a79e3e"
+checksum = "dcb0dbd9213078a593c3fe28783beaa625a4e6c6a6c797856ee2ba234311fb96"
dependencies = [
 "ahash",
 "arrow",

@@ -2429,9 +2425,9 @@ dependencies = [

[[package]]
name = "datafusion-physical-optimizer"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9da411a0a64702f941a12af2b979434d14ec5d36c6f49296966b2c7639cbb3a"
+checksum = "6d140854b2db3ef8ac611caad12bfb2e1e1de827077429322a6188f18fc0026a"
dependencies = [
 "arrow",
 "datafusion-common",

@@ -2441,16 +2437,15 @@ dependencies = [
 "datafusion-physical-expr",
 "datafusion-physical-expr-common",
 "datafusion-physical-plan",
- "datafusion-pruning",
 "itertools 0.14.0",
 "log",
]

[[package]]
name = "datafusion-physical-plan"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a6d168282bb7b54880bb3159f89b51c047db4287f5014d60c3ef4c6e1468212b"
+checksum = "b46cbdf21a01206be76d467f325273b22c559c744a012ead5018dfe79597de08"
dependencies = [
 "ahash",
 "arrow",

@@ -2476,29 +2471,11 @@ dependencies = [
 "tokio",
]

-[[package]]
-name = "datafusion-pruning"
-version = "49.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "391a457b9d23744c53eeb89edd1027424cba100581488d89800ed841182df905"
-dependencies = [
- "arrow",
- "arrow-schema",
- "datafusion-common",
- "datafusion-datasource",
- "datafusion-expr-common",
- "datafusion-physical-expr",
- "datafusion-physical-expr-common",
- "datafusion-physical-plan",
- "itertools 0.14.0",
- "log",
-]
-
[[package]]
name = "datafusion-session"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "053201c2bb729c7938f85879034df2b5a52cfaba16f1b3b66ab8505c81b2aad3"
+checksum = "3a72733766ddb5b41534910926e8da5836622316f6283307fd9fb7e19811a59c"
dependencies = [
 "arrow",
 "async-trait",

@@ -2520,9 +2497,9 @@ dependencies = [

[[package]]
name = "datafusion-sql"
-version = "49.0.2"
+version = "48.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9082779be8ce4882189b229c0cff4393bd0808282a7194130c9f32159f185e25"
+checksum = "c5162338cdec9cc7ea13a0e6015c361acad5ec1d88d83f7c86301f789473971f"
dependencies = [
 "arrow",
 "bigdecimal",

@@ -2929,18 +2906,6 @@ version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8eb564c5c7423d25c886fb561d1e4ee69f72354d16918afa32c08811f6b6a55"

-[[package]]
-name = "fastbloom"
-version = "0.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18c1ddb9231d8554c2d6bdf4cfaabf0c59251658c68b6c95cd52dd0c513a912a"
-dependencies = [
- "getrandom 0.3.3",
- "libm",
- "rand 0.9.2",
- "siphasher",
-]
-
[[package]]
name = "fastdivide"
version = "0.4.2"

@@ -3040,9 +3005,9 @@ checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"

[[package]]
name = "fsst"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe0a0b1d16ce6b863be8ab766004d89ebf0779fd6ce31b0ef3bbc7fedaaad373"
+checksum = "6c9c2b8bb2a1aa18407a8ed0b60496288f3e01ba6d8e215d49bd85f995a12eae"
dependencies = [
 "arrow-array",
 "rand 0.9.2",

@@ -4219,9 +4184,9 @@ dependencies = [

[[package]]
name = "lance"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "42171f2af5d377e6bbcc8a8572144ee15b73a8f78ceb6160f1adeabf0d0f3e3c"
+checksum = "6bed718abdd224433ac7df789027b018157796a2038d4912423ef3e2b005a07a"
dependencies = [
 "arrow",
 "arrow-arith",

@@ -4284,9 +4249,9 @@ dependencies = [

[[package]]
name = "lance-arrow"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25ef9499a1e581112f45fbf743fdc8e24830cda0bd13396b11c71aa6e6cba083"
+checksum = "d99ea2fe8e81091008b29cb0e3b4b028328729cec8018c425f99b8e42535170d"
dependencies = [
 "arrow-array",
 "arrow-buffer",

@@ -4304,9 +4269,9 @@ dependencies = [

[[package]]
name = "lance-bitpacking"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1101fffd5b161bbdc6e932d6c0a7f94cb1752b0f8cd6d18ef9064052ab901a84"
+checksum = "c1403fee0dc51f50497122ac81cbfbb6aa17dc4cb6fd2ed85c3a6e3c5da8036f"
dependencies = [
 "arrayref",
 "paste",

@@ -4315,9 +4280,9 @@ dependencies = [

[[package]]
name = "lance-core"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "527ee5e6472d058d8c66c702fbe318a3f60f971e652e60dcfc6349bdbc9b0733"
+checksum = "fe11e18299e5d95e3f26268504d09b139d6e254493aa50fec1c95bb3ec30b64d"
dependencies = [
 "arrow-array",
 "arrow-buffer",

@@ -4352,9 +4317,9 @@ dependencies = [

[[package]]
name = "lance-datafusion"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "65a80f7f15f2d941ec7b8253625cbb8e12081ea27584dd1fbc657fb9fb377f7a"
+checksum = "dd1225086ca750870aca9e2f91869a886a3f0f5e05ed75efa5c9a813b36317a8"
dependencies = [
 "arrow",
 "arrow-array",

@@ -4383,9 +4348,9 @@ dependencies = [

[[package]]
name = "lance-datagen"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0495c8afa18f246ac4b337c47d7827560283783963dd2177862d91161478fd79"
+checksum = "5de5bea2c57fc98351f5f6fd9f68905267ae1bb674ac33c38f78a9c319106a07"
dependencies = [
 "arrow",
 "arrow-array",

@@ -4402,9 +4367,9 @@ dependencies = [

[[package]]
name = "lance-encoding"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e80e9ae49d68b95d58e77d9177f68983dce4f0803ef42840e1631b38dd66adc"
+checksum = "7480da1a6fcf204e90cf3b8c79a2843fdab0949d9afe8cd038d8726ccca725a8"
dependencies = [
 "arrow-arith",
 "arrow-array",

@@ -4432,7 +4397,6 @@ dependencies = [
 "prost-types",
 "rand 0.9.2",
 "snafu",
 "strum",
 "tokio",
 "tracing",
 "xxhash-rust",

@@ -4441,9 +4405,9 @@ dependencies = [

[[package]]
name = "lance-file"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1707f9f5097b36c82d3a8524bb41c762c80d5dfa5e32aa7bfc6a1c0847a1cce"
+checksum = "3b2c3106776198dcddbfec1df8b828edcb852ac80cc8077d7185dc1e524e3cf3"
dependencies = [
 "arrow-arith",
 "arrow-array",

@@ -4477,9 +4441,9 @@ dependencies = [

[[package]]
name = "lance-index"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28ab52586a5a7f5371a5abf4862968231f8c0232ce0780bc456f1ec16e9370f9"
+checksum = "34a3f8128c200b2d055f71c60a603e0952a248b914c2edbdea9ec7636e4d6d26"
dependencies = [
 "arrow",
 "arrow-array",

@@ -4500,7 +4464,6 @@ dependencies = [
 "datafusion-sql",
 "deepsize",
 "dirs",
- "fastbloom",
 "fst",
 "futures",
 "half",

@@ -4515,7 +4478,6 @@ dependencies = [
 "lance-io",
 "lance-linalg",
 "lance-table",
 "libm",
 "log",
 "num-traits",
 "object_store",

@@ -4532,15 +4494,14 @@ dependencies = [
 "tempfile",
 "tokio",
 "tracing",
 "twox-hash",
 "uuid",
]

[[package]]
name = "lance-io"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d606f9f6a7f8ec2cacf28dfce7b2fc39e7db9f0ec77f907b8e47c756e3dd163b"
+checksum = "eba4eac3c02e8b8834f7b23d3d3e3c89b5fb614b07569e6aef5bbc1350e94d73"
dependencies = [
 "arrow",
 "arrow-arith",

@@ -4580,9 +4541,9 @@ dependencies = [

[[package]]
name = "lance-linalg"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9f1a94a5d966ff1eae817a835e3a57b34f73300f83a43bb28e7e2806695b8ba"
+checksum = "62092af5e1c7cc2168b6abdae44eeddfb6d2ed14c2035173bef20723f84f57b4"
dependencies = [
 "arrow-array",
 "arrow-buffer",

@@ -4605,9 +4566,9 @@ dependencies = [

[[package]]
name = "lance-table"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fac5c0ca6e5c285645465b95fb99fc464a1fd22a6d4b32ae0e0760f06b4b8a7f"
+checksum = "edfa48241aa42250f2b8f90812a3c51030ece2f8226ec99c753553c04468a6f8"
dependencies = [
 "arrow",
 "arrow-array",

@@ -4645,9 +4606,9 @@ dependencies = [

[[package]]
name = "lance-testing"
-version = "0.37.0"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "384acc1dd13379a2ae24f3e3635d9c1f4fb4dc1534f7ffd2740c268f2eb73455"
+checksum = "ed6887d39beb6e358fae1f25ec3341bc7c61dc7044dd9dc9550b687b83bdc56f"
dependencies = [
 "arrow-array",
 "arrow-schema",

@@ -4658,7 +4619,7 @@ dependencies = [

[[package]]
name = "lancedb"
-version = "0.22.1-beta.3"
+version = "0.22.0-beta.1"
dependencies = [
 "arrow",
 "arrow-array",

@@ -4745,7 +4706,7 @@ dependencies = [

[[package]]
name = "lancedb-nodejs"
-version = "0.22.1-beta.3"
+version = "0.22.0-beta.1"
dependencies = [
 "arrow-array",
 "arrow-ipc",

@@ -4765,10 +4726,9 @@ dependencies = [

[[package]]
name = "lancedb-python"
-version = "0.25.1-beta.3"
+version = "0.25.0-beta.1"
dependencies = [
 "arrow",
 "async-trait",
 "env_logger",
 "futures",
 "lancedb",

@@ -7733,6 +7693,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4521174166bac1ff04fe16ef4524c70144cd29682a45978978ca3d7f4e0be11"
dependencies = [
 "log",
 "recursive",
 "sqlparser_derive",
]

@@ -7811,15 +7772,6 @@ version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"

-[[package]]
-name = "strum"
-version = "0.25.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125"
-dependencies = [
- "strum_macros",
-]
-
[[package]]
name = "strum_macros"
version = "0.25.3"

@@ -8480,9 +8432,6 @@ name = "twox-hash"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b907da542cbced5261bd3256de1b3a1bf340a3d37f93425a07362a1d687de56"
-dependencies = [
- "rand 0.9.2",
-]

[[package]]
name = "typenum"
Cargo.toml — 28 changes

@@ -15,14 +15,14 @@ categories = ["database-implementations"]
 rust-version = "1.78.0"

 [workspace.dependencies]
-lance = { "version" = "=0.37.0", default-features = false, "features" = ["dynamodb"] }
-lance-io = { "version" = "=0.37.0", default-features = false }
-lance-index = "=0.37.0"
-lance-linalg = "=0.37.0"
-lance-table = "=0.37.0"
-lance-testing = "=0.37.0"
-lance-datafusion = "=0.37.0"
-lance-encoding = "=0.37.0"
+lance = { "version" = "=0.35.0", default-features = false, "features" = ["dynamodb"] }
+lance-io = { "version" = "=0.35.0", default-features = false }
+lance-index = { "version" = "=0.35.0" }
+lance-linalg = { "version" = "=0.35.0" }
+lance-table = { "version" = "=0.35.0" }
+lance-testing = { "version" = "=0.35.0" }
+lance-datafusion = { "version" = "=0.35.0" }
+lance-encoding = { "version" = "=0.35.0" }
 # Note that this one does not include pyarrow
 arrow = { version = "55.1", optional = false }
 arrow-array = "55.1"
@@ -33,12 +33,12 @@ arrow-schema = "55.1"
 arrow-arith = "55.1"
 arrow-cast = "55.1"
 async-trait = "0"
-datafusion = { version = "49.0", default-features = false }
-datafusion-catalog = "49.0"
-datafusion-common = { version = "49.0", default-features = false }
-datafusion-execution = "49.0"
-datafusion-expr = "49.0"
-datafusion-physical-plan = "49.0"
+datafusion = { version = "48.0", default-features = false }
+datafusion-catalog = "48.0"
+datafusion-common = { version = "48.0", default-features = false }
+datafusion-execution = "48.0"
+datafusion-expr = "48.0"
+datafusion-physical-plan = "48.0"
 env_logger = "0.11"
 half = { "version" = "2.6.0", default-features = false, features = [
     "num-traits",
@@ -1,5 +1,4 @@
 import argparse
-import re
 import sys
 import json

@@ -19,12 +18,8 @@ def run_command(command: str) -> str:

 def get_latest_stable_version() -> str:
     version_line = run_command("cargo info lance | grep '^version:'")
-    # Example output: "version: 0.35.0 (latest 0.37.0)"
-    match = re.search(r'\(latest ([0-9.]+)\)', version_line)
-    if match:
-        return match.group(1)
-    # Fallback: use the first version after 'version:'
-    return version_line.split("version:")[1].split()[0].strip()
+    version = version_line.split(" ")[1].strip()
+    return version


 def get_latest_preview_version() -> str:
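As a standalone illustration of the regex-based parsing being removed in this hunk, here is a minimal sketch that runs directly against the example output quoted in the removed comment (the `cargo info lance` invocation itself is from the script above):

```python
import re

def parse_latest_stable(version_line: str) -> str:
    # Prefer the "(latest X.Y.Z)" suffix, which cargo prints when a newer
    # release exists than the one resolved locally.
    match = re.search(r"\(latest ([0-9.]+)\)", version_line)
    if match:
        return match.group(1)
    # Fallback: take the first token after "version:".
    return version_line.split("version:")[1].split()[0].strip()

print(parse_latest_stable("version: 0.35.0 (latest 0.37.0)"))  # 0.37.0
print(parse_latest_stable("version: 0.35.0"))                  # 0.35.0
```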
@@ -45,8 +45,6 @@ Any attempt to use the connection after it is closed will result in an error.

### createEmptyTable()

-#### createEmptyTable(name, schema, options)
-
```ts
abstract createEmptyTable(
    name,
@@ -56,7 +54,7 @@ abstract createEmptyTable(

Creates a new empty Table

-##### Parameters
+#### Parameters

* **name**: `string`
  The name of the table.
@@ -65,39 +63,8 @@ Creates a new empty Table
  The schema of the table

* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
  Additional options (backwards compatibility)

-##### Returns
-
-`Promise`<[`Table`](Table.md)>
-
-#### createEmptyTable(name, schema, namespace, options)
-
-```ts
-abstract createEmptyTable(
-    name,
-    schema,
-    namespace?,
-    options?): Promise<Table>
-```
-
-Creates a new empty Table
-
-##### Parameters
-
-* **name**: `string`
-  The name of the table.
-
-* **schema**: [`SchemaLike`](../type-aliases/SchemaLike.md)
-  The schema of the table
-
-* **namespace?**: `string`[]
-  The namespace to create the table in (defaults to root namespace)
-
-* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
-  Additional options
-
-##### Returns
+#### Returns

`Promise`<[`Table`](Table.md)>

@@ -105,10 +72,10 @@ Creates a new empty Table

### createTable()

-#### createTable(options, namespace)
+#### createTable(options)

```ts
-abstract createTable(options, namespace?): Promise<Table>
+abstract createTable(options): Promise<Table>
```

Creates a new Table and initialize it with new data.
@@ -118,9 +85,6 @@ Creates a new Table and initialize it with new data.

* **options**: `object` & `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
  The options object.

-* **namespace?**: `string`[]
-  The namespace to create the table in (defaults to root namespace)
-
##### Returns

`Promise`<[`Table`](Table.md)>
@@ -146,38 +110,6 @@ Creates a new Table and initialize it with new data.
  to be inserted into the table

* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
  Additional options (backwards compatibility)

-##### Returns
-
-`Promise`<[`Table`](Table.md)>
-
-#### createTable(name, data, namespace, options)
-
-```ts
-abstract createTable(
-    name,
-    data,
-    namespace?,
-    options?): Promise<Table>
-```
-
-Creates a new Table and initialize it with new data.
-
-##### Parameters
-
-* **name**: `string`
-  The name of the table.
-
-* **data**: [`TableLike`](../type-aliases/TableLike.md) \| `Record`<`string`, `unknown`>[]
-  Non-empty Array of Records
-  to be inserted into the table
-
-* **namespace?**: `string`[]
-  The namespace to create the table in (defaults to root namespace)
-
-* **options?**: `Partial`<[`CreateTableOptions`](../interfaces/CreateTableOptions.md)>
-  Additional options
-
##### Returns

@@ -202,16 +134,11 @@ Return a brief description of the connection
### dropAllTables()

```ts
-abstract dropAllTables(namespace?): Promise<void>
+abstract dropAllTables(): Promise<void>
```

Drop all tables in the database.

-#### Parameters
-
-* **namespace?**: `string`[]
-  The namespace to drop tables from (defaults to root namespace).
-
#### Returns

`Promise`<`void`>
@@ -221,7 +148,7 @@ Drop all tables in the database.
### dropTable()

```ts
-abstract dropTable(name, namespace?): Promise<void>
+abstract dropTable(name): Promise<void>
```

Drop an existing table.
@@ -231,9 +158,6 @@ Drop an existing table.
* **name**: `string`
  The name of the table to drop.

-* **namespace?**: `string`[]
-  The namespace of the table (defaults to root namespace).
-
#### Returns

`Promise`<`void`>
@@ -257,10 +181,7 @@ Return true if the connection has not been closed
### openTable()

```ts
-abstract openTable(
-    name,
-    namespace?,
-    options?): Promise<Table>
+abstract openTable(name, options?): Promise<Table>
```

Open a table in the database.
@@ -270,11 +191,7 @@ Open a table in the database.
* **name**: `string`
  The name of the table

-* **namespace?**: `string`[]
-  The namespace of the table (defaults to root namespace)
-
* **options?**: `Partial`<[`OpenTableOptions`](../interfaces/OpenTableOptions.md)>
  Additional options

#### Returns
@@ -284,8 +201,6 @@ Open a table in the database.

### tableNames()

-#### tableNames(options)
-
```ts
abstract tableNames(options?): Promise<string[]>
```
@@ -294,35 +209,12 @@ List all the table names in this database.

Tables will be returned in lexicographical order.

-##### Parameters
-
-* **options?**: `Partial`<[`TableNamesOptions`](../interfaces/TableNamesOptions.md)>
-  options to control the
-  paging / start point (backwards compatibility)
-
-##### Returns
-
-`Promise`<`string`[]>
-
-#### tableNames(namespace, options)
-
-```ts
-abstract tableNames(namespace?, options?): Promise<string[]>
-```
-
-List all the table names in this database.
-
-Tables will be returned in lexicographical order.
-
-##### Parameters
-
-* **namespace?**: `string`[]
-  The namespace to list tables from (defaults to root namespace)
+#### Parameters

* **options?**: `Partial`<[`TableNamesOptions`](../interfaces/TableNamesOptions.md)>
  options to control the
  paging / start point

-##### Returns
+#### Returns

`Promise`<`string`[]>
@@ -1,85 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / HeaderProvider
-
-# Class: `abstract` HeaderProvider
-
-Abstract base class for providing custom headers for each request.
-
-Users can implement this interface to provide dynamic headers for various purposes
-such as authentication (OAuth tokens, API keys), request tracking (correlation IDs),
-custom metadata, or any other header-based requirements. The provider is called
-before each request to ensure fresh header values are always used.
-
-## Examples
-
-Simple JWT token provider:
-```typescript
-class JWTProvider extends HeaderProvider {
-  constructor(private token: string) {
-    super();
-  }
-
-  getHeaders(): Record<string, string> {
-    return { authorization: `Bearer ${this.token}` };
-  }
-}
-```
-
-Provider with request tracking:
-```typescript
-class RequestTrackingProvider extends HeaderProvider {
-  constructor(private sessionId: string) {
-    super();
-  }
-
-  getHeaders(): Record<string, string> {
-    return {
-      "X-Session-Id": this.sessionId,
-      "X-Request-Id": `req-${Date.now()}`
-    };
-  }
-}
-```
-
-## Extended by
-
-- [`StaticHeaderProvider`](StaticHeaderProvider.md)
-- [`OAuthHeaderProvider`](OAuthHeaderProvider.md)
-
-## Constructors
-
-### new HeaderProvider()
-
-```ts
-new HeaderProvider(): HeaderProvider
-```
-
-#### Returns
-
-[`HeaderProvider`](HeaderProvider.md)
-
-## Methods
-
-### getHeaders()
-
-```ts
-abstract getHeaders(): Record<string, string>
-```
-
-Get the latest headers to be added to requests.
-
-This method is called before each request to the remote LanceDB server.
-Implementations should return headers that will be merged with existing headers.
-
-#### Returns
-
-`Record`<`string`, `string`>
-
-Dictionary of header names to values to add to the request.
-
-#### Throws
-
-If unable to fetch headers, the exception will be propagated and the request will fail.
@@ -1,29 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / NativeJsHeaderProvider
-
-# Class: NativeJsHeaderProvider
-
-JavaScript HeaderProvider implementation that wraps a JavaScript callback.
-This is the only native header provider - all header provider implementations
-should provide a JavaScript function that returns headers.
-
-## Constructors
-
-### new NativeJsHeaderProvider()
-
-```ts
-new NativeJsHeaderProvider(getHeadersCallback): NativeJsHeaderProvider
-```
-
-Create a new JsHeaderProvider from a JavaScript callback
-
-#### Parameters
-
-* **getHeadersCallback**
-
-#### Returns
-
-[`NativeJsHeaderProvider`](NativeJsHeaderProvider.md)
@@ -1,108 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / OAuthHeaderProvider
-
-# Class: OAuthHeaderProvider
-
-Example implementation: OAuth token provider with automatic refresh.
-
-This is an example implementation showing how to manage OAuth tokens
-with automatic refresh when they expire.
-
-## Example
-
-```typescript
-async function fetchToken(): Promise<TokenResponse> {
-  const response = await fetch("https://oauth.example.com/token", {
-    method: "POST",
-    body: JSON.stringify({
-      grant_type: "client_credentials",
-      client_id: "your-client-id",
-      client_secret: "your-client-secret"
-    }),
-    headers: { "Content-Type": "application/json" }
-  });
-  const data = await response.json();
-  return {
-    accessToken: data.access_token,
-    expiresIn: data.expires_in
-  };
-}
-
-const provider = new OAuthHeaderProvider(fetchToken);
-const headers = provider.getHeaders();
-// Returns: {"authorization": "Bearer <your-token>"}
-```
-
-## Extends
-
-- [`HeaderProvider`](HeaderProvider.md)
-
-## Constructors
-
-### new OAuthHeaderProvider()
-
-```ts
-new OAuthHeaderProvider(tokenFetcher, refreshBufferSeconds): OAuthHeaderProvider
-```
-
-Initialize the OAuth provider.
-
-#### Parameters
-
-* **tokenFetcher**
-  Function to fetch new tokens. Should return object with 'accessToken' and optionally 'expiresIn'.
-
-* **refreshBufferSeconds**: `number` = `300`
-  Seconds before expiry to refresh token. Default 300 (5 minutes).
-
-#### Returns
-
-[`OAuthHeaderProvider`](OAuthHeaderProvider.md)
-
-#### Overrides
-
-[`HeaderProvider`](HeaderProvider.md).[`constructor`](HeaderProvider.md#constructors)
-
-## Methods
-
-### getHeaders()
-
-```ts
-getHeaders(): Record<string, string>
-```
-
-Get OAuth headers, refreshing token if needed.
-Note: This is synchronous for now as the Rust implementation expects sync.
-In a real implementation, this would need to handle async properly.
-
-#### Returns
-
-`Record`<`string`, `string`>
-
-Headers with Bearer token authorization.
-
-#### Throws
-
-If unable to fetch or refresh token.
-
-#### Overrides
-
-[`HeaderProvider`](HeaderProvider.md).[`getHeaders`](HeaderProvider.md#getheaders)
-
-***
-
-### refreshToken()
-
-```ts
-refreshToken(): Promise<void>
-```
-
-Manually refresh the token.
-Call this before using getHeaders() to ensure token is available.
-
-#### Returns
-
-`Promise`<`void`>
@@ -1,70 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / StaticHeaderProvider
-
-# Class: StaticHeaderProvider
-
-Example implementation: A simple header provider that returns static headers.
-
-This is an example implementation showing how to create a HeaderProvider
-for cases where headers don't change during the session.
-
-## Example
-
-```typescript
-const provider = new StaticHeaderProvider({
-  authorization: "Bearer my-token",
-  "X-Custom-Header": "custom-value"
-});
-const headers = provider.getHeaders();
-// Returns: {authorization: 'Bearer my-token', 'X-Custom-Header': 'custom-value'}
-```
-
-## Extends
-
-- [`HeaderProvider`](HeaderProvider.md)
-
-## Constructors
-
-### new StaticHeaderProvider()
-
-```ts
-new StaticHeaderProvider(headers): StaticHeaderProvider
-```
-
-Initialize with static headers.
-
-#### Parameters
-
-* **headers**: `Record`<`string`, `string`>
-  Headers to return for every request.
-
-#### Returns
-
-[`StaticHeaderProvider`](StaticHeaderProvider.md)
-
-#### Overrides
-
-[`HeaderProvider`](HeaderProvider.md).[`constructor`](HeaderProvider.md#constructors)
-
-## Methods
-
-### getHeaders()
-
-```ts
-getHeaders(): Record<string, string>
-```
-
-Return the static headers.
-
-#### Returns
-
-`Record`<`string`, `string`>
-
-Copy of the static headers.
-
-#### Overrides
-
-[`HeaderProvider`](HeaderProvider.md).[`getHeaders`](HeaderProvider.md#getheaders)
@@ -6,14 +6,13 @@

# Function: connect()

-## connect(uri, options, session, headerProvider)
+## connect(uri, options, session)

```ts
function connect(
    uri,
    options?,
-    session?,
-    headerProvider?): Promise<Connection>
+    session?): Promise<Connection>
```

Connect to a LanceDB instance at the given URI.
@@ -35,8 +34,6 @@ Accepted formats:

* **session?**: [`Session`](../classes/Session.md)

-* **headerProvider?**: [`HeaderProvider`](../classes/HeaderProvider.md) \| () => `Record`<`string`, `string`> \| () => `Promise`<`Record`<`string`, `string`>>
-
### Returns

`Promise`<[`Connection`](../classes/Connection.md)>
@@ -58,18 +55,6 @@ const conn = await connect(
  });
```

-Using with a header provider for per-request authentication:
-```ts
-const provider = new StaticHeaderProvider({
-  "X-API-Key": "my-key"
-});
-const conn = await connectWithHeaderProvider(
-  "db://host:port",
-  options,
-  provider
-);
-```
-
## connect(options)

```ts
@@ -20,20 +20,16 @@
 - [BooleanQuery](classes/BooleanQuery.md)
 - [BoostQuery](classes/BoostQuery.md)
 - [Connection](classes/Connection.md)
-- [HeaderProvider](classes/HeaderProvider.md)
 - [Index](classes/Index.md)
 - [MakeArrowTableOptions](classes/MakeArrowTableOptions.md)
 - [MatchQuery](classes/MatchQuery.md)
 - [MergeInsertBuilder](classes/MergeInsertBuilder.md)
 - [MultiMatchQuery](classes/MultiMatchQuery.md)
-- [NativeJsHeaderProvider](classes/NativeJsHeaderProvider.md)
-- [OAuthHeaderProvider](classes/OAuthHeaderProvider.md)
 - [PhraseQuery](classes/PhraseQuery.md)
 - [Query](classes/Query.md)
 - [QueryBase](classes/QueryBase.md)
 - [RecordBatchIterator](classes/RecordBatchIterator.md)
 - [Session](classes/Session.md)
-- [StaticHeaderProvider](classes/StaticHeaderProvider.md)
 - [Table](classes/Table.md)
 - [TagContents](classes/TagContents.md)
 - [Tags](classes/Tags.md)
@@ -78,7 +74,6 @@
 - [TableNamesOptions](interfaces/TableNamesOptions.md)
 - [TableStatistics](interfaces/TableStatistics.md)
 - [TimeoutConfig](interfaces/TimeoutConfig.md)
-- [TokenResponse](interfaces/TokenResponse.md)
 - [UpdateOptions](interfaces/UpdateOptions.md)
 - [UpdateResult](interfaces/UpdateResult.md)
 - [Version](interfaces/Version.md)
@@ -16,14 +16,6 @@ optional extraHeaders: Record<string, string>;

***

-### idDelimiter?
-
-```ts
-optional idDelimiter: string;
-```
-
-***
-
### retryConfig?

```ts
@@ -1,25 +0,0 @@
-[**@lancedb/lancedb**](../README.md) • **Docs**
-
-***
-
-[@lancedb/lancedb](../globals.md) / TokenResponse
-
-# Interface: TokenResponse
-
-Token response from OAuth provider.
-
-## Properties
-
-### accessToken
-
-```ts
-accessToken: string;
-```
-
-***
-
-### expiresIn?
-
-```ts
-optional expiresIn: number;
-```
@@ -16,7 +16,6 @@ pub trait JNIEnvExt {
    fn get_integers(&mut self, obj: &JObject) -> Result<Vec<i32>>;

    /// Get strings from Java List<String> object.
-    #[allow(dead_code)]
    fn get_strings(&mut self, obj: &JObject) -> Result<Vec<String>>;

    /// Get strings from Java String[] object.

@@ -6,7 +6,6 @@ use jni::JNIEnv;

use crate::Result;

-#[allow(dead_code)]
pub trait FromJObject<T> {
    fn extract(&self) -> Result<T>;
}
@@ -40,7 +39,6 @@ impl FromJObject<f64> for JObject<'_> {
    }
}

-#[allow(dead_code)]
pub trait FromJString {
    fn extract(&self, env: &mut JNIEnv) -> Result<String>;
}
@@ -68,7 +66,6 @@ pub trait JMapExt {
    fn get_f64(&self, env: &mut JNIEnv, key: &str) -> Result<Option<f64>>;
}

-#[allow(dead_code)]
fn get_map_value<T>(env: &mut JNIEnv, map: &JMap, key: &str) -> Result<Option<T>>
where
    for<'a> JObject<'a>: FromJObject<T>,
@@ -8,7 +8,7 @@
  <parent>
    <groupId>com.lancedb</groupId>
    <artifactId>lancedb-parent</artifactId>
-    <version>0.22.1-beta.3</version>
+    <version>0.22.0-final.0</version>
    <relativePath>../pom.xml</relativePath>
  </parent>

@@ -8,7 +8,7 @@
  <parent>
    <groupId>com.lancedb</groupId>
    <artifactId>lancedb-parent</artifactId>
-    <version>0.22.1-beta.3</version>
+    <version>0.22.0-final.0</version>
    <relativePath>../pom.xml</relativePath>
  </parent>

@@ -6,7 +6,7 @@
  <groupId>com.lancedb</groupId>
  <artifactId>lancedb-parent</artifactId>
-  <version>0.22.1-beta.3</version>
+  <version>0.22.0-final.0</version>
  <packaging>pom</packaging>
  <name>${project.artifactId}</name>
  <description>LanceDB Java SDK Parent POM</description>
@@ -1,7 +1,7 @@
[package]
name = "lancedb-nodejs"
edition.workspace = true
-version = "0.22.1-beta.3"
+version = "0.22.0"
license.workspace = true
description.workspace = true
repository.workspace = true
@@ -1008,64 +1008,5 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
      expect(result).toEqual(null);
    });
  });
-
-    describe("boolean null handling", function () {
-      it("should handle null values in nullable boolean fields", () => {
-        const { makeArrowTable } = require("../lancedb/arrow");
-        const schema = new Schema([new Field("test", new arrow.Bool(), true)]);
-
-        // Test with all null values
-        const data = [{ test: null }];
-        const table = makeArrowTable(data, { schema });
-
-        expect(table.numRows).toBe(1);
-        expect(table.schema.names).toEqual(["test"]);
-        expect(table.getChild("test")!.get(0)).toBeNull();
-      });
-
-      it("should handle mixed null and non-null boolean values", () => {
-        const { makeArrowTable } = require("../lancedb/arrow");
-        const schema = new Schema([new Field("test", new Bool(), true)]);
-
-        // Test with mixed values
-        const data = [{ test: true }, { test: null }, { test: false }];
-        const table = makeArrowTable(data, { schema });
-
-        expect(table.numRows).toBe(3);
-        expect(table.getChild("test")!.get(0)).toBe(true);
-        expect(table.getChild("test")!.get(1)).toBeNull();
-        expect(table.getChild("test")!.get(2)).toBe(false);
-      });
-    });
  },
);
-
-// Test for the undefined values bug fix
-describe("undefined values handling", () => {
-  it("should handle mixed undefined and actual values", () => {
-    const schema = new Schema([
-      new Field("text", new Utf8(), true), // nullable
-      new Field("number", new Int32(), true), // nullable
-      new Field("bool", new Bool(), true), // nullable
-    ]);
-
-    const data = [
-      { text: undefined, number: 42, bool: true },
-      { text: "hello", number: undefined, bool: false },
-      { text: "world", number: 123, bool: undefined },
-    ];
-    const table = makeArrowTable(data, { schema });
-
-    const result = table.toArray();
-    expect(result).toHaveLength(3);
-    expect(result[0].text).toBe(null);
-    expect(result[0].number).toBe(42);
-    expect(result[0].bool).toBe(true);
-    expect(result[1].text).toBe("hello");
-    expect(result[1].number).toBe(null);
-    expect(result[1].bool).toBe(false);
-    expect(result[2].text).toBe("world");
-    expect(result[2].number).toBe(123);
-    expect(result[2].bool).toBe(null);
-  });
-});
@@ -203,106 +203,3 @@ describe("given a connection", () => {
      });
    });
  });
-
-describe("clone table functionality", () => {
-  let tmpDir: tmp.DirResult;
-  let db: Connection;
-  beforeEach(async () => {
-    tmpDir = tmp.dirSync({ unsafeCleanup: true });
-    db = await connect(tmpDir.name);
-  });
-  afterEach(() => tmpDir.removeCallback());
-
-  it("should clone a table with latest version (default behavior)", async () => {
-    // Create source table with some data
-    const data = [
-      { id: 1, text: "hello", vector: [1.0, 2.0] },
-      { id: 2, text: "world", vector: [3.0, 4.0] },
-    ];
-    const sourceTable = await db.createTable("source", data);
-
-    // Add more data to create a new version
-    const moreData = [{ id: 3, text: "test", vector: [5.0, 6.0] }];
-    await sourceTable.add(moreData);
-
-    // Clone the table (should get latest version with 3 rows)
-    const sourceUri = `${tmpDir.name}/source.lance`;
-    const clonedTable = await db.cloneTable("cloned", sourceUri);
-
-    // Verify cloned table has all 3 rows
-    expect(await clonedTable.countRows()).toBe(3);
-    expect((await db.tableNames()).includes("cloned")).toBe(true);
-  });
-
-  it("should clone a table from a specific version", async () => {
-    // Create source table with initial data
-    const data = [
-      { id: 1, text: "hello", vector: [1.0, 2.0] },
-      { id: 2, text: "world", vector: [3.0, 4.0] },
-    ];
-    const sourceTable = await db.createTable("source", data);
-
-    // Get the initial version
-    const initialVersion = await sourceTable.version();
-
-    // Add more data to create a new version
-    const moreData = [{ id: 3, text: "test", vector: [5.0, 6.0] }];
-    await sourceTable.add(moreData);
-
-    // Verify source now has 3 rows
-    expect(await sourceTable.countRows()).toBe(3);
-
-    // Clone from the initial version (should have only 2 rows)
-    const sourceUri = `${tmpDir.name}/source.lance`;
-    const clonedTable = await db.cloneTable("cloned", sourceUri, {
-      sourceVersion: initialVersion,
-    });
-
-    // Verify cloned table has only the initial 2 rows
-    expect(await clonedTable.countRows()).toBe(2);
-  });
-
-  it("should clone a table from a tagged version", async () => {
-    // Create source table with initial data
-    const data = [
-      { id: 1, text: "hello", vector: [1.0, 2.0] },
-      { id: 2, text: "world", vector: [3.0, 4.0] },
-    ];
-    const sourceTable = await db.createTable("source", data);
-
-    // Create a tag for the current version
-    const tags = await sourceTable.tags();
-    await tags.create("v1.0", await sourceTable.version());
-
-    // Add more data after the tag
-    const moreData = [{ id: 3, text: "test", vector: [5.0, 6.0] }];
-    await sourceTable.add(moreData);
-
-    // Verify source now has 3 rows
-    expect(await sourceTable.countRows()).toBe(3);
-
-    // Clone from the tagged version (should have only 2 rows)
-    const sourceUri = `${tmpDir.name}/source.lance`;
-    const clonedTable = await db.cloneTable("cloned", sourceUri, {
-      sourceTag: "v1.0",
-    });
-
-    // Verify cloned table has only the tagged version's 2 rows
-    expect(await clonedTable.countRows()).toBe(2);
-  });
-
-  it("should fail when attempting deep clone", async () => {
-    // Create source table with some data
-    const data = [
-      { id: 1, text: "hello", vector: [1.0, 2.0] },
-      { id: 2, text: "world", vector: [3.0, 4.0] },
-    ];
-    await db.createTable("source", data);
-
-    // Try to create a deep clone (should fail)
-    const sourceUri = `${tmpDir.name}/source.lance`;
-    await expect(
-      db.cloneTable("cloned", sourceUri, { isShallow: false }),
-    ).rejects.toThrow("Deep clone is not yet implemented");
-  });
-});
@@ -256,60 +256,6 @@ describe("embedding functions", () => {
    expect(actual).toHaveProperty("text");
  });
-
-  it("should handle undefined vector field with embedding function correctly", async () => {
-    @register("undefined_test")
-    class MockEmbeddingFunction extends EmbeddingFunction<string> {
-      ndims() {
-        return 3;
-      }
-      embeddingDataType(): Float {
-        return new Float32();
-      }
-      async computeQueryEmbeddings(_data: string) {
-        return [1, 2, 3];
-      }
-      async computeSourceEmbeddings(data: string[]) {
-        return Array.from({ length: data.length }).fill([
-          1, 2, 3,
-        ]) as number[][];
-      }
-    }
-    const func = getRegistry()
-      .get<MockEmbeddingFunction>("undefined_test")!
-      .create();
-    const schema = new Schema([
-      new Field("text", new Utf8(), true),
-      new Field(
-        "vector",
-        new FixedSizeList(3, new Field("item", new Float32(), true)),
-        true,
-      ),
-    ]);
-
-    const db = await connect(tmpDir.name);
-    const table = await db.createEmptyTable("test_undefined", schema, {
-      embeddingFunction: {
-        function: func,
-        sourceColumn: "text",
-        vectorColumn: "vector",
-      },
-    });
-
-    // Test that undefined, null, and omitted vector fields all work
-    await table.add([{ text: "test1", vector: undefined }]);
-    await table.add([{ text: "test2", vector: null }]);
-    await table.add([{ text: "test3" }]);
-
-    const rows = await table.query().toArray();
-    expect(rows.length).toBe(3);
-
-    // All rows should have vectors computed by the embedding function
-    for (const row of rows) {
-      expect(row.vector).toBeDefined();
-      expect(JSON.parse(JSON.stringify(row.vector))).toEqual([1, 2, 3]);
-    }
-  });

 test.each([new Float16(), new Float32(), new Float64()])(
   "should be able to provide manual embeddings with multiple float datatype",
   async (floatType) => {
@@ -3,50 +3,7 @@

import * as http from "http";
import { RequestListener } from "http";
import {
  ClientConfig,
  Connection,
  ConnectionOptions,
  NativeJsHeaderProvider,
  TlsConfig,
  connect,
} from "../lancedb";
import {
  HeaderProvider,
  OAuthHeaderProvider,
  StaticHeaderProvider,
} from "../lancedb/header";

// Test-only header providers
class CustomProvider extends HeaderProvider {
  getHeaders(): Record<string, string> {
    return { "X-Custom": "custom-value" };
  }
}

class ErrorProvider extends HeaderProvider {
  private errorMessage: string;
  public callCount: number = 0;

  constructor(errorMessage: string = "Test error") {
    super();
    this.errorMessage = errorMessage;
  }

  getHeaders(): Record<string, string> {
    this.callCount++;
    throw new Error(this.errorMessage);
  }
}

class ConcurrentProvider extends HeaderProvider {
  private counter: number = 0;

  getHeaders(): Record<string, string> {
    this.counter++;
    return { "X-Request-Id": String(this.counter) };
  }
}
import { Connection, ConnectionOptions, connect } from "../lancedb";

async function withMockDatabase(
  listener: RequestListener,
@@ -191,431 +148,4 @@ describe("remote connection", () => {
      },
    );
  });

  describe("TlsConfig", () => {
    it("should create TlsConfig with all fields", () => {
      const tlsConfig: TlsConfig = {
        certFile: "/path/to/cert.pem",
        keyFile: "/path/to/key.pem",
        sslCaCert: "/path/to/ca.pem",
        assertHostname: false,
      };

      expect(tlsConfig.certFile).toBe("/path/to/cert.pem");
      expect(tlsConfig.keyFile).toBe("/path/to/key.pem");
      expect(tlsConfig.sslCaCert).toBe("/path/to/ca.pem");
      expect(tlsConfig.assertHostname).toBe(false);
    });

    it("should create TlsConfig with partial fields", () => {
      const tlsConfig: TlsConfig = {
        certFile: "/path/to/cert.pem",
        keyFile: "/path/to/key.pem",
      };

      expect(tlsConfig.certFile).toBe("/path/to/cert.pem");
      expect(tlsConfig.keyFile).toBe("/path/to/key.pem");
      expect(tlsConfig.sslCaCert).toBeUndefined();
      expect(tlsConfig.assertHostname).toBeUndefined();
    });

    it("should create ClientConfig with TlsConfig", () => {
      const tlsConfig: TlsConfig = {
        certFile: "/path/to/cert.pem",
        keyFile: "/path/to/key.pem",
        sslCaCert: "/path/to/ca.pem",
        assertHostname: true,
      };

      const clientConfig: ClientConfig = {
        userAgent: "test-agent",
        tlsConfig: tlsConfig,
      };

      expect(clientConfig.userAgent).toBe("test-agent");
      expect(clientConfig.tlsConfig).toBeDefined();
      expect(clientConfig.tlsConfig?.certFile).toBe("/path/to/cert.pem");
      expect(clientConfig.tlsConfig?.keyFile).toBe("/path/to/key.pem");
      expect(clientConfig.tlsConfig?.sslCaCert).toBe("/path/to/ca.pem");
      expect(clientConfig.tlsConfig?.assertHostname).toBe(true);
    });

    it("should handle empty TlsConfig", () => {
      const tlsConfig: TlsConfig = {};

      expect(tlsConfig.certFile).toBeUndefined();
      expect(tlsConfig.keyFile).toBeUndefined();
      expect(tlsConfig.sslCaCert).toBeUndefined();
      expect(tlsConfig.assertHostname).toBeUndefined();
    });

    it("should accept TlsConfig in connection options", () => {
      const tlsConfig: TlsConfig = {
        certFile: "/path/to/cert.pem",
        keyFile: "/path/to/key.pem",
        sslCaCert: "/path/to/ca.pem",
        assertHostname: false,
      };

      // Just verify that the ClientConfig accepts the TlsConfig
      const clientConfig: ClientConfig = {
        tlsConfig: tlsConfig,
      };

      const connectionOptions: ConnectionOptions = {
        apiKey: "fake",
        clientConfig: clientConfig,
      };

      // Verify the configuration structure is correct
      expect(connectionOptions.clientConfig).toBeDefined();
      expect(connectionOptions.clientConfig?.tlsConfig).toBeDefined();
      expect(connectionOptions.clientConfig?.tlsConfig?.certFile).toBe(
        "/path/to/cert.pem",
      );
    });
  });

  describe("header providers", () => {
    it("should work with StaticHeaderProvider", async () => {
      const provider = new StaticHeaderProvider({
        authorization: "Bearer test-token",
        "X-Custom": "value",
      });

      const headers = provider.getHeaders();
      expect(headers).toEqual({
        authorization: "Bearer test-token",
        "X-Custom": "value",
      });

      // Test that it returns a copy
      headers["X-Modified"] = "modified";
      const headers2 = provider.getHeaders();
      expect(headers2).not.toHaveProperty("X-Modified");
    });

    it("should pass headers from StaticHeaderProvider to requests", async () => {
      const provider = new StaticHeaderProvider({
        "X-Custom-Auth": "secret-token",
        "X-Request-Source": "test-suite",
      });

      await withMockDatabase(
        (req, res) => {
          expect(req.headers["x-custom-auth"]).toEqual("secret-token");
          expect(req.headers["x-request-source"]).toEqual("test-suite");

          const body = JSON.stringify({ tables: [] });
          res.writeHead(200, { "Content-Type": "application/json" }).end(body);
        },
        async () => {
          // Use actual header provider mechanism instead of extraHeaders
          const conn = await connect(
            "db://dev",
            {
              apiKey: "fake",
              hostOverride: "http://localhost:8000",
            },
            undefined, // session
            provider, // headerProvider
          );

          const tableNames = await conn.tableNames();
          expect(tableNames).toEqual([]);
        },
      );
    });

    it("should work with CustomProvider", () => {
      const provider = new CustomProvider();
      const headers = provider.getHeaders();
      expect(headers).toEqual({ "X-Custom": "custom-value" });
    });

    it("should handle ErrorProvider errors", () => {
      const provider = new ErrorProvider("Authentication failed");

      expect(() => provider.getHeaders()).toThrow("Authentication failed");
      expect(provider.callCount).toBe(1);

      // Test that error is thrown each time
      expect(() => provider.getHeaders()).toThrow("Authentication failed");
      expect(provider.callCount).toBe(2);
    });

    it("should work with ConcurrentProvider", () => {
      const provider = new ConcurrentProvider();

      const headers1 = provider.getHeaders();
      const headers2 = provider.getHeaders();
      const headers3 = provider.getHeaders();

      expect(headers1).toEqual({ "X-Request-Id": "1" });
      expect(headers2).toEqual({ "X-Request-Id": "2" });
      expect(headers3).toEqual({ "X-Request-Id": "3" });
    });

    describe("OAuthHeaderProvider", () => {
      it("should initialize correctly", () => {
        const fetcher = () => ({
          accessToken: "token123",
          expiresIn: 3600,
        });

        const provider = new OAuthHeaderProvider(fetcher);
        expect(provider).toBeInstanceOf(HeaderProvider);
      });

      it("should fetch token on first use", async () => {
        let callCount = 0;
        const fetcher = () => {
          callCount++;
          return {
            accessToken: "token123",
            expiresIn: 3600,
          };
        };

        const provider = new OAuthHeaderProvider(fetcher);

        // Need to manually refresh first due to sync limitation
        await provider.refreshToken();

        const headers = provider.getHeaders();
        expect(headers).toEqual({ authorization: "Bearer token123" });
        expect(callCount).toBe(1);

        // Second call should not fetch again
        const headers2 = provider.getHeaders();
        expect(headers2).toEqual({ authorization: "Bearer token123" });
        expect(callCount).toBe(1);
      });

      it("should handle tokens without expiry", async () => {
        const fetcher = () => ({
          accessToken: "permanent_token",
        });

        const provider = new OAuthHeaderProvider(fetcher);
        await provider.refreshToken();

        const headers = provider.getHeaders();
        expect(headers).toEqual({ authorization: "Bearer permanent_token" });
      });

      it("should throw error when access_token is missing", async () => {
        const fetcher = () =>
          ({
            expiresIn: 3600,
          }) as { accessToken?: string; expiresIn?: number };

        const provider = new OAuthHeaderProvider(
          fetcher as () => {
            accessToken: string;
            expiresIn?: number;
          },
        );

        await expect(provider.refreshToken()).rejects.toThrow(
          "Token fetcher did not return 'accessToken'",
        );
      });

      it("should handle async token fetchers", async () => {
        const fetcher = async () => {
          // Simulate async operation
          await new Promise((resolve) => setTimeout(resolve, 10));
          return {
            accessToken: "async_token",
            expiresIn: 3600,
          };
        };

        const provider = new OAuthHeaderProvider(fetcher);
        await provider.refreshToken();

        const headers = provider.getHeaders();
        expect(headers).toEqual({ authorization: "Bearer async_token" });
      });
    });

    it("should merge header provider headers with extra headers", async () => {
      const provider = new StaticHeaderProvider({
        "X-From-Provider": "provider-value",
      });

      await withMockDatabase(
        (req, res) => {
          expect(req.headers["x-from-provider"]).toEqual("provider-value");
          expect(req.headers["x-extra-header"]).toEqual("extra-value");

          const body = JSON.stringify({ tables: [] });
          res.writeHead(200, { "Content-Type": "application/json" }).end(body);
        },
        async () => {
          // Use header provider with additional extraHeaders
          const conn = await connect(
            "db://dev",
            {
              apiKey: "fake",
              hostOverride: "http://localhost:8000",
              clientConfig: {
                extraHeaders: {
                  "X-Extra-Header": "extra-value",
                },
              },
            },
            undefined, // session
            provider, // headerProvider
          );

          const tableNames = await conn.tableNames();
          expect(tableNames).toEqual([]);
        },
      );
    });
  });

  describe("header provider integration", () => {
    it("should work with TypeScript StaticHeaderProvider", async () => {
      let requestCount = 0;

      await withMockDatabase(
        (req, res) => {
          requestCount++;

          // Check headers are present on each request
          expect(req.headers["authorization"]).toEqual("Bearer test-token-123");
          expect(req.headers["x-custom"]).toEqual("custom-value");

          // Return different responses based on the endpoint
          if (req.url === "/v1/table/test_table/describe/") {
            const body = JSON.stringify({
              name: "test_table",
              schema: { fields: [] },
            });
            res
              .writeHead(200, { "Content-Type": "application/json" })
              .end(body);
          } else {
            const body = JSON.stringify({ tables: ["test_table"] });
            res
              .writeHead(200, { "Content-Type": "application/json" })
              .end(body);
          }
        },
        async () => {
          // Create provider with static headers
          const provider = new StaticHeaderProvider({
            authorization: "Bearer test-token-123",
            "X-Custom": "custom-value",
          });

          // Connect with the provider
          const conn = await connect(
            "db://dev",
            {
              apiKey: "fake",
              hostOverride: "http://localhost:8000",
            },
            undefined, // session
            provider, // headerProvider
          );

          // Make multiple requests to verify headers are sent each time
          const tables1 = await conn.tableNames();
          expect(tables1).toEqual(["test_table"]);

          const tables2 = await conn.tableNames();
          expect(tables2).toEqual(["test_table"]);

          // Verify headers were sent with each request
          expect(requestCount).toBeGreaterThanOrEqual(2);
        },
      );
    });

    it("should work with JavaScript function provider", async () => {
      let requestId = 0;

      await withMockDatabase(
        (req, res) => {
          // Check dynamic header is present
          expect(req.headers["x-request-id"]).toBeDefined();
          expect(req.headers["x-request-id"]).toMatch(/^req-\d+$/);

          const body = JSON.stringify({ tables: [] });
          res.writeHead(200, { "Content-Type": "application/json" }).end(body);
        },
        async () => {
          // Create a JavaScript function that returns dynamic headers
          const getHeaders = async () => {
            requestId++;
            return {
              "X-Request-Id": `req-${requestId}`,
              "X-Timestamp": new Date().toISOString(),
            };
          };

          // Connect with the function directly
          const conn = await connect(
            "db://dev",
            {
              apiKey: "fake",
              hostOverride: "http://localhost:8000",
            },
            undefined, // session
            getHeaders, // headerProvider
          );

          // Make requests - each should have different headers
          const tables = await conn.tableNames();
          expect(tables).toEqual([]);
        },
      );
    });

    it("should support OAuth-like token refresh pattern", async () => {
      let tokenVersion = 0;

      await withMockDatabase(
        (req, res) => {
          // Verify authorization header
          const authHeader = req.headers["authorization"];
          expect(authHeader).toBeDefined();
          expect(authHeader).toMatch(/^Bearer token-v\d+$/);

          const body = JSON.stringify({ tables: [] });
          res.writeHead(200, { "Content-Type": "application/json" }).end(body);
        },
        async () => {
          // Simulate OAuth token fetcher
          const fetchToken = async () => {
            tokenVersion++;
            return {
              authorization: `Bearer token-v${tokenVersion}`,
            };
          };

          // Connect with the function directly
          const conn = await connect(
            "db://dev",
            {
              apiKey: "fake",
              hostOverride: "http://localhost:8000",
            },
            undefined, // session
            fetchToken, // headerProvider
          );

          // Each request will fetch a new token
          await conn.tableNames();

          // Token should be different on next request
          await conn.tableNames();
        },
      );
    });
  });
});
@@ -512,11 +512,7 @@ function* rowPathsAndValues(
    if (isObject(value)) {
      yield* rowPathsAndValues(value, [...basePath, key]);
    } else {
      // Skip undefined values - they should be treated the same as missing fields
      // for embedding function purposes
      if (value !== undefined) {
        yield [[...basePath, key], value];
      }
      yield [[...basePath, key], value];
    }
  }
}
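
For context, the hunk above reverts to the unconditional `yield`: with the guard in place, a field explicitly set to `undefined` yields nothing, so it behaves exactly like a missing field when embeddings are applied. A minimal standalone sketch of the guarded traversal (the `isObject` helper and the types here are illustrative assumptions, not the library's actual internals):

```typescript
type Path = string[];

// Illustrative stand-in for the module's own isObject helper.
function isObject(v: unknown): v is Record<string, unknown> {
  return typeof v === "object" && v !== null && !Array.isArray(v);
}

function* rowPathsAndValues(
  row: Record<string, unknown>,
  basePath: Path = [],
): Generator<[Path, unknown]> {
  for (const [key, value] of Object.entries(row)) {
    if (isObject(value)) {
      yield* rowPathsAndValues(value, [...basePath, key]);
    } else if (value !== undefined) {
      // An explicit `undefined` yields nothing, same as an absent key.
      yield [[...basePath, key], value];
    }
  }
}

// [...rowPathsAndValues({ a: 1, b: undefined, c: { d: 2 } })]
//   => [[["a"], 1], [["c", "d"], 2]]
```
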
@@ -705,7 +701,7 @@ function transposeData(
      }
      return current;
    });
    return makeVector(values, field.type, undefined, field.nullable);
    return makeVector(values, field.type);
  }
}
@@ -752,30 +748,9 @@ function makeVector(
  values: unknown[],
  type?: DataType,
  stringAsDictionary?: boolean,
  nullable?: boolean,
  // biome-ignore lint/suspicious/noExplicitAny: skip
): Vector<any> {
  if (type !== undefined) {
    // Convert undefined values to null for nullable fields
    if (nullable) {
      values = values.map((v) => (v === undefined ? null : v));
    }

    // workaround for: https://github.com/apache/arrow-js/issues/68
    if (DataType.isBool(type)) {
      const hasNonNullValue = values.some((v) => v !== null && v !== undefined);
      if (!hasNonNullValue) {
        const nullBitmap = new Uint8Array(Math.ceil(values.length / 8));
        const data = makeData({
          type: type,
          length: values.length,
          nullCount: values.length,
          nullBitmap,
        });
        return arrowMakeVector(data);
      }
    }

    // No need for inference, let Arrow create it
    if (type instanceof Int) {
      if (DataType.isInt(type) && type.bitWidth === 64) {
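
A quick illustration of the all-null Bool workaround being removed above, as a hedged standalone snippet built directly on the `apache-arrow` package (`makeData`, `makeVector`, and `Bool` are its public exports; the length is arbitrary):

```typescript
import { Bool, makeData, makeVector } from "apache-arrow";

// Construct an all-null Bool vector explicitly. Inferring one from
// [null, null, null] hits https://github.com/apache/arrow-js/issues/68,
// which is why makeVector() above special-cased this shape.
const length = 3;
const allNullBools = makeVector(
  makeData({
    type: new Bool(),
    length,
    nullCount: length,
    // A zeroed validity bitmap marks every slot as null.
    nullBitmap: new Uint8Array(Math.ceil(length / 8)),
  }),
);
console.log(allNullBools.length, allNullBools.nullCount); // 3 3
```
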
@@ -900,12 +875,7 @@ async function applyEmbeddingsFromMetadata(
  for (const field of schema.fields) {
    if (!(field.name in columns)) {
      const nullValues = new Array(table.numRows).fill(null);
      columns[field.name] = makeVector(
        nullValues,
        field.type,
        undefined,
        field.nullable,
      );
      columns[field.name] = makeVector(nullValues, field.type);
    }
  }
@@ -969,12 +939,7 @@ async function applyEmbeddings<T>(
  } else if (schema != null) {
    const destField = schema.fields.find((f) => f.name === destColumn);
    if (destField != null) {
      newColumns[destColumn] = makeVector(
        [],
        destField.type,
        undefined,
        destField.nullable,
      );
      newColumns[destColumn] = makeVector([], destField.type);
    } else {
      throw new Error(
        `Attempt to apply embeddings to an empty table failed because schema was missing embedding column '${destColumn}'`,
@@ -268,33 +268,6 @@ export abstract class Connection {
   * @param {string[]} namespace The namespace to drop tables from (defaults to root namespace).
   */
  abstract dropAllTables(namespace?: string[]): Promise<void>;

  /**
   * Clone a table from a source table.
   *
   * A shallow clone creates a new table that shares the underlying data files
   * with the source table but has its own independent manifest. This allows
   * both the source and cloned tables to evolve independently while initially
   * sharing the same data, deletion, and index files.
   *
   * @param {string} targetTableName - The name of the target table to create.
   * @param {string} sourceUri - The URI of the source table to clone from.
   * @param {object} options - Clone options.
   * @param {string[]} options.targetNamespace - The namespace for the target table (defaults to root namespace).
   * @param {number} options.sourceVersion - The version of the source table to clone.
   * @param {string} options.sourceTag - The tag of the source table to clone.
   * @param {boolean} options.isShallow - Whether to perform a shallow clone (defaults to true).
   */
  abstract cloneTable(
    targetTableName: string,
    sourceUri: string,
    options?: {
      targetNamespace?: string[];
      sourceVersion?: number;
      sourceTag?: string;
      isShallow?: boolean;
    },
  ): Promise<Table>;
}
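
A minimal usage sketch of the API removed above, assuming a local database directory and an existing table stored at `<db>/source.lance` (the paths and version number are illustrative):

```typescript
import * as lancedb from "@lancedb/lancedb";

async function example() {
  const db = await lancedb.connect("/tmp/mydb");

  // Shallow-clone "source" as it existed at version 1. The clone shares
  // data, deletion, and index files with the source, but writes to either
  // table after this point diverge independently.
  const cloned = await db.cloneTable("cloned", "/tmp/mydb/source.lance", {
    sourceVersion: 1,
  });
  console.log(await cloned.countRows());
}
```
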
/** @hideconstructor */
@@ -359,28 +332,6 @@ export class LocalConnection extends Connection {
    return new LocalTable(innerTable);
  }

  async cloneTable(
    targetTableName: string,
    sourceUri: string,
    options?: {
      targetNamespace?: string[];
      sourceVersion?: number;
      sourceTag?: string;
      isShallow?: boolean;
    },
  ): Promise<Table> {
    const innerTable = await this.inner.cloneTable(
      targetTableName,
      sourceUri,
      options?.targetNamespace ?? [],
      options?.sourceVersion ?? null,
      options?.sourceTag ?? null,
      options?.isShallow ?? true,
    );

    return new LocalTable(innerTable);
  }

  private getStorageOptions(
    options?: Partial<CreateTableOptions>,
  ): Record<string, string> | undefined {
@@ -1,253 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

/**
 * Header providers for LanceDB remote connections.
 *
 * This module provides a flexible header management framework for LanceDB remote
 * connections, allowing users to implement custom header strategies for
 * authentication, request tracking, custom metadata, or any other header-based
 * requirements.
 *
 * @module header
 */

/**
 * Abstract base class for providing custom headers for each request.
 *
 * Users can implement this interface to provide dynamic headers for various purposes
 * such as authentication (OAuth tokens, API keys), request tracking (correlation IDs),
 * custom metadata, or any other header-based requirements. The provider is called
 * before each request to ensure fresh header values are always used.
 *
 * @example
 * Simple JWT token provider:
 * ```typescript
 * class JWTProvider extends HeaderProvider {
 *   constructor(private token: string) {
 *     super();
 *   }
 *
 *   getHeaders(): Record<string, string> {
 *     return { authorization: `Bearer ${this.token}` };
 *   }
 * }
 * ```
 *
 * @example
 * Provider with request tracking:
 * ```typescript
 * class RequestTrackingProvider extends HeaderProvider {
 *   constructor(private sessionId: string) {
 *     super();
 *   }
 *
 *   getHeaders(): Record<string, string> {
 *     return {
 *       "X-Session-Id": this.sessionId,
 *       "X-Request-Id": `req-${Date.now()}`
 *     };
 *   }
 * }
 * ```
 */
export abstract class HeaderProvider {
  /**
   * Get the latest headers to be added to requests.
   *
   * This method is called before each request to the remote LanceDB server.
   * Implementations should return headers that will be merged with existing headers.
   *
   * @returns Dictionary of header names to values to add to the request.
   * @throws If unable to fetch headers, the exception will be propagated and the request will fail.
   */
  abstract getHeaders(): Record<string, string>;
}

/**
 * Example implementation: A simple header provider that returns static headers.
 *
 * This is an example implementation showing how to create a HeaderProvider
 * for cases where headers don't change during the session.
 *
 * @example
 * ```typescript
 * const provider = new StaticHeaderProvider({
 *   authorization: "Bearer my-token",
 *   "X-Custom-Header": "custom-value"
 * });
 * const headers = provider.getHeaders();
 * // Returns: {authorization: 'Bearer my-token', 'X-Custom-Header': 'custom-value'}
 * ```
 */
export class StaticHeaderProvider extends HeaderProvider {
  private _headers: Record<string, string>;

  /**
   * Initialize with static headers.
   * @param headers - Headers to return for every request.
   */
  constructor(headers: Record<string, string>) {
    super();
    this._headers = { ...headers };
  }

  /**
   * Return the static headers.
   * @returns Copy of the static headers.
   */
  getHeaders(): Record<string, string> {
    return { ...this._headers };
  }
}

/**
 * Token response from OAuth provider.
 * @public
 */
export interface TokenResponse {
  accessToken: string;
  expiresIn?: number;
}

/**
 * Example implementation: OAuth token provider with automatic refresh.
 *
 * This is an example implementation showing how to manage OAuth tokens
 * with automatic refresh when they expire.
 *
 * @example
 * ```typescript
 * async function fetchToken(): Promise<TokenResponse> {
 *   const response = await fetch("https://oauth.example.com/token", {
 *     method: "POST",
 *     body: JSON.stringify({
 *       grant_type: "client_credentials",
 *       client_id: "your-client-id",
 *       client_secret: "your-client-secret"
 *     }),
 *     headers: { "Content-Type": "application/json" }
 *   });
 *   const data = await response.json();
 *   return {
 *     accessToken: data.access_token,
 *     expiresIn: data.expires_in
 *   };
 * }
 *
 * const provider = new OAuthHeaderProvider(fetchToken);
 * const headers = provider.getHeaders();
 * // Returns: {"authorization": "Bearer <your-token>"}
 * ```
 */
export class OAuthHeaderProvider extends HeaderProvider {
  private _tokenFetcher: () => Promise<TokenResponse> | TokenResponse;
  private _refreshBufferSeconds: number;
  private _currentToken: string | null = null;
  private _tokenExpiresAt: number | null = null;
  private _refreshPromise: Promise<void> | null = null;

  /**
   * Initialize the OAuth provider.
   * @param tokenFetcher - Function to fetch new tokens. Should return object with 'accessToken' and optionally 'expiresIn'.
   * @param refreshBufferSeconds - Seconds before expiry to refresh token. Default 300 (5 minutes).
   */
  constructor(
    tokenFetcher: () => Promise<TokenResponse> | TokenResponse,
    refreshBufferSeconds: number = 300,
  ) {
    super();
    this._tokenFetcher = tokenFetcher;
    this._refreshBufferSeconds = refreshBufferSeconds;
  }

  /**
   * Check if token needs refresh.
   */
  private _needsRefresh(): boolean {
    if (this._currentToken === null) {
      return true;
    }

    if (this._tokenExpiresAt === null) {
      // No expiration info, assume token is valid
      return false;
    }

    // Refresh if we're within the buffer time of expiration
    const now = Date.now() / 1000;
    return now >= this._tokenExpiresAt - this._refreshBufferSeconds;
  }

  /**
   * Refresh the token if it's expired or close to expiring.
   */
  private async _refreshTokenIfNeeded(): Promise<void> {
    if (!this._needsRefresh()) {
      return;
    }

    // If refresh is already in progress, wait for it
    if (this._refreshPromise) {
      await this._refreshPromise;
      return;
    }

    // Start refresh
    this._refreshPromise = (async () => {
      try {
        const tokenData = await this._tokenFetcher();

        this._currentToken = tokenData.accessToken;
        if (!this._currentToken) {
          throw new Error("Token fetcher did not return 'accessToken'");
        }

        // Set expiration if provided
        if (tokenData.expiresIn) {
          this._tokenExpiresAt = Date.now() / 1000 + tokenData.expiresIn;
        } else {
          // Token doesn't expire or expiration unknown
          this._tokenExpiresAt = null;
        }
      } finally {
        this._refreshPromise = null;
      }
    })();

    await this._refreshPromise;
  }

  /**
   * Get OAuth headers, refreshing token if needed.
   * Note: This is synchronous for now as the Rust implementation expects sync.
   * In a real implementation, this would need to handle async properly.
   * @returns Headers with Bearer token authorization.
   * @throws If unable to fetch or refresh token.
   */
  getHeaders(): Record<string, string> {
    // For simplicity in this example, we assume the token is already fetched
    // In a real implementation, this would need to handle the async nature properly
    if (!this._currentToken && !this._refreshPromise) {
      // Synchronously trigger refresh - this is a limitation of the current implementation
      throw new Error(
        "Token not initialized. Call refreshToken() first or use async initialization.",
      );
    }

    if (!this._currentToken) {
      throw new Error("Failed to obtain OAuth token");
    }

    return { authorization: `Bearer ${this._currentToken}` };
  }

  /**
   * Manually refresh the token.
   * Call this before using getHeaders() to ensure token is available.
   */
  async refreshToken(): Promise<void> {
    this._currentToken = null; // Force refresh
    await this._refreshTokenIfNeeded();
  }
}
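
End to end, the removed module was wired up roughly like this; a hedged sketch assuming the 0.22.1-beta re-exports from `@lancedb/lancedb` and a placeholder token endpoint:

```typescript
import { OAuthHeaderProvider, connect } from "@lancedb/lancedb";

async function fetchToken() {
  // Placeholder OAuth endpoint and payload shape.
  const response = await fetch("https://auth.example.com/token", {
    method: "POST",
  });
  const data = await response.json();
  return { accessToken: data.access_token, expiresIn: data.expires_in };
}

async function main() {
  const provider = new OAuthHeaderProvider(fetchToken);
  // getHeaders() is synchronous, so the first token must be fetched up front.
  await provider.refreshToken();

  const conn = await connect(
    "db://dev",
    { apiKey: "fake", hostOverride: "http://localhost:8000" },
    undefined, // session
    provider, // headerProvider
  );
  console.log(await conn.tableNames());
}
```
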
@@ -10,15 +10,9 @@ import {
import {
  ConnectionOptions,
  Connection as LanceDbConnection,
  JsHeaderProvider as NativeJsHeaderProvider,
  Session,
} from "./native.js";

import { HeaderProvider } from "./header";

// Re-export native header provider for use with connectWithHeaderProvider
export { JsHeaderProvider as NativeJsHeaderProvider } from "./native.js";

export {
  AddColumnsSql,
  ConnectionOptions,
@@ -27,7 +21,6 @@ export {
  ClientConfig,
  TimeoutConfig,
  RetryConfig,
  TlsConfig,
  OptimizeStats,
  CompactionStats,
  RemovalStats,
@@ -100,13 +93,6 @@ export {
  ColumnAlteration,
} from "./table";

export {
  HeaderProvider,
  StaticHeaderProvider,
  OAuthHeaderProvider,
  TokenResponse,
} from "./header";

export { MergeInsertBuilder, WriteExecutionOptions } from "./merge";

export * as embedding from "./embedding";
@@ -145,27 +131,11 @@ export { IntoSql, packBits } from "./util";
 * {storageOptions: {timeout: "60s"}
 * });
 * ```
 * @example
 * Using with a header provider for per-request authentication:
 * ```ts
 * const provider = new StaticHeaderProvider({
 *   "X-API-Key": "my-key"
 * });
 * const conn = await connectWithHeaderProvider(
 *   "db://host:port",
 *   options,
 *   provider
 * );
 * ```
 */
export async function connect(
  uri: string,
  options?: Partial<ConnectionOptions>,
  session?: Session,
  headerProvider?:
    | HeaderProvider
    | (() => Record<string, string>)
    | (() => Promise<Record<string, string>>),
): Promise<Connection>;
/**
 * Connect to a LanceDB instance at the given URI.
@@ -199,58 +169,18 @@ export async function connect(
): Promise<Connection>;
export async function connect(
  uriOrOptions: string | (Partial<ConnectionOptions> & { uri: string }),
  optionsOrSession?: Partial<ConnectionOptions> | Session,
  sessionOrHeaderProvider?:
    | Session
    | HeaderProvider
    | (() => Record<string, string>)
    | (() => Promise<Record<string, string>>),
  headerProvider?:
    | HeaderProvider
    | (() => Record<string, string>)
    | (() => Promise<Record<string, string>>),
  options?: Partial<ConnectionOptions>,
): Promise<Connection> {
  let uri: string | undefined;
  let finalOptions: Partial<ConnectionOptions> = {};
  let finalHeaderProvider:
    | HeaderProvider
    | (() => Record<string, string>)
    | (() => Promise<Record<string, string>>)
    | undefined;

  if (typeof uriOrOptions !== "string") {
    // First overload: connect(options)
    const { uri: uri_, ...opts } = uriOrOptions;
    uri = uri_;
    finalOptions = opts;
  } else {
    // Second overload: connect(uri, options?, session?, headerProvider?)
    uri = uriOrOptions;

    // Handle optionsOrSession parameter
    if (optionsOrSession && "inner" in optionsOrSession) {
      // Second param is session, so no options provided
      finalOptions = {};
    } else {
      // Second param is options
      finalOptions = (optionsOrSession as Partial<ConnectionOptions>) || {};
    }

    // Handle sessionOrHeaderProvider parameter
    if (
      sessionOrHeaderProvider &&
      (typeof sessionOrHeaderProvider === "function" ||
        "getHeaders" in sessionOrHeaderProvider)
    ) {
      // Third param is header provider
      finalHeaderProvider = sessionOrHeaderProvider as
        | HeaderProvider
        | (() => Record<string, string>)
        | (() => Promise<Record<string, string>>);
    } else {
      // Third param is session, header provider is fourth param
      finalHeaderProvider = headerProvider;
    }
    finalOptions = options || {};
  }

  if (!uri) {
@@ -261,26 +191,6 @@ export async function connect(
  (<ConnectionOptions>finalOptions).storageOptions = cleanseStorageOptions(
    (<ConnectionOptions>finalOptions).storageOptions,
  );

  // Create native header provider if one was provided
  let nativeProvider: NativeJsHeaderProvider | undefined;
  if (finalHeaderProvider) {
    if (typeof finalHeaderProvider === "function") {
      nativeProvider = new NativeJsHeaderProvider(finalHeaderProvider);
    } else if (
      finalHeaderProvider &&
      typeof finalHeaderProvider.getHeaders === "function"
    ) {
      nativeProvider = new NativeJsHeaderProvider(async () =>
        finalHeaderProvider.getHeaders(),
      );
    }
  }

  const nativeConn = await LanceDbConnection.new(
    uri,
    finalOptions,
    nativeProvider,
  );
  const nativeConn = await LanceDbConnection.new(uri, finalOptions);
  return new LocalConnection(nativeConn);
}

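The parameter juggling removed above existed so that, in the beta build, several call shapes all resolved to the same implementation. A sketch of those shapes (URIs and header values are placeholders):

```typescript
import { connect } from "@lancedb/lancedb";

// uri only
const a = await connect("/tmp/db");

// single options object carrying the uri
const b = await connect({ uri: "/tmp/db", readConsistencyInterval: 0 });

// remote uri + options + (no session) + a function-style header provider
const c = await connect(
  "db://dev",
  { apiKey: "fake", hostOverride: "http://localhost:8000" },
  undefined,
  () => ({ "X-Request-Id": `req-${Date.now()}` }),
);
```
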
@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-arm64",
  "version": "0.22.1-beta.3",
  "version": "0.22.0",
  "os": ["darwin"],
  "cpu": ["arm64"],
  "main": "lancedb.darwin-arm64.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-darwin-x64",
  "version": "0.22.1-beta.3",
  "version": "0.22.0",
  "os": ["darwin"],
  "cpu": ["x64"],
  "main": "lancedb.darwin-x64.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-arm64-gnu",
  "version": "0.22.1-beta.3",
  "version": "0.22.0",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "lancedb.linux-arm64-gnu.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-arm64-musl",
  "version": "0.22.1-beta.3",
  "version": "0.22.0",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "lancedb.linux-arm64-musl.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-x64-gnu",
  "version": "0.22.1-beta.3",
  "version": "0.22.0",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "lancedb.linux-x64-gnu.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-linux-x64-musl",
  "version": "0.22.1-beta.3",
  "version": "0.22.0",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "lancedb.linux-x64-musl.node",

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-win32-arm64-msvc",
  "version": "0.22.1-beta.3",
  "version": "0.22.0",
  "os": [
    "win32"
  ],

@@ -1,6 +1,6 @@
{
  "name": "@lancedb/lancedb-win32-x64-msvc",
  "version": "0.22.1-beta.3",
  "version": "0.22.0",
  "os": ["win32"],
  "cpu": ["x64"],
  "main": "lancedb.win32-x64-msvc.node",

228
nodejs/package-lock.json
generated
@@ -1,12 +1,12 @@
{
  "name": "@lancedb/lancedb",
  "version": "0.22.1-beta.3",
  "version": "0.22.0-beta.1",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "@lancedb/lancedb",
      "version": "0.22.1-beta.3",
      "version": "0.22.0-beta.1",
      "cpu": [
        "x64",
        "arm64"
@@ -5549,11 +5549,10 @@
      "dev": true
    },
    "node_modules/brace-expansion": {
      "version": "1.1.12",
      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
      "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
      "version": "1.1.11",
      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
      "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "balanced-match": "^1.0.0",
        "concat-map": "0.0.1"
@@ -5630,20 +5629,6 @@
      "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
      "dev": true
    },
    "node_modules/call-bind-apply-helpers": {
      "version": "1.0.2",
      "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
      "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
      "devOptional": true,
      "license": "MIT",
      "dependencies": {
        "es-errors": "^1.3.0",
        "function-bind": "^1.1.2"
      },
      "engines": {
        "node": ">= 0.4"
      }
    },
    "node_modules/camelcase": {
      "version": "5.3.1",
      "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
@@ -6047,21 +6032,6 @@
        "node": ">=6.0.0"
      }
    },
    "node_modules/dunder-proto": {
      "version": "1.0.1",
      "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
      "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
      "devOptional": true,
      "license": "MIT",
      "dependencies": {
        "call-bind-apply-helpers": "^1.0.1",
        "es-errors": "^1.3.0",
        "gopd": "^1.2.0"
      },
      "engines": {
        "node": ">= 0.4"
      }
    },
    "node_modules/eastasianwidth": {
      "version": "0.2.0",
      "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
@@ -6101,55 +6071,6 @@
        "is-arrayish": "^0.2.1"
      }
    },
    "node_modules/es-define-property": {
      "version": "1.0.1",
      "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
      "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
      "devOptional": true,
      "license": "MIT",
      "engines": {
        "node": ">= 0.4"
      }
    },
    "node_modules/es-errors": {
      "version": "1.3.0",
      "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
      "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
      "devOptional": true,
      "license": "MIT",
      "engines": {
        "node": ">= 0.4"
      }
    },
    "node_modules/es-object-atoms": {
      "version": "1.1.1",
      "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
      "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
      "devOptional": true,
      "license": "MIT",
      "dependencies": {
        "es-errors": "^1.3.0"
      },
      "engines": {
        "node": ">= 0.4"
      }
    },
    "node_modules/es-set-tostringtag": {
      "version": "2.1.0",
      "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
      "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
      "devOptional": true,
      "license": "MIT",
      "dependencies": {
        "es-errors": "^1.3.0",
        "get-intrinsic": "^1.2.6",
        "has-tostringtag": "^1.0.2",
        "hasown": "^2.0.2"
      },
      "engines": {
        "node": ">= 0.4"
      }
    },
    "node_modules/escalade": {
      "version": "3.1.1",
      "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
@@ -6589,16 +6510,13 @@
      }
    },
    "node_modules/form-data": {
      "version": "4.0.4",
      "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
      "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
      "version": "4.0.0",
      "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
      "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
      "devOptional": true,
      "license": "MIT",
      "dependencies": {
        "asynckit": "^0.4.0",
        "combined-stream": "^1.0.8",
        "es-set-tostringtag": "^2.1.0",
        "hasown": "^2.0.2",
        "mime-types": "^2.1.12"
      },
      "engines": {
@@ -6657,7 +6575,7 @@
      "version": "1.1.2",
      "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
      "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
      "devOptional": true,
      "dev": true,
      "funding": {
        "url": "https://github.com/sponsors/ljharb"
      }
@@ -6680,31 +6598,6 @@
        "node": "6.* || 8.* || >= 10.*"
      }
    },
    "node_modules/get-intrinsic": {
      "version": "1.3.0",
      "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
      "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
      "devOptional": true,
      "license": "MIT",
      "dependencies": {
        "call-bind-apply-helpers": "^1.0.2",
        "es-define-property": "^1.0.1",
        "es-errors": "^1.3.0",
        "es-object-atoms": "^1.1.1",
        "function-bind": "^1.1.2",
        "get-proto": "^1.0.1",
        "gopd": "^1.2.0",
        "has-symbols": "^1.1.0",
        "hasown": "^2.0.2",
        "math-intrinsics": "^1.1.0"
      },
      "engines": {
        "node": ">= 0.4"
      },
      "funding": {
        "url": "https://github.com/sponsors/ljharb"
      }
    },
    "node_modules/get-package-type": {
      "version": "0.1.0",
      "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz",
@@ -6714,20 +6607,6 @@
        "node": ">=8.0.0"
      }
    },
    "node_modules/get-proto": {
      "version": "1.0.1",
      "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
      "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
      "devOptional": true,
      "license": "MIT",
      "dependencies": {
        "dunder-proto": "^1.0.1",
        "es-object-atoms": "^1.0.0"
      },
      "engines": {
        "node": ">= 0.4"
      }
    },
    "node_modules/get-stream": {
      "version": "6.0.1",
      "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
@@ -6819,19 +6698,6 @@
        "url": "https://github.com/sponsors/sindresorhus"
      }
    },
    "node_modules/gopd": {
      "version": "1.2.0",
      "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
      "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
      "devOptional": true,
      "license": "MIT",
      "engines": {
        "node": ">= 0.4"
      },
      "funding": {
        "url": "https://github.com/sponsors/ljharb"
      }
    },
    "node_modules/graceful-fs": {
      "version": "4.2.11",
      "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
@@ -6858,41 +6724,11 @@
        "node": ">=8"
      }
    },
    "node_modules/has-symbols": {
      "version": "1.1.0",
      "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
      "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
      "devOptional": true,
      "license": "MIT",
      "engines": {
        "node": ">= 0.4"
      },
      "funding": {
        "url": "https://github.com/sponsors/ljharb"
      }
    },
    "node_modules/has-tostringtag": {
      "version": "1.0.2",
      "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
      "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
      "devOptional": true,
      "license": "MIT",
      "dependencies": {
        "has-symbols": "^1.0.3"
      },
      "engines": {
        "node": ">= 0.4"
      },
      "funding": {
        "url": "https://github.com/sponsors/ljharb"
      }
    },
    "node_modules/hasown": {
      "version": "2.0.2",
      "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
      "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
      "devOptional": true,
      "license": "MIT",
      "version": "2.0.0",
      "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz",
      "integrity": "sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==",
      "dev": true,
      "dependencies": {
        "function-bind": "^1.1.2"
      },
@@ -8107,16 +7943,6 @@
      "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
      "dev": true
    },
    "node_modules/math-intrinsics": {
      "version": "1.1.0",
      "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
      "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
      "devOptional": true,
      "license": "MIT",
      "engines": {
        "node": ">= 0.4"
      }
    },
    "node_modules/md5": {
      "version": "2.3.0",
      "resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz",
@@ -8227,10 +8053,9 @@
      }
    },
    "node_modules/minizlib/node_modules/brace-expansion": {
      "version": "2.0.2",
      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
      "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
      "license": "MIT",
      "version": "2.0.1",
      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
      "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
      "optional": true,
      "dependencies": {
        "balanced-match": "^1.0.0"
@@ -9376,11 +9201,10 @@
      "dev": true
    },
    "node_modules/tmp": {
      "version": "0.2.5",
      "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.5.tgz",
      "integrity": "sha512-voyz6MApa1rQGUxT3E+BK7/ROe8itEx7vD8/HEvt4xwXucvQ5G5oeEiHkmHZJuBO21RpOf+YYm9MOivj709jow==",
      "version": "0.2.3",
      "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.3.tgz",
      "integrity": "sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w==",
      "dev": true,
      "license": "MIT",
      "engines": {
        "node": ">=14.14"
      }
@@ -9525,11 +9349,10 @@
      }
    },
    "node_modules/typedoc/node_modules/brace-expansion": {
      "version": "2.0.2",
      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
      "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
      "version": "2.0.1",
      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
      "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "balanced-match": "^1.0.0"
      }
@@ -9779,11 +9602,10 @@
      }
    },
    "node_modules/typescript-eslint/node_modules/brace-expansion": {
      "version": "2.0.2",
      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
      "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
      "version": "2.0.1",
      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
      "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "balanced-match": "^1.0.0"
      }

@@ -11,7 +11,7 @@
    "ann"
  ],
  "private": false,
  "version": "0.22.1-beta.3",
  "version": "0.22.0",
  "main": "dist/index.js",
  "exports": {
    ".": "./dist/index.js",

@@ -2,14 +2,12 @@
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use std::collections::HashMap;
use std::sync::Arc;

use lancedb::database::CreateTableMode;
use napi::bindgen_prelude::*;
use napi_derive::*;

use crate::error::NapiErrorExt;
use crate::header::JsHeaderProvider;
use crate::table::Table;
use crate::ConnectionOptions;
use lancedb::connection::{ConnectBuilder, Connection as LanceDBConnection};
@@ -47,11 +45,7 @@ impl Connection {
impl Connection {
    /// Create a new Connection instance from the given URI.
    #[napi(factory)]
    pub async fn new(
        uri: String,
        options: ConnectionOptions,
        header_provider: Option<&JsHeaderProvider>,
    ) -> napi::Result<Self> {
    pub async fn new(uri: String, options: ConnectionOptions) -> napi::Result<Self> {
        let mut builder = ConnectBuilder::new(&uri);
        if let Some(interval) = options.read_consistency_interval {
            builder =
@@ -63,16 +57,8 @@ impl Connection {
            }
        }

        // Create client config, optionally with header provider
        let client_config = options.client_config.unwrap_or_default();
        let mut rust_config: lancedb::remote::ClientConfig = client_config.into();

        if let Some(provider) = header_provider {
            rust_config.header_provider =
                Some(Arc::new(provider.clone()) as Arc<dyn lancedb::remote::HeaderProvider>);
        }

        builder = builder.client_config(rust_config);
        builder = builder.client_config(client_config.into());

        if let Some(api_key) = options.api_key {
            builder = builder.api_key(&api_key);
@@ -213,36 +199,6 @@ impl Connection {
        Ok(Table::new(tbl))
    }

    #[napi(catch_unwind)]
    pub async fn clone_table(
        &self,
        target_table_name: String,
        source_uri: String,
        target_namespace: Vec<String>,
        source_version: Option<i64>,
        source_tag: Option<String>,
        is_shallow: bool,
    ) -> napi::Result<Table> {
        let mut builder = self
            .get_inner()?
            .clone_table(&target_table_name, &source_uri);

        builder = builder.target_namespace(target_namespace);

        if let Some(version) = source_version {
            builder = builder.source_version(version as u64);
        }

        if let Some(tag) = source_tag {
            builder = builder.source_tag(tag);
        }

        builder = builder.is_shallow(is_shallow);

        let tbl = builder.execute().await.default_error()?;
        Ok(Table::new(tbl))
    }

    /// Drop table with the name. Or raise an error if the table does not exist.
    #[napi(catch_unwind)]
    pub async fn drop_table(&self, name: String, namespace: Vec<String>) -> napi::Result<()> {

@@ -1,71 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use napi::{
    bindgen_prelude::*,
    threadsafe_function::{ErrorStrategy, ThreadsafeFunction},
};
use napi_derive::napi;
use std::collections::HashMap;
use std::sync::Arc;

/// JavaScript HeaderProvider implementation that wraps a JavaScript callback.
/// This is the only native header provider - all header provider implementations
/// should provide a JavaScript function that returns headers.
#[napi]
pub struct JsHeaderProvider {
    get_headers_fn: Arc<ThreadsafeFunction<(), ErrorStrategy::CalleeHandled>>,
}

impl Clone for JsHeaderProvider {
    fn clone(&self) -> Self {
        Self {
            get_headers_fn: self.get_headers_fn.clone(),
        }
    }
}

#[napi]
impl JsHeaderProvider {
    /// Create a new JsHeaderProvider from a JavaScript callback
    #[napi(constructor)]
    pub fn new(get_headers_callback: JsFunction) -> Result<Self> {
        let get_headers_fn = get_headers_callback
            .create_threadsafe_function(0, |ctx| Ok(vec![ctx.value]))
            .map_err(|e| {
                Error::new(
                    Status::GenericFailure,
                    format!("Failed to create threadsafe function: {}", e),
                )
            })?;

        Ok(Self {
            get_headers_fn: Arc::new(get_headers_fn),
        })
    }
}

#[cfg(feature = "remote")]
#[async_trait::async_trait]
impl lancedb::remote::HeaderProvider for JsHeaderProvider {
    async fn get_headers(&self) -> lancedb::error::Result<HashMap<String, String>> {
        // Call the JavaScript function asynchronously
        let promise: Promise<HashMap<String, String>> =
            self.get_headers_fn.call_async(Ok(())).await.map_err(|e| {
                lancedb::error::Error::Runtime {
                    message: format!("Failed to call JavaScript get_headers: {}", e),
                }
            })?;

        // Await the promise result
        promise.await.map_err(|e| lancedb::error::Error::Runtime {
            message: format!("JavaScript get_headers failed: {}", e),
        })
    }
}

impl std::fmt::Debug for JsHeaderProvider {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "JsHeaderProvider")
    }
}
@@ -8,7 +8,6 @@ use napi_derive::*;

mod connection;
mod error;
mod header;
mod index;
mod iterator;
pub mod merge;

@@ -69,20 +69,6 @@ pub struct RetryConfig {
    pub statuses: Option<Vec<u16>>,
}

/// TLS/mTLS configuration for the remote HTTP client.
#[napi(object)]
#[derive(Debug, Default)]
pub struct TlsConfig {
    /// Path to the client certificate file (PEM format) for mTLS authentication.
    pub cert_file: Option<String>,
    /// Path to the client private key file (PEM format) for mTLS authentication.
    pub key_file: Option<String>,
    /// Path to the CA certificate file (PEM format) for server verification.
    pub ssl_ca_cert: Option<String>,
    /// Whether to verify the hostname in the server's certificate.
    pub assert_hostname: Option<bool>,
}

#[napi(object)]
#[derive(Debug, Default)]
pub struct ClientConfig {
@@ -91,7 +77,6 @@ pub struct ClientConfig {
    pub timeout_config: Option<TimeoutConfig>,
    pub extra_headers: Option<HashMap<String, String>>,
    pub id_delimiter: Option<String>,
    pub tls_config: Option<TlsConfig>,
}

impl From<TimeoutConfig> for lancedb::remote::TimeoutConfig {
@@ -122,17 +107,6 @@ impl From<RetryConfig> for lancedb::remote::RetryConfig {
    }
}

impl From<TlsConfig> for lancedb::remote::TlsConfig {
    fn from(config: TlsConfig) -> Self {
        Self {
            cert_file: config.cert_file,
            key_file: config.key_file,
            ssl_ca_cert: config.ssl_ca_cert,
            assert_hostname: config.assert_hostname.unwrap_or(true),
        }
    }
}

impl From<ClientConfig> for lancedb::remote::ClientConfig {
    fn from(config: ClientConfig) -> Self {
        Self {
@@ -143,8 +117,6 @@ impl From<ClientConfig> for lancedb::remote::ClientConfig {
            timeout_config: config.timeout_config.map(Into::into).unwrap_or_default(),
            extra_headers: config.extra_headers.unwrap_or_default(),
            id_delimiter: config.id_delimiter,
            tls_config: config.tls_config.map(Into::into),
            header_provider: None, // the header provider is set separately later
        }
    }
}

@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.25.1"
current_version = "0.25.0"
parse = """(?x)
(?P<major>0|[1-9]\\d*)\\.
(?P<minor>0|[1-9]\\d*)\\.
@@ -1,6 +1,6 @@
[package]
name = "lancedb-python"
version = "0.25.1"
version = "0.25.0"
edition.workspace = true
description = "Python bindings for LanceDB"
license.workspace = true
@@ -15,7 +15,6 @@ crate-type = ["cdylib"]

[dependencies]
arrow = { version = "55.1", features = ["pyarrow"] }
async-trait = "0.1"
lancedb = { path = "../rust/lancedb", default-features = false }
env_logger.workspace = true
pyo3 = { version = "0.24", features = ["extension-module", "abi3-py39"] }
@@ -60,15 +60,6 @@ class Connection(object):
        storage_options: Optional[Dict[str, str]] = None,
        index_cache_size: Optional[int] = None,
    ) -> Table: ...
    async def clone_table(
        self,
        target_table_name: str,
        source_uri: str,
        target_namespace: List[str] = [],
        source_version: Optional[int] = None,
        source_tag: Optional[str] = None,
        is_shallow: bool = True,
    ) -> Table: ...
    async def rename_table(
        self,
        cur_name: str,
@@ -665,60 +665,6 @@ class LanceDBConnection(DBConnection):
            index_cache_size=index_cache_size,
        )

    def clone_table(
        self,
        target_table_name: str,
        source_uri: str,
        *,
        target_namespace: List[str] = [],
        source_version: Optional[int] = None,
        source_tag: Optional[str] = None,
        is_shallow: bool = True,
    ) -> LanceTable:
        """Clone a table from a source table.

        A shallow clone creates a new table that shares the underlying data files
        with the source table but has its own independent manifest. This allows
        both the source and cloned tables to evolve independently while initially
        sharing the same data, deletion, and index files.

        Parameters
        ----------
        target_table_name: str
            The name of the target table to create.
        source_uri: str
            The URI of the source table to clone from.
        target_namespace: List[str], optional
            The namespace for the target table.
            None or an empty list represents the root namespace.
        source_version: int, optional
            The version of the source table to clone.
        source_tag: str, optional
            The tag of the source table to clone.
        is_shallow: bool, default True
            Whether to perform a shallow clone (True) or a deep clone (False).
            Currently only shallow clone is supported.

        Returns
        -------
        A LanceTable object representing the cloned table.
        """
        LOOP.run(
            self._conn.clone_table(
                target_table_name,
                source_uri,
                target_namespace=target_namespace,
                source_version=source_version,
                source_tag=source_tag,
                is_shallow=is_shallow,
            )
        )
        return LanceTable.open(
            self,
            target_table_name,
            namespace=target_namespace,
        )

    @override
    def drop_table(
        self,
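For orientation between the sync and async variants, here is a minimal usage sketch of the API this hunk adds. The database path and table names are illustrative placeholders; the call shapes mirror the tests added further down in this diff.

    import lancedb

    db = lancedb.connect("./db")  # hypothetical local database path

    # Shallow-clone the latest version of "source" into a new table "cloned".
    # The source URI points at the source table's Lance dataset directory.
    cloned = db.clone_table("cloned", "./db/source.lance")

    # Or pin the clone to a point in time: a numeric version...
    cloned_v1 = db.clone_table("cloned_v1", "./db/source.lance", source_version=1)

    # ...or a tag previously created with source_table.tags.create("v1.0", version).
    cloned_tag = db.clone_table("cloned_tag", "./db/source.lance", source_tag="v1.0")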
@@ -1190,54 +1136,6 @@ class AsyncConnection(object):
        )
        return AsyncTable(table)

    async def clone_table(
        self,
        target_table_name: str,
        source_uri: str,
        *,
        target_namespace: List[str] = [],
        source_version: Optional[int] = None,
        source_tag: Optional[str] = None,
        is_shallow: bool = True,
    ) -> AsyncTable:
        """Clone a table from a source table.

        A shallow clone creates a new table that shares the underlying data files
        with the source table but has its own independent manifest. This allows
        both the source and cloned tables to evolve independently while initially
        sharing the same data, deletion, and index files.

        Parameters
        ----------
        target_table_name: str
            The name of the target table to create.
        source_uri: str
            The URI of the source table to clone from.
        target_namespace: List[str], optional
            The namespace for the target table.
            None or an empty list represents the root namespace.
        source_version: int, optional
            The version of the source table to clone.
        source_tag: str, optional
            The tag of the source table to clone.
        is_shallow: bool, default True
            Whether to perform a shallow clone (True) or a deep clone (False).
            Currently only shallow clone is supported.

        Returns
        -------
        An AsyncTable object representing the cloned table.
        """
        table = await self._inner.clone_table(
            target_table_name,
            source_uri,
            target_namespace=target_namespace,
            source_version=source_version,
            source_tag=source_tag,
            is_shallow=is_shallow,
        )
        return AsyncTable(table)

    async def rename_table(
        self,
        cur_name: str,
@@ -122,7 +122,7 @@ class EmbeddingFunctionRegistry:
            obj["vector_column"]: EmbeddingFunctionConfig(
                vector_column=obj["vector_column"],
                source_column=obj["source_column"],
                function=self.get(obj["name"]).create(**obj["model"]),
                function=self.get(obj["name"])(**obj["model"]),
            )
            for obj in raw_list
        }
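The one-line change above swaps direct instantiation (`self.get(obj["name"])(**obj["model"])`) for the `create()` factory when rehydrating embedding configs from table metadata. A sketch of the call shape, assuming a function named "my-embedder" has been registered (the name and kwargs are placeholders, not from this diff):

    from lancedb.embeddings import EmbeddingFunctionRegistry

    registry = EmbeddingFunctionRegistry.get_instance()

    # get() looks up the registered embedding function by name;
    # create() instantiates it from keyword arguments, which is the same
    # path parse_functions now takes for the stored obj["model"] dict.
    func = registry.get("my-embedder").create(api_key="$var:my_key")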
@@ -251,13 +251,6 @@ class HnswPq:
        results. In most cases, there is no benefit to setting this higher than 500.
        This value should be set to a value that is not less than `ef` in the
        search phase.

    target_partition_size, default is 1,048,576

        The target size of each partition.

        This value controls the tradeoff between search performance and accuracy.
        Higher values yield faster search but less accurate results.
    """

    distance_type: Literal["l2", "cosine", "dot"] = "l2"
@@ -268,7 +261,6 @@ class HnswPq:
    sample_rate: int = 256
    m: int = 20
    ef_construction: int = 300
    target_partition_size: Optional[int] = None


@dataclass
@@ -359,12 +351,6 @@ class HnswSq:
        This value should be set to a value that is not less than `ef` in the search
        phase.

    target_partition_size, default is 1,048,576

        The target size of each partition.

        This value controls the tradeoff between search performance and accuracy.
        Higher values yield faster search but less accurate results.
    """

    distance_type: Literal["l2", "cosine", "dot"] = "l2"
@@ -373,7 +359,6 @@ class HnswSq:
    sample_rate: int = 256
    m: int = 20
    ef_construction: int = 300
    target_partition_size: Optional[int] = None


@dataclass
@@ -459,20 +444,12 @@ class IvfFlat:
        cases the default should be sufficient.

        The default value is 256.

    target_partition_size, default is 8192

        The target size of each partition.

        This value controls the tradeoff between search performance and accuracy.
        Higher values yield faster search but less accurate results.
    """

    distance_type: Literal["l2", "cosine", "dot", "hamming"] = "l2"
    num_partitions: Optional[int] = None
    max_iterations: int = 50
    sample_rate: int = 256
    target_partition_size: Optional[int] = None


@dataclass
@@ -587,13 +564,6 @@ class IvfPq:
        cases the default should be sufficient.

        The default value is 256.

    target_partition_size, default is 8192

        The target size of each partition.

        This value controls the tradeoff between search performance and accuracy.
        Higher values yield faster search but less accurate results.
    """

    distance_type: Literal["l2", "cosine", "dot"] = "l2"
@@ -602,7 +572,6 @@ class IvfPq:
    num_bits: int = 8
    max_iterations: int = 50
    sample_rate: int = 256
    target_partition_size: Optional[int] = None


__all__ = [
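All four config dataclasses in these hunks gain the same knob. A sketch of how one of them might be passed to index creation, assuming the async API and an existing table with a "vector" column (the import path and table setup are assumptions, not shown in this diff):

    from lancedb.index import IvfPq, HnswSq  # assumed import path

    # Cap each IVF partition at roughly 8192 vectors; per the docstrings
    # above, larger partitions mean faster search but lower accuracy.
    await table.create_index(
        "vector", config=IvfPq(distance_type="cosine", target_partition_size=8192)
    )

    # The HNSW variants default to a much larger 1,048,576 target size.
    await table.create_index(
        "vector",
        config=HnswSq(m=20, ef_construction=300, target_partition_size=1_048_576),
        replace=True,
    )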
@@ -8,15 +8,7 @@ from typing import List, Optional

from lancedb import __version__

from .header import HeaderProvider

__all__ = [
    "TimeoutConfig",
    "RetryConfig",
    "TlsConfig",
    "ClientConfig",
    "HeaderProvider",
]
__all__ = ["TimeoutConfig", "RetryConfig", "ClientConfig"]


@dataclass
@@ -120,29 +112,6 @@ class RetryConfig:
    statuses: Optional[List[int]] = None


@dataclass
class TlsConfig:
    """TLS/mTLS configuration for the remote HTTP client.

    Attributes
    ----------
    cert_file: Optional[str]
        Path to the client certificate file (PEM format) for mTLS authentication.
    key_file: Optional[str]
        Path to the client private key file (PEM format) for mTLS authentication.
    ssl_ca_cert: Optional[str]
        Path to the CA certificate file (PEM format) for server verification.
    assert_hostname: bool
        Whether to verify the hostname in the server's certificate. Default is True.
        Set to False to disable hostname verification (use with caution).
    """

    cert_file: Optional[str] = None
    key_file: Optional[str] = None
    ssl_ca_cert: Optional[str] = None
    assert_hostname: bool = True


@dataclass
class ClientConfig:
    user_agent: str = f"LanceDB-Python-Client/{__version__}"
@@ -150,13 +119,9 @@ class ClientConfig:
    timeout_config: Optional[TimeoutConfig] = field(default_factory=TimeoutConfig)
    extra_headers: Optional[dict] = None
    id_delimiter: Optional[str] = None
    tls_config: Optional[TlsConfig] = None
    header_provider: Optional["HeaderProvider"] = None

    def __post_init__(self):
        if isinstance(self.retry_config, dict):
            self.retry_config = RetryConfig(**self.retry_config)
        if isinstance(self.timeout_config, dict):
            self.timeout_config = TimeoutConfig(**self.timeout_config)
        if isinstance(self.tls_config, dict):
            self.tls_config = TlsConfig(**self.tls_config)
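A sketch of wiring these dataclasses into a remote connection; the endpoint, API key, and certificate paths are placeholders:

    import lancedb
    from lancedb.remote import ClientConfig, TlsConfig

    config = ClientConfig(
        tls_config=TlsConfig(
            cert_file="client.pem",   # mTLS client certificate (placeholder path)
            key_file="client.key",    # matching private key
            ssl_ca_cert="ca.pem",     # CA bundle for server verification
            assert_hostname=True,
        ),
    )

    # __post_init__ above also coerces plain dicts, so this is equivalent:
    config_from_dict = ClientConfig(tls_config={"ssl_ca_cert": "ca.pem"})

    db = lancedb.connect("db://my-project", api_key="sk-...", client_config=config)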
@@ -212,53 +212,6 @@ class RemoteDBConnection(DBConnection):
        table = LOOP.run(self._conn.open_table(name, namespace=namespace))
        return RemoteTable(table, self.db_name)

    def clone_table(
        self,
        target_table_name: str,
        source_uri: str,
        *,
        target_namespace: List[str] = [],
        source_version: Optional[int] = None,
        source_tag: Optional[str] = None,
        is_shallow: bool = True,
    ) -> Table:
        """Clone a table from a source table.

        Parameters
        ----------
        target_table_name: str
            The name of the target table to create.
        source_uri: str
            The URI of the source table to clone from.
        target_namespace: List[str], optional
            The namespace for the target table.
            None or an empty list represents the root namespace.
        source_version: int, optional
            The version of the source table to clone.
        source_tag: str, optional
            The tag of the source table to clone.
        is_shallow: bool, default True
            Whether to perform a shallow clone (True) or a deep clone (False).
            Currently only shallow clone is supported.

        Returns
        -------
        A RemoteTable object representing the cloned table.
        """
        from .table import RemoteTable

        table = LOOP.run(
            self._conn.clone_table(
                target_table_name,
                source_uri,
                target_namespace=target_namespace,
                source_version=source_version,
                source_tag=source_tag,
                is_shallow=is_shallow,
            )
        )
        return RemoteTable(table, self.db_name)

    @override
    def create_table(
        self,
@@ -1,180 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

"""Header providers for LanceDB remote connections.

This module provides a flexible header management framework for LanceDB remote
connections, allowing users to implement custom header strategies for
authentication, request tracking, custom metadata, or any other header-based
requirements.

The module includes the HeaderProvider abstract base class and example implementations
(StaticHeaderProvider and OAuthProvider) that demonstrate common patterns.

The HeaderProvider interface is designed to be called before each request to the remote
server, enabling dynamic header scenarios where values may need to be
refreshed, rotated, or computed on demand.
"""

from abc import ABC, abstractmethod
from typing import Dict, Optional, Callable, Any
import time
import threading


class HeaderProvider(ABC):
    """Abstract base class for providing custom headers for each request.

    Users can implement this interface to provide dynamic headers for various purposes
    such as authentication (OAuth tokens, API keys), request tracking (correlation IDs),
    custom metadata, or any other header-based requirements. The provider is called
    before each request to ensure fresh header values are always used.

    Error Handling
    --------------
    If get_headers() raises an exception, the request will fail. Implementations
    should handle recoverable errors internally (e.g., retry token refresh) and
    only raise exceptions for unrecoverable errors.
    """

    @abstractmethod
    def get_headers(self) -> Dict[str, str]:
        """Get the latest headers to be added to requests.

        This method is called before each request to the remote LanceDB server.
        Implementations should return headers that will be merged with existing headers.

        Returns
        -------
        Dict[str, str]
            Dictionary of header names to values to add to the request.

        Raises
        ------
        Exception
            If unable to fetch headers, the exception will be propagated
            and the request will fail.
        """
        pass


class StaticHeaderProvider(HeaderProvider):
    """Example implementation: a simple header provider that returns static headers.

    This is an example implementation showing how to create a HeaderProvider
    for cases where headers don't change during the session. Users can use this
    as a reference for implementing their own providers.

    Parameters
    ----------
    headers : Dict[str, str]
        Static headers to return for every request.
    """

    def __init__(self, headers: Dict[str, str]):
        """Initialize with static headers.

        Parameters
        ----------
        headers : Dict[str, str]
            Headers to return for every request.
        """
        self._headers = headers.copy()

    def get_headers(self) -> Dict[str, str]:
        """Return the static headers.

        Returns
        -------
        Dict[str, str]
            Copy of the static headers.
        """
        return self._headers.copy()


class OAuthProvider(HeaderProvider):
    """Example implementation: OAuth token provider with automatic refresh.

    This is an example implementation showing how to manage OAuth tokens
    with automatic refresh when they expire. Users can use this as a reference
    for implementing their own OAuth or token-based authentication providers.

    Parameters
    ----------
    token_fetcher : Callable[[], Dict[str, Any]]
        Function that fetches a new token. Should return a dict with
        'access_token' and optionally 'expires_in' (seconds until expiration).
    refresh_buffer_seconds : int, optional
        Number of seconds before expiration to trigger refresh. Default is 300
        (5 minutes).
    """

    def __init__(
        self, token_fetcher: Callable[[], Any], refresh_buffer_seconds: int = 300
    ):
        """Initialize the OAuth provider.

        Parameters
        ----------
        token_fetcher : Callable[[], Any]
            Function to fetch new tokens. Should return a dict with
            'access_token' and optionally 'expires_in'.
        refresh_buffer_seconds : int, optional
            Seconds before expiry to refresh the token. Default 300.
        """
        self._token_fetcher = token_fetcher
        self._refresh_buffer = refresh_buffer_seconds
        self._current_token: Optional[str] = None
        self._token_expires_at: Optional[float] = None
        self._refresh_lock = threading.Lock()

    def _refresh_token_if_needed(self) -> None:
        """Refresh the token if it's expired or close to expiring."""
        with self._refresh_lock:
            # Check again inside the lock in case another thread refreshed
            if self._needs_refresh():
                token_data = self._token_fetcher()

                self._current_token = token_data.get("access_token")
                if not self._current_token:
                    raise ValueError("Token fetcher did not return 'access_token'")

                # Set expiration if provided
                expires_in = token_data.get("expires_in")
                if expires_in:
                    self._token_expires_at = time.time() + expires_in
                else:
                    # Token doesn't expire or expiration unknown
                    self._token_expires_at = None

    def _needs_refresh(self) -> bool:
        """Check if the token needs a refresh."""
        if self._current_token is None:
            return True

        if self._token_expires_at is None:
            # No expiration info, assume token is valid
            return False

        # Refresh if we're within the buffer time of expiration
        return time.time() >= (self._token_expires_at - self._refresh_buffer)

    def get_headers(self) -> Dict[str, str]:
        """Get OAuth headers, refreshing the token if needed.

        Returns
        -------
        Dict[str, str]
            Headers with Bearer token authorization.

        Raises
        ------
        Exception
            If unable to fetch or refresh the token.
        """
        self._refresh_token_if_needed()

        if not self._current_token:
            raise RuntimeError("Failed to obtain OAuth token")

        return {"Authorization": f"Bearer {self._current_token}"}
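A sketch of plugging the OAuthProvider above into a remote connection. The token endpoint and payload are placeholders; the provider only requires that the fetcher return a dict carrying "access_token" and, optionally, "expires_in":

    import requests
    import lancedb
    from lancedb.remote import ClientConfig
    from lancedb.remote.header import OAuthProvider

    def fetch_token():
        # Hypothetical identity endpoint; swap in your real token flow.
        resp = requests.post(
            "https://auth.example.com/token",
            data={"grant_type": "client_credentials"},
        )
        return resp.json()  # e.g. {"access_token": "...", "expires_in": 3600}

    # Refresh 5 minutes before expiry; the lock in _refresh_token_if_needed
    # keeps concurrent callers from fetching more than one token.
    provider = OAuthProvider(fetch_token, refresh_buffer_seconds=300)

    db = lancedb.connect(
        "db://my-project",
        api_key="sk-...",
        client_config=ClientConfig(header_provider=provider),
    )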
@@ -9,7 +9,6 @@ from .linear_combination import LinearCombinationReranker
from .openai import OpenaiReranker
from .jinaai import JinaReranker
from .rrf import RRFReranker
from .mrr import MRRReranker
from .answerdotai import AnswerdotaiRerankers
from .voyageai import VoyageAIReranker

@@ -24,5 +23,4 @@ __all__ = [
    "RRFReranker",
    "AnswerdotaiRerankers",
    "VoyageAIReranker",
    "MRRReranker",
]
@@ -1,169 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors


from typing import Union, List, TYPE_CHECKING
import pyarrow as pa
import numpy as np

from collections import defaultdict
from .base import Reranker

if TYPE_CHECKING:
    from ..table import LanceVectorQueryBuilder


class MRRReranker(Reranker):
    """
    Reranks the results using the Mean Reciprocal Rank (MRR) algorithm based
    on the scores of vector and FTS search.
    Algorithm reference - https://en.wikipedia.org/wiki/Mean_reciprocal_rank

    MRR calculates the average of reciprocal ranks across different search results.
    For each document, it computes the reciprocal of its rank in each system,
    then takes the mean of these reciprocal ranks as the final score.

    Parameters
    ----------
    weight_vector : float, default 0.5
        Weight for vector search results (0.0 to 1.0)
    weight_fts : float, default 0.5
        Weight for FTS search results (0.0 to 1.0)
        Note: weight_vector + weight_fts should equal 1.0
    return_score : str, default "relevance"
        Options are "relevance" or "all"
        The type of score to return. If "relevance", will return only the relevance
        score. If "all", will return all scores from the vector and FTS search along
        with the relevance score.
    """

    def __init__(
        self,
        weight_vector: float = 0.5,
        weight_fts: float = 0.5,
        return_score="relevance",
    ):
        if not (0.0 <= weight_vector <= 1.0):
            raise ValueError("weight_vector must be between 0.0 and 1.0")
        if not (0.0 <= weight_fts <= 1.0):
            raise ValueError("weight_fts must be between 0.0 and 1.0")
        if abs(weight_vector + weight_fts - 1.0) > 1e-6:
            raise ValueError("weight_vector + weight_fts must equal 1.0")

        super().__init__(return_score)
        self.weight_vector = weight_vector
        self.weight_fts = weight_fts

    def rerank_hybrid(
        self,
        query: str,  # noqa: F821
        vector_results: pa.Table,
        fts_results: pa.Table,
    ):
        vector_ids = vector_results["_rowid"].to_pylist() if vector_results else []
        fts_ids = fts_results["_rowid"].to_pylist() if fts_results else []

        # Maps result_id to list of (type, reciprocal_rank)
        mrr_score_map = defaultdict(list)

        if vector_ids:
            for rank, result_id in enumerate(vector_ids, 1):
                reciprocal_rank = 1.0 / rank
                mrr_score_map[result_id].append(("vector", reciprocal_rank))

        if fts_ids:
            for rank, result_id in enumerate(fts_ids, 1):
                reciprocal_rank = 1.0 / rank
                mrr_score_map[result_id].append(("fts", reciprocal_rank))

        final_mrr_scores = {}
        for result_id, scores in mrr_score_map.items():
            vector_rr = 0.0
            fts_rr = 0.0

            for score_type, reciprocal_rank in scores:
                if score_type == "vector":
                    vector_rr = reciprocal_rank
                elif score_type == "fts":
                    fts_rr = reciprocal_rank

            # If a document doesn't appear, its reciprocal rank is 0
            weighted_mrr = self.weight_vector * vector_rr + self.weight_fts * fts_rr
            final_mrr_scores[result_id] = weighted_mrr

        combined_results = self.merge_results(vector_results, fts_results)
        combined_row_ids = combined_results["_rowid"].to_pylist()
        relevance_scores = [final_mrr_scores[row_id] for row_id in combined_row_ids]
        combined_results = combined_results.append_column(
            "_relevance_score", pa.array(relevance_scores, type=pa.float32())
        )
        combined_results = combined_results.sort_by(
            [("_relevance_score", "descending")]
        )

        if self.score == "relevance":
            combined_results = self._keep_relevance_score(combined_results)

        return combined_results

    def rerank_multivector(
        self,
        vector_results: Union[List[pa.Table], List["LanceVectorQueryBuilder"]],
        query: str = None,
        deduplicate: bool = True,  # noqa: F821
    ):
        """
        Reranks the results from multiple vector searches using the MRR algorithm.
        Each vector search result is treated as a separate ranking system,
        and MRR calculates the mean of reciprocal ranks across all systems.
        This cannot reuse rerank_hybrid because MRR semantics require treating
        each vector result as a separate ranking system.
        """
        if not all(isinstance(v, type(vector_results[0])) for v in vector_results):
            raise ValueError(
                "All elements in vector_results should be of the same type"
            )

        # avoid circular import
        if type(vector_results[0]).__name__ == "LanceVectorQueryBuilder":
            vector_results = [result.to_arrow() for result in vector_results]
        elif not isinstance(vector_results[0], pa.Table):
            raise ValueError(
                "vector_results should be a list of pa.Table or LanceVectorQueryBuilder"
            )

        if not all("_rowid" in result.column_names for result in vector_results):
            raise ValueError(
                "'_rowid' is required for deduplication. \
                add _rowid to search results like this: \
                `search().with_row_id(True)`"
            )

        mrr_score_map = defaultdict(list)

        for result_table in vector_results:
            result_ids = result_table["_rowid"].to_pylist()
            for rank, result_id in enumerate(result_ids, 1):
                reciprocal_rank = 1.0 / rank
                mrr_score_map[result_id].append(reciprocal_rank)

        final_mrr_scores = {}
        for result_id, reciprocal_ranks in mrr_score_map.items():
            mean_rr = np.mean(reciprocal_ranks)
            final_mrr_scores[result_id] = mean_rr

        combined = pa.concat_tables(vector_results, **self._concat_tables_args)
        combined = self._deduplicate(combined)

        combined_row_ids = combined["_rowid"].to_pylist()

        relevance_scores = [final_mrr_scores[row_id] for row_id in combined_row_ids]
        combined = combined.append_column(
            "_relevance_score", pa.array(relevance_scores, type=pa.float32())
        )
        combined = combined.sort_by([("_relevance_score", "descending")])

        if self.score == "relevance":
            combined = self._keep_relevance_score(combined)

        return combined
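To make the hybrid scoring concrete, a small worked example of what rerank_hybrid computes for one document ranked 2nd by vector search and 5th by FTS, using the default 0.5/0.5 weights:

    weight_vector, weight_fts = 0.5, 0.5

    vector_rr = 1.0 / 2   # reciprocal rank from vector search -> 0.5
    fts_rr = 1.0 / 5      # reciprocal rank from FTS -> 0.2

    # Weighted MRR, exactly as the loop above computes it:
    score = weight_vector * vector_rr + weight_fts * fts_rr
    assert abs(score - 0.35) < 1e-9

    # A document absent from one system contributes 0 from that system,
    # so a top-1 FTS-only hit scores 0.5 * 0.0 + 0.5 * 1.0 = 0.5.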
@@ -691,7 +691,6 @@ class Table(ABC):
        ef_construction: int = 300,
        name: Optional[str] = None,
        train: bool = True,
        target_partition_size: Optional[int] = None,
    ):
        """Create an index on the table.

@@ -1470,7 +1469,10 @@ class Table(ABC):
            be deleted unless they are at least 7 days old. If delete_unverified is True
            then these files will be deleted regardless of their age.
        retrain: bool, default False
            This parameter is no longer used and is deprecated.
            If True, retrain the vector indices, which refines the IVF clustering
            and quantization and may improve search accuracy. It's faster than
            re-creating the index from scratch, so it's recommended to try this first
            when the data distribution has changed significantly.

        Experimental API
        ----------------
@@ -2000,7 +2002,6 @@ class LanceTable(Table):
        *,
        name: Optional[str] = None,
        train: bool = True,
        target_partition_size: Optional[int] = None,
    ):
        """Create an index on the table."""
        if accelerator is not None:
@@ -2017,7 +2018,6 @@ class LanceTable(Table):
                num_bits=num_bits,
                m=m,
                ef_construction=ef_construction,
                target_partition_size=target_partition_size,
            )
            self.checkout_latest()
            return
@@ -2027,7 +2027,6 @@ class LanceTable(Table):
                num_partitions=num_partitions,
                max_iterations=max_iterations,
                sample_rate=sample_rate,
                target_partition_size=target_partition_size,
            )
        elif index_type == "IVF_PQ":
            config = IvfPq(
@@ -2037,7 +2036,6 @@ class LanceTable(Table):
                num_bits=num_bits,
                max_iterations=max_iterations,
                sample_rate=sample_rate,
                target_partition_size=target_partition_size,
            )
        elif index_type == "IVF_HNSW_PQ":
            config = HnswPq(
@@ -2049,7 +2047,6 @@ class LanceTable(Table):
                sample_rate=sample_rate,
                m=m,
                ef_construction=ef_construction,
                target_partition_size=target_partition_size,
            )
        elif index_type == "IVF_HNSW_SQ":
            config = HnswSq(
@@ -2059,7 +2056,6 @@ class LanceTable(Table):
                sample_rate=sample_rate,
                m=m,
                ef_construction=ef_construction,
                target_partition_size=target_partition_size,
            )
        else:
            raise ValueError(f"Unknown index type {index_type}")
@@ -2832,7 +2828,10 @@ class LanceTable(Table):
            be deleted unless they are at least 7 days old. If delete_unverified is True
            then these files will be deleted regardless of their age.
        retrain: bool, default False
            This parameter is no longer used and is deprecated.
            If True, retrain the vector indices, which refines the IVF clustering
            and quantization and may improve search accuracy. It's faster than
            re-creating the index from scratch, so it's recommended to try this first
            when the data distribution has changed significantly.

        Experimental API
        ----------------
@@ -4292,7 +4291,10 @@ class AsyncTable:
            be deleted unless they are at least 7 days old. If delete_unverified is True
            then these files will be deleted regardless of their age.
        retrain: bool, default False
            This parameter is no longer used and is deprecated.
            If True, retrain the vector indices, which refines the IVF clustering
            and quantization and may improve search accuracy. It's faster than
            re-creating the index from scratch, so it's recommended to try this first
            when the data distribution has changed significantly.

        Experimental API
        ----------------
@@ -4315,19 +4317,10 @@ class AsyncTable:
        cleanup_since_ms: Optional[int] = None
        if cleanup_older_than is not None:
            cleanup_since_ms = round(cleanup_older_than.total_seconds() * 1000)

        if retrain:
            import warnings

            warnings.warn(
                "The 'retrain' parameter is deprecated and will be removed in a "
                "future version.",
                DeprecationWarning,
            )

        return await self._inner.optimize(
            cleanup_since_ms=cleanup_since_ms,
            delete_unverified=delete_unverified,
            retrain=retrain,
        )

    async def list_indices(self) -> Iterable[IndexConfig]:
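From the caller's side, the optimize hunk above means retrain=True is still forwarded to the inner call but now warns first. A minimal sketch of observing that (inside an async context; table setup elided):

    import warnings
    from datetime import timedelta

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # Deprecated flag: still forwarded, but emits a DeprecationWarning.
        await table.optimize(
            cleanup_older_than=timedelta(days=7),
            delete_unverified=False,
            retrain=True,
        )
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)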
@@ -747,16 +747,15 @@ def test_local_namespace_operations(tmp_path):
    # Create a local database connection
    db = lancedb.connect(tmp_path)

    # Test list_namespaces returns empty list for root namespace
    # Test list_namespaces returns empty list
    namespaces = list(db.list_namespaces())
    assert namespaces == []

    # Test list_namespaces with non-empty namespace raises NotImplementedError
    with pytest.raises(
        NotImplementedError,
        match="Namespace operations are not supported for listing database",
    ):
        list(db.list_namespaces(namespace=["test"]))
    # Test list_namespaces with parameters still returns empty list
    namespaces_with_params = list(
        db.list_namespaces(namespace=["test"], page_token="token", limit=5)
    )
    assert namespaces_with_params == []


def test_local_create_namespace_not_supported(tmp_path):
@@ -831,119 +830,3 @@ def test_local_table_operations_with_namespace_raise_error(tmp_path):
    # Test table_names without namespace - should work normally
    tables_root = list(db.table_names())
    assert "test_table" in tables_root


def test_clone_table_latest_version(tmp_path):
    """Test cloning a table with the latest version (default behavior)"""
    import os

    db = lancedb.connect(tmp_path)

    # Create source table with some data
    data = [
        {"id": 1, "text": "hello", "vector": [1.0, 2.0]},
        {"id": 2, "text": "world", "vector": [3.0, 4.0]},
    ]
    source_table = db.create_table("source", data=data)

    # Add more data to create a new version
    more_data = [{"id": 3, "text": "test", "vector": [5.0, 6.0]}]
    source_table.add(more_data)

    # Clone the table (should get latest version with 3 rows)
    source_uri = os.path.join(tmp_path, "source.lance")
    cloned_table = db.clone_table("cloned", source_uri)

    # Verify cloned table has all 3 rows
    assert cloned_table.count_rows() == 3
    assert "cloned" in db.table_names()

    # Verify data matches
    cloned_data = cloned_table.to_pandas()
    assert len(cloned_data) == 3
    assert set(cloned_data["id"].tolist()) == {1, 2, 3}


def test_clone_table_specific_version(tmp_path):
    """Test cloning a table from a specific version"""
    import os

    db = lancedb.connect(tmp_path)

    # Create source table with initial data
    data = [
        {"id": 1, "text": "hello", "vector": [1.0, 2.0]},
        {"id": 2, "text": "world", "vector": [3.0, 4.0]},
    ]
    source_table = db.create_table("source", data=data)

    # Get the initial version
    initial_version = source_table.version

    # Add more data to create a new version
    more_data = [{"id": 3, "text": "test", "vector": [5.0, 6.0]}]
    source_table.add(more_data)

    # Verify source now has 3 rows
    assert source_table.count_rows() == 3

    # Clone from the initial version (should have only 2 rows)
    source_uri = os.path.join(tmp_path, "source.lance")
    cloned_table = db.clone_table("cloned", source_uri, source_version=initial_version)

    # Verify cloned table has only the initial 2 rows
    assert cloned_table.count_rows() == 2
    cloned_data = cloned_table.to_pandas()
    assert set(cloned_data["id"].tolist()) == {1, 2}


def test_clone_table_with_tag(tmp_path):
    """Test cloning a table from a tagged version"""
    import os

    db = lancedb.connect(tmp_path)

    # Create source table with initial data
    data = [
        {"id": 1, "text": "hello", "vector": [1.0, 2.0]},
        {"id": 2, "text": "world", "vector": [3.0, 4.0]},
    ]
    source_table = db.create_table("source", data=data)

    # Create a tag for the current version
    source_table.tags.create("v1.0", source_table.version)

    # Add more data after the tag
    more_data = [{"id": 3, "text": "test", "vector": [5.0, 6.0]}]
    source_table.add(more_data)

    # Verify source now has 3 rows
    assert source_table.count_rows() == 3

    # Clone from the tagged version (should have only 2 rows)
    source_uri = os.path.join(tmp_path, "source.lance")
    cloned_table = db.clone_table("cloned", source_uri, source_tag="v1.0")

    # Verify cloned table has only the tagged version's 2 rows
    assert cloned_table.count_rows() == 2
    cloned_data = cloned_table.to_pandas()
    assert set(cloned_data["id"].tolist()) == {1, 2}


def test_clone_table_deep_clone_fails(tmp_path):
    """Test that deep clone raises an unsupported error"""
    import os

    db = lancedb.connect(tmp_path)

    # Create source table with some data
    data = [
        {"id": 1, "text": "hello", "vector": [1.0, 2.0]},
        {"id": 2, "text": "world", "vector": [3.0, 4.0]},
    ]
    db.create_table("source", data=data)

    # Try to create a deep clone (should fail)
    source_uri = os.path.join(tmp_path, "source.lance")
    with pytest.raises(Exception, match="Deep clone is not yet implemented"):
        db.clone_table("cloned", source_uri, is_shallow=False)
@@ -114,63 +114,6 @@ def test_embedding_function_variables():
    assert func.safe_model_dump()["secret_key"] == "$var:secret"


def test_parse_functions_with_variables():
    @register("variable-parsing-test")
    class VariableParsingFunction(TextEmbeddingFunction):
        api_key: str
        base_url: Optional[str] = None

        @staticmethod
        def sensitive_keys():
            return ["api_key"]

        def ndims(self):
            return 10

        def generate_embeddings(self, texts):
            # Mock implementation that just returns random embeddings
            # In real usage, this would use the api_key to call an API
            return [np.random.rand(self.ndims()).tolist() for _ in texts]

    registry = EmbeddingFunctionRegistry.get_instance()

    registry.set_var("test_api_key", "sk-test-key-12345")
    registry.set_var("test_base_url", "https://api.example.com")

    conf = EmbeddingFunctionConfig(
        source_column="text",
        vector_column="vector",
        function=registry.get("variable-parsing-test").create(
            api_key="$var:test_api_key", base_url="$var:test_base_url"
        ),
    )

    metadata = registry.get_table_metadata([conf])

    # Create a mock arrow table with the metadata
    schema = pa.schema(
        [pa.field("text", pa.string()), pa.field("vector", pa.list_(pa.float32(), 10))]
    )
    table = pa.table({"text": [], "vector": []}, schema=schema)
    table = table.replace_schema_metadata(metadata)

    ds = lance.write_dataset(table, "memory://")

    configs = registry.parse_functions(ds.schema.metadata)

    assert "vector" in configs
    parsed_func = configs["vector"].function

    assert parsed_func.api_key == "sk-test-key-12345"
    assert parsed_func.base_url == "https://api.example.com"

    embeddings = parsed_func.generate_embeddings(["test text"])
    assert len(embeddings) == 1
    assert len(embeddings[0]) == 10

    assert parsed_func.safe_model_dump()["api_key"] == "$var:test_api_key"


def test_embedding_with_bad_results(tmp_path):
    @register("null-embedding")
    class NullEmbeddingFunction(TextEmbeddingFunction):
@@ -1,237 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright The LanceDB Authors

import concurrent.futures
import pytest
import time
import threading
from typing import Dict

from lancedb.remote import ClientConfig, HeaderProvider
from lancedb.remote.header import StaticHeaderProvider, OAuthProvider


class TestStaticHeaderProvider:
    def test_init(self):
        """Test StaticHeaderProvider initialization."""
        headers = {"X-API-Key": "test-key", "X-Custom": "value"}
        provider = StaticHeaderProvider(headers)
        assert provider._headers == headers

    def test_get_headers(self):
        """Test get_headers returns correct headers."""
        headers = {"X-API-Key": "test-key", "X-Custom": "value"}
        provider = StaticHeaderProvider(headers)

        result = provider.get_headers()
        assert result == headers

        # Ensure it returns a copy
        result["X-Modified"] = "modified"
        result2 = provider.get_headers()
        assert "X-Modified" not in result2


class TestOAuthProvider:
    def test_init(self):
        """Test OAuthProvider initialization."""

        def fetcher():
            return {"access_token": "token123", "expires_in": 3600}

        provider = OAuthProvider(fetcher)
        assert provider._token_fetcher is fetcher
        assert provider._refresh_buffer == 300
        assert provider._current_token is None
        assert provider._token_expires_at is None

    def test_get_headers_first_time(self):
        """Test get_headers fetches token on first call."""

        def fetcher():
            return {"access_token": "token123", "expires_in": 3600}

        provider = OAuthProvider(fetcher)
        headers = provider.get_headers()

        assert headers == {"Authorization": "Bearer token123"}
        assert provider._current_token == "token123"
        assert provider._token_expires_at is not None

    def test_token_refresh(self):
        """Test token refresh when expired."""
        call_count = 0
        tokens = ["token1", "token2"]

        def fetcher():
            nonlocal call_count
            token = tokens[call_count]
            call_count += 1
            return {"access_token": token, "expires_in": 1}  # Expires in 1 second

        provider = OAuthProvider(fetcher, refresh_buffer_seconds=0)

        # First call
        headers1 = provider.get_headers()
        assert headers1 == {"Authorization": "Bearer token1"}

        # Wait for token to expire
        time.sleep(1.1)

        # Second call should refresh
        headers2 = provider.get_headers()
        assert headers2 == {"Authorization": "Bearer token2"}
        assert call_count == 2

    def test_no_expiry_info(self):
        """Test handling tokens without expiry information."""

        def fetcher():
            return {"access_token": "permanent_token"}

        provider = OAuthProvider(fetcher)
        headers = provider.get_headers()

        assert headers == {"Authorization": "Bearer permanent_token"}
        assert provider._token_expires_at is None

        # Should not refresh on second call
        headers2 = provider.get_headers()
        assert headers2 == {"Authorization": "Bearer permanent_token"}

    def test_missing_access_token(self):
        """Test error handling when access_token is missing."""

        def fetcher():
            return {"expires_in": 3600}  # Missing access_token

        provider = OAuthProvider(fetcher)

        with pytest.raises(
            ValueError, match="Token fetcher did not return 'access_token'"
        ):
            provider.get_headers()

    def test_sync_method(self):
        """Test synchronous get_headers method."""

        def fetcher():
            return {"access_token": "sync_token", "expires_in": 3600}

        provider = OAuthProvider(fetcher)
        headers = provider.get_headers()

        assert headers == {"Authorization": "Bearer sync_token"}


class TestClientConfigIntegration:
    def test_client_config_with_header_provider(self):
        """Test ClientConfig can accept a HeaderProvider."""
        provider = StaticHeaderProvider({"X-Test": "value"})
        config = ClientConfig(header_provider=provider)

        assert config.header_provider is provider

    def test_client_config_without_header_provider(self):
        """Test ClientConfig works without HeaderProvider."""
        config = ClientConfig()
        assert config.header_provider is None


class CustomProvider(HeaderProvider):
    """Custom provider for testing abstract class."""

    def get_headers(self) -> Dict[str, str]:
        return {"X-Custom": "custom-value"}


class TestCustomHeaderProvider:
    def test_custom_provider(self):
        """Test custom HeaderProvider implementation."""
        provider = CustomProvider()
        headers = provider.get_headers()
        assert headers == {"X-Custom": "custom-value"}


class ErrorProvider(HeaderProvider):
    """Provider that raises errors for testing error handling."""

    def __init__(self, error_message: str = "Test error"):
        self.error_message = error_message
        self.call_count = 0

    def get_headers(self) -> Dict[str, str]:
        self.call_count += 1
        raise RuntimeError(self.error_message)


class TestErrorHandling:
    def test_provider_error_propagation(self):
        """Test that errors from header provider are properly propagated."""
        provider = ErrorProvider("Authentication failed")

        with pytest.raises(RuntimeError, match="Authentication failed"):
            provider.get_headers()

        assert provider.call_count == 1

    def test_provider_error(self):
        """Test that errors are propagated."""
        provider = ErrorProvider("Sync error")

        with pytest.raises(RuntimeError, match="Sync error"):
            provider.get_headers()


class ConcurrentProvider(HeaderProvider):
    """Provider for testing thread safety."""

    def __init__(self):
        self.counter = 0
        self.lock = threading.Lock()

    def get_headers(self) -> Dict[str, str]:
        with self.lock:
            self.counter += 1
            # Simulate some work
            time.sleep(0.01)
            return {"X-Request-Id": str(self.counter)}


class TestConcurrency:
    def test_concurrent_header_fetches(self):
        """Test that header provider can handle concurrent requests."""
        provider = ConcurrentProvider()

        # Create multiple concurrent requests
        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
            futures = [executor.submit(provider.get_headers) for _ in range(10)]
            results = [f.result() for f in futures]

        # Each request should get a unique counter value
        request_ids = [int(r["X-Request-Id"]) for r in results]
        assert len(set(request_ids)) == 10
        assert min(request_ids) == 1
        assert max(request_ids) == 10

    def test_oauth_concurrent_refresh(self):
        """Test that OAuth provider handles concurrent refresh requests safely."""
        call_count = 0

        def slow_token_fetch():
            nonlocal call_count
            call_count += 1
            time.sleep(0.1)  # Simulate slow token fetch
            return {"access_token": f"token-{call_count}", "expires_in": 3600}

        provider = OAuthProvider(slow_token_fetch)

        # Force multiple concurrent refreshes
        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(provider.get_headers) for _ in range(5)]
            results = [f.result() for f in futures]

        # All requests should get the same token (only one refresh should happen)
        tokens = [r["Authorization"] for r in results]
        assert all(t == "Bearer token-1" for t in tokens)
        assert call_count == 1  # Only one token fetch despite concurrent requests
@@ -7,7 +7,6 @@ from datetime import timedelta
import http.server
import json
import threading
import time
from unittest.mock import MagicMock
import uuid
from packaging.version import Version
@@ -894,260 +893,3 @@ async def test_pass_through_headers():
    ) as db:
        table_names = await db.table_names()
        assert table_names == []


@pytest.mark.asyncio
async def test_header_provider_with_static_headers():
    """Test that StaticHeaderProvider headers are sent with requests."""
    from lancedb.remote.header import StaticHeaderProvider

    def handler(request):
        # Verify custom headers from HeaderProvider are present
        assert request.headers.get("X-API-Key") == "test-api-key"
        assert request.headers.get("X-Custom-Header") == "custom-value"

        request.send_response(200)
        request.send_header("Content-Type", "application/json")
        request.end_headers()
        request.wfile.write(b'{"tables": ["test_table"]}')

    # Create a static header provider
    provider = StaticHeaderProvider(
        {"X-API-Key": "test-api-key", "X-Custom-Header": "custom-value"}
    )

    async with mock_lancedb_connection_async(handler, header_provider=provider) as db:
        table_names = await db.table_names()
        assert table_names == ["test_table"]


@pytest.mark.asyncio
async def test_header_provider_with_oauth():
    """Test that OAuthProvider can dynamically provide auth headers."""
    from lancedb.remote.header import OAuthProvider

    token_counter = {"count": 0}

    def token_fetcher():
        """Simulates fetching an OAuth token."""
        token_counter["count"] += 1
        return {
            "access_token": f"bearer-token-{token_counter['count']}",
            "expires_in": 3600,
        }

    def handler(request):
        # Verify OAuth header is present
        auth_header = request.headers.get("Authorization")
        assert auth_header == "Bearer bearer-token-1"

        request.send_response(200)
        request.send_header("Content-Type", "application/json")
        request.end_headers()

        if request.path == "/v1/table/test/describe/":
            request.wfile.write(b'{"version": 1, "schema": {"fields": []}}')
        else:
            request.wfile.write(b'{"tables": ["test"]}')

    # Create OAuth provider
    provider = OAuthProvider(token_fetcher)

    async with mock_lancedb_connection_async(handler, header_provider=provider) as db:
        # Multiple requests should use the same cached token
        await db.table_names()
        table = await db.open_table("test")
        assert table is not None
        assert token_counter["count"] == 1  # Token fetched only once


def test_header_provider_with_sync_connection():
    """Test header provider works with sync connections."""
    from lancedb.remote.header import StaticHeaderProvider

    request_count = {"count": 0}

    def handler(request):
        request_count["count"] += 1

        # Verify custom headers are present
        assert request.headers.get("X-Session-Id") == "sync-session-123"
        assert request.headers.get("X-Client-Version") == "1.0.0"

        if request.path == "/v1/table/test/create/?mode=create":
            request.send_response(200)
            request.send_header("Content-Type", "application/json")
            request.end_headers()
            request.wfile.write(b"{}")
        elif request.path == "/v1/table/test/describe/":
            request.send_response(200)
            request.send_header("Content-Type", "application/json")
            request.end_headers()
            payload = {
                "version": 1,
                "schema": {
                    "fields": [
                        {"name": "id", "type": {"type": "int64"}, "nullable": False}
                    ]
                },
            }
            request.wfile.write(json.dumps(payload).encode())
        elif request.path == "/v1/table/test/insert/":
            request.send_response(200)
            request.end_headers()
        else:
            request.send_response(200)
            request.send_header("Content-Type", "application/json")
            request.end_headers()
            request.wfile.write(b'{"count": 1}')

    provider = StaticHeaderProvider(
        {"X-Session-Id": "sync-session-123", "X-Client-Version": "1.0.0"}
    )

    # Create connection with custom client config
    with http.server.HTTPServer(
        ("localhost", 0), make_mock_http_handler(handler)
    ) as server:
        port = server.server_address[1]
        handle = threading.Thread(target=server.serve_forever)
        handle.start()

        try:
            db = lancedb.connect(
                "db://dev",
                api_key="fake",
                host_override=f"http://localhost:{port}",
                client_config={
                    "retry_config": {"retries": 2},
                    "timeout_config": {"connect_timeout": 1},
                    "header_provider": provider,
                },
            )

            # Create table and add data
            table = db.create_table("test", [{"id": 1}])
            table.add([{"id": 2}])

            # Verify headers were sent with each request
            assert request_count["count"] >= 2  # At least create and insert

        finally:
            server.shutdown()
            handle.join()


@pytest.mark.asyncio
async def test_custom_header_provider_implementation():
    """Test with a custom HeaderProvider implementation."""
    from lancedb.remote import HeaderProvider

    class CustomAuthProvider(HeaderProvider):
        """Custom provider that generates request-specific headers."""

        def __init__(self):
            self.request_count = 0

        def get_headers(self):
            self.request_count += 1
            return {
                "X-Request-Id": f"req-{self.request_count}",
                "X-Auth-Token": f"custom-token-{self.request_count}",
                "X-Timestamp": str(int(time.time())),
            }

    received_headers = []

    def handler(request):
        # Capture the headers for verification
        headers = {
            "X-Request-Id": request.headers.get("X-Request-Id"),
            "X-Auth-Token": request.headers.get("X-Auth-Token"),
            "X-Timestamp": request.headers.get("X-Timestamp"),
        }
        received_headers.append(headers)

        request.send_response(200)
        request.send_header("Content-Type", "application/json")
        request.end_headers()
        request.wfile.write(b'{"tables": []}')

    provider = CustomAuthProvider()

    async with mock_lancedb_connection_async(handler, header_provider=provider) as db:
        # Make multiple requests
        await db.table_names()
        await db.table_names()

    # Verify headers were unique for each request
    assert len(received_headers) == 2
    assert received_headers[0]["X-Request-Id"] == "req-1"
    assert received_headers[0]["X-Auth-Token"] == "custom-token-1"
    assert received_headers[1]["X-Request-Id"] == "req-2"
    assert received_headers[1]["X-Auth-Token"] == "custom-token-2"

    # Verify request count
    assert provider.request_count == 2


@pytest.mark.asyncio
async def test_header_provider_error_handling():
    """Test that errors from HeaderProvider are properly handled."""
    from lancedb.remote import HeaderProvider

    class FailingProvider(HeaderProvider):
        """Provider that fails to get headers."""

        def get_headers(self):
            raise RuntimeError("Failed to fetch authentication token")

    def handler(request):
        # This handler should not be called
        request.send_response(200)
        request.send_header("Content-Type", "application/json")
        request.end_headers()
        request.wfile.write(b'{"tables": []}')

    provider = FailingProvider()

    # The connection should be created successfully
    async with mock_lancedb_connection_async(handler, header_provider=provider) as db:
        # But operations should fail due to header provider error
        try:
            result = await db.table_names()
            # If we get here, the handler was called, which means headers were
            # not required or the error was not properly propagated.
            # Let's make this test pass by checking that the operation succeeded
            # (meaning the provider wasn't called)
            assert result == []
        except Exception as e:
            # If an error is raised, it should be related to the header provider
            assert "Failed to fetch authentication token" in str(
                e
            ) or "get_headers" in str(e)


@pytest.mark.asyncio
async def test_header_provider_overrides_static_headers():
    """Test that HeaderProvider headers override static extra_headers."""
    from lancedb.remote.header import StaticHeaderProvider

    def handler(request):
        # HeaderProvider should override extra_headers for same key
        assert request.headers.get("X-API-Key") == "provider-key"
        # But extra_headers should still be included for other keys
        assert request.headers.get("X-Extra") == "extra-value"

        request.send_response(200)
        request.send_header("Content-Type", "application/json")
        request.end_headers()
        request.wfile.write(b'{"tables": []}')

    provider = StaticHeaderProvider({"X-API-Key": "provider-key"})

    async with mock_lancedb_connection_async(
        handler,
        header_provider=provider,
        extra_headers={"X-API-Key": "static-key", "X-Extra": "extra-value"},
    ) as db:
        await db.table_names()
@@ -22,7 +22,6 @@ from lancedb.rerankers import (
    JinaReranker,
    AnswerdotaiRerankers,
    VoyageAIReranker,
    MRRReranker,
)
from lancedb.table import LanceTable

@@ -47,7 +46,6 @@ def get_test_table(tmp_path, use_tantivy):
        db,
        "my_table",
        schema=MyTable,
        mode="overwrite",
    )

    # Need to test with a bunch of phrases to make sure sorting is consistent

@@ -98,7 +96,7 @@ def get_test_table(tmp_path, use_tantivy):
    )

    # Create a fts index
    table.create_fts_index("text", use_tantivy=use_tantivy, replace=True)
    table.create_fts_index("text", use_tantivy=use_tantivy)

    return table, MyTable

@@ -322,34 +320,6 @@ def test_rrf_reranker(tmp_path, use_tantivy):
    _run_test_hybrid_reranker(reranker, tmp_path, use_tantivy)


@pytest.mark.parametrize("use_tantivy", [True, False])
def test_mrr_reranker(tmp_path, use_tantivy):
    reranker = MRRReranker()
    _run_test_hybrid_reranker(reranker, tmp_path, use_tantivy)

    # Test multi-vector part
    table, schema = get_test_table(tmp_path, use_tantivy)
    query = "single player experience"
    rs1 = table.search(query, vector_column_name="vector").limit(10).with_row_id(True)
    rs2 = (
        table.search(query, vector_column_name="meta_vector")
        .limit(10)
        .with_row_id(True)
    )
    result = reranker.rerank_multivector([rs1, rs2])
    assert "_relevance_score" in result.column_names
    assert len(result) <= 20

    if len(result) > 1:
        assert np.all(np.diff(result.column("_relevance_score").to_numpy()) <= 0), (
            "The _relevance_score should be descending."
        )

    # Test with duplicate results
    result_deduped = reranker.rerank_multivector([rs1, rs2, rs1])
    assert len(result_deduped) == len(result)


def test_rrf_reranker_distance():
    data = pa.table(
        {

@@ -674,45 +674,6 @@ def test_create_index_method(mock_create_index, mem_db: DBConnection):
        "vector", replace=True, config=expected_config, name=None, train=True
    )

    # Test with target_partition_size
    table.create_index(
        metric="l2",
        num_sub_vectors=96,
        vector_column_name="vector",
        replace=True,
        index_cache_size=256,
        num_bits=4,
        target_partition_size=8192,
    )
    expected_config = IvfPq(
        distance_type="l2",
        num_sub_vectors=96,
        num_bits=4,
        target_partition_size=8192,
    )
    mock_create_index.assert_called_with(
        "vector", replace=True, config=expected_config, name=None, train=True
    )

    # target_partition_size has a default value,
    # so `num_partitions` and `target_partition_size` are not required
    table.create_index(
        metric="l2",
        num_sub_vectors=96,
        vector_column_name="vector",
        replace=True,
        index_cache_size=256,
        num_bits=4,
    )
    expected_config = IvfPq(
        distance_type="l2",
        num_sub_vectors=96,
        num_bits=4,
    )
    mock_create_index.assert_called_with(
        "vector", replace=True, config=expected_config, name=None, train=True
    )

    table.create_index(
        vector_column_name="my_vector",
        metric="dot",

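For the Rust side of the same knob, here is a minimal sketch of building an IVF_PQ index (assuming an open `tbl: lancedb::Table`; `target_partition_size` is the beta-only builder setter shown in the bindings further below, everything else is the stable builder API):

use lancedb::index::{vector::IvfPqIndexBuilder, Index};
use lancedb::DistanceType;

async fn build_ivf_pq(tbl: &lancedb::Table) -> lancedb::error::Result<()> {
    let builder = IvfPqIndexBuilder::default()
        .distance_type(DistanceType::L2)
        .num_sub_vectors(96);
    // On the beta side of this compare the builder additionally exposes
    // `.target_partition_size(8192)`, which sizes partitions instead of
    // requiring an explicit `num_partitions`.
    tbl.create_index(&["vector"], Index::IvfPq(builder))
        .execute()
        .await
}
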
@@ -7,7 +7,7 @@ use arrow::{datatypes::Schema, ffi_stream::ArrowArrayStreamReader, pyarrow::FromPyArrow};
use lancedb::{connection::Connection as LanceConnection, database::CreateTableMode};
use pyo3::{
    exceptions::{PyRuntimeError, PyValueError},
    pyclass, pyfunction, pymethods, Bound, FromPyObject, Py, PyAny, PyRef, PyResult, Python,
    pyclass, pyfunction, pymethods, Bound, FromPyObject, PyAny, PyRef, PyResult, Python,
};
use pyo3_async_runtimes::tokio::future_into_py;

@@ -163,34 +163,6 @@ impl Connection {
        })
    }

    #[pyo3(signature = (target_table_name, source_uri, target_namespace=vec![], source_version=None, source_tag=None, is_shallow=true))]
    pub fn clone_table(
        self_: PyRef<'_, Self>,
        target_table_name: String,
        source_uri: String,
        target_namespace: Vec<String>,
        source_version: Option<u64>,
        source_tag: Option<String>,
        is_shallow: bool,
    ) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.get_inner()?.clone();

        let mut builder = inner.clone_table(target_table_name, source_uri);
        builder = builder.target_namespace(target_namespace);
        if let Some(version) = source_version {
            builder = builder.source_version(version);
        }
        if let Some(tag) = source_tag {
            builder = builder.source_tag(tag);
        }
        builder = builder.is_shallow(is_shallow);

        future_into_py(self_.py(), async move {
            let table = builder.execute().await.infer_error()?;
            Ok(Table::new(table))
        })
    }

    #[pyo3(signature = (cur_name, new_name, cur_namespace=vec![], new_namespace=vec![]))]
    pub fn rename_table(
        self_: PyRef<'_, Self>,

@@ -283,7 +255,7 @@ impl Connection {
    #[pyo3(signature = (uri, api_key=None, region=None, host_override=None, read_consistency_interval=None, client_config=None, storage_options=None, session=None))]
    #[allow(clippy::too_many_arguments)]
    pub fn connect(
        py: Python<'_>,
        py: Python,
        uri: String,
        api_key: Option<String>,
        region: Option<String>,

@@ -329,8 +301,6 @@ pub struct PyClientConfig {
    timeout_config: Option<PyClientTimeoutConfig>,
    extra_headers: Option<HashMap<String, String>>,
    id_delimiter: Option<String>,
    tls_config: Option<PyClientTlsConfig>,
    header_provider: Option<Py<PyAny>>,
}

#[derive(FromPyObject)]

@@ -351,14 +321,6 @@ pub struct PyClientTimeoutConfig {
    pool_idle_timeout: Option<Duration>,
}

#[derive(FromPyObject)]
pub struct PyClientTlsConfig {
    cert_file: Option<String>,
    key_file: Option<String>,
    ssl_ca_cert: Option<String>,
    assert_hostname: bool,
}

#[cfg(feature = "remote")]
impl From<PyClientRetryConfig> for lancedb::remote::RetryConfig {
    fn from(value: PyClientRetryConfig) -> Self {

@@ -385,36 +347,15 @@ impl From<PyClientTimeoutConfig> for lancedb::remote::TimeoutConfig {
    }
}

#[cfg(feature = "remote")]
impl From<PyClientTlsConfig> for lancedb::remote::TlsConfig {
    fn from(value: PyClientTlsConfig) -> Self {
        Self {
            cert_file: value.cert_file,
            key_file: value.key_file,
            ssl_ca_cert: value.ssl_ca_cert,
            assert_hostname: value.assert_hostname,
        }
    }
}

#[cfg(feature = "remote")]
impl From<PyClientConfig> for lancedb::remote::ClientConfig {
    fn from(value: PyClientConfig) -> Self {
        use crate::header::PyHeaderProvider;

        let header_provider = value.header_provider.map(|provider| {
            let py_provider = PyHeaderProvider::new(provider);
            Arc::new(py_provider) as Arc<dyn lancedb::remote::HeaderProvider>
        });

        Self {
            user_agent: value.user_agent,
            retry_config: value.retry_config.map(Into::into).unwrap_or_default(),
            timeout_config: value.timeout_config.map(Into::into).unwrap_or_default(),
            extra_headers: value.extra_headers.unwrap_or_default(),
            id_delimiter: value.id_delimiter,
            tls_config: value.tls_config.map(Into::into),
            header_provider,
        }
    }
}

@@ -1,71 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

use pyo3::prelude::*;
use pyo3::types::PyDict;
use std::collections::HashMap;

/// A wrapper around a Python HeaderProvider that can be called from Rust
pub struct PyHeaderProvider {
    provider: Py<PyAny>,
}

impl Clone for PyHeaderProvider {
    fn clone(&self) -> Self {
        Python::with_gil(|py| Self {
            provider: self.provider.clone_ref(py),
        })
    }
}

impl PyHeaderProvider {
    pub fn new(provider: Py<PyAny>) -> Self {
        Self { provider }
    }

    /// Get headers from the Python provider (internal implementation)
    fn get_headers_internal(&self) -> Result<HashMap<String, String>, String> {
        Python::with_gil(|py| {
            // Call the get_headers method
            let result = self.provider.call_method0(py, "get_headers");

            match result {
                Ok(headers_py) => {
                    // Convert Python dict to Rust HashMap
                    let bound_headers = headers_py.bind(py);
                    let dict: &Bound<PyDict> = bound_headers.downcast().map_err(|e| {
                        format!("HeaderProvider.get_headers must return a dict: {}", e)
                    })?;

                    let mut headers = HashMap::new();
                    for (key, value) in dict {
                        let key_str: String = key
                            .extract()
                            .map_err(|e| format!("Header key must be string: {}", e))?;
                        let value_str: String = value
                            .extract()
                            .map_err(|e| format!("Header value must be string: {}", e))?;
                        headers.insert(key_str, value_str);
                    }
                    Ok(headers)
                }
                Err(e) => Err(format!("Failed to get headers from provider: {}", e)),
            }
        })
    }
}

#[cfg(feature = "remote")]
#[async_trait::async_trait]
impl lancedb::remote::HeaderProvider for PyHeaderProvider {
    async fn get_headers(&self) -> lancedb::error::Result<HashMap<String, String>> {
        self.get_headers_internal()
            .map_err(|e| lancedb::Error::Runtime { message: e })
    }
}

impl std::fmt::Debug for PyHeaderProvider {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "PyHeaderProvider")
    }
}
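When embedding the Rust crate directly, the same trait can be implemented natively. A minimal sketch follows; the provider struct and token field are illustrative, only the trait shape and the `header_provider` config field come from the code above:

use std::collections::HashMap;
use std::sync::Arc;

#[derive(Debug)]
struct BearerTokenProvider {
    token: String,
}

#[async_trait::async_trait]
impl lancedb::remote::HeaderProvider for BearerTokenProvider {
    async fn get_headers(&self) -> lancedb::error::Result<HashMap<String, String>> {
        // Headers returned here are merged over any static extra_headers,
        // mirroring the Python override behavior tested earlier in this diff.
        Ok(HashMap::from([(
            "Authorization".to_string(),
            format!("Bearer {}", self.token),
        )]))
    }
}

fn config_with_provider(token: String) -> lancedb::remote::ClientConfig {
    lancedb::remote::ClientConfig {
        header_provider: Some(
            Arc::new(BearerTokenProvider { token }) as Arc<dyn lancedb::remote::HeaderProvider>
        ),
        ..Default::default()
    }
}
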
@@ -63,9 +63,6 @@ pub fn extract_index_params(source: &Option<Bound<'_, PyAny>>) -> PyResult<Lance
            if let Some(num_partitions) = params.num_partitions {
                ivf_flat_builder = ivf_flat_builder.num_partitions(num_partitions);
            }
            if let Some(target_partition_size) = params.target_partition_size {
                ivf_flat_builder = ivf_flat_builder.target_partition_size(target_partition_size);
            }
            Ok(LanceDbIndex::IvfFlat(ivf_flat_builder))
        },
        "IvfPq" => {

@@ -79,9 +76,6 @@ pub fn extract_index_params(source: &Option<Bound<'_, PyAny>>) -> PyResult<Lance
            if let Some(num_partitions) = params.num_partitions {
                ivf_pq_builder = ivf_pq_builder.num_partitions(num_partitions);
            }
            if let Some(target_partition_size) = params.target_partition_size {
                ivf_pq_builder = ivf_pq_builder.target_partition_size(target_partition_size);
            }
            if let Some(num_sub_vectors) = params.num_sub_vectors {
                ivf_pq_builder = ivf_pq_builder.num_sub_vectors(num_sub_vectors);
            }

@@ -100,9 +94,6 @@ pub fn extract_index_params(source: &Option<Bound<'_, PyAny>>) -> PyResult<Lance
            if let Some(num_partitions) = params.num_partitions {
                hnsw_pq_builder = hnsw_pq_builder.num_partitions(num_partitions);
            }
            if let Some(target_partition_size) = params.target_partition_size {
                hnsw_pq_builder = hnsw_pq_builder.target_partition_size(target_partition_size);
            }
            if let Some(num_sub_vectors) = params.num_sub_vectors {
                hnsw_pq_builder = hnsw_pq_builder.num_sub_vectors(num_sub_vectors);
            }

@@ -120,9 +111,6 @@ pub fn extract_index_params(source: &Option<Bound<'_, PyAny>>) -> PyResult<Lance
            if let Some(num_partitions) = params.num_partitions {
                hnsw_sq_builder = hnsw_sq_builder.num_partitions(num_partitions);
            }
            if let Some(target_partition_size) = params.target_partition_size {
                hnsw_sq_builder = hnsw_sq_builder.target_partition_size(target_partition_size);
            }
            Ok(LanceDbIndex::IvfHnswSq(hnsw_sq_builder))
        },
        not_supported => Err(PyValueError::new_err(format!(

@@ -156,7 +144,6 @@ struct IvfFlatParams {
    num_partitions: Option<u32>,
    max_iterations: u32,
    sample_rate: u32,
    target_partition_size: Option<u32>,
}

#[derive(FromPyObject)]

@@ -167,7 +154,6 @@ struct IvfPqParams {
    num_bits: u32,
    max_iterations: u32,
    sample_rate: u32,
    target_partition_size: Option<u32>,
}

#[derive(FromPyObject)]

@@ -180,7 +166,6 @@ struct IvfHnswPqParams {
    sample_rate: u32,
    m: u32,
    ef_construction: u32,
    target_partition_size: Option<u32>,
}

#[derive(FromPyObject)]

@@ -191,7 +176,6 @@ struct IvfHnswSqParams {
    sample_rate: u32,
    m: u32,
    ef_construction: u32,
    target_partition_size: Option<u32>,
}

#[pyclass(get_all)]

@@ -20,7 +20,6 @@ use table::{
pub mod arrow;
pub mod connection;
pub mod error;
pub mod header;
pub mod index;
pub mod query;
pub mod session;

@@ -591,11 +591,12 @@ impl Table {
    }

    /// Optimize the on-disk data by compacting and pruning old data, for better performance.
    #[pyo3(signature = (cleanup_since_ms=None, delete_unverified=None))]
    #[pyo3(signature = (cleanup_since_ms=None, delete_unverified=None, retrain=None))]
    pub fn optimize(
        self_: PyRef<'_, Self>,
        cleanup_since_ms: Option<u64>,
        delete_unverified: Option<bool>,
        retrain: Option<bool>,
    ) -> PyResult<Bound<'_, PyAny>> {
        let inner = self_.inner_ref()?.clone();
        let older_than = if let Some(ms) = cleanup_since_ms {

@@ -631,9 +632,10 @@ impl Table {
            .prune
            .unwrap();
        inner
            .optimize(lancedb::table::OptimizeAction::Index(
                OptimizeOptions::default(),
            ))
            .optimize(lancedb::table::OptimizeAction::Index(match retrain {
                Some(true) => OptimizeOptions::retrain(),
                _ => OptimizeOptions::default(),
            }))
            .await
            .infer_error()?;
        Ok(OptimizeStats {

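On the Rust API the new flag maps directly onto `OptimizeAction`; a minimal sketch (assuming an open `tbl: lancedb::Table`; the `OptimizeOptions` import path is an assumption and may differ by lance version):

use lancedb::table::OptimizeAction;
// Assumed path: this is the lance type the bindings above construct.
use lance_index::optimize::OptimizeOptions;

async fn optimize_with_retrain(tbl: &lancedb::Table) -> lancedb::error::Result<()> {
    // `retrain()` re-learns the index (e.g. IVF centroids) rather than
    // only merging newly written rows into the existing index.
    tbl.optimize(OptimizeAction::Index(OptimizeOptions::retrain()))
        .await?;
    Ok(())
}
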
@@ -1,6 +1,6 @@
[package]
name = "lancedb"
version = "0.22.1-beta.3"
version = "0.22.0"
edition.workspace = true
description = "LanceDB: A serverless, low-latency vector database for AI applications"
license.workspace = true

@@ -86,11 +86,11 @@ rand = { version = "0.9", features = ["small_rng"] }
random_word = { version = "0.4.3", features = ["en"] }
uuid = { version = "1.7.0", features = ["v4"] }
walkdir = "2"
aws-sdk-dynamodb = { version = "1.55.0" }
aws-sdk-s3 = { version = "1.55.0" }
aws-sdk-kms = { version = "1.48.0" }
aws-config = { version = "1.5.10" }
aws-smithy-runtime = { version = "1.9.1" }
aws-sdk-dynamodb = { version = "1.38.0" }
aws-sdk-s3 = { version = "1.38.0" }
aws-sdk-kms = { version = "1.37" }
aws-config = { version = "1.0" }
aws-smithy-runtime = { version = "1.3" }
datafusion.workspace = true
http-body = "1" # Matching reqwest
rstest = "0.23.0"

86
rust/lancedb/src/catalog.rs
Normal file
@@ -0,0 +1,86 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

//! Catalog implementation for managing databases

pub mod listing;

use std::collections::HashMap;
use std::sync::Arc;

use crate::database::Database;
use crate::error::Result;
use async_trait::async_trait;

pub trait CatalogOptions {
    fn serialize_into_map(&self, map: &mut HashMap<String, String>);
}

/// Request parameters for listing databases
#[derive(Clone, Debug, Default)]
pub struct DatabaseNamesRequest {
    /// Start listing after this name (exclusive)
    pub start_after: Option<String>,
    /// Maximum number of names to return
    pub limit: Option<u32>,
}

/// Request to open an existing database
#[derive(Clone, Debug)]
pub struct OpenDatabaseRequest {
    /// The name of the database to open
    pub name: String,
    /// A map of database-specific options
    ///
    /// Consult the catalog / database implementation to determine which options are available
    pub database_options: HashMap<String, String>,
}

/// Database creation mode
///
/// The default behavior is Create
pub enum CreateDatabaseMode {
    /// Create new database, error if exists
    Create,
    /// Open existing database if present
    ExistOk,
    /// Overwrite existing database
    Overwrite,
}

impl Default for CreateDatabaseMode {
    fn default() -> Self {
        Self::Create
    }
}

/// Request to create a new database
pub struct CreateDatabaseRequest {
    /// The name of the database to create
    pub name: String,
    /// The creation mode
    pub mode: CreateDatabaseMode,
    /// A map of catalog-specific options, consult your catalog implementation to determine what's available
    pub options: HashMap<String, String>,
}

#[async_trait]
pub trait Catalog: Send + Sync + std::fmt::Debug + 'static {
    /// List database names with pagination
    async fn database_names(&self, request: DatabaseNamesRequest) -> Result<Vec<String>>;

    /// Create a new database
    async fn create_database(&self, request: CreateDatabaseRequest) -> Result<Arc<dyn Database>>;

    /// Open existing database
    async fn open_database(&self, request: OpenDatabaseRequest) -> Result<Arc<dyn Database>>;

    /// Rename database
    async fn rename_database(&self, old_name: &str, new_name: &str) -> Result<()>;

    /// Delete database
    async fn drop_database(&self, name: &str) -> Result<()>;

    /// Delete all databases
    async fn drop_all_databases(&self) -> Result<()>;
}
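A consumer-side sketch of the pagination contract this trait defines (assuming the module is exposed as `lancedb::catalog`, per the file layout above):

use std::sync::Arc;

use lancedb::catalog::{Catalog, DatabaseNamesRequest};

// Page through all database names 100 at a time. `start_after` is
// exclusive, so each request resumes after the last name seen so far.
async fn all_database_names(
    catalog: &Arc<dyn Catalog>,
) -> lancedb::error::Result<Vec<String>> {
    let mut names: Vec<String> = Vec::new();
    loop {
        let page = catalog
            .database_names(DatabaseNamesRequest {
                start_after: names.last().cloned(),
                limit: Some(100),
            })
            .await?;
        if page.is_empty() {
            break;
        }
        names.extend(page);
    }
    Ok(names)
}
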
624
rust/lancedb/src/catalog/listing.rs
Normal file
@@ -0,0 +1,624 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The LanceDB Authors

//! Catalog implementation based on a local file system.

use std::collections::HashMap;
use std::fs::create_dir_all;
use std::path::Path;
use std::sync::Arc;

use super::{
    Catalog, CatalogOptions, CreateDatabaseMode, CreateDatabaseRequest, DatabaseNamesRequest,
    OpenDatabaseRequest,
};
use crate::connection::ConnectRequest;
use crate::database::listing::{ListingDatabase, ListingDatabaseOptions};
use crate::database::{Database, DatabaseOptions};
use crate::error::{CreateDirSnafu, Error, Result};
use async_trait::async_trait;
use lance::io::{ObjectStore, ObjectStoreParams, ObjectStoreRegistry};
use lance_io::local::to_local_path;
use object_store::path::Path as ObjectStorePath;
use snafu::ResultExt;

/// Options for the listing catalog
///
/// Note: the catalog will use the `storage_options` configured on
/// db_options to configure storage for listing / creating / deleting
/// databases.
#[derive(Clone, Debug, Default)]
pub struct ListingCatalogOptions {
    /// The options to use for databases opened by this catalog
    ///
    /// This also contains the storage options used by the catalog
    pub db_options: ListingDatabaseOptions,
}

impl CatalogOptions for ListingCatalogOptions {
    fn serialize_into_map(&self, map: &mut HashMap<String, String>) {
        self.db_options.serialize_into_map(map);
    }
}

impl ListingCatalogOptions {
    pub fn builder() -> ListingCatalogOptionsBuilder {
        ListingCatalogOptionsBuilder::new()
    }

    pub(crate) fn parse_from_map(map: &HashMap<String, String>) -> Result<Self> {
        let db_options = ListingDatabaseOptions::parse_from_map(map)?;
        Ok(Self { db_options })
    }
}

#[derive(Clone, Debug, Default)]
pub struct ListingCatalogOptionsBuilder {
    options: ListingCatalogOptions,
}

impl ListingCatalogOptionsBuilder {
    pub fn new() -> Self {
        Self {
            options: ListingCatalogOptions::default(),
        }
    }

    pub fn db_options(mut self, db_options: ListingDatabaseOptions) -> Self {
        self.options.db_options = db_options;
        self
    }

    pub fn build(self) -> ListingCatalogOptions {
        self.options
    }
}

/// A catalog implementation that works by listing subfolders in a directory
///
/// The listing catalog will be created with a base folder specified by the URI. Every subfolder
/// in this base folder will be considered a database. These will be opened as a
/// [`crate::database::listing::ListingDatabase`]
#[derive(Debug)]
pub struct ListingCatalog {
    object_store: Arc<ObjectStore>,

    uri: String,

    base_path: ObjectStorePath,

    options: ListingCatalogOptions,
}

impl ListingCatalog {
    /// Try to create a local directory to store the lancedb dataset
    pub fn try_create_dir(path: &str) -> core::result::Result<(), std::io::Error> {
        let path = Path::new(path);
        if !path.try_exists()? {
            create_dir_all(path)?;
        }
        Ok(())
    }

    pub fn uri(&self) -> &str {
        &self.uri
    }

    async fn open_path(path: &str) -> Result<Self> {
        let (object_store, base_path) = ObjectStore::from_uri(path).await?;
        if object_store.is_local() {
            Self::try_create_dir(path).context(CreateDirSnafu { path })?;
        }

        Ok(Self {
            uri: path.to_string(),
            base_path,
            object_store,
            options: ListingCatalogOptions::default(),
        })
    }

    pub async fn connect(request: &ConnectRequest) -> Result<Self> {
        let uri = &request.uri;
        let parse_res = url::Url::parse(uri);

        let options = ListingCatalogOptions::parse_from_map(&request.options)?;

        match parse_res {
            Ok(url) if url.scheme().len() == 1 && cfg!(windows) => Self::open_path(uri).await,
            Ok(url) => {
                let plain_uri = url.to_string();

                let registry = Arc::new(ObjectStoreRegistry::default());
                let storage_options = options.db_options.storage_options.clone();
                let os_params = ObjectStoreParams {
                    storage_options: Some(storage_options.clone()),
                    ..Default::default()
                };
                let (object_store, base_path) =
                    ObjectStore::from_uri_and_params(registry, &plain_uri, &os_params).await?;
                if object_store.is_local() {
                    Self::try_create_dir(&plain_uri).context(CreateDirSnafu { path: plain_uri })?;
                }

                Ok(Self {
                    uri: String::from(url.clone()),
                    base_path,
                    object_store,
                    options,
                })
            }
            Err(_) => Self::open_path(uri).await,
        }
    }

    fn database_path(&self, name: &str) -> ObjectStorePath {
        self.base_path.child(name.replace('\\', "/"))
    }
}

#[async_trait]
impl Catalog for ListingCatalog {
    async fn database_names(&self, request: DatabaseNamesRequest) -> Result<Vec<String>> {
        let mut f = self
            .object_store
            .read_dir(self.base_path.clone())
            .await?
            .iter()
            .map(Path::new)
            .filter_map(|p| p.file_name().and_then(|s| s.to_str().map(String::from)))
            .collect::<Vec<String>>();
        f.sort();

        if let Some(start_after) = request.start_after {
            let index = f
                .iter()
                .position(|name| name.as_str() > start_after.as_str())
                .unwrap_or(f.len());
            f.drain(0..index);
        }
        if let Some(limit) = request.limit {
            f.truncate(limit as usize);
        }
        Ok(f)
    }

    async fn create_database(&self, request: CreateDatabaseRequest) -> Result<Arc<dyn Database>> {
        let db_path = self.database_path(&request.name);
        let db_path_str = to_local_path(&db_path);
        let exists = Path::new(&db_path_str).exists();

        match request.mode {
            CreateDatabaseMode::Create if exists => {
                return Err(Error::DatabaseAlreadyExists { name: request.name })
            }
            CreateDatabaseMode::Create => {
                create_dir_all(db_path.to_string()).unwrap();
            }
            CreateDatabaseMode::ExistOk => {
                if !exists {
                    create_dir_all(db_path.to_string()).unwrap();
                }
            }
            CreateDatabaseMode::Overwrite => {
                if exists {
                    self.drop_database(&request.name).await?;
                }
                create_dir_all(db_path.to_string()).unwrap();
            }
        }

        let db_uri = format!("/{}/{}", self.base_path, request.name);

        let mut connect_request = ConnectRequest {
            uri: db_uri,
            #[cfg(feature = "remote")]
            client_config: Default::default(),
            read_consistency_interval: None,
            options: Default::default(),
            session: None,
        };

        // Add the db options to the connect request
        self.options
            .db_options
            .serialize_into_map(&mut connect_request.options);

        Ok(Arc::new(
            ListingDatabase::connect_with_options(&connect_request).await?,
        ))
    }

    async fn open_database(&self, request: OpenDatabaseRequest) -> Result<Arc<dyn Database>> {
        let db_path = self.database_path(&request.name);

        let db_path_str = to_local_path(&db_path);
        let exists = Path::new(&db_path_str).exists();
        if !exists {
            return Err(Error::DatabaseNotFound { name: request.name });
        }

        let mut connect_request = ConnectRequest {
            uri: db_path.to_string(),
            #[cfg(feature = "remote")]
            client_config: Default::default(),
            read_consistency_interval: None,
            options: Default::default(),
            session: None,
        };

        // Add the db options to the connect request
        self.options
            .db_options
            .serialize_into_map(&mut connect_request.options);

        Ok(Arc::new(
            ListingDatabase::connect_with_options(&connect_request).await?,
        ))
    }

    async fn rename_database(&self, _old_name: &str, _new_name: &str) -> Result<()> {
        Err(Error::NotSupported {
            message: "rename_database is not supported in LanceDB OSS yet".to_string(),
        })
    }

    async fn drop_database(&self, name: &str) -> Result<()> {
        let db_path = self.database_path(name);
        self.object_store
            .remove_dir_all(db_path.clone())
            .await
            .map_err(|err| match err {
                lance::Error::NotFound { .. } => Error::DatabaseNotFound {
                    name: name.to_owned(),
                },
                _ => Error::from(err),
            })?;

        Ok(())
    }

    async fn drop_all_databases(&self) -> Result<()> {
        self.object_store
            .remove_dir_all(self.base_path.clone())
            .await?;
        Ok(())
    }
}

#[cfg(all(test, not(windows)))]
mod tests {
    use super::*;

    /// file:/// URIs with drive letters do not work correctly on Windows
    #[cfg(windows)]
    fn path_to_uri(path: PathBuf) -> String {
        path.to_str().unwrap().to_string()
    }

    #[cfg(not(windows))]
    fn path_to_uri(path: PathBuf) -> String {
        Url::from_file_path(path).unwrap().to_string()
    }

    async fn setup_catalog() -> (TempDir, ListingCatalog) {
        let tempdir = tempfile::tempdir().unwrap();
        let catalog_path = tempdir.path().join("catalog");
        std::fs::create_dir_all(&catalog_path).unwrap();

        let uri = path_to_uri(catalog_path);

        let request = ConnectRequest {
            uri: uri.clone(),
            #[cfg(feature = "remote")]
            client_config: Default::default(),
            options: Default::default(),
            read_consistency_interval: None,
            session: None,
        };

        let catalog = ListingCatalog::connect(&request).await.unwrap();

        (tempdir, catalog)
    }

    use crate::database::{CreateTableData, CreateTableRequest, TableNamesRequest};
    use crate::table::TableDefinition;
    use arrow_schema::Field;
    use std::path::PathBuf;
    use std::sync::Arc;
    use tempfile::{tempdir, TempDir};
    use url::Url;

    #[tokio::test]
    async fn test_database_names() {
        let (_tempdir, catalog) = setup_catalog().await;

        let names = catalog
            .database_names(DatabaseNamesRequest::default())
            .await
            .unwrap();
        assert!(names.is_empty());
    }

    #[tokio::test]
    async fn test_create_database() {
        let (_tempdir, catalog) = setup_catalog().await;

        catalog
            .create_database(CreateDatabaseRequest {
                name: "db1".into(),
                mode: CreateDatabaseMode::Create,
                options: HashMap::new(),
            })
            .await
            .unwrap();

        let names = catalog
            .database_names(DatabaseNamesRequest::default())
            .await
            .unwrap();
        assert_eq!(names, vec!["db1"]);
    }

    #[tokio::test]
    async fn test_create_database_exist_ok() {
        let (_tempdir, catalog) = setup_catalog().await;

        let db1 = catalog
            .create_database(CreateDatabaseRequest {
                name: "db_exist_ok".into(),
                mode: CreateDatabaseMode::ExistOk,
                options: HashMap::new(),
            })
            .await
            .unwrap();
        let dummy_schema = Arc::new(arrow_schema::Schema::new(Vec::<Field>::default()));
        db1.create_table(CreateTableRequest {
            name: "test_table".parse().unwrap(),
            data: CreateTableData::Empty(TableDefinition::new_from_schema(dummy_schema)),
            mode: Default::default(),
            write_options: Default::default(),
            namespace: vec![],
        })
        .await
        .unwrap();

        let db2 = catalog
            .create_database(CreateDatabaseRequest {
                name: "db_exist_ok".into(),
                mode: CreateDatabaseMode::ExistOk,
                options: HashMap::new(),
            })
            .await
            .unwrap();

        let tables = db2.table_names(TableNamesRequest::default()).await.unwrap();
        assert_eq!(tables, vec!["test_table".to_string()]);
    }

    #[tokio::test]
    async fn test_create_database_overwrite() {
        let (_tempdir, catalog) = setup_catalog().await;

        let db = catalog
            .create_database(CreateDatabaseRequest {
                name: "db_overwrite".into(),
                mode: CreateDatabaseMode::Create,
                options: HashMap::new(),
            })
            .await
            .unwrap();
        let dummy_schema = Arc::new(arrow_schema::Schema::new(Vec::<Field>::default()));
        db.create_table(CreateTableRequest {
            name: "old_table".parse().unwrap(),
            data: CreateTableData::Empty(TableDefinition::new_from_schema(dummy_schema)),
            mode: Default::default(),
            write_options: Default::default(),
            namespace: vec![],
        })
        .await
        .unwrap();
        let tables = db.table_names(TableNamesRequest::default()).await.unwrap();
        assert!(!tables.is_empty());

        let new_db = catalog
            .create_database(CreateDatabaseRequest {
                name: "db_overwrite".into(),
                mode: CreateDatabaseMode::Overwrite,
                options: HashMap::new(),
            })
            .await
            .unwrap();

        let tables = new_db
            .table_names(TableNamesRequest::default())
            .await
            .unwrap();
        assert!(tables.is_empty());
    }

    #[tokio::test]
    async fn test_create_database_overwrite_non_existing() {
        let (_tempdir, catalog) = setup_catalog().await;

        catalog
            .create_database(CreateDatabaseRequest {
                name: "new_db".into(),
                mode: CreateDatabaseMode::Overwrite,
                options: HashMap::new(),
            })
            .await
            .unwrap();

        let names = catalog
            .database_names(DatabaseNamesRequest::default())
            .await
            .unwrap();
        assert!(names.contains(&"new_db".to_string()));
    }

    #[tokio::test]
    async fn test_open_database() {
        let (_tempdir, catalog) = setup_catalog().await;

        // Test open non-existent
        let result = catalog
            .open_database(OpenDatabaseRequest {
                name: "missing".into(),
                database_options: HashMap::new(),
            })
            .await;
        assert!(matches!(
            result.unwrap_err(),
            Error::DatabaseNotFound { name } if name == "missing"
        ));

        // Create and open
        catalog
            .create_database(CreateDatabaseRequest {
                name: "valid_db".into(),
                mode: CreateDatabaseMode::Create,
                options: HashMap::new(),
            })
            .await
            .unwrap();

        let db = catalog
            .open_database(OpenDatabaseRequest {
                name: "valid_db".into(),
                database_options: HashMap::new(),
            })
            .await
            .unwrap();
        assert_eq!(
            db.table_names(TableNamesRequest::default()).await.unwrap(),
            Vec::<String>::new()
        );
    }

    #[tokio::test]
    async fn test_drop_database() {
        let (_tempdir, catalog) = setup_catalog().await;

        // Create test database
        catalog
            .create_database(CreateDatabaseRequest {
                name: "to_drop".into(),
                mode: CreateDatabaseMode::Create,
                options: HashMap::new(),
            })
            .await
            .unwrap();

        let names = catalog
            .database_names(DatabaseNamesRequest::default())
            .await
            .unwrap();
        assert!(!names.is_empty());

        // Drop database
        catalog.drop_database("to_drop").await.unwrap();

        let names = catalog
            .database_names(DatabaseNamesRequest::default())
            .await
            .unwrap();
        assert!(names.is_empty());
    }

    #[tokio::test]
    async fn test_drop_all_databases() {
        let (_tempdir, catalog) = setup_catalog().await;

        catalog
            .create_database(CreateDatabaseRequest {
                name: "db1".into(),
                mode: CreateDatabaseMode::Create,
                options: HashMap::new(),
            })
            .await
            .unwrap();
        catalog
            .create_database(CreateDatabaseRequest {
                name: "db2".into(),
                mode: CreateDatabaseMode::Create,
                options: HashMap::new(),
            })
            .await
            .unwrap();

        catalog.drop_all_databases().await.unwrap();

        let names = catalog
            .database_names(DatabaseNamesRequest::default())
            .await
            .unwrap();
        assert!(names.is_empty());
    }

    #[tokio::test]
    async fn test_rename_database_unsupported() {
        let (_tempdir, catalog) = setup_catalog().await;
        let result = catalog.rename_database("old", "new").await;
        assert!(matches!(
            result.unwrap_err(),
            Error::NotSupported { message } if message.contains("rename_database")
        ));
    }

    #[tokio::test]
    async fn test_connect_local_path() {
        let tmp_dir = tempdir().unwrap();
        let path = tmp_dir.path().to_str().unwrap();

        let request = ConnectRequest {
            uri: path.to_string(),
            #[cfg(feature = "remote")]
            client_config: Default::default(),
            options: Default::default(),
            read_consistency_interval: None,
            session: None,
        };

        let catalog = ListingCatalog::connect(&request).await.unwrap();
        assert!(catalog.object_store.is_local());
        assert_eq!(catalog.uri, path);
    }

    #[tokio::test]
    async fn test_connect_file_scheme() {
        let tmp_dir = tempdir().unwrap();
        let path = tmp_dir.path();
        let uri = path_to_uri(path.to_path_buf());

        let request = ConnectRequest {
            uri: uri.clone(),
            #[cfg(feature = "remote")]
            client_config: Default::default(),
            options: Default::default(),
            read_consistency_interval: None,
            session: None,
        };

        let catalog = ListingCatalog::connect(&request).await.unwrap();
        assert!(catalog.object_store.is_local());
        assert_eq!(catalog.uri, uri);
    }

    #[tokio::test]
    async fn test_connect_invalid_uri_fallback() {
        let invalid_uri = "invalid:///path";
        let request = ConnectRequest {
            uri: invalid_uri.to_string(),
            #[cfg(feature = "remote")]
            client_config: Default::default(),
            options: Default::default(),
            read_consistency_interval: None,
            session: None,
        };

        let result = ListingCatalog::connect(&request).await;
        assert!(result.is_err());
    }
}
@@ -13,13 +13,15 @@ use lance::dataset::ReadParams;
use object_store::aws::AwsCredential;

use crate::arrow::{IntoArrow, IntoArrowStream, SendableRecordBatchStream};
use crate::catalog::listing::ListingCatalog;
use crate::catalog::CatalogOptions;
use crate::database::listing::{
    ListingDatabase, OPT_NEW_TABLE_STORAGE_VERSION, OPT_NEW_TABLE_V2_MANIFEST_PATHS,
};
use crate::database::{
    CloneTableRequest, CreateNamespaceRequest, CreateTableData, CreateTableMode,
    CreateTableRequest, Database, DatabaseOptions, DropNamespaceRequest, ListNamespacesRequest,
    OpenTableRequest, TableNamesRequest,
    CreateNamespaceRequest, CreateTableData, CreateTableMode, CreateTableRequest, Database,
    DatabaseOptions, DropNamespaceRequest, ListNamespacesRequest, OpenTableRequest,
    TableNamesRequest,
};
use crate::embeddings::{
    EmbeddingDefinition, EmbeddingFunction, EmbeddingRegistry, MemoryRegistry, WithEmbeddings,

@@ -469,62 +471,6 @@ impl OpenTableBuilder {
    }
}

/// Builder for cloning a table.
///
/// A shallow clone creates a new table that shares the underlying data files
/// with the source table but has its own independent manifest. Both the source
/// and cloned tables can evolve independently while initially sharing the same
/// data, deletion, and index files.
///
/// Use this builder to configure the clone operation before executing it.
pub struct CloneTableBuilder {
    parent: Arc<dyn Database>,
    request: CloneTableRequest,
}

impl CloneTableBuilder {
    fn new(parent: Arc<dyn Database>, target_table_name: String, source_uri: String) -> Self {
        Self {
            parent,
            request: CloneTableRequest::new(target_table_name, source_uri),
        }
    }

    /// Set the source version to clone from
    pub fn source_version(mut self, version: u64) -> Self {
        self.request.source_version = Some(version);
        self
    }

    /// Set the source tag to clone from
    pub fn source_tag(mut self, tag: impl Into<String>) -> Self {
        self.request.source_tag = Some(tag.into());
        self
    }

    /// Set the target namespace for the cloned table
    pub fn target_namespace(mut self, namespace: Vec<String>) -> Self {
        self.request.target_namespace = namespace;
        self
    }

    /// Set whether to perform a shallow clone (default: true)
    ///
    /// When true, the cloned table shares data files with the source table.
    /// When false, performs a deep clone (not yet implemented).
    pub fn is_shallow(mut self, is_shallow: bool) -> Self {
        self.request.is_shallow = is_shallow;
        self
    }

    /// Execute the clone operation
    pub async fn execute(self) -> Result<Table> {
        Ok(Table::new(
            self.parent.clone().clone_table(self.request).await?,
        ))
    }
}

/// A connection to LanceDB
#[derive(Clone)]
pub struct Connection {

@@ -631,30 +577,6 @@ impl Connection {
        )
    }

    /// Clone a table in the database
    ///
    /// Creates a new table by cloning from an existing source table.
    /// By default, this performs a shallow clone where the new table shares
    /// the underlying data files with the source table.
    ///
    /// # Parameters
    /// - `target_table_name`: The name of the new table to create
    /// - `source_uri`: The URI of the source table to clone from
    ///
    /// # Returns
    /// A [`CloneTableBuilder`] that can be used to configure the clone operation
    pub fn clone_table(
        &self,
        target_table_name: impl Into<String>,
        source_uri: impl Into<String>,
    ) -> CloneTableBuilder {
        CloneTableBuilder::new(
            self.internal.clone(),
            target_table_name.into(),
            source_uri.into(),
        )
    }

    /// Rename a table in the database.
    ///
    /// This is only supported in LanceDB Cloud.
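A minimal usage sketch of the builder defined above (this API sits on the beta side of this compare; the paths and table names are illustrative):

use lancedb::connect;

async fn clone_example() -> lancedb::error::Result<()> {
    let db = connect("/tmp/lancedb").execute().await?;
    // Shallow-clone a pinned version of the source table; both tables
    // share data files but keep independent manifests afterwards.
    let cloned = db
        .clone_table("events_clone", "/tmp/lancedb/events.lance")
        .source_version(42)
        .is_shallow(true)
        .execute()
        .await?;
    println!("created {}", cloned.name());
    Ok(())
}
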
@@ -738,7 +660,7 @@ pub struct ConnectRequest {
    #[cfg(feature = "remote")]
    pub client_config: ClientConfig,

    /// Database specific options
    /// Database/Catalog specific options
    pub options: HashMap<String, String>,

    /// The interval at which to check for updates from other processes.

@@ -1015,6 +937,50 @@ pub fn connect(uri: &str) -> ConnectBuilder {
    ConnectBuilder::new(uri)
}

/// A builder for configuring a connection to a LanceDB catalog
#[derive(Debug)]
pub struct CatalogConnectBuilder {
    request: ConnectRequest,
}

impl CatalogConnectBuilder {
    /// Create a new [`CatalogConnectBuilder`] with the given catalog URI.
    pub fn new(uri: &str) -> Self {
        Self {
            request: ConnectRequest {
                uri: uri.to_string(),
                #[cfg(feature = "remote")]
                client_config: Default::default(),
                read_consistency_interval: None,
                options: HashMap::new(),
                session: None,
            },
        }
    }

    pub fn catalog_options(mut self, catalog_options: &dyn CatalogOptions) -> Self {
        catalog_options.serialize_into_map(&mut self.request.options);
        self
    }

    /// Establishes a connection to the catalog
    pub async fn execute(self) -> Result<Arc<ListingCatalog>> {
        let catalog = ListingCatalog::connect(&self.request).await?;
        Ok(Arc::new(catalog))
    }
}

/// Connect to a LanceDB catalog.
///
/// A catalog is a container for databases, which in turn are containers for tables.
///
/// # Arguments
///
/// * `uri` - URI where the catalog is located, can be a local directory or supported remote cloud storage.
pub fn connect_catalog(uri: &str) -> CatalogConnectBuilder {
    CatalogConnectBuilder::new(uri)
}
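A short end-to-end sketch of this new entry point (assuming it is reachable as `lancedb::connection::connect_catalog`; the directory and database names are illustrative):

use lancedb::catalog::{Catalog, CreateDatabaseRequest, DatabaseNamesRequest};
use lancedb::connection::connect_catalog;

async fn catalog_example() -> lancedb::error::Result<()> {
    let catalog = connect_catalog("/tmp/my_catalog").execute().await?;
    catalog
        .create_database(CreateDatabaseRequest {
            name: "analytics".to_string(),
            mode: Default::default(), // CreateDatabaseMode::Create
            options: Default::default(),
        })
        .await?;
    let names = catalog
        .database_names(DatabaseNamesRequest::default())
        .await?;
    assert!(names.contains(&"analytics".to_string()));
    Ok(())
}
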
#[cfg(all(test, feature = "remote"))]
mod test_utils {
    use super::*;

@@ -1032,23 +998,6 @@ mod test_utils {
                embedding_registry: Arc::new(MemoryRegistry::new()),
            }
        }

        pub fn new_with_handler_and_config<T>(
            handler: impl Fn(reqwest::Request) -> http::Response<T> + Clone + Send + Sync + 'static,
            config: crate::remote::ClientConfig,
        ) -> Self
        where
            T: Into<reqwest::Body>,
        {
            let internal = Arc::new(crate::remote::db::RemoteDatabase::new_mock_with_config(
                handler, config,
            ));
            Self {
                internal,
                uri: "db://test".to_string(),
                embedding_registry: Arc::new(MemoryRegistry::new()),
            }
        }
    }
}

@@ -1056,6 +1005,7 @@
mod tests {
    use std::fs::create_dir_all;

    use crate::catalog::{Catalog, DatabaseNamesRequest, OpenDatabaseRequest};
    use crate::database::listing::{ListingDatabaseOptions, NewTableConfig};
    use crate::query::QueryBase;
    use crate::query::{ExecutableQuery, QueryExecutionOptions};

@@ -1363,48 +1313,89 @@ mod tests {
    }

    #[tokio::test]
    async fn test_clone_table() {
    async fn test_connect_catalog() {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();
        let db = connect(uri).execute().await.unwrap();
        let catalog = connect_catalog(uri).execute().await.unwrap();

        // Create a source table with some data
        let mut batch_gen = BatchGenerator::new()
            .col(Box::new(IncrementingInt32::new().named("id")))
            .col(Box::new(IncrementingInt32::new().named("value")));
        let reader = batch_gen.batches(5, 100);
        // Verify that we can get the uri from the catalog
        let catalog_uri = catalog.uri();
        assert_eq!(catalog_uri, uri);

        let source_table = db
            .create_table("source_table", reader)
            .execute()
        // Check that the catalog is initially empty
        let dbs = catalog
            .database_names(DatabaseNamesRequest::default())
            .await
            .unwrap();
        assert_eq!(dbs.len(), 0);
    }

    #[tokio::test]
    #[cfg(not(windows))]
    async fn test_catalog_create_database() {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();
        let catalog = connect_catalog(uri).execute().await.unwrap();

        let db_name = "test_db";
        catalog
            .create_database(crate::catalog::CreateDatabaseRequest {
                name: db_name.to_string(),
                mode: Default::default(),
                options: Default::default(),
            })
            .await
            .unwrap();

        // Get the source table URI
        let source_table_path = tmp_dir.path().join("source_table.lance");
        let source_uri = source_table_path.to_str().unwrap();
        let dbs = catalog
            .database_names(DatabaseNamesRequest::default())
            .await
            .unwrap();
        assert_eq!(dbs.len(), 1);
        assert_eq!(dbs[0], db_name);

        // Clone the table
        let cloned_table = db
            .clone_table("cloned_table", source_uri)
            .execute()
        let db = catalog
            .open_database(OpenDatabaseRequest {
                name: db_name.to_string(),
                database_options: HashMap::new(),
            })
            .await
            .unwrap();

        // Verify the cloned table exists
        let table_names = db.table_names().execute().await.unwrap();
        assert!(table_names.contains(&"source_table".to_string()));
        assert!(table_names.contains(&"cloned_table".to_string()));
        let tables = db.table_names(Default::default()).await.unwrap();
        assert_eq!(tables.len(), 0);
    }

        // Verify the cloned table has the same schema
        assert_eq!(
            source_table.schema().await.unwrap(),
            cloned_table.schema().await.unwrap()
        );
    #[tokio::test]
    #[cfg(not(windows))]
    async fn test_catalog_drop_database() {
        let tmp_dir = tempdir().unwrap();
        let uri = tmp_dir.path().to_str().unwrap();
        let catalog = connect_catalog(uri).execute().await.unwrap();

        // Verify the cloned table has the same data
        let source_count = source_table.count_rows(None).await.unwrap();
        let cloned_count = cloned_table.count_rows(None).await.unwrap();
        assert_eq!(source_count, cloned_count);
        // Create and then drop a database
        let db_name = "test_db_to_drop";
        catalog
            .create_database(crate::catalog::CreateDatabaseRequest {
                name: db_name.to_string(),
                mode: Default::default(),
                options: Default::default(),
            })
            .await
            .unwrap();

        let dbs = catalog
            .database_names(DatabaseNamesRequest::default())
            .await
            .unwrap();
        assert_eq!(dbs.len(), 1);

        catalog.drop_database(db_name).await.unwrap();

        let dbs_after = catalog
            .database_names(DatabaseNamesRequest::default())
            .await
            .unwrap();
        assert_eq!(dbs_after.len(), 0);
    }
}

@@ -176,42 +176,6 @@ impl CreateTableRequest {
    }
}

/// Request to clone a table from a source table.
///
/// A shallow clone creates a new table that shares the underlying data files
/// with the source table but has its own independent manifest. This allows
/// both the source and cloned tables to evolve independently while initially
/// sharing the same data, deletion, and index files.
#[derive(Clone, Debug)]
pub struct CloneTableRequest {
    /// The name of the target table to create
    pub target_table_name: String,
    /// The namespace for the target table. Empty list represents root namespace.
    pub target_namespace: Vec<String>,
    /// The URI of the source table to clone from.
    pub source_uri: String,
    /// Optional version of the source table to clone.
    pub source_version: Option<u64>,
    /// Optional tag of the source table to clone.
    pub source_tag: Option<String>,
    /// Whether to perform a shallow clone (true) or deep clone (false). Defaults to true.
    /// Currently only shallow clone is supported.
    pub is_shallow: bool,
}

impl CloneTableRequest {
    pub fn new(target_table_name: String, source_uri: String) -> Self {
        Self {
            target_table_name,
            target_namespace: vec![],
            source_uri,
            source_version: None,
            source_tag: None,
            is_shallow: true,
        }
    }
}

/// The `Database` trait defines the interface for database implementations.
///
/// A database is responsible for managing tables and their metadata.

@@ -229,13 +193,6 @@ pub trait Database:
    async fn table_names(&self, request: TableNamesRequest) -> Result<Vec<String>>;
    /// Create a table in the database
    async fn create_table(&self, request: CreateTableRequest) -> Result<Arc<dyn BaseTable>>;
    /// Clone a table in the database.
    ///
    /// Creates a shallow clone of the source table, sharing underlying data files
    /// but with an independent manifest. Both tables can evolve separately after cloning.
    ///
    /// See [`CloneTableRequest`] for detailed documentation and examples.
    async fn clone_table(&self, request: CloneTableRequest) -> Result<Arc<dyn BaseTable>>;
    /// Open a table in the database
    async fn open_table(&self, request: OpenTableRequest) -> Result<Arc<dyn BaseTable>>;
    /// Rename a table in the database

@@ -7,8 +7,7 @@ use std::fs::create_dir_all;
|
||||
use std::path::Path;
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
use lance::dataset::refs::Ref;
|
||||
use lance::dataset::{builder::DatasetBuilder, ReadParams, WriteMode};
|
||||
use lance::dataset::{ReadParams, WriteMode};
|
||||
use lance::io::{ObjectStore, ObjectStoreParams, WrappingObjectStore};
|
||||
use lance_datafusion::utils::StreamingWriteSource;
|
||||
use lance_encoding::version::LanceFileVersion;
|
||||
@@ -23,8 +22,8 @@ use crate::table::NativeTable;
|
||||
use crate::utils::validate_table_name;
|
||||
|
||||
use super::{
|
||||
BaseTable, CloneTableRequest, CreateNamespaceRequest, CreateTableMode, CreateTableRequest,
|
||||
Database, DatabaseOptions, DropNamespaceRequest, ListNamespacesRequest, OpenTableRequest,
|
||||
BaseTable, CreateNamespaceRequest, CreateTableMode, CreateTableRequest, Database,
|
||||
DatabaseOptions, DropNamespaceRequest, ListNamespacesRequest, OpenTableRequest,
|
||||
TableNamesRequest,
|
||||
};
|
||||
|
||||
@@ -588,13 +587,7 @@ impl ListingDatabase {
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Database for ListingDatabase {
|
||||
async fn list_namespaces(&self, request: ListNamespacesRequest) -> Result<Vec<String>> {
|
||||
if !request.namespace.is_empty() {
|
||||
return Err(Error::NotSupported {
|
||||
message: "Namespace operations are not supported for listing database".into(),
|
||||
});
|
||||
}
|
||||
|
||||
async fn list_namespaces(&self, _request: ListNamespacesRequest) -> Result<Vec<String>> {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
|
||||
@@ -685,65 +678,6 @@ impl Database for ListingDatabase {
|
||||
}
|
||||
}
|
||||
|
||||
async fn clone_table(&self, request: CloneTableRequest) -> Result<Arc<dyn BaseTable>> {
|
||||
if !request.target_namespace.is_empty() {
|
||||
return Err(Error::NotSupported {
|
||||
message: "Namespace parameter is not supported for listing database. Only root namespace is supported.".into(),
|
||||
});
|
||||
}
|
||||
|
||||
// TODO: support deep clone
|
||||
if !request.is_shallow {
|
||||
return Err(Error::NotSupported {
|
||||
message: "Deep clone is not yet implemented".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
validate_table_name(&request.target_table_name)?;
|
||||
|
||||
let storage_params = ObjectStoreParams {
|
||||
storage_options: Some(self.storage_options.clone()),
|
||||
..Default::default()
|
||||
};
|
||||
let read_params = ReadParams {
|
||||
store_options: Some(storage_params.clone()),
|
||||
session: Some(self.session.clone()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut source_dataset = DatasetBuilder::from_uri(&request.source_uri)
|
||||
.with_read_params(read_params.clone())
|
||||
.load()
|
||||
.await
|
||||
.map_err(|e| Error::Lance { source: e })?;
|
||||
|
||||
let version_ref = match (request.source_version, request.source_tag) {
|
||||
(Some(v), None) => Ok(Ref::Version(v)),
|
||||
(None, Some(tag)) => Ok(Ref::Tag(tag)),
|
||||
(None, None) => Ok(Ref::Version(source_dataset.version().version)),
|
||||
_ => Err(Error::InvalidInput {
|
||||
message: "Cannot specify both source_version and source_tag".to_string(),
|
||||
}),
|
||||
}?;
|
||||
|
||||
let target_uri = self.table_uri(&request.target_table_name)?;
|
||||
source_dataset
|
||||
.shallow_clone(&target_uri, version_ref, storage_params)
|
||||
.await
|
||||
.map_err(|e| Error::Lance { source: e })?;
|
||||
|
||||
let cloned_table = NativeTable::open_with_params(
|
||||
&target_uri,
|
||||
&request.target_table_name,
|
||||
self.store_wrapper.clone(),
|
||||
None,
|
||||
self.read_consistency_interval,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(Arc::new(cloned_table))
|
||||
}
|
||||
|
||||
async fn open_table(&self, mut request: OpenTableRequest) -> Result<Arc<dyn BaseTable>> {
|
||||
if !request.namespace.is_empty() {
|
||||
return Err(Error::NotSupported {
|
||||
@@ -845,694 +779,3 @@ impl Database for ListingDatabase {
self
}
}

#[cfg(test)]
mod tests {
use super::*;
use crate::connection::ConnectRequest;
use crate::database::{CreateTableData, CreateTableMode, CreateTableRequest};
use crate::table::{Table, TableDefinition};
use arrow_array::{Int32Array, RecordBatch, StringArray};
use arrow_schema::{DataType, Field, Schema};
use tempfile::tempdir;

async fn setup_database() -> (tempfile::TempDir, ListingDatabase) {
let tempdir = tempdir().unwrap();
let uri = tempdir.path().to_str().unwrap();

let request = ConnectRequest {
uri: uri.to_string(),
#[cfg(feature = "remote")]
client_config: Default::default(),
options: Default::default(),
read_consistency_interval: None,
session: None,
};

let db = ListingDatabase::connect_with_options(&request)
.await
.unwrap();

(tempdir, db)
}

#[tokio::test]
async fn test_clone_table_basic() {
let (_tempdir, db) = setup_database().await;

// Create a source table with schema
let schema = Arc::new(Schema::new(vec![
Field::new("id", DataType::Int32, false),
Field::new("name", DataType::Utf8, false),
]));

let source_table = db
.create_table(CreateTableRequest {
name: "source_table".to_string(),
namespace: vec![],
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema.clone())),
mode: CreateTableMode::Create,
write_options: Default::default(),
})
.await
.unwrap();

// Get the source table URI
let source_uri = db.table_uri("source_table").unwrap();

// Clone the table
let cloned_table = db
.clone_table(CloneTableRequest {
target_table_name: "cloned_table".to_string(),
target_namespace: vec![],
source_uri: source_uri.clone(),
source_version: None,
source_tag: None,
is_shallow: true,
})
.await
.unwrap();

// Verify both tables exist
let table_names = db.table_names(TableNamesRequest::default()).await.unwrap();
assert!(table_names.contains(&"source_table".to_string()));
assert!(table_names.contains(&"cloned_table".to_string()));

// Verify schemas match
assert_eq!(
source_table.schema().await.unwrap(),
cloned_table.schema().await.unwrap()
);
}

#[tokio::test]
async fn test_clone_table_with_data() {
let (_tempdir, db) = setup_database().await;

// Create a source table with actual data
let schema = Arc::new(Schema::new(vec![
Field::new("id", DataType::Int32, false),
Field::new("name", DataType::Utf8, false),
]));

let batch = RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from(vec![1, 2, 3])),
Arc::new(StringArray::from(vec!["a", "b", "c"])),
],
)
.unwrap();

let reader = Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch)],
schema.clone(),
));

let source_table = db
.create_table(CreateTableRequest {
name: "source_with_data".to_string(),
namespace: vec![],
data: CreateTableData::Data(reader),
mode: CreateTableMode::Create,
write_options: Default::default(),
})
.await
.unwrap();

let source_uri = db.table_uri("source_with_data").unwrap();

// Clone the table
let cloned_table = db
.clone_table(CloneTableRequest {
target_table_name: "cloned_with_data".to_string(),
target_namespace: vec![],
source_uri,
source_version: None,
source_tag: None,
is_shallow: true,
})
.await
.unwrap();

// Verify data counts match
let source_count = source_table.count_rows(None).await.unwrap();
let cloned_count = cloned_table.count_rows(None).await.unwrap();
assert_eq!(source_count, cloned_count);
assert_eq!(source_count, 3);
}

#[tokio::test]
async fn test_clone_table_with_storage_options() {
let tempdir = tempdir().unwrap();
let uri = tempdir.path().to_str().unwrap();

// Create database with storage options
let mut options = HashMap::new();
options.insert("test_option".to_string(), "test_value".to_string());

let request = ConnectRequest {
uri: uri.to_string(),
#[cfg(feature = "remote")]
client_config: Default::default(),
options: options.clone(),
read_consistency_interval: None,
session: None,
};

let db = ListingDatabase::connect_with_options(&request)
.await
.unwrap();

// Create source table
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));

db.create_table(CreateTableRequest {
name: "source".to_string(),
namespace: vec![],
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
mode: CreateTableMode::Create,
write_options: Default::default(),
})
.await
.unwrap();

let source_uri = db.table_uri("source").unwrap();

// Clone should work with storage options
let cloned = db
.clone_table(CloneTableRequest {
target_table_name: "cloned".to_string(),
target_namespace: vec![],
source_uri,
source_version: None,
source_tag: None,
is_shallow: true,
})
.await;

assert!(cloned.is_ok());
}

#[tokio::test]
async fn test_clone_table_deep_not_supported() {
let (_tempdir, db) = setup_database().await;

// Create a source table
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));

db.create_table(CreateTableRequest {
name: "source".to_string(),
namespace: vec![],
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
mode: CreateTableMode::Create,
write_options: Default::default(),
})
.await
.unwrap();

let source_uri = db.table_uri("source").unwrap();

// Try deep clone (should fail)
let result = db
.clone_table(CloneTableRequest {
target_table_name: "cloned".to_string(),
target_namespace: vec![],
source_uri,
source_version: None,
source_tag: None,
is_shallow: false, // Request deep clone
})
.await;

assert!(result.is_err());
assert!(matches!(
result.unwrap_err(),
Error::NotSupported { message } if message.contains("Deep clone")
));
}

#[tokio::test]
async fn test_clone_table_with_namespace_not_supported() {
let (_tempdir, db) = setup_database().await;

// Create a source table
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));

db.create_table(CreateTableRequest {
name: "source".to_string(),
namespace: vec![],
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
mode: CreateTableMode::Create,
write_options: Default::default(),
})
.await
.unwrap();

let source_uri = db.table_uri("source").unwrap();

// Try clone with namespace (should fail for listing database)
let result = db
.clone_table(CloneTableRequest {
target_table_name: "cloned".to_string(),
target_namespace: vec!["namespace".to_string()], // Non-empty namespace
source_uri,
source_version: None,
source_tag: None,
is_shallow: true,
})
.await;

assert!(result.is_err());
assert!(matches!(
result.unwrap_err(),
Error::NotSupported { message } if message.contains("Namespace parameter is not supported")
));
}

#[tokio::test]
async fn test_clone_table_invalid_target_name() {
let (_tempdir, db) = setup_database().await;

// Create a source table
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));

db.create_table(CreateTableRequest {
name: "source".to_string(),
namespace: vec![],
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
mode: CreateTableMode::Create,
write_options: Default::default(),
})
.await
.unwrap();

let source_uri = db.table_uri("source").unwrap();

// Try clone with invalid target name
let result = db
.clone_table(CloneTableRequest {
target_table_name: "invalid/name".to_string(), // Invalid name with slash
target_namespace: vec![],
source_uri,
source_version: None,
source_tag: None,
is_shallow: true,
})
.await;

assert!(result.is_err());
}

#[tokio::test]
async fn test_clone_table_source_not_found() {
let (_tempdir, db) = setup_database().await;

// Try to clone from non-existent source
let result = db
.clone_table(CloneTableRequest {
target_table_name: "cloned".to_string(),
target_namespace: vec![],
source_uri: "/nonexistent/table.lance".to_string(),
source_version: None,
source_tag: None,
is_shallow: true,
})
.await;

assert!(result.is_err());
}

#[tokio::test]
async fn test_clone_table_with_version_and_tag_error() {
let (_tempdir, db) = setup_database().await;

// Create a source table
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));

db.create_table(CreateTableRequest {
name: "source".to_string(),
namespace: vec![],
data: CreateTableData::Empty(TableDefinition::new_from_schema(schema)),
mode: CreateTableMode::Create,
write_options: Default::default(),
})
.await
.unwrap();

let source_uri = db.table_uri("source").unwrap();

// Try clone with both version and tag (should fail)
let result = db
.clone_table(CloneTableRequest {
target_table_name: "cloned".to_string(),
target_namespace: vec![],
source_uri,
source_version: Some(1),
source_tag: Some("v1.0".to_string()),
is_shallow: true,
})
.await;

assert!(result.is_err());
assert!(matches!(
result.unwrap_err(),
Error::InvalidInput { message } if message.contains("Cannot specify both source_version and source_tag")
));
}

#[tokio::test]
async fn test_clone_table_with_specific_version() {
let (_tempdir, db) = setup_database().await;

// Create a source table with initial data
let schema = Arc::new(Schema::new(vec![
Field::new("id", DataType::Int32, false),
Field::new("value", DataType::Utf8, false),
]));

let batch1 = RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from(vec![1, 2])),
Arc::new(StringArray::from(vec!["a", "b"])),
],
)
.unwrap();

let reader = Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch1)],
schema.clone(),
));

let source_table = db
.create_table(CreateTableRequest {
name: "versioned_source".to_string(),
namespace: vec![],
data: CreateTableData::Data(reader),
mode: CreateTableMode::Create,
write_options: Default::default(),
})
.await
.unwrap();

// Get the initial version
let initial_version = source_table.version().await.unwrap();

// Add more data to create a new version
let batch2 = RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from(vec![3, 4])),
Arc::new(StringArray::from(vec!["c", "d"])),
],
)
.unwrap();

let source_table_obj = Table::new(source_table.clone());
source_table_obj
.add(Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch2)],
schema.clone(),
)))
.execute()
.await
.unwrap();

// Verify source table now has 4 rows
assert_eq!(source_table.count_rows(None).await.unwrap(), 4);

let source_uri = db.table_uri("versioned_source").unwrap();

// Clone from the initial version (should have only 2 rows)
let cloned_table = db
.clone_table(CloneTableRequest {
target_table_name: "cloned_from_version".to_string(),
target_namespace: vec![],
source_uri,
source_version: Some(initial_version),
source_tag: None,
is_shallow: true,
})
.await
.unwrap();

// Verify cloned table has only the initial 2 rows
assert_eq!(cloned_table.count_rows(None).await.unwrap(), 2);

// Source table should still have 4 rows
assert_eq!(source_table.count_rows(None).await.unwrap(), 4);
}

#[tokio::test]
async fn test_clone_table_with_tag() {
let (_tempdir, db) = setup_database().await;

// Create a source table with initial data
let schema = Arc::new(Schema::new(vec![
Field::new("id", DataType::Int32, false),
Field::new("value", DataType::Utf8, false),
]));

let batch1 = RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from(vec![1, 2])),
Arc::new(StringArray::from(vec!["a", "b"])),
],
)
.unwrap();

let reader = Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch1)],
schema.clone(),
));

let source_table = db
.create_table(CreateTableRequest {
name: "tagged_source".to_string(),
namespace: vec![],
data: CreateTableData::Data(reader),
mode: CreateTableMode::Create,
write_options: Default::default(),
})
.await
.unwrap();

// Create a tag for the current version
let source_table_obj = Table::new(source_table.clone());
let mut tags = source_table_obj.tags().await.unwrap();
tags.create("v1.0", source_table.version().await.unwrap())
.await
.unwrap();

// Add more data after the tag
let batch2 = RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from(vec![3, 4])),
Arc::new(StringArray::from(vec!["c", "d"])),
],
)
.unwrap();

let source_table_obj = Table::new(source_table.clone());
source_table_obj
.add(Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch2)],
schema.clone(),
)))
.execute()
.await
.unwrap();

// Source table should have 4 rows
assert_eq!(source_table.count_rows(None).await.unwrap(), 4);

let source_uri = db.table_uri("tagged_source").unwrap();

// Clone from the tag (should have only 2 rows)
let cloned_table = db
.clone_table(CloneTableRequest {
target_table_name: "cloned_from_tag".to_string(),
target_namespace: vec![],
source_uri,
source_version: None,
source_tag: Some("v1.0".to_string()),
is_shallow: true,
})
.await
.unwrap();

// Verify cloned table has only the tagged version's 2 rows
assert_eq!(cloned_table.count_rows(None).await.unwrap(), 2);
}

#[tokio::test]
async fn test_cloned_tables_evolve_independently() {
let (_tempdir, db) = setup_database().await;

// Create a source table with initial data
let schema = Arc::new(Schema::new(vec![
Field::new("id", DataType::Int32, false),
Field::new("value", DataType::Utf8, false),
]));

let batch1 = RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from(vec![1, 2])),
Arc::new(StringArray::from(vec!["a", "b"])),
],
)
.unwrap();

let reader = Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch1)],
schema.clone(),
));

let source_table = db
.create_table(CreateTableRequest {
name: "independent_source".to_string(),
namespace: vec![],
data: CreateTableData::Data(reader),
mode: CreateTableMode::Create,
write_options: Default::default(),
})
.await
.unwrap();

let source_uri = db.table_uri("independent_source").unwrap();

// Clone the table
let cloned_table = db
.clone_table(CloneTableRequest {
target_table_name: "independent_clone".to_string(),
target_namespace: vec![],
source_uri,
source_version: None,
source_tag: None,
is_shallow: true,
})
.await
.unwrap();

// Both should start with 2 rows
assert_eq!(source_table.count_rows(None).await.unwrap(), 2);
assert_eq!(cloned_table.count_rows(None).await.unwrap(), 2);

// Add data to the cloned table
let batch_clone = RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from(vec![3, 4, 5])),
Arc::new(StringArray::from(vec!["c", "d", "e"])),
],
)
.unwrap();

let cloned_table_obj = Table::new(cloned_table.clone());
cloned_table_obj
.add(Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch_clone)],
schema.clone(),
)))
.execute()
.await
.unwrap();

// Add different data to the source table
let batch_source = RecordBatch::try_new(
schema.clone(),
vec![
Arc::new(Int32Array::from(vec![10, 11])),
Arc::new(StringArray::from(vec!["x", "y"])),
],
)
.unwrap();

let source_table_obj = Table::new(source_table.clone());
source_table_obj
.add(Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch_source)],
schema.clone(),
)))
.execute()
.await
.unwrap();

// Verify they have evolved independently
assert_eq!(source_table.count_rows(None).await.unwrap(), 4); // 2 + 2
assert_eq!(cloned_table.count_rows(None).await.unwrap(), 5); // 2 + 3
}

#[tokio::test]
async fn test_clone_latest_version() {
let (_tempdir, db) = setup_database().await;

// Create a source table with initial data
let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));

let batch1 =
RecordBatch::try_new(schema.clone(), vec![Arc::new(Int32Array::from(vec![1, 2]))])
.unwrap();

let reader = Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch1)],
schema.clone(),
));

let source_table = db
.create_table(CreateTableRequest {
name: "latest_version_source".to_string(),
namespace: vec![],
data: CreateTableData::Data(reader),
mode: CreateTableMode::Create,
write_options: Default::default(),
})
.await
.unwrap();

// Add more data to create new versions
for i in 0..3 {
let batch = RecordBatch::try_new(
schema.clone(),
vec![Arc::new(Int32Array::from(vec![i * 10, i * 10 + 1]))],
)
.unwrap();

let source_table_obj = Table::new(source_table.clone());
source_table_obj
.add(Box::new(arrow_array::RecordBatchIterator::new(
vec![Ok(batch)],
schema.clone(),
)))
.execute()
.await
.unwrap();
}

// Source should have 8 rows total (2 + 2 + 2 + 2)
let source_count = source_table.count_rows(None).await.unwrap();
assert_eq!(source_count, 8);

let source_uri = db.table_uri("latest_version_source").unwrap();

// Clone without specifying version or tag (should get latest)
let cloned_table = db
.clone_table(CloneTableRequest {
target_table_name: "cloned_latest".to_string(),
target_namespace: vec![],
source_uri,
source_version: None,
source_tag: None,
is_shallow: true,
})
.await
.unwrap();

// Cloned table should have all 8 rows from the latest version
assert_eq!(cloned_table.count_rows(None).await.unwrap(), 8);
}
}

@@ -45,10 +45,10 @@ use crate::{
pub trait EmbeddingFunction: std::fmt::Debug + Send + Sync {
fn name(&self) -> &str;
/// The type of the input data
fn source_type(&self) -> Result<Cow<'_, DataType>>;
fn source_type(&self) -> Result<Cow<DataType>>;
/// The type of the output data
/// This should **always** match the output of the `embed` function
fn dest_type(&self) -> Result<Cow<'_, DataType>>;
fn dest_type(&self) -> Result<Cow<DataType>>;
/// Compute the embeddings for the source column in the database
fn compute_source_embeddings(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>>;
/// Compute the embeddings for a given user query

@@ -75,11 +75,11 @@ impl EmbeddingFunction for BedrockEmbeddingFunction {
"bedrock"
}

fn source_type(&self) -> Result<Cow<'_, DataType>> {
fn source_type(&self) -> Result<Cow<DataType>> {
Ok(Cow::Owned(DataType::Utf8))
}

fn dest_type(&self) -> Result<Cow<'_, DataType>> {
fn dest_type(&self) -> Result<Cow<DataType>> {
let n_dims = self.model.ndims();
Ok(Cow::Owned(DataType::new_fixed_size_list(
DataType::Float32,

@@ -144,11 +144,11 @@ impl EmbeddingFunction for OpenAIEmbeddingFunction {
"openai"
}

fn source_type(&self) -> Result<Cow<'_, DataType>> {
fn source_type(&self) -> Result<Cow<DataType>> {
Ok(Cow::Owned(DataType::Utf8))
}

fn dest_type(&self) -> Result<Cow<'_, DataType>> {
fn dest_type(&self) -> Result<Cow<DataType>> {
let n_dims = self.model.ndims();
Ok(Cow::Owned(DataType::new_fixed_size_list(
DataType::Float32,

@@ -407,11 +407,11 @@ impl EmbeddingFunction for SentenceTransformersEmbeddings {
"sentence-transformers"
}

fn source_type(&self) -> crate::Result<std::borrow::Cow<'_, arrow_schema::DataType>> {
fn source_type(&self) -> crate::Result<std::borrow::Cow<arrow_schema::DataType>> {
Ok(Cow::Owned(DataType::Utf8))
}

fn dest_type(&self) -> crate::Result<std::borrow::Cow<'_, arrow_schema::DataType>> {
fn dest_type(&self) -> crate::Result<std::borrow::Cow<arrow_schema::DataType>> {
let (n_dims, dtype) = self.compute_ndims_and_dtype()?;
Ok(Cow::Owned(DataType::new_fixed_size_list(
dtype,

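The only change in these hunks is the lifetime spelling on `Cow`: `Cow<'_, DataType>` versus `Cow<DataType>`; the trait contract is unchanged. A small self-contained sketch of why that spelling matters (the `StoredType` struct and the 768-dimension list are invented for illustration, not crate items):

use std::borrow::Cow;

use arrow_schema::DataType;

// `Cow<'_, DataType>` lets an implementor either borrow a type it already
// stores or hand back a freshly built owned value.
struct StoredType {
    dtype: DataType,
}

impl StoredType {
    // Borrows from `self`; the `'_` ties the Cow's lifetime to `&self`.
    fn source_type(&self) -> Cow<'_, DataType> {
        Cow::Borrowed(&self.dtype)
    }

    // Owned variant, as the implementations in these hunks return.
    fn dest_type(&self) -> Cow<'_, DataType> {
        Cow::Owned(DataType::new_fixed_size_list(DataType::Float32, 768, false))
    }
}

Returning `Cow::Borrowed` avoids a clone when the implementor already holds the `DataType`; the implementations in this diff all return `Cow::Owned` because they build the type on the fly from the model's dimensions.
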
@@ -8,7 +8,7 @@
//! values
use std::cmp::max;

use lance::table::format::{IndexMetadata, Manifest};
use lance::table::format::{Index, Manifest};

use crate::DistanceType;

@@ -19,7 +19,7 @@ pub struct VectorIndex {
}

impl VectorIndex {
pub fn new_from_format(manifest: &Manifest, index: &IndexMetadata) -> Self {
pub fn new_from_format(manifest: &Manifest, index: &Index) -> Self {
let fields = index
.fields
.iter()
@@ -112,15 +112,6 @@ macro_rules! impl_ivf_params_setter {
self.max_iterations = max_iterations;
self
}

/// The target size of each partition.
///
/// This value controls the tradeoff between search performance and accuracy.
/// The higher the value the faster the search but the less accurate the results will be.
pub fn target_partition_size(mut self, target_partition_size: u32) -> Self {
self.target_partition_size = Some(target_partition_size);
self
}
};
}

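The doc comment above frames `target_partition_size` as a speed/accuracy knob, and `impl_ivf_params_setter` stamps the setter into each IVF builder below. As a sketch of how the generated setters compose (assuming the macro also generates setters for the other IVF fields shown in these hunks; the numeric values are examples, not recommendations):

// Illustrative composition of the macro-generated setters on one builder.
let builder = IvfPqIndexBuilder::default()
    .sample_rate(256) // rows sampled per partition while training centroids
    .max_iterations(50) // k-means iteration cap
    .target_partition_size(8192); // larger partitions: faster search, lower accuracy
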
@@ -191,7 +182,6 @@ pub struct IvfFlatIndexBuilder {
pub(crate) num_partitions: Option<u32>,
pub(crate) sample_rate: u32,
pub(crate) max_iterations: u32,
pub(crate) target_partition_size: Option<u32>,
}

impl Default for IvfFlatIndexBuilder {
@@ -201,7 +191,6 @@ impl Default for IvfFlatIndexBuilder {
num_partitions: None,
sample_rate: 256,
max_iterations: 50,
target_partition_size: None,
}
}
}
@@ -239,7 +228,6 @@ pub struct IvfPqIndexBuilder {
pub(crate) num_partitions: Option<u32>,
pub(crate) sample_rate: u32,
pub(crate) max_iterations: u32,
pub(crate) target_partition_size: Option<u32>,

// PQ
pub(crate) num_sub_vectors: Option<u32>,
@@ -255,7 +243,6 @@ impl Default for IvfPqIndexBuilder {
num_bits: None,
sample_rate: 256,
max_iterations: 50,
target_partition_size: None,
}
}
}
@@ -306,7 +293,6 @@ pub struct IvfHnswPqIndexBuilder {
pub(crate) num_partitions: Option<u32>,
pub(crate) sample_rate: u32,
pub(crate) max_iterations: u32,
pub(crate) target_partition_size: Option<u32>,

// HNSW
pub(crate) m: u32,
@@ -328,7 +314,6 @@ impl Default for IvfHnswPqIndexBuilder {
max_iterations: 50,
m: 20,
ef_construction: 300,
target_partition_size: None,
}
}
}
@@ -356,7 +341,6 @@ pub struct IvfHnswSqIndexBuilder {
pub(crate) num_partitions: Option<u32>,
pub(crate) sample_rate: u32,
pub(crate) max_iterations: u32,
pub(crate) target_partition_size: Option<u32>,

// HNSW
pub(crate) m: u32,
@@ -374,7 +358,6 @@ impl Default for IvfHnswSqIndexBuilder {
max_iterations: 50,
m: 20,
ef_construction: 300,
target_partition_size: None,
}
}
}

@@ -191,6 +191,7 @@
//! ```

pub mod arrow;
pub mod catalog;
pub mod connection;
pub mod data;
pub mod database;

@@ -18,5 +18,5 @@ const ARROW_FILE_CONTENT_TYPE: &str = "application/vnd.apache.arrow.file";
#[cfg(test)]
const JSON_CONTENT_TYPE: &str = "application/json";

pub use client::{ClientConfig, HeaderProvider, RetryConfig, TimeoutConfig, TlsConfig};
pub use client::{ClientConfig, RetryConfig, TimeoutConfig};
pub use db::{RemoteDatabaseOptions, RemoteDatabaseOptionsBuilder};

@@ -7,7 +7,7 @@ use reqwest::{
header::{HeaderMap, HeaderValue},
Body, Request, RequestBuilder, Response,
};
use std::{collections::HashMap, future::Future, str::FromStr, sync::Arc, time::Duration};
use std::{collections::HashMap, future::Future, str::FromStr, time::Duration};

use crate::error::{Error, Result};
use crate::remote::db::RemoteOptions;
@@ -15,28 +15,8 @@ use crate::remote::retry::{ResolvedRetryConfig, RetryCounter};

const REQUEST_ID_HEADER: HeaderName = HeaderName::from_static("x-request-id");

/// Configuration for TLS/mTLS settings.
#[derive(Clone, Debug, Default)]
pub struct TlsConfig {
/// Path to the client certificate file (PEM format)
pub cert_file: Option<String>,
/// Path to the client private key file (PEM format)
pub key_file: Option<String>,
/// Path to the CA certificate file for server verification (PEM format)
pub ssl_ca_cert: Option<String>,
/// Whether to verify the hostname in the server's certificate
pub assert_hostname: bool,
}

/// Trait for providing custom headers for each request
#[async_trait::async_trait]
pub trait HeaderProvider: Send + Sync + std::fmt::Debug {
/// Get the latest headers to be added to the request
async fn get_headers(&self) -> Result<HashMap<String, String>>;
}

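`HeaderProvider::get_headers` is polled on every request (and again on every retry, per `apply_dynamic_headers` further down), which makes it a natural fit for short-lived credentials. A hedged sketch of a provider that re-reads a rotating token on each call (the `BearerTokenProvider` type and the token-file path are invented for illustration):

// Illustrative provider: fetches a fresh bearer token for every request.
#[derive(Debug)]
struct BearerTokenProvider {
    token_path: String, // hypothetical location of a rotating token file
}

#[async_trait::async_trait]
impl HeaderProvider for BearerTokenProvider {
    async fn get_headers(&self) -> Result<HashMap<String, String>> {
        // Failing here fails the whole request, which is the safe default
        // for authentication material (see the error-handling tests below).
        let token = tokio::fs::read_to_string(&self.token_path)
            .await
            .map_err(|e| Error::Runtime {
                message: format!("failed to read token: {}", e),
            })?;
        let mut headers = HashMap::new();
        headers.insert("Authorization".to_string(), format!("Bearer {}", token.trim()));
        Ok(headers)
    }
}
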
/// Configuration for the LanceDB Cloud HTTP client.
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct ClientConfig {
pub timeout_config: TimeoutConfig,
pub retry_config: RetryConfig,
@@ -48,27 +28,6 @@ pub struct ClientConfig {
/// The delimiter to use when constructing object identifiers.
/// If not default, passes as query parameter.
pub id_delimiter: Option<String>,
/// TLS configuration for mTLS support
pub tls_config: Option<TlsConfig>,
/// Provider for custom headers to be added to each request
pub header_provider: Option<Arc<dyn HeaderProvider>>,
}

impl std::fmt::Debug for ClientConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("ClientConfig")
.field("timeout_config", &self.timeout_config)
.field("retry_config", &self.retry_config)
.field("user_agent", &self.user_agent)
.field("extra_headers", &self.extra_headers)
.field("id_delimiter", &self.id_delimiter)
.field("tls_config", &self.tls_config)
.field(
"header_provider",
&self.header_provider.as_ref().map(|_| "Some(...)"),
)
.finish()
}
}

impl Default for ClientConfig {
@@ -79,8 +38,6 @@ impl Default for ClientConfig {
user_agent: concat!("LanceDB-Rust-Client/", env!("CARGO_PKG_VERSION")).into(),
extra_headers: HashMap::new(),
id_delimiter: None,
tls_config: None,
header_provider: None,
}
}
}
@@ -186,29 +143,13 @@ pub struct RetryConfig {
// We use the `HttpSend` trait to abstract over the `reqwest::Client` so that
// we can mock responses in tests. Based on the patterns from this blog post:
// https://write.as/balrogboogie/testing-reqwest-based-clients
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct RestfulLanceDbClient<S: HttpSend = Sender> {
client: reqwest::Client,
host: String,
pub(crate) retry_config: ResolvedRetryConfig,
pub(crate) sender: S,
pub(crate) id_delimiter: String,
pub(crate) header_provider: Option<Arc<dyn HeaderProvider>>,
}

impl<S: HttpSend> std::fmt::Debug for RestfulLanceDbClient<S> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("RestfulLanceDbClient")
.field("host", &self.host)
.field("retry_config", &self.retry_config)
.field("sender", &self.sender)
.field("id_delimiter", &self.id_delimiter)
.field(
"header_provider",
&self.header_provider.as_ref().map(|_| "Some(...)"),
)
.finish()
}
}

pub trait HttpSend: Clone + Send + Sync + std::fmt::Debug + 'static {
@@ -304,49 +245,6 @@ impl RestfulLanceDbClient<Sender> {
if let Some(timeout) = timeout {
client_builder = client_builder.timeout(timeout);
}

// Configure mTLS if TlsConfig is provided
if let Some(tls_config) = &client_config.tls_config {
// Load client certificate and key for mTLS
if let (Some(cert_file), Some(key_file)) = (&tls_config.cert_file, &tls_config.key_file)
{
let cert = std::fs::read(cert_file).map_err(|err| Error::Other {
message: format!("Failed to read certificate file: {}", cert_file),
source: Some(Box::new(err)),
})?;
let key = std::fs::read(key_file).map_err(|err| Error::Other {
message: format!("Failed to read key file: {}", key_file),
source: Some(Box::new(err)),
})?;

let identity = reqwest::Identity::from_pem(&[&cert[..], &key[..]].concat())
.map_err(|err| Error::Other {
message: "Failed to create client identity from certificate and key".into(),
source: Some(Box::new(err)),
})?;
client_builder = client_builder.identity(identity);
}

// Load CA certificate for server verification
if let Some(ca_cert_file) = &tls_config.ssl_ca_cert {
let ca_cert = std::fs::read(ca_cert_file).map_err(|err| Error::Other {
message: format!("Failed to read CA certificate file: {}", ca_cert_file),
source: Some(Box::new(err)),
})?;

let ca_cert =
reqwest::Certificate::from_pem(&ca_cert).map_err(|err| Error::Other {
message: "Failed to create CA certificate from PEM".into(),
source: Some(Box::new(err)),
})?;
client_builder = client_builder.add_root_certificate(ca_cert);
}

// Configure hostname verification
client_builder =
client_builder.danger_accept_invalid_hostnames(!tls_config.assert_hostname);
}

let client = client_builder
.default_headers(Self::default_headers(
api_key,
@@ -369,17 +267,13 @@ impl RestfulLanceDbClient<Sender> {
None => format!("https://{}.{}.api.lancedb.com", db_name, region),
};
debug!("Created client for host: {}", host);
let retry_config = client_config.retry_config.clone().try_into()?;
let retry_config = client_config.retry_config.try_into()?;
Ok(Self {
client,
host,
retry_config,
sender: Sender,
id_delimiter: client_config
.id_delimiter
.clone()
.unwrap_or("$".to_string()),
header_provider: client_config.header_provider,
id_delimiter: client_config.id_delimiter.unwrap_or("$".to_string()),
})
}
}
@@ -486,34 +380,10 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
}
}

/// Apply dynamic headers from the header provider if configured
async fn apply_dynamic_headers(&self, mut request: Request) -> Result<Request> {
if let Some(ref provider) = self.header_provider {
let headers = provider.get_headers().await?;
let request_headers = request.headers_mut();
for (key, value) in headers {
if let Ok(header_name) = HeaderName::from_str(&key) {
if let Ok(header_value) = HeaderValue::from_str(&value) {
request_headers.insert(header_name, header_value);
} else {
debug!("Invalid header value for key {}: {}", key, value);
}
} else {
debug!("Invalid header name: {}", key);
}
}
}
Ok(request)
}

pub async fn send(&self, req: RequestBuilder) -> Result<(String, Response)> {
let (client, request) = req.build_split();
let mut request = request.unwrap();
let request_id = self.extract_request_id(&mut request);

// Apply dynamic headers before sending
request = self.apply_dynamic_headers(request).await?;

self.log_request(&request, &request_id);

let response = self
@@ -569,10 +439,6 @@ impl<S: HttpSend> RestfulLanceDbClient<S> {
let (c, request) = req_builder.build_split();
let mut request = request.unwrap();
self.set_request_id(&mut request, &request_id.clone());

// Apply dynamic headers before each retry attempt
request = self.apply_dynamic_headers(request).await?;

self.log_request(&request, &request_id);

let response = self.sender.send(&c, request).await.map(|r| (r.status(), r));
@@ -700,7 +566,6 @@ impl<T> RequestResultExt for reqwest::Result<T> {

#[cfg(test)]
pub mod test_utils {
use std::convert::TryInto;
use std::sync::Arc;

use super::*;
@@ -746,31 +611,6 @@ pub mod test_utils {
f: Arc::new(wrapper),
},
id_delimiter: "$".to_string(),
header_provider: None,
}
}

pub fn client_with_handler_and_config<T>(
handler: impl Fn(reqwest::Request) -> http::response::Response<T> + Send + Sync + 'static,
config: ClientConfig,
) -> RestfulLanceDbClient<MockSender>
where
T: Into<reqwest::Body>,
{
let wrapper = move |req: reqwest::Request| {
let response = handler(req);
response.into()
};

RestfulLanceDbClient {
client: reqwest::Client::new(),
host: "http://localhost".to_string(),
retry_config: config.retry_config.try_into().unwrap(),
sender: MockSender {
f: Arc::new(wrapper),
},
id_delimiter: config.id_delimiter.unwrap_or_else(|| "$".to_string()),
header_provider: config.header_provider,
}
}
}
@@ -821,205 +661,4 @@ mod tests {
Some(Duration::from_secs(120))
);
}

#[test]
fn test_tls_config_default() {
let config = TlsConfig::default();
assert!(config.cert_file.is_none());
assert!(config.key_file.is_none());
assert!(config.ssl_ca_cert.is_none());
assert!(!config.assert_hostname);
}

#[test]
fn test_tls_config_with_mtls() {
let tls_config = TlsConfig {
cert_file: Some("/path/to/cert.pem".to_string()),
key_file: Some("/path/to/key.pem".to_string()),
ssl_ca_cert: Some("/path/to/ca.pem".to_string()),
assert_hostname: true,
};

assert_eq!(tls_config.cert_file, Some("/path/to/cert.pem".to_string()));
assert_eq!(tls_config.key_file, Some("/path/to/key.pem".to_string()));
assert_eq!(tls_config.ssl_ca_cert, Some("/path/to/ca.pem".to_string()));
assert!(tls_config.assert_hostname);
}

#[test]
fn test_client_config_with_tls() {
let tls_config = TlsConfig {
cert_file: Some("/path/to/cert.pem".to_string()),
key_file: Some("/path/to/key.pem".to_string()),
ssl_ca_cert: None,
assert_hostname: false,
};

let client_config = ClientConfig {
tls_config: Some(tls_config.clone()),
..Default::default()
};

assert!(client_config.tls_config.is_some());
let config_tls = client_config.tls_config.unwrap();
assert_eq!(config_tls.cert_file, Some("/path/to/cert.pem".to_string()));
assert_eq!(config_tls.key_file, Some("/path/to/key.pem".to_string()));
assert!(config_tls.ssl_ca_cert.is_none());
assert!(!config_tls.assert_hostname);
}

// Test implementation of HeaderProvider
#[derive(Debug, Clone)]
struct TestHeaderProvider {
headers: HashMap<String, String>,
}

impl TestHeaderProvider {
fn new(headers: HashMap<String, String>) -> Self {
Self { headers }
}
}

#[async_trait::async_trait]
impl HeaderProvider for TestHeaderProvider {
async fn get_headers(&self) -> Result<HashMap<String, String>> {
Ok(self.headers.clone())
}
}

// Test implementation that returns an error
#[derive(Debug)]
struct ErrorHeaderProvider;

#[async_trait::async_trait]
impl HeaderProvider for ErrorHeaderProvider {
async fn get_headers(&self) -> Result<HashMap<String, String>> {
Err(Error::Runtime {
message: "Failed to get headers".to_string(),
})
}
}

#[tokio::test]
async fn test_client_config_with_header_provider() {
let mut headers = HashMap::new();
headers.insert("X-API-Key".to_string(), "secret-key".to_string());

let provider = TestHeaderProvider::new(headers);
let client_config = ClientConfig {
header_provider: Some(Arc::new(provider) as Arc<dyn HeaderProvider>),
..Default::default()
};

assert!(client_config.header_provider.is_some());
}

#[tokio::test]
async fn test_apply_dynamic_headers() {
// Create a mock client with header provider
let mut headers = HashMap::new();
headers.insert("X-Dynamic".to_string(), "dynamic-value".to_string());

let provider = TestHeaderProvider::new(headers);

// Create a simple request
let request = reqwest::Request::new(
reqwest::Method::GET,
"https://example.com/test".parse().unwrap(),
);

// Create client with header provider
let client = RestfulLanceDbClient {
client: reqwest::Client::new(),
host: "https://example.com".to_string(),
retry_config: RetryConfig::default().try_into().unwrap(),
sender: Sender,
id_delimiter: "+".to_string(),
header_provider: Some(Arc::new(provider) as Arc<dyn HeaderProvider>),
};

// Apply dynamic headers
let updated_request = client.apply_dynamic_headers(request).await.unwrap();

// Check that the header was added
assert_eq!(
updated_request.headers().get("X-Dynamic").unwrap(),
"dynamic-value"
);
}

#[tokio::test]
async fn test_apply_dynamic_headers_merge() {
// Test that dynamic headers override existing headers
let mut headers = HashMap::new();
headers.insert("Authorization".to_string(), "Bearer new-token".to_string());
headers.insert("X-Custom".to_string(), "custom-value".to_string());

let provider = TestHeaderProvider::new(headers);

// Create request with existing Authorization header
let mut request_builder = reqwest::Client::new().get("https://example.com/test");
request_builder = request_builder.header("Authorization", "Bearer old-token");
request_builder = request_builder.header("X-Existing", "existing-value");
let request = request_builder.build().unwrap();

// Create client with header provider
let client = RestfulLanceDbClient {
client: reqwest::Client::new(),
host: "https://example.com".to_string(),
retry_config: RetryConfig::default().try_into().unwrap(),
sender: Sender,
id_delimiter: "+".to_string(),
header_provider: Some(Arc::new(provider) as Arc<dyn HeaderProvider>),
};

// Apply dynamic headers
let updated_request = client.apply_dynamic_headers(request).await.unwrap();

// Check that dynamic headers override existing ones
assert_eq!(
updated_request.headers().get("Authorization").unwrap(),
"Bearer new-token"
);
assert_eq!(
updated_request.headers().get("X-Custom").unwrap(),
"custom-value"
);
// Existing headers should still be present
assert_eq!(
updated_request.headers().get("X-Existing").unwrap(),
"existing-value"
);
}

#[tokio::test]
async fn test_apply_dynamic_headers_with_error_provider() {
let provider = ErrorHeaderProvider;

let request = reqwest::Request::new(
reqwest::Method::GET,
"https://example.com/test".parse().unwrap(),
);

let client = RestfulLanceDbClient {
client: reqwest::Client::new(),
host: "https://example.com".to_string(),
retry_config: RetryConfig::default().try_into().unwrap(),
sender: Sender,
id_delimiter: "+".to_string(),
header_provider: Some(Arc::new(provider) as Arc<dyn HeaderProvider>),
};

// Header provider errors should fail the request
// This is important for security - if auth headers can't be fetched, don't proceed
let result = client.apply_dynamic_headers(request).await;
assert!(result.is_err());

match result.unwrap_err() {
Error::Runtime { message } => {
assert_eq!(message, "Failed to get headers");
}
_ => panic!("Expected Runtime error"),
}
}
}

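Tying the removed pieces together: a hedged sketch of one `ClientConfig` that carries both mTLS settings and a dynamic header provider (the file paths are invented, and `BearerTokenProvider` is the illustrative type from the earlier sketch; wiring the config into a connection goes through the crate's connect builder and is omitted here):

// Illustrative only: combining TlsConfig and a HeaderProvider in one config.
let config = ClientConfig {
    tls_config: Some(TlsConfig {
        cert_file: Some("/etc/lancedb/client.pem".to_string()), // hypothetical path
        key_file: Some("/etc/lancedb/client.key".to_string()),  // hypothetical path
        ssl_ca_cert: Some("/etc/lancedb/ca.pem".to_string()),   // hypothetical path
        assert_hostname: true,
    }),
    header_provider: Some(std::sync::Arc::new(BearerTokenProvider {
        token_path: "/run/secrets/token".to_string(), // hypothetical path
    }) as std::sync::Arc<dyn HeaderProvider>),
    ..Default::default()
};
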
@@ -14,9 +14,9 @@ use serde::Deserialize;
use tokio::task::spawn_blocking;

use crate::database::{
CloneTableRequest, CreateNamespaceRequest, CreateTableData, CreateTableMode,
CreateTableRequest, Database, DatabaseOptions, DropNamespaceRequest, ListNamespacesRequest,
OpenTableRequest, TableNamesRequest,
CreateNamespaceRequest, CreateTableData, CreateTableMode, CreateTableRequest, Database,
DatabaseOptions, DropNamespaceRequest, ListNamespacesRequest, OpenTableRequest,
TableNamesRequest,
};
use crate::error::Result;
use crate::table::BaseTable;
@@ -27,18 +27,6 @@ use super::table::RemoteTable;
use super::util::{batches_to_ipc_bytes, parse_server_version};
use super::ARROW_STREAM_CONTENT_TYPE;

// Request structure for the remote clone table API
#[derive(serde::Serialize)]
struct RemoteCloneTableRequest {
source_location: String,
#[serde(skip_serializing_if = "Option::is_none")]
source_version: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
source_tag: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
is_shallow: Option<bool>,
}

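Because every optional field carries `skip_serializing_if = "Option::is_none"`, absent options are dropped from the wire body rather than sent as `null`. The mocked requests in the tests further down accordingly see JSON bodies of roughly this shape (illustrative):

{"source_location": "s3://bucket/source_table", "source_version": 42, "is_shallow": true}
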
// the versions of the server that we support
// for any new feature that we need to change the SDK behavior, we should bump the server version,
// and add a feature flag as method of `ServerVersion` here.
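
A hedged sketch of the feature-flag pattern this comment describes (the struct shape and the version/feature pairing are assumptions for illustration, not the crate's actual definitions):

// Illustrative only: callers branch on a named capability rather than
// scattering raw version comparisons through the SDK.
struct ServerVersion {
    major: u32,
    minor: u32,
}

impl ServerVersion {
    // Hypothetical flag: servers at or above 0.2 support table cloning.
    fn supports_clone(&self) -> bool {
        (self.major, self.minor) >= (0, 2)
    }
}
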
@@ -224,9 +212,8 @@ impl RemoteDatabase {
|
||||
#[cfg(all(test, feature = "remote"))]
|
||||
mod test_utils {
|
||||
use super::*;
|
||||
use crate::remote::client::test_utils::client_with_handler;
|
||||
use crate::remote::client::test_utils::MockSender;
|
||||
use crate::remote::client::test_utils::{client_with_handler, client_with_handler_and_config};
|
||||
use crate::remote::ClientConfig;
|
||||
|
||||
impl RemoteDatabase<MockSender> {
|
||||
pub fn new_mock<F, T>(handler: F) -> Self
|
||||
@@ -240,18 +227,6 @@ mod test_utils {
|
||||
table_cache: Cache::new(0),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_mock_with_config<F, T>(handler: F, config: ClientConfig) -> Self
|
||||
where
|
||||
F: Fn(reqwest::Request) -> http::Response<T> + Send + Sync + 'static,
|
||||
T: Into<reqwest::Body>,
|
||||
{
|
||||
let client = client_with_handler_and_config(handler, config);
|
||||
Self {
|
||||
client,
|
||||
table_cache: Cache::new(0),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -442,51 +417,6 @@ impl<S: HttpSend> Database for RemoteDatabase<S> {
|
||||
Ok(table)
|
||||
}
|
||||
|
||||
async fn clone_table(&self, request: CloneTableRequest) -> Result<Arc<dyn BaseTable>> {
|
||||
let table_identifier = build_table_identifier(
|
||||
&request.target_table_name,
|
||||
&request.target_namespace,
|
||||
&self.client.id_delimiter,
|
||||
);
|
||||
|
||||
let remote_request = RemoteCloneTableRequest {
|
||||
source_location: request.source_uri,
|
||||
source_version: request.source_version,
|
||||
source_tag: request.source_tag,
|
||||
is_shallow: Some(request.is_shallow),
|
||||
};
|
||||
|
||||
let req = self
|
||||
.client
|
||||
.post(&format!("/v1/table/{}/clone", table_identifier.clone()))
|
||||
.json(&remote_request);
|
||||
|
||||
let (request_id, rsp) = self.client.send(req).await?;
|
||||
|
||||
let status = rsp.status();
|
||||
if status != StatusCode::OK {
|
||||
let body = rsp.text().await.err_to_http(request_id.clone())?;
|
||||
return Err(crate::Error::Http {
|
||||
source: format!("Failed to clone table: {}", body).into(),
|
||||
request_id,
|
||||
status_code: Some(status),
|
||||
});
|
||||
}
|
||||
|
||||
let version = parse_server_version(&request_id, &rsp)?;
|
||||
let cache_key = build_cache_key(&request.target_table_name, &request.target_namespace);
|
||||
let table = Arc::new(RemoteTable::new(
|
||||
self.client.clone(),
|
||||
request.target_table_name.clone(),
|
||||
request.target_namespace.clone(),
|
||||
table_identifier,
|
||||
version,
|
||||
));
|
||||
self.table_cache.insert(cache_key, table.clone()).await;
|
||||
|
||||
Ok(table)
|
||||
}
|
||||
|
||||
async fn open_table(&self, request: OpenTableRequest) -> Result<Arc<dyn BaseTable>> {
|
||||
let identifier =
|
||||
build_table_identifier(&request.name, &request.namespace, &self.client.id_delimiter);
|
||||
@@ -657,7 +587,6 @@ impl From<StorageOptions> for RemoteOptions {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::build_cache_key;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, OnceLock};
|
||||
|
||||
use arrow_array::{Int32Array, RecordBatch, RecordBatchIterator};
|
||||
@@ -666,7 +595,7 @@ mod tests {
|
||||
use crate::connection::ConnectBuilder;
|
||||
use crate::{
|
||||
database::CreateTableMode,
|
||||
remote::{ClientConfig, HeaderProvider, ARROW_STREAM_CONTENT_TYPE, JSON_CONTENT_TYPE},
|
||||
remote::{ARROW_STREAM_CONTENT_TYPE, JSON_CONTENT_TYPE},
|
||||
Connection, Error,
|
||||
};
|
||||
|
||||
@@ -1183,241 +1112,4 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_header_provider_in_request() {
|
||||
// Test HeaderProvider implementation that adds custom headers
|
||||
#[derive(Debug, Clone)]
|
||||
struct TestHeaderProvider {
|
||||
headers: HashMap<String, String>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl HeaderProvider for TestHeaderProvider {
|
||||
async fn get_headers(&self) -> crate::Result<HashMap<String, String>> {
|
||||
Ok(self.headers.clone())
|
||||
}
|
||||
}
|
||||
|
||||
// Create a test header provider with custom headers
|
||||
let mut headers = HashMap::new();
|
||||
headers.insert("X-Custom-Auth".to_string(), "test-token".to_string());
|
||||
headers.insert("X-Request-Id".to_string(), "test-123".to_string());
|
||||
let provider = Arc::new(TestHeaderProvider { headers }) as Arc<dyn HeaderProvider>;
|
||||
|
||||
// Create client config with the header provider
|
||||
let client_config = ClientConfig {
|
||||
header_provider: Some(provider),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Create connection with handler that verifies the headers are present
|
||||
let conn = Connection::new_with_handler_and_config(
|
||||
move |request| {
|
||||
// Verify that our custom headers are present
|
||||
assert_eq!(
|
||||
request.headers().get("X-Custom-Auth").unwrap(),
|
||||
"test-token"
|
||||
);
|
||||
assert_eq!(request.headers().get("X-Request-Id").unwrap(), "test-123");
|
||||
|
||||
// Also check standard headers are still there
|
||||
assert_eq!(request.method(), &reqwest::Method::GET);
|
||||
assert_eq!(request.url().path(), "/v1/table/");
|
||||
|
||||
http::Response::builder()
|
||||
.status(200)
|
||||
.body(r#"{"tables": ["table1", "table2"]}"#)
|
||||
.unwrap()
|
||||
},
|
||||
client_config,
|
||||
);
|
||||
|
||||
// Make a request that should include the custom headers
|
||||
let names = conn.table_names().execute().await.unwrap();
|
||||
assert_eq!(names, vec!["table1", "table2"]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_header_provider_error_handling() {
|
||||
// Test HeaderProvider that returns an error
|
||||
#[derive(Debug)]
|
||||
struct ErrorHeaderProvider;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl HeaderProvider for ErrorHeaderProvider {
|
||||
async fn get_headers(&self) -> crate::Result<HashMap<String, String>> {
|
||||
Err(crate::Error::Runtime {
|
||||
message: "Failed to fetch auth token".to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
let provider = Arc::new(ErrorHeaderProvider) as Arc<dyn HeaderProvider>;
|
||||
let client_config = ClientConfig {
|
||||
header_provider: Some(provider),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Create connection - handler won't be called because header provider fails
|
||||
let conn = Connection::new_with_handler_and_config(
|
||||
move |_request| -> http::Response<&'static str> {
|
||||
panic!("Handler should not be called when header provider fails");
|
||||
},
|
||||
client_config,
|
||||
);
|
||||
|
||||
// Request should fail due to header provider error
|
||||
let result = conn.table_names().execute().await;
|
||||
assert!(result.is_err());
|
||||
|
||||
match result.unwrap_err() {
|
||||
crate::Error::Runtime { message } => {
|
||||
assert_eq!(message, "Failed to fetch auth token");
|
||||
}
|
||||
_ => panic!("Expected Runtime error from header provider"),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_clone_table() {
|
||||
let conn = Connection::new_with_handler(|request| {
|
||||
assert_eq!(request.method(), &reqwest::Method::POST);
|
||||
assert_eq!(request.url().path(), "/v1/table/cloned_table/clone");
|
||||
assert_eq!(
|
||||
request.headers().get("Content-Type").unwrap(),
|
||||
JSON_CONTENT_TYPE
|
||||
);
|
||||
|
||||
let body = request.body().unwrap().as_bytes().unwrap();
|
||||
let body: serde_json::Value = serde_json::from_slice(body).unwrap();
|
||||
assert_eq!(body["source_location"], "s3://bucket/source_table");
|
||||
assert_eq!(body["is_shallow"], true);
|
||||
|
||||
http::Response::builder().status(200).body("").unwrap()
|
||||
});
|
||||
|
||||
let table = conn
|
||||
.clone_table("cloned_table", "s3://bucket/source_table")
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(table.name(), "cloned_table");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_clone_table_with_version() {
|
||||
let conn = Connection::new_with_handler(|request| {
|
||||
assert_eq!(request.method(), &reqwest::Method::POST);
|
||||
assert_eq!(request.url().path(), "/v1/table/cloned_table/clone");
|
||||
|
||||
let body = request.body().unwrap().as_bytes().unwrap();
|
||||
let body: serde_json::Value = serde_json::from_slice(body).unwrap();
|
||||
assert_eq!(body["source_location"], "s3://bucket/source_table");
|
||||
assert_eq!(body["source_version"], 42);
|
||||
assert_eq!(body["is_shallow"], true);
|
||||
|
||||
http::Response::builder().status(200).body("").unwrap()
|
||||
});
|
||||
|
||||
let table = conn
|
||||
.clone_table("cloned_table", "s3://bucket/source_table")
|
||||
.source_version(42)
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(table.name(), "cloned_table");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_clone_table_with_tag() {
|
||||
let conn = Connection::new_with_handler(|request| {
|
||||
assert_eq!(request.method(), &reqwest::Method::POST);
|
||||
assert_eq!(request.url().path(), "/v1/table/cloned_table/clone");
|
||||
|
||||
let body = request.body().unwrap().as_bytes().unwrap();
|
||||
let body: serde_json::Value = serde_json::from_slice(body).unwrap();
|
||||
assert_eq!(body["source_location"], "s3://bucket/source_table");
|
||||
assert_eq!(body["source_tag"], "v1.0");
|
||||
assert_eq!(body["is_shallow"], true);
|
||||
|
||||
http::Response::builder().status(200).body("").unwrap()
|
||||
});
|
||||
|
||||
let table = conn
|
||||
.clone_table("cloned_table", "s3://bucket/source_table")
|
||||
.source_tag("v1.0")
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(table.name(), "cloned_table");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_clone_table_deep_clone() {
|
||||
let conn = Connection::new_with_handler(|request| {
|
||||
assert_eq!(request.method(), &reqwest::Method::POST);
|
||||
assert_eq!(request.url().path(), "/v1/table/cloned_table/clone");
|
||||
|
||||
let body = request.body().unwrap().as_bytes().unwrap();
|
||||
let body: serde_json::Value = serde_json::from_slice(body).unwrap();
|
||||
assert_eq!(body["source_location"], "s3://bucket/source_table");
|
||||
assert_eq!(body["is_shallow"], false);
|
||||
|
||||
http::Response::builder().status(200).body("").unwrap()
|
||||
});
|
||||
|
||||
let table = conn
|
||||
.clone_table("cloned_table", "s3://bucket/source_table")
|
||||
.is_shallow(false)
|
||||
.execute()
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(table.name(), "cloned_table");
|
||||
}
|
||||
|
||||
    #[tokio::test]
    async fn test_clone_table_with_namespace() {
        let conn = Connection::new_with_handler(|request| {
            assert_eq!(request.method(), &reqwest::Method::POST);
            assert_eq!(request.url().path(), "/v1/table/ns1$ns2$cloned_table/clone");

            let body = request.body().unwrap().as_bytes().unwrap();
            let body: serde_json::Value = serde_json::from_slice(body).unwrap();
            assert_eq!(body["source_location"], "s3://bucket/source_table");
            assert_eq!(body["is_shallow"], true);

            http::Response::builder().status(200).body("").unwrap()
        });

        let table = conn
            .clone_table("cloned_table", "s3://bucket/source_table")
            .target_namespace(vec!["ns1".to_string(), "ns2".to_string()])
            .execute()
            .await
            .unwrap();
        assert_eq!(table.name(), "cloned_table");
    }

    #[tokio::test]
    async fn test_clone_table_error() {
        let conn = Connection::new_with_handler(|_| {
            http::Response::builder()
                .status(500)
                .body("Internal server error")
                .unwrap()
        });

        let result = conn
            .clone_table("cloned_table", "s3://bucket/source_table")
            .execute()
            .await;

        assert!(result.is_err());
        if let Err(crate::Error::Http { source, .. }) = result {
            assert!(source.to_string().contains("Failed to clone table"));
        } else {
            panic!("Expected HTTP error");
        }
    }
}

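Taken together, the tests above pin down the clone_table builder surface: source_version, source_tag, is_shallow, and target_namespace, with shallow cloning as the default. Below is a minimal usage sketch, assuming an already-opened remote Connection; the method names are exactly those exercised by the tests, while clone_examples and the target table names are placeholders.

async fn clone_examples(conn: &Connection) -> Result<()> {
    // Shallow clone (the default) of the latest version of the source table.
    let table = conn
        .clone_table("cloned_table", "s3://bucket/source_table")
        .execute()
        .await?;
    assert_eq!(table.name(), "cloned_table");

    // Pin the source to a specific version, or to a tag.
    let _ = conn
        .clone_table("pinned_clone", "s3://bucket/source_table")
        .source_version(42)
        .execute()
        .await?;
    let _ = conn
        .clone_table("tagged_clone", "s3://bucket/source_table")
        .source_tag("v1.0")
        .execute()
        .await?;

    // Deep clone: copy the data instead of referencing it.
    let _ = conn
        .clone_table("deep_clone", "s3://bucket/source_table")
        .is_shallow(false)
        .execute()
        .await?;

    // Place the clone under a namespace; components are joined with `$`
    // in the REST path, as the namespace test above shows.
    let _ = conn
        .clone_table("cloned_table", "s3://bucket/source_table")
        .target_namespace(vec!["ns1".to_string(), "ns2".to_string()])
        .execute()
        .await?;
    Ok(())
}
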
@@ -242,15 +242,17 @@ pub struct OptimizeStats {
/// Describes what happens when a vector either contains NaN or
/// does not have enough values
#[derive(Clone, Debug, Default)]
#[allow(dead_code)] // https://github.com/lancedb/lancedb/issues/992
enum BadVectorHandling {
    /// An error is returned
    #[default]
    Error,
    #[allow(dead_code)] // https://github.com/lancedb/lancedb/issues/992
    /// The offending row is dropped
    Drop,
    #[allow(dead_code)] // https://github.com/lancedb/lancedb/issues/992
    /// The invalid/missing items are replaced by fill_value
    Fill(f32),
    #[allow(dead_code)] // https://github.com/lancedb/lancedb/issues/992
    /// The invalid items are replaced by NULL
    None,
}
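BadVectorHandling is currently dead code (issue #992), so nothing in the crate consumes it yet. The following is a hypothetical sketch of how such a policy could be applied to a single vector; apply_policy and its Option-based return are inventions for illustration, not crate API.

fn apply_policy(
    policy: &BadVectorHandling,
    vector: Vec<f32>,
    expected_dim: usize,
) -> Option<Vec<f32>> {
    let is_bad = vector.len() != expected_dim || vector.iter().any(|v| v.is_nan());
    if !is_bad {
        return Some(vector);
    }
    match policy {
        // Error: a real implementation would return Err here; modeled as None.
        BadVectorHandling::Error => None,
        // Drop: the offending row is removed entirely.
        BadVectorHandling::Drop => None,
        // Fill: pad (or truncate) to the expected length and replace NaNs.
        BadVectorHandling::Fill(fill) => {
            let mut v = vector;
            v.resize(expected_dim, *fill);
            for x in v.iter_mut() {
                if x.is_nan() {
                    *x = *fill;
                }
            }
            Some(v)
        }
        // None: the row is kept but its vector becomes NULL.
        BadVectorHandling::None => None,
    }
}
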
@@ -1976,8 +1978,6 @@ impl NativeTable {
    /// Delete keys from the config
    pub async fn delete_config_keys(&self, delete_keys: &[&str]) -> Result<()> {
        let mut dataset = self.dataset.get_mut().await?;
        // TODO: update this when we implement metadata APIs
        #[allow(deprecated)]
        dataset.delete_config_keys(delete_keys).await?;
        Ok(())
    }
@@ -1988,8 +1988,6 @@ impl NativeTable {
        upsert_values: impl IntoIterator<Item = (String, String)>,
    ) -> Result<()> {
        let mut dataset = self.dataset.get_mut().await?;
        // TODO: update this when we implement metadata APIs
        #[allow(deprecated)]
        dataset.replace_schema_metadata(upsert_values).await?;
        Ok(())
    }

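A short usage sketch for delete_config_keys, assuming `table` is a NativeTable opened elsewhere; the companion upsert helper's name is truncated by the hunk above (only its dataset.replace_schema_metadata call is visible), so it is not shown. The key names here are placeholders.

async fn prune_table_config(table: &NativeTable) -> Result<()> {
    // Remove configuration entries that are no longer needed.
    table.delete_config_keys(&["legacy.flag", "tmp.marker"]).await?;
    Ok(())
}
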
@@ -121,10 +121,6 @@ impl ExecutionPlan for MetadataEraserExec {
                as SendableRecordBatchStream,
        )
    }

    fn partition_statistics(&self, partition: Option<usize>) -> DataFusionResult<Statistics> {
        self.input.partition_statistics(partition)
    }
}

#[derive(Debug)]
@@ -231,7 +227,6 @@ pub mod tests {
        prelude::{SessionConfig, SessionContext},
    };
    use datafusion_catalog::TableProvider;
    use datafusion_common::stats::Precision;
    use datafusion_execution::SendableRecordBatchStream;
    use datafusion_expr::{col, lit, LogicalPlan, LogicalPlanBuilder};
    use futures::{StreamExt, TryStreamExt};
@@ -500,7 +495,6 @@ pub mod tests {
            plan,
            "MetadataEraserExec
  ProjectionExec:...
    CooperativeExec...
      LanceRead:...",
        )
        .await;
@@ -515,24 +509,4 @@ pub mod tests {

        TestFixture::check_plan(plan, "").await;
    }

    #[tokio::test]
    async fn test_metadata_eraser_propagates_statistics() {
        let fixture = TestFixture::new().await;

        let plan =
            LogicalPlanBuilder::scan("foo", provider_as_source(fixture.adapter.clone()), None)
                .unwrap()
                .build()
                .unwrap();

        let ctx = SessionContext::new();
        let physical_plan = ctx.state().create_physical_plan(&plan).await.unwrap();

        assert_eq!(physical_plan.name(), "MetadataEraserExec");

        let partition_stats = physical_plan.partition_statistics(None).unwrap();

        assert!(matches!(partition_stats.num_rows, Precision::Exact(10)));
    }
}

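partition_statistics simply delegates to the input because erasing schema metadata preserves every row, which is why the test above can assert that the fixture's exact row count (Precision::Exact(10)) survives the wrapper. A sketch, assuming datafusion-common's Statistics type, of why a row-preserving pass-through may forward exact statistics while a node that can drop rows must demote them:

use datafusion_common::Statistics;

// Pass-through vs. row-dropping statistics propagation.
fn forward_stats(input: Statistics, may_drop_rows: bool) -> Statistics {
    if may_drop_rows {
        // A filter-like node: an exact input row count is only an upper bound
        // on the output, so exact figures must be demoted to inexact.
        Statistics {
            num_rows: input.num_rows.to_inexact(),
            total_byte_size: input.total_byte_size.to_inexact(),
            column_statistics: input.column_statistics,
        }
    } else {
        // MetadataEraserExec-style pass-through: forward unchanged.
        input
    }
}
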
@@ -20,8 +20,6 @@ use datafusion_physical_plan::SendableRecordBatchStream;

lazy_static! {
    static ref TABLE_NAME_REGEX: regex::Regex = regex::Regex::new(r"^[a-zA-Z0-9_\-\.]+$").unwrap();
    static ref NAMESPACE_NAME_REGEX: regex::Regex =
        regex::Regex::new(r"^[a-zA-Z0-9_\-\.]+$").unwrap();
}

pub trait PatchStoreParam {
@@ -100,53 +98,6 @@ pub fn validate_table_name(name: &str) -> Result<()> {
    Ok(())
}

/// Validate a namespace name component
///
/// Namespace names must:
/// - Not be empty
/// - Only contain alphanumeric characters, underscores, hyphens, and periods
///
/// # Arguments
/// * `name` - A single namespace component (not the full path)
///
/// # Returns
/// * `Ok(())` if the namespace name is valid
/// * `Err(Error)` if the namespace name is invalid
pub fn validate_namespace_name(name: &str) -> Result<()> {
    if name.is_empty() {
        return Err(Error::InvalidInput {
            message: "Namespace names cannot be empty strings".to_string(),
        });
    }
    if !NAMESPACE_NAME_REGEX.is_match(name) {
        return Err(Error::InvalidInput {
            message: format!(
                "Invalid namespace name '{}': Namespace names can only contain alphanumeric characters, underscores, hyphens, and periods",
                name
            ),
        });
    }
    Ok(())
}

/// Validate all components of a namespace
///
/// Iterates through all namespace components and validates each one.
/// Returns an error if any component is invalid.
///
/// # Arguments
/// * `namespace` - The namespace components to validate
///
/// # Returns
/// * `Ok(())` if all namespace components are valid
/// * `Err(Error)` if any component is invalid
pub fn validate_namespace(namespace: &[String]) -> Result<()> {
    for component in namespace {
        validate_namespace_name(component)?;
    }
    Ok(())
}

/// Find one default column to create index or perform vector query.
pub(crate) fn default_vector_column(schema: &Schema, dim: Option<i32>) -> Result<String> {
    // Try to find a vector column.
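To make the validator rules above concrete, here is a brief illustration; note that `$` is rejected because it serves as the namespace delimiter in REST paths, as in the ns1$ns2$cloned_table test earlier. The function name namespace_examples is a placeholder.

fn namespace_examples() {
    // Alphanumerics, underscores, hyphens, and periods are allowed.
    assert!(validate_namespace_name("prod").is_ok());
    assert!(validate_namespace_name("team.search-v2").is_ok());
    // The path delimiter and other special characters are rejected.
    assert!(validate_namespace_name("bad$name").is_err());

    // Whole-namespace validation checks every component.
    let ns = vec!["prod".to_string(), "search".to_string()];
    assert!(validate_namespace(&ns).is_ok());
    assert!(validate_namespace(&[]).is_ok()); // the root namespace is valid
}
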
@@ -394,61 +345,6 @@ mod tests {
        assert!(validate_table_name("name with space").is_err());
    }

    #[test]
    fn test_validate_namespace_name() {
        // Valid namespace names
        assert!(validate_namespace_name("ns1").is_ok());
        assert!(validate_namespace_name("namespace_123").is_ok());
        assert!(validate_namespace_name("my-namespace").is_ok());
        assert!(validate_namespace_name("my.namespace").is_ok());
        assert!(validate_namespace_name("NS_1.2.3").is_ok());
        assert!(validate_namespace_name("a").is_ok());
        assert!(validate_namespace_name("123").is_ok());
        assert!(validate_namespace_name("_underscore").is_ok());
        assert!(validate_namespace_name("-hyphen").is_ok());
        assert!(validate_namespace_name(".period").is_ok());

        // Invalid namespace names
        assert!(validate_namespace_name("").is_err());
        assert!(validate_namespace_name("namespace with spaces").is_err());
        assert!(validate_namespace_name("namespace/with/slashes").is_err());
        assert!(validate_namespace_name("namespace\\with\\backslashes").is_err());
        assert!(validate_namespace_name("namespace$with$delimiter").is_err());
        assert!(validate_namespace_name("namespace@special").is_err());
        assert!(validate_namespace_name("namespace#hash").is_err());
    }

    #[test]
    fn test_validate_namespace() {
        // Valid namespace with single component
        assert!(validate_namespace(&["ns1".to_string()]).is_ok());

        // Valid namespace with multiple components
        assert!(
            validate_namespace(&["ns1".to_string(), "ns2".to_string(), "ns3".to_string()]).is_ok()
        );

        // Empty namespace (root) is valid
        assert!(validate_namespace(&[]).is_ok());

        // Invalid: contains empty component
        assert!(validate_namespace(&["ns1".to_string(), "".to_string()]).is_err());

        // Invalid: contains component with spaces
        assert!(validate_namespace(&["ns1".to_string(), "ns 2".to_string()]).is_err());

        // Invalid: contains component with special characters
        assert!(validate_namespace(&["ns1".to_string(), "ns@2".to_string()]).is_err());
        assert!(validate_namespace(&["ns1".to_string(), "ns/2".to_string()]).is_err());
        assert!(validate_namespace(&["ns1".to_string(), "ns$2".to_string()]).is_err());

        // Valid: underscores, hyphens, and periods are allowed
        assert!(
            validate_namespace(&["ns_1".to_string(), "ns-2".to_string(), "ns.3".to_string()])
                .is_ok()
        );
    }

    #[test]
    fn test_string_to_datatype() {
        let string = "int32";

@@ -341,10 +341,10 @@ impl EmbeddingFunction for MockEmbed {
    fn name(&self) -> &str {
        &self.name
    }
    fn source_type(&self) -> Result<Cow<'_, DataType>> {
    fn source_type(&self) -> Result<Cow<DataType>> {
        Ok(Cow::Borrowed(&self.source_type))
    }
    fn dest_type(&self) -> Result<Cow<'_, DataType>> {
    fn dest_type(&self) -> Result<Cow<DataType>> {
        Ok(Cow::Borrowed(&self.dest_type))
    }
    fn compute_source_embeddings(&self, source: Arc<dyn Array>) -> Result<Arc<dyn Array>> {

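The only change in this hunk is spelling out the elided lifetime: `Cow<DataType>` and `Cow<'_, DataType>` name the same type when returned from a `&self` method, and the `'_` form is what the elided_lifetimes_in_paths lint asks for. A minimal standalone illustration, with Holder as a placeholder type:

use std::borrow::Cow;

struct Holder {
    value: String,
}

impl Holder {
    // Elided form: the returned Cow borrows from `self` for the `&self` lifetime.
    fn implicit(&self) -> Cow<str> {
        Cow::Borrowed(&self.value)
    }

    // Explicit `'_` makes the same borrow visible in the signature.
    fn explicit(&self) -> Cow<'_, str> {
        Cow::Borrowed(&self.value)
    }
}

fn main() {
    let h = Holder { value: "abc".to_string() };
    // Both signatures produce the same borrowed value.
    assert_eq!(h.implicit(), h.explicit());
}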