Compare commits

1 commit

Author: Lance Release | SHA1: d8791ab804 | Message: Bump version: 0.20.0 → 0.20.1-beta.0 | Date: 2025-06-16 16:29:43 +00:00

44 changed files with 700 additions and 928 deletions


@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.21.1-beta.0"
+current_version = "0.20.1-beta.0"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.
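A quick way to sanity-check the verbose parse pattern above is to run the visible fragment through Python's re module. This is only a sketch: the patch and pre-release parts of the pattern are elided in this hunk, so the snippet reconstructs just the two groups shown.

    import re

    # Only the major/minor fragments appear in this hunk; the rest is elided.
    parse = re.compile(
        r"""(?x)
        (?P<major>0|[1-9]\d*)\.
        (?P<minor>0|[1-9]\d*)
        """
    )
    m = parse.match("0.20.1-beta.0")
    assert m.group("major") == "0" and m.group("minor") == "20"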


@@ -541,18 +541,10 @@ jobs:
         run: npm deprecate vectordb "Use @lancedb/lancedb instead."
       - name: Checkout
         uses: actions/checkout@v4
-        with:
-          ref: main
       - name: Update package-lock.json
-        run: |
-          git config user.name 'Lance Release'
-          git config user.email 'lance-dev@lancedb.com'
-          bash ci/update_lockfiles.sh
+        run: bash ci/update_lockfiles.sh
       - name: Push new commit
         uses: ad-m/github-push-action@master
-        with:
-          github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
-          branch: main
       - name: Notify Slack Action
         uses: ravsamhq/notify-slack-action@2.3.0
         if: ${{ always() }}

Cargo.lock (generated): file diff suppressed because it is too large.


@@ -21,14 +21,14 @@ categories = ["database-implementations"]
 rust-version = "1.78.0"

 [workspace.dependencies]
-lance = { "version" = "=0.31.1", tag="v0.31.1-beta.2", git="https://github.com/lancedb/lance.git", features = ["dynamodb"] }
-lance-io = { "version" = "=0.31.1", tag="v0.31.1-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-index = { "version" = "=0.31.1", tag="v0.31.1-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-linalg = { "version" = "=0.31.1", tag="v0.31.1-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-table = { "version" = "=0.31.1", tag="v0.31.1-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-testing = { "version" = "=0.31.1", tag="v0.31.1-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-datafusion = { "version" = "=0.31.1", tag="v0.31.1-beta.2", git="https://github.com/lancedb/lance.git" }
-lance-encoding = { "version" = "=0.31.1", tag="v0.31.1-beta.2", git="https://github.com/lancedb/lance.git" }
+lance = { "version" = "=0.29.1", "features" = ["dynamodb"], tag = "v0.29.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-io = { version = "=0.29.1", tag = "v0.29.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-index = { version = "=0.29.1", tag = "v0.29.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-linalg = { version = "=0.29.1", tag = "v0.29.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-table = { version = "=0.29.1", tag = "v0.29.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-testing = { version = "=0.29.1", tag = "v0.29.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-datafusion = { version = "=0.29.1", tag = "v0.29.1-beta.1", git="https://github.com/lancedb/lance.git" }
+lance-encoding = { version = "=0.29.1", tag = "v0.29.1-beta.1", git="https://github.com/lancedb/lance.git" }
 # Note that this one does not include pyarrow
 arrow = { version = "55.1", optional = false }
 arrow-array = "55.1"
@@ -39,20 +39,20 @@ arrow-schema = "55.1"
 arrow-arith = "55.1"
 arrow-cast = "55.1"
 async-trait = "0"
-datafusion = { version = "48.0", default-features = false }
-datafusion-catalog = "48.0"
-datafusion-common = { version = "48.0", default-features = false }
-datafusion-execution = "48.0"
-datafusion-expr = "48.0"
-datafusion-physical-plan = "48.0"
+datafusion = { version = "47.0", default-features = false }
+datafusion-catalog = "47.0"
+datafusion-common = { version = "47.0", default-features = false }
+datafusion-execution = "47.0"
+datafusion-expr = "47.0"
+datafusion-physical-plan = "47.0"
 env_logger = "0.11"
-half = { "version" = "2.6.0", default-features = false, features = [
+half = { "version" = "=2.5.0", default-features = false, features = [
     "num-traits",
 ] }
 futures = "0"
 log = "0.4"
 moka = { version = "0.12", features = ["future"] }
-object_store = "0.12.0"
+object_store = "0.11.0"
 pin-project = "1.0.7"
 snafu = "0.8"
 url = "2"


@@ -428,7 +428,7 @@
     "\n",
     "**Why?** \n",
     "Embedding the UFO dataset and ingesting it into LanceDB takes **~2 hours on a T4 GPU**. To save time: \n",
-    "- **Use the pre-prepared table with index created** (provided below) to proceed directly to **Step 7**: search. \n",
+    "- **Use the pre-prepared table with index created ** (provided below) to proceed directly to step7: search. \n",
     "- **Step 5a** contains the full ingestion code for reference (run it only if necessary). \n",
     "- **Step 6** contains the details on creating the index on the multivector column"
   ]
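For readers of the notebook cell above: skipping ingestion just means connecting to the prepared dataset and opening the table. A minimal sketch, where the database path, table name, and vector dimension are illustrative assumptions rather than values taken from this diff:

    import lancedb

    # Connect to the directory holding the pre-prepared table (path assumed).
    db = lancedb.connect("./lancedb")
    tbl = db.open_table("ufo")  # table name assumed

    # Proceed directly to search (Step 7) against the multivector column;
    # a multivector query is a list of query vectors.
    results = tbl.search([[0.1] * 128]).limit(5).to_pandas()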


@@ -8,7 +8,7 @@
   <parent>
     <groupId>com.lancedb</groupId>
     <artifactId>lancedb-parent</artifactId>
-    <version>0.21.1-beta.0</version>
+    <version>0.20.1-beta.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>


@@ -6,7 +6,7 @@
   <groupId>com.lancedb</groupId>
   <artifactId>lancedb-parent</artifactId>
-  <version>0.21.1-beta.0</version>
+  <version>0.20.1-beta.0</version>
   <packaging>pom</packaging>
   <name>LanceDB Parent</name>

node/package-lock.json (generated)

@@ -1,12 +1,12 @@
 {
   "name": "vectordb",
-  "version": "0.21.1-beta.0",
+  "version": "0.20.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "vectordb",
-      "version": "0.21.1-beta.0",
+      "version": "0.20.0",
       "cpu": [
         "x64",
         "arm64"
@@ -52,11 +52,11 @@
         "uuid": "^9.0.0"
       },
       "optionalDependencies": {
-        "@lancedb/vectordb-darwin-arm64": "0.21.1-beta.0",
-        "@lancedb/vectordb-darwin-x64": "0.21.1-beta.0",
-        "@lancedb/vectordb-linux-arm64-gnu": "0.21.1-beta.0",
-        "@lancedb/vectordb-linux-x64-gnu": "0.21.1-beta.0",
-        "@lancedb/vectordb-win32-x64-msvc": "0.21.1-beta.0"
+        "@lancedb/vectordb-darwin-arm64": "0.20.0",
+        "@lancedb/vectordb-darwin-x64": "0.20.0",
+        "@lancedb/vectordb-linux-arm64-gnu": "0.20.0",
+        "@lancedb/vectordb-linux-x64-gnu": "0.20.0",
+        "@lancedb/vectordb-win32-x64-msvc": "0.20.0"
       },
       "peerDependencies": {
         "@apache-arrow/ts": "^14.0.2",
@@ -327,65 +327,60 @@
       }
     },
     "node_modules/@lancedb/vectordb-darwin-arm64": {
-      "version": "0.21.1-beta.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.21.1-beta.0.tgz",
-      "integrity": "sha512-easypFtN4rFFsSNumFLK/VEhD2BVp+jl6ysICGyutjD/UEiulVdhixBkK5miJOfu/1p67Rjit5C8u3acpX+k2g==",
+      "version": "0.20.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.20.0.tgz",
+      "integrity": "sha512-PEL4vFY42PaWPPnOfOcFBv1E+zumhZPMlQW7/M00ZA8O2uKiTc1xhajhaPcwVDZBYo36SRSIxUz2eYjXWA9sIw==",
       "cpu": [
         "arm64"
       ],
-      "license": "Apache-2.0",
       "optional": true,
       "os": [
         "darwin"
       ]
     },
     "node_modules/@lancedb/vectordb-darwin-x64": {
-      "version": "0.21.1-beta.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.21.1-beta.0.tgz",
-      "integrity": "sha512-ez//lKtXu7EWgZlUYgwBM2We4/ty8rOtkDMF3RlveWJAKn+zNX0UM3vTa9W7WbCcBn9Ycs3eQGrBvb0iYFIDgw==",
+      "version": "0.20.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.20.0.tgz",
+      "integrity": "sha512-4A1f9DiyGhziN9P81jSmMgzXSc1XXM9bIJw5q/b2NmDoiqIr8tYv1FKdm0JDhMYjtnzBeNpc67gVy3GlGCuUWA==",
       "cpu": [
         "x64"
       ],
-      "license": "Apache-2.0",
       "optional": true,
       "os": [
         "darwin"
       ]
     },
     "node_modules/@lancedb/vectordb-linux-arm64-gnu": {
-      "version": "0.21.1-beta.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.21.1-beta.0.tgz",
-      "integrity": "sha512-T+vfr3A/59V8JMB5vonUmFDE8Vcf7Qe+DhQMf6kUlQxx80TujMeTdkaOf9/zBAopN2T8Y2h+GNScjl/WomYOFg==",
+      "version": "0.20.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.20.0.tgz",
+      "integrity": "sha512-A3teZC/zU0tccluIJZsTasP8vBQWhXsmvLOo9UopSeyCrA1sR2vEyvXV9hMRJo7+9QjOrYFLiFWPjXEdFb+/1Q==",
       "cpu": [
         "arm64"
       ],
-      "license": "Apache-2.0",
       "optional": true,
       "os": [
         "linux"
       ]
     },
     "node_modules/@lancedb/vectordb-linux-x64-gnu": {
-      "version": "0.21.1-beta.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.21.1-beta.0.tgz",
-      "integrity": "sha512-FpDd4g2+xGrU41gywx4KFPGOlpBZq3VrE+4BBiTrRW6IO5Kbs2Mmq7ufJuDLlLqPs6ZQ5/Wlbcq5PmdRSoeq8A==",
+      "version": "0.20.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.20.0.tgz",
+      "integrity": "sha512-uREL9YF5iaeyfYh+5uvkSLQquFXYQoJyuDMPMZTwOE/Zghgw3lRl6KHIoMVCOfw+S8tkeyzU8UR4zgrbymbPGg==",
      "cpu": [
        "x64"
      ],
-      "license": "Apache-2.0",
       "optional": true,
       "os": [
         "linux"
       ]
     },
     "node_modules/@lancedb/vectordb-win32-x64-msvc": {
-      "version": "0.21.1-beta.0",
-      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.21.1-beta.0.tgz",
-      "integrity": "sha512-SEKHecFpgODmrUsAE8pBLu8OMKnAx97Ap0FrH6AGGglJKAVirrrg9BKSPfmHMZCvyPSHzG5TUMxhtNm+Ibg5DQ==",
+      "version": "0.20.0",
+      "resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.20.0.tgz",
+      "integrity": "sha512-0G5FD8X9S70hH4QK4S2m7TrWCIlVr4vox4Rjhfqdxk/5QWwYVT6WltvPgTJlektI7sUWeioDNmluHzqLZKDlHQ==",
       "cpu": [
         "x64"
       ],
-      "license": "Apache-2.0",
       "optional": true,
       "os": [
         "win32"


@@ -1,6 +1,6 @@
 {
   "name": "vectordb",
-  "version": "0.21.1-beta.0",
+  "version": "0.20.1-beta.0",
   "description": " Serverless, low-latency vector database for AI applications",
   "private": false,
   "main": "dist/index.js",
@@ -89,10 +89,10 @@
     }
   },
   "optionalDependencies": {
-    "@lancedb/vectordb-darwin-x64": "0.21.1-beta.0",
-    "@lancedb/vectordb-darwin-arm64": "0.21.1-beta.0",
-    "@lancedb/vectordb-linux-x64-gnu": "0.21.1-beta.0",
-    "@lancedb/vectordb-linux-arm64-gnu": "0.21.1-beta.0",
-    "@lancedb/vectordb-win32-x64-msvc": "0.21.1-beta.0"
+    "@lancedb/vectordb-darwin-x64": "0.20.1-beta.0",
+    "@lancedb/vectordb-darwin-arm64": "0.20.1-beta.0",
+    "@lancedb/vectordb-linux-x64-gnu": "0.20.1-beta.0",
+    "@lancedb/vectordb-linux-arm64-gnu": "0.20.1-beta.0",
+    "@lancedb/vectordb-win32-x64-msvc": "0.20.1-beta.0"
   }
 }


@@ -1,7 +1,7 @@
 [package]
 name = "lancedb-nodejs"
 edition.workspace = true
-version = "0.21.1-beta.0"
+version = "0.20.1-beta.0"
 license.workspace = true
 description.workspace = true
 repository.workspace = true


@@ -592,14 +592,14 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
       ).rejects.toThrow("column vector was missing");
     });

-    it("will skip embedding application if already applied", async function () {
+    it("will provide a nice error if run twice", async function () {
       const records = sampleRecords();
       const table = await convertToTable(records, dummyEmbeddingConfig);
       // fromTableToBuffer will try and apply the embeddings again
-      // but should skip since the column already has non-null values
-      const result = await fromTableToBuffer(table, dummyEmbeddingConfig);
-      expect(result.byteLength).toBeGreaterThan(0);
+      await expect(
+        fromTableToBuffer(table, dummyEmbeddingConfig),
+      ).rejects.toThrow("already existed");
     });
   });


@@ -368,9 +368,9 @@ describe("merge insert", () => {
       { a: 4, b: "z" },
     ];
-    const result = (await table.toArrow()).toArray().sort((a, b) => a.a - b.a);
-
-    expect(result.map((row) => ({ ...row }))).toEqual(expected);
+    expect(
+      JSON.parse(JSON.stringify((await table.toArrow()).toArray())),
+    ).toEqual(expected);
   });
   test("conditional update", async () => {
     const newData = [
@@ -1650,25 +1650,13 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
       expect(resultSet.has("fob")).toBe(true);
       expect(resultSet.has("fo")).toBe(true);
       expect(resultSet.has("food")).toBe(true);
-
-      const prefixResults = await table
-        .search(
-          new MatchQuery("foo", "text", { fuzziness: 3, prefixLength: 3 }),
-        )
-        .toArray();
-      expect(prefixResults.length).toBe(2);
-      const resultSet2 = new Set(prefixResults.map((r) => r.text));
-      expect(resultSet2.has("foo")).toBe(true);
-      expect(resultSet2.has("food")).toBe(true);
     });

     test("full text search boolean query", async () => {
       const db = await connect(tmpDir.name);
       const data = [
-        { text: "The cat and dog are playing" },
-        { text: "The cat is sleeping" },
-        { text: "The dog is barking" },
-        { text: "The dog chases the cat" },
+        { text: "hello world", vector: [0.1, 0.2, 0.3] },
+        { text: "goodbye world", vector: [0.4, 0.5, 0.6] },
       ];
       const table = await db.createTable("test", data);
       await table.createIndex("text", {
@@ -1678,32 +1666,22 @@ describe.each([arrow15, arrow16, arrow17, arrow18])(
       const shouldResults = await table
         .search(
           new BooleanQuery([
-            [Occur.Should, new MatchQuery("cat", "text")],
-            [Occur.Should, new MatchQuery("dog", "text")],
+            [Occur.Should, new MatchQuery("hello", "text")],
+            [Occur.Should, new MatchQuery("goodbye", "text")],
           ]),
         )
         .toArray();
-      expect(shouldResults.length).toBe(4);
+      expect(shouldResults.length).toBe(2);
       const mustResults = await table
         .search(
           new BooleanQuery([
-            [Occur.Must, new MatchQuery("cat", "text")],
-            [Occur.Must, new MatchQuery("dog", "text")],
+            [Occur.Must, new MatchQuery("hello", "text")],
+            [Occur.Must, new MatchQuery("world", "text")],
           ]),
         )
         .toArray();
-      expect(mustResults.length).toBe(2);
-      const mustNotResults = await table
-        .search(
-          new BooleanQuery([
-            [Occur.Must, new MatchQuery("cat", "text")],
-            [Occur.MustNot, new MatchQuery("dog", "text")],
-          ]),
-        )
-        .toArray();
-      expect(mustNotResults.length).toBe(1);
+      expect(mustResults.length).toBe(1);
     });

     test.each([


@@ -417,9 +417,7 @@ function inferSchema(
     } else {
       const inferredType = inferType(value, path, opts);
       if (inferredType === undefined) {
-        throw new Error(`Failed to infer data type for field ${path.join(
-          ".",
-        )} at row ${rowI}. \
+        throw new Error(`Failed to infer data type for field ${path.join(".")} at row ${rowI}. \
 Consider providing an explicit schema.`);
       }
       pathTree.set(path, inferredType);
@@ -801,17 +799,11 @@ async function applyEmbeddingsFromMetadata(
       `Cannot apply embedding function because the source column '${functionEntry.sourceColumn}' was not present in the data`,
     );
   }
-  // Check if destination column exists and handle accordingly
   if (columns[destColumn] !== undefined) {
-    const existingColumn = columns[destColumn];
-    // If the column exists but is all null, we can fill it with embeddings
-    if (existingColumn.nullCount !== existingColumn.length) {
-      // Column has non-null values, skip embedding application
-      continue;
-    }
+    throw new Error(
+      `Attempt to apply embeddings to table failed because column ${destColumn} already existed`,
+    );
   }
   if (table.batches.length > 1) {
     throw new Error(
       "Internal error: `makeArrowTable` unexpectedly created a table with more than one batch",
@@ -911,23 +903,11 @@ async function applyEmbeddings<T>(
       );
     }
   } else {
-    // Check if destination column exists and handle accordingly
     if (Object.prototype.hasOwnProperty.call(newColumns, destColumn)) {
-      const existingColumn = newColumns[destColumn];
-      // If the column exists but is all null, we can fill it with embeddings
-      if (existingColumn.nullCount !== existingColumn.length) {
-        // Column has non-null values, skip embedding application and return table as-is
-        let newTable = new ArrowTable(newColumns);
-        if (schema != null) {
-          newTable = alignTable(newTable, schema as Schema);
-        }
-        return new ArrowTable(
-          new Schema(newTable.schema.fields, schemaMetadata),
-          newTable.batches,
-        );
-      }
+      throw new Error(
+        `Attempt to apply embeddings to table failed because column ${destColumn} already existed`,
+      );
     }
     if (table.batches.length > 1) {
       throw new Error(
         "Internal error: `makeArrowTable` unexpectedly created a table with more than one batch",


@@ -812,12 +812,10 @@ export enum Operator {
  *
  * - `Must`: The term must be present in the document.
  * - `Should`: The term should contribute to the document score, but is not required.
- * - `MustNot`: The term must not be present in the document.
  */
 export enum Occur {
-  Should = "SHOULD",
   Must = "MUST",
-  MustNot = "MUST_NOT",
+  Should = "SHOULD",
 }

 /**
@@ -858,7 +856,6 @@ export class MatchQuery implements FullTextQuery {
    * - `fuzziness`: The fuzziness level for the query (default is 0).
    * - `maxExpansions`: The maximum number of terms to consider for fuzzy matching (default is 50).
    * - `operator`: The logical operator to use for combining terms in the query (default is "OR").
-   * - `prefixLength`: The number of beginning characters being unchanged for fuzzy matching.
    */
   constructor(
     query: string,
@@ -868,7 +865,6 @@ export class MatchQuery implements FullTextQuery {
       fuzziness?: number;
       maxExpansions?: number;
       operator?: Operator;
-      prefixLength?: number;
     },
   ) {
     let fuzziness = options?.fuzziness;
@@ -882,7 +878,6 @@ export class MatchQuery implements FullTextQuery {
       fuzziness,
       options?.maxExpansions ?? 50,
       options?.operator ?? Operator.Or,
-      options?.prefixLength ?? 0,
     );
   }


@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-arm64",
-  "version": "0.21.1-beta.0",
+  "version": "0.20.1-beta.0",
   "os": ["darwin"],
   "cpu": ["arm64"],
   "main": "lancedb.darwin-arm64.node",


@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-darwin-x64",
-  "version": "0.21.1-beta.0",
+  "version": "0.20.1-beta.0",
   "os": ["darwin"],
   "cpu": ["x64"],
   "main": "lancedb.darwin-x64.node",


@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-gnu",
-  "version": "0.21.1-beta.0",
+  "version": "0.20.1-beta.0",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-gnu.node",


@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-arm64-musl",
-  "version": "0.21.1-beta.0",
+  "version": "0.20.1-beta.0",
   "os": ["linux"],
   "cpu": ["arm64"],
   "main": "lancedb.linux-arm64-musl.node",


@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-gnu",
-  "version": "0.21.1-beta.0",
+  "version": "0.20.1-beta.0",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-gnu.node",


@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-linux-x64-musl",
-  "version": "0.21.1-beta.0",
+  "version": "0.20.1-beta.0",
   "os": ["linux"],
   "cpu": ["x64"],
   "main": "lancedb.linux-x64-musl.node",


@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-arm64-msvc",
-  "version": "0.21.1-beta.0",
+  "version": "0.20.1-beta.0",
   "os": [
     "win32"
   ],


@@ -1,6 +1,6 @@
 {
   "name": "@lancedb/lancedb-win32-x64-msvc",
-  "version": "0.21.1-beta.0",
+  "version": "0.20.1-beta.0",
   "os": ["win32"],
   "cpu": ["x64"],
   "main": "lancedb.win32-x64-msvc.node",


@@ -1,12 +1,12 @@
 {
   "name": "@lancedb/lancedb",
-  "version": "0.21.1-beta.0",
+  "version": "0.20.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@lancedb/lancedb",
-      "version": "0.21.1-beta.0",
+      "version": "0.20.0",
       "cpu": [
         "x64",
         "arm64"


@@ -11,7 +11,7 @@
     "ann"
   ],
   "private": false,
-  "version": "0.21.1-beta.0",
+  "version": "0.20.1-beta.0",
   "main": "dist/index.js",
   "exports": {
     ".": "./dist/index.js",


@@ -335,7 +335,6 @@ impl JsFullTextQuery {
         fuzziness: Option<u32>,
         max_expansions: u32,
         operator: String,
-        prefix_length: u32,
     ) -> napi::Result<Self> {
         Ok(Self {
             inner: MatchQuery::new(query)
@@ -348,7 +347,6 @@ impl JsFullTextQuery {
                         napi::Error::from_reason(format!("Invalid operator: {}", e))
                     })?,
                 )
-                .with_prefix_length(prefix_length)
                 .into(),
         })
     }


@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.24.1-beta.1"
+current_version = "0.23.1-beta.0"
 parse = """(?x)
     (?P<major>0|[1-9]\\d*)\\.
     (?P<minor>0|[1-9]\\d*)\\.


@@ -1,6 +1,6 @@
 [package]
 name = "lancedb-python"
-version = "0.24.1-beta.1"
+version = "0.23.1-beta.0"
 edition.workspace = true
 description = "Python bindings for LanceDB"
 license.workspace = true


@@ -85,7 +85,7 @@ embeddings = [
     "boto3>=1.28.57",
     "awscli>=1.29.57",
     "botocore>=1.31.57",
-    "ollama>=0.3.0",
+    "ollama",
     "ibm-watsonx-ai>=1.1.2",
 ]
 azure = ["adlfs>=2024.2.0"]


@@ -2,15 +2,14 @@
 # SPDX-FileCopyrightText: Copyright The LanceDB Authors
 from functools import cached_property
-from typing import TYPE_CHECKING, List, Optional, Sequence, Union
-
-import numpy as np
+from typing import TYPE_CHECKING, List, Optional, Union

 from ..util import attempt_import_or_raise
 from .base import TextEmbeddingFunction
 from .registry import register

 if TYPE_CHECKING:
+    import numpy as np
     import ollama
@@ -29,21 +28,23 @@ class OllamaEmbeddings(TextEmbeddingFunction):
     keep_alive: Optional[Union[float, str]] = None
     ollama_client_kwargs: Optional[dict] = {}

-    def ndims(self) -> int:
+    def ndims(self):
         return len(self.generate_embeddings(["foo"])[0])

-    def _compute_embedding(self, text: Sequence[str]) -> Sequence[Sequence[float]]:
-        response = self._ollama_client.embed(
-            model=self.name,
-            input=text,
-            options=self.options,
-            keep_alive=self.keep_alive,
-        )
-        return response.embeddings
+    def _compute_embedding(self, text) -> Union["np.array", None]:
+        return (
+            self._ollama_client.embeddings(
+                model=self.name,
+                prompt=text,
+                options=self.options,
+                keep_alive=self.keep_alive,
+            )["embedding"]
+            or None
+        )

     def generate_embeddings(
-        self, texts: Union[List[str], np.ndarray]
-    ) -> list[Union[np.array, None]]:
+        self, texts: Union[List[str], "np.ndarray"]
+    ) -> list[Union["np.array", None]]:
         """
         Get the embeddings for the given texts
@@ -53,8 +54,8 @@ class OllamaEmbeddings(TextEmbeddingFunction):
             The texts to embed
         """
         # TODO retry, rate limit, token limit
-        embeddings = self._compute_embedding(texts)
-        return list(embeddings)
+        embeddings = [self._compute_embedding(text) for text in texts]
+        return embeddings

     @cached_property
     def _ollama_client(self) -> "ollama.Client":
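The hunk above swaps the batch `client.embed(...)` call for the older per-text `client.embeddings(...)` call. For context, a minimal usage sketch of this embedding function through LanceDB's registry, assuming it is registered under the name "ollama", that a local Ollama server is running, and that "nomic-embed-text" (an illustrative model name) is pulled:

    import lancedb
    from lancedb.embeddings import get_registry
    from lancedb.pydantic import LanceModel, Vector

    # Model name is an assumption; any Ollama embedding model works.
    ollama_fn = get_registry().get("ollama").create(name="nomic-embed-text")

    class Doc(LanceModel):
        text: str = ollama_fn.SourceField()
        vector: Vector(ollama_fn.ndims()) = ollama_fn.VectorField()  # ndims() calls the server

    db = lancedb.connect("./lancedb")
    tbl = db.create_table("docs", schema=Doc)
    tbl.add([{"text": "unidentified flying object"}])  # embedded on ingest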


@@ -101,9 +101,8 @@ class FullTextOperator(str, Enum):

 class Occur(str, Enum):
-    SHOULD = "SHOULD"
     MUST = "MUST"
-    MUST_NOT = "MUST_NOT"
+    SHOULD = "SHOULD"

 @pydantic.dataclasses.dataclass
@@ -182,9 +181,6 @@ class MatchQuery(FullTextQuery):
         Can be either `AND` or `OR`.
         If `AND`, all terms in the query must match.
         If `OR`, at least one term in the query must match.
-    prefix_length : int, optional
-        The number of beginning characters being unchanged for fuzzy matching.
-        This is useful to achieve prefix matching.
     """

     query: str
@@ -193,7 +189,6 @@ class MatchQuery(FullTextQuery):
     fuzziness: int = pydantic.Field(0, kw_only=True)
     max_expansions: int = pydantic.Field(50, kw_only=True)
     operator: FullTextOperator = pydantic.Field(FullTextOperator.OR, kw_only=True)
-    prefix_length: int = pydantic.Field(0, kw_only=True)

     def query_type(self) -> FullTextQueryType:
         return FullTextQueryType.MATCH
@@ -1451,13 +1446,10 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):
         query = self._query
         if self._phrase_query:
-            if isinstance(query, str):
-                if not query.startswith('"') or not query.endswith('"'):
-                    query = f'"{query}"'
-            elif isinstance(query, FullTextQuery) and not isinstance(
-                query, PhraseQuery
-            ):
-                raise TypeError("Please use PhraseQuery for phrase queries.")
+            raise NotImplementedError(
+                "Phrase query is not yet supported in Lance FTS. "
+                "Use tantivy-based index instead for now."
+            )
         query = self.to_query_object()
         results = self._table._execute_query(query, timeout=timeout)
         results = results.read_all()
@@ -3042,21 +3034,15 @@ class AsyncHybridQuery(AsyncQueryBase, AsyncVectorQueryBase):
     >>> asyncio.run(doctest_example())  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
     Vector Search Plan:
     ProjectionExec: expr=[vector@0 as vector, text@3 as text, _distance@2 as _distance]
     Take: columns="vector, _rowid, _distance, (text)"
     CoalesceBatchesExec: target_batch_size=1024
     GlobalLimitExec: skip=0, fetch=10
     FilterExec: _distance@2 IS NOT NULL
     SortExec: TopK(fetch=10), expr=[_distance@2 ASC NULLS LAST], preserve_partitioning=[false]
     KNNVectorDistance: metric=l2
     LanceScan: uri=..., projection=[vector], row_id=true, row_addr=false, ordered=false
-    <BLANKLINE>
     FTS Search Plan:
-    ProjectionExec: expr=[vector@2 as vector, text@3 as text, _score@1 as _score]
-    Take: columns="_rowid, _score, (vector), (text)"
-    CoalesceBatchesExec: target_batch_size=1024
-    GlobalLimitExec: skip=0, fetch=10
-    MatchQuery: query=hello
-    <BLANKLINE>
+    LanceScan: uri=..., projection=[vector, text], row_id=false, row_addr=false, ordered=true

     Parameters
     ----------
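On the `+` side of the Occur hunk above, only MUST and SHOULD exist (MUST_NOT is the `-` side's addition). A minimal composition sketch against that API, assuming a local table with a native (non-tantivy) FTS index on text:

    import lancedb
    from lancedb.query import BooleanQuery, MatchQuery, Occur

    db = lancedb.connect("./lancedb")  # illustrative path
    tbl = db.create_table(
        "bool_demo", data=[{"text": "cat"}, {"text": "cat dog"}], mode="overwrite"
    )
    tbl.create_fts_index("text", use_tantivy=False)

    # "cat" is required; "dog" only boosts scoring. Occur.MUST_NOT would raise
    # an AttributeError here, since this side of the diff does not define it.
    q = BooleanQuery(
        [
            (Occur.MUST, MatchQuery("cat", "text")),
            (Occur.SHOULD, MatchQuery("dog", "text")),
        ]
    )
    results = tbl.search(q).to_pandas()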


@@ -18,7 +18,7 @@ from lancedb._lancedb import (
     UpdateResult,
 )
 from lancedb.embeddings.base import EmbeddingFunctionConfig
-from lancedb.index import FTS, BTree, Bitmap, HnswSq, IvfFlat, IvfPq, LabelList
+from lancedb.index import FTS, BTree, Bitmap, HnswPq, HnswSq, IvfFlat, IvfPq, LabelList
 from lancedb.remote.db import LOOP
 import pyarrow as pa
@@ -186,8 +186,6 @@ class RemoteTable(Table):
         accelerator: Optional[str] = None,
         index_type="vector",
         wait_timeout: Optional[timedelta] = None,
-        *,
-        num_bits: int = 8,
     ):
         """Create an index on the table.
         Currently, the only parameters that matter are
@@ -222,6 +220,11 @@ class RemoteTable(Table):
         >>> table.create_index("l2", "vector")  # doctest: +SKIP
         """
+        if num_partitions is not None:
+            logging.warning(
+                "num_partitions is not supported on LanceDB cloud."
+                "This parameter will be tuned automatically."
+            )
         if num_sub_vectors is not None:
             logging.warning(
                 "num_sub_vectors is not supported on LanceDB cloud."
@@ -241,21 +244,13 @@ class RemoteTable(Table):
         index_type = index_type.upper()
         if index_type == "VECTOR" or index_type == "IVF_PQ":
-            config = IvfPq(
-                distance_type=metric,
-                num_partitions=num_partitions,
-                num_sub_vectors=num_sub_vectors,
-                num_bits=num_bits,
-            )
+            config = IvfPq(distance_type=metric)
         elif index_type == "IVF_HNSW_PQ":
-            raise ValueError(
-                "IVF_HNSW_PQ is not supported on LanceDB cloud."
-                "Please use IVF_HNSW_SQ instead."
-            )
+            config = HnswPq(distance_type=metric)
         elif index_type == "IVF_HNSW_SQ":
-            config = HnswSq(distance_type=metric, num_partitions=num_partitions)
+            config = HnswSq(distance_type=metric)
         elif index_type == "IVF_FLAT":
-            config = IvfFlat(distance_type=metric, num_partitions=num_partitions)
+            config = IvfFlat(distance_type=metric)
         else:
             raise ValueError(
                 f"Unknown vector index type: {index_type}. Valid options are"


@@ -827,7 +827,7 @@ class Table(ABC):
         ordering_field_names: Optional[Union[str, List[str]]] = None,
         replace: bool = False,
         writer_heap_size: Optional[int] = 1024 * 1024 * 1024,
-        use_tantivy: bool = False,
+        use_tantivy: bool = True,
         tokenizer_name: Optional[str] = None,
         with_position: bool = False,
         # tokenizer configs:
@@ -864,7 +864,7 @@ class Table(ABC):
         The tokenizer to use for the index. Can be "raw", "default" or the 2 letter
         language code followed by "_stem". So for english it would be "en_stem".
         For available languages see: https://docs.rs/tantivy/latest/tantivy/tokenizer/enum.Language.html
-    use_tantivy: bool, default False
+    use_tantivy: bool, default True
         If True, use the legacy full-text search implementation based on tantivy.
         If False, use the new full-text search implementation based on lance-index.
     with_position: bool, default False
@@ -1970,7 +1970,7 @@ class LanceTable(Table):
         ordering_field_names: Optional[Union[str, List[str]]] = None,
         replace: bool = False,
         writer_heap_size: Optional[int] = 1024 * 1024 * 1024,
-        use_tantivy: bool = False,
+        use_tantivy: bool = True,
         tokenizer_name: Optional[str] = None,
         with_position: bool = False,
         # tokenizer configs:
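Because the use_tantivy default flips between the two sides of this diff (False on the `-` side, True on the `+` side), callers that care about which FTS backend they get should pass it explicitly. A sketch with an assumed existing table:

    import lancedb

    db = lancedb.connect("./lancedb")  # illustrative local path
    tbl = db.open_table("docs")        # assumes a table with a "text" column

    # Pin the backend so the flipped default cannot change behavior silently.
    tbl.create_fts_index("text", use_tantivy=False, replace=True)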


@@ -6,7 +6,7 @@ import lancedb

 # --8<-- [end:import-lancedb]
 # --8<-- [start:import-numpy]
-from lancedb.query import BooleanQuery, BoostQuery, MatchQuery, Occur
+from lancedb.query import BoostQuery, MatchQuery
 import numpy as np
 import pyarrow as pa
@@ -191,15 +191,6 @@ def test_fts_fuzzy_query():
         "food",  # 1 insertion
     }
-
-    results = table.search(
-        MatchQuery("foo", "text", fuzziness=1, prefix_length=3)
-    ).to_pandas()
-    assert len(results) == 2
-    assert set(results["text"].to_list()) == {
-        "foo",
-        "food",
-    }

 @pytest.mark.skipif(
     os.name == "nt", reason="Need to fix https://github.com/lancedb/lance/issues/3905"
@@ -249,60 +240,6 @@ def test_fts_boost_query():
     )

-@pytest.mark.skipif(
-    os.name == "nt", reason="Need to fix https://github.com/lancedb/lance/issues/3905"
-)
-def test_fts_boolean_query(tmp_path):
-    uri = tmp_path / "boolean-example"
-    db = lancedb.connect(uri)
-    table = db.create_table(
-        "my_table_fts_boolean",
-        data=[
-            {"text": "The cat and dog are playing"},
-            {"text": "The cat is sleeping"},
-            {"text": "The dog is barking"},
-            {"text": "The dog chases the cat"},
-        ],
-        mode="overwrite",
-    )
-    table.create_fts_index("text", use_tantivy=False, replace=True)
-
-    # SHOULD
-    results = table.search(
-        MatchQuery("cat", "text") | MatchQuery("dog", "text")
-    ).to_pandas()
-    assert len(results) == 4
-    assert set(results["text"].to_list()) == {
-        "The cat and dog are playing",
-        "The cat is sleeping",
-        "The dog is barking",
-        "The dog chases the cat",
-    }
-
-    # MUST
-    results = table.search(
-        MatchQuery("cat", "text") & MatchQuery("dog", "text")
-    ).to_pandas()
-    assert len(results) == 2
-    assert set(results["text"].to_list()) == {
-        "The cat and dog are playing",
-        "The dog chases the cat",
-    }
-
-    # MUST NOT
-    results = table.search(
-        BooleanQuery(
-            [
-                (Occur.MUST, MatchQuery("cat", "text")),
-                (Occur.MUST_NOT, MatchQuery("dog", "text")),
-            ]
-        )
-    ).to_pandas()
-    assert len(results) == 1
-    assert set(results["text"].to_list()) == {
-        "The cat is sleeping",
-    }

 @pytest.mark.skipif(
     os.name == "nt", reason="Need to fix https://github.com/lancedb/lance/issues/3905"
 )


@@ -775,82 +775,6 @@ async def test_explain_plan_async(table_async: AsyncTable):
     assert "KNN" in plan

-@pytest.mark.asyncio
-async def test_explain_plan_fts(table_async: AsyncTable):
-    """Test explain plan for FTS queries"""
-    # Create FTS index
-    from lancedb.index import FTS
-
-    await table_async.create_index("text", config=FTS())
-
-    # Test pure FTS query
-    query = await table_async.search("dog", query_type="fts", fts_columns="text")
-    plan = await query.explain_plan()
-
-    # Should show FTS details (issue #2465 is now fixed)
-    assert "MatchQuery: query=dog" in plan
-    assert "GlobalLimitExec" in plan  # Default limit
-
-    # Test FTS query with limit
-    query_with_limit = await table_async.search(
-        "dog", query_type="fts", fts_columns="text"
-    )
-    plan_with_limit = await query_with_limit.limit(1).explain_plan()
-    assert "MatchQuery: query=dog" in plan_with_limit
-    assert "GlobalLimitExec: skip=0, fetch=1" in plan_with_limit
-
-    # Test FTS query with offset and limit
-    query_with_offset = await table_async.search(
-        "dog", query_type="fts", fts_columns="text"
-    )
-    plan_with_offset = await query_with_offset.offset(1).limit(1).explain_plan()
-    assert "MatchQuery: query=dog" in plan_with_offset
-    assert "GlobalLimitExec: skip=1, fetch=1" in plan_with_offset
-
-@pytest.mark.asyncio
-async def test_explain_plan_vector_with_limit_offset(table_async: AsyncTable):
-    """Test explain plan for vector queries with limit and offset"""
-    # Test vector query with limit
-    plan_with_limit = await (
-        table_async.query().nearest_to(pa.array([1, 2])).limit(1).explain_plan()
-    )
-    assert "KNN" in plan_with_limit
-    assert "GlobalLimitExec: skip=0, fetch=1" in plan_with_limit
-
-    # Test vector query with offset and limit
-    plan_with_offset = await (
-        table_async.query()
-        .nearest_to(pa.array([1, 2]))
-        .offset(1)
-        .limit(1)
-        .explain_plan()
-    )
-    assert "KNN" in plan_with_offset
-    assert "GlobalLimitExec: skip=1, fetch=1" in plan_with_offset
-
-@pytest.mark.asyncio
-async def test_explain_plan_with_filters(table_async: AsyncTable):
-    """Test explain plan for queries with filters"""
-    # Test vector query with filter
-    plan_with_filter = await (
-        table_async.query().nearest_to(pa.array([1, 2])).where("id = 1").explain_plan()
-    )
-    assert "KNN" in plan_with_filter
-    assert "FilterExec" in plan_with_filter
-
-    # Test FTS query with filter
-    from lancedb.index import FTS
-
-    await table_async.create_index("text", config=FTS())
-    query_fts_filter = await table_async.search(
-        "dog", query_type="fts", fts_columns="text"
-    )
-    plan_fts_filter = await query_fts_filter.where("id = 1").explain_plan()
-    assert "MatchQuery: query=dog" in plan_fts_filter
-    assert "FilterExec: id@" in plan_fts_filter  # Should show filter details

 @pytest.mark.asyncio
 async def test_query_camelcase_async(tmp_path):
     db = await lancedb.connect_async(tmp_path)


@@ -245,7 +245,7 @@ def test_s3_dynamodb_sync(s3_bucket: str, commit_table: str, monkeypatch):
         NotImplementedError,
         match="Full-text search is only supported on the local filesystem",
     ):
-        table.create_fts_index("x", use_tantivy=True)
+        table.create_fts_index("x")
     # make sure list tables still works
     assert db.table_names() == ["test_ddb_sync"]


@@ -50,9 +50,8 @@ impl FromPyObject<'_> for PyLanceDB<FtsQuery> {
         let fuzziness = ob.getattr("fuzziness")?.extract()?;
         let max_expansions = ob.getattr("max_expansions")?.extract()?;
         let operator = ob.getattr("operator")?.extract::<String>()?;
-        let prefix_length = ob.getattr("prefix_length")?.extract()?;

-        Ok(Self(
+        Ok(PyLanceDB(
             MatchQuery::new(query)
                 .with_column(Some(column))
                 .with_boost(boost)
@@ -61,7 +60,6 @@ impl FromPyObject<'_> for PyLanceDB<FtsQuery> {
                 .with_operator(Operator::try_from(operator.as_str()).map_err(|e| {
                     PyValueError::new_err(format!("Invalid operator: {}", e))
                 })?)
-                .with_prefix_length(prefix_length)
                 .into(),
         ))
     }
@@ -70,7 +68,7 @@ impl FromPyObject<'_> for PyLanceDB<FtsQuery> {
             let column = ob.getattr("column")?.extract()?;
             let slop = ob.getattr("slop")?.extract()?;
-            Ok(Self(
+            Ok(PyLanceDB(
                 PhraseQuery::new(query)
                     .with_column(Some(column))
                     .with_slop(slop)
@@ -78,10 +76,10 @@ impl FromPyObject<'_> for PyLanceDB<FtsQuery> {
             ))
         }
         "BoostQuery" => {
-            let positive: Self = ob.getattr("positive")?.extract()?;
-            let negative: Self = ob.getattr("negative")?.extract()?;
+            let positive: PyLanceDB<FtsQuery> = ob.getattr("positive")?.extract()?;
+            let negative: PyLanceDB<FtsQuery> = ob.getattr("negative")?.extract()?;
             let negative_boost = ob.getattr("negative_boost")?.extract()?;
-            Ok(Self(
+            Ok(PyLanceDB(
                 BoostQuery::new(positive.0, negative.0, negative_boost).into(),
             ))
         }
@@ -103,17 +101,18 @@ impl FromPyObject<'_> for PyLanceDB<FtsQuery> {
             let op = Operator::try_from(operator.as_str())
                 .map_err(|e| PyValueError::new_err(format!("Invalid operator: {}", e)))?;
-            Ok(Self(q.with_operator(op).into()))
+            Ok(PyLanceDB(q.with_operator(op).into()))
         }
         "BooleanQuery" => {
-            let queries: Vec<(String, Self)> = ob.getattr("queries")?.extract()?;
+            let queries: Vec<(String, PyLanceDB<FtsQuery>)> =
+                ob.getattr("queries")?.extract()?;
             let mut sub_queries = Vec::with_capacity(queries.len());
             for (occur, q) in queries {
                 let occur = Occur::try_from(occur.as_str())
                     .map_err(|e| PyValueError::new_err(e.to_string()))?;
                 sub_queries.push((occur, q.0));
             }
-            Ok(Self(BooleanQuery::new(sub_queries).into()))
+            Ok(PyLanceDB(BooleanQuery::new(sub_queries).into()))
         }
         name => Err(PyValueError::new_err(format!(
             "Unsupported FTS query type: {}",
@@ -140,8 +139,7 @@ impl<'py> IntoPyObject<'py> for PyLanceDB<FtsQuery> {
                 kwargs.set_item("boost", query.boost)?;
                 kwargs.set_item("fuzziness", query.fuzziness)?;
                 kwargs.set_item("max_expansions", query.max_expansions)?;
-                kwargs.set_item::<_, &str>("operator", query.operator.into())?;
-                kwargs.set_item("prefix_length", query.prefix_length)?;
+                kwargs.set_item("operator", operator_to_str(query.operator))?;
                 namespace
                     .getattr(intern!(py, "MatchQuery"))?
                     .call((query.terms, query.column.unwrap()), Some(&kwargs))
@@ -154,8 +152,8 @@ impl<'py> IntoPyObject<'py> for PyLanceDB<FtsQuery> {
                     .call((query.terms, query.column.unwrap()), Some(&kwargs))
             }
             FtsQuery::Boost(query) => {
-                let positive = Self(query.positive.as_ref().clone()).into_pyobject(py)?;
-                let negative = Self(query.negative.as_ref().clone()).into_pyobject(py)?;
+                let positive = PyLanceDB(query.positive.as_ref().clone()).into_pyobject(py)?;
+                let negative = PyLanceDB(query.negative.as_ref().clone()).into_pyobject(py)?;
                 let kwargs = PyDict::new(py);
                 kwargs.set_item("negative_boost", query.negative_boost)?;
                 namespace
@@ -171,25 +169,19 @@ impl<'py> IntoPyObject<'py> for PyLanceDB<FtsQuery> {
                     .unzip();
                 let kwargs = PyDict::new(py);
                 kwargs.set_item("boosts", boosts)?;
-                kwargs.set_item::<_, &str>("operator", first.operator.into())?;
+                kwargs.set_item("operator", operator_to_str(first.operator))?;
                 namespace
                     .getattr(intern!(py, "MultiMatchQuery"))?
                     .call((first.terms.clone(), columns), Some(&kwargs))
             }
             FtsQuery::Boolean(query) => {
-                let mut queries: Vec<(&str, Bound<'py, PyAny>)> = Vec::with_capacity(
-                    query.should.len() + query.must.len() + query.must_not.len(),
-                );
-                for q in query.should {
-                    queries.push((Occur::Should.into(), Self(q).into_pyobject(py)?));
-                }
+                let mut queries = Vec::with_capacity(query.must.len() + query.should.len());
                 for q in query.must {
-                    queries.push((Occur::Must.into(), Self(q).into_pyobject(py)?));
+                    queries.push((occur_to_str(Occur::Must), PyLanceDB(q).into_pyobject(py)?));
                 }
-                for q in query.must_not {
-                    queries.push((Occur::MustNot.into(), Self(q).into_pyobject(py)?));
+                for q in query.should {
+                    queries.push((occur_to_str(Occur::Should), PyLanceDB(q).into_pyobject(py)?));
                 }
                 namespace
                     .getattr(intern!(py, "BooleanQuery"))?
                     .call1((queries,))
@@ -198,6 +190,20 @@ impl<'py> IntoPyObject<'py> for PyLanceDB<FtsQuery> {
     }
 }

+fn operator_to_str(op: Operator) -> &'static str {
+    match op {
+        Operator::And => "AND",
+        Operator::Or => "OR",
+    }
+}
+
+fn occur_to_str(occur: Occur) -> &'static str {
+    match occur {
+        Occur::Must => "MUST",
+        Occur::Should => "SHOULD",
+    }
+}
+
 // Python representation of query vector(s)
 #[derive(Clone)]
 pub struct PyQueryVectors(Vec<Arc<dyn Array>>);
@@ -562,10 +568,7 @@ impl FTSQuery {
     }

     pub fn explain_plan(self_: PyRef<'_, Self>, verbose: bool) -> PyResult<Bound<'_, PyAny>> {
-        let inner = self_
-            .inner
-            .clone()
-            .full_text_search(self_.fts_query.clone());
+        let inner = self_.inner.clone();
         future_into_py(self_.py(), async move {
             inner
                 .explain_plan(verbose)
@@ -575,10 +578,7 @@ impl FTSQuery {
     }

     pub fn analyze_plan(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
-        let inner = self_
-            .inner
-            .clone()
-            .full_text_search(self_.fts_query.clone());
+        let inner = self_.inner.clone();
         future_into_py(self_.py(), async move {
             inner
                 .analyze_plan()


@@ -1,6 +1,6 @@
 [package]
 name = "lancedb-node"
-version = "0.21.1-beta.0"
+version = "0.20.1-beta.0"
 description = "Serverless, low-latency vector database for AI applications"
 license.workspace = true
 edition.workspace = true


@@ -1,6 +1,6 @@
 [package]
 name = "lancedb"
-version = "0.21.1-beta.0"
+version = "0.20.1-beta.0"
 edition.workspace = true
 description = "LanceDB: A serverless, low-latency vector database for AI applications"
 license.workspace = true


@@ -105,7 +105,7 @@ impl ListingCatalog {
     }

     async fn open_path(path: &str) -> Result<Self> {
-        let (object_store, base_path) = ObjectStore::from_uri(path).await?;
+        let (object_store, base_path) = ObjectStore::from_uri(path).await.unwrap();
         if object_store.is_local() {
             Self::try_create_dir(path).context(CreateDirSnafu { path })?;
         }


@@ -107,7 +107,7 @@ impl ObjectStore for MirroringObjectStore {
         self.primary.delete(location).await
     }

-    fn list(&self, prefix: Option<&Path>) -> BoxStream<'static, Result<ObjectMeta>> {
+    fn list(&self, prefix: Option<&Path>) -> BoxStream<'_, Result<ObjectMeta>> {
         self.primary.list(prefix)
     }


@@ -119,7 +119,7 @@ impl ObjectStore for IoTrackingStore {
         let result = self.target.get(location).await;
         if let Ok(result) = &result {
             let num_bytes = result.range.end - result.range.start;
-            self.record_read(num_bytes);
+            self.record_read(num_bytes as u64);
         }
         result
     }
@@ -128,12 +128,12 @@ impl ObjectStore for IoTrackingStore {
         let result = self.target.get_opts(location, options).await;
         if let Ok(result) = &result {
             let num_bytes = result.range.end - result.range.start;
-            self.record_read(num_bytes);
+            self.record_read(num_bytes as u64);
         }
         result
     }

-    async fn get_range(&self, location: &Path, range: std::ops::Range<u64>) -> OSResult<Bytes> {
+    async fn get_range(&self, location: &Path, range: std::ops::Range<usize>) -> OSResult<Bytes> {
         let result = self.target.get_range(location, range).await;
         if let Ok(result) = &result {
             self.record_read(result.len() as u64);
@@ -144,7 +144,7 @@ impl ObjectStore for IoTrackingStore {
     async fn get_ranges(
         &self,
         location: &Path,
-        ranges: &[std::ops::Range<u64>],
+        ranges: &[std::ops::Range<usize>],
     ) -> OSResult<Vec<Bytes>> {
         let result = self.target.get_ranges(location, ranges).await;
         if let Ok(result) = &result {
@@ -170,7 +170,7 @@ impl ObjectStore for IoTrackingStore {
         self.target.delete_stream(locations)
     }

-    fn list(&self, prefix: Option<&Path>) -> BoxStream<'static, OSResult<ObjectMeta>> {
+    fn list(&self, prefix: Option<&Path>) -> BoxStream<'_, OSResult<ObjectMeta>> {
         self.record_read(0);
         self.target.list(prefix)
     }
@@ -179,7 +179,7 @@ impl ObjectStore for IoTrackingStore {
         &self,
         prefix: Option<&Path>,
         offset: &Path,
-    ) -> BoxStream<'static, OSResult<ObjectMeta>> {
+    ) -> BoxStream<'_, OSResult<ObjectMeta>> {
         self.record_read(0);
         self.target.list_with_offset(prefix, offset)
     }


@@ -57,8 +57,6 @@ use crate::{
 };

 const REQUEST_TIMEOUT_HEADER: HeaderName = HeaderName::from_static("x-request-timeout-ms");
-const METRIC_TYPE_KEY: &str = "metric_type";
-const INDEX_TYPE_KEY: &str = "index_type";

 pub struct RemoteTags<'a, S: HttpSend = Sender> {
     inner: &'a RemoteTable<S>,
@@ -999,53 +997,23 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
             "column": column
         });

-        match index.index {
+        let (index_type, distance_type) = match index.index {
             // TODO: Should we pass the actual index parameters? SaaS does not
             // yet support them.
-            Index::IvfFlat(index) => {
-                body[INDEX_TYPE_KEY] = serde_json::Value::String("IVF_FLAT".to_string());
-                body[METRIC_TYPE_KEY] =
-                    serde_json::Value::String(index.distance_type.to_string().to_lowercase());
-                if let Some(num_partitions) = index.num_partitions {
-                    body["num_partitions"] = serde_json::Value::Number(num_partitions.into());
-                }
-            }
-            Index::IvfPq(index) => {
-                body[INDEX_TYPE_KEY] = serde_json::Value::String("IVF_PQ".to_string());
-                body[METRIC_TYPE_KEY] =
-                    serde_json::Value::String(index.distance_type.to_string().to_lowercase());
-                if let Some(num_partitions) = index.num_partitions {
-                    body["num_partitions"] = serde_json::Value::Number(num_partitions.into());
-                }
-                if let Some(num_bits) = index.num_bits {
-                    body["num_bits"] = serde_json::Value::Number(num_bits.into());
-                }
-            }
-            Index::IvfHnswSq(index) => {
-                body[INDEX_TYPE_KEY] = serde_json::Value::String("IVF_HNSW_SQ".to_string());
-                body[METRIC_TYPE_KEY] =
-                    serde_json::Value::String(index.distance_type.to_string().to_lowercase());
-                if let Some(num_partitions) = index.num_partitions {
-                    body["num_partitions"] = serde_json::Value::Number(num_partitions.into());
-                }
-            }
-            Index::BTree(_) => {
-                body[INDEX_TYPE_KEY] = serde_json::Value::String("BTREE".to_string());
-            }
-            Index::Bitmap(_) => {
-                body[INDEX_TYPE_KEY] = serde_json::Value::String("BITMAP".to_string());
-            }
-            Index::LabelList(_) => {
-                body[INDEX_TYPE_KEY] = serde_json::Value::String("LABEL_LIST".to_string());
-            }
+            Index::IvfFlat(index) => ("IVF_FLAT", Some(index.distance_type)),
+            Index::IvfPq(index) => ("IVF_PQ", Some(index.distance_type)),
+            Index::IvfHnswSq(index) => ("IVF_HNSW_SQ", Some(index.distance_type)),
+            Index::BTree(_) => ("BTREE", None),
+            Index::Bitmap(_) => ("BITMAP", None),
+            Index::LabelList(_) => ("LABEL_LIST", None),
             Index::FTS(fts) => {
-                body[INDEX_TYPE_KEY] = serde_json::Value::String("FTS".to_string());
                 let params = serde_json::to_value(&fts).map_err(|e| Error::InvalidInput {
                     message: format!("failed to serialize FTS index params {:?}", e),
                 })?;
                 for (key, value) in params.as_object().unwrap() {
                     body[key] = value.clone();
                 }
+                ("FTS", None)
             }
             Index::Auto => {
                 let schema = self.schema().await?;
@@ -1055,11 +1023,9 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
                     message: format!("Column {} not found in schema", column),
                 })?;
                 if supported_vector_data_type(field.data_type()) {
-                    body[INDEX_TYPE_KEY] = serde_json::Value::String("IVF_PQ".to_string());
-                    body[METRIC_TYPE_KEY] =
-                        serde_json::Value::String(DistanceType::L2.to_string().to_lowercase());
+                    ("IVF_PQ", Some(DistanceType::L2))
                 } else if supported_btree_data_type(field.data_type()) {
-                    body[INDEX_TYPE_KEY] = serde_json::Value::String("BTREE".to_string());
+                    ("BTREE", None)
                 } else {
                     return Err(Error::NotSupported {
                         message: format!(
@@ -1076,6 +1042,12 @@ impl<S: HttpSend> BaseTable for RemoteTable<S> {
                 })
             }
         };
+        body["index_type"] = serde_json::Value::String(index_type.into());
+        if let Some(distance_type) = distance_type {
+            // Phalanx expects this to be lowercase right now.
+            body["metric_type"] =
+                serde_json::Value::String(distance_type.to_string().to_lowercase());
+        }

         let request = request.json(&body);
@@ -1457,12 +1429,11 @@ mod tests {
     use chrono::{DateTime, Utc};
     use futures::{future::BoxFuture, StreamExt, TryFutureExt};
     use lance_index::scalar::inverted::query::MatchQuery;
-    use lance_index::scalar::{FullTextSearchQuery, InvertedIndexParams};
+    use lance_index::scalar::FullTextSearchQuery;
     use reqwest::Body;
     use rstest::rstest;
-    use serde_json::json;

-    use crate::index::vector::{IvfFlatIndexBuilder, IvfHnswSqIndexBuilder};
+    use crate::index::vector::IvfFlatIndexBuilder;
     use crate::remote::db::DEFAULT_SERVER_VERSION;
     use crate::remote::JSON_CONTENT_TYPE;
     use crate::{
@@ -2347,7 +2318,6 @@ mod tests {
                     "fuzziness": 0,
                     "max_expansions": 50,
                     "operator": "Or",
-                    "prefix_length": 0,
                 },
             }
         },
@@ -2462,79 +2432,29 @@ mod tests {
         let cases = [
             (
                 "IVF_FLAT",
-                json!({
-                    "metric_type": "hamming",
-                }),
+                Some("hamming"),
                 Index::IvfFlat(IvfFlatIndexBuilder::default().distance_type(DistanceType::Hamming)),
             ),
-            (
-                "IVF_FLAT",
-                json!({
-                    "metric_type": "hamming",
-                    "num_partitions": 128,
-                }),
-                Index::IvfFlat(
-                    IvfFlatIndexBuilder::default()
-                        .distance_type(DistanceType::Hamming)
-                        .num_partitions(128),
-                ),
-            ),
-            (
-                "IVF_PQ",
-                json!({
-                    "metric_type": "l2",
-                }),
-                Index::IvfPq(Default::default()),
-            ),
+            ("IVF_PQ", Some("l2"), Index::IvfPq(Default::default())),
             (
                 "IVF_PQ",
-                json!({
-                    "metric_type": "cosine",
-                    "num_partitions": 128,
-                    "num_bits": 4,
-                }),
-                Index::IvfPq(
-                    IvfPqIndexBuilder::default()
-                        .distance_type(DistanceType::Cosine)
-                        .num_partitions(128)
-                        .num_bits(4),
-                ),
+                Some("cosine"),
+                Index::IvfPq(IvfPqIndexBuilder::default().distance_type(DistanceType::Cosine)),
             ),
             (
                 "IVF_HNSW_SQ",
-                json!({
-                    "metric_type": "l2",
-                }),
+                Some("l2"),
                 Index::IvfHnswSq(Default::default()),
             ),
-            (
-                "IVF_HNSW_SQ",
-                json!({
-                    "metric_type": "l2",
-                    "num_partitions": 128,
-                }),
-                Index::IvfHnswSq(
-                    IvfHnswSqIndexBuilder::default()
-                        .distance_type(DistanceType::L2)
-                        .num_partitions(128),
-                ),
-            ),
             // HNSW_PQ isn't yet supported on SaaS
-            ("BTREE", json!({}), Index::BTree(Default::default())),
-            ("BITMAP", json!({}), Index::Bitmap(Default::default())),
-            (
-                "LABEL_LIST",
-                json!({}),
-                Index::LabelList(Default::default()),
-            ),
-            (
-                "FTS",
-                serde_json::to_value(InvertedIndexParams::default()).unwrap(),
-                Index::FTS(Default::default()),
-            ),
+            ("BTREE", None, Index::BTree(Default::default())),
+            ("BITMAP", None, Index::Bitmap(Default::default())),
+            ("LABEL_LIST", None, Index::LabelList(Default::default())),
+            ("FTS", None, Index::FTS(Default::default())),
         ];

-        for (index_type, expected_body, index) in cases {
+        for (index_type, distance_type, index) in cases {
+            let params = index.clone();
             let table = Table::new_with_handler("my_table", move |request| {
                 assert_eq!(request.method(), "POST");
                 assert_eq!(request.url().path(), "/v1/table/my_table/create_index/");
@@ -2544,9 +2464,19 @@ mod tests {
                 );
                 let body = request.body().unwrap().as_bytes().unwrap();
                 let body: serde_json::Value = serde_json::from_slice(body).unwrap();
-                let mut expected_body = expected_body.clone();
-                expected_body["column"] = "a".into();
-                expected_body[INDEX_TYPE_KEY] = index_type.into();
+                let mut expected_body = serde_json::json!({
+                    "column": "a",
+                    "index_type": index_type,
+                });
+                if let Some(distance_type) = distance_type {
+                    expected_body["metric_type"] = distance_type.to_lowercase().into();
+                }
+                if let Index::FTS(fts) = &params {
+                    let params = serde_json::to_value(fts).unwrap();
+                    for (key, value) in params.as_object().unwrap() {
+                        expected_body[key] = value.clone();
+                    }
+                }
                 assert_eq!(body, expected_body);


@@ -392,18 +392,9 @@ pub mod tests {
             } else {
                 expected_line.trim()
             };
-            assert_eq!(
-                &actual_trimmed[..expected_trimmed.len()],
-                expected_trimmed,
-                "\nactual:\n{physical_plan}\nexpected:\n{expected}"
-            );
+            assert_eq!(&actual_trimmed[..expected_trimmed.len()], expected_trimmed);
         }
-        assert_eq!(
-            lines_checked,
-            expected.lines().count(),
-            "\nlines_checked:\n{lines_checked}\nexpected:\n{}",
-            expected.lines().count()
-        );
+        assert_eq!(lines_checked, expected.lines().count());
     }
 }
@@ -486,9 +477,9 @@ pub mod tests {
         TestFixture::check_plan(
             plan,
             "MetadataEraserExec
-  RepartitionExec:...
   CoalesceBatchesExec:...
     FilterExec: i@0 >= 5
+      RepartitionExec:...
       ProjectionExec:...
         LanceScan:...",
         )


@@ -129,9 +129,7 @@ impl DatasetRef {
                 dataset: ref mut ds,
                 ..
             } => {
-                if dataset.manifest().version > ds.manifest().version {
-                    *ds = dataset;
-                }
+                *ds = dataset;
             }
             _ => unreachable!("Dataset should be in latest mode at this point"),
         }