Mirror of https://github.com/lancedb/lancedb.git (synced 2025-12-23 13:29:57 +00:00)

Compare commits: 71 commits, v0.4.17 ... python-v0.
Commits in this range (SHA1):

41b77f5e25, eb8b3b8c54, f69c3e0595, 8511edaaab, 657aba3c05, 2e197ef387,
4f512af024, 5349e8b1db, 5e01810438, 6eaaee59f8, 055efdcdb6, bc582bb702,
df9c41f342, 0bd6ac945e, c9d5475333, 3850d5fb35, b37c58342e, a06e64f22d,
e983198f0e, 76e7b4abf8, 5f6eb4651e, 805c78bb20, 4746281b21, 7b3b6bdccd,
37e1124c0f, 93f037ee41, e4fc06825a, fe89a373a2, 3d3915edef, e2e8b6aee4,
12dbca5248, a6babfa651, 75ede86fab, becd649130, 9d2fb7d602, fdb5d6fdf1,
2f13fa225f, e933de003d, 05fd387425, 82a1da554c, a7c0d80b9e, 71323a064a,
df48454b70, 6603414885, c256f6c502, cc03f90379, 975da09b02, c32e17b497,
0528abdf97, 1090c311e8, e767cbb374, 3d7c48feca, 08d62550bb, b272408b05,
46ffa87cd4, cd9fc37b95, 431f94e564, c1a7d65473, 1e5ccb1614, 2e7ab373dc,
c7fbc4aaee, 7e023c1ef2, 1d0dd9a8b8, deb947ddbd, b039765d50, d155e82723,
5d8c91256c, 44c03ebef3, 8ea06fe7f3, cf06b653d4, 09cfab6d00
@@ -1,22 +0,0 @@
-[bumpversion]
-current_version = 0.4.17
-commit = True
-message = Bump version: {current_version} → {new_version}
-tag = True
-tag_name = v{new_version}
-
-[bumpversion:file:node/package.json]
-
-[bumpversion:file:nodejs/package.json]
-
-[bumpversion:file:nodejs/npm/darwin-x64/package.json]
-
-[bumpversion:file:nodejs/npm/darwin-arm64/package.json]
-
-[bumpversion:file:nodejs/npm/linux-x64-gnu/package.json]
-
-[bumpversion:file:nodejs/npm/linux-arm64-gnu/package.json]
-
-[bumpversion:file:rust/ffi/node/Cargo.toml]
-
-[bumpversion:file:rust/lancedb/Cargo.toml]
.bumpversion.toml (new file, 57 lines)
@@ -0,0 +1,57 @@
+[tool.bumpversion]
+current_version = "0.4.20"
+parse = """(?x)
+    (?P<major>0|[1-9]\\d*)\\.
+    (?P<minor>0|[1-9]\\d*)\\.
+    (?P<patch>0|[1-9]\\d*)
+    (?:-(?P<pre_l>[a-zA-Z-]+)\\.(?P<pre_n>0|[1-9]\\d*))?
+"""
+serialize = [
+    "{major}.{minor}.{patch}-{pre_l}.{pre_n}",
+    "{major}.{minor}.{patch}",
+]
+search = "{current_version}"
+replace = "{new_version}"
+regex = false
+ignore_missing_version = false
+ignore_missing_files = false
+tag = true
+sign_tags = false
+tag_name = "v{new_version}"
+tag_message = "Bump version: {current_version} → {new_version}"
+allow_dirty = true
+commit = true
+message = "Bump version: {current_version} → {new_version}"
+commit_args = ""
+
+[tool.bumpversion.parts.pre_l]
+values = ["beta", "final"]
+optional_value = "final"
+
+[[tool.bumpversion.files]]
+filename = "node/package.json"
+search = "\"version\": \"{current_version}\","
+replace = "\"version\": \"{new_version}\","
+
+[[tool.bumpversion.files]]
+filename = "nodejs/package.json"
+search = "\"version\": \"{current_version}\","
+replace = "\"version\": \"{new_version}\","
+
+# nodejs binary packages
+[[tool.bumpversion.files]]
+glob = "nodejs/npm/*/package.json"
+search = "\"version\": \"{current_version}\","
+replace = "\"version\": \"{new_version}\","
+
+# Cargo files
+# ------------
+[[tool.bumpversion.files]]
+filename = "rust/ffi/node/Cargo.toml"
+search = "\nversion = \"{current_version}\""
+replace = "\nversion = \"{new_version}\""
+
+[[tool.bumpversion.files]]
+filename = "rust/lancedb/Cargo.toml"
+search = "\nversion = \"{current_version}\""
+replace = "\nversion = \"{new_version}\""
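To sanity-check the `parse` pattern above, here is a small Python sketch (not part of the repo; the version strings are illustrative):

```python
import re

# Same pattern as in .bumpversion.toml, with TOML's double backslashes
# collapsed to normal Python escapes.
VERSION_RE = re.compile(
    r"""(?x)
    (?P<major>0|[1-9]\d*)\.
    (?P<minor>0|[1-9]\d*)\.
    (?P<patch>0|[1-9]\d*)
    (?:-(?P<pre_l>[a-zA-Z-]+)\.(?P<pre_n>0|[1-9]\d*))?
    """
)

assert VERSION_RE.match("0.4.20").group("patch") == "20"
m = VERSION_RE.match("0.5.0-beta.1")
assert m.group("pre_l") == "beta" and m.group("pre_n") == "1"
```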
.github/labeler.yml (new file, 33 lines)
@@ -0,0 +1,33 @@
+version: 1
+appendOnly: true
+# Labels are applied based on conventional commits standard
+# https://www.conventionalcommits.org/en/v1.0.0/
+# These labels are later used in release notes. See .github/release.yml
+labels:
+  # If the PR title has an ! before the : it will be considered a breaking change
+  # For example, `feat!: add new feature` will be considered a breaking change
+  - label: breaking-change
+    title: "^[^:]+!:.*"
+  - label: breaking-change
+    body: "BREAKING CHANGE"
+  - label: enhancement
+    title: "^feat(\\(.+\\))?!?:.*"
+  - label: bug
+    title: "^fix(\\(.+\\))?!?:.*"
+  - label: documentation
+    title: "^docs(\\(.+\\))?!?:.*"
+  - label: performance
+    title: "^perf(\\(.+\\))?!?:.*"
+  - label: ci
+    title: "^ci(\\(.+\\))?!?:.*"
+  - label: chore
+    title: "^(chore|test|build|style)(\\(.+\\))?!?:.*"
+  - label: Python
+    files:
+      - "^python\\/.*"
+  - label: Rust
+    files:
+      - "^rust\\/.*"
+  - label: typescript
+    files:
+      - "^node\\/.*"
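The `title` patterns above are ordinary regular expressions applied to PR titles. A small Python sketch of how they classify conventional-commit titles (the example titles are invented):

```python
import re

breaking = re.compile(r"^[^:]+!:.*")          # from the breaking-change rule
feature = re.compile(r"^feat(\(.+\))?!?:.*")  # from the enhancement rule

assert breaking.match("feat!: drop support for old manifests")
assert feature.match("feat(python): add an embedding function")
assert not breaking.match("fix: handle empty tables")
```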
.github/release_notes.json (new file, 41 lines)
@@ -0,0 +1,41 @@
+{
+  "ignore_labels": ["chore"],
+  "pr_template": "- ${{TITLE}} by @${{AUTHOR}} in ${{URL}}",
+  "categories": [
+    {
+      "title": "## 🏆 Highlights",
+      "labels": ["highlight"]
+    },
+    {
+      "title": "## 🛠 Breaking Changes",
+      "labels": ["breaking-change"]
+    },
+    {
+      "title": "## ⚠️ Deprecations ",
+      "labels": ["deprecation"]
+    },
+    {
+      "title": "## 🎉 New Features",
+      "labels": ["enhancement"]
+    },
+    {
+      "title": "## 🐛 Bug Fixes",
+      "labels": ["bug"]
+    },
+    {
+      "title": "## 📚 Documentation",
+      "labels": ["documentation"]
+    },
+    {
+      "title": "## 🚀 Performance Improvements",
+      "labels": ["performance"]
+    },
+    {
+      "title": "## Other Changes"
+    },
+    {
+      "title": "## 🔧 Build and CI",
+      "labels": ["ci"]
+    }
+  ]
+}
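For reference, a rough Python sketch of how the `pr_template` placeholders expand for a single PR (the title, author, and URL below are invented placeholders):

```python
template = "- ${{TITLE}} by @${{AUTHOR}} in ${{URL}}"
pr = {
    "TITLE": "feat: add Ollama embeddings",
    "AUTHOR": "someuser",
    "URL": "https://github.com/lancedb/lancedb/pull/1",  # placeholder URL
}
line = template
for key, value in pr.items():
    line = line.replace("${{" + key + "}}", value)
print(line)
# - feat: add Ollama embeddings by @someuser in https://github.com/lancedb/lancedb/pull/1
```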
.github/workflows/cargo-publish.yml (11 changes)
@@ -1,13 +1,20 @@
 name: Cargo Publish
 
 on:
-  release:
-    types: [ published ]
+  push:
+    tags-ignore:
+      # We don't publish pre-releases for Rust. Crates.io is just a source
+      # distribution, so we don't need to publish pre-releases.
+      - 'v*-beta*'
+      - '*-v*' # for example, python-vX.Y.Z
 
 env:
   # This env var is used by Swatinem/rust-cache@v2 for the cache
   # key, so we set it to make sure it is always consistent.
   CARGO_TERM_COLOR: always
+  # Up-to-date compilers needed for fp16kernels.
+  CC: gcc-12
+  CXX: g++-12
 
 jobs:
   build:
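The `tags-ignore` entries are globs over pushed tag names. As a rough check in Python (GitHub's matcher is not exactly `fnmatch`, so treat this as an approximation):

```python
from fnmatch import fnmatch

assert fnmatch("v0.4.20-beta.1", "v*-beta*")  # Rust pre-release tag: skipped
assert fnmatch("python-v0.4.20", "*-v*")      # Python tag: skipped
assert not fnmatch("v0.4.20", "v*-beta*")     # stable Rust tag: published
assert not fnmatch("v0.4.20", "*-v*")
```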
.github/workflows/dev.yml (new file, 81 lines)
@@ -0,0 +1,81 @@
+name: PR Checks
+
+on:
+  pull_request_target:
+    types: [opened, edited, synchronize, reopened]
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  labeler:
+    permissions:
+      pull-requests: write
+    name: Label PR
+    runs-on: ubuntu-latest
+    steps:
+      - uses: srvaroa/labeler@master
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+  commitlint:
+    permissions:
+      pull-requests: write
+    name: Verify PR title / description conforms to semantic-release
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-node@v3
+        with:
+          node-version: "18"
+      # These rules are disabled because Github will always ensure there
+      # is a blank line between the title and the body and Github will
+      # word wrap the description field to ensure a reasonable max line
+      # length.
+      - run: npm install @commitlint/config-conventional
+      - run: >
+          echo 'module.exports = {
+            "rules": {
+              "body-max-line-length": [0, "always", Infinity],
+              "footer-max-line-length": [0, "always", Infinity],
+              "body-leading-blank": [0, "always"]
+            }
+          }' > .commitlintrc.js
+      - run: npx commitlint --extends @commitlint/config-conventional --verbose <<< $COMMIT_MSG
+        env:
+          COMMIT_MSG: >
+            ${{ github.event.pull_request.title }}

+            ${{ github.event.pull_request.body }}
+      - if: failure()
+        uses: actions/github-script@v6
+        with:
+          script: |
+            const message = `**ACTION NEEDED**
+
+            Lance follows the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) for release automation.
+
+            The PR title and description are used as the merge commit message.\
+            Please update your PR title and description to match the specification.
+
+            For details on the error please inspect the "PR Title Check" action.
+            `
+            // Get list of current comments
+            const comments = await github.paginate(github.rest.issues.listComments, {
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              issue_number: context.issue.number
+            });
+            // Check if this job already commented
+            for (const comment of comments) {
+              if (comment.body === message) {
+                return // Already commented
+              }
+            }
+            // Post the comment about Conventional Commits
+            github.rest.issues.createComment({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              issue_number: context.issue.number,
+              body: message
+            })
+            core.setFailed(message)
.github/workflows/make-release-commit.yml (86 changes)
@@ -1,37 +1,62 @@
 name: Create release commit
 
 # This workflow increments versions, tags the version, and pushes it.
 # When a tag is pushed, another workflow is triggered that creates a GH release
 # and uploads the binaries. This workflow is only for creating the tag.
+
+# This script will enforce that a minor version is incremented if there are any
+# breaking changes since the last minor increment. However, it isn't able to
+# differentiate between breaking changes in Node versus Python. If you wish to
+# bypass this check, you can manually increment the version and push the tag.
 on:
   workflow_dispatch:
     inputs:
       dry_run:
         description: 'Dry run (create the local commit/tags but do not push it)'
         required: true
-        default: "false"
-        type: choice
-        options:
-        - "true"
-        - "false"
-      part:
+        default: false
+        type: boolean
+      type:
         description: 'What kind of release is this?'
         required: true
-        default: 'patch'
+        default: 'preview'
         type: choice
        options:
-        - patch
-        - minor
-        - major
+        - preview
+        - stable
+      python:
+        description: 'Make a Python release'
+        required: true
+        default: true
+        type: boolean
+      other:
+        description: 'Make a Node/Rust release'
+        required: true
+        default: true
+        type: boolean
+      bump-minor:
+        description: 'Bump minor version'
+        required: true
+        default: false
+        type: boolean
 
 jobs:
-  bump-version:
+  make-release:
+    # Creates tag and GH release. The GH release will trigger the build and release jobs.
     runs-on: ubuntu-latest
     permissions:
       contents: write
     steps:
-    - name: Check out main
-      uses: actions/checkout@v4
+    - name: Output Inputs
+      run: echo "${{ toJSON(github.event.inputs) }}"
+    - uses: actions/checkout@v4
      with:
        ref: main
        persist-credentials: false
        fetch-depth: 0
        lfs: true
+        # It's important we use our token here, as the default token will NOT
+        # trigger any workflows watching for new tags. See:
+        # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
+        token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
    - name: Set git configs for bumpversion
      shell: bash
      run: |
@@ -41,19 +66,34 @@ jobs:
       uses: actions/setup-python@v5
       with:
         python-version: "3.11"
-    - name: Bump version, create tag and commit
+    - name: Bump Python version
+      if: ${{ inputs.python }}
       working-directory: python
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       run: |
-        pip install bump2version
-        bumpversion --verbose ${{ inputs.part }}
-    - name: Push new version and tag
-      if: ${{ inputs.dry_run }} == "false"
+        # Need to get the commit before bumping the version, so we can
+        # determine if there are breaking changes in the next step as well.
+        echo "COMMIT_BEFORE_BUMP=$(git rev-parse HEAD)" >> $GITHUB_ENV
+
+        pip install bump-my-version PyGithub packaging
+        bash ../ci/bump_version.sh ${{ inputs.type }} ${{ inputs.bump-minor }} python-v
+    - name: Bump Node/Rust version
+      if: ${{ inputs.other }}
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      run: |
+        pip install bump-my-version PyGithub packaging
+        bash ci/bump_version.sh ${{ inputs.type }} ${{ inputs.bump-minor }} v $COMMIT_BEFORE_BUMP
+    - name: Push new version tag
+      if: ${{ !inputs.dry_run }}
       uses: ad-m/github-push-action@master
       with:
+        # Need to use PAT here too to trigger next workflow. See comment above.
         github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
-        branch: main
+        branch: ${{ github.ref }}
         tags: true
     - uses: ./.github/workflows/update_package_lock
       if: ${{ inputs.dry_run }} == "false"
       with:
-        github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
+        github_token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/nodejs.yml (3 changes)
@@ -52,8 +52,7 @@ jobs:
         cargo fmt --all -- --check
         cargo clippy --all --all-features -- -D warnings
         npm ci
-        npm run lint
-        npm run chkformat
+        npm run lint-ci
   linux:
     name: Linux (NodeJS ${{ matrix.node-version }})
     timeout-minutes: 30
.github/workflows/npm-publish.yml (99 changes)
@@ -1,8 +1,9 @@
 name: NPM Publish
 
 on:
-  release:
-    types: [published]
+  push:
+    tags:
+      - 'v*'
 
 jobs:
   node:
@@ -274,9 +275,15 @@ jobs:
       env:
         NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
       run: |
+        # Tag beta as "preview" instead of default "latest". See lancedb
+        # npm publish step for more info.
+        if [[ $GITHUB_REF =~ refs/tags/v(.*)-beta.* ]]; then
+          PUBLISH_ARGS="--tag preview"
+        fi
+
         mv */*.tgz .
         for filename in *.tgz; do
-          npm publish $filename
+          npm publish $PUBLISH_ARGS $filename
         done
 
   release-nodejs:
@@ -316,11 +323,23 @@ jobs:
     - name: Publish to NPM
       env:
         NODE_AUTH_TOKEN: ${{ secrets.LANCEDB_NPM_REGISTRY_TOKEN }}
-      run: npm publish --access public
+      # By default, things are published to the latest tag. This is what is
+      # installed by default if the user does not specify a version. This is
+      # good for stable releases, but for pre-releases, we want to publish to
+      # the "preview" tag so they can install with `npm install lancedb@preview`.
+      # See: https://medium.com/@mbostock/prereleases-and-npm-e778fc5e2420
+      run: |
+        if [[ $GITHUB_REF =~ refs/tags/v(.*)-beta.* ]]; then
+          npm publish --access public --tag preview
+        else
+          npm publish --access public
+        fi
 
   update-package-lock:
     needs: [release]
     runs-on: ubuntu-latest
+    permissions:
+      contents: write
     steps:
     - name: Checkout
       uses: actions/checkout@v4
@@ -331,11 +350,13 @@ jobs:
         lfs: true
     - uses: ./.github/workflows/update_package_lock
       with:
-        github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
+        github_token: ${{ secrets.GITHUB_TOKEN }}
 
   update-package-lock-nodejs:
     needs: [release-nodejs]
     runs-on: ubuntu-latest
+    permissions:
+      contents: write
     steps:
     - name: Checkout
       uses: actions/checkout@v4
@@ -346,4 +367,70 @@ jobs:
         lfs: true
     - uses: ./.github/workflows/update_package_lock_nodejs
       with:
-        github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
+        github_token: ${{ secrets.GITHUB_TOKEN }}
+
+  gh-release:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+    steps:
+    - uses: actions/checkout@v4
+      with:
+        fetch-depth: 0
+        lfs: true
+    - name: Extract version
+      id: extract_version
+      env:
+        GITHUB_REF: ${{ github.ref }}
+      run: |
+        set -e
+        echo "Extracting tag and version from $GITHUB_REF"
+        if [[ $GITHUB_REF =~ refs/tags/v(.*) ]]; then
+          VERSION=${BASH_REMATCH[1]}
+          TAG=v$VERSION
+          echo "tag=$TAG" >> $GITHUB_OUTPUT
+          echo "version=$VERSION" >> $GITHUB_OUTPUT
+        else
+          echo "Failed to extract version from $GITHUB_REF"
+          exit 1
+        fi
+        echo "Extracted version $VERSION from $GITHUB_REF"
+        if [[ $VERSION =~ beta ]]; then
+          echo "This is a beta release"
+
+          # Get last release (that is not this one)
+          FROM_TAG=$(git tag --sort='version:refname' \
+            | grep ^v \
+            | grep -vF "$TAG" \
+            | python ci/semver_sort.py v \
+            | tail -n 1)
+        else
+          echo "This is a stable release"
+          # Get last stable tag (ignore betas)
+          FROM_TAG=$(git tag --sort='version:refname' \
+            | grep ^v \
+            | grep -vF "$TAG" \
+            | grep -v beta \
+            | python ci/semver_sort.py v \
+            | tail -n 1)
+        fi
+        echo "Found from tag $FROM_TAG"
+        echo "from_tag=$FROM_TAG" >> $GITHUB_OUTPUT
+    - name: Create Release Notes
+      id: release_notes
+      uses: mikepenz/release-changelog-builder-action@v4
+      with:
+        configuration: .github/release_notes.json
+        toTag: ${{ steps.extract_version.outputs.tag }}
+        fromTag: ${{ steps.extract_version.outputs.from_tag }}
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+    - name: Create GH release
+      uses: softprops/action-gh-release@v2
+      with:
+        prerelease: ${{ contains('beta', github.ref) }}
+        tag_name: ${{ steps.extract_version.outputs.tag }}
+        token: ${{ secrets.GITHUB_TOKEN }}
+        generate_release_notes: false
+        name: Node/Rust LanceDB v${{ steps.extract_version.outputs.version }}
+        body: ${{ steps.release_notes.outputs.changelog }}
.github/workflows/pypi-publish.yml (101 changes)
@@ -1,8 +1,9 @@
 name: PyPI Publish
 
 on:
-  release:
-    types: [published]
+  push:
+    tags:
+      - 'python-v*'
 
 jobs:
   linux:
@@ -10,7 +11,6 @@ jobs:
     timeout-minutes: 60
     strategy:
       matrix:
-        python-minor-version: ["8"]
         config:
           - platform: x86_64
             manylinux: "2_17"
@@ -32,23 +32,22 @@
     - name: Set up Python
       uses: actions/setup-python@v4
       with:
-        python-version: 3.${{ matrix.python-minor-version }}
+        python-version: 3.8
     - uses: ./.github/workflows/build_linux_wheel
       with:
-        python-minor-version: ${{ matrix.python-minor-version }}
+        python-minor-version: 8
         args: "--release --strip ${{ matrix.config.extra_args }}"
         arm-build: ${{ matrix.config.platform == 'aarch64' }}
         manylinux: ${{ matrix.config.manylinux }}
     - uses: ./.github/workflows/upload_wheel
       with:
-        token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
-        repo: "pypi"
+        pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
+        fury_token: ${{ secrets.FURY_TOKEN }}
   mac:
     timeout-minutes: 60
     runs-on: ${{ matrix.config.runner }}
     strategy:
       matrix:
-        python-minor-version: ["8"]
         config:
           - target: x86_64-apple-darwin
             runner: macos-13
@@ -59,7 +58,6 @@
     steps:
     - uses: actions/checkout@v4
       with:
-        ref: ${{ inputs.ref }}
         fetch-depth: 0
         lfs: true
     - name: Set up Python
@@ -68,36 +66,95 @@
         python-version: 3.12
     - uses: ./.github/workflows/build_mac_wheel
       with:
-        python-minor-version: ${{ matrix.python-minor-version }}
+        python-minor-version: 8
         args: "--release --strip --target ${{ matrix.config.target }} --features fp16kernels"
     - uses: ./.github/workflows/upload_wheel
      with:
-        python-minor-version: ${{ matrix.python-minor-version }}
-        token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
-        repo: "pypi"
+        pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
+        fury_token: ${{ secrets.FURY_TOKEN }}
   windows:
     timeout-minutes: 60
     runs-on: windows-latest
-    strategy:
-      matrix:
-        python-minor-version: ["8"]
     steps:
     - uses: actions/checkout@v4
      with:
-        ref: ${{ inputs.ref }}
        fetch-depth: 0
        lfs: true
     - name: Set up Python
       uses: actions/setup-python@v4
       with:
-        python-version: 3.${{ matrix.python-minor-version }}
+        python-version: 3.8
     - uses: ./.github/workflows/build_windows_wheel
       with:
-        python-minor-version: ${{ matrix.python-minor-version }}
+        python-minor-version: 8
         args: "--release --strip"
         vcpkg_token: ${{ secrets.VCPKG_GITHUB_PACKAGES }}
     - uses: ./.github/workflows/upload_wheel
       with:
-        python-minor-version: ${{ matrix.python-minor-version }}
-        token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
-        repo: "pypi"
+        pypi_token: ${{ secrets.LANCEDB_PYPI_API_TOKEN }}
+        fury_token: ${{ secrets.FURY_TOKEN }}
+  gh-release:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+    steps:
+    - uses: actions/checkout@v4
+      with:
+        fetch-depth: 0
+        lfs: true
+    - name: Extract version
+      id: extract_version
+      env:
+        GITHUB_REF: ${{ github.ref }}
+      run: |
+        set -e
+        echo "Extracting tag and version from $GITHUB_REF"
+        if [[ $GITHUB_REF =~ refs/tags/python-v(.*) ]]; then
+          VERSION=${BASH_REMATCH[1]}
+          TAG=python-v$VERSION
+          echo "tag=$TAG" >> $GITHUB_OUTPUT
+          echo "version=$VERSION" >> $GITHUB_OUTPUT
+        else
+          echo "Failed to extract version from $GITHUB_REF"
+          exit 1
+        fi
+        echo "Extracted version $VERSION from $GITHUB_REF"
+        if [[ $VERSION =~ beta ]]; then
+          echo "This is a beta release"
+
+          # Get last release (that is not this one)
+          FROM_TAG=$(git tag --sort='version:refname' \
+            | grep ^python-v \
+            | grep -vF "$TAG" \
+            | python ci/semver_sort.py python-v \
+            | tail -n 1)
+        else
+          echo "This is a stable release"
+          # Get last stable tag (ignore betas)
+          FROM_TAG=$(git tag --sort='version:refname' \
+            | grep ^python-v \
+            | grep -vF "$TAG" \
+            | grep -v beta \
+            | python ci/semver_sort.py python-v \
+            | tail -n 1)
+        fi
+        echo "Found from tag $FROM_TAG"
+        echo "from_tag=$FROM_TAG" >> $GITHUB_OUTPUT
+    - name: Create Python Release Notes
+      id: python_release_notes
+      uses: mikepenz/release-changelog-builder-action@v4
+      with:
+        configuration: .github/release_notes.json
+        toTag: ${{ steps.extract_version.outputs.tag }}
+        fromTag: ${{ steps.extract_version.outputs.from_tag }}
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+    - name: Create Python GH release
+      uses: softprops/action-gh-release@v2
+      with:
+        prerelease: ${{ contains('beta', github.ref) }}
+        tag_name: ${{ steps.extract_version.outputs.tag }}
+        token: ${{ secrets.GITHUB_TOKEN }}
+        generate_release_notes: false
+        name: Python LanceDB v${{ steps.extract_version.outputs.version }}
+        body: ${{ steps.python_release_notes.outputs.changelog }}
.github/workflows/python-make-release-commit.yml (deleted, 56 lines)
@@ -1,56 +0,0 @@
-name: Python - Create release commit
-
-on:
-  workflow_dispatch:
-    inputs:
-      dry_run:
-        description: 'Dry run (create the local commit/tags but do not push it)'
-        required: true
-        default: "false"
-        type: choice
-        options:
-        - "true"
-        - "false"
-      part:
-        description: 'What kind of release is this?'
-        required: true
-        default: 'patch'
-        type: choice
-        options:
-        - patch
-        - minor
-        - major
-
-jobs:
-  bump-version:
-    runs-on: ubuntu-latest
-    steps:
-    - name: Check out main
-      uses: actions/checkout@v4
-      with:
-        ref: main
-        persist-credentials: false
-        fetch-depth: 0
-        lfs: true
-    - name: Set git configs for bumpversion
-      shell: bash
-      run: |
-        git config user.name 'Lance Release'
-        git config user.email 'lance-dev@lancedb.com'
-    - name: Set up Python
-      uses: actions/setup-python@v5
-      with:
-        python-version: "3.11"
-    - name: Bump version, create tag and commit
-      working-directory: python
-      run: |
-        pip install bump2version
-        bumpversion --verbose ${{ inputs.part }}
-    - name: Push new version and tag
-      if: ${{ inputs.dry_run }} == "false"
-      uses: ad-m/github-push-action@master
-      with:
-        github_token: ${{ secrets.LANCEDB_RELEASE_TOKEN }}
-        branch: main
-        tags: true
.github/workflows/python.yml (2 changes)
@@ -75,7 +75,7 @@ jobs:
     timeout-minutes: 30
     strategy:
       matrix:
-        python-minor-version: ["8", "11"]
+        python-minor-version: ["9", "11"]
     runs-on: "ubuntu-22.04"
     defaults:
       run:
.github/workflows/rust.yml (4 changes)
@@ -74,11 +74,11 @@ jobs:
       run: |
         sudo apt update
         sudo apt install -y protobuf-compiler libssl-dev
-    - name: Build
-      run: cargo build --all-features
     - name: Start S3 integration test environment
       working-directory: .
       run: docker compose up --detach --wait
+    - name: Build
+      run: cargo build --all-features
     - name: Run tests
       run: cargo test --all-features
     - name: Run examples
.github/workflows/upload_wheel/action.yml (54 changes)
@@ -2,28 +2,44 @@ name: upload-wheel
 
 description: "Upload wheels to Pypi"
 inputs:
-  os:
-    required: true
-    description: "ubuntu-22.04 or macos-13"
-  repo:
-    required: false
-    description: "pypi or testpypi"
-    default: "pypi"
-  token:
+  pypi_token:
     required: true
     description: "release token for the repo"
+  fury_token:
+    required: true
+    description: "release token for the fury repo"
 
 runs:
   using: "composite"
   steps:
-  - name: Install dependencies
-    shell: bash
-    run: |
-      python -m pip install --upgrade pip
-      pip install twine
-  - name: Publish wheel
-    env:
-      TWINE_USERNAME: __token__
-      TWINE_PASSWORD: ${{ inputs.token }}
-    shell: bash
-    run: twine upload --repository ${{ inputs.repo }} target/wheels/lancedb-*.whl
+  - name: Install dependencies
+    shell: bash
+    run: |
+      python -m pip install --upgrade pip
+      pip install twine
+  - name: Choose repo
+    shell: bash
+    id: choose_repo
+    run: |
+      if [ ${{ github.ref }} == "*beta*" ]; then
+        echo "repo=fury" >> $GITHUB_OUTPUT
+      else
+        echo "repo=pypi" >> $GITHUB_OUTPUT
+      fi
+  - name: Publish to PyPI
+    working-directory: python
+    shell: bash
+    env:
+      FURY_TOKEN: ${{ inputs.fury_token }}
+      PYPI_TOKEN: ${{ inputs.pypi_token }}
+    run: |
+      if [ ${{ steps.choose_repo.outputs.repo }} == "fury" ]; then
+        WHEEL=$(ls target/wheels/lancedb-*.whl 2> /dev/null | head -n 1)
+        echo "Uploading $WHEEL to Fury"
+        curl -f -F package=@$WHEEL https://$FURY_TOKEN@push.fury.io/lancedb/
+      else
+        twine upload --repository ${{ steps.choose_repo.outputs.repo }} \
+          --username __token__ \
+          --password $PYPI_TOKEN \
+          target/wheels/lancedb-*.whl
+      fi
.gitignore (2 changes)
@@ -6,7 +6,7 @@
 venv
 
 .vscode
 
+.zed
 rust/target
-rust/Cargo.lock
@@ -10,9 +10,12 @@ repos:
     rev: v0.2.2
     hooks:
      - id: ruff
-  - repo: https://github.com/pre-commit/mirrors-prettier
-    rev: v3.1.0
+  - repo: local
     hooks:
-      - id: prettier
+      - id: local-biome-check
+        name: biome check
+        entry: npx biome check
+        language: system
+        types: [text]
+        files: "nodejs/.*"
+        exclude: nodejs/lancedb/native.d.ts|nodejs/dist/.*
Cargo.toml (26 changes)
@@ -14,22 +14,22 @@ keywords = ["lancedb", "lance", "database", "vector", "search"]
 categories = ["database-implementations"]
 
 [workspace.dependencies]
-lance = { "version" = "=0.10.10", "features" = ["dynamodb"] }
-lance-index = { "version" = "=0.10.10" }
-lance-linalg = { "version" = "=0.10.10" }
-lance-testing = { "version" = "=0.10.10" }
+lance = { "version" = "=0.11.0", "features" = ["dynamodb"] }
+lance-index = { "version" = "=0.11.0" }
+lance-linalg = { "version" = "=0.11.0" }
+lance-testing = { "version" = "=0.11.0" }
 # Note that this one does not include pyarrow
-arrow = { version = "50.0", optional = false }
-arrow-array = "50.0"
-arrow-data = "50.0"
-arrow-ipc = "50.0"
-arrow-ord = "50.0"
-arrow-schema = "50.0"
-arrow-arith = "50.0"
-arrow-cast = "50.0"
+arrow = { version = "51.0", optional = false }
+arrow-array = "51.0"
+arrow-data = "51.0"
+arrow-ipc = "51.0"
+arrow-ord = "51.0"
+arrow-schema = "51.0"
+arrow-arith = "51.0"
+arrow-cast = "51.0"
 async-trait = "0"
 chrono = "0.4.35"
-half = { "version" = "=2.3.1", default-features = false, features = [
+half = { "version" = "=2.4.1", default-features = false, features = [
     "num-traits",
 ] }
 futures = "0"
@@ -20,7 +20,7 @@
 
 <hr />
 
-LanceDB is an open-source database for vector-search built with persistent storage, which greatly simplifies retrevial, filtering and management of embeddings.
+LanceDB is an open-source database for vector-search built with persistent storage, which greatly simplifies retrieval, filtering and management of embeddings.
 
 The key features of LanceDB include:
 
@@ -36,7 +36,7 @@ The key features of LanceDB include:
 
 * GPU support in building vector index(*).
 
-* Ecosystem integrations with [LangChain 🦜️🔗](https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lanecdb.html), [LlamaIndex 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way.
+* Ecosystem integrations with [LangChain 🦜️🔗](https://python.langchain.com/docs/integrations/vectorstores/lancedb/), [LlamaIndex 🦙](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html), Apache-Arrow, Pandas, Polars, DuckDB and more on the way.
 
 LanceDB's core is written in Rust 🦀 and is built using <a href="https://github.com/lancedb/lance">Lance</a>, an open-source columnar format designed for performant ML workloads.
ci/bump_version.sh (new file, 51 lines)
@@ -0,0 +1,51 @@
+set -e
+
+RELEASE_TYPE=${1:-"stable"}
+BUMP_MINOR=${2:-false}
+TAG_PREFIX=${3:-"v"} # Such as "python-v"
+HEAD_SHA=${4:-$(git rev-parse HEAD)}
+
+readonly SELF_DIR=$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+
+PREV_TAG=$(git tag --sort='version:refname' | grep ^$TAG_PREFIX | python $SELF_DIR/semver_sort.py $TAG_PREFIX | tail -n 1)
+echo "Found previous tag $PREV_TAG"
+
+# Initially, we don't want to tag if we are doing stable, because we will bump
+# again later. See comment at end for why.
+if [[ "$RELEASE_TYPE" == 'stable' ]]; then
+    BUMP_ARGS="--no-tag"
+fi
+
+# If last is stable and not bumping minor
+if [[ $PREV_TAG != *beta* ]]; then
+    if [[ "$BUMP_MINOR" != "false" ]]; then
+        # X.Y.Z -> X.(Y+1).0-beta.0
+        bump-my-version bump -vv $BUMP_ARGS minor
+    else
+        # X.Y.Z -> X.Y.(Z+1)-beta.0
+        bump-my-version bump -vv $BUMP_ARGS patch
+    fi
+else
+    if [[ "$BUMP_MINOR" != "false" ]]; then
+        # X.Y.Z-beta.N -> X.(Y+1).0-beta.0
+        bump-my-version bump -vv $BUMP_ARGS minor
+    else
+        # X.Y.Z-beta.N -> X.Y.Z-beta.(N+1)
+        bump-my-version bump -vv $BUMP_ARGS pre_n
+    fi
+fi
+
+# The above bump will always bump to a pre-release version. If we are releasing
+# a stable version, bump the pre-release level ("pre_l") to make it stable.
+if [[ $RELEASE_TYPE == 'stable' ]]; then
+    # X.Y.Z-beta.N -> X.Y.Z
+    bump-my-version bump -vv pre_l
+fi
+
+# Validate that we have incremented version appropriately for breaking changes
+NEW_TAG=$(git describe --tags --exact-match HEAD)
+NEW_VERSION=$(echo $NEW_TAG | sed "s/^$TAG_PREFIX//")
+LAST_STABLE_RELEASE=$(git tag --sort='version:refname' | grep ^$TAG_PREFIX | grep -v beta | grep -vF "$NEW_TAG" | python $SELF_DIR/semver_sort.py $TAG_PREFIX | tail -n 1)
+LAST_STABLE_VERSION=$(echo $LAST_STABLE_RELEASE | sed "s/^$TAG_PREFIX//")
+
+python $SELF_DIR/check_breaking_changes.py $LAST_STABLE_RELEASE $HEAD_SHA $LAST_STABLE_VERSION $NEW_VERSION
ci/check_breaking_changes.py (new file, 35 lines)
@@ -0,0 +1,35 @@
+"""
+Check whether there are any breaking changes in the PRs between the base and head commits.
+If there are, assert that we have incremented the minor version.
+"""
+import argparse
+import os
+from packaging.version import parse
+
+from github import Github
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("base")
+    parser.add_argument("head")
+    parser.add_argument("last_stable_version")
+    parser.add_argument("current_version")
+    args = parser.parse_args()
+
+    repo = Github(os.environ["GITHUB_TOKEN"]).get_repo(os.environ["GITHUB_REPOSITORY"])
+    commits = repo.compare(args.base, args.head).commits
+    prs = (pr for commit in commits for pr in commit.get_pulls())
+
+    for pr in prs:
+        if any(label.name == "breaking-change" for label in pr.labels):
+            print(f"Breaking change in PR: {pr.html_url}")
+            break
+    else:
+        print("No breaking changes found.")
+        exit(0)
+
+    last_stable_version = parse(args.last_stable_version)
+    current_version = parse(args.current_version)
+    if current_version.minor <= last_stable_version.minor:
+        print("Minor version is not greater than the last stable version.")
+        exit(1)
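In isolation, the version rule the script enforces looks like this (the versions are illustrative):

```python
from packaging.version import parse

last_stable = parse("0.4.19")
# After a breaking change, the minor version must have been bumped:
assert parse("0.5.0-beta.0").minor > last_stable.minor  # passes the check
assert not (parse("0.4.20").minor > last_stable.minor)  # would exit(1)
```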
ci/semver_sort.py (new file, 35 lines)
@@ -0,0 +1,35 @@
+"""
+Takes a list of semver strings and sorts them in ascending order.
+"""
+
+import sys
+from packaging.version import parse, InvalidVersion
+
+if __name__ == "__main__":
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument("prefix", default="v")
+    args = parser.parse_args()
+
+    # Read the input from stdin
+    lines = sys.stdin.readlines()
+
+    # Parse the versions
+    versions = []
+    for line in lines:
+        line = line.strip()
+        try:
+            version_str = line.removeprefix(args.prefix)
+            version = parse(version_str)
+        except InvalidVersion:
+            # There are old tags that don't follow the semver format
+            print(f"Invalid version: {line}", file=sys.stderr)
+            continue
+        versions.append((line, version))
+
+    # Sort the versions
+    versions.sort(key=lambda x: x[1])
+
+    # Print the sorted versions as original strings
+    for line, _ in versions:
+        print(line)
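The sort relies on PEP 440 ordering, under which a pre-release sorts before the stable release it precedes; that is what lets callers such as `bump_version.sh` pick the true latest tag with `tail -n 1`:

```python
from packaging.version import parse

assert parse("0.4.19-beta.1") < parse("0.4.19") < parse("0.4.20-beta.0")
```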
@@ -57,16 +57,6 @@ plugins:
       - https://arrow.apache.org/docs/objects.inv
       - https://pandas.pydata.org/docs/objects.inv
   - mkdocs-jupyter
-  - ultralytics:
-      verbose: True
-      enabled: True
-      default_image: "assets/lancedb_and_lance.png" # Default image for all pages
-      add_image: True # Automatically add meta image
-      add_keywords: True # Add page keywords in the header tag
-      add_share_buttons: True # Add social share buttons
-      add_authors: False # Display page authors
-      add_desc: False
-      add_dates: False
 
 markdown_extensions:
   - admonition
@@ -104,6 +94,14 @@ nav:
     - Overview: hybrid_search/hybrid_search.md
     - Comparing Rerankers: hybrid_search/eval.md
     - Airbnb financial data example: notebooks/hybrid_search.ipynb
+  - Reranking:
+    - Quickstart: reranking/index.md
+    - Cohere Reranker: reranking/cohere.md
+    - Linear Combination Reranker: reranking/linear_combination.md
+    - Cross Encoder Reranker: reranking/cross_encoder.md
+    - ColBERT Reranker: reranking/colbert.md
+    - OpenAI Reranker: reranking/openai.md
+    - Building Custom Rerankers: reranking/custom_reranker.md
   - Filtering: sql.md
   - Versioning & Reproducibility: notebooks/reproducibility.ipynb
   - Configuring Storage: guides/storage.md
@@ -120,9 +118,10 @@ nav:
   - Pandas and PyArrow: python/pandas_and_pyarrow.md
   - Polars: python/polars_arrow.md
   - DuckDB: python/duckdb.md
-  - LangChain 🔗: https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lancedb.html
-  - LangChain JS/TS 🔗: https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/lancedb
-  - LlamaIndex 🦙: https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html
+  - LangChain:
+    - LangChain 🔗: integrations/langchain.md
+  - LangChain JS/TS 🔗: https://js.langchain.com/docs/integrations/vectorstores/lancedb
+  - LlamaIndex 🦙: https://docs.llamaindex.ai/en/stable/examples/vector_stores/LanceDBIndexDemo/
   - Pydantic: python/pydantic.md
   - Voxel51: integrations/voxel51.md
   - PromptTools: integrations/prompttools.md
@@ -170,6 +169,14 @@ nav:
   - Overview: hybrid_search/hybrid_search.md
   - Comparing Rerankers: hybrid_search/eval.md
   - Airbnb financial data example: notebooks/hybrid_search.ipynb
+  - Reranking:
+    - Quickstart: reranking/index.md
+    - Cohere Reranker: reranking/cohere.md
+    - Linear Combination Reranker: reranking/linear_combination.md
+    - Cross Encoder Reranker: reranking/cross_encoder.md
+    - ColBERT Reranker: reranking/colbert.md
+    - OpenAI Reranker: reranking/openai.md
+    - Building Custom Rerankers: reranking/custom_reranker.md
  - Filtering: sql.md
  - Versioning & Reproducibility: notebooks/reproducibility.ipynb
  - Configuring Storage: guides/storage.md
@@ -186,8 +193,8 @@ nav:
   - Pandas and PyArrow: python/pandas_and_pyarrow.md
   - Polars: python/polars_arrow.md
   - DuckDB: python/duckdb.md
-  - LangChain 🦜️🔗↗: https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/lancedb.html
-  - LangChain.js 🦜️🔗↗: https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/lancedb
+  - LangChain 🦜️🔗↗: https://python.langchain.com/docs/integrations/vectorstores/lancedb
+  - LangChain.js 🦜️🔗↗: https://js.langchain.com/docs/integrations/vectorstores/lancedb
   - LlamaIndex 🦙↗: https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html
   - Pydantic: python/pydantic.md
   - Voxel51: integrations/voxel51.md
@@ -2,5 +2,4 @@ mkdocs==1.5.3
 mkdocs-jupyter==0.24.1
 mkdocs-material==9.5.3
 mkdocstrings[python]==0.20.0
-pydantic
-mkdocs-ultralytics-plugin==0.0.44
+pydantic
@@ -44,6 +44,36 @@
 
 !!! info "Please also make sure you're using the same version of Arrow as in the [lancedb crate](https://github.com/lancedb/lancedb/blob/main/Cargo.toml)"
 
+### Preview releases
+
+Stable releases are created about every 2 weeks. For the latest features and bug
+fixes, you can install the preview release. These releases receive the same
+level of testing as stable releases, but are not guaranteed to be available for
+more than 6 months after they are released. Once your application is stable, we
+recommend switching to stable releases.
+
+=== "Python"
+
+    ```shell
+    pip install --pre --extra-index-url https://pypi.fury.io/lancedb/ lancedb
+    ```
+
+=== "Typescript"
+
+    ```shell
+    npm install vectordb@preview
+    ```
+
+=== "Rust"
+
+    We don't push preview releases to crates.io, but you can reference the tag
+    in GitHub within your Cargo dependencies:
+
+    ```toml
+    [dependencies]
+    lancedb = { git = "https://github.com/lancedb/lancedb.git", tag = "vX.Y.Z-beta.N" }
+    ```
+
 ## Connect to a database
 
 === "Python"
@@ -154,9 +154,12 @@ Allows you to set parameters when registering a `sentence-transformers` object.
 !!! note "BAAI Embeddings example"
     Here is an example that uses BAAI embedding model from the HuggingFace Hub [supported models](https://huggingface.co/models?library=sentence-transformers)
     ```python
+    import lancedb
     from lancedb.pydantic import LanceModel, Vector
+    from lancedb.embeddings import get_registry
 
     db = lancedb.connect("/tmp/db")
-    registry = EmbeddingFunctionRegistry.get_instance()
-    model = registry.get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
+    model = get_registry().get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
 
     class Words(LanceModel):
         text: str = model.SourceField()
@@ -165,7 +168,7 @@ Allows you to set parameters when registering a `sentence-transformers` object.
     table = db.create_table("words", schema=Words)
     table.add(
         [
-            {"text": "hello world"}
+            {"text": "hello world"},
             {"text": "goodbye world"}
         ]
     )
@@ -203,6 +206,44 @@ print(actual.text)
 ```
 
+### Ollama embeddings
+Generate embeddings via the [ollama](https://github.com/ollama/ollama-python) python library. More details:
+
+- [Ollama docs on embeddings](https://github.com/ollama/ollama/blob/main/docs/api.md#generate-embeddings)
+- [Ollama blog on embeddings](https://ollama.com/blog/embedding-models)
+
+| Parameter | Type | Default Value | Description |
+|---|---|---|---|
+| `name` | `str` | `nomic-embed-text` | The name of the model. |
+| `host` | `str` | `http://localhost:11434` | The Ollama host to connect to. |
+| `options` | `ollama.Options` or `dict` | `None` | Additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`. |
+| `keep_alive` | `float` or `str` | `"5m"` | Controls how long the model will stay loaded into memory following the request. |
+| `ollama_client_kwargs` | `dict` | `{}` | kwargs that can be passed to the `ollama.Client`. |
+
+```python
+import lancedb
+from lancedb.pydantic import LanceModel, Vector
+from lancedb.embeddings import get_registry
+
+db = lancedb.connect("/tmp/db")
+func = get_registry().get("ollama").create(name="nomic-embed-text")
+
+class Words(LanceModel):
+    text: str = func.SourceField()
+    vector: Vector(func.ndims()) = func.VectorField()
+
+table = db.create_table("words", schema=Words, mode="overwrite")
+table.add([
+    {"text": "hello world"},
+    {"text": "goodbye world"}
+])
+
+query = "greetings"
+actual = table.search(query).limit(1).to_pydantic(Words)[0]
+print(actual.text)
+```
+
 ### OpenAI embeddings
 LanceDB registers the OpenAI embeddings function in the registry by default, as `openai`. Below are the parameters that you can customize when creating the instances:
@@ -213,18 +254,21 @@ LanceDB registers the OpenAI embeddings function in the registry by default, as
 
 ```python
+import lancedb
 from lancedb.pydantic import LanceModel, Vector
+from lancedb.embeddings import get_registry
 
 db = lancedb.connect("/tmp/db")
-registry = EmbeddingFunctionRegistry.get_instance()
-func = registry.get("openai").create()
+func = get_registry().get("openai").create(name="text-embedding-ada-002")
 
 class Words(LanceModel):
     text: str = func.SourceField()
     vector: Vector(func.ndims()) = func.VectorField()
 
-table = db.create_table("words", schema=Words)
+table = db.create_table("words", schema=Words, mode="overwrite")
 table.add(
     [
-        {"text": "hello world"}
+        {"text": "hello world"},
         {"text": "goodbye world"}
     ]
 )
@@ -353,6 +397,10 @@ Supported parameters (to be passed in `create` method) are:
 
 Usage Example:
 
 ```python
+import lancedb
+from lancedb.pydantic import LanceModel, Vector
+from lancedb.embeddings import get_registry
+
 model = get_registry().get("bedrock-text").create()
 
 class TextModel(LanceModel):
@@ -387,10 +435,12 @@ This embedding function supports ingesting images as both bytes and urls. You can also ingest them as follows.
 LanceDB supports ingesting images directly from accessible links.
 
 ```python
+import lancedb
+from lancedb.pydantic import LanceModel, Vector
+from lancedb.embeddings import get_registry
 
 db = lancedb.connect(tmp_path)
-registry = EmbeddingFunctionRegistry.get_instance()
-func = registry.get("open-clip").create()
+func = get_registry().get("open-clip").create()
 
 class Images(LanceModel):
     label: str
@@ -465,9 +515,12 @@ This function is registered as `imagebind` and supports Audio, Video and Text modalities.
 Below is an example demonstrating how the API works:
 
 ```python
+import lancedb
+from lancedb.pydantic import LanceModel, Vector
+from lancedb.embeddings import get_registry
+
 db = lancedb.connect(tmp_path)
-registry = EmbeddingFunctionRegistry.get_instance()
-func = registry.get("imagebind").create()
+func = get_registry().get("imagebind").create()
 
 class ImageBindModel(LanceModel):
     text: str
@@ -46,7 +46,7 @@ For this purpose, LanceDB introduces an **embedding functions API**, that allow
 
 ```python
 class Pets(LanceModel):
-    vector: Vector(clip.ndims) = clip.VectorField()
+    vector: Vector(clip.ndims()) = clip.VectorField()
     image_uri: str = clip.SourceField()
 ```
 
@@ -149,7 +149,7 @@ You can also use the integration for adding utility operations in the schema. For example:
 
 ```python
 class Pets(LanceModel):
-    vector: Vector(clip.ndims) = clip.VectorField()
+    vector: Vector(clip.ndims()) = clip.VectorField()
     image_uri: str = clip.SourceField()
 
     @property
@@ -166,4 +166,4 @@ rs[2].image
 
 Now that you have the basic idea about LanceDB embedding functions and the embedding function registry,
-let's dive deeper into defining your own [custom functions](./custom_embedding_function.md).
+let's dive deeper into defining your own [custom functions](./custom_embedding_function.md).
@@ -11,4 +11,64 @@ LanceDB supports 3 methods of working with embeddings.
    that extends the default embedding functions.
 
 For python users, there is also a legacy [with_embeddings API](./legacy.md).
-It is retained for compatibility and will be removed in a future version.
+It is retained for compatibility and will be removed in a future version.
+
+## Quickstart
+
+To get started with embeddings, you can use the built-in embedding functions.
+
+### OpenAI Embedding function
+LanceDB registers the OpenAI embeddings function in the registry as `openai`. You can pass any supported model name to the `create`. By default it uses `"text-embedding-ada-002"`.
+
+```python
+import lancedb
+from lancedb.pydantic import LanceModel, Vector
+from lancedb.embeddings import get_registry
+
+db = lancedb.connect("/tmp/db")
+func = get_registry().get("openai").create(name="text-embedding-ada-002")
+
+class Words(LanceModel):
+    text: str = func.SourceField()
+    vector: Vector(func.ndims()) = func.VectorField()
+
+table = db.create_table("words", schema=Words, mode="overwrite")
+table.add(
+    [
+        {"text": "hello world"},
+        {"text": "goodbye world"}
+    ]
+)
+
+query = "greetings"
+actual = table.search(query).limit(1).to_pydantic(Words)[0]
+print(actual.text)
+```
+
+### Sentence Transformers Embedding function
+LanceDB registers the Sentence Transformers embeddings function in the registry as `sentence-transformers`. You can pass any supported model name to the `create`. By default it uses `"sentence-transformers/paraphrase-MiniLM-L6-v2"`.
+
+```python
+import lancedb
+from lancedb.pydantic import LanceModel, Vector
+from lancedb.embeddings import get_registry
+
+db = lancedb.connect("/tmp/db")
+model = get_registry().get("sentence-transformers").create(name="BAAI/bge-small-en-v1.5", device="cpu")
+
+class Words(LanceModel):
+    text: str = model.SourceField()
+    vector: Vector(model.ndims()) = model.VectorField()
+
+table = db.create_table("words", schema=Words)
+table.add(
+    [
+        {"text": "hello world"},
+        {"text": "goodbye world"}
+    ]
+)
+
+query = "greetings"
+actual = table.search(query).limit(1).to_pydantic(Words)[0]
+print(actual.text)
+```
@@ -299,6 +299,14 @@ LanceDB can also connect to S3-compatible stores, such as MinIO. To do so, you must specify both region and endpoint:
 
 This can also be done with the ``AWS_ENDPOINT`` and ``AWS_DEFAULT_REGION`` environment variables.
 
+!!! tip "Local servers"
+
+    For local development, the server often has a `http` endpoint rather than a
+    secure `https` endpoint. In this case, you must also set the `ALLOW_HTTP`
+    environment variable to `true` to allow non-TLS connections, or pass the
+    storage option `allow_http` as `true`. If you do not do this, you will get
+    an error like `URL scheme is not allowed`.
+
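For illustration, a minimal Python sketch of the `allow_http` storage option described in the tip above, assuming a local MinIO server on port 9000; the bucket name and credentials are placeholders, and `storage_options` support depends on your LanceDB version:

```python
import lancedb

# Connect to a local S3-compatible server over plain HTTP.
db = lancedb.connect(
    "s3://example-bucket/db",  # placeholder bucket
    storage_options={
        "allow_http": "true",                 # permit the non-TLS endpoint
        "endpoint": "http://localhost:9000",  # local MinIO (assumed)
        "aws_access_key_id": "minioadmin",    # MinIO default credentials
        "aws_secret_access_key": "minioadmin",
    },
)
```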
 #### S3 Express
 
 LanceDB supports [S3 Express One Zone](https://aws.amazon.com/s3/storage-classes/express-one-zone/) endpoints, but requires additional configuration. Also, S3 Express endpoints only support connecting from an EC2 instance within the same region.
@@ -13,7 +13,7 @@ Get started using these examples and quick links.
 | Integrations | |
 |---|---:|
 | <h3> LlamaIndex </h3>LlamaIndex is a simple, flexible data framework for connecting custom data sources to large language models. Llama index integrates with LanceDB as the serverless VectorDB. <h3>[Learn More](https://gpt-index.readthedocs.io/en/latest/examples/vector_stores/LanceDBIndexDemo.html) </h3> |<img src="../assets/llama-index.jpg" alt="image" width="150" height="auto">|
-| <h3>Langchain</h3>Langchain allows building applications with LLMs through composability <h3>[Learn More](https://python.langchain.com/docs/integrations/vectorstores/lancedb) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
+| <h3>Langchain</h3>Langchain allows building applications with LLMs through composability <h3>[Learn More](https://lancedb.github.io/lancedb/integrations/langchain/) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
 | <h3>Langchain TS</h3> Javascript bindings for Langchain. It integrates with LanceDB's serverless vectordb allowing you to build powerful AI applications through composability using only serverless functions. <h3>[Learn More](https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/lancedb) | <img src="../assets/langchain.png" alt="image" width="150" height="auto">|
 | <h3>Voxel51</h3> It is an open source toolkit that enables you to build better computer vision workflows by improving the quality of your datasets and delivering insights about your models.<h3>[Learn More](./voxel51.md) | <img src="../assets/voxel.gif" alt="image" width="150" height="auto">|
 | <h3>PromptTools</h3> Offers a set of free, open-source tools for testing and experimenting with models, prompts, and configurations. The core idea is to enable developers to evaluate prompts using familiar interfaces like code and notebooks. You can use it to experiment with different configurations of LanceDB, and test how LanceDB integrates with the LLM of your choice.<h3>[Learn More](./prompttools.md) | <img src="../assets/prompttools.jpeg" alt="image" width="150" height="auto">|
92
docs/src/integrations/langchain.md
Normal file
92
docs/src/integrations/langchain.md
Normal file
@@ -0,0 +1,92 @@
|
||||
# Langchain
|
||||

|
||||
|
||||
## Quick Start
|
||||
You can load your document data using langchain's loaders, for this example we are using `TextLoader` and `OpenAIEmbeddings` as the embedding model.
|
||||
```python
import os

from langchain.document_loaders import TextLoader
from langchain.vectorstores import LanceDB
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter

os.environ["OPENAI_API_KEY"] = "sk-..."

loader = TextLoader("../../modules/state_of_the_union.txt")  # Replace with your data path
documents = loader.load()

documents = CharacterTextSplitter().split_documents(documents)
embeddings = OpenAIEmbeddings()

docsearch = LanceDB.from_documents(documents, embeddings)
query = "What did the president say about Ketanji Brown Jackson"
docs = docsearch.similarity_search(query)
print(docs[0].page_content)
```

## Documentation

In the above example, the `LanceDB` vector store object is created with the `from_documents()` method, a `classmethod` that returns the initialized class object.

You can also use the `LanceDB.from_texts(texts: List[str], embedding: Embeddings)` class method, as sketched below.
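A minimal sketch of the `from_texts` path, assuming the same `OpenAIEmbeddings` model as above (the sample texts are illustrative):

```python
from langchain.vectorstores import LanceDB
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()

# Build the vector store directly from raw strings instead of Documents
docsearch = LanceDB.from_texts(
    texts=["LanceDB is a serverless vector database.", "It stores data in the Lance format."],
    embedding=embeddings,
)
docs = docsearch.similarity_search("What format does LanceDB use?")
```
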
The exhaustive list of parameters for the `LanceDB` vector store is:

- `connection`: (Optional) `lancedb.db.LanceDBConnection` connection object to use. If not provided, a new connection will be created.
- `embedding`: Langchain embedding model.
- `vector_key`: (Optional) Column name to use for vectors in the table. Defaults to `'vector'`.
- `id_key`: (Optional) Column name to use for ids in the table. Defaults to `'id'`.
- `text_key`: (Optional) Column name to use for text in the table. Defaults to `'text'`.
- `table_name`: (Optional) Name of your table in the database. Defaults to `'vectorstore'`.
- `api_key`: (Optional) API key to use for the LanceDB Cloud database. Defaults to `None`.
- `region`: (Optional) Region to use for the LanceDB Cloud database. Only for LanceDB Cloud; defaults to `None`.
- `mode`: (Optional) Mode to use when adding data to the table. Defaults to `'overwrite'`.

```python
db_url = "db://lang_test"  # url of db you created
api_key = "xxxxx"  # your API key
region = "us-east-1-dev"  # your selected region

vector_store = LanceDB(
    uri=db_url,
    api_key=api_key,  # don't include for local usage
    region=region,  # don't include for local usage
    embedding=embeddings,
    table_name='langchain_test'  # Optional
)
```

### Methods

##### add_texts()

Adds texts and stores their embeddings automatically:

- `texts`: `Iterable` of strings to add to the vectorstore.
- `metadatas`: Optional `list[dict()]` of metadatas associated with the texts.
- `ids`: Optional `list` of ids to associate with the texts.

```python
vector_store.add_texts(texts=['test_123'], metadatas=[{'source': 'wiki'}])

# Additionally, to explore the table you can load it into a DataFrame or save it to a CSV file:
tbl = vector_store.get_table()
print("tbl:", tbl)
pd_df = tbl.to_pandas()
pd_df.to_csv("docsearch.csv", index=False)

# You can also create a new vector store object using an older connection object:
vector_store = LanceDB(connection=tbl, embedding=embeddings)
```
For index creation, make sure your table has enough data in it: an ANN index is usually not needed for datasets of ~100K vectors or fewer. For large-scale (>1M) or higher-dimension vectors, it is beneficial to create an ANN index.

##### create_index()

- `col_name`: `Optional[str] = None`
- `vector_col`: `Optional[str] = None`
- `num_partitions`: `Optional[int] = 256`
- `num_sub_vectors`: `Optional[int] = 96`
- `index_cache_size`: `Optional[int] = None`

```python
# for creating a vector index
vector_store.create_index(vector_col='vector', metric='cosine')

# for creating a scalar index (for non-vector columns)
vector_store.create_index(col_name='text')
```

@@ -36,7 +36,7 @@
}
],
"source": [
"!pip install --quiet openai datasets \n",
"!pip install --quiet openai datasets\n",
"!pip install --quiet -U lancedb"
]
},
@@ -213,7 +213,7 @@
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" # OR set the key here as a variable\n",
" os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n",
" \n",
"\n",
"client = OpenAI()\n",
"assert len(client.models.list().data) > 0"
]
@@ -234,9 +234,12 @@
"metadata": {},
"outputs": [],
"source": [
"def embed_func(c): \n",
"def embed_func(c):\n",
" rs = client.embeddings.create(input=c, model=\"text-embedding-ada-002\")\n",
" return [rs.data[0].embedding]"
" return [\n",
" data.embedding\n",
" for data in rs.data\n",
" ]"
]
},
{
@@ -514,7 +517,7 @@
" prompt_start +\n",
" \"\\n\\n---\\n\\n\".join(context.text) +\n",
" prompt_end\n",
" ) \n",
" )\n",
" return prompt"
]
},

@@ -24,7 +24,8 @@ data = [
table = db.create_table("pd_table", data=data)
```

To query the table, first call `to_lance` to convert the table to a "dataset", which is an object that can be queried by DuckDB. Then all you need to do is reference that dataset by the same name in your SQL query.
The `to_lance` method converts the LanceDB table to a `LanceDataset`, which is accessible to DuckDB through the Arrow compatibility layer.
To query the resulting Lance dataset in DuckDB, all you need to do is reference the dataset by the same name in your SQL query.

```python
import duckdb

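# (Hypothetical continuation of the truncated snippet above: DuckDB can scan
# Arrow-compatible Python objects by referencing the variable name directly.)
pd_table = table.to_lance()  # expose the LanceDB table as a Lance dataset

print(duckdb.query("SELECT * FROM pd_table").to_df())
```
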
75  docs/src/reranking/cohere.md  Normal file
@@ -0,0 +1,75 @@

# Cohere Reranker

This re-ranker uses the [Cohere](https://cohere.ai/) API to rerank the search results. You can use it by passing `CohereReranker()` to the `rerank()` method. Note that you'll need to either set the `COHERE_API_KEY` environment variable or pass the `api_key` argument to use this re-ranker.

!!! note
    Supported Query Types: Hybrid, Vector, FTS

```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import CohereReranker

embedder = get_registry().get("sentence-transformers").create()
db = lancedb.connect("~/.lancedb")

class Schema(LanceModel):
    text: str = embedder.SourceField()
    vector: Vector(embedder.ndims()) = embedder.VectorField()

data = [
    {"text": "hello world"},
    {"text": "goodbye world"}
]
tbl = db.create_table("test", schema=Schema, mode="overwrite")
tbl.add(data)
reranker = CohereReranker(api_key="key")

# Run vector search with a reranker
result = tbl.search("hello").rerank(reranker=reranker).to_list()

# Run FTS search with a reranker
result = tbl.search("hello", query_type="fts").rerank(reranker=reranker).to_list()

# Run hybrid search with a reranker
tbl.create_fts_index("text", replace=True)
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
```

Accepted Arguments
----------------

| Argument | Type | Default | Description |
| --- | --- | --- | --- |
| `model_name` | `str` | `"rerank-english-v2.0"` | The name of the reranker model to use. Available Cohere models are `rerank-english-v2.0` and `rerank-multilingual-v2.0`. |
| `column` | `str` | `"text"` | The name of the column to use as input to the reranker model. |
| `top_n` | `int` | `None` | The number of results to return. If `None`, all results are returned. |
| `api_key` | `str` | `None` | The API key for the Cohere API. If not provided, the `COHERE_API_KEY` environment variable is used. |
| `return_score` | `str` | `"relevance"` | Options are `"relevance"` or `"all"`. The type of score to return. If `"relevance"`, only the `_relevance_score` column is returned. If `"all"`, the relevance score is returned along with the vector and/or FTS scores, depending on the query type. |

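For instance, the arguments above can be combined when constructing the reranker (the model choice and `top_n` value here are illustrative):

```python
reranker = CohereReranker(
    model_name="rerank-multilingual-v2.0",  # rerank non-English text
    top_n=5,  # keep only the 5 best results
)
```
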
## Supported Scores for each query type

You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:

### Hybrid Search

| `return_score` | Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ❌ Not Supported | Returning the vector (`_distance`) and FTS (`score`) scores along with the hybrid search score (`_relevance_score`) is not supported |

### Vector Search

| `return_score` | Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the vector score (`_distance`) along with the relevance score (`_relevance_score`) |

### FTS Search

| `return_score` | Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the FTS score (`score`) along with the relevance score (`_relevance_score`) |

71  docs/src/reranking/colbert.md  Normal file
@@ -0,0 +1,71 @@

# ColBERT Reranker

This re-ranker uses the ColBERT model to rerank the search results. You can use it by passing `ColbertReranker()` to the `rerank()` method.

!!! note
    Supported Query Types: Hybrid, Vector, FTS

```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import ColbertReranker

embedder = get_registry().get("sentence-transformers").create()
db = lancedb.connect("~/.lancedb")

class Schema(LanceModel):
    text: str = embedder.SourceField()
    vector: Vector(embedder.ndims()) = embedder.VectorField()

data = [
    {"text": "hello world"},
    {"text": "goodbye world"}
]
tbl = db.create_table("test", schema=Schema, mode="overwrite")
tbl.add(data)
reranker = ColbertReranker()

# Run vector search with a reranker
result = tbl.search("hello").rerank(reranker=reranker).to_list()

# Run FTS search with a reranker
result = tbl.search("hello", query_type="fts").rerank(reranker=reranker).to_list()

# Run hybrid search with a reranker
tbl.create_fts_index("text", replace=True)
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
```

Accepted Arguments
----------------

| Argument | Type | Default | Description |
| --- | --- | --- | --- |
| `model_name` | `str` | `"colbert-ir/colbertv2.0"` | The name of the reranker model to use. |
| `column` | `str` | `"text"` | The name of the column to use as input to the reranker model. |
| `device` | `str` | `None` | The device to use for the model. If `None`, uses `"cuda"` if available, otherwise `"cpu"`. |
| `return_score` | `str` | `"relevance"` | Options are `"relevance"` or `"all"`. The type of score to return. If `"relevance"`, only the `_relevance_score` column is returned. If `"all"`, the relevance score is returned along with the vector and/or FTS scores, depending on the query type. |

## Supported Scores for each query type

You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:

### Hybrid Search

| `return_score` | Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ❌ Not Supported | Returning the vector (`_distance`) and FTS (`score`) scores along with the hybrid search score (`_relevance_score`) is not supported |

### Vector Search

| `return_score` | Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the vector score (`_distance`) along with the relevance score (`_relevance_score`) |

### FTS Search

| `return_score` | Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the FTS score (`score`) along with the relevance score (`_relevance_score`) |

70  docs/src/reranking/cross_encoder.md  Normal file
@@ -0,0 +1,70 @@

# Cross Encoder Reranker

This re-ranker uses a Cross Encoder model from sentence-transformers to rerank the search results. You can use it by passing `CrossEncoderReranker()` to the `rerank()` method.

!!! note
    Supported Query Types: Hybrid, Vector, FTS

```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import CrossEncoderReranker

embedder = get_registry().get("sentence-transformers").create()
db = lancedb.connect("~/.lancedb")

class Schema(LanceModel):
    text: str = embedder.SourceField()
    vector: Vector(embedder.ndims()) = embedder.VectorField()

data = [
    {"text": "hello world"},
    {"text": "goodbye world"}
]
tbl = db.create_table("test", schema=Schema, mode="overwrite")
tbl.add(data)
reranker = CrossEncoderReranker()

# Run vector search with a reranker
result = tbl.search("hello").rerank(reranker=reranker).to_list()

# Run FTS search with a reranker
result = tbl.search("hello", query_type="fts").rerank(reranker=reranker).to_list()

# Run hybrid search with a reranker
tbl.create_fts_index("text", replace=True)
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
```

Accepted Arguments
----------------

| Argument | Type | Default | Description |
| --- | --- | --- | --- |
| `model_name` | `str` | `"cross-encoder/ms-marco-TinyBERT-L-6"` | The name of the reranker model to use. |
| `column` | `str` | `"text"` | The name of the column to use as input to the cross encoder model. |
| `device` | `str` | `None` | The device to use for the cross encoder model. If `None`, uses `"cuda"` if available, otherwise `"cpu"`. |
| `return_score` | `str` | `"relevance"` | Options are `"relevance"` or `"all"`. The type of score to return. If `"relevance"`, only the `_relevance_score` column is returned. If `"all"`, the relevance score is returned along with the vector and/or FTS scores, depending on the query type. |

## Supported Scores for each query type

You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:

### Hybrid Search

| `return_score` | Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ❌ Not Supported | Returning the vector (`_distance`) and FTS (`score`) scores along with the hybrid search score (`_relevance_score`) is not supported |

### Vector Search

| `return_score` | Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the vector score (`_distance`) along with the relevance score (`_relevance_score`) |

### FTS Search

| `return_score` | Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the FTS score (`score`) along with the relevance score (`_relevance_score`) |

88  docs/src/reranking/custom_reranker.md  Normal file
@@ -0,0 +1,88 @@

## Building Custom Rerankers

You can build your own custom reranker by subclassing the `Reranker` class and implementing the `rerank_hybrid()` method. Optionally, you can also implement the `rerank_vector()` and `rerank_fts()` methods if you want to support reranking for vector and FTS search separately.

The skeleton below shows a custom reranker that combines the results of semantic and full-text search.

The `Reranker` base interface comes with a `merge_results()` method that can be used to combine the results of semantic and full-text search. This is a vanilla merging algorithm that simply concatenates the results and removes duplicates without taking the scores into consideration; it keeps only the first copy of each row encountered. This works well in cases that don't require the semantic and full-text scores to combine the results. If you want to use the scores, or want to support `return_score="all"`, you'll need to implement your own merging algorithm.

```python
import pyarrow as pa

from lancedb.rerankers import Reranker


class MyReranker(Reranker):
    def __init__(self, param1, param2, ..., return_score="relevance"):
        super().__init__(return_score)
        self.param1 = param1
        self.param2 = param2

    def rerank_hybrid(self, query: str, vector_results: pa.Table, fts_results: pa.Table):
        # Use the built-in merging function
        combined_result = self.merge_results(vector_results, fts_results)

        # Do something with the combined results
        # ...

        # Return the combined results
        return combined_result

    def rerank_vector(self, query: str, vector_results: pa.Table):
        # Do something with the vector results
        # ...

        # Return the vector results
        return vector_results

    def rerank_fts(self, query: str, fts_results: pa.Table):
        # Do something with the FTS results
        # ...

        # Return the FTS results
        return fts_results
```

### Example of a Custom Reranker

For the sake of simplicity, let's build a custom reranker that just enhances the Cohere Reranker by accepting a filter query, and accepts other `CohereReranker` params as kwargs.

```python
from typing import List, Union

import pandas as pd
import pyarrow as pa  # needed for the pa.Table annotations below

from lancedb.rerankers import CohereReranker


class ModifiedCohereReranker(CohereReranker):
    def __init__(self, filters: Union[str, List[str]], **kwargs):
        super().__init__(**kwargs)
        filters = filters if isinstance(filters, list) else [filters]
        self.filters = filters

    def rerank_hybrid(self, query: str, vector_results: pa.Table, fts_results: pa.Table) -> pa.Table:
        combined_result = super().rerank_hybrid(query, vector_results, fts_results)
        df = combined_result.to_pandas()
        for filter in self.filters:
            df = df.query("not text.str.contains(@filter)")

        return pa.Table.from_pandas(df)

    def rerank_vector(self, query: str, vector_results: pa.Table) -> pa.Table:
        vector_results = super().rerank_vector(query, vector_results)
        df = vector_results.to_pandas()
        for filter in self.filters:
            df = df.query("not text.str.contains(@filter)")

        return pa.Table.from_pandas(df)

    def rerank_fts(self, query: str, fts_results: pa.Table) -> pa.Table:
        fts_results = super().rerank_fts(query, fts_results)
        df = fts_results.to_pandas()
        for filter in self.filters:
            df = df.query("not text.str.contains(@filter)")

        return pa.Table.from_pandas(df)
```
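
A hypothetical usage sketch, reusing the `tbl` from the earlier reranker examples (the filter string is illustrative):

```python
# Drop any result whose text mentions "goodbye", then rerank with Cohere
reranker = ModifiedCohereReranker(filters="goodbye", api_key="key")
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
```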

!!! tip
    The `vector_results` and `fts_results` are pyarrow tables. Learn more about pyarrow tables [here](https://arrow.apache.org/docs/python). They can be converted to other data types, like pandas DataFrames, pydicts, or pylists.

    For example, you can convert them to pandas DataFrames using the `to_pandas()` method and perform any operations you want. After you are done, you can convert the DataFrame back to a pyarrow table using the `pa.Table.from_pandas()` method and return it.

60  docs/src/reranking/index.md  Normal file
@@ -0,0 +1,60 @@

Reranking is the process of reordering a list of items based on some criteria. In the context of search, it is used to reorder the results returned by a search engine. This is useful when the initial ranking is not satisfactory, or when the user has provided additional information that can improve the ranking.

LanceDB comes with some built-in rerankers. Some of the rerankers that are available in LanceDB are:

| Reranker | Description | Supported Query Types |
| --- | --- | --- |
| `LinearCombinationReranker` | Reranks search results based on a linear combination of FTS and vector search scores | Hybrid |
| `CohereReranker` | Uses the Cohere rerank API to rerank results | Vector, FTS, Hybrid |
| `CrossEncoderReranker` | Uses a cross-encoder model to rerank search results | Vector, FTS, Hybrid |
| `ColbertReranker` | Uses a ColBERT model to rerank search results | Vector, FTS, Hybrid |
| `OpenaiReranker` (Experimental) | Uses OpenAI's chat model to rerank search results | Vector, FTS, Hybrid |

## Using a Reranker

Using rerankers is optional for vector and FTS searches; for hybrid search, a reranker is required. To use a reranker, create an instance of it and pass it to the `rerank()` method of the query builder.

```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import CohereReranker

embedder = get_registry().get("sentence-transformers").create()
db = lancedb.connect("~/.lancedb")

class Schema(LanceModel):
    text: str = embedder.SourceField()
    vector: Vector(embedder.ndims()) = embedder.VectorField()

data = [
    {"text": "hello world"},
    {"text": "goodbye world"}
]
tbl = db.create_table("test", schema=Schema)
tbl.add(data)
reranker = CohereReranker(api_key="your_api_key")

# Run vector search with a reranker
result = tbl.search("hello").rerank(reranker=reranker).to_list()

# Run FTS search with a reranker
result = tbl.search("hello", query_type="fts").rerank(reranker=reranker).to_list()

# Run hybrid search with a reranker
tbl.create_fts_index("text")
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
```

## Available Rerankers

LanceDB comes with some built-in rerankers. Here are some of the rerankers that are available in LanceDB:

- [Cohere Reranker](./cohere.md)
- [Cross Encoder Reranker](./cross_encoder.md)
- [ColBERT Reranker](./colbert.md)
- [OpenAI Reranker](./openai.md)
- [Linear Combination Reranker](./linear_combination.md)

## Creating Custom Rerankers

LanceDB also allows you to create custom rerankers by extending the base `Reranker` class. A custom reranker should implement a `rerank_hybrid()` method that takes the search results and returns a reranked version of them. This is covered in more detail in the [Creating Custom Rerankers](./custom_reranker.md) section.

52  docs/src/reranking/linear_combination.md  Normal file
@@ -0,0 +1,52 @@

# Linear Combination Reranker

This is the default re-ranker used by LanceDB hybrid search. It combines the results of semantic and full-text search using a linear combination of the scores. The weight for the linear combination can be specified; it defaults to 0.7, i.e., 70% weight for the semantic search score and 30% for the full-text search score.

!!! note
    Supported Query Types: Hybrid

```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import LinearCombinationReranker

embedder = get_registry().get("sentence-transformers").create()
db = lancedb.connect("~/.lancedb")

class Schema(LanceModel):
    text: str = embedder.SourceField()
    vector: Vector(embedder.ndims()) = embedder.VectorField()

data = [
    {"text": "hello world"},
    {"text": "goodbye world"}
]
tbl = db.create_table("test", schema=Schema, mode="overwrite")
tbl.add(data)
reranker = LinearCombinationReranker()

# Run hybrid search with a reranker
tbl.create_fts_index("text", replace=True)
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
```

Accepted Arguments
----------------

| Argument | Type | Default | Description |
| --- | --- | --- | --- |
| `weight` | `float` | `0.7` | The weight to use for the semantic search score. The weight for the full-text search score is `1 - weight`. |
| `return_score` | `str` | `"relevance"` | Options are `"relevance"` or `"all"`. The type of score to return. If `"relevance"`, only the `_relevance_score` column is returned. If `"all"`, all scores from the vector and FTS search are returned along with the relevance score. |

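As a rough sketch of the idea (ignoring the normalization the reranker applies to raw scores before combining them), the final relevance is a weighted sum:

```python
weight = 0.7

def combine(vector_score: float, fts_score: float) -> float:
    # Weighted sum of the two (already normalized) scores
    return weight * vector_score + (1 - weight) * fts_score

print(combine(0.9, 0.4))  # 0.7 * 0.9 + 0.3 * 0.4 = 0.75
```
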
## Supported Scores for each query type

You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:

### Hybrid Search

| `return_score` | Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the vector (`_distance`) and FTS (`score`) scores along with the hybrid search score (`_relevance_score`) |

73  docs/src/reranking/openai.md  Normal file
@@ -0,0 +1,73 @@

# OpenAI Reranker (Experimental)

This re-ranker uses an OpenAI chat model to rerank the search results. You can use it by passing `OpenaiReranker()` to the `rerank()` method.

!!! note
    Supported Query Types: Hybrid, Vector, FTS

!!! warning
    This re-ranker is experimental. OpenAI doesn't have a dedicated reranking model, so we are using the chat model for reranking.

```python
import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import OpenaiReranker

embedder = get_registry().get("sentence-transformers").create()
db = lancedb.connect("~/.lancedb")

class Schema(LanceModel):
    text: str = embedder.SourceField()
    vector: Vector(embedder.ndims()) = embedder.VectorField()

data = [
    {"text": "hello world"},
    {"text": "goodbye world"}
]
tbl = db.create_table("test", schema=Schema, mode="overwrite")
tbl.add(data)
reranker = OpenaiReranker()

# Run vector search with a reranker
result = tbl.search("hello").rerank(reranker=reranker).to_list()

# Run FTS search with a reranker
result = tbl.search("hello", query_type="fts").rerank(reranker=reranker).to_list()

# Run hybrid search with a reranker
tbl.create_fts_index("text", replace=True)
result = tbl.search("hello", query_type="hybrid").rerank(reranker=reranker).to_list()
```

Accepted Arguments
----------------

| Argument | Type | Default | Description |
| --- | --- | --- | --- |
| `model_name` | `str` | `"gpt-4-turbo-preview"` | The name of the reranker model to use. |
| `column` | `str` | `"text"` | The name of the column to use as input to the reranker model. |
| `return_score` | `str` | `"relevance"` | Options are `"relevance"` or `"all"`. The type of score to return. If `"relevance"`, only the `_relevance_score` column is returned. If `"all"`, the relevance score is returned along with the vector and/or FTS scores, depending on the query type. |
| `api_key` | `str` | `None` | The API key to use. If `None`, the `OPENAI_API_KEY` environment variable is used. |

## Supported Scores for each query type

You can specify the type of scores you want the reranker to return. The following are the supported scores for each query type:

### Hybrid Search

| `return_score` | Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ❌ Not Supported | Returning the vector (`_distance`) and FTS (`score`) scores along with the hybrid search score (`_relevance_score`) is not supported |

### Vector Search

| `return_score` | Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the vector score (`_distance`) along with the relevance score (`_relevance_score`) |

### FTS Search

| `return_score` | Status | Description |
| --- | --- | --- |
| `relevance` | ✅ Supported | Returns only the `_relevance_score` column |
| `all` | ✅ Supported | Returns the FTS score (`score`) along with the relevance score (`_relevance_score`) |

@@ -8,6 +8,7 @@ excluded_globs = [
"../src/embedding.md",
"../src/examples/*.md",
"../src/integrations/voxel51.md",
"../src/integrations/langchain.md",
"../src/guides/tables.md",
"../src/python/duckdb.md",
"../src/embeddings/*.md",
@@ -15,6 +16,7 @@ excluded_globs = [
"../src/ann_indexes.md",
"../src/basic.md",
"../src/hybrid_search/hybrid_search.md",
"../src/reranking/*.md",
]

python_prefix = "py"

44  node/package-lock.json  generated
@@ -1,12 +1,12 @@
{
"name": "vectordb",
"version": "0.4.16",
"version": "0.4.20",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "vectordb",
"version": "0.4.16",
"version": "0.4.20",
"cpu": [
"x64",
"arm64"
@@ -52,11 +52,11 @@
"uuid": "^9.0.0"
},
"optionalDependencies": {
"@lancedb/vectordb-darwin-arm64": "0.4.16",
"@lancedb/vectordb-darwin-x64": "0.4.16",
"@lancedb/vectordb-linux-arm64-gnu": "0.4.16",
"@lancedb/vectordb-linux-x64-gnu": "0.4.16",
"@lancedb/vectordb-win32-x64-msvc": "0.4.16"
"@lancedb/vectordb-darwin-arm64": "0.4.20",
"@lancedb/vectordb-darwin-x64": "0.4.20",
"@lancedb/vectordb-linux-arm64-gnu": "0.4.20",
"@lancedb/vectordb-linux-x64-gnu": "0.4.20",
"@lancedb/vectordb-win32-x64-msvc": "0.4.20"
},
"peerDependencies": {
"@apache-arrow/ts": "^14.0.2",
@@ -334,9 +334,9 @@
}
},
"node_modules/@lancedb/vectordb-darwin-arm64": {
"version": "0.4.16",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.4.16.tgz",
"integrity": "sha512-RtuizzrZIVDYQ4ZZIMQRHGuV0DvOV93lyvivJJBLP1zCORMHEtEduaVbFE/+H0OCo0oqPsKXEpbc0nUEXKQqRg==",
"version": "0.4.20",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-arm64/-/vectordb-darwin-arm64-0.4.20.tgz",
"integrity": "sha512-ffP2K4sA5mQTgePyARw1y8dPN996FmpvyAYoWO+TSItaXlhcXvc+KVa5udNMCZMDYeEnEv2Xpj6k4PwW3oBz+A==",
"cpu": [
"arm64"
],
@@ -346,9 +346,9 @@
]
},
"node_modules/@lancedb/vectordb-darwin-x64": {
"version": "0.4.16",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.4.16.tgz",
"integrity": "sha512-bxuh0scgVzAryZScRiTS3Z6hMZA3ekBTataDrlEJ/ddPTcAm14oQb8qrQu3mjWsYPMxlFHpSLMAJSU9SkWZbgg==",
"version": "0.4.20",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-darwin-x64/-/vectordb-darwin-x64-0.4.20.tgz",
"integrity": "sha512-GSYsXE20RIehDu30FjREhJdEzhnwOTV7ZsrSXagStzLY1gr7pyd7sfqxmmUtdD09di7LnQoiM71AOpPTa01YwQ==",
"cpu": [
"x64"
],
@@ -358,9 +358,9 @@
]
},
"node_modules/@lancedb/vectordb-linux-arm64-gnu": {
"version": "0.4.16",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.4.16.tgz",
"integrity": "sha512-howoWlHsOwDHm3jl1BC1a1NZ/MJR4J98jSLLfzmmQu071fj5IrZmKv1RyCYNWLm3KHxl+5XVkWxJOghb9x0ByQ==",
"version": "0.4.20",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-arm64-gnu/-/vectordb-linux-arm64-gnu-0.4.20.tgz",
"integrity": "sha512-FpNOjOsz3nJVm6EBGyNgbOW2aFhsWZ/igeY45Z8hbZaaK2YBwrg/DASoNlUzgv6IR8cUaGJ2irNVJfsKR2cG6g==",
"cpu": [
"arm64"
],
@@ -370,9 +370,9 @@
]
},
"node_modules/@lancedb/vectordb-linux-x64-gnu": {
"version": "0.4.16",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.4.16.tgz",
"integrity": "sha512-oKx97pP8fnh+pm1mSVZ2+1VPqgT073iHT5nt+3wg7HP8A9XMGlCpdDHM/vC2NNCjbb9j64I5Tq/2x7s33bUfaw==",
"version": "0.4.20",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-linux-x64-gnu/-/vectordb-linux-x64-gnu-0.4.20.tgz",
"integrity": "sha512-pOqWjrRZQSrLTlQPkjidRii7NZDw8Xu9pN6ouVu2JAK8n81FXaPtFCyAI+Y3v9GpnYDN0rvD4eQ36aHAVPsa2g==",
"cpu": [
"x64"
],
@@ -382,9 +382,9 @@
]
},
"node_modules/@lancedb/vectordb-win32-x64-msvc": {
"version": "0.4.16",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.4.16.tgz",
"integrity": "sha512-klG2HHeQ/CuLVFF3ZKJ61BIurEjqtTBxFh0CXL5aCG+pbA55IfzDDyhpGk2yCldZcF/XuNIufyRAqhJPlQzuVg==",
"version": "0.4.20",
"resolved": "https://registry.npmjs.org/@lancedb/vectordb-win32-x64-msvc/-/vectordb-win32-x64-msvc-0.4.20.tgz",
"integrity": "sha512-5J5SsYSJ7jRCmU/sgwVHdrGz43B/7R2T9OEoFTKyVAtqTZdu75rkytXyn9SyEayXVhlUOaw76N0ASm0hAoDS/A==",
"cpu": [
"x64"
],

@@ -1,6 +1,6 @@
{
"name": "vectordb",
"version": "0.4.17",
"version": "0.4.20",
"description": " Serverless, low-latency vector database for AI applications",
"main": "dist/index.js",
"types": "dist/index.d.ts",
@@ -88,10 +88,10 @@
}
},
"optionalDependencies": {
"@lancedb/vectordb-darwin-arm64": "0.4.17",
"@lancedb/vectordb-darwin-x64": "0.4.17",
"@lancedb/vectordb-linux-arm64-gnu": "0.4.17",
"@lancedb/vectordb-linux-x64-gnu": "0.4.17",
"@lancedb/vectordb-win32-x64-msvc": "0.4.17"
"@lancedb/vectordb-darwin-arm64": "0.4.20",
"@lancedb/vectordb-darwin-x64": "0.4.20",
"@lancedb/vectordb-linux-arm64-gnu": "0.4.20",
"@lancedb/vectordb-linux-x64-gnu": "0.4.20",
"@lancedb/vectordb-win32-x64-msvc": "0.4.20"
}
}

@@ -27,23 +27,23 @@ import {
|
||||
RecordBatch,
|
||||
makeData,
|
||||
Struct,
|
||||
Float,
|
||||
type Float,
|
||||
DataType,
|
||||
Binary,
|
||||
Float32
|
||||
} from 'apache-arrow'
|
||||
import { type EmbeddingFunction } from './index'
|
||||
import { sanitizeSchema } from './sanitize'
|
||||
} from "apache-arrow";
|
||||
import { type EmbeddingFunction } from "./index";
|
||||
import { sanitizeSchema } from "./sanitize";
|
||||
|
||||
/*
|
||||
* Options to control how a column should be converted to a vector array
|
||||
*/
|
||||
export class VectorColumnOptions {
|
||||
/** Vector column type. */
|
||||
type: Float = new Float32()
|
||||
type: Float = new Float32();
|
||||
|
||||
constructor (values?: Partial<VectorColumnOptions>) {
|
||||
Object.assign(this, values)
|
||||
constructor(values?: Partial<VectorColumnOptions>) {
|
||||
Object.assign(this, values);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ export class MakeArrowTableOptions {
|
||||
* The schema must be specified if there are no records (e.g. to make
|
||||
* an empty table)
|
||||
*/
|
||||
schema?: Schema
|
||||
schema?: Schema;
|
||||
|
||||
/*
|
||||
* Mapping from vector column name to expected type
|
||||
@@ -80,7 +80,9 @@ export class MakeArrowTableOptions {
|
||||
*/
|
||||
vectorColumns: Record<string, VectorColumnOptions> = {
|
||||
vector: new VectorColumnOptions()
|
||||
}
|
||||
};
|
||||
|
||||
embeddings?: EmbeddingFunction<any>;
|
||||
|
||||
/**
|
||||
* If true then string columns will be encoded with dictionary encoding
|
||||
@@ -91,10 +93,10 @@ export class MakeArrowTableOptions {
|
||||
*
|
||||
* If `schema` is provided then this property is ignored.
|
||||
*/
|
||||
dictionaryEncodeStrings: boolean = false
|
||||
dictionaryEncodeStrings: boolean = false;
|
||||
|
||||
constructor (values?: Partial<MakeArrowTableOptions>) {
|
||||
Object.assign(this, values)
|
||||
constructor(values?: Partial<MakeArrowTableOptions>) {
|
||||
Object.assign(this, values);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -193,59 +195,68 @@ export class MakeArrowTableOptions {
|
||||
* assert.deepEqual(table.schema, schema)
|
||||
* ```
|
||||
*/
|
||||
export function makeArrowTable (
|
||||
export function makeArrowTable(
|
||||
data: Array<Record<string, any>>,
|
||||
options?: Partial<MakeArrowTableOptions>
|
||||
): ArrowTable {
|
||||
if (data.length === 0 && (options?.schema === undefined || options?.schema === null)) {
|
||||
throw new Error('At least one record or a schema needs to be provided')
|
||||
if (
|
||||
data.length === 0 &&
|
||||
(options?.schema === undefined || options?.schema === null)
|
||||
) {
|
||||
throw new Error("At least one record or a schema needs to be provided");
|
||||
}
|
||||
|
||||
const opt = new MakeArrowTableOptions(options !== undefined ? options : {})
|
||||
const opt = new MakeArrowTableOptions(options !== undefined ? options : {});
|
||||
if (opt.schema !== undefined && opt.schema !== null) {
|
||||
opt.schema = sanitizeSchema(opt.schema)
|
||||
opt.schema = sanitizeSchema(opt.schema);
|
||||
opt.schema = validateSchemaEmbeddings(opt.schema, data, opt.embeddings);
|
||||
}
|
||||
const columns: Record<string, Vector> = {}
|
||||
|
||||
const columns: Record<string, Vector> = {};
|
||||
// TODO: sample dataset to find missing columns
|
||||
// Prefer the field ordering of the schema, if present
|
||||
const columnNames = ((opt.schema) != null) ? (opt.schema.names as string[]) : Object.keys(data[0])
|
||||
const columnNames =
|
||||
opt.schema != null ? (opt.schema.names as string[]) : Object.keys(data[0]);
|
||||
for (const colName of columnNames) {
|
||||
if (data.length !== 0 && !Object.prototype.hasOwnProperty.call(data[0], colName)) {
|
||||
if (
|
||||
data.length !== 0 &&
|
||||
!Object.prototype.hasOwnProperty.call(data[0], colName)
|
||||
) {
|
||||
// The field is present in the schema, but not in the data, skip it
|
||||
continue
|
||||
continue;
|
||||
}
|
||||
// Extract a single column from the records (transpose from row-major to col-major)
|
||||
let values = data.map((datum) => datum[colName])
|
||||
let values = data.map((datum) => datum[colName]);
|
||||
|
||||
// By default (type === undefined) arrow will infer the type from the JS type
|
||||
let type
|
||||
let type;
|
||||
if (opt.schema !== undefined) {
|
||||
// If there is a schema provided, then use that for the type instead
|
||||
type = opt.schema?.fields.filter((f) => f.name === colName)[0]?.type
|
||||
type = opt.schema?.fields.filter((f) => f.name === colName)[0]?.type;
|
||||
if (DataType.isInt(type) && type.bitWidth === 64) {
|
||||
// wrap in BigInt to avoid bug: https://github.com/apache/arrow/issues/40051
|
||||
values = values.map((v) => {
|
||||
if (v === null) {
|
||||
return v
|
||||
return v;
|
||||
}
|
||||
return BigInt(v)
|
||||
})
|
||||
return BigInt(v);
|
||||
});
|
||||
}
|
||||
} else {
|
||||
// Otherwise, check to see if this column is one of the vector columns
|
||||
// defined by opt.vectorColumns and, if so, use the fixed size list type
|
||||
const vectorColumnOptions = opt.vectorColumns[colName]
|
||||
const vectorColumnOptions = opt.vectorColumns[colName];
|
||||
if (vectorColumnOptions !== undefined) {
|
||||
type = newVectorType(values[0].length, vectorColumnOptions.type)
|
||||
type = newVectorType(values[0].length, vectorColumnOptions.type);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
// Convert an Array of JS values to an arrow vector
|
||||
columns[colName] = makeVector(values, type, opt.dictionaryEncodeStrings)
|
||||
columns[colName] = makeVector(values, type, opt.dictionaryEncodeStrings);
|
||||
} catch (error: unknown) {
|
||||
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
|
||||
throw Error(`Could not convert column "${colName}" to Arrow: ${error}`)
|
||||
throw Error(`Could not convert column "${colName}" to Arrow: ${error}`);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -260,97 +271,116 @@ export function makeArrowTable (
|
||||
// To work around this we first create a table with the wrong schema and
|
||||
// then patch the schema of the batches so we can use
|
||||
// `new ArrowTable(schema, batches)` which does not do any schema inference
|
||||
const firstTable = new ArrowTable(columns)
|
||||
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
|
||||
const batchesFixed = firstTable.batches.map(batch => new RecordBatch(opt.schema!, batch.data))
|
||||
return new ArrowTable(opt.schema, batchesFixed)
|
||||
const firstTable = new ArrowTable(columns);
|
||||
const batchesFixed = firstTable.batches.map(
|
||||
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
|
||||
(batch) => new RecordBatch(opt.schema!, batch.data)
|
||||
);
|
||||
return new ArrowTable(opt.schema, batchesFixed);
|
||||
} else {
|
||||
return new ArrowTable(columns)
|
||||
return new ArrowTable(columns);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create an empty Arrow table with the provided schema
|
||||
*/
|
||||
export function makeEmptyTable (schema: Schema): ArrowTable {
|
||||
return makeArrowTable([], { schema })
|
||||
export function makeEmptyTable(schema: Schema): ArrowTable {
|
||||
return makeArrowTable([], { schema });
|
||||
}
|
||||
|
||||
// Helper function to convert Array<Array<any>> to a variable sized list array
|
||||
function makeListVector (lists: any[][]): Vector<any> {
|
||||
function makeListVector(lists: any[][]): Vector<any> {
|
||||
if (lists.length === 0 || lists[0].length === 0) {
|
||||
throw Error('Cannot infer list vector from empty array or empty list')
|
||||
throw Error("Cannot infer list vector from empty array or empty list");
|
||||
}
|
||||
const sampleList = lists[0]
|
||||
let inferredType
|
||||
const sampleList = lists[0];
|
||||
let inferredType;
|
||||
try {
|
||||
const sampleVector = makeVector(sampleList)
|
||||
inferredType = sampleVector.type
|
||||
const sampleVector = makeVector(sampleList);
|
||||
inferredType = sampleVector.type;
|
||||
} catch (error: unknown) {
|
||||
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
|
||||
throw Error(`Cannot infer list vector. Cannot infer inner type: ${error}`)
|
||||
throw Error(`Cannot infer list vector. Cannot infer inner type: ${error}`);
|
||||
}
|
||||
|
||||
const listBuilder = makeBuilder({
|
||||
type: new List(new Field('item', inferredType, true))
|
||||
})
|
||||
type: new List(new Field("item", inferredType, true))
|
||||
});
|
||||
for (const list of lists) {
|
||||
listBuilder.append(list)
|
||||
listBuilder.append(list);
|
||||
}
|
||||
return listBuilder.finish().toVector()
|
||||
return listBuilder.finish().toVector();
|
||||
}
|
||||
|
||||
// Helper function to convert an Array of JS values to an Arrow Vector
|
||||
function makeVector (values: any[], type?: DataType, stringAsDictionary?: boolean): Vector<any> {
|
||||
function makeVector(
|
||||
values: any[],
|
||||
type?: DataType,
|
||||
stringAsDictionary?: boolean
|
||||
): Vector<any> {
|
||||
if (type !== undefined) {
|
||||
// No need for inference, let Arrow create it
|
||||
return vectorFromArray(values, type)
|
||||
return vectorFromArray(values, type);
|
||||
}
|
||||
if (values.length === 0) {
|
||||
throw Error('makeVector requires at least one value or the type must be specfied')
|
||||
throw Error(
|
||||
"makeVector requires at least one value or the type must be specfied"
|
||||
);
|
||||
}
|
||||
const sampleValue = values.find(val => val !== null && val !== undefined)
|
||||
const sampleValue = values.find((val) => val !== null && val !== undefined);
|
||||
if (sampleValue === undefined) {
|
||||
throw Error('makeVector cannot infer the type if all values are null or undefined')
|
||||
throw Error(
|
||||
"makeVector cannot infer the type if all values are null or undefined"
|
||||
);
|
||||
}
|
||||
if (Array.isArray(sampleValue)) {
|
||||
// Default Arrow inference doesn't handle list types
|
||||
return makeListVector(values)
|
||||
return makeListVector(values);
|
||||
} else if (Buffer.isBuffer(sampleValue)) {
|
||||
// Default Arrow inference doesn't handle Buffer
|
||||
return vectorFromArray(values, new Binary())
|
||||
} else if (!(stringAsDictionary ?? false) && (typeof sampleValue === 'string' || sampleValue instanceof String)) {
|
||||
return vectorFromArray(values, new Binary());
|
||||
} else if (
|
||||
!(stringAsDictionary ?? false) &&
|
||||
(typeof sampleValue === "string" || sampleValue instanceof String)
|
||||
) {
|
||||
// If the type is string then don't use Arrow's default inference unless dictionaries are requested
|
||||
// because it will always use dictionary encoding for strings
|
||||
return vectorFromArray(values, new Utf8())
|
||||
return vectorFromArray(values, new Utf8());
|
||||
} else {
|
||||
// Convert a JS array of values to an arrow vector
|
||||
return vectorFromArray(values)
|
||||
return vectorFromArray(values);
|
||||
}
|
||||
}
|
||||
|
||||
async function applyEmbeddings<T> (table: ArrowTable, embeddings?: EmbeddingFunction<T>, schema?: Schema): Promise<ArrowTable> {
|
||||
async function applyEmbeddings<T>(
|
||||
table: ArrowTable,
|
||||
embeddings?: EmbeddingFunction<T>,
|
||||
schema?: Schema
|
||||
): Promise<ArrowTable> {
|
||||
if (embeddings == null) {
|
||||
return table
|
||||
return table;
|
||||
}
|
||||
if (schema !== undefined && schema !== null) {
|
||||
schema = sanitizeSchema(schema)
|
||||
schema = sanitizeSchema(schema);
|
||||
}
|
||||
|
||||
// Convert from ArrowTable to Record<String, Vector>
|
||||
const colEntries = [...Array(table.numCols).keys()].map((_, idx) => {
|
||||
const name = table.schema.fields[idx].name
|
||||
const name = table.schema.fields[idx].name;
|
||||
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
|
||||
const vec = table.getChildAt(idx)!
|
||||
return [name, vec]
|
||||
})
|
||||
const newColumns = Object.fromEntries(colEntries)
|
||||
const vec = table.getChildAt(idx)!;
|
||||
return [name, vec];
|
||||
});
|
||||
const newColumns = Object.fromEntries(colEntries);
|
||||
|
||||
const sourceColumn = newColumns[embeddings.sourceColumn]
|
||||
const destColumn = embeddings.destColumn ?? 'vector'
|
||||
const innerDestType = embeddings.embeddingDataType ?? new Float32()
|
||||
const sourceColumn = newColumns[embeddings.sourceColumn];
|
||||
const destColumn = embeddings.destColumn ?? "vector";
|
||||
const innerDestType = embeddings.embeddingDataType ?? new Float32();
|
||||
if (sourceColumn === undefined) {
|
||||
throw new Error(`Cannot apply embedding function because the source column '${embeddings.sourceColumn}' was not present in the data`)
|
||||
throw new Error(
|
||||
`Cannot apply embedding function because the source column '${embeddings.sourceColumn}' was not present in the data`
|
||||
);
|
||||
}
|
||||
|
||||
if (table.numRows === 0) {
|
||||
@@ -358,45 +388,60 @@ async function applyEmbeddings<T> (table: ArrowTable, embeddings?: EmbeddingFunc
|
||||
// We have an empty table and it already has the embedding column so no work needs to be done
|
||||
// Note: we don't return an error like we did below because this is a common occurrence. For example,
|
||||
// if we call convertToTable with 0 records and a schema that includes the embedding
|
||||
return table
|
||||
return table;
|
||||
}
|
||||
if (embeddings.embeddingDimension !== undefined) {
|
||||
const destType = newVectorType(embeddings.embeddingDimension, innerDestType)
|
||||
newColumns[destColumn] = makeVector([], destType)
|
||||
const destType = newVectorType(
|
||||
embeddings.embeddingDimension,
|
||||
innerDestType
|
||||
);
|
||||
newColumns[destColumn] = makeVector([], destType);
|
||||
} else if (schema != null) {
|
||||
const destField = schema.fields.find(f => f.name === destColumn)
|
||||
const destField = schema.fields.find((f) => f.name === destColumn);
|
||||
if (destField != null) {
|
||||
newColumns[destColumn] = makeVector([], destField.type)
|
||||
newColumns[destColumn] = makeVector([], destField.type);
|
||||
} else {
|
||||
throw new Error(`Attempt to apply embeddings to an empty table failed because schema was missing embedding column '${destColumn}'`)
|
||||
throw new Error(
|
||||
`Attempt to apply embeddings to an empty table failed because schema was missing embedding column '${destColumn}'`
|
||||
);
|
||||
}
|
||||
} else {
|
||||
throw new Error('Attempt to apply embeddings to an empty table when the embeddings function does not specify `embeddingDimension`')
|
||||
throw new Error(
|
||||
"Attempt to apply embeddings to an empty table when the embeddings function does not specify `embeddingDimension`"
|
||||
);
|
||||
}
|
||||
} else {
|
||||
if (Object.prototype.hasOwnProperty.call(newColumns, destColumn)) {
|
||||
throw new Error(`Attempt to apply embeddings to table failed because column ${destColumn} already existed`)
|
||||
throw new Error(
|
||||
`Attempt to apply embeddings to table failed because column ${destColumn} already existed`
|
||||
);
|
||||
}
|
||||
if (table.batches.length > 1) {
|
||||
throw new Error('Internal error: `makeArrowTable` unexpectedly created a table with more than one batch')
|
||||
throw new Error(
|
||||
"Internal error: `makeArrowTable` unexpectedly created a table with more than one batch"
|
||||
);
|
||||
}
|
||||
const values = sourceColumn.toArray()
|
||||
const vectors = await embeddings.embed(values as T[])
|
||||
const values = sourceColumn.toArray();
|
||||
const vectors = await embeddings.embed(values as T[]);
|
||||
if (vectors.length !== values.length) {
|
||||
throw new Error('Embedding function did not return an embedding for each input element')
|
||||
throw new Error(
|
||||
"Embedding function did not return an embedding for each input element"
|
||||
);
|
||||
}
|
||||
const destType = newVectorType(vectors[0].length, innerDestType)
|
||||
newColumns[destColumn] = makeVector(vectors, destType)
|
||||
const destType = newVectorType(vectors[0].length, innerDestType);
|
||||
newColumns[destColumn] = makeVector(vectors, destType);
|
||||
}
|
||||
|
||||
const newTable = new ArrowTable(newColumns)
|
||||
const newTable = new ArrowTable(newColumns);
|
||||
if (schema != null) {
|
||||
if (schema.fields.find(f => f.name === destColumn) === undefined) {
|
||||
throw new Error(`When using embedding functions and specifying a schema the schema should include the embedding column but the column ${destColumn} was missing`)
|
||||
if (schema.fields.find((f) => f.name === destColumn) === undefined) {
|
||||
throw new Error(
|
||||
`When using embedding functions and specifying a schema the schema should include the embedding column but the column ${destColumn} was missing`
|
||||
);
|
||||
}
|
||||
return alignTable(newTable, schema)
|
||||
return alignTable(newTable, schema);
|
||||
}
|
||||
return newTable
|
||||
return newTable;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -417,21 +462,24 @@ async function applyEmbeddings<T> (table: ArrowTable, embeddings?: EmbeddingFunc
|
||||
* embedding columns. If no schema is provded then embedding columns will
|
||||
* be placed at the end of the table, after all of the input columns.
|
||||
*/
|
||||
export async function convertToTable<T> (
|
||||
export async function convertToTable<T>(
|
||||
data: Array<Record<string, unknown>>,
|
||||
embeddings?: EmbeddingFunction<T>,
|
||||
makeTableOptions?: Partial<MakeArrowTableOptions>
|
||||
): Promise<ArrowTable> {
|
||||
const table = makeArrowTable(data, makeTableOptions)
|
||||
return await applyEmbeddings(table, embeddings, makeTableOptions?.schema)
|
||||
const table = makeArrowTable(data, makeTableOptions);
|
||||
return await applyEmbeddings(table, embeddings, makeTableOptions?.schema);
|
||||
}
|
||||
|
||||
// Creates the Arrow Type for a Vector column with dimension `dim`
|
||||
function newVectorType <T extends Float> (dim: number, innerType: T): FixedSizeList<T> {
|
||||
function newVectorType<T extends Float>(
|
||||
dim: number,
|
||||
innerType: T
|
||||
): FixedSizeList<T> {
|
||||
// Somewhere we always default to have the elements nullable, so we need to set it to true
|
||||
// otherwise we often get schema mismatches because the stored data always has schema with nullable elements
|
||||
const children = new Field<T>('item', innerType, true)
|
||||
return new FixedSizeList(dim, children)
|
||||
const children = new Field<T>("item", innerType, true);
|
||||
return new FixedSizeList(dim, children);
|
||||
}
|
||||
|
||||
/**
@@ -441,17 +489,17 @@ function newVectorType <T extends Float> (dim: number, innerType: T): FixedSizeL
*
* `schema` is required if data is empty
*/
export async function fromRecordsToBuffer<T> (
export async function fromRecordsToBuffer<T>(
data: Array<Record<string, unknown>>,
embeddings?: EmbeddingFunction<T>,
schema?: Schema
): Promise<Buffer> {
if (schema !== undefined && schema !== null) {
schema = sanitizeSchema(schema)
schema = sanitizeSchema(schema);
}
const table = await convertToTable(data, embeddings, { schema })
const writer = RecordBatchFileWriter.writeAll(table)
return Buffer.from(await writer.toUint8Array())
const table = await convertToTable(data, embeddings, { schema, embeddings });
const writer = RecordBatchFileWriter.writeAll(table);
return Buffer.from(await writer.toUint8Array());
}

/**
@@ -461,17 +509,17 @@ export async function fromRecordsToBuffer<T> (
*
* `schema` is required if data is empty
*/
export async function fromRecordsToStreamBuffer<T> (
export async function fromRecordsToStreamBuffer<T>(
data: Array<Record<string, unknown>>,
embeddings?: EmbeddingFunction<T>,
schema?: Schema
): Promise<Buffer> {
if (schema !== null && schema !== undefined) {
schema = sanitizeSchema(schema)
schema = sanitizeSchema(schema);
}
const table = await convertToTable(data, embeddings, { schema })
const writer = RecordBatchStreamWriter.writeAll(table)
return Buffer.from(await writer.toUint8Array())
const table = await convertToTable(data, embeddings, { schema });
const writer = RecordBatchStreamWriter.writeAll(table);
return Buffer.from(await writer.toUint8Array());
}

/**
@@ -482,17 +530,17 @@ export async function fromRecordsToStreamBuffer<T> (
*
* `schema` is required if the table is empty
*/
export async function fromTableToBuffer<T> (
export async function fromTableToBuffer<T>(
table: ArrowTable,
embeddings?: EmbeddingFunction<T>,
schema?: Schema
): Promise<Buffer> {
if (schema !== null && schema !== undefined) {
schema = sanitizeSchema(schema)
schema = sanitizeSchema(schema);
}
const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema)
const writer = RecordBatchFileWriter.writeAll(tableWithEmbeddings)
return Buffer.from(await writer.toUint8Array())
const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema);
const writer = RecordBatchFileWriter.writeAll(tableWithEmbeddings);
return Buffer.from(await writer.toUint8Array());
}

/**
@@ -503,49 +551,85 @@ export async function fromTableToBuffer<T> (
*
* `schema` is required if the table is empty
*/
export async function fromTableToStreamBuffer<T> (
export async function fromTableToStreamBuffer<T>(
table: ArrowTable,
embeddings?: EmbeddingFunction<T>,
schema?: Schema
): Promise<Buffer> {
if (schema !== null && schema !== undefined) {
schema = sanitizeSchema(schema)
schema = sanitizeSchema(schema);
}
const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema)
const writer = RecordBatchStreamWriter.writeAll(tableWithEmbeddings)
return Buffer.from(await writer.toUint8Array())
const tableWithEmbeddings = await applyEmbeddings(table, embeddings, schema);
const writer = RecordBatchStreamWriter.writeAll(tableWithEmbeddings);
return Buffer.from(await writer.toUint8Array());
}

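A quick usage sketch for the serialization helpers above (imports assume this module; the sample rows are invented):

```ts
import {
  fromRecordsToBuffer,
  fromTableToStreamBuffer,
  makeArrowTable,
} from "./arrow";

async function demo(): Promise<void> {
  const rows = [{ id: 1, vector: [0.1, 0.2] }];
  // File-format IPC buffer straight from records (no embedding function).
  const fileBuf = await fromRecordsToBuffer(rows);
  // Stream-format IPC buffer from an already-built Arrow table.
  const table = makeArrowTable(rows);
  const streamBuf = await fromTableToStreamBuffer(table);
  console.log(fileBuf.length, streamBuf.length);
}
```
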
function alignBatch (batch: RecordBatch, schema: Schema): RecordBatch {
const alignedChildren = []
function alignBatch(batch: RecordBatch, schema: Schema): RecordBatch {
const alignedChildren = [];
for (const field of schema.fields) {
const indexInBatch = batch.schema.fields?.findIndex(
(f) => f.name === field.name
)
);
if (indexInBatch < 0) {
throw new Error(
`The column ${field.name} was not found in the Arrow Table`
)
);
}
alignedChildren.push(batch.data.children[indexInBatch])
alignedChildren.push(batch.data.children[indexInBatch]);
}
const newData = makeData({
type: new Struct(schema.fields),
length: batch.numRows,
nullCount: batch.nullCount,
children: alignedChildren
})
return new RecordBatch(schema, newData)
});
return new RecordBatch(schema, newData);
}

function alignTable (table: ArrowTable, schema: Schema): ArrowTable {
function alignTable(table: ArrowTable, schema: Schema): ArrowTable {
const alignedBatches = table.batches.map((batch) =>
alignBatch(batch, schema)
)
return new ArrowTable(schema, alignedBatches)
);
return new ArrowTable(schema, alignedBatches);
}

// Creates an empty Arrow Table
export function createEmptyTable (schema: Schema): ArrowTable {
return new ArrowTable(sanitizeSchema(schema))
export function createEmptyTable(schema: Schema): ArrowTable {
return new ArrowTable(sanitizeSchema(schema));
}

function validateSchemaEmbeddings(
schema: Schema<any>,
data: Array<Record<string, unknown>>,
embeddings: EmbeddingFunction<any> | undefined
) {
const fields = [];
const missingEmbeddingFields = [];

// First we check if the field is a `FixedSizeList`
// Then we check if the data contains the field
// if it does not, we add it to the list of missing embedding fields
// Finally, we check if those missing embedding fields are `this._embeddings`
// if they are not, we throw an error
for (const field of schema.fields) {
if (field.type instanceof FixedSizeList) {
if (data.length !== 0 && data?.[0]?.[field.name] === undefined) {
missingEmbeddingFields.push(field);
} else {
fields.push(field);
}
} else {
fields.push(field);
}
}

if (missingEmbeddingFields.length > 0 && embeddings === undefined) {
throw new Error(
`Table has embeddings: "${missingEmbeddingFields
.map((f) => f.name)
.join(",")}", but no embedding function was provided`
);
}

return new Schema(fields, schema.metadata);
}

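To make the check above concrete, here is a hypothetical schema/data pair that would trip the error unless an embedding function is supplied (names are illustrative, not from the diff):

```ts
import { Field, FixedSizeList, Float32, Int32, Schema, Utf8 } from "apache-arrow";

const schema = new Schema([
  new Field("id", new Int32(), false),
  new Field("text", new Utf8(), false),
  // A FixedSizeList column that the raw data below does not provide...
  new Field(
    "vector",
    new FixedSizeList(2, new Field("item", new Float32(), true)),
    true
  ),
]);
const data = [{ id: 1, text: "hello" }];
// ...so validateSchemaEmbeddings(schema, data, undefined) would throw:
// 'Table has embeddings: "vector", but no embedding function was provided'
console.log(schema.fields.length, data.length);
```
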
@@ -12,19 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.

import { type Schema, Table as ArrowTable, tableFromIPC } from 'apache-arrow'
import { type Schema, Table as ArrowTable, tableFromIPC } from "apache-arrow";
import {
createEmptyTable,
fromRecordsToBuffer,
fromTableToBuffer,
makeArrowTable
} from './arrow'
import type { EmbeddingFunction } from './embedding/embedding_function'
import { RemoteConnection } from './remote'
import { Query } from './query'
import { isEmbeddingFunction } from './embedding/embedding_function'
import { type Literal, toSQL } from './util'
import { type HttpMiddleware } from './middleware'
} from "./arrow";
import type { EmbeddingFunction } from "./embedding/embedding_function";
import { RemoteConnection } from "./remote";
import { Query } from "./query";
import { isEmbeddingFunction } from "./embedding/embedding_function";
import { type Literal, toSQL } from "./util";

import { type HttpMiddleware } from "./middleware";

const {
databaseNew,
@@ -48,14 +49,18 @@ const {
tableAlterColumns,
tableDropColumns
// eslint-disable-next-line @typescript-eslint/no-var-requires
} = require('../native.js')
} = require("../native.js");

export { Query }
export type { EmbeddingFunction }
export { OpenAIEmbeddingFunction } from './embedding/openai'
export { convertToTable, makeArrowTable, type MakeArrowTableOptions } from './arrow'
export { Query };
export type { EmbeddingFunction };
export { OpenAIEmbeddingFunction } from "./embedding/openai";
export {
convertToTable,
makeArrowTable,
type MakeArrowTableOptions
} from "./arrow";

const defaultAwsRegion = 'us-west-2'
const defaultAwsRegion = "us-west-2";

export interface AwsCredentials {
accessKeyId: string
@@ -128,19 +133,19 @@ export interface ConnectionOptions {
readConsistencyInterval?: number
}

function getAwsArgs (opts: ConnectionOptions): any[] {
const callArgs: any[] = []
const awsCredentials = opts.awsCredentials
function getAwsArgs(opts: ConnectionOptions): any[] {
const callArgs: any[] = [];
const awsCredentials = opts.awsCredentials;
if (awsCredentials !== undefined) {
callArgs.push(awsCredentials.accessKeyId)
callArgs.push(awsCredentials.secretKey)
callArgs.push(awsCredentials.sessionToken)
callArgs.push(awsCredentials.accessKeyId);
callArgs.push(awsCredentials.secretKey);
callArgs.push(awsCredentials.sessionToken);
} else {
callArgs.fill(undefined, 0, 3)
callArgs.fill(undefined, 0, 3);
}

callArgs.push(opts.awsRegion)
return callArgs
callArgs.push(opts.awsRegion);
return callArgs;
}

export interface CreateTableOptions<T> {
@@ -163,7 +168,7 @@ export interface CreateTableOptions<T> {
/**
* Connect to a LanceDB instance at the given URI.
*
* Accpeted formats:
* Accepted formats:
*
* - `/path/to/database` - local database
* - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage
@@ -173,56 +178,56 @@ export interface CreateTableOptions<T> {
*
* @see {@link ConnectionOptions} for more details on the URI format.
*/
export async function connect (uri: string): Promise<Connection>
export async function connect(uri: string): Promise<Connection>;
/**
* Connect to a LanceDB instance with connection options.
*
* @param opts The {@link ConnectionOptions} to use when connecting to the database.
*/
export async function connect (
export async function connect(
opts: Partial<ConnectionOptions>
): Promise<Connection>
export async function connect (
): Promise<Connection>;
export async function connect(
arg: string | Partial<ConnectionOptions>
): Promise<Connection> {
let opts: ConnectionOptions
if (typeof arg === 'string') {
opts = { uri: arg }
let opts: ConnectionOptions;
if (typeof arg === "string") {
opts = { uri: arg };
} else {
const keys = Object.keys(arg)
if (keys.length === 1 && keys[0] === 'uri' && typeof arg.uri === 'string') {
opts = { uri: arg.uri }
const keys = Object.keys(arg);
if (keys.length === 1 && keys[0] === "uri" && typeof arg.uri === "string") {
opts = { uri: arg.uri };
} else {
opts = Object.assign(
{
uri: '',
uri: "",
awsCredentials: undefined,
awsRegion: defaultAwsRegion,
apiKey: undefined,
region: defaultAwsRegion
},
arg
)
);
}
}

if (opts.uri.startsWith('db://')) {
if (opts.uri.startsWith("db://")) {
// Remote connection
return new RemoteConnection(opts)
return new RemoteConnection(opts);
}

const storageOptions = opts.storageOptions ?? {};
if (opts.awsCredentials?.accessKeyId !== undefined) {
storageOptions.aws_access_key_id = opts.awsCredentials.accessKeyId
storageOptions.aws_access_key_id = opts.awsCredentials.accessKeyId;
}
if (opts.awsCredentials?.secretKey !== undefined) {
storageOptions.aws_secret_access_key = opts.awsCredentials.secretKey
storageOptions.aws_secret_access_key = opts.awsCredentials.secretKey;
}
if (opts.awsCredentials?.sessionToken !== undefined) {
storageOptions.aws_session_token = opts.awsCredentials.sessionToken
storageOptions.aws_session_token = opts.awsCredentials.sessionToken;
}
if (opts.awsRegion !== undefined) {
storageOptions.region = opts.awsRegion
storageOptions.region = opts.awsRegion;
}
// It's a pain to pass a record to Rust, so we convert it to an array of key-value pairs
const storageOptionsArr = Object.entries(storageOptions);
@@ -231,8 +236,8 @@ export async function connect (
opts.uri,
storageOptionsArr,
opts.readConsistencyInterval
)
return new LocalConnection(db, opts)
);
return new LocalConnection(db, opts);
}

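A usage sketch for the `connect` overloads above; the `vectordb` package name and the paths are assumptions for illustration:

```ts
import { connect } from "vectordb";

async function main() {
  // Plain URI form.
  const db = await connect("/tmp/my-lancedb");
  // Options form; a URI starting with "db://" becomes a RemoteConnection.
  const dbWithOpts = await connect({
    uri: "/tmp/my-lancedb",
    readConsistencyInterval: 5,
  });
  console.log(db.uri, dbWithOpts.uri);
}
```
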
/**
@@ -533,7 +538,11 @@ export interface Table<T = number[]> {
* @param data the new data to insert
* @param args parameters controlling how the operation should behave
*/
mergeInsert: (on: string, data: Array<Record<string, unknown>> | ArrowTable, args: MergeInsertArgs) => Promise<void>
mergeInsert: (
on: string,
data: Array<Record<string, unknown>> | ArrowTable,
args: MergeInsertArgs
) => Promise<void>

/**
* List the indices on this table.
@@ -558,7 +567,9 @@ export interface Table<T = number[]> {
* expressions will be evaluated for each row in the
* table, and can reference existing columns in the table.
*/
addColumns(newColumnTransforms: Array<{ name: string, valueSql: string }>): Promise<void>
addColumns(
newColumnTransforms: Array<{ name: string, valueSql: string }>
): Promise<void>

/**
* Alter the name or nullability of columns.
@@ -699,23 +710,23 @@ export interface IndexStats {
* A connection to a LanceDB database.
*/
export class LocalConnection implements Connection {
private readonly _options: () => ConnectionOptions
private readonly _db: any
private readonly _options: () => ConnectionOptions;
private readonly _db: any;

constructor (db: any, options: ConnectionOptions) {
this._options = () => options
this._db = db
constructor(db: any, options: ConnectionOptions) {
this._options = () => options;
this._db = db;
}

get uri (): string {
return this._options().uri
get uri(): string {
return this._options().uri;
}

/**
* Get the names of all tables in the database.
*/
async tableNames (): Promise<string[]> {
return databaseTableNames.call(this._db)
async tableNames(): Promise<string[]> {
return databaseTableNames.call(this._db);
}

/**
@@ -723,7 +734,7 @@ export class LocalConnection implements Connection {
*
* @param name The name of the table.
*/
async openTable (name: string): Promise<Table>
async openTable(name: string): Promise<Table>;

/**
* Open a table in the database.
@@ -734,23 +745,20 @@ export class LocalConnection implements Connection {
async openTable<T>(
name: string,
embeddings: EmbeddingFunction<T>
): Promise<Table<T>>
): Promise<Table<T>>;
async openTable<T>(
name: string,
embeddings?: EmbeddingFunction<T>
): Promise<Table<T>>
): Promise<Table<T>>;
async openTable<T>(
name: string,
embeddings?: EmbeddingFunction<T>
): Promise<Table<T>> {
const tbl = await databaseOpenTable.call(
this._db,
name,
)
const tbl = await databaseOpenTable.call(this._db, name);
if (embeddings !== undefined) {
return new LocalTable(tbl, name, this._options(), embeddings)
return new LocalTable(tbl, name, this._options(), embeddings);
} else {
return new LocalTable(tbl, name, this._options())
return new LocalTable(tbl, name, this._options());
}
}

@@ -760,32 +768,32 @@ export class LocalConnection implements Connection {
optsOrEmbedding?: WriteOptions | EmbeddingFunction<T>,
opt?: WriteOptions
): Promise<Table<T>> {
if (typeof name === 'string') {
let writeOptions: WriteOptions = new DefaultWriteOptions()
if (typeof name === "string") {
let writeOptions: WriteOptions = new DefaultWriteOptions();
if (opt !== undefined && isWriteOptions(opt)) {
writeOptions = opt
writeOptions = opt;
} else if (
optsOrEmbedding !== undefined &&
isWriteOptions(optsOrEmbedding)
) {
writeOptions = optsOrEmbedding
writeOptions = optsOrEmbedding;
}

let embeddings: undefined | EmbeddingFunction<T>
let embeddings: undefined | EmbeddingFunction<T>;
if (
optsOrEmbedding !== undefined &&
isEmbeddingFunction(optsOrEmbedding)
) {
embeddings = optsOrEmbedding
embeddings = optsOrEmbedding;
}
return await this.createTableImpl({
name,
data,
embeddingFunction: embeddings,
writeOptions
})
});
}
return await this.createTableImpl(name)
return await this.createTableImpl(name);
}

private async createTableImpl<T>({
@@ -801,27 +809,27 @@ export class LocalConnection implements Connection {
embeddingFunction?: EmbeddingFunction<T> | undefined
writeOptions?: WriteOptions | undefined
}): Promise<Table<T>> {
let buffer: Buffer
let buffer: Buffer;

function isEmpty (
function isEmpty(
data: Array<Record<string, unknown>> | ArrowTable<any>
): boolean {
if (data instanceof ArrowTable) {
return data.data.length === 0
return data.data.length === 0;
}
return data.length === 0
return data.length === 0;
}

if (data === undefined || isEmpty(data)) {
if (schema === undefined) {
throw new Error('Either data or schema needs to be defined')
throw new Error("Either data or schema needs to be defined");
}
buffer = await fromTableToBuffer(createEmptyTable(schema))
buffer = await fromTableToBuffer(createEmptyTable(schema));
} else if (data instanceof ArrowTable) {
buffer = await fromTableToBuffer(data, embeddingFunction, schema)
buffer = await fromTableToBuffer(data, embeddingFunction, schema);
} else {
// data is Array<Record<...>>
buffer = await fromRecordsToBuffer(data, embeddingFunction, schema)
buffer = await fromRecordsToBuffer(data, embeddingFunction, schema);
}

const tbl = await tableCreate.call(
@@ -830,11 +838,11 @@ export class LocalConnection implements Connection {
buffer,
writeOptions?.writeMode?.toString(),
...getAwsArgs(this._options())
)
);
if (embeddingFunction !== undefined) {
return new LocalTable(tbl, name, this._options(), embeddingFunction)
return new LocalTable(tbl, name, this._options(), embeddingFunction);
} else {
return new LocalTable(tbl, name, this._options())
return new LocalTable(tbl, name, this._options());
}
}

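A sketch of the two creation paths above, one from records and one from a schema alone; the options-object form of `createTable` is assumed from `CreateTableOptions` and the names are examples:

```ts
import { Field, Float32, Int32, Schema } from "apache-arrow";
import { connect } from "vectordb";

async function createExamples() {
  const db = await connect("/tmp/my-lancedb");
  // Non-empty data: the Arrow schema is inferred from the records.
  await db.createTable("people", [{ id: 1, score: 0.5 }]);
  // Empty data: a schema must be provided or the call throws.
  const schema = new Schema([
    new Field("id", new Int32(), false),
    new Field("score", new Float32(), true),
  ]);
  await db.createTable({ name: "empty_people", schema });
}
```
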
@@ -842,69 +850,69 @@ export class LocalConnection implements Connection {
* Drop an existing table.
* @param name The name of the table to drop.
*/
async dropTable (name: string): Promise<void> {
await databaseDropTable.call(this._db, name)
async dropTable(name: string): Promise<void> {
await databaseDropTable.call(this._db, name);
}

withMiddleware (middleware: HttpMiddleware): Connection {
return this
withMiddleware(middleware: HttpMiddleware): Connection {
return this;
}
}

export class LocalTable<T = number[]> implements Table<T> {
private _tbl: any
private readonly _name: string
private readonly _isElectron: boolean
private readonly _embeddings?: EmbeddingFunction<T>
private readonly _options: () => ConnectionOptions
private _tbl: any;
private readonly _name: string;
private readonly _isElectron: boolean;
private readonly _embeddings?: EmbeddingFunction<T>;
private readonly _options: () => ConnectionOptions;

constructor (tbl: any, name: string, options: ConnectionOptions)
constructor(tbl: any, name: string, options: ConnectionOptions);
/**
* @param tbl
* @param name
* @param options
* @param embeddings An embedding function to use when interacting with this table
*/
constructor (
constructor(
tbl: any,
name: string,
options: ConnectionOptions,
embeddings: EmbeddingFunction<T>
)
constructor (
);
constructor(
tbl: any,
name: string,
options: ConnectionOptions,
embeddings?: EmbeddingFunction<T>
) {
this._tbl = tbl
this._name = name
this._embeddings = embeddings
this._options = () => options
this._isElectron = this.checkElectron()
this._tbl = tbl;
this._name = name;
this._embeddings = embeddings;
this._options = () => options;
this._isElectron = this.checkElectron();
}

get name (): string {
return this._name
get name(): string {
return this._name;
}

/**
* Creates a search query to find the nearest neighbors of the given search term
* @param query The query search term
*/
search (query: T): Query<T> {
return new Query(query, this._tbl, this._embeddings)
search(query: T): Query<T> {
return new Query(query, this._tbl, this._embeddings);
}

/**
* Creates a filter query to find all rows matching the specified criteria
* @param value The filter criteria (like SQL where clause syntax)
*/
filter (value: string): Query<T> {
return new Query(undefined, this._tbl, this._embeddings).filter(value)
filter(value: string): Query<T> {
return new Query(undefined, this._tbl, this._embeddings).filter(value);
}

where = this.filter
where = this.filter;

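A usage sketch for `search`/`filter` above; the `limit`/`execute` calls come from the `Query` class, which is not shown in this diff, so treat them as assumptions:

```ts
import { connect } from "vectordb";

async function searchExample() {
  const db = await connect("/tmp/my-lancedb");
  const tbl = await db.openTable("people");
  // Vector search via `search`, SQL-style filtering via `filter`/`where`.
  const hits = await tbl.search([0.1, 0.2]).limit(10).execute();
  const matched = await tbl.filter("id > 5").execute();
  console.log(hits.length, matched.length);
}
```
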
/**
* Insert records into this Table.
@@ -912,16 +920,19 @@ export class LocalTable<T = number[]> implements Table<T> {
* @param data Records to be inserted into the Table
* @return The number of rows added to the table
*/
async add (
async add(
data: Array<Record<string, unknown>> | ArrowTable
): Promise<number> {
const schema = await this.schema
let tbl: ArrowTable
const schema = await this.schema;

let tbl: ArrowTable;

if (data instanceof ArrowTable) {
tbl = data
tbl = data;
} else {
tbl = makeArrowTable(data, { schema })
tbl = makeArrowTable(data, { schema, embeddings: this._embeddings });
}

return tableAdd
.call(
this._tbl,
@@ -930,8 +941,8 @@ export class LocalTable<T = number[]> implements Table<T> {
...getAwsArgs(this._options())
)
.then((newTable: any) => {
this._tbl = newTable
})
this._tbl = newTable;
});
}

/**
@@ -940,14 +951,14 @@ export class LocalTable<T = number[]> implements Table<T> {
* @param data Records to be inserted into the Table
* @return The number of rows added to the table
*/
async overwrite (
async overwrite(
data: Array<Record<string, unknown>> | ArrowTable
): Promise<number> {
let buffer: Buffer
let buffer: Buffer;
if (data instanceof ArrowTable) {
buffer = await fromTableToBuffer(data, this._embeddings)
buffer = await fromTableToBuffer(data, this._embeddings);
} else {
buffer = await fromRecordsToBuffer(data, this._embeddings)
buffer = await fromRecordsToBuffer(data, this._embeddings);
}
return tableAdd
.call(
@@ -957,8 +968,8 @@ export class LocalTable<T = number[]> implements Table<T> {
...getAwsArgs(this._options())
)
.then((newTable: any) => {
this._tbl = newTable
})
this._tbl = newTable;
});
}

/**
@@ -966,26 +977,26 @@ export class LocalTable<T = number[]> implements Table<T> {
*
* @param indexParams The parameters of this Index, @see VectorIndexParams.
*/
async createIndex (indexParams: VectorIndexParams): Promise<any> {
async createIndex(indexParams: VectorIndexParams): Promise<any> {
return tableCreateVectorIndex
.call(this._tbl, indexParams)
.then((newTable: any) => {
this._tbl = newTable
})
this._tbl = newTable;
});
}

async createScalarIndex (column: string, replace?: boolean): Promise<void> {
async createScalarIndex(column: string, replace?: boolean): Promise<void> {
if (replace === undefined) {
replace = true
replace = true;
}
return tableCreateScalarIndex.call(this._tbl, column, replace)
return tableCreateScalarIndex.call(this._tbl, column, replace);
}

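A usage sketch for `createScalarIndex` above (table and column names are examples):

```ts
import { connect } from "vectordb";

async function scalarIndexExample() {
  const db = await connect("/tmp/my-lancedb");
  const tbl = await db.openTable("people");
  // Build a scalar index on "id"; `replace` defaults to true when omitted.
  await tbl.createScalarIndex("id");
  // Equivalent, spelled out:
  await tbl.createScalarIndex("id", true);
}
```
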
/**
* Returns the number of rows in this table.
*/
async countRows (filter?: string): Promise<number> {
return tableCountRows.call(this._tbl, filter)
async countRows(filter?: string): Promise<number> {
return tableCountRows.call(this._tbl, filter);
}

/**
@@ -993,10 +1004,10 @@ export class LocalTable<T = number[]> implements Table<T> {
*
* @param filter A filter in the same format used by a sql WHERE clause.
*/
async delete (filter: string): Promise<void> {
async delete(filter: string): Promise<void> {
return tableDelete.call(this._tbl, filter).then((newTable: any) => {
this._tbl = newTable
})
this._tbl = newTable;
});
}

/**
@@ -1006,55 +1017,65 @@ export class LocalTable<T = number[]> implements Table<T> {
*
* @returns
*/
async update (args: UpdateArgs | UpdateSqlArgs): Promise<void> {
let filter: string | null
let updates: Record<string, string>
async update(args: UpdateArgs | UpdateSqlArgs): Promise<void> {
let filter: string | null;
let updates: Record<string, string>;

if ('valuesSql' in args) {
filter = args.where ?? null
updates = args.valuesSql
if ("valuesSql" in args) {
filter = args.where ?? null;
updates = args.valuesSql;
} else {
filter = args.where ?? null
updates = {}
filter = args.where ?? null;
updates = {};
for (const [key, value] of Object.entries(args.values)) {
updates[key] = toSQL(value)
updates[key] = toSQL(value);
}
}

return tableUpdate
.call(this._tbl, filter, updates)
.then((newTable: any) => {
this._tbl = newTable
})
this._tbl = newTable;
});
}

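A usage sketch contrasting the two `update` argument shapes above (assuming `tbl` is obtained as in the earlier examples):

```ts
// Literal values: converted to SQL with toSQL() before being applied.
await tbl.update({
  where: "id = 1",
  values: { name: "Alice", score: 0.9 },
});
// Raw SQL expressions: evaluated per row and may reference columns.
await tbl.update({
  where: "id = 2",
  valuesSql: { score: "score + 0.1" },
});
```
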
async mergeInsert (on: string, data: Array<Record<string, unknown>> | ArrowTable, args: MergeInsertArgs): Promise<void> {
let whenMatchedUpdateAll = false
let whenMatchedUpdateAllFilt = null
if (args.whenMatchedUpdateAll !== undefined && args.whenMatchedUpdateAll !== null) {
whenMatchedUpdateAll = true
async mergeInsert(
on: string,
data: Array<Record<string, unknown>> | ArrowTable,
args: MergeInsertArgs
): Promise<void> {
let whenMatchedUpdateAll = false;
let whenMatchedUpdateAllFilt = null;
if (
args.whenMatchedUpdateAll !== undefined &&
args.whenMatchedUpdateAll !== null
) {
whenMatchedUpdateAll = true;
if (args.whenMatchedUpdateAll !== true) {
whenMatchedUpdateAllFilt = args.whenMatchedUpdateAll
whenMatchedUpdateAllFilt = args.whenMatchedUpdateAll;
}
}
const whenNotMatchedInsertAll = args.whenNotMatchedInsertAll ?? false
let whenNotMatchedBySourceDelete = false
let whenNotMatchedBySourceDeleteFilt = null
if (args.whenNotMatchedBySourceDelete !== undefined && args.whenNotMatchedBySourceDelete !== null) {
whenNotMatchedBySourceDelete = true
const whenNotMatchedInsertAll = args.whenNotMatchedInsertAll ?? false;
let whenNotMatchedBySourceDelete = false;
let whenNotMatchedBySourceDeleteFilt = null;
if (
args.whenNotMatchedBySourceDelete !== undefined &&
args.whenNotMatchedBySourceDelete !== null
) {
whenNotMatchedBySourceDelete = true;
if (args.whenNotMatchedBySourceDelete !== true) {
whenNotMatchedBySourceDeleteFilt = args.whenNotMatchedBySourceDelete
whenNotMatchedBySourceDeleteFilt = args.whenNotMatchedBySourceDelete;
}
}

const schema = await this.schema
let tbl: ArrowTable
const schema = await this.schema;
let tbl: ArrowTable;
if (data instanceof ArrowTable) {
tbl = data
tbl = data;
} else {
tbl = makeArrowTable(data, { schema })
tbl = makeArrowTable(data, { schema });
}
const buffer = await fromTableToBuffer(tbl, this._embeddings, schema)
const buffer = await fromTableToBuffer(tbl, this._embeddings, schema);

this._tbl = await tableMergeInsert.call(
this._tbl,
@@ -1065,7 +1086,7 @@ export class LocalTable<T = number[]> implements Table<T> {
whenNotMatchedBySourceDelete,
whenNotMatchedBySourceDeleteFilt,
buffer
)
);
}

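A usage sketch for `mergeInsert` above, upserting on an `id` column. Per the implementation, `true` means unconditional, while a string is treated as an extra filter condition:

```ts
await tbl.mergeInsert(
  "id",
  [
    { id: 1, name: "updated" },
    { id: 99, name: "new" },
  ],
  {
    whenMatchedUpdateAll: true,
    whenNotMatchedInsertAll: true,
    // Or, conditionally: whenNotMatchedBySourceDelete: "stale = true",
  }
);
```
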
/**
@@ -1083,16 +1104,16 @@ export class LocalTable<T = number[]> implements Table<T> {
* uphold this promise can lead to corrupted tables.
* @returns
*/
async cleanupOldVersions (
async cleanupOldVersions(
olderThan?: number,
deleteUnverified?: boolean
): Promise<CleanupStats> {
return tableCleanupOldVersions
.call(this._tbl, olderThan, deleteUnverified)
.then((res: { newTable: any, metrics: CleanupStats }) => {
this._tbl = res.newTable
return res.metrics
})
this._tbl = res.newTable;
return res.metrics;
});
}

/**
@@ -1106,62 +1127,64 @@ export class LocalTable<T = number[]> implements Table<T> {
* for most tables.
* @returns Metrics about the compaction operation.
*/
async compactFiles (options?: CompactionOptions): Promise<CompactionMetrics> {
const optionsArg = options ?? {}
async compactFiles(options?: CompactionOptions): Promise<CompactionMetrics> {
const optionsArg = options ?? {};
return tableCompactFiles
.call(this._tbl, optionsArg)
.then((res: { newTable: any, metrics: CompactionMetrics }) => {
this._tbl = res.newTable
return res.metrics
})
this._tbl = res.newTable;
return res.metrics;
});
}

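A usage sketch for the two maintenance calls above; the metric field names are inferred from the optimize tests later in this diff and may differ on `CompactionMetrics`/`CleanupStats`:

```ts
import { connect, LocalTable } from "vectordb";

async function maintain() {
  const db = await connect("/tmp/my-lancedb");
  const tbl = (await db.openTable("people")) as LocalTable;
  // Merge many small data files into fewer, larger fragments.
  const compaction = await tbl.compactFiles({
    targetRowsPerFragment: 1024 * 1024,
  });
  console.log(compaction.fragmentsRemoved, compaction.fragmentsAdded);
  // Prune old versions with the default retention window.
  const cleanup = await tbl.cleanupOldVersions();
  console.log(cleanup.bytesRemoved, cleanup.oldVersionsRemoved);
}
```
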
async listIndices (): Promise<VectorIndex[]> {
return tableListIndices.call(this._tbl)
async listIndices(): Promise<VectorIndex[]> {
return tableListIndices.call(this._tbl);
}

async indexStats (indexUuid: string): Promise<IndexStats> {
return tableIndexStats.call(this._tbl, indexUuid)
async indexStats(indexUuid: string): Promise<IndexStats> {
return tableIndexStats.call(this._tbl, indexUuid);
}

get schema (): Promise<Schema> {
get schema(): Promise<Schema> {
// empty table
return this.getSchema()
return this.getSchema();
}

private async getSchema (): Promise<Schema> {
const buffer = await tableSchema.call(this._tbl, this._isElectron)
const table = tableFromIPC(buffer)
return table.schema
private async getSchema(): Promise<Schema> {
const buffer = await tableSchema.call(this._tbl, this._isElectron);
const table = tableFromIPC(buffer);
return table.schema;
}

// See https://github.com/electron/electron/issues/2288
private checkElectron (): boolean {
private checkElectron(): boolean {
try {
// eslint-disable-next-line no-prototype-builtins
return (
Object.prototype.hasOwnProperty.call(process?.versions, 'electron') ||
navigator?.userAgent?.toLowerCase()?.includes(' electron')
)
Object.prototype.hasOwnProperty.call(process?.versions, "electron") ||
navigator?.userAgent?.toLowerCase()?.includes(" electron")
);
} catch (e) {
return false
return false;
}
}

async addColumns (newColumnTransforms: Array<{ name: string, valueSql: string }>): Promise<void> {
return tableAddColumns.call(this._tbl, newColumnTransforms)
async addColumns(
newColumnTransforms: Array<{ name: string, valueSql: string }>
): Promise<void> {
return tableAddColumns.call(this._tbl, newColumnTransforms);
}

async alterColumns (columnAlterations: ColumnAlteration[]): Promise<void> {
return tableAlterColumns.call(this._tbl, columnAlterations)
async alterColumns(columnAlterations: ColumnAlteration[]): Promise<void> {
return tableAlterColumns.call(this._tbl, columnAlterations);
}

async dropColumns (columnNames: string[]): Promise<void> {
return tableDropColumns.call(this._tbl, columnNames)
async dropColumns(columnNames: string[]): Promise<void> {
return tableDropColumns.call(this._tbl, columnNames);
}

withMiddleware (middleware: HttpMiddleware): Table<T> {
return this
withMiddleware(middleware: HttpMiddleware): Table<T> {
return this;
}
}

@@ -1184,7 +1207,7 @@ export interface CompactionOptions {
*/
targetRowsPerFragment?: number
/**
* The maximum number of rows per group. Defaults to 1024.
* The maximum number of T per group. Defaults to 1024.
*/
maxRowsPerGroup?: number
/**
@@ -1284,21 +1307,21 @@ export interface IvfPQIndexConfig {
*/
index_cache_size?: number

type: 'ivf_pq'
type: "ivf_pq"
}

export type VectorIndexParams = IvfPQIndexConfig
export type VectorIndexParams = IvfPQIndexConfig;

/**
* Write mode for writing a table.
*/
export enum WriteMode {
/** Create a new {@link Table}. */
Create = 'create',
Create = "create",
/** Overwrite the existing {@link Table} if presented. */
Overwrite = 'overwrite',
Overwrite = "overwrite",
/** Append new data to the table. */
Append = 'append',
Append = "append",
}

/**
@@ -1310,14 +1333,14 @@ export interface WriteOptions {
}

export class DefaultWriteOptions implements WriteOptions {
writeMode = WriteMode.Create
writeMode = WriteMode.Create;
}

export function isWriteOptions (value: any): value is WriteOptions {
export function isWriteOptions(value: any): value is WriteOptions {
return (
Object.keys(value).length === 1 &&
(value.writeMode === undefined || typeof value.writeMode === 'string')
)
(value.writeMode === undefined || typeof value.writeMode === "string")
);
}

/**
@@ -1327,15 +1350,15 @@ export enum MetricType {
/**
* Euclidean distance
*/
L2 = 'l2',
L2 = "l2",

/**
* Cosine distance
*/
Cosine = 'cosine',
Cosine = "cosine",

/**
* Dot product
*/
Dot = 'dot',
Dot = "dot",
}

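A usage sketch for an IVF_PQ vector index with an explicit `MetricType`; the partition and sub-vector fields are assumed members of `IvfPQIndexConfig` that are not shown in this excerpt:

```ts
import { connect, MetricType } from "vectordb";

async function vectorIndexExample() {
  const db = await connect("/tmp/my-lancedb");
  const tbl = await db.openTable("vectors");
  // IVF_PQ is the only VectorIndexParams variant above; the
  // partition/sub-vector counts here are illustrative.
  await tbl.createIndex({
    type: "ivf_pq",
    column: "vector",
    metric_type: MetricType.Cosine,
    num_partitions: 256,
    num_sub_vectors: 16,
  });
}
```
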
@@ -51,7 +51,7 @@ describe('LanceDB Mirrored Store Integration test', function () {

const dir = tmpdir()
console.log(dir)
const conn = await lancedb.connect(`s3://lancedb-integtest?mirroredStore=${dir}`)
const conn = await lancedb.connect({ uri: `s3://lancedb-integtest?mirroredStore=${dir}`, storageOptions: { allowHttp: 'true' } })
const data = Array(200).fill({ vector: Array(128).fill(1.0), id: 0 })
data.push(...Array(200).fill({ vector: Array(128).fill(1.0), id: 1 }))
data.push(...Array(200).fill({ vector: Array(128).fill(1.0), id: 2 }))

@@ -140,6 +140,9 @@ export class RemoteConnection implements Connection {
schema = nameOrOpts.schema
embeddings = nameOrOpts.embeddingFunction
tableName = nameOrOpts.name
if (data === undefined) {
data = nameOrOpts.data
}
}

let buffer: Buffer

@@ -32,7 +32,7 @@ import {
Bool,
Date_,
Decimal,
DataType,
type DataType,
Dictionary,
Binary,
Float32,
@@ -74,12 +74,12 @@ import {
DurationNanosecond,
DurationMicrosecond,
DurationMillisecond,
DurationSecond,
DurationSecond
} from "apache-arrow";
import type { IntBitWidth, TimeBitWidth } from "apache-arrow/type";

function sanitizeMetadata(
metadataLike?: unknown,
metadataLike?: unknown
): Map<string, string> | undefined {
if (metadataLike === undefined || metadataLike === null) {
return undefined;
@@ -90,7 +90,7 @@ function sanitizeMetadata(
for (const item of metadataLike) {
if (!(typeof item[0] === "string" || !(typeof item[1] === "string"))) {
throw Error(
"Expected metadata, if present, to be a Map<string, string> but it had non-string keys or values",
"Expected metadata, if present, to be a Map<string, string> but it had non-string keys or values"
);
}
}
@@ -105,7 +105,7 @@ function sanitizeInt(typeLike: object) {
typeof typeLike.isSigned !== "boolean"
) {
throw Error(
"Expected an Int Type to have a `bitWidth` and `isSigned` property",
"Expected an Int Type to have a `bitWidth` and `isSigned` property"
);
}
return new Int(typeLike.isSigned, typeLike.bitWidth as IntBitWidth);
@@ -128,7 +128,7 @@ function sanitizeDecimal(typeLike: object) {
typeof typeLike.bitWidth !== "number"
) {
throw Error(
"Expected a Decimal Type to have `scale`, `precision`, and `bitWidth` properties",
"Expected a Decimal Type to have `scale`, `precision`, and `bitWidth` properties"
);
}
return new Decimal(typeLike.scale, typeLike.precision, typeLike.bitWidth);
@@ -149,7 +149,7 @@ function sanitizeTime(typeLike: object) {
typeof typeLike.bitWidth !== "number"
) {
throw Error(
"Expected a Time type to have `unit` and `bitWidth` properties",
"Expected a Time type to have `unit` and `bitWidth` properties"
);
}
return new Time(typeLike.unit, typeLike.bitWidth as TimeBitWidth);
@@ -172,7 +172,7 @@ function sanitizeTypedTimestamp(
| typeof TimestampNanosecond
| typeof TimestampMicrosecond
| typeof TimestampMillisecond
| typeof TimestampSecond,
| typeof TimestampSecond
) {
let timezone = null;
if ("timezone" in typeLike && typeof typeLike.timezone === "string") {
@@ -191,7 +191,7 @@ function sanitizeInterval(typeLike: object) {
function sanitizeList(typeLike: object) {
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a List type to have an array-like `children` property",
"Expected a List type to have an array-like `children` property"
);
}
if (typeLike.children.length !== 1) {
@@ -203,7 +203,7 @@ function sanitizeList(typeLike: object) {
function sanitizeStruct(typeLike: object) {
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a Struct type to have an array-like `children` property",
"Expected a Struct type to have an array-like `children` property"
);
}
return new Struct(typeLike.children.map((child) => sanitizeField(child)));
@@ -216,47 +216,47 @@ function sanitizeUnion(typeLike: object) {
typeof typeLike.mode !== "number"
) {
throw Error(
"Expected a Union type to have `typeIds` and `mode` properties",
"Expected a Union type to have `typeIds` and `mode` properties"
);
}
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a Union type to have an array-like `children` property",
"Expected a Union type to have an array-like `children` property"
);
}

return new Union(
typeLike.mode,
typeLike.typeIds as any,
typeLike.children.map((child) => sanitizeField(child)),
typeLike.children.map((child) => sanitizeField(child))
);
}

function sanitizeTypedUnion(
typeLike: object,
UnionType: typeof DenseUnion | typeof SparseUnion,
UnionType: typeof DenseUnion | typeof SparseUnion
) {
if (!("typeIds" in typeLike)) {
throw Error(
"Expected a DenseUnion/SparseUnion type to have a `typeIds` property",
"Expected a DenseUnion/SparseUnion type to have a `typeIds` property"
);
}
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a DenseUnion/SparseUnion type to have an array-like `children` property",
"Expected a DenseUnion/SparseUnion type to have an array-like `children` property"
);
}

return new UnionType(
typeLike.typeIds as any,
typeLike.children.map((child) => sanitizeField(child)),
typeLike.children.map((child) => sanitizeField(child))
);
}

function sanitizeFixedSizeBinary(typeLike: object) {
if (!("byteWidth" in typeLike) || typeof typeLike.byteWidth !== "number") {
throw Error(
"Expected a FixedSizeBinary type to have a `byteWidth` property",
"Expected a FixedSizeBinary type to have a `byteWidth` property"
);
}
return new FixedSizeBinary(typeLike.byteWidth);
@@ -268,7 +268,7 @@ function sanitizeFixedSizeList(typeLike: object) {
}
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a FixedSizeList type to have an array-like `children` property",
"Expected a FixedSizeList type to have an array-like `children` property"
);
}
if (typeLike.children.length !== 1) {
@@ -276,14 +276,14 @@ function sanitizeFixedSizeList(typeLike: object) {
}
return new FixedSizeList(
typeLike.listSize,
sanitizeField(typeLike.children[0]),
sanitizeField(typeLike.children[0])
);
}

function sanitizeMap(typeLike: object) {
if (!("children" in typeLike) || !Array.isArray(typeLike.children)) {
throw Error(
"Expected a Map type to have an array-like `children` property",
"Expected a Map type to have an array-like `children` property"
);
}
if (!("keysSorted" in typeLike) || typeof typeLike.keysSorted !== "boolean") {
@@ -291,7 +291,7 @@ function sanitizeMap(typeLike: object) {
}
return new Map_(
typeLike.children.map((field) => sanitizeField(field)) as any,
typeLike.keysSorted,
typeLike.keysSorted
);
}

@@ -319,7 +319,7 @@ function sanitizeDictionary(typeLike: object) {
sanitizeType(typeLike.dictionary),
sanitizeType(typeLike.indices) as any,
typeLike.id,
typeLike.isOrdered,
typeLike.isOrdered
);
}

@@ -454,7 +454,7 @@ function sanitizeField(fieldLike: unknown): Field {
!("nullable" in fieldLike)
) {
throw Error(
"The field passed in is missing a `type`/`name`/`nullable` property",
"The field passed in is missing a `type`/`name`/`nullable` property"
);
}
const type = sanitizeType(fieldLike.type);
@@ -489,7 +489,7 @@ export function sanitizeSchema(schemaLike: unknown): Schema {
}
if (!("fields" in schemaLike)) {
throw Error(
"The schema passed in does not appear to be a schema (no 'fields' property)",
"The schema passed in does not appear to be a schema (no 'fields' property)"
);
}
let metadata;
@@ -498,11 +498,11 @@ export function sanitizeSchema(schemaLike: unknown): Schema {
}
if (!Array.isArray(schemaLike.fields)) {
throw Error(
"The schema passed in had a 'fields' property but it was not an array",
"The schema passed in had a 'fields' property but it was not an array"
);
}
const sanitizedFields = schemaLike.fields.map((field) =>
sanitizeField(field),
sanitizeField(field)
);
return new Schema(sanitizedFields, metadata);
}

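For context, a sketch of what the sanitizers enable: accepting a schema built by a different copy or version of apache-arrow. The `apache-arrow-old` alias comes from the test setup later in this diff; any other major version would do:

```ts
import { sanitizeSchema } from "./sanitize";
import {
  Field as OldField,
  Int32 as OldInt32,
  Schema as OldSchema,
} from "apache-arrow-old";

// A schema constructed by a foreign arrow build fails `instanceof`
// checks against this package's classes...
const foreign = new OldSchema([new OldField("id", new OldInt32(), false)]);
// ...so it is rebuilt field by field into local Schema/Field instances.
const schema = sanitizeSchema(foreign);
console.log(schema.fields.map((f) => f.name));
```
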
File diff suppressed because it is too large
@@ -1,3 +0,0 @@
**/dist/**/*
**/native.js
**/native.d.ts
1
nodejs/.gitignore
vendored
Normal file
1
nodejs/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
yarn.lock
@@ -1 +0,0 @@
.eslintignore
@@ -43,29 +43,20 @@ npm run test

### Running lint / format

LanceDb uses eslint for linting. VSCode does not need any plugins to use eslint. However, it
may need some additional configuration. Make sure that eslint.experimental.useFlatConfig is
set to true. Also, if your vscode root folder is the repo root then you will need to set
the eslint.workingDirectories to ["nodejs"]. To manually lint your code you can run:
LanceDb uses [biome](https://biomejs.dev/) for linting and formatting. If you are using VSCode you will need to install the official [Biome](https://marketplace.visualstudio.com/items?itemName=biomejs.biome) extension.
To manually lint your code you can run:

```sh
npm run lint
```

LanceDb uses prettier for formatting. If you are using VSCode you will need to install the
"Prettier - Code formatter" extension. You should then configure it to be the default formatter
for typescript and you should enable format on save. To manually check your code's format you
can run:
to automatically fix all fixable issues:

```sh
npm run chkformat
npm run lint-fix
```

If you need to manually format your code you can run:

```sh
npx prettier --write .
```
If you do not have your workspace root set to the `nodejs` directory, unfortunately the extension will not work. You can still run the linting and formatting commands manually.

### Generating docs


@@ -13,32 +13,26 @@
// limitations under the License.

import {
convertToTable,
fromTableToBuffer,
makeArrowTable,
makeEmptyTable,
} from "../dist/arrow";
import {
Field,
FixedSizeList,
Float16,
Float32,
Int32,
tableFromIPC,
Schema,
Float64,
type Table,
Binary,
Bool,
Utf8,
Struct,
List,
DataType,
Dictionary,
Int64,
Field,
FixedSizeList,
Float,
Precision,
Float16,
Float32,
Float64,
Int32,
Int64,
List,
MetadataVersion,
Precision,
Schema,
Struct,
type Table,
Utf8,
tableFromIPC,
} from "apache-arrow";
import {
Dictionary as OldDictionary,
@@ -46,14 +40,20 @@ import {
FixedSizeList as OldFixedSizeList,
Float32 as OldFloat32,
Int32 as OldInt32,
Struct as OldStruct,
Schema as OldSchema,
Struct as OldStruct,
TimestampNanosecond as OldTimestampNanosecond,
Utf8 as OldUtf8,
} from "apache-arrow-old";
import { type EmbeddingFunction } from "../dist/embedding/embedding_function";
import {
convertToTable,
fromTableToBuffer,
makeArrowTable,
makeEmptyTable,
} from "../lancedb/arrow";
import { type EmbeddingFunction } from "../lancedb/embedding/embedding_function";

// eslint-disable-next-line @typescript-eslint/no-explicit-any
// biome-ignore lint/suspicious/noExplicitAny: skip
function sampleRecords(): Array<Record<string, any>> {
return [
{
@@ -438,7 +438,7 @@ describe("when using two versions of arrow", function () {
new OldField("ts_no_tz", new OldTimestampNanosecond(null)),
]),
),
// eslint-disable-next-line @typescript-eslint/no-explicit-any
// biome-ignore lint/suspicious/noExplicitAny: skip
]) as any;
schema.metadataVersion = MetadataVersion.V5;
const table = makeArrowTable([], { schema });

@@ -14,11 +14,13 @@

import * as tmp from "tmp";

import { Connection, connect } from "../dist/index.js";
import { Connection, connect } from "../lancedb";

describe("when connecting", () => {
let tmpDir: tmp.DirResult;
beforeEach(() => (tmpDir = tmp.dirSync({ unsafeCleanup: true })));
beforeEach(() => {
tmpDir = tmp.dirSync({ unsafeCleanup: true });
});
afterEach(() => tmpDir.removeCallback());

it("should connect", async () => {

@@ -14,7 +14,11 @@

/* eslint-disable @typescript-eslint/naming-convention */

import { connect } from "../dist";
import {
CreateKeyCommand,
KMSClient,
ScheduleKeyDeletionCommand,
} from "@aws-sdk/client-kms";
import {
CreateBucketCommand,
DeleteBucketCommand,
@@ -23,11 +27,7 @@ import {
ListObjectsV2Command,
S3Client,
} from "@aws-sdk/client-s3";
import {
CreateKeyCommand,
ScheduleKeyDeletionCommand,
KMSClient,
} from "@aws-sdk/client-kms";
import { connect } from "../lancedb";

// Skip these tests unless the S3_TEST environment variable is set
const maybeDescribe = process.env.S3_TEST ? describe : describe.skip;
@@ -63,9 +63,10 @@ class S3Bucket {
// Delete the bucket if it already exists
try {
await this.deleteBucket(client, name);
} catch (e) {
} catch {
// It's fine if the bucket doesn't exist
}
// biome-ignore lint/style/useNamingConvention: we dont control s3's api
await client.send(new CreateBucketCommand({ Bucket: name }));
return new S3Bucket(name);
}
@@ -78,27 +79,32 @@ class S3Bucket {
static async deleteBucket(client: S3Client, name: string) {
// Must delete all objects before we can delete the bucket
const objects = await client.send(
// biome-ignore lint/style/useNamingConvention: we dont control s3's api
new ListObjectsV2Command({ Bucket: name }),
);
if (objects.Contents) {
for (const object of objects.Contents) {
await client.send(
// biome-ignore lint/style/useNamingConvention: we dont control s3's api
new DeleteObjectCommand({ Bucket: name, Key: object.Key }),
);
}
}

// biome-ignore lint/style/useNamingConvention: we dont control s3's api
await client.send(new DeleteBucketCommand({ Bucket: name }));
}

public async assertAllEncrypted(path: string, keyId: string) {
const client = S3Bucket.s3Client();
const objects = await client.send(
// biome-ignore lint/style/useNamingConvention: we dont control s3's api
new ListObjectsV2Command({ Bucket: this.name, Prefix: path }),
);
if (objects.Contents) {
for (const object of objects.Contents) {
const metadata = await client.send(
// biome-ignore lint/style/useNamingConvention: we dont control s3's api
new HeadObjectCommand({ Bucket: this.name, Key: object.Key }),
);
expect(metadata.ServerSideEncryption).toBe("aws:kms");
@@ -137,6 +143,7 @@ class KmsKey {

public async delete() {
const client = KmsKey.kmsClient();
// biome-ignore lint/style/useNamingConvention: we dont control s3's api
await client.send(new ScheduleKeyDeletionCommand({ KeyId: this.keyId }));
}
}

@@ -16,18 +16,18 @@ import * as fs from "fs";
import * as path from "path";
import * as tmp from "tmp";

import { Table, connect } from "../dist";
import {
Schema,
Field,
Float32,
Int32,
FixedSizeList,
Int64,
Float32,
Float64,
Int32,
Int64,
Schema,
} from "apache-arrow";
import { makeArrowTable } from "../dist/arrow";
import { Index } from "../dist/indices";
import { Table, connect } from "../lancedb";
import { makeArrowTable } from "../lancedb/arrow";
import { Index } from "../lancedb/indices";

describe("Given a table", () => {
let tmpDir: tmp.DirResult;
@@ -419,3 +419,31 @@ describe("when dealing with versioning", () => {
);
});
});

describe("when optimizing a dataset", () => {
let tmpDir: tmp.DirResult;
let table: Table;
beforeEach(async () => {
tmpDir = tmp.dirSync({ unsafeCleanup: true });
const con = await connect(tmpDir.name);
table = await con.createTable("vectors", [{ id: 1 }]);
await table.add([{ id: 2 }]);
});
afterEach(() => {
tmpDir.removeCallback();
});

it("compacts files", async () => {
const stats = await table.optimize();
expect(stats.compaction.filesAdded).toBe(1);
expect(stats.compaction.filesRemoved).toBe(2);
expect(stats.compaction.fragmentsAdded).toBe(1);
expect(stats.compaction.fragmentsRemoved).toBe(2);
});

it("cleanups old versions", async () => {
const stats = await table.optimize({ cleanupOlderThan: new Date() });
expect(stats.prune.bytesRemoved).toBeGreaterThan(0);
expect(stats.prune.oldVersionsRemoved).toBe(3);
});
});

136
nodejs/biome.json
Normal file
136
nodejs/biome.json
Normal file
@@ -0,0 +1,136 @@
{
"$schema": "https://biomejs.dev/schemas/1.7.3/schema.json",
"organizeImports": {
"enabled": true
},
"files": {
"ignore": [
"**/dist/**/*",
"**/native.js",
"**/native.d.ts",
"**/npm/**/*",
"**/.vscode/**"
]
},
"formatter": {
"indentStyle": "space"
},
"linter": {
"enabled": true,
"rules": {
"recommended": false,
"complexity": {
"noBannedTypes": "error",
"noExtraBooleanCast": "error",
"noMultipleSpacesInRegularExpressionLiterals": "error",
"noUselessCatch": "error",
"noUselessThisAlias": "error",
"noUselessTypeConstraint": "error",
"noWith": "error"
},
"correctness": {
"noConstAssign": "error",
"noConstantCondition": "error",
"noEmptyCharacterClassInRegex": "error",
"noEmptyPattern": "error",
"noGlobalObjectCalls": "error",
"noInnerDeclarations": "error",
"noInvalidConstructorSuper": "error",
"noNewSymbol": "error",
"noNonoctalDecimalEscape": "error",
"noPrecisionLoss": "error",
"noSelfAssign": "error",
"noSetterReturn": "error",
"noSwitchDeclarations": "error",
"noUndeclaredVariables": "error",
"noUnreachable": "error",
"noUnreachableSuper": "error",
"noUnsafeFinally": "error",
"noUnsafeOptionalChaining": "error",
"noUnusedLabels": "error",
"noUnusedVariables": "error",
"useIsNan": "error",
"useValidForDirection": "error",
"useYield": "error"
},
"style": {
"noNamespace": "error",
"useAsConstAssertion": "error",
"useBlockStatements": "off",
"useNamingConvention": {
"level": "error",
"options": {
"strictCase": false
}
}
},
"suspicious": {
"noAssignInExpressions": "error",
"noAsyncPromiseExecutor": "error",
"noCatchAssign": "error",
"noClassAssign": "error",
"noCompareNegZero": "error",
"noControlCharactersInRegex": "error",
"noDebugger": "error",
"noDuplicateCase": "error",
"noDuplicateClassMembers": "error",
"noDuplicateObjectKeys": "error",
"noDuplicateParameters": "error",
"noEmptyBlockStatements": "error",
"noExplicitAny": "error",
"noExtraNonNullAssertion": "error",
"noFallthroughSwitchClause": "error",
"noFunctionAssign": "error",
"noGlobalAssign": "error",
"noImportAssign": "error",
"noMisleadingCharacterClass": "error",
"noMisleadingInstantiator": "error",
"noPrototypeBuiltins": "error",
"noRedeclare": "error",
"noShadowRestrictedNames": "error",
"noUnsafeDeclarationMerging": "error",
"noUnsafeNegation": "error",
"useGetterReturn": "error",
"useValidTypeof": "error"
}
},
"ignore": ["**/dist/**/*", "**/native.js", "**/native.d.ts"]
},
"javascript": {
"globals": []
},
"overrides": [
{
"include": ["**/*.ts", "**/*.tsx", "**/*.mts", "**/*.cts"],
"linter": {
"rules": {
"correctness": {
"noConstAssign": "off",
"noGlobalObjectCalls": "off",
"noInvalidConstructorSuper": "off",
"noNewSymbol": "off",
"noSetterReturn": "off",
"noUndeclaredVariables": "off",
"noUnreachable": "off",
"noUnreachableSuper": "off"
},
"style": {
"noArguments": "error",
"noVar": "error",
"useConst": "error"
},
"suspicious": {
"noDuplicateClassMembers": "off",
"noDuplicateObjectKeys": "off",
"noDuplicateParameters": "off",
"noFunctionAssign": "off",
"noImportAssign": "off",
"noRedeclare": "off",
"noUnsafeNegation": "off",
"useGetterReturn": "off"
}
}
}
}
]
}
@@ -1,28 +0,0 @@
/* eslint-disable @typescript-eslint/naming-convention */
// @ts-check

const eslint = require("@eslint/js");
const tseslint = require("typescript-eslint");
const eslintConfigPrettier = require("eslint-config-prettier");
const jsdoc = require("eslint-plugin-jsdoc");

module.exports = tseslint.config(
eslint.configs.recommended,
jsdoc.configs["flat/recommended"],
eslintConfigPrettier,
...tseslint.configs.recommended,
{
rules: {
"@typescript-eslint/naming-convention": "error",
"jsdoc/require-returns": "off",
"jsdoc/require-param": "off",
"jsdoc/require-jsdoc": [
"error",
{
publicOnly: true,
},
],
},
plugins: jsdoc,
},
);
@@ -13,25 +13,25 @@
// limitations under the License.

import {
Field,
makeBuilder,
RecordBatchFileWriter,
Utf8,
type Vector,
FixedSizeList,
vectorFromArray,
type Schema,
Table as ArrowTable,
RecordBatchStreamWriter,
Binary,
DataType,
Field,
FixedSizeList,
type Float,
Float32,
List,
RecordBatch,
makeData,
RecordBatchFileWriter,
RecordBatchStreamWriter,
Schema,
Struct,
type Float,
DataType,
Binary,
Float32,
Utf8,
type Vector,
makeBuilder,
makeData,
type makeTable,
vectorFromArray,
} from "apache-arrow";
import { type EmbeddingFunction } from "./embedding/embedding_function";
import { sanitizeSchema } from "./sanitize";
@@ -85,6 +85,7 @@ export class MakeArrowTableOptions {
vectorColumns: Record<string, VectorColumnOptions> = {
vector: new VectorColumnOptions(),
};
embeddings?: EmbeddingFunction<unknown>;

/**
* If true then string columns will be encoded with dictionary encoding
@@ -208,6 +209,7 @@ export function makeArrowTable(
const opt = new MakeArrowTableOptions(options !== undefined ? options : {});
if (opt.schema !== undefined && opt.schema !== null) {
opt.schema = sanitizeSchema(opt.schema);
opt.schema = validateSchemaEmbeddings(opt.schema, data, opt.embeddings);
}
const columns: Record<string, Vector> = {};
// TODO: sample dataset to find missing columns
|
||||
@@ -287,8 +289,8 @@ export function makeArrowTable(
|
||||
// then patch the schema of the batches so we can use
|
||||
// `new ArrowTable(schema, batches)` which does not do any schema inference
|
||||
const firstTable = new ArrowTable(columns);
|
||||
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
|
||||
const batchesFixed = firstTable.batches.map(
|
||||
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
|
||||
(batch) => new RecordBatch(opt.schema!, batch.data),
|
||||
);
|
||||
return new ArrowTable(opt.schema, batchesFixed);
|
||||
@@ -313,7 +315,7 @@ function makeListVector(lists: unknown[][]): Vector<unknown> {
|
||||
throw Error("Cannot infer list vector from empty array or empty list");
|
||||
}
|
||||
const sampleList = lists[0];
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
// biome-ignore lint/suspicious/noExplicitAny: skip
|
||||
let inferredType: any;
|
||||
try {
|
||||
const sampleVector = makeVector(sampleList);
|
||||
@@ -337,7 +339,7 @@ function makeVector(
|
||||
values: unknown[],
|
||||
type?: DataType,
|
||||
stringAsDictionary?: boolean,
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
// biome-ignore lint/suspicious/noExplicitAny: skip
|
||||
): Vector<any> {
|
||||
if (type !== undefined) {
|
||||
// No need for inference, let Arrow create it
|
||||
@@ -648,3 +650,39 @@ function alignTable(table: ArrowTable, schema: Schema): ArrowTable {
|
||||
export function createEmptyTable(schema: Schema): ArrowTable {
|
||||
return new ArrowTable(sanitizeSchema(schema));
|
||||
}
|
||||
|
||||
function validateSchemaEmbeddings(
|
||||
schema: Schema,
|
||||
data: Array<Record<string, unknown>>,
|
||||
embeddings: EmbeddingFunction<unknown> | undefined,
|
||||
) {
|
||||
const fields = [];
|
||||
const missingEmbeddingFields = [];
|
||||
|
||||
// First we check if the field is a `FixedSizeList`
|
||||
// Then we check if the data contains the field
|
||||
// if it does not, we add it to the list of missing embedding fields
|
||||
// Finally, we check if those missing embedding fields are `this._embeddings`
|
||||
// if they are not, we throw an error
|
||||
for (const field of schema.fields) {
|
||||
if (field.type instanceof FixedSizeList) {
|
||||
if (data.length !== 0 && data?.[0]?.[field.name] === undefined) {
|
||||
missingEmbeddingFields.push(field);
|
||||
} else {
|
||||
fields.push(field);
|
||||
}
|
||||
} else {
|
||||
fields.push(field);
|
||||
}
|
||||
}
|
||||
|
||||
if (missingEmbeddingFields.length > 0 && embeddings === undefined) {
|
||||
throw new Error(
|
||||
`Table has embeddings: "${missingEmbeddingFields
|
||||
.map((f) => f.name)
|
||||
.join(",")}", but no embedding function was provided`,
|
||||
);
|
||||
}
|
||||
|
||||
return new Schema(fields, schema.metadata);
|
||||
}
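In short, a `FixedSizeList` column may be absent from the input rows only when an embedding function is available to produce it later in the pipeline; otherwise the error above is thrown. A minimal sketch of a call that passes this validation (the import paths and `myEmbeddingFunction` are assumptions, not part of the diff):

```typescript
import { Field, FixedSizeList, Float32, Schema, Utf8 } from "apache-arrow";
import { type EmbeddingFunction } from "./embedding/embedding_function";
import { makeArrowTable } from "./arrow";

declare const myEmbeddingFunction: EmbeddingFunction<string>; // hypothetical

const schema = new Schema([
  new Field("text", new Utf8(), false),
  new Field(
    "vector",
    new FixedSizeList(2, new Field("item", new Float32(), true)),
    true,
  ),
]);

// "vector" is missing from the rows; validateSchemaEmbeddings accepts it
// only because an embedding function is supplied.
const table = makeArrowTable([{ text: "hello" }, { text: "world" }], {
  schema,
  embeddings: myEmbeddingFunction,
});
```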
|
||||
|
||||
@@ -12,15 +12,15 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import { Table as ArrowTable, Schema } from "apache-arrow";
|
||||
import { fromTableToBuffer, makeArrowTable, makeEmptyTable } from "./arrow";
|
||||
import { ConnectionOptions, Connection as LanceDbConnection } from "./native";
|
||||
import { Table } from "./table";
|
||||
import { Table as ArrowTable, Schema } from "apache-arrow";
|
||||
|
||||
/**
|
||||
* Connect to a LanceDB instance at the given URI.
|
||||
*
|
||||
* Accpeted formats:
|
||||
* Accepted formats:
|
||||
*
|
||||
* - `/path/to/database` - local database
|
||||
* - `s3://bucket/path/to/database` or `gs://bucket/path/to/database` - database on cloud storage
|
||||
@@ -77,6 +77,18 @@ export interface OpenTableOptions {
|
||||
* The available options are described at https://lancedb.github.io/lancedb/guides/storage/
|
||||
*/
|
||||
storageOptions?: Record<string, string>;
|
||||
/**
|
||||
* Set the size of the index cache, specified as a number of entries
|
||||
*
|
||||
* The exact meaning of an "entry" will depend on the type of index:
|
||||
* - IVF: there is one entry for each IVF partition
|
||||
* - BTREE: there is one entry for the entire index
|
||||
*
|
||||
* This cache applies to the entire opened table, across all indices.
|
||||
* Setting this value higher will increase performance on larger datasets
|
||||
* at the expense of more RAM
|
||||
*/
|
||||
indexCacheSize?: number;
|
||||
}
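A minimal sketch of the option in use (database path and table name assumed):

```typescript
import * as lancedb from "@lancedb/lancedb";

const db = await lancedb.connect("./.lancedb");
// Cache up to 512 index entries (e.g. IVF partitions) for this table,
// trading RAM for fewer index reads on larger datasets.
const table = await db.openTable("my_table", { indexCacheSize: 512 });
```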
|
||||
|
||||
export interface TableNamesOptions {
|
||||
@@ -160,6 +172,7 @@ export class Connection {
|
||||
const innerTable = await this.inner.openTable(
|
||||
name,
|
||||
cleanseStorageOptions(options?.storageOptions),
|
||||
options?.indexCacheSize,
|
||||
);
|
||||
return new Table(innerTable);
|
||||
}
|
||||
|
||||
@@ -12,8 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import { type EmbeddingFunction } from "./embedding_function";
|
||||
import type OpenAI from "openai";
|
||||
import { type EmbeddingFunction } from "./embedding_function";
|
||||
|
||||
export class OpenAIEmbeddingFunction implements EmbeddingFunction<string> {
|
||||
private readonly _openai: OpenAI;
|
||||
|
||||
@@ -12,14 +12,14 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import { RecordBatch, tableFromIPC, Table as ArrowTable } from "apache-arrow";
|
||||
import { Table as ArrowTable, RecordBatch, tableFromIPC } from "apache-arrow";
|
||||
import { type IvfPqOptions } from "./indices";
|
||||
import {
|
||||
RecordBatchIterator as NativeBatchIterator,
|
||||
Query as NativeQuery,
|
||||
Table as NativeTable,
|
||||
VectorQuery as NativeVectorQuery,
|
||||
} from "./native";
|
||||
import { type IvfPqOptions } from "./indices";
|
||||
export class RecordBatchIterator implements AsyncIterator<RecordBatch> {
|
||||
private promisedInner?: Promise<NativeBatchIterator>;
|
||||
private inner?: NativeBatchIterator;
|
||||
@@ -29,7 +29,7 @@ export class RecordBatchIterator implements AsyncIterator<RecordBatch> {
|
||||
this.promisedInner = promise;
|
||||
}
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
// biome-ignore lint/suspicious/noExplicitAny: skip
|
||||
async next(): Promise<IteratorResult<RecordBatch<any>>> {
|
||||
if (this.inner === undefined) {
|
||||
this.inner = await this.promisedInner;
|
||||
@@ -56,7 +56,9 @@ export class QueryBase<
|
||||
QueryType,
|
||||
> implements AsyncIterable<RecordBatch>
|
||||
{
|
||||
protected constructor(protected inner: NativeQueryType) {}
|
||||
protected constructor(protected inner: NativeQueryType) {
|
||||
// intentionally empty
|
||||
}
|
||||
|
||||
/**
|
||||
* A filter statement to be applied to this query.
|
||||
@@ -150,7 +152,7 @@ export class QueryBase<
|
||||
return new RecordBatchIterator(this.nativeExecute());
|
||||
}
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
// biome-ignore lint/suspicious/noExplicitAny: skip
|
||||
[Symbol.asyncIterator](): AsyncIterator<RecordBatch<any>> {
|
||||
const promise = this.nativeExecute();
|
||||
return new RecordBatchIterator(promise);
|
||||
@@ -368,7 +370,7 @@ export class Query extends QueryBase<NativeQuery, Query> {
|
||||
* a default `limit` of 10 will be used. @see {@link Query#limit}
|
||||
*/
|
||||
nearestTo(vector: unknown): VectorQuery {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
// biome-ignore lint/suspicious/noExplicitAny: skip
|
||||
const vectorQuery = this.inner.nearestTo(Float32Array.from(vector as any));
|
||||
return new VectorQuery(vectorQuery);
|
||||
}
|
||||
|
||||
@@ -21,60 +21,60 @@
|
||||
// and so we must sanitize the input to ensure that it is compatible.
|
||||
|
||||
import {
|
||||
Field,
|
||||
Utf8,
|
||||
FixedSizeBinary,
|
||||
FixedSizeList,
|
||||
Schema,
|
||||
List,
|
||||
Struct,
|
||||
Float,
|
||||
Binary,
|
||||
Bool,
|
||||
DataType,
|
||||
DateDay,
|
||||
DateMillisecond,
|
||||
type DateUnit,
|
||||
Date_,
|
||||
Decimal,
|
||||
DataType,
|
||||
DenseUnion,
|
||||
Dictionary,
|
||||
Binary,
|
||||
Float32,
|
||||
Interval,
|
||||
Map_,
|
||||
Duration,
|
||||
Union,
|
||||
Time,
|
||||
Timestamp,
|
||||
Type,
|
||||
Null,
|
||||
DurationMicrosecond,
|
||||
DurationMillisecond,
|
||||
DurationNanosecond,
|
||||
DurationSecond,
|
||||
Field,
|
||||
FixedSizeBinary,
|
||||
FixedSizeList,
|
||||
Float,
|
||||
Float16,
|
||||
Float32,
|
||||
Float64,
|
||||
Int,
|
||||
type Precision,
|
||||
type DateUnit,
|
||||
Int8,
|
||||
Int16,
|
||||
Int32,
|
||||
Int64,
|
||||
Interval,
|
||||
IntervalDayTime,
|
||||
IntervalYearMonth,
|
||||
List,
|
||||
Map_,
|
||||
Null,
|
||||
type Precision,
|
||||
Schema,
|
||||
SparseUnion,
|
||||
Struct,
|
||||
Time,
|
||||
TimeMicrosecond,
|
||||
TimeMillisecond,
|
||||
TimeNanosecond,
|
||||
TimeSecond,
|
||||
Timestamp,
|
||||
TimestampMicrosecond,
|
||||
TimestampMillisecond,
|
||||
TimestampNanosecond,
|
||||
TimestampSecond,
|
||||
Type,
|
||||
Uint8,
|
||||
Uint16,
|
||||
Uint32,
|
||||
Uint64,
|
||||
Float16,
|
||||
Float64,
|
||||
DateDay,
|
||||
DateMillisecond,
|
||||
DenseUnion,
|
||||
SparseUnion,
|
||||
TimeNanosecond,
|
||||
TimeMicrosecond,
|
||||
TimeMillisecond,
|
||||
TimeSecond,
|
||||
TimestampNanosecond,
|
||||
TimestampMicrosecond,
|
||||
TimestampMillisecond,
|
||||
TimestampSecond,
|
||||
IntervalDayTime,
|
||||
IntervalYearMonth,
|
||||
DurationNanosecond,
|
||||
DurationMicrosecond,
|
||||
DurationMillisecond,
|
||||
DurationSecond,
|
||||
Union,
|
||||
Utf8,
|
||||
} from "apache-arrow";
|
||||
import type { IntBitWidth, TKeys, TimeBitWidth } from "apache-arrow/type";
|
||||
|
||||
@@ -228,7 +228,7 @@ function sanitizeUnion(typeLike: object) {
|
||||
|
||||
return new Union(
|
||||
typeLike.mode,
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
// biome-ignore lint/suspicious/noExplicitAny: skip
|
||||
typeLike.typeIds as any,
|
||||
typeLike.children.map((child) => sanitizeField(child)),
|
||||
);
|
||||
@@ -294,7 +294,7 @@ function sanitizeMap(typeLike: object) {
|
||||
}
|
||||
|
||||
return new Map_(
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
// biome-ignore lint/suspicious/noExplicitAny: skip
|
||||
typeLike.children.map((field) => sanitizeField(field)) as any,
|
||||
typeLike.keysSorted,
|
||||
);
|
||||
@@ -328,7 +328,7 @@ function sanitizeDictionary(typeLike: object) {
|
||||
);
|
||||
}
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
// biome-ignore lint/suspicious/noExplicitAny: skip
|
||||
function sanitizeType(typeLike: unknown): DataType<any> {
|
||||
if (typeof typeLike !== "object" || typeLike === null) {
|
||||
throw Error("Expected a Type but object was null/undefined");
|
||||
|
||||
@@ -13,15 +13,16 @@
|
||||
// limitations under the License.
|
||||
|
||||
import { Schema, tableFromIPC } from "apache-arrow";
|
||||
import { Data, fromDataToBuffer } from "./arrow";
|
||||
import { IndexOptions } from "./indices";
|
||||
import {
|
||||
AddColumnsSql,
|
||||
ColumnAlteration,
|
||||
IndexConfig,
|
||||
OptimizeStats,
|
||||
Table as _NativeTable,
|
||||
} from "./native";
|
||||
import { Query, VectorQuery } from "./query";
|
||||
import { IndexOptions } from "./indices";
|
||||
import { Data, fromDataToBuffer } from "./arrow";
|
||||
|
||||
export { IndexConfig } from "./native";
|
||||
/**
|
||||
@@ -50,6 +51,23 @@ export interface UpdateOptions {
|
||||
where: string;
|
||||
}
|
||||
|
||||
export interface OptimizeOptions {
|
||||
/**
|
||||
* If set then all versions older than the given date
* will be removed. The current version will never be removed.
* The default is 7 days.
* @example
* // Delete all versions older than 1 day
* const olderThan = new Date();
* olderThan.setDate(olderThan.getDate() - 1);
* tbl.optimize({ cleanupOlderThan: olderThan });
*
* // Delete all versions except the current version
* tbl.optimize({ cleanupOlderThan: new Date() });
|
||||
*/
|
||||
cleanupOlderThan: Date;
|
||||
}
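The cutoff is an ordinary `Date`, so the usual date arithmetic applies; a short sketch, assuming `tbl` is an open `Table`:

```typescript
// Remove versions older than one day, then compact and refresh indices.
const olderThan = new Date();
olderThan.setDate(olderThan.getDate() - 1);
const stats = await tbl.optimize({ cleanupOlderThan: olderThan });
console.log(stats.prune.oldVersionsRemoved, stats.prune.bytesRemoved);
```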
|
||||
|
||||
/**
|
||||
* A Table is a collection of Records in a LanceDB Database.
|
||||
*
|
||||
@@ -169,21 +187,24 @@ export class Table {
|
||||
* // If the column has a vector (fixed size list) data type then
|
||||
* // an IvfPq vector index will be created.
|
||||
* const table = await conn.openTable("my_table");
|
||||
* await table.createIndex(["vector"]);
|
||||
* await table.createIndex("vector");
|
||||
* @example
|
||||
* // For advanced control over vector index creation you can specify
|
||||
* // the index type and options.
|
||||
* const table = await conn.openTable("my_table");
|
||||
* await table.createIndex(["vector"], I)
|
||||
* .ivf_pq({ num_partitions: 128, num_sub_vectors: 16 })
|
||||
* .build();
|
||||
* await table.createIndex("vector", {
|
||||
* config: lancedb.Index.ivfPq({
|
||||
* numPartitions: 128,
|
||||
* numSubVectors: 16,
|
||||
* }),
|
||||
* });
|
||||
* @example
|
||||
* // Or create a Scalar index
|
||||
* await table.createIndex("my_float_col").build();
|
||||
* await table.createIndex("my_float_col");
|
||||
*/
|
||||
async createIndex(column: string, options?: Partial<IndexOptions>) {
|
||||
// Bit of a hack to get around the fact that TS has no package-scope.
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
// biome-ignore lint/suspicious/noExplicitAny: skip
|
||||
const nativeIndex = (options?.config as any)?.inner;
|
||||
await this.inner.createIndex(nativeIndex, column, options?.replace);
|
||||
}
|
||||
@@ -197,8 +218,7 @@ export class Table {
|
||||
* vector similarity, sorting, and more.
|
||||
*
|
||||
* Note: By default, all columns are returned. For best performance, you should
|
||||
* only fetch the columns you need. See [`Query::select_with_projection`] for
|
||||
* more details.
|
||||
* only fetch the columns you need.
|
||||
*
|
||||
* When appropriate, various indices and statistics based pruning will be used to
|
||||
* accelerate the query.
|
||||
@@ -206,10 +226,13 @@ export class Table {
|
||||
* // SQL-style filtering
|
||||
* //
|
||||
* // This query will return up to 1000 rows whose value in the `id` column
|
||||
* // is greater than 5. LanceDb supports a broad set of filtering functions.
|
||||
* for await (const batch of table.query()
|
||||
* .filter("id > 1").select(["id"]).limit(20)) {
|
||||
* console.log(batch);
|
||||
* // is greater than 5. LanceDb supports a broad set of filtering functions.
|
||||
* for await (const batch of table
|
||||
* .query()
|
||||
* .where("id > 1")
|
||||
* .select(["id"])
|
||||
* .limit(20)) {
|
||||
* console.log(batch);
|
||||
* }
|
||||
* @example
|
||||
* // Vector Similarity Search
|
||||
@@ -218,13 +241,14 @@ export class Table {
|
||||
* // closest to the query vector [1.0, 2.0, 3.0]. If an index has been created
|
||||
* // on the "vector" column then this will perform an ANN search.
|
||||
* //
|
||||
* // The `refine_factor` and `nprobes` methods are used to control the recall /
|
||||
* // The `refineFactor` and `nprobes` methods are used to control the recall /
|
||||
* // latency tradeoff of the search.
|
||||
* for await (const batch of table.query()
|
||||
* .nearestTo([1, 2, 3])
|
||||
* .refineFactor(5).nprobe(10)
|
||||
* .limit(10)) {
|
||||
* console.log(batch);
|
||||
* for await (const batch of table
* .query()
* .nearestTo([1.0, 2.0, 3.0])
* .refineFactor(5)
* .nprobes(10)
* .limit(10)) {
* console.log(batch);
|
||||
* }
|
||||
* @example
|
||||
* // Scan the full dataset
|
||||
@@ -286,43 +310,45 @@ export class Table {
|
||||
await this.inner.dropColumns(columnNames);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the version of the table
|
||||
*
|
||||
* LanceDb supports versioning. Every operation that modifies the table increases
|
||||
* version. As long as a version hasn't been deleted you can `[Self::checkout]` that
|
||||
* version to view the data at that point. In addition, you can `[Self::restore]` the
|
||||
* version to replace the current table with a previous version.
|
||||
*/
|
||||
/** Retrieve the version of the table */
|
||||
async version(): Promise<number> {
|
||||
return await this.inner.version();
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks out a specific version of the Table
|
||||
* Checks out a specific version of the table. _This is an in-place operation._
|
||||
*
|
||||
* Any read operation on the table will now access the data at the checked out version.
|
||||
* As a consequence, calling this method will disable any read consistency interval
|
||||
* that was previously set.
|
||||
* This allows viewing previous versions of the table. If you wish to
|
||||
* keep writing to the dataset starting from an old version, then use
|
||||
* the `restore` function.
|
||||
*
|
||||
* This is a read-only operation that turns the table into a sort of "view"
|
||||
* or "detached head". Other table instances will not be affected. To make the change
|
||||
* permanent you can use the `[Self::restore]` method.
|
||||
* Calling this method will set the table into time-travel mode. If you
|
||||
* wish to return to standard mode, call `checkoutLatest`.
|
||||
* @param {number} version The version to checkout
|
||||
* @example
|
||||
* ```typescript
|
||||
* import * as lancedb from "@lancedb/lancedb"
|
||||
* const db = await lancedb.connect("./.lancedb");
|
||||
* const table = await db.createTable("my_table", [
|
||||
* { vector: [1.1, 0.9], type: "vector" },
|
||||
* ]);
|
||||
*
|
||||
* Any operation that modifies the table will fail while the table is in a checked
|
||||
* out state.
|
||||
*
|
||||
* To return the table to a normal state use `[Self::checkout_latest]`
|
||||
* console.log(await table.version()); // 1
|
||||
* console.log(table.display());
|
||||
* await table.add([{ vector: [0.5, 0.2], type: "vector" }]);
|
||||
* await table.checkout(1);
|
||||
* console.log(await table.version()); // 1
|
||||
* ```
|
||||
*/
|
||||
async checkout(version: number): Promise<void> {
|
||||
await this.inner.checkout(version);
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensures the table is pointing at the latest version
|
||||
* Checkout the latest version of the table. _This is an in-place operation._
|
||||
*
|
||||
* This can be used to manually update a table when the read_consistency_interval is None
|
||||
* It can also be used to undo a `[Self::checkout]` operation
|
||||
* The table will be set back into standard mode, and will track the latest
|
||||
* version of the table.
|
||||
*/
|
||||
async checkoutLatest(): Promise<void> {
|
||||
await this.inner.checkoutLatest();
|
||||
@@ -345,8 +371,48 @@ export class Table {
|
||||
}
|
||||
|
||||
/**
|
||||
* List all indices that have been created with Self::create_index
|
||||
* Optimize the on-disk data and indices for better performance.
|
||||
*
|
||||
* Modeled after ``VACUUM`` in PostgreSQL.
|
||||
*
|
||||
* Optimization covers three operations:
|
||||
*
|
||||
* - Compaction: Merges small files into larger ones
|
||||
* - Prune: Removes old versions of the dataset
|
||||
* - Index: Optimizes the indices, adding new data to existing indices
|
||||
*
|
||||
* Experimental API
|
||||
* ----------------
|
||||
*
|
||||
* The optimization process is undergoing active development and may change.
|
||||
* Our goal with these changes is to improve the performance of optimization and
|
||||
* reduce the complexity.
|
||||
*
|
||||
* That being said, it is essential today to run optimize if you want the best
|
||||
* performance. It should be stable and safe to use in production, but it is our
|
||||
* hope that the API may be simplified (or not even need to be called) in the
|
||||
* future.
|
||||
*
|
||||
* The frequency an application should call optimize is based on the frequency of
|
||||
* data modifications. If data is frequently added, deleted, or updated then
|
||||
* optimize should be run frequently. A good rule of thumb is to run optimize if
|
||||
* you have added or modified 100,000 or more records or run more than 20 data
|
||||
* modification operations.
|
||||
*/
|
||||
async optimize(options?: Partial<OptimizeOptions>): Promise<OptimizeStats> {
|
||||
let cleanupOlderThanMs;
|
||||
if (
|
||||
options?.cleanupOlderThan !== undefined &&
|
||||
options?.cleanupOlderThan !== null
|
||||
) {
|
||||
cleanupOlderThanMs =
|
||||
new Date().getTime() - options.cleanupOlderThan.getTime();
|
||||
}
|
||||
return await this.inner.optimize(cleanupOlderThanMs);
|
||||
}
|
||||
|
||||
/** List all indices that have been created with {@link Table.createIndex} */
|
||||
async listIndices(): Promise<IndexConfig[]> {
|
||||
return await this.inner.listIndices();
|
||||
}
|
||||
|
||||
@@ -1,18 +1,12 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-darwin-arm64",
|
||||
"version": "0.4.17",
|
||||
"os": [
|
||||
"darwin"
|
||||
],
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"main": "lancedb.darwin-arm64.node",
|
||||
"files": [
|
||||
"lancedb.darwin-arm64.node"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
}
|
||||
"name": "@lancedb/lancedb-darwin-arm64",
|
||||
"version": "0.4.20",
|
||||
"os": ["darwin"],
|
||||
"cpu": ["arm64"],
|
||||
"main": "lancedb.darwin-arm64.node",
|
||||
"files": ["lancedb.darwin-arm64.node"],
|
||||
"license": "Apache 2.0",
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,18 +1,12 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-darwin-x64",
|
||||
"version": "0.4.17",
|
||||
"os": [
|
||||
"darwin"
|
||||
],
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"main": "lancedb.darwin-x64.node",
|
||||
"files": [
|
||||
"lancedb.darwin-x64.node"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
}
|
||||
"name": "@lancedb/lancedb-darwin-x64",
|
||||
"version": "0.4.20",
|
||||
"os": ["darwin"],
|
||||
"cpu": ["x64"],
|
||||
"main": "lancedb.darwin-x64.node",
|
||||
"files": ["lancedb.darwin-x64.node"],
|
||||
"license": "Apache 2.0",
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,21 +1,13 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-linux-arm64-gnu",
|
||||
"version": "0.4.17",
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"main": "lancedb.linux-arm64-gnu.node",
|
||||
"files": [
|
||||
"lancedb.linux-arm64-gnu.node"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
},
|
||||
"libc": [
|
||||
"glibc"
|
||||
]
|
||||
"name": "@lancedb/lancedb-linux-arm64-gnu",
|
||||
"version": "0.4.20",
|
||||
"os": ["linux"],
|
||||
"cpu": ["arm64"],
|
||||
"main": "lancedb.linux-arm64-gnu.node",
|
||||
"files": ["lancedb.linux-arm64-gnu.node"],
|
||||
"license": "Apache 2.0",
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
},
|
||||
"libc": ["glibc"]
|
||||
}
|
||||
|
||||
@@ -1,21 +1,13 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-linux-x64-gnu",
|
||||
"version": "0.4.17",
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"main": "lancedb.linux-x64-gnu.node",
|
||||
"files": [
|
||||
"lancedb.linux-x64-gnu.node"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
},
|
||||
"libc": [
|
||||
"glibc"
|
||||
]
|
||||
"name": "@lancedb/lancedb-linux-x64-gnu",
|
||||
"version": "0.4.20",
|
||||
"os": ["linux"],
|
||||
"cpu": ["x64"],
|
||||
"main": "lancedb.linux-x64-gnu.node",
|
||||
"files": ["lancedb.linux-x64-gnu.node"],
|
||||
"license": "Apache 2.0",
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
},
|
||||
"libc": ["glibc"]
|
||||
}
|
||||
|
||||
@@ -1,18 +1,12 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb-win32-x64-msvc",
|
||||
"version": "0.4.14",
|
||||
"os": [
|
||||
"win32"
|
||||
],
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"main": "lancedb.win32-x64-msvc.node",
|
||||
"files": [
|
||||
"lancedb.win32-x64-msvc.node"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
}
|
||||
"name": "@lancedb/lancedb-win32-x64-msvc",
|
||||
"version": "0.4.20",
|
||||
"os": ["win32"],
|
||||
"cpu": ["x64"],
|
||||
"main": "lancedb.win32-x64-msvc.node",
|
||||
"files": ["lancedb.win32-x64-msvc.node"],
|
||||
"license": "Apache 2.0",
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
}
|
||||
}
|
||||
|
||||
15661
nodejs/package-lock.json
generated
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@lancedb/lancedb",
|
||||
"version": "0.4.17",
|
||||
"version": "0.4.20",
|
||||
"main": "./dist/index.js",
|
||||
"types": "./dist/index.d.ts",
|
||||
"napi": {
|
||||
@@ -18,19 +18,16 @@
|
||||
},
|
||||
"license": "Apache 2.0",
|
||||
"devDependencies": {
|
||||
"@aws-sdk/client-s3": "^3.33.0",
|
||||
"@aws-sdk/client-kms": "^3.33.0",
|
||||
"@aws-sdk/client-s3": "^3.33.0",
|
||||
"@biomejs/biome": "^1.7.3",
|
||||
"@jest/globals": "^29.7.0",
|
||||
"@napi-rs/cli": "^2.18.0",
|
||||
"@types/jest": "^29.1.2",
|
||||
"@types/tmp": "^0.2.6",
|
||||
"@typescript-eslint/eslint-plugin": "^6.19.0",
|
||||
"@typescript-eslint/parser": "^6.19.0",
|
||||
"apache-arrow-old": "npm:apache-arrow@13.0.0",
|
||||
"eslint": "^8.57.0",
|
||||
"eslint-config-prettier": "^9.1.0",
|
||||
"eslint-plugin-jsdoc": "^48.2.1",
|
||||
"jest": "^29.7.0",
|
||||
"prettier": "^3.1.0",
|
||||
"shx": "^0.3.4",
|
||||
"tmp": "^0.2.3",
|
||||
"ts-jest": "^29.1.2",
|
||||
@@ -45,39 +42,26 @@
|
||||
"engines": {
|
||||
"node": ">= 18"
|
||||
},
|
||||
"cpu": [
|
||||
"x64",
|
||||
"arm64"
|
||||
],
|
||||
"os": [
|
||||
"darwin",
|
||||
"linux",
|
||||
"win32"
|
||||
],
|
||||
"cpu": ["x64", "arm64"],
|
||||
"os": ["darwin", "linux", "win32"],
|
||||
"scripts": {
|
||||
"artifacts": "napi artifacts",
|
||||
"build:debug": "napi build --platform --dts ../lancedb/native.d.ts --js ../lancedb/native.js dist/",
|
||||
"build:debug": "napi build --platform --dts ../lancedb/native.d.ts --js ../lancedb/native.js lancedb",
|
||||
"build:release": "napi build --platform --release --dts ../lancedb/native.d.ts --js ../lancedb/native.js dist/",
|
||||
"build": "npm run build:debug && tsc -b && shx cp lancedb/native.d.ts dist/native.d.ts",
|
||||
"build": "npm run build:debug && tsc -b && shx cp lancedb/native.d.ts dist/native.d.ts && shx cp lancedb/*.node dist/",
|
||||
"build-release": "npm run build:release && tsc -b && shx cp lancedb/native.d.ts dist/native.d.ts",
|
||||
"chkformat": "prettier . --check",
|
||||
"lint-ci": "biome ci .",
|
||||
"docs": "typedoc --plugin typedoc-plugin-markdown --out ../docs/src/js lancedb/index.ts",
|
||||
"lint": "eslint lancedb && eslint __test__",
|
||||
"lint": "biome check . && biome format .",
|
||||
"lint-fix": "biome check --apply-unsafe . && biome format --write .",
|
||||
"prepublishOnly": "napi prepublish -t npm",
|
||||
"test": "npm run build && jest --verbose",
|
||||
"test": "jest --verbose",
|
||||
"integration": "S3_TEST=1 npm run test",
|
||||
"universal": "napi universal",
|
||||
"version": "napi version"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@lancedb/lancedb-darwin-arm64": "0.4.17",
|
||||
"@lancedb/lancedb-darwin-x64": "0.4.17",
|
||||
"@lancedb/lancedb-linux-arm64-gnu": "0.4.17",
|
||||
"@lancedb/lancedb-linux-x64-gnu": "0.4.17",
|
||||
"@lancedb/lancedb-win32-x64-msvc": "0.4.17"
|
||||
},
|
||||
"dependencies": {
|
||||
"openai": "^4.29.2",
|
||||
"apache-arrow": "^15.0.0"
|
||||
"apache-arrow": "^15.0.0",
|
||||
"openai": "^4.29.2"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -176,6 +176,7 @@ impl Connection {
|
||||
&self,
|
||||
name: String,
|
||||
storage_options: Option<HashMap<String, String>>,
|
||||
index_cache_size: Option<u32>,
|
||||
) -> napi::Result<Table> {
|
||||
let mut builder = self.get_inner()?.open_table(&name);
|
||||
if let Some(storage_options) = storage_options {
|
||||
@@ -183,6 +184,9 @@ impl Connection {
|
||||
builder = builder.storage_option(key, value);
|
||||
}
|
||||
}
|
||||
if let Some(index_cache_size) = index_cache_size {
|
||||
builder = builder.index_cache_size(index_cache_size);
|
||||
}
|
||||
let tbl = builder
|
||||
.execute()
|
||||
.await
|
||||
|
||||
@@ -15,8 +15,8 @@
|
||||
use arrow_ipc::writer::FileWriter;
|
||||
use lancedb::ipc::ipc_file_to_batches;
|
||||
use lancedb::table::{
|
||||
AddDataMode, ColumnAlteration as LanceColumnAlteration, NewColumnTransform,
|
||||
Table as LanceDbTable,
|
||||
AddDataMode, ColumnAlteration as LanceColumnAlteration, Duration, NewColumnTransform,
|
||||
OptimizeAction, OptimizeOptions, Table as LanceDbTable,
|
||||
};
|
||||
use napi::bindgen_prelude::*;
|
||||
use napi_derive::napi;
|
||||
@@ -263,6 +263,60 @@ impl Table {
|
||||
self.inner_ref()?.restore().await.default_error()
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub async fn optimize(&self, older_than_ms: Option<i64>) -> napi::Result<OptimizeStats> {
|
||||
let inner = self.inner_ref()?;
|
||||
|
||||
let older_than = if let Some(ms) = older_than_ms {
|
||||
if ms == i64::MIN {
|
||||
return Err(napi::Error::from_reason(format!(
|
||||
"older_than_ms can not be {}",
|
||||
i32::MIN,
|
||||
)));
|
||||
}
|
||||
Duration::try_milliseconds(ms)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let compaction_stats = inner
|
||||
.optimize(OptimizeAction::Compact {
|
||||
options: lancedb::table::CompactionOptions::default(),
|
||||
remap_options: None,
|
||||
})
|
||||
.await
|
||||
.default_error()?
|
||||
.compaction
|
||||
.unwrap();
|
||||
let prune_stats = inner
|
||||
.optimize(OptimizeAction::Prune {
|
||||
older_than,
|
||||
delete_unverified: None,
|
||||
})
|
||||
.await
|
||||
.default_error()?
|
||||
.prune
|
||||
.unwrap();
|
||||
inner
|
||||
.optimize(lancedb::table::OptimizeAction::Index(
|
||||
OptimizeOptions::default(),
|
||||
))
|
||||
.await
|
||||
.default_error()?;
|
||||
Ok(OptimizeStats {
|
||||
compaction: CompactionStats {
|
||||
files_added: compaction_stats.files_added as i64,
|
||||
files_removed: compaction_stats.files_removed as i64,
|
||||
fragments_added: compaction_stats.fragments_added as i64,
|
||||
fragments_removed: compaction_stats.fragments_removed as i64,
|
||||
},
|
||||
prune: RemovalStats {
|
||||
bytes_removed: prune_stats.bytes_removed as i64,
|
||||
old_versions_removed: prune_stats.old_versions as i64,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
#[napi]
|
||||
pub async fn list_indices(&self) -> napi::Result<Vec<IndexConfig>> {
|
||||
Ok(self
|
||||
@@ -298,6 +352,40 @@ impl From<lancedb::index::IndexConfig> for IndexConfig {
|
||||
}
|
||||
}
|
||||
|
||||
/// Statistics about a compaction operation.
|
||||
#[napi(object)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct CompactionStats {
|
||||
/// The number of fragments removed
|
||||
pub fragments_removed: i64,
|
||||
/// The number of new, compacted fragments added
|
||||
pub fragments_added: i64,
|
||||
/// The number of data files removed
|
||||
pub files_removed: i64,
|
||||
/// The number of new, compacted data files added
|
||||
pub files_added: i64,
|
||||
}
|
||||
|
||||
/// Statistics about a cleanup operation
|
||||
#[napi(object)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct RemovalStats {
|
||||
/// The number of bytes removed
|
||||
pub bytes_removed: i64,
|
||||
/// The number of old versions removed
|
||||
pub old_versions_removed: i64,
|
||||
}
|
||||
|
||||
/// Statistics about an optimize operation
|
||||
#[napi(object)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct OptimizeStats {
|
||||
/// Statistics about the compaction operation
|
||||
pub compaction: CompactionStats,
|
||||
/// Statistics about the removal operation
|
||||
pub prune: RemovalStats,
|
||||
}
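Because `#[napi(object)]` camel-cases field names when crossing into JavaScript, the object returned to TypeScript callers has the shape sketched below (illustrative interfaces, not ones exported by the package):

```typescript
// Hypothetical TypeScript view of the napi-exported stats objects.
interface CompactionStatsJs {
  fragmentsRemoved: number;
  fragmentsAdded: number;
  filesRemoved: number;
  filesAdded: number;
}

interface RemovalStatsJs {
  bytesRemoved: number;
  oldVersionsRemoved: number;
}

interface OptimizeStatsJs {
  compaction: CompactionStatsJs;
  prune: RemovalStatsJs;
}
```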
|
||||
|
||||
/// A definition of a column alteration. The alteration changes the column at
|
||||
/// `path` to have the new name `name`, to be nullable if `nullable` is true,
|
||||
/// and to have the data type `data_type`. At least one of `rename` or `nullable`
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
[bumpversion]
|
||||
current_version = 0.6.7
|
||||
commit = True
|
||||
message = [python] Bump version: {current_version} → {new_version}
|
||||
tag = True
|
||||
tag_name = python-v{new_version}
|
||||
|
||||
[bumpversion:file:pyproject.toml]
|
||||
34
python/.bumpversion.toml
Normal file
@@ -0,0 +1,34 @@
|
||||
[tool.bumpversion]
|
||||
current_version = "0.7.0"
|
||||
parse = """(?x)
|
||||
(?P<major>0|[1-9]\\d*)\\.
|
||||
(?P<minor>0|[1-9]\\d*)\\.
|
||||
(?P<patch>0|[1-9]\\d*)
|
||||
(?:-(?P<pre_l>[a-zA-Z-]+)\\.(?P<pre_n>0|[1-9]\\d*))?
|
||||
"""
|
||||
serialize = [
|
||||
"{major}.{minor}.{patch}-{pre_l}.{pre_n}",
|
||||
"{major}.{minor}.{patch}",
|
||||
]
|
||||
search = "{current_version}"
|
||||
replace = "{new_version}"
|
||||
regex = false
|
||||
ignore_missing_version = false
|
||||
ignore_missing_files = false
|
||||
tag = true
|
||||
sign_tags = false
|
||||
tag_name = "python-v{new_version}"
|
||||
tag_message = "Bump version: {current_version} → {new_version}"
|
||||
allow_dirty = true
|
||||
commit = true
|
||||
message = "Bump version: {current_version} → {new_version}"
|
||||
commit_args = ""
|
||||
|
||||
[tool.bumpversion.parts.pre_l]
|
||||
values = ["beta", "final"]
|
||||
optional_value = "final"
|
||||
|
||||
[[tool.bumpversion.files]]
|
||||
filename = "Cargo.toml"
|
||||
search = "\nversion = \"{current_version}\""
|
||||
replace = "\nversion = \"{new_version}\""
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lancedb-python"
|
||||
version = "0.4.10"
|
||||
version = "0.7.0"
|
||||
edition.workspace = true
|
||||
description = "Python bindings for LanceDB"
|
||||
license.workspace = true
|
||||
@@ -14,7 +14,7 @@ name = "_lancedb"
|
||||
crate-type = ["cdylib"]
|
||||
|
||||
[dependencies]
|
||||
arrow = { version = "50.0.0", features = ["pyarrow"] }
|
||||
arrow = { version = "51.0.0", features = ["pyarrow"] }
|
||||
lancedb = { path = "../rust/lancedb" }
|
||||
env_logger = "0.10"
|
||||
pyo3 = { version = "0.20", features = ["extension-module", "abi3-py38"] }
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
[project]
|
||||
name = "lancedb"
|
||||
version = "0.6.7"
|
||||
# version in Cargo.toml
|
||||
dependencies = [
|
||||
"deprecation",
|
||||
"pylance==0.10.10",
|
||||
"pylance==0.11.0",
|
||||
"ratelimiter~=1.0",
|
||||
"requests>=2.31.0",
|
||||
"retry>=0.9.2",
|
||||
"tqdm>=4.27.0",
|
||||
"pydantic>=1.10",
|
||||
"attrs>=21.3.0",
|
||||
"semver>=3.0",
|
||||
"semver",
|
||||
"cachetools",
|
||||
"overrides>=0.7",
|
||||
]
|
||||
@@ -65,7 +65,6 @@ docs = [
|
||||
"mkdocs-jupyter",
|
||||
"mkdocs-material",
|
||||
"mkdocstrings[python]",
|
||||
"mkdocs-ultralytics-plugin==0.0.44",
|
||||
]
|
||||
clip = ["torch", "pillow", "open-clip"]
|
||||
embeddings = [
|
||||
@@ -81,6 +80,7 @@ embeddings = [
|
||||
"boto3>=1.28.57",
|
||||
"awscli>=1.29.57",
|
||||
"botocore>=1.31.57",
|
||||
"ollama",
|
||||
]
|
||||
azure = ["adlfs>=2024.2.0"]
|
||||
|
||||
|
||||
@@ -83,7 +83,7 @@ def connect(
|
||||
|
||||
>>> db = lancedb.connect("s3://my-bucket/lancedb")
|
||||
|
||||
Connect to LancdDB cloud:
|
||||
Connect to LanceDB cloud:
|
||||
|
||||
>>> db = lancedb.connect("db://my_database", api_key="ldb_...")
|
||||
|
||||
@@ -107,6 +107,9 @@ def connect(
|
||||
request_thread_pool=request_thread_pool,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
if kwargs:
|
||||
raise ValueError(f"Unknown keyword arguments: {kwargs}")
|
||||
return LanceDBConnection(uri, read_consistency_interval=read_consistency_interval)
|
||||
|
||||
|
||||
|
||||
@@ -86,3 +86,17 @@ class VectorQuery:
|
||||
def refine_factor(self, refine_factor: int): ...
|
||||
def nprobes(self, nprobes: int): ...
|
||||
def bypass_vector_index(self): ...
|
||||
|
||||
class CompactionStats:
|
||||
fragments_removed: int
|
||||
fragments_added: int
|
||||
files_removed: int
|
||||
files_added: int
|
||||
|
||||
class RemovalStats:
|
||||
bytes_removed: int
|
||||
old_versions_removed: int
|
||||
|
||||
class OptimizeStats:
|
||||
compaction: CompactionStats
|
||||
prune: RemovalStats
|
||||
|
||||
@@ -224,13 +224,23 @@ class DBConnection(EnforceOverrides):
|
||||
def __getitem__(self, name: str) -> LanceTable:
|
||||
return self.open_table(name)
|
||||
|
||||
def open_table(self, name: str) -> Table:
|
||||
def open_table(self, name: str, *, index_cache_size: Optional[int] = None) -> Table:
|
||||
"""Open a Lance Table in the database.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
name: str
|
||||
The name of the table.
|
||||
index_cache_size: int, default 256
|
||||
Set the size of the index cache, specified as a number of entries
|
||||
|
||||
The exact meaning of an "entry" will depend on the type of index:
|
||||
* IVF - there is one entry for each IVF partition
|
||||
* BTREE - there is one entry for the entire index
|
||||
|
||||
This cache applies to the entire opened table, across all indices.
|
||||
Setting this value higher will increase performance on larger datasets
|
||||
at the expense of more RAM
|
||||
|
||||
Returns
|
||||
-------
|
||||
@@ -248,6 +258,18 @@ class DBConnection(EnforceOverrides):
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def rename_table(self, cur_name: str, new_name: str):
|
||||
"""Rename a table in the database.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
cur_name: str
|
||||
The current name of the table.
|
||||
new_name: str
|
||||
The new name of the table.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def drop_database(self):
|
||||
"""
|
||||
Drop database
|
||||
@@ -407,7 +429,9 @@ class LanceDBConnection(DBConnection):
|
||||
return tbl
|
||||
|
||||
@override
|
||||
def open_table(self, name: str) -> LanceTable:
|
||||
def open_table(
|
||||
self, name: str, *, index_cache_size: Optional[int] = None
|
||||
) -> LanceTable:
|
||||
"""Open a table in the database.
|
||||
|
||||
Parameters
|
||||
@@ -419,7 +443,7 @@ class LanceDBConnection(DBConnection):
|
||||
-------
|
||||
A LanceTable object representing the table.
|
||||
"""
|
||||
return LanceTable.open(self, name)
|
||||
return LanceTable.open(self, name, index_cache_size=index_cache_size)
|
||||
|
||||
@override
|
||||
def drop_table(self, name: str, ignore_missing: bool = False):
|
||||
@@ -751,7 +775,10 @@ class AsyncConnection(object):
|
||||
return AsyncTable(new_table)
|
||||
|
||||
async def open_table(
|
||||
self, name: str, storage_options: Optional[Dict[str, str]] = None
|
||||
self,
|
||||
name: str,
|
||||
storage_options: Optional[Dict[str, str]] = None,
|
||||
index_cache_size: Optional[int] = None,
|
||||
) -> Table:
|
||||
"""Open a Lance Table in the database.
|
||||
|
||||
@@ -764,12 +791,22 @@ class AsyncConnection(object):
|
||||
connection will be inherited by the table, but can be overridden here.
|
||||
See available options at
|
||||
https://lancedb.github.io/lancedb/guides/storage/
|
||||
index_cache_size: int, default 256
|
||||
Set the size of the index cache, specified as a number of entries
|
||||
|
||||
The exact meaning of an "entry" will depend on the type of index:
|
||||
* IVF - there is one entry for each IVF partition
|
||||
* BTREE - there is one entry for the entire index
|
||||
|
||||
This cache applies to the entire opened table, across all indices.
|
||||
Setting this value higher will increase performance on larger datasets
|
||||
at the expense of more RAM
|
||||
|
||||
Returns
|
||||
-------
|
||||
A LanceTable object representing the table.
|
||||
"""
|
||||
table = await self._inner.open_table(name, storage_options)
|
||||
table = await self._inner.open_table(name, storage_options, index_cache_size)
|
||||
return AsyncTable(table)
|
||||
|
||||
async def drop_table(self, name: str):
|
||||
|
||||
@@ -16,6 +16,7 @@ from .bedrock import BedRockText
|
||||
from .cohere import CohereEmbeddingFunction
|
||||
from .gemini_text import GeminiText
|
||||
from .instructor import InstructorEmbeddingFunction
|
||||
from .ollama import OllamaEmbeddings
|
||||
from .open_clip import OpenClipEmbeddings
|
||||
from .openai import OpenAIEmbeddings
|
||||
from .registry import EmbeddingFunctionRegistry, get_registry
|
||||
|
||||
69
python/python/lancedb/embeddings/ollama.py
Normal file
@@ -0,0 +1,69 @@
|
||||
# Copyright (c) 2023. LanceDB Developers
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from functools import cached_property
|
||||
from typing import TYPE_CHECKING, List, Optional, Union
|
||||
|
||||
from ..util import attempt_import_or_raise
|
||||
from .base import TextEmbeddingFunction
|
||||
from .registry import register
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import numpy as np
|
||||
|
||||
|
||||
@register("ollama")
|
||||
class OllamaEmbeddings(TextEmbeddingFunction):
|
||||
"""
|
||||
An embedding function that uses Ollama
|
||||
|
||||
https://github.com/ollama/ollama/blob/main/docs/api.md#generate-embeddings
|
||||
https://ollama.com/blog/embedding-models
|
||||
"""
|
||||
|
||||
name: str = "nomic-embed-text"
|
||||
host: str = "http://localhost:11434"
|
||||
options: Optional[dict] = None # type = ollama.Options
|
||||
keep_alive: Optional[Union[float, str]] = None
|
||||
ollama_client_kwargs: Optional[dict] = {}
|
||||
|
||||
def ndims(self):
|
||||
return len(self.generate_embeddings(["foo"])[0])
|
||||
|
||||
def _compute_embedding(self, text):
|
||||
return self._ollama_client.embeddings(
|
||||
model=self.name,
|
||||
prompt=text,
|
||||
options=self.options,
|
||||
keep_alive=self.keep_alive,
|
||||
)["embedding"]
|
||||
|
||||
def generate_embeddings(
|
||||
self, texts: Union[List[str], "np.ndarray"]
|
||||
) -> List["np.array"]:
|
||||
"""
|
||||
Get the embeddings for the given texts
|
||||
|
||||
Parameters
|
||||
----------
|
||||
texts: list[str] or np.ndarray (of str)
|
||||
The texts to embed
|
||||
"""
|
||||
# TODO retry, rate limit, token limit
|
||||
embeddings = [self._compute_embedding(text) for text in texts]
|
||||
return embeddings
|
||||
|
||||
@cached_property
|
||||
def _ollama_client(self):
|
||||
ollama = attempt_import_or_raise("ollama")
|
||||
# ToDo explore ollama.AsyncClient
|
||||
return ollama.Client(host=self.host, **self.ollama_client_kwargs)
|
||||
@@ -255,7 +255,13 @@ def retry_with_exponential_backoff(
|
||||
)
|
||||
|
||||
delay *= exponential_base * (1 + jitter * random.random())
|
||||
logging.info("Retrying in %s seconds...", delay)
|
||||
logging.warning(
|
||||
"Error occurred: %s \n Retrying in %s seconds (retry %s of %s) \n",
|
||||
e,
|
||||
delay,
|
||||
num_retries,
|
||||
max_retries,
|
||||
)
|
||||
time.sleep(delay)
|
||||
|
||||
return wrapper
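The line above the log is where the policy lives: each retry multiplies the delay by the exponential base and by a random jitter factor between 1 and 2. The same policy as a minimal TypeScript sketch, with all names assumed:

```typescript
// Minimal exponential backoff with jitter, mirroring the Python above.
async function withBackoff<T>(
  fn: () => Promise<T>,
  maxRetries = 5,
  initialDelayMs = 1000,
  exponentialBase = 2,
  jitter = true,
): Promise<T> {
  let delay = initialDelayMs;
  for (let attempt = 1; ; attempt++) {
    try {
      return await fn();
    } catch (e) {
      if (attempt >= maxRetries) throw e;
      // delay *= base * (1 + jitter * random()), as in the Python version
      delay *= exponentialBase * (1 + (jitter ? Math.random() : 0));
      console.warn(
        `Error: ${e}; retrying in ${delay} ms (retry ${attempt} of ${maxRetries})`,
      );
      await new Promise((resolve) => setTimeout(resolve, delay));
    }
  }
}
```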
|
||||
|
||||
@@ -37,7 +37,7 @@ import pyarrow as pa
|
||||
import pydantic
|
||||
import semver
|
||||
|
||||
PYDANTIC_VERSION = semver.Version.parse(pydantic.__version__)
|
||||
PYDANTIC_VERSION = semver.parse_version_info(pydantic.__version__)
|
||||
try:
|
||||
from pydantic_core import CoreSchema, core_schema
|
||||
except ImportError:
|
||||
|
||||
@@ -30,6 +30,7 @@ from typing import (
|
||||
import deprecation
|
||||
import numpy as np
|
||||
import pyarrow as pa
|
||||
import pyarrow.fs as pa_fs
|
||||
import pydantic
|
||||
|
||||
from . import __version__
|
||||
@@ -37,7 +38,7 @@ from .arrow import AsyncRecordBatchReader
|
||||
from .common import VEC
|
||||
from .rerankers.base import Reranker
|
||||
from .rerankers.linear_combination import LinearCombinationReranker
|
||||
from .util import safe_import_pandas
|
||||
from .util import fs_from_uri, safe_import_pandas
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import PIL
|
||||
@@ -665,6 +666,14 @@ class LanceFtsQueryBuilder(LanceQueryBuilder):
|
||||
|
||||
# get the index path
|
||||
index_path = self._table._get_fts_index_path()
|
||||
|
||||
# Check that we are on local filesystem
|
||||
fs, _path = fs_from_uri(index_path)
|
||||
if not isinstance(fs, pa_fs.LocalFileSystem):
|
||||
raise NotImplementedError(
|
||||
"Full-text search is only supported on the local filesystem"
|
||||
)
|
||||
|
||||
# check if the index exist
|
||||
if not Path(index_path).exists():
|
||||
raise FileNotFoundError(
|
||||
|
||||
@@ -94,7 +94,7 @@ class RemoteDBConnection(DBConnection):
|
||||
yield item
|
||||
|
||||
@override
|
||||
def open_table(self, name: str) -> Table:
|
||||
def open_table(self, name: str, *, index_cache_size: Optional[int] = None) -> Table:
|
||||
"""Open a Lance Table in the database.
|
||||
|
||||
Parameters
|
||||
@@ -110,6 +110,12 @@ class RemoteDBConnection(DBConnection):
|
||||
|
||||
self._client.mount_retry_adapter_for_table(name)
|
||||
|
||||
if index_cache_size is not None:
|
||||
logging.info(
|
||||
"index_cache_size is ignored in LanceDb Cloud"
|
||||
" (there is no local cache to configure)"
|
||||
)
|
||||
|
||||
# check if table exists
|
||||
if self._table_cache.get(name) is None:
|
||||
self._client.post(f"/v1/table/{name}/describe/")
|
||||
@@ -279,7 +285,25 @@ class RemoteDBConnection(DBConnection):
|
||||
self._client.post(
|
||||
f"/v1/table/{name}/drop/",
|
||||
)
|
||||
self._table_cache.pop(name)
|
||||
self._table_cache.pop(name, default=None)
|
||||
|
||||
@override
|
||||
def rename_table(self, cur_name: str, new_name: str):
|
||||
"""Rename a table in the database.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
cur_name: str
|
||||
The current name of the table.
|
||||
new_name: str
|
||||
The new name of the table.
|
||||
"""
|
||||
self._client.post(
|
||||
f"/v1/table/{cur_name}/rename/",
|
||||
data={"new_table_name": new_name},
|
||||
)
|
||||
self._table_cache.pop(cur_name, default=None)
|
||||
self._table_cache[new_name] = True
|
||||
|
||||
async def close(self):
|
||||
"""Close the connection to the database."""
|
||||
|
||||
@@ -72,7 +72,7 @@ class RemoteTable(Table):
|
||||
return resp
|
||||
|
||||
def index_stats(self, index_uuid: str):
|
||||
"""List all the indices on the table"""
|
||||
"""List all the stats of a specified index"""
|
||||
resp = self._conn._client.post(
|
||||
f"/v1/table/{self._name}/index/{index_uuid}/stats/"
|
||||
)
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import os
|
||||
import semver
|
||||
from functools import cached_property
|
||||
from typing import Union
|
||||
|
||||
@@ -42,6 +43,14 @@ class CohereReranker(Reranker):
|
||||
@cached_property
|
||||
def _client(self):
|
||||
cohere = attempt_import_or_raise("cohere")
|
||||
# ensure version is at least 5.0.0
|
||||
if (
|
||||
hasattr(cohere, "__version__")
|
||||
and semver.compare(cohere.__version__, "5.0.0") < 0
|
||||
):
|
||||
raise ValueError(
|
||||
f"cohere version must be at least 0.5.0, found {cohere.__version__}"
|
||||
)
|
||||
if os.environ.get("COHERE_API_KEY") is None and self.api_key is None:
|
||||
raise ValueError(
|
||||
"COHERE_API_KEY not set. Either set it in your environment or \
|
||||
@@ -51,11 +60,14 @@ class CohereReranker(Reranker):
|
||||
|
||||
def _rerank(self, result_set: pa.Table, query: str):
|
||||
docs = result_set[self.column].to_pylist()
|
||||
results = self._client.rerank(
|
||||
response = self._client.rerank(
|
||||
query=query,
|
||||
documents=docs,
|
||||
top_n=self.top_n,
|
||||
model=self.model_name,
|
||||
)
|
||||
results = (
|
||||
response.results
|
||||
) # returns list (text, idx, relevance) attributes sorted descending by score
|
||||
indices, scores = list(
|
||||
zip(*[(result.index, result.relevance_score) for result in results])
|
||||
|
||||
@@ -58,7 +58,7 @@ if TYPE_CHECKING:
|
||||
import PIL
|
||||
from lance.dataset import CleanupStats, ReaderLike
|
||||
|
||||
from ._lancedb import Table as LanceDBTable
|
||||
from ._lancedb import Table as LanceDBTable, OptimizeStats
|
||||
from .db import LanceDBConnection
|
||||
from .index import BTree, IndexConfig, IvfPq
|
||||
|
||||
@@ -806,6 +806,7 @@ class _LanceLatestDatasetRef(_LanceDatasetRef):
|
||||
"""Reference to the latest version of a LanceDataset."""
|
||||
|
||||
uri: str
|
||||
index_cache_size: Optional[int] = None
|
||||
read_consistency_interval: Optional[timedelta] = None
|
||||
last_consistency_check: Optional[float] = None
|
||||
_dataset: Optional[LanceDataset] = None
|
||||
@@ -813,7 +814,9 @@ class _LanceLatestDatasetRef(_LanceDatasetRef):
|
||||
@property
|
||||
def dataset(self) -> LanceDataset:
|
||||
if not self._dataset:
|
||||
self._dataset = lance.dataset(self.uri)
|
||||
self._dataset = lance.dataset(
|
||||
self.uri, index_cache_size=self.index_cache_size
|
||||
)
|
||||
self.last_consistency_check = time.monotonic()
|
||||
elif self.read_consistency_interval is not None:
|
||||
now = time.monotonic()
|
||||
@@ -842,12 +845,15 @@ class _LanceLatestDatasetRef(_LanceDatasetRef):
|
||||
class _LanceTimeTravelRef(_LanceDatasetRef):
|
||||
uri: str
|
||||
version: int
|
||||
index_cache_size: Optional[int] = None
|
||||
_dataset: Optional[LanceDataset] = None
|
||||
|
||||
@property
|
||||
def dataset(self) -> LanceDataset:
|
||||
if not self._dataset:
|
||||
self._dataset = lance.dataset(self.uri, version=self.version)
|
||||
self._dataset = lance.dataset(
|
||||
self.uri, version=self.version, index_cache_size=self.index_cache_size
|
||||
)
|
||||
return self._dataset
|
||||
|
||||
@dataset.setter
|
||||
@@ -884,6 +890,8 @@ class LanceTable(Table):
|
||||
connection: "LanceDBConnection",
|
||||
name: str,
|
||||
version: Optional[int] = None,
|
||||
*,
|
||||
index_cache_size: Optional[int] = None,
|
||||
):
|
||||
self._conn = connection
|
||||
self.name = name
|
||||
@@ -892,11 +900,13 @@ class LanceTable(Table):
|
||||
self._ref = _LanceTimeTravelRef(
|
||||
uri=self._dataset_uri,
|
||||
version=version,
|
||||
index_cache_size=index_cache_size,
|
||||
)
|
||||
else:
|
||||
self._ref = _LanceLatestDatasetRef(
|
||||
uri=self._dataset_uri,
|
||||
read_consistency_interval=connection.read_consistency_interval,
|
||||
index_cache_size=index_cache_size,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@@ -1199,6 +1209,11 @@ class LanceTable(Table):
|
||||
raise ValueError("Index already exists. Use replace=True to overwrite.")
|
||||
fs.delete_dir(path)
|
||||
|
||||
if not isinstance(fs, pa_fs.LocalFileSystem):
|
||||
raise NotImplementedError(
|
||||
"Full-text search is only supported on the local filesystem"
|
||||
)
|
||||
|
||||
index = create_index(
|
||||
self._get_fts_index_path(),
|
||||
field_names,
|
||||
@@ -2362,6 +2377,49 @@ class AsyncTable:
|
||||
"""
|
||||
await self._inner.restore()
|
||||
|
||||
async def optimize(
|
||||
self, *, cleanup_older_than: Optional[timedelta] = None
|
||||
) -> OptimizeStats:
|
||||
"""
|
||||
Optimize the on-disk data and indices for better performance.
|
||||
|
||||
Modeled after ``VACUUM`` in PostgreSQL.
|
||||
|
||||
Optimization covers three operations:
|
||||
|
||||
* Compaction: Merges small files into larger ones
|
||||
* Prune: Removes old versions of the dataset
|
||||
* Index: Optimizes the indices, adding new data to existing indices
|
||||
|
||||
Parameters
|
||||
----------
|
||||
cleanup_older_than: timedelta, optional, default 7 days
|
||||
All files belonging to versions older than this will be removed. Set
|
||||
to 0 days to remove all versions except the latest. The latest version
|
||||
is never removed.
|
||||
|
||||
Experimental API
|
||||
----------------
|
||||
|
||||
The optimization process is undergoing active development and may change.
|
||||
Our goal with these changes is to improve the performance of optimization and
|
||||
reduce the complexity.
|
||||
|
||||
That being said, it is essential today to run optimize if you want the best
|
||||
performance. It should be stable and safe to use in production, but it is our
|
||||
hope that the API may be simplified (or not even need to be called) in the
|
||||
future.
|
||||
|
||||
The frequency an application should call optimize is based on the frequency of
|
||||
data modifications. If data is frequently added, deleted, or updated then
|
||||
optimize should be run frequently. A good rule of thumb is to run optimize if
|
||||
you have added or modified 100,000 or more records or run more than 20 data
|
||||
modification operations.
|
||||
"""
|
||||
if cleanup_older_than is not None:
|
||||
cleanup_older_than = round(cleanup_older_than.total_seconds() * 1000)
|
||||
return await self._inner.optimize(cleanup_older_than)
|
||||
|
||||
async def list_indices(self) -> IndexConfig:
|
||||
"""
|
||||
List all indices that have been created with Self::create_index
|
||||
|
||||
@@ -368,6 +368,15 @@ async def test_create_exist_ok_async(tmp_path):
    # await db.create_table("test", schema=bad_schema, exist_ok=True)


def test_open_table_sync(tmp_path):
    db = lancedb.connect(tmp_path)
    db.create_table("test", data=[{"id": 0}])
    assert db.open_table("test").count_rows() == 1
    assert db.open_table("test", index_cache_size=0).count_rows() == 1
    with pytest.raises(FileNotFoundError, match="does not exist"):
        db.open_table("does_not_exist")


@pytest.mark.asyncio
async def test_open_table(tmp_path):
    db = await lancedb.connect_async(tmp_path)
@@ -397,6 +406,10 @@ async def test_open_table(tmp_path):
        }
    )

    # No way to verify this yet, but at least make sure we
    # can pass the parameter
    await db.open_table("test", index_cache_size=0)

    with pytest.raises(ValueError, match="was not found"):
        await db.open_table("does_not_exist")
@@ -45,7 +45,9 @@ except Exception:


@pytest.mark.slow
@pytest.mark.parametrize("alias", ["sentence-transformers", "openai", "huggingface"])
@pytest.mark.parametrize(
    "alias", ["sentence-transformers", "openai", "huggingface", "ollama"]
)
def test_basic_text_embeddings(alias, tmp_path):
    db = lancedb.connect(tmp_path)
    registry = get_registry()
@@ -213,7 +213,7 @@ def test_syntax(table):
    # https://github.com/lancedb/lancedb/issues/769
    table.create_fts_index("text")
    with pytest.raises(ValueError, match="Syntax Error"):
        table.search("they could have been dogs OR cats").limit(10).to_list()
        table.search("they could have been dogs OR").limit(10).to_list()

    # these should work
@@ -1025,3 +1025,29 @@ async def test_time_travel(db_async: AsyncConnection):
    # Can't use restore if not checked out
    with pytest.raises(ValueError, match="checkout before running restore"):
        await table.restore()


@pytest.mark.asyncio
async def test_optimize(db_async: AsyncConnection):
    table = await db_async.create_table(
        "test",
        data=[{"x": [1]}],
    )
    await table.add(
        data=[
            {"x": [2]},
        ],
    )
    stats = await table.optimize()
    assert stats.compaction.files_removed == 2
    assert stats.compaction.files_added == 1
    assert stats.compaction.fragments_added == 1
    assert stats.compaction.fragments_removed == 2
    assert stats.prune.bytes_removed == 0
    assert stats.prune.old_versions_removed == 0

    stats = await table.optimize(cleanup_older_than=timedelta(seconds=0))
    assert stats.prune.bytes_removed > 0
    assert stats.prune.old_versions_removed == 3

    assert await table.query().to_arrow() == pa.table({"x": [[1], [2]]})
@@ -134,17 +134,21 @@ impl Connection {
        })
    }

    #[pyo3(signature = (name, storage_options = None))]
    #[pyo3(signature = (name, storage_options = None, index_cache_size = None))]
    pub fn open_table(
        self_: PyRef<'_, Self>,
        name: String,
        storage_options: Option<HashMap<String, String>>,
        index_cache_size: Option<u32>,
    ) -> PyResult<&PyAny> {
        let inner = self_.get_inner()?.clone();
        let mut builder = inner.open_table(name);
        if let Some(storage_options) = storage_options {
            builder = builder.storage_options(storage_options);
        }
        if let Some(index_cache_size) = index_cache_size {
            builder = builder.index_cache_size(index_cache_size);
        }
        future_into_py(self_.py(), async move {
            let table = builder.execute().await.infer_error()?;
            Ok(Table::new(table))
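On the Python side the new parameter is a keyword argument; a minimal sketch (path and table name hypothetical), matching the `index_cache_size` tests earlier in this diff:

```python
import lancedb


async def open_with_cache_limit():
    db = await lancedb.connect_async("data/sample-lancedb")  # hypothetical path
    # index_cache_size flows through the pyo3 signature above into the
    # Rust open-table builder via builder.index_cache_size(...).
    return await db.open_table("demo", index_cache_size=0)
```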
@@ -35,21 +35,16 @@ impl<T> PythonErrorExt<T> for std::result::Result<T, LanceError> {
        match &self {
            Ok(_) => Ok(self.unwrap()),
            Err(err) => match err {
                LanceError::InvalidInput { .. } => self.value_error(),
                LanceError::InvalidTableName { .. } => self.value_error(),
                LanceError::TableNotFound { .. } => self.value_error(),
                LanceError::Schema { .. } => self.value_error(),
                LanceError::InvalidInput { .. }
                | LanceError::InvalidTableName { .. }
                | LanceError::TableNotFound { .. }
                | LanceError::Schema { .. } => self.value_error(),
                LanceError::CreateDir { .. } => self.os_error(),
                LanceError::TableAlreadyExists { .. } => self.runtime_error(),
                LanceError::ObjectStore { .. } => Err(PyIOError::new_err(err.to_string())),
                LanceError::Lance { .. } => self.runtime_error(),
                LanceError::Runtime { .. } => self.runtime_error(),
                LanceError::Http { .. } => self.runtime_error(),
                LanceError::Arrow { .. } => self.runtime_error(),
                LanceError::NotSupported { .. } => {
                    Err(PyNotImplementedError::new_err(err.to_string()))
                }
                LanceError::Other { .. } => self.runtime_error(),
                _ => self.runtime_error(),
            },
        }
    }
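With this mapping, Rust errors surface as familiar Python exception types; a hedged sketch (path hypothetical) based on the async tests earlier in this diff:

```python
import lancedb


async def show_error_mapping():
    db = await lancedb.connect_async("data/sample-lancedb")  # hypothetical path
    try:
        # TableNotFound maps to ValueError via value_error() above.
        await db.open_table("does_not_exist")
    except ValueError as err:
        print("not found:", err)
    except NotImplementedError as err:
        # NotSupported maps to PyNotImplementedError.
        print("unsupported:", err)
```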
@@ -2,7 +2,9 @@ use arrow::{
    ffi_stream::ArrowArrayStreamReader,
    pyarrow::{FromPyArrow, ToPyArrow},
};
use lancedb::table::{AddDataMode, Table as LanceDbTable};
use lancedb::table::{
    AddDataMode, Duration, OptimizeAction, OptimizeOptions, Table as LanceDbTable,
};
use pyo3::{
    exceptions::{PyRuntimeError, PyValueError},
    pyclass, pymethods,
@@ -17,6 +19,40 @@ use crate::{
    query::Query,
};

/// Statistics about a compaction operation.
#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct CompactionStats {
    /// The number of fragments removed
    pub fragments_removed: u64,
    /// The number of new, compacted fragments added
    pub fragments_added: u64,
    /// The number of data files removed
    pub files_removed: u64,
    /// The number of new, compacted data files added
    pub files_added: u64,
}

/// Statistics about a cleanup operation
#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct RemovalStats {
    /// The number of bytes removed
    pub bytes_removed: u64,
    /// The number of old versions removed
    pub old_versions_removed: u64,
}

/// Statistics about an optimize operation
#[pyclass(get_all)]
#[derive(Clone, Debug)]
pub struct OptimizeStats {
    /// Statistics about the compaction operation
    pub compaction: CompactionStats,
    /// Statistics about the removal operation
    pub prune: RemovalStats,
}

#[pyclass]
pub struct Table {
    // We keep a copy of the name to use if the inner table is dropped
@@ -191,4 +227,58 @@ impl Table {
    pub fn query(&self) -> Query {
        Query::new(self.inner_ref().unwrap().query())
    }

    pub fn optimize(self_: PyRef<'_, Self>, cleanup_since_ms: Option<u64>) -> PyResult<&PyAny> {
        let inner = self_.inner_ref()?.clone();
        let older_than = if let Some(ms) = cleanup_since_ms {
            if ms > i64::MAX as u64 {
                return Err(PyValueError::new_err(format!(
                    "cleanup_since_ms must be no greater than {}",
                    i64::MAX
                )));
            }
            Duration::try_milliseconds(ms as i64)
        } else {
            None
        };
        future_into_py(self_.py(), async move {
            // Step 1: compact small data files into larger ones.
            let compaction_stats = inner
                .optimize(OptimizeAction::Compact {
                    options: lancedb::table::CompactionOptions::default(),
                    remap_options: None,
                })
                .await
                .infer_error()?
                .compaction
                .unwrap();
            // Step 2: prune versions older than the cutoff.
            let prune_stats = inner
                .optimize(OptimizeAction::Prune {
                    older_than,
                    delete_unverified: None,
                })
                .await
                .infer_error()?
                .prune
                .unwrap();
            // Step 3: fold newly added data into the existing indices.
            inner
                .optimize(lancedb::table::OptimizeAction::Index(
                    OptimizeOptions::default(),
                ))
                .await
                .infer_error()?;
            Ok(OptimizeStats {
                compaction: CompactionStats {
                    files_added: compaction_stats.files_added as u64,
                    files_removed: compaction_stats.files_removed as u64,
                    fragments_added: compaction_stats.fragments_added as u64,
                    fragments_removed: compaction_stats.fragments_removed as u64,
                },
                prune: RemovalStats {
                    bytes_removed: prune_stats.bytes_removed,
                    old_versions_removed: prune_stats.old_versions,
                },
            })
        })
    }
}
release_process.md (new file, 87 lines)
@@ -0,0 +1,87 @@
# Release process

There are five packages we release in total. Three are the `lancedb` packages
for Python, Rust, and Node.js. The other two are the legacy `vectordb`
packages for Rust and Node.js.

The Python package is versioned and released separately from the Rust and Node.js
ones. For Rust and Node.js, the release process is shared between `lancedb` and
`vectordb` for now.

## Preview releases

LanceDB has full releases about every 2 weeks, but in between we make frequent
preview releases. These are released as `0.x.y.betaN` versions. They receive the
same level of testing as normal releases and give you access to the latest
features. However, we do not guarantee that preview releases will remain
available more than 6 months after they are released; we may delete them from
the packaging index after a while. Once your application is stable, we recommend
switching to full releases, which will never be removed from package indexes.

## Making releases

The release process uses a handful of GitHub actions to automate it.

```text
┌─────────────────────┐
│Create Release Commit│
└─┬───────────────────┘
  │                          ┌────────────┐  ┌──►Python GH Release
  ├──►(tag) python-vX.Y.Z ──►│PyPI Publish├──┤
  │                          └────────────┘  └──►Python Wheels
  │
  │                          ┌───────────┐
  └──►(tag) vX.Y.Z ───┬─────►│NPM Publish├───┬──►Rust/Node GH Release
                      │      └───────────┘   │
                      │                      └──►NPM Packages
                      │      ┌─────────────┐
                      └─────►│Cargo Publish├────►Cargo Release
                             └─────────────┘
```

To start a release, trigger a `Create Release Commit` action from
[the workflows page](https://github.com/lancedb/lancedb/actions/workflows/make-release-commit.yml)
(click on "Run workflow").

* **For a preview release**, leave the default parameters.
* **For a stable release**, set the `release_type` input to `stable`.

> [!IMPORTANT]
> If there was a breaking change since the last stable release, and we haven't
> done so yet, we should increment the minor version. The CI will detect if this
> is needed and fail the `Create Release Commit` job. To fix, select the
> "bump minor version" option.

## Breaking changes

We try to avoid breaking changes, but sometimes they are necessary. When there
are breaking changes, we will increment the minor version. (This is valid
semantic versioning because we are still in `0.x` versions.)

When a PR makes a breaking change, the PR author should mark the PR using the
conventional commit markers: either an exclamation mark after the type
(such as `feat!: change signature of func`) or `BREAKING CHANGE` in the
body of the PR. A CI job will add a `breaking-change` label to the PR, which is
what the CI will ultimately use to determine whether the minor version should
be incremented.
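For example, a PR titled `feat!: remove the deprecated connect options` (a hypothetical change) would be labeled `breaking-change` automatically, and the next stable release would then bump the minor version.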
> [!IMPORTANT]
> Reviewers should check that PRs with breaking changes receive the `breaking-change`
> label. If a PR is missing the label, please add it, even after it has been merged.
> This label is used in the release process.

Some things that are considered breaking changes:

* Upgrading `lance` to a new minor version. Minor version bumps in Lance are
  considered breaking changes during `0.x` releases. This can change behavior
  in LanceDB.
* Upgrading a dependency pin that is in the Rust API. In particular, upgrading
  `DataFusion` and `Arrow` are breaking changes. Changing a dependency that is
  not exposed in our public API is not considered a breaking change.
* Changing the signature of a public function or method.
* Removing a public function or method.

We do make exceptions for APIs that are marked as experimental. These are APIs
that are under active development and not in major use. Such changes should not
receive the `breaking-change` label.
@@ -1,6 +1,6 @@
[package]
name = "lancedb-node"
version = "0.4.17"
version = "0.4.20"
description = "Serverless, low-latency vector database for AI applications"
license.workspace = true
edition.workspace = true
@@ -19,10 +19,12 @@ use snafu::Snafu;

#[derive(Debug, Snafu)]
pub enum Error {
    #[allow(dead_code)]
    #[snafu(display("column '{name}' is missing"))]
    MissingColumn { name: String },
    #[snafu(display("{name}: {message}"))]
    OutOfRange { name: String, message: String },
    #[allow(dead_code)]
    #[snafu(display("{index_type} is not a valid index type"))]
    InvalidIndexType { index_type: String },
@@ -59,7 +59,7 @@ fn database_new(mut cx: FunctionContext) -> JsResult<JsPromise> {
    for handle in storage_options_js {
        let obj = handle.downcast::<JsArray, _>(&mut cx).unwrap();
        let key = obj.get::<JsString, _, _>(&mut cx, 0)?.value(&mut cx);
        let value = obj.get::<JsString, _, _>(&mut cx, 0)?.value(&mut cx);
        let value = obj.get::<JsString, _, _>(&mut cx, 1)?.value(&mut cx);

        storage_options.push((key, value));
    }
Some files were not shown because too many files have changed in this diff.